diff --git a/DGL_testing/nn_data/.DS_Store b/DGL_testing/nn_data/.DS_Store
deleted file mode 100644
index f9d96db..0000000
Binary files a/DGL_testing/nn_data/.DS_Store and /dev/null differ
diff --git a/DGL_testing/nn_data/both_PudgeFiveLayer_1024/0409_175055/config.json b/DGL_testing/nn_data/both_PudgeFiveLayer_1024/0409_175055/config.json
deleted file mode 100755
index fc05eb4..0000000
--- a/DGL_testing/nn_data/both_PudgeFiveLayer_1024/0409_175055/config.json
+++ /dev/null
@@ -1,73 +0,0 @@
-{
-    "name": "both_PudgeFiveLayer_1024",
-    "n_gpu": 1,
-    "seed": 21,
-    "arch": {
-        "type": "PudgeFiveLayer",
-        "args": {
-            "input_dim": 4,
-            "output_dim": 2,
-            "max_nodes": 1024
-        }
-    },
-    "data_loader": {
-        "type": "BellmanDataLoader",
-        "args": {
-            "data_dir": "data/",
-            "input_csv_file": "DatasetSmall/AgentData.csv",
-            "output_csv_file": "DatasetSmall/ResultsFinal.csv",
-            "categories": {
-                "N": 0.0,
-                "L": 0.25,
-                "H": 0.45
-            },
-            "output_variable": "both",
-            "cons_scale": true,
-            "batch_size": 64,
-            "shuffle": true,
-            "validation_split": 0.2,
-            "num_workers": 2,
-            "scale": 49.73094382723671
-        }
-    },
-    "optimizer": {
-        "type": "Adam",
-        "args": {
-            "lr": 0.001,
-            "weight_decay": 0,
-            "amsgrad": true
-        }
-    },
-    "loss": "mse_loss",
-    "metrics": [
-        "model_mae",
-        "model_max_ae",
-        "consumption_mae",
-        "consumption_max_ae",
-        "i_a_mae",
-        "i_a_max_ae",
-        "n_wrong_i_a",
-        "n_exceeding_i_a_k"
-    ],
-    "possible_i_a": [
-        0.0,
-        0.25,
-        0.45
-    ],
-    "lr_scheduler": {
-        "type": "StepLR",
-        "args": {
-            "step_size": 50,
-            "gamma": 0.1
-        }
-    },
-    "trainer": {
-        "epochs": 200,
-        "save_dir": "saved/both/S21",
-        "save_period": 50,
-        "verbosity": 1,
-        "monitor": "min val_loss",
-        "early_stop": 15,
-        "tensorboard": true
-    }
-}
\ No newline at end of file
diff --git a/DGL_testing/nn_data/both_PudgeFiveLayer_1024/0409_175055/model_best.pth b/DGL_testing/nn_data/both_PudgeFiveLayer_1024/0409_175055/model_best.pth
deleted file mode 100755
index 33fd146..0000000
Binary files a/DGL_testing/nn_data/both_PudgeFiveLayer_1024/0409_175055/model_best.pth and /dev/null differ
diff --git a/DGL_testing/nn_data/both_PudgeSixLayer_1024/.DS_Store b/DGL_testing/nn_data/both_PudgeSixLayer_1024/.DS_Store
deleted file mode 100644
index 5008ddf..0000000
Binary files a/DGL_testing/nn_data/both_PudgeSixLayer_1024/.DS_Store and /dev/null differ
diff --git a/DGL_testing/nn_data/both_PudgeSixLayer_1024/0409_190204/config.json b/DGL_testing/nn_data/both_PudgeSixLayer_1024/0409_190204/config.json
deleted file mode 100755
index e357a5f..0000000
--- a/DGL_testing/nn_data/both_PudgeSixLayer_1024/0409_190204/config.json
+++ /dev/null
@@ -1,73 +0,0 @@
-{
-    "name": "both_PudgeSixLayer_1024",
-    "n_gpu": 1,
-    "seed": 21,
-    "arch": {
-        "type": "PudgeSixLayer",
-        "args": {
-            "input_dim": 4,
-            "output_dim": 2,
-            "max_nodes": 1024
-        }
-    },
-    "data_loader": {
-        "type": "BellmanDataLoader",
-        "args": {
-            "data_dir": "data/",
-            "input_csv_file": "DatasetSmall/AgentData.csv",
-            "output_csv_file": "DatasetSmall/ResultsFinal.csv",
-            "categories": {
-                "N": 0.0,
-                "L": 0.25,
-                "H": 0.45
-            },
-            "output_variable": "both",
-            "cons_scale": true,
-            "batch_size": 64,
-            "shuffle": true,
-            "validation_split": 0.2,
-            "num_workers": 2,
-            "scale": 49.73094382723671
-        }
-    },
-    "optimizer": {
-        "type": "Adam",
-        "args": {
-            "lr": 0.001,
-            "weight_decay": 0,
-            "amsgrad": true
-        }
-    },
-    "loss": "mse_loss",
-    "metrics": [
-        "model_mae",
-        "model_max_ae",
-        "consumption_mae",
-        "consumption_max_ae",
-        "i_a_mae",
-        "i_a_max_ae",
-        "n_wrong_i_a",
-        "n_exceeding_i_a_k"
-    ],
-    "possible_i_a": [
-        0.0,
-        0.25,
-        0.45
-    ],
-    "lr_scheduler": {
-        "type": "StepLR",
-        "args": {
-            "step_size": 50,
-            "gamma": 0.1
-        }
-    },
-    "trainer": {
-        "epochs": 200,
-        "save_dir": "saved/both/S21",
-        "save_period": 50,
-        "verbosity": 1,
-        "monitor": "min val_loss",
-        "early_stop": 15,
-        "tensorboard": true
-    }
-}
\ No newline at end of file
diff --git a/DGL_testing/nn_data/both_PudgeSixLayer_1024/0409_190204/model_best.pth b/DGL_testing/nn_data/both_PudgeSixLayer_1024/0409_190204/model_best.pth
deleted file mode 100755
index f6988d8..0000000
Binary files a/DGL_testing/nn_data/both_PudgeSixLayer_1024/0409_190204/model_best.pth and /dev/null differ
diff --git a/DGL_testing/nn_data/both_PudgeSixLayer_1024/0723_110813/README.txt b/DGL_testing/nn_data/both_PudgeSixLayer_1024/0723_110813/README.txt
deleted file mode 100644
index 6b4dd8a..0000000
--- a/DGL_testing/nn_data/both_PudgeSixLayer_1024/0723_110813/README.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-
-This model was trained on 23 July after a change was made to the Bellman equation (not to shock all assets but just those carried over).
-This model was selected as the superior architecture and combination of hyperparameters produced the closest match to the results of the iterative equation.
-It is to be used with the nn_bellman_past_shock_consumption method.
-Fun note: Said arch/combination is identical to the superior arrangement resulting from the previous grid search for the first version of the Bellman equation.
--VMG (Victoria.Garibay@gmail.com)
\ No newline at end of file
diff --git a/DGL_testing/nn_data/both_PudgeSixLayer_1024/0723_110813/config.json b/DGL_testing/nn_data/both_PudgeSixLayer_1024/0723_110813/config.json
deleted file mode 100755
index f950c7c..0000000
--- a/DGL_testing/nn_data/both_PudgeSixLayer_1024/0723_110813/config.json
+++ /dev/null
@@ -1,73 +0,0 @@
-{
-    "name": "both_PudgeSixLayer_1024",
-    "n_gpu": 1,
-    "seed": 21,
-    "arch": {
-        "type": "PudgeSixLayer",
-        "args": {
-            "input_dim": 4,
-            "output_dim": 2,
-            "max_nodes": 1024
-        }
-    },
-    "data_loader": {
-        "type": "BellmanDataLoader",
-        "args": {
-            "data_dir": "data/",
-            "input_csv_file": "DatasetJuly/AgentData-Updated19July2024.csv",
-            "output_csv_file": "DatasetJuly/ResultsFinal-Updated19July2024.csv",
-            "categories": {
-                "N": 0.0,
-                "L": 0.25,
-                "H": 0.45
-            },
-            "output_variable": "both",
-            "cons_scale": true,
-            "batch_size": 64,
-            "shuffle": true,
-            "validation_split": 0.2,
-            "num_workers": 2,
-            "scale": 9.732827674445847
-        }
-    },
-    "optimizer": {
-        "type": "Adam",
-        "args": {
-            "lr": 0.001,
-            "weight_decay": 0,
-            "amsgrad": true
-        }
-    },
-    "loss": "mse_loss",
-    "metrics": [
-        "model_mae",
-        "model_max_ae",
-        "consumption_mae",
-        "consumption_max_ae",
-        "i_a_mae",
-        "i_a_max_ae",
-        "n_wrong_i_a",
-        "n_exceeding_i_a_k"
-    ],
-    "possible_i_a": [
-        0.0,
-        0.25,
-        0.45
-    ],
-    "lr_scheduler": {
-        "type": "StepLR",
-        "args": {
-            "step_size": 50,
-            "gamma": 0.1
-        }
-    },
-    "trainer": {
-        "epochs": 200,
-        "save_dir": "saved/both/S21",
-        "save_period": 50,
-        "verbosity": 1,
-        "monitor": "min val_loss",
-        "early_stop": 15,
-        "tensorboard": true
-    }
-}
\ No newline at end of file
diff --git a/DGL_testing/nn_data/both_PudgeSixLayer_1024/0723_110813/model_best.pth b/DGL_testing/nn_data/both_PudgeSixLayer_1024/0723_110813/model_best.pth
deleted file mode 100755
index 6804c10..0000000
Binary files a/DGL_testing/nn_data/both_PudgeSixLayer_1024/0723_110813/model_best.pth and /dev/null differ
diff --git a/Sensitivity_Analysis/testing_sa.ipynb b/Sensitivity_Analysis/testing_sa.ipynb
index f5b1979..7e72040 100644
--- a/Sensitivity_Analysis/testing_sa.ipynb
+++ b/Sensitivity_Analysis/testing_sa.ipynb
@@ -28,7 +28,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "model = dgl_ptm.PovertyTrapModel(model_identifier='test_SA')"
+    "model = dgl_ptm.PovertyTrapModel(model_identifier='SA_test')"
    ]
   },
   {
@@ -41,7 +41,7 @@
      "output_type": "stream",
      "text": [
       "20\n",
-      "edata=None epath='test_SA/edge_data' format='xarray' mode='w' ndata=['all_except', ['a_table']] npath='test_SA/agent_data.zarr' capital_method='past_shock' wealth_method='weighted_transfer' income_method='income_generation' consume_method='past_shock_bellman_consumption' nn_path='/nn_data/both_PudgeSixLayer_1024/0723_110813/model_best.pth' capital_update_method='default' characteristic_distance=3.33 homophily_parameter=1 adapt_m=tensor([0.0000, 0.5000, 0.9000]) adapt_cost=tensor([0.0000, 0.2500, 0.4500]) depreciation=0.6 discount=0.95 m_theta_dist=MThetaDist(type='beta', parameters=tensor([4.1300, 0.0700]), round=False, decimals=None) tech_gamma=tensor([0.3000, 0.3500, 0.4500]) tech_cost=tensor([0.0000, 0.1500, 0.6500]) del_method='size' del_threshold=0.5 noise_ratio=0.05 local_ratio=0.25 truncation_weight=1e-10 step_type='ptm' data_collection_period=1 data_collection_list=None\n"
+      "edata=None epath='data_mode_B/edge_data' format='xarray' mode='w' ndata=['all_except', ['a_table', 'gamma', 'cost', 'zeros', 'ones']] npath='data_mode_B/agent_data.zarr' capital_method='past_shock' wealth_method='weighted_transfer' income_method='income_generation' consume_method='past_shock_bellman_consumption' nn_path='/nn_data/both_PudgeSixLayer_1024/0723_110813/model_best.pth' capital_update_method='default' characteristic_distance=3.33 homophily_parameter=1 adapt_m=tensor([0.0000, 0.5000, 0.9000]) adapt_cost=tensor([0.0000, 0.2500, 0.4500]) depreciation=0.08 discount=0.95 m_theta_dist=MThetaDist(type='beta', parameters=tensor([4.1300, 0.0700]), round=False, decimals=None) tech_gamma=tensor([0.3000, 0.3500, 0.4500]) tech_cost=tensor([0.0000, 0.1500, 0.6500]) del_method='size' del_threshold='balance' noise_ratio=0.05 local_ratio=0.25 truncation_weight=1e-10 step_type='ptm' data_collection_period=1 data_collection_list=None\n"
      ]
     }
    ],
@@ -49,9 +47,7 @@
    "import torch\n",
    "model.set_model_parameters(**{'number_agents': 20 , \n",
    " 'seed':42,\n",
-    " 'gamma_vals':torch.tensor([0.3,0.45]) , #for pseudo income\n",
    " 'sigma_dist': {'type':'uniform','parameters':[0.05,1.94],'round':True,'decimals':1},\n",
-    " 'cost_vals': torch.tensor([0.,0.45]), #for pseudo income\n",
    " 'a_theta_dist': {'type':'uniform','parameters':[0.1,1],'round':False,'decimals':None},\n",
    " 'sensitivity_dist':{'type':'uniform','parameters':[0.0,1],'round':False,'decimals':None},\n",
    " 'capital_dist': {'type':'uniform','parameters':[0.1,10.],'round':False,'decimals':None}, \n",
@@ -63,7 +61,7 @@
    " 'step_target':20,\n",
    " 'steering_parameters':{'npath':'./agent_data.zarr',\n",
    " 'epath':'./edge_data', \n",
-    " 'ndata':['all_except',['a_table']],\n",
+    " 'ndata':[['degree','i_a','income','net_trade', 'tech_index','theta', 'wealth', 'wealth_consumption','weighted_degree'],['initial_only',['alpha','lambda','sigma','sensitivity']]],\n",
    " 'edata':None,\n",
    " 'mode':'w',\n",
    " 'capital_method':'past_shock',\n",
@@ -167,17 +165,152 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 8,
    "metadata": {},
-   "outputs": [],
-   "source": []
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "/\n",
+      " ├── alpha (20, 20) float32\n",
+      " ├── degree (20, 20) int64\n",
+      " ├── disposable_wealth (20, 20) float32\n",
+      " ├── i_a (20, 20) float32\n",
+      " ├── income (20, 20) float32\n",
+      " ├── lambda (20, 20) float32\n",
+      " ├── m (20, 20) float32\n",
+      " ├── net_trade (20, 20) float32\n",
+      " ├── sensitivity (20, 20) float32\n",
+      " ├── sigma (20, 20) float32\n",
+      " ├── tec (20, 20) float32\n",
+      " ├── tech_index (20, 20) int64\n",
+      " ├── theta (20, 20) float32\n",
+      " ├── total_weight (20, 20) float32\n",
+      " ├── wealth (20, 20) float32\n",
+      " ├── wealth_consumption (20, 20) float32\n",
+      " └── weighted_degree (20, 20) float32\n",
+      "/\n",
+      " ├── alpha (20, 20) float32\n",
+      " ├── degree (20, 20) int64\n",
+      " ├── disposable_wealth (20, 20) float32\n",
+      " ├── i_a (20, 20) float32\n",
+      " ├── income (20, 20) float32\n",
+      " ├── lambda (20, 20) float32\n",
+      " ├── m (20, 20) float32\n",
+      " ├── net_trade (20, 20) float32\n",
+      " ├── sensitivity (20, 20) float32\n",
+      " ├── sigma (20, 20) float32\n",
+      " ├── tec (20, 20) float32\n",
+      " ├── tech_index (20, 20) int64\n",
+      " ├── theta (20, 20) float32\n",
+      " ├── total_weight (20, 20) float32\n",
+      " ├── wealth (20, 20) float32\n",
+      " ├── wealth_consumption (20, 20) float32\n",
+      " └── weighted_degree (20, 20) float32\n",
+      "/\n",
+      " ├── alpha (20, 1) float32\n",
+      " ├── lambda (20, 1) float32\n",
+      " ├── sensitivity (20, 1) float32\n",
+      " └── sigma (20, 1) float32\n",
+      "[[ 8.834466 9.08635 9.338289 9.593652 9.849971 10.110308\n",
+      " 10.391772 10.678069 10.966155 11.2272625 11.444739 11.652809\n",
+      " 11.841147 12.007421 12.149493 12.272718 12.378548 12.468998\n",
+      " 12.545652 12.610272 ]\n",
+      " [ 9.158539 9.821314 10.101976 10.331101 10.51959 10.656298\n",
+      " 10.761174 10.803776 10.802509 11.096467 11.523632 11.989528\n",
+      " 12.45638 12.935303 13.408632 13.864526 14.2867985 14.658942\n",
+      " 14.982725 15.277907 ]]\n",
+      "[[ 8.834466 9.08635 9.338289 9.593652 9.849971 10.110308\n",
+      " 10.391772 10.678069 10.966155 11.2272625 11.444739 11.652809\n",
+      " 11.841147 12.007421 12.149493 12.272718 12.378548 12.468998\n",
+      " 12.545652 12.610272 ]\n",
+      " [ 9.158539 9.821314 10.101976 10.331101 10.51959 10.656298\n",
+      " 10.761174 10.803776 10.802509 11.096467 11.523632 11.989528\n",
+      " 12.45638 12.935303 13.408632 13.864526 14.2867985 14.658942\n",
+      " 14.982725 15.277907 ]]\n",
+      "[[1.0238984 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984\n",
+      " 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984\n",
+      " 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984]\n",
+      " [1.1597955 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955\n",
+      " 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955\n",
+      " 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955]]\n",
+      "[[1.0238984 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984\n",
+      " 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984\n",
+      " 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984 1.0238984]\n",
+      " [1.1597955 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955\n",
+      " 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955\n",
+      " 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955 1.1597955]]\n"
+     ]
+    }
+   ],
+   "source": [
+    "import zarr\n",
+    "import os\n",
+    "zarr_file_one = zarr.open('/Users/victoria/Documents/Scripts/Python/DGL-PTM/Sensitivity_Analysis/data_mode_A/agent_data.zarr', mode='r')\n",
+    "zarr_file_two = zarr.open('/Users/victoria/Documents/Scripts/Python/DGL-PTM/Sensitivity_Analysis/data_mode_B/agent_data.zarr', mode='r')\n",
+    "zarr_file_three = zarr.open('/Users/victoria/Documents/Scripts/Python/DGL-PTM/Sensitivity_Analysis/data_mode_B/agent_data_initial.zarr', mode='r')\n",
+    "print(zarr_file_one.tree())\n",
+    "print(zarr_file_two.tree())\n",
+    "print(zarr_file_three.tree())\n",
+    "\n",
+    "#attributes=os.listdir('/Users/victoria/Documents/Scripts/Python/DGL-PTM/Sensitivity_Analysis/test_SA/agent_data.zarr')\n",
+    "#attributes = [item for item in attributes if item not in ['.zattrs','.zmetadata','.zgroup']]\n",
+    "#attributes = {key: None for key in attributes}\n",
+    "#print(attributes)\n",
+    "print(zarr_file_one['wealth'][0:2])\n",
+    "print(zarr_file_two['wealth'][0:2])\n",
+    "\n",
+    "print(zarr_file_one['alpha'][0:2])\n",
+    "print(zarr_file_two['alpha'][0:2])\n",
+    "#Unnecessary:'a_table','m','tec','gamma','cost','zeros','ones', 'total_weight', \"disposable_wealth\"\n",
+    "#One-Time: 'alpha',\"lambda\",\"sigma\",\"sensitivity\"\n",
+    "#Every-Time: 'degree','i_a','income','net_trade', 'tech_index','theta', 'wealth', 'wealth_consumption','weighted_degree'\n"
+   ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 7,
    "metadata": {},
-   "outputs": [],
-   "source": []
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "['degree', 'i_a', 'income', 'net_trade', 'tech_index', 'theta', 'wealth', 'wealth_consumption', 'weighted_degree']\n"
+     ]
+    }
+   ],
+   "source": [
+    "ndata = ['all']\n",
+    "ndata = ['all_except',['a_table','m','tec','gamma','cost','zeros','ones', 'total_weight', \"disposable_wealth\"]]\n",
+    "ndata = [['all_except',['a_table','m','tec','gamma','cost','zeros','ones', 'total_weight', \"disposable_wealth\"]], [\"initial_only\",['alpha',\"lambda\",\"sigma\",\"sensitivity\"]]]\n",
+    "ndata = [['degree','i_a','income','net_trade', 'tech_index','theta', 'wealth', 'wealth_consumption','weighted_degree'], [\"initial_only\",['alpha',\"lambda\",\"sigma\",\"sensitivity\"]]]\n",
+    "\n",
+    "timestep = 10\n",
+    "\n",
+    "if ndata == ['all']:\n",
+    "    ndata = list(attributes.keys())\n",
+    "elif ndata[0] == 'all_except':\n",
+    "    ndata = list(attributes.keys() - ndata[1])\n",
+    "elif sum(1 for item in ndata if isinstance(item, list)) > 1:\n",
+    "    ndata_list = ndata\n",
+    "    initial_only=[]\n",
+    "    for specification in ndata_list:\n",
+    "        if specification == ['all']:\n",
+    "            raise ValueError('Use of \"all\" is not compatible with multiple data collection specification lists.')\n",
+    "        elif specification[0] == 'all_except':\n",
+    "            ndata = list(attributes.keys() - specification[1])\n",
+    "        elif specification[0] == 'initial_only':\n",
+    "            if timestep == 0:\n",
+    "                initial_only = specification[1]\n",
+    "        else:\n",
+    "            ndata = specification\n",
+    "    ndata=ndata+initial_only\n",
+    "\n",
+    "\n",
+    "print(ndata)\n"
+   ]
  }
 ],
 "metadata": {
diff --git a/dgl_ptm/dgl_ptm/config.py b/dgl_ptm/dgl_ptm/config.py
index 545318d..86904ee 100644
--- a/dgl_ptm/dgl_ptm/config.py
+++ b/dgl_ptm/dgl_ptm/config.py
@@ -44,7 +44,7 @@ class SteeringParams(BaseModel):
     epath: str = "./edge_data"
     format: str = "xarray"
     mode: str = "w"
-    ndata: list[str | list[str]] | None = ["all_except", ["a_table"]]
+    ndata: list[str | list[str | list[str]]] | None = ["all_except", ["a_table"]]
     npath: str = "./agent_data.zarr"
     capital_method: str = "present_shock"
     wealth_method: str = "singular_transfer"
diff --git a/dgl_ptm/dgl_ptm/model/data_collection.py b/dgl_ptm/dgl_ptm/model/data_collection.py
index 5a7f28a..a5bd33d 100644
--- a/dgl_ptm/dgl_ptm/model/data_collection.py
+++ b/dgl_ptm/dgl_ptm/model/data_collection.py
@@ -22,7 +22,13 @@
        npath: path to store node data.
        epath: path to store edge data with one file for each timestep.
        ndata: node data properties to be stored.
-            ['all'] implies all node properties will be saved
+            [list] specifies node properties to be stored at every time step
+            ['all'] implies all node properties will be saved at every time step
+            ['all_except', [list]] specifies that all but the listed properties will be saved
+            ['initial_only', [list]] specifies properties saved at timestep 0 only
+            [list] and ['all_except', [list]] can be used with [initial_only, [list]]
+            formatted as [[specification list],[specification list]]
+            ['all'] should not be used together with any other specification
        edata: edge data properties to be stored.
            ['all'] implies all edge properties will be saved
        format: storage format
@@ -31,8 +37,25 @@
    """
    if ndata == ['all']:
        ndata = list(agent_graph.node_attr_schemes().keys())
-    if ndata[0] == 'all_except':
+    elif ndata[0] == 'all_except':
        ndata = list(agent_graph.node_attr_schemes().keys() - ndata[1])
+    elif sum(1 for item in ndata if isinstance(item, list)) > 1:
+        ndata_list = ndata
+        initial_only=[]
+        for specification in ndata_list:
+            if specification == ['all']:
+                raise ValueError('Use of "all" is not compatible with multiple data collection specification lists.')
+            elif specification[0] == 'all_except':
+                ndata = list(agent_graph.node_attr_schemes().keys() - specification[1])
+            elif specification[0] == 'initial_only':
+                if timestep == 0:
+                    initial_only = specification[1]
+            else:
+                ndata = specification
+
+    else:
+        raise ValueError('Invalid node data collection specification.')
+
    if edata == ['all']:
        edata = list(agent_graph.edge_attr_schemes().keys())
@@ -40,6 +63,9 @@
        if timestep == 0:
            print("ATTENTION: No node data collection requested for this simulation!")
    else:
+        if timestep == 0 and initial_only != []:
+            initialpath = npath.split('.')[0] + '_initial.zarr'
+            _node_property_collector(agent_graph, initialpath, initial_only, timestep, format, mode)
        _node_property_collector(agent_graph, npath, ndata, timestep, format, mode)
    if edata == None:
        if timestep == 0:
diff --git a/dgl_ptm/dgl_ptm/model/initialize_model.py b/dgl_ptm/dgl_ptm/model/initialize_model.py
index 74da8d5..b235c56 100644
--- a/dgl_ptm/dgl_ptm/model/initialize_model.py
+++ b/dgl_ptm/dgl_ptm/model/initialize_model.py
@@ -369,6 +369,7 @@ def initialize_agent_properties(self):
        self.graph.ndata['wealth_consumption'] = torch.zeros(self.graph.num_nodes())
        self.graph.ndata['i_a'] = torch.zeros(self.graph.num_nodes())
        self.graph.ndata['m'] = torch.zeros(self.graph.num_nodes())
+        self.graph.ndata['net_trade'] = torch.zeros(self.graph.num_nodes())
        self.graph.ndata['degree'] = torch.zeros(self.graph.num_nodes())
        self.graph.ndata['weighted_degree'] = torch.zeros(self.graph.num_nodes())
        self.graph.ndata['zeros'] = torch.zeros(self.graph.num_nodes())
diff --git a/dgl_ptm/dgl_ptm/model/step.py b/dgl_ptm/dgl_ptm/model/step.py
index 0e89f80..eb0ccc2 100644
--- a/dgl_ptm/dgl_ptm/model/step.py
+++ b/dgl_ptm/dgl_ptm/model/step.py
@@ -132,6 +132,10 @@ def ptm_step(agent_graph, device, timestep, params):
    link_deletion(
        agent_graph, method = params['del_method'], threshold = threshold
    )

+    #Update agent degree and weighted degree
+    agent_update(agent_graph, method='degree')
+    agent_update(agent_graph, method='weighted_degree')
+
    #Wealth transfer
    trade_money(agent_graph, device, method = params['wealth_method'])
@@ -145,10 +149,7 @@
    agent_update(
        agent_graph, params, device=device, timestep=timestep, method ='consumption'
    )

-    #Update agent degree and weighted degree
-    agent_update(agent_graph, method='degree')
-    agent_update(agent_graph, method='weighted_degree')

    # Data can be collected periodically (every X steps) and/or at specified time steps.
    do_periodical_data_collection = (
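
The node-data specification handling introduced in data_collection.py above can be exercised in isolation. Below is a minimal standalone sketch of that resolution step, assuming a plain dictionary of node attributes; the helper name resolve_ndata and the attribute names are illustrative only and are not part of the dgl_ptm API.

# Standalone sketch of resolving an ndata specification into per-step and
# timestep-0-only property lists, mirroring the logic added above.
def resolve_ndata(ndata, attributes, timestep):
    """Return (properties stored every step, properties stored at timestep 0 only)."""
    initial_only = []
    if ndata == ['all']:
        ndata = list(attributes.keys())
    elif ndata[0] == 'all_except':
        ndata = list(attributes.keys() - ndata[1])
    elif sum(1 for item in ndata if isinstance(item, list)) > 1:
        # Multiple specification lists, e.g. [[list], ['initial_only', [list]]].
        for specification in ndata:
            if specification == ['all']:
                raise ValueError('"all" is not compatible with multiple specification lists.')
            elif specification[0] == 'all_except':
                ndata = list(attributes.keys() - specification[1])
            elif specification[0] == 'initial_only':
                if timestep == 0:
                    initial_only = specification[1]
            else:
                ndata = specification
    return ndata, initial_only


# Hypothetical attribute names, for illustration only.
attributes = {name: None for name in ['alpha', 'degree', 'income', 'wealth', 'a_table', 'sigma']}
spec = [['degree', 'income', 'wealth'], ['initial_only', ['alpha', 'sigma']]]
print(resolve_ndata(spec, attributes, timestep=0))   # per-step list plus the one-time list
print(resolve_ndata(spec, attributes, timestep=10))  # one-time list is empty after step 0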