-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathlsnm
956 lines (765 loc) · 47.9 KB
/
lsnm
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
#!/usr/bin/env python
# ============================================================================
#
# PUBLIC DOMAIN NOTICE
#
# National Institute on Deafness and Other Communication Disorders
#
# This software/database is a "United States Government Work" under the
# terms of the United States Copyright Act. It was written as part of
# the author's official duties as a United States Government employee and
# thus cannot be copyrighted. This software/database is freely available
# to the public for use. The NIDCD and the U.S. Government have not placed
# any restriction on its use or reproduction.
#
# Although all reasonable efforts have been taken to ensure the accuracy
# and reliability of the software and data, the NIDCD and the U.S. Government
# do not and cannot warrant the performance or results that may be obtained
# by using this software or data. The NIDCD and the U.S. Government disclaim
# all warranties, express or implied, including warranties of performance,
# merchantability or fitness for any particular purpose.
#
# Please cite the author in any work or product based on this material.
#
# ==========================================================================
# ***************************************************************************
#
# Large-Scale Neural Modeling software (LSNM)
#
# Section on Brain Imaging and Modeling
# Voice, Speech and Language Branch
# National Institute on Deafness and Other Communication Disorders
# National Institutes of Health
#
# This file (lsnm.py) was created on January 26 2017.
#
#
# Author: Antonio Ulloa. Last updated by Antonio Ulloa on February 6 2023.
#
# Based on computer code originally developed by Malle Tagamets and
# Barry Horwitz (Tagamets and Horwitz, 1998)
# **************************************************************************/
# lsnm.py
#
# Simulates neural dynamics using Wilson-Cowan neuronal population model
#
# THIS IS THE COMMAND-LINE VERSION OF "sim.py"
# import regular expression modules (useful for reading weight files)
import re
# import random function modules
import random as rdm
# import math function modules
import math
# import json module for storing output data files
import json
# import time module (needed for recording start and end simulation times in log file
import time as time_module
try:
# import TVB modules only if simulation requires TVB connectome
# the following modules are imported from TVB library
from tvb.simulator.lab import *
import tvb.datatypes.time_series
from tvb.simulator.plot.tools import *
import tvb.simulator.plot.timeseries_interactive as ts_int
# end of TVB modules import
except ImportError:
pass
# import 'numpy' module, which contains useful matrix functions
import numpy as np
# module to import external or user created modules into current script
import importlib as il
# import 'sys' module, which gives you access to file read/write functions
import sys
# import module that parses command line input parameters
import argparse
import importlib
def run(model, weights_list, script, tvb_link):
start_time = time_module.asctime(time_module.localtime(time_module.time()))
print ('\rStart Time: ', start_time)
print ('\rBuilding network...')
if tvb_link == None:
useTVBConnectome=False
else:
useTVBConnectome=True
embedding_file=tvb_link
generateSubject = False
neural_net=""
global noise
# load a TVB simulation of a 998-node brain and uses it to provide variability
# to an LSNM visual model network. It runs a simulation of the LSNM visual
# network and writes out neural activities for each LSNM node and -relevant-
# TVB nodes. Plots output as well.
# define a flag that tells the network whether to send feedback connections
# from LSNM to TVB
FEEDBACK = True
# define a number that the simulator will use to generate new subjects. If this
# option is checked at simulation time, the simulator with multiply the connection
# weights given by a random amount of between the number given and 1.0
subject_variation = 0.98
# define how many milliseconds there are in each simulation timestep
timesteps_to_time_constant = 5.0
# define white matter transmission speed in mm/ms for TVB simulation
TVB_speed = 4.0
# define global coupling strength as in Sanz-Leon et al (2015), figure 17,
# 3rd column, 3rd row
TVB_global_coupling_strength = 0.0042
# declare a variable that describes number of nodes in TVB connectome
TVB_number_of_nodes = 998
# declare a file name for the output file where the neural network structure will be
# stored (modules and weights among modules)
neural_network = 'neuralnet.json'
# declare a file name for the output file that contains simulation log
log_file = 'log.txt'
# the following are the weights used among excitatory and inhibitory populations
# in the TVB's implementation of the Wilson-Cowan equations. These values were
# taken from the default values in the TVB source code in script "models.npy"
# The values are also in Table 11 Column a of Sanz-Leon et al (2015). Note that
# the subscripts in Sanz-Leon's notation are reversed, thus w_ei, in her notation,
# means a weight from inhibitory to excitatory. In our notation, W_ei means a
# weight from excitatory to inhibitory.
#
# The reason we re-defined the weights below is to be able to calculate local
# synaptic activity at each timestep, as the TVB original source code does not
# calculate synaptic activities
w_ee = 12.0
w_ii = 11.0
w_ei = 13.0
w_ie = 4.0
# now load white matter connectivity (998 ROI matrix from TVB demo set, AKA Hagmann's connectome)
if useTVBConnectome == True:
WC = models.WilsonCowan(variables_of_interest=["E", "I"])
white_matter = connectivity.Connectivity.from_file("connectivity_998.zip")
# Define the transmission speed of white matter tracts (4 mm/ms)
white_matter.speed = numpy.array([TVB_speed])
# Define the coupling function between white matter tracts and brain regions
white_matter_coupling = coupling.Linear(a=TVB_global_coupling_strength)
#Initialize an Integrator for TVB
euler_int = integrators.EulerStochastic(dt=5, noise=noise.Additive(nsig=0.01))
euler_int.configure()
# Define a monitor to be used for TVB simulation (i.e., which simulated data is
# going to be collected
what_to_watch = monitors.Raw()
# Initialize a TVB simulator
sim = simulator.Simulator(model=WC, connectivity=white_matter,
coupling=white_matter_coupling,
integrator=euler_int, monitors=what_to_watch)
sim.configure()
# To maintain consistency with Husain et al (2004) and Tagamets and Horwitz (1998),
# we are assuming that each simulation timestep is equivalent to 5 milliseconds
# of real time.
# The TVB brain areas where our LSNM units are going to be embedded are
# contained in a file called "lsnm_tvb_link.txt"
# create a python dictionary of LSNM modules and the location of the corresponding
# TVB node in which the TVB node is to be embedded. In other words, the closest TVB node
# is used as a 'host' node to embed a given LSNM module
lsnm_tvb_link = {}
# open the input file containing LSNM <-> TVB link declarations, then
# load the file into a python list of lists and close file safely
with open(embedding_file) as f:
for line in f:
(key, val) = line.split()
lsnm_tvb_link[key] = int(val)
print ('\r LSNM <-> TVB Link: ', lsnm_tvb_link)
# create two arrays to store synaptic activity for each and all TVB nodes,
# one for absolute values of synaptic activities (for fMRI computation):
tvb_abs_syna = []
# ... and the other one for signed values of synaptic activity (for MEG computation):
tvb_signed_syna = []
# also, create an array to store electrical activity for all TVB nodes
tvb_elec = []
# create and initialize array to store synaptic activity for all TVB nodes, excitatory
# and inhibitory parts.
# The synaptic activity for each node is zero at first, then it accumulates values
# (integration) during a given number of timesteps. Every number of timesteps
# (given by 'synaptic_interval'), the array below is re-initialized to zero.
# One array is for accumulating the absolute value of synaptic activity (for BOLD computation):
current_tvb_abs_syn = [ [0.0]*TVB_number_of_nodes for _ in range(2) ]
# ... and another array for accumulating the signed values of synaptic activity (for MEG computation):
current_tvb_signed_syn = [ [0.0]*TVB_number_of_nodes for _ in range(2) ]
# number of units in each LSNM sub-module
n = 81
# declare a gain for the link from TVB to LSNM (around which normally distributed
# random numbers will be generated). I obtained this number of diving the TVB gain used
# within the connectome nodes by 81 (# of units in each LSNM module).
# Please note that each module can have a different number of 9x9 or 1x81 matrices, and
# so that needs to be taken into account as well by multiplying the number of units in
# a matrix times the number of modules contained in an embedded brain region.
lsnm_tvb_gain = TVB_global_coupling_strength / n
# declare an integration interval for the 'integrated' synaptic activity,
# for fMRI computation, in number of timesteps.
# The same variable is used to know how often we are going to write to
# output files
synaptic_interval = 10
######### THE FOLLOWING SIMULATES LSNM NETWORK ########################
# initialize an empty list to store ALL of the modules of the LSNM neural network
# NOTE: This is the main data structure holding all of the LSNM network values
# at each timestep, including neural activity, connections weights, neural
# population model parameters, synaptic activity, module dimensions, among others.
# if no model and weights list were specified, upload the whole network (modules and weights)
# from neural network file. Otherwise, upload modules from model file and weights from weights
# files given by the weights list file and build a neural network from scratch
if model == "" and weights_list == "":
nn_file = open(neural_net, 'r')
print ('\rReading neural network from file...')
try:
modules = json.load(nn_file)
finally:
nn_file.close()
else:
modules = []
# open the input file containing module declarations (i.e., the 'model'), then
# load the file into a python list of lists and close file safely
f = open(model, 'r')
try:
modules = [line.split() for line in f]
finally:
f.close()
# convert ALL module dimensions to integers since we will need those numbers
# later
for module in modules:
module[1] = int(module[1])
module[2] = int(module[2])
# convert ALL parameters in the modules to float since we will need to use those
# to solve Wilson-Cowan equations
for module in modules:
module[4] = float(module[4])
module[5] = float(module[5])
module[6] = float(module[6])
module[7] = float(module[7])
module[8] = float(module[8])
module[9] = float(module[9])
# add a list of units to each module, using the module dimensions specified
# in the input file (x_dim * y_dim) and initialize all units in each module to 'initial_value'
# It also adds three extra elements per each unit to store (1) sum of all incoming activity,
# (2) sum of inbititory, and (3) sum
# of excitatory activity, at the current time step. It also add an empty list, '[]', to store
# list of outgoing weights
for module in modules:
# remove initial value from the list
initial_value = module.pop()
x_dim = module[1]
y_dim = module[2]
# create a matrix for each unit in the module, to contain unit value,
# total sum of inputs, sum of excitatory inputs, sum of inhibitory inputs,
# and connection weights
unit_matrix = [[[initial_value, 0.0, 0.0, 0.0, []] for x in range(y_dim)] for y in range(x_dim)]
# now append that matrix to the current module
module.append(unit_matrix)
# now turn the list modules into a Python dictionary so we can access each module using the
# module name as key (this makes index 0 dissapear and shifts all other list indexes by 1)
# Therefore, the key to the dictionary 'modules' is now the name of the LSNM module
# The indexes of each module list are as follows:
# 0: module's X dimension (number of columns)
# 1: module's Y dimension (number of rows)
# 2: activation rule (neural population equation) or 'clamp' (constant value)
# 3: Wilson-Cowan parameter 'threshold'
# 4: Wilson-Cowan parameter 'Delta'
# 5: Wilson-Cowan parameter 'delta'
# 6: Wilson-Cowan parameter 'K'
# 7: Wilson-Cowan parameter 'N'
# 8: A python list of lists of X x Y elements containing the following elements:
# 0: neural activity of current unit
# 1: Sum of all inputs to current unit
# 2: Sum of excitatory inputs to current unit
# 3: Sum of inhibitory inputs to current unit
# 4: a Python list of lists containing all outgoing connections arising from current unit, There are as
# many elements as outgoing connection weights and each element contains the following:
# 0: destination module (where is the connection going to)
# 1: X coordinate of location of destination unit
# 2: Y coordinate of location of destination unit
# 3: Connection weight
modules = {m[0]: m[1:] for m in modules}
# read file that contains list of weight files, store the list of files in a python list,
# and close the file safely
f = open(weights_list, 'r')
try:
weight_files = [line.strip() for line in f]
finally:
f.close()
# build a dictionary of replacements for parsing the weight files
replacements = {'Connect': '',
'From:': '',
'(':'[',
')':']',
'{':'[',
'}':']',
'|':''}
# the following variable counts the total number of synapses in the network (for
# informational purposes
synapse_count = 0
# open each weight file in the list of weight files, one by one, and transfer weights
# from those files to each unit in the module list
# Note: file f is closed automatically at the end of 'with' since block 'with' is a
# context manager for file I/O
for file in weight_files:
with open(file) as f:
# read the whole file and store it in a string
whole_thing = f.read()
# find which module is connected to which module
module_connection = re.search(r'Connect\((.+?),(.+?)\)', whole_thing)
# get rid of white spaces from origin and destination modules
origin_module = module_connection.group(1).strip()
destination_module = module_connection.group(2).strip()
# gets rid of C-style comments at the beginning of weight files
whole_thing = re.sub(re.compile('%.*?\n'), '', whole_thing)
# removes all white spaces (space, tab, newline, etc) from weight files
whole_thing = ''.join(whole_thing.split())
# replaces Malle-style language with python lists characters
for i, j in replacements.items():
whole_thing = whole_thing.replace(i, j)
# now add commas between pairs of brackets
whole_thing = whole_thing.replace('][', '],[')
# now insert commas between right brackets and numbers (temporary hack!)
whole_thing = whole_thing.replace(']0', '],0')
whole_thing = whole_thing.replace(']1', '],1')
whole_thing = whole_thing.replace(']-', '],-')
# add extra string delimiters to origin_module and destination_module so
# that they can be recognized as python "strings" when the list or lists
# is formed
whole_thing = whole_thing.replace(origin_module+','+destination_module,
"'"+origin_module+"','"+destination_module+"'", 1)
# now convert the whole thing into a python list of lists, using Python's
# own interpreter
whole_thing = eval(whole_thing)
# remove [origin_module, destination_module] from list of connections
whole_thing = whole_thing[1]
# now groups items in the form: [(origin_unit), [[[destination_unit], weight],
# ..., [[destination_unit_2], weight_2]])]
whole_thing = list(zip(whole_thing, whole_thing[1:]))[::2]
# insert [destination_module, x_dest, y_dest, weight] in the corresponding origin
# unit location of the modules list while adjusting (x_dest, y_dest) coordinates
# to a zero-based format (as used in Python)
for connection in whole_thing:
for destination in connection[1]:
# now we decide whether the weights will be multiplied by a random amount
# varying between that amount and 1.0 in order to generate a new subject
if generateSubject == True:
connectionWeight = destination[1] * rdm.uniform(subject_variation, 1)
else:
connectionWeight = destination[1]
modules[origin_module][8][connection[0][0]-1][connection[0][1]-1][4].append (
[destination_module, # insert name of destination module
destination[0][0]-1, # insert x coordinate of destination unit
destination[0][1]-1, # insert y coordinate of destination unit
connectionWeight]) # insert connection weight
synapse_count += 1
# the following files store values over time of all units (electrical activity,
# synaptic activity, to output data files in text format
fs_neuronal = []
fs_abs_syn = []
fs_signed_syn = []
# open one output file per module to record electrical, absolute synaptic activity (used for fMRI BOLD),
# and signed synaptic activity (used for MEG source activity computation)
for module in modules.keys():
# open one output file per module
fs_neuronal.append(open(module + '.out', 'w'))
fs_abs_syn.append(open(module + '_abs_syn.out', 'w'))
fs_signed_syn.append(open(module + '_signed_syn.out', 'w'))
# create a dictionary so that each module name is associated with one output file
fs_dict_neuronal = dict(zip(modules.keys(), fs_neuronal))
fs_dict_abs_syn = dict(zip(modules.keys(), fs_abs_syn))
fs_dict_signed_syn= dict(zip(modules.keys(), fs_signed_syn))
# import the experimental script and store in a vaiable to use later
experiment_script = importlib.import_module(script)
# open a file where we will dump the whole data structure (model and weights) in case it needs
# to be used later, for inpection and/or visualization of neural network. We chose to use JSON
# for this, due to its interoperability with other computer languages and other operating
# systems.
nn_file = open(neural_network, 'w')
print ('\rSaving neural network to file...')
try:
json.dump(modules, nn_file)
finally:
nn_file.close()
# initialize timestep counter for LSNM timesteps
t = 0
# declare random number generator
random_state = np.random.RandomState()
# define length of TVB simulation in ms
TVB_simulation_length = experiment_script.LSNM_simulation_time * timesteps_to_time_constant
# initialize number of timesteps for simulation
sim_percentage = 100.0/experiment_script.LSNM_simulation_time
# run the simulation for the number of timesteps given
print ('\rRunning simulation...')
if useTVBConnectome:
# the following 'for loop' is the main loop of the TVB simulation with the parameters
# defined above. Note that the LSNM simulator is literally embedded into the TVB
# simulation and both run concurrently, timestep by timestep.
for raw in sim(simulation_length=TVB_simulation_length, random_state=random_state.get_state()):
# convert current TVB connectome electrical activity to a numpy array
RawData = numpy.array(raw[0][1])
# check script to see if there are any event to be presented to the LSNM
# network at current timestep t
current_event=simulation_events.get(str(t))
# then, introduce the event (if any was found)!
# Note that 'modules' is defined within 'sim.py', whereas 'script_params' is
# defined within the simulation script uploaded at runtime
if current_event is not None:
current_event(modules, script_params)
# The following 'for loop' computes sum of excitatory and sum of inhibitory activities
# at destination nodes using destination units and connecting weights provided
for m in modules.keys():
for x in range(modules[m][0]):
for y in range(modules[m][1]):
# we are going to do the following only for those units in the network that
# have weights that project to other units elsewhere
# extract value of origin unit (unit projecting weights elsewhere)
origin_unit = modules[m][8][x][y][0]
for w in modules[m][8][x][y][4]:
# First, find outgoing weights for all destination units and (except
# for those that do not
# have outgoing weights, in which case do nothing) compute weight * value
# at destination units
dest_module = w[0]
x_dest = w[1]
y_dest = w[2]
weight = w[3]
value_x_weight = origin_unit * weight
# Now, accumulate (i.e., 'integrate') & store those values at the
# destination units data structure,
# to be used later during neural activity computation.
# Note: Keep track of inhibitory and excitatory input summation
# separately, as shown below:
if value_x_weight > 0:
modules[dest_module][8][x_dest][y_dest][2] += value_x_weight
else:
modules[dest_module][8][x_dest][y_dest][3] += value_x_weight
# ... but also keep track of the total input summation, as shown
# below. The reason for this is that we need the input summation
# to each neuronal population unit AT EACH TIME STEP, as well as
# the excitatory and inhibitory input summations accumulated OVER
# A NUMBER OF TIMESTEPS (that number is usually 10). We call such
# accumulation of inputs
# over a number of timesteps the 'integrated synaptic activity'
# and it is used to compute fMRI and MEG.
modules[dest_module][8][x_dest][y_dest][1] += value_x_weight
# the following calculates (and integrates) synaptic activity at each TVB node
# at the current timestep
for tvb_node in range(TVB_number_of_nodes):
# rectifies or 'clamps' current tvb values to edges [0,1]
current_tvb_neu=np.clip(raw[0][1], 0, 1)
# extract TVB node numbers that are conected to TVB node above
tvb_conn = np.nonzero(white_matter.weights[tvb_node])
# extract the numpy array from it
tvb_conn = tvb_conn[0]
# build a numpy array of weights from TVB connections to the current TVB node
wm = white_matter.weights[tvb_node][tvb_conn]
# build a numpy array of origin TVB nodes connected to current TVB node
tvb_origin_node = raw[0][1][0][tvb_conn]
# clips node value to edges of interval [0, 1]
tvb_origin_node = np.clip(tvb_origin_node, 0, 1)
# do the following for each white matter connection to current TVB node:
# multiply all incoming connection weights times the value of the corresponding
# node that is sending that connection to the current TVB node
for cxn in range(tvb_conn.size):
# update synaptic activity in excitatory population, by multiplying each
# incoming connection weight times the value of the node sending such
# connection
current_tvb_abs_syn[0][tvb_node] += wm[cxn] * tvb_origin_node[cxn][0]
current_tvb_signed_syn[0][tvb_node] += wm[cxn] * tvb_origin_node[cxn][0]
# now, add the influence of the local (within the same node) connectivity
# onto the synaptic activity of the current node, excitatory population, USING ABSOLUTE
# VALUES OF SYNAPTIC ACTIVITIES (for BOLD computation).
current_tvb_abs_syn[0][tvb_node] += w_ee * current_tvb_neu[0][tvb_node] + w_ie * current_tvb_neu[1][tvb_node]
# ... but also do a sum of synaptic activities using the sign of the synaptic activity (for MEG
# computation:
current_tvb_signed_syn[0][tvb_node] += w_ee * current_tvb_neu[0][tvb_node] - w_ie * current_tvb_neu[1][tvb_node]
# now, update synaptic activity in inhibitory population
# Please note that we are assuming that there are no incoming connections
# to inhibitory nodes from other nodes (in the Virtual Brain nodes).
# Therefore, only the local (within the same node) connections are
# considered (store both aboslute synaptic and signed synaptic):
current_tvb_abs_syn[1][tvb_node] += w_ii * current_tvb_neu[1][tvb_node] + w_ei * current_tvb_neu[0][tvb_node]
current_tvb_signed_syn[1][tvb_node] += w_ei * current_tvb_neu[0][tvb_node] - w_ii * current_tvb_neu[1][tvb_node]
            # the following 'for loop' goes through each LSNM module that is 'embedded' into The Virtual
            # Brain, and adds the product of each TVB -> LSNM unit value times their respective
            # connection weight (provided by white matter tract weights) to the sum of excitatory
            # activities of each embedded LSNM unit. THIS IS THE STEP
            # WHERE THE INTERACTION BETWEEN LSNM AND TVB HAPPENS. THAT INTERACTION GOES IN BOTH DIRECTIONS,
            # I.E., TVB -> LSNM and LSNM -> TVB. There is a constant called "FEEDBACK" that has to be
            # set to TRUE at the beginning of this file for the connections TVB->LSNM to occur.
            # Please note that whereas the previous 'for loop' goes through the network updating
            # unit sum of activities at destination units, the 'for loop' below goes through the
            # network updating the sum of activities of the CURRENT unit
            # we are going to do the following only for those modules/units in the LSNM
            # network that have connections from TVB nodes
            for m in lsnm_tvb_link.keys():
                # NOTE(review): dict.has_key() exists only in Python 2; use 'm in modules'
                # if this script is ever ported to Python 3.
                if modules.has_key(m):
                    # extract TVB node number where module is embedded
                    tvb_node = lsnm_tvb_link[m]
                    # extract TVB node numbers that are connected to TVB node above
                    tvb_conn = np.nonzero(white_matter.weights[tvb_node])
                    # extract the numpy array from it (np.nonzero returns a tuple of arrays,
                    # one per dimension; we only need the first)
                    tvb_conn = tvb_conn[0]
                    # build a numpy array of weights from TVB connections to TVB homologous nodes
                    wm = white_matter.weights[tvb_node][tvb_conn]
                    # now go through all the units of current LSNM modules...
                    for x in range(modules[m][0]):
                        for y in range(modules[m][1]):
                            # do the following for each white matter connection to current LSNM unit
                            for i in range(tvb_conn.size):
                                # extract the value of TVB node from preprocessed raw time series
                                # uncomment if you want to use preprocessed TVB timeseries
                                #value = RawData[t, 0, tvb_conn[i]]
                                # extract value of TVB node
                                value = RawData[0, tvb_conn[i]]
                                value = value[0]
                                # clips TVB node value to edges of interval [0, 1]
                                value = max(value, 0)
                                value = min(value, 1)
                                # calculate an incoming weight by applying a gain into the LSNM unit.
                                # the gain applied is a random number with a gaussian distribution
                                # centered around the value of lsnm_tvb_gain
                                weight = wm[i] * rdm.gauss(lsnm_tvb_gain,lsnm_tvb_gain/4)
                                value_x_weight = value * weight
                                # ... and add the incoming value_x_weight to the summed synaptic
                                # activity of the current unit (slot 2 accumulates excitatory
                                # input, slot 3 inhibitory input)
                                if value_x_weight > 0:
                                    modules[m][8][x][y][2] += value_x_weight
                                else:
                                    modules[m][8][x][y][3] += value_x_weight
                                # ... but store the total of inputs separately as well (slot 1 is
                                # the per-timestep input sum consumed by the activation rule below)
                                modules[m][8][x][y][1] += value_x_weight
                                # And modify the connectome node that is providing the current
                                # connecting weight i (but only if the 'feedback' flag is TRUE);
                                # this is the LSNM -> TVB direction of the coupling
                                if FEEDBACK:
                                    raw[0][1][0][tvb_conn[i]][0] += modules[m][8][x][y][0] * wm[i] * TVB_global_coupling_strength
            # the following variable will keep track of total number of units in the network
            unit_count = 0
            # write the neural and synaptic activity to output files of each unit at a given
            # timestep interval, given by the variable <synaptic interval>.
            # The reason we write to the output files before we do any computations is that we
            # want to keep track of the initial values of each unit in all modules
            for m in modules.keys():
                for x in range(modules[m][0]):
                    for y in range(modules[m][1]):
                        # Write out neural and integrated synaptic activity, and reset
                        # integrated synaptic activity, but ONLY IF a given number of timesteps
                        # has elapsed (integration interval)
                        if ((experiment_script.LSNM_simulation_time + t) % synaptic_interval) == 0:
                            # write out neural activity first...
                            fs_dict_neuronal[m].write(repr(modules[m][8][x][y][0]) + ' ')
                            # now calculate and write out absolute sum of synaptic activity (for fMRI)...
                            abs_syn = modules[m][8][x][y][2] + abs(modules[m][8][x][y][3])
                            fs_dict_abs_syn[m].write(repr(abs_syn) + ' ')
                            # ... and calculate and write out signed sum of synaptic activity (for MEG):
                            signed_syn = modules[m][8][x][y][2] + modules[m][8][x][y][3]
                            fs_dict_signed_syn[m].write(repr(signed_syn) + ' ')
                            # ...finally, reset synaptic activity (but not neural activity).
                            modules[m][8][x][y][2] = 0.0
                            modules[m][8][x][y][3] = 0.0
                # finally, insert a newline character so we can start next set of units on a
                # new line
                # NOTE(review): the newlines appear to be written every timestep even when no
                # values were written above — confirm downstream parsers tolerate blank lines.
                fs_dict_neuronal[m].write('\n')
                fs_dict_abs_syn[m].write('\n')
                fs_dict_signed_syn[m].write('\n')
            # also write neural and synaptic activity of all TVB nodes to output files at
            # the current
            # time step, but ONLY IF a given number of timesteps has elapsed (integration
            # interval)
            if ((experiment_script.LSNM_simulation_time + t) % synaptic_interval) == 0:
                # append the current TVB node electrical activity to array
                tvb_elec.append(current_tvb_neu)
                # append current synaptic activity array to synaptic activity timeseries
                tvb_abs_syna.append(current_tvb_abs_syn)
                tvb_signed_syna.append(current_tvb_signed_syn)
                # reset TVB synaptic activity, but not TVB neuroelectrical activity
                # (fresh 2 x TVB_number_of_nodes lists of zeros, one row per population)
                current_tvb_abs_syn = [ [0.0]*TVB_number_of_nodes for _ in range(2) ]
                current_tvb_signed_syn = [ [0.0]*TVB_number_of_nodes for _ in range(2) ]
            # the following 'for loop' computes the neural activity at each unit in the network,
            # depending on their 'activation rule'
            for m in modules.keys():
                for x in range(modules[m][0]):
                    for y in range(modules[m][1]):
                        # if the current module is an LSNM unit, use in-house wilson-cowan
                        # algorithm below (based on original Tagamets and Horwitz, 1995)
                        if modules[m][2] == 'wilson_cowan':
                            # extract Wilson-Cowan parameters from the list
                            threshold = modules[m][3]
                            noise = modules[m][7]
                            K = modules[m][6]
                            decay = modules[m][5]
                            Delta = modules[m][4]
                            # compute input to current unit (slot 1 = per-timestep input sum)
                            in_value = modules[m][8][x][y][1]
                            # now subtract the threshold parameter from that sum
                            in_value = in_value - threshold
                            # now compute a random value between -0.5 and 0.5
                            r_value = rdm.uniform(0,1) - 0.5
                            # multiply it by the noise parameter and add it to input value
                            in_value = in_value + r_value * noise
                            # now multiply by parameter K and apply sigmoid function e
                            sigmoid = 1.0 / (1.0 + math.exp(-K * in_value))
                            # now multiply sigmoid by delta parameter, subtract decay parameter,
                            # ... and add all to current value of unit (x, y) in module m
                            modules[m][8][x][y][0] += Delta * sigmoid - decay * modules[m][8][x][y][0]
                        # now reset the sum of excitatory and inhibitory weights at each unit,
                        # since we only need it for the current timestep (new sums of excitatory and
                        # inhibitory unit activations will be computed at the next time step)
                        modules[m][8][x][y][1] = 0.0
                        unit_count += 1
            # increase the number of timesteps (in the TVB branch 't' is advanced manually,
            # once per iteration of the enclosing simulator loop)
            t = t + 1
    elif useTVBConnectome == False : # ... if not using TVB connectome...
        # pure-LSNM simulation: iterate over discrete timesteps for the scripted duration
        for t in range(experiment_script.LSNM_simulation_time):
            # check script to see if there are any event to be presented to the LSNM
            # network at current timestep t (events are keyed by the timestep as a string)
            current_event=experiment_script.simulation_events.get(str(t))
            # then, introduce the event (if any was found)!
            # Note that 'modules' is defined within 'sim.py', whereas 'script_params' is
            # defined within the simulation script uploaded at runtime
            if current_event is not None:
                current_event(modules, experiment_script.script_params)
            # The following 'for loop' computes sum of excitatory and sum of inhibitory activities
            # at destination nodes using destination units and connecting weights provided
            for m in modules.keys():
                for x in range(modules[m][0]):
                    for y in range(modules[m][1]):
                        # we are going to do the following only for those units in the network that
                        # have weights that project to other units elsewhere
                        # extract value of origin unit (unit projecting weights elsewhere)
                        origin_unit = modules[m][8][x][y][0]
                        # slot 4 holds the outgoing-connection list of the unit; each entry w
                        # is (destination module, destination x, destination y, weight)
                        for w in modules[m][8][x][y][4]:
                            # First, find outgoing weights for all destination units and (except
                            # for those that do not
                            # have outgoing weights, in which case do nothing) compute weight * value
                            # at destination units
                            dest_module = w[0]
                            x_dest = w[1]
                            y_dest = w[2]
                            weight = w[3]
                            value_x_weight = origin_unit * weight
                            # Now, accumulate (i.e., 'integrate') & store those values at the
                            # destination units data structure,
                            # to be used later during neural activity computation.
                            # Note: Keep track of inhibitory and excitatory input summation
                            # separately, as shown below:
                            if value_x_weight > 0:
                                modules[dest_module][8][x_dest][y_dest][2] += value_x_weight
                            else:
                                modules[dest_module][8][x_dest][y_dest][3] += value_x_weight
                            # ... but also keep track of the total input summation, as shown
                            # below. The reason for this is that we need the input summation
                            # to each neuronal population unit AT EACH TIME STEP, as well as
                            # the excitatory and inhibitory input summations accumulated OVER
                            # A NUMBER OF TIMESTEPS (that number is usually 10). We call such
                            # accumulation of inputs
                            # over a number of timesteps the 'integrated synaptic activity'
                            # and it is used to compute fMRI and MEG.
                            modules[dest_module][8][x_dest][y_dest][1] += value_x_weight
            # the following variable will keep track of total number of units in the network
            unit_count = 0
            # write the neural and synaptic activity to output files of each unit at a given
            # timestep interval, given by the variable <synaptic interval>.
            # The reason we write to the output files before we do any computations is that we
            # want to keep track of the initial values of each unit in all modules
            for m in modules.keys():
                for x in range(modules[m][0]):
                    for y in range(modules[m][1]):
                        # Write out neural and integrated synaptic activity, and reset
                        # integrated synaptic activity, but ONLY IF a given number of timesteps
                        # has elapsed (integration interval)
                        if ((experiment_script.LSNM_simulation_time + t) % synaptic_interval) == 0:
                            # write out neural activity first...
                            fs_dict_neuronal[m].write(repr(modules[m][8][x][y][0]) + ' ')
                            # now calculate and write out absolute sum of synaptic activity (for fMRI)...
                            abs_syn = modules[m][8][x][y][2] + abs(modules[m][8][x][y][3])
                            fs_dict_abs_syn[m].write(repr(abs_syn) + ' ')
                            # ... and calculate and write out signed sum of synaptic activity (for MEG):
                            signed_syn = modules[m][8][x][y][2] + modules[m][8][x][y][3]
                            fs_dict_signed_syn[m].write(repr(signed_syn) + ' ')
                            # ...finally, reset synaptic activity (but not neural activity).
                            modules[m][8][x][y][2] = 0.0
                            modules[m][8][x][y][3] = 0.0
                # finally, insert a newline character so we can start next set of units on a
                # new line
                fs_dict_neuronal[m].write('\n')
                fs_dict_abs_syn[m].write('\n')
                fs_dict_signed_syn[m].write('\n')
            # the following 'for loop' computes the neural activity at each unit in the network,
            # depending on their 'activation rule'
            for m in modules.keys():
                for x in range(modules[m][0]):
                    for y in range(modules[m][1]):
                        # if the current module is an LSNM unit, use in-house wilson-cowan
                        # algorithm below (based on original Tagamets and Horwitz, 1995)
                        if modules[m][2] == 'wilson_cowan':
                            # extract Wilson-Cowan parameters from the list
                            threshold = modules[m][3]
                            noise = modules[m][7]
                            K = modules[m][6]
                            decay = modules[m][5]
                            Delta = modules[m][4]
                            # compute input to current unit (slot 1 = per-timestep input sum)
                            in_value = modules[m][8][x][y][1]
                            # now subtract the threshold parameter from that sum
                            in_value = in_value - threshold
                            # now compute a random value between -0.5 and 0.5
                            r_value = rdm.uniform(0,1) - 0.5
                            # multiply it by the noise parameter and add it to input value
                            in_value = in_value + r_value * noise
                            # now multiply by parameter K and apply sigmoid function e
                            sigmoid = 1.0 / (1.0 + math.exp(-K * in_value))
                            # now multiply sigmoid by delta parameter, subtract decay parameter,
                            # ... and add all to current value of unit (x, y) in module m
                            modules[m][8][x][y][0] += Delta * sigmoid - decay * modules[m][8][x][y][0]
                        # now reset the sum of excitatory and inhibitory weights at each unit,
                        # since we only need it for the current timestep (new sums of excitatory and
                        # inhibitory unit activations will be computed at the next time step)
                        modules[m][8][x][y][1] = 0.0
                        unit_count += 1
    # be safe and close output files properly
    for f in fs_neuronal:
        f.close()
    for f in fs_abs_syn:
        f.close()
    for f in fs_signed_syn:
        f.close()
    if useTVBConnectome == True:
        # convert electrical and synaptic activity of TVB nodes into numpy arrays
        TVB_elec = numpy.array(tvb_elec)
        TVB_abs_syna = numpy.array(tvb_abs_syna)
        TVB_signed_syna = numpy.array(tvb_signed_syna)
        # now, save the TVB electrical and synaptic activities to separate files
        # (written to the current working directory)
        numpy.save("tvb_neuronal.npy", TVB_elec)
        numpy.save("tvb_abs_syn.npy", TVB_abs_syna)
        numpy.save("tvb_signed_syn.npy", TVB_signed_syna)
    # record wall-clock end time in the same human-readable format as start_time
    end_time = time_module.asctime(time_module.localtime(time_module.time()))
    # Finally (finally), save simulation data to a log file
    # ...more data to be added later, as needed
    with open(log_file, 'w') as f:
        f.write('* Simulation Start Time: ' + start_time)
        f.write('\n* Simulation End Time: ' + end_time)
        f.write('\n* Simulation duration (timesteps): ' + str(experiment_script.LSNM_simulation_time))
        f.write('\n* Model description used: ' + model)
        f.write('\n* Weights list used: ' + weights_list)
        f.write('\n* Neural net used: ' + neural_net)
        f.write('\n* Simulation script used: ' + script)
        f.write('\n* Were weights changed to generate a new subject? ')
        if generateSubject == True:
            f.write('YES, weights multiplied by [' + str(subject_variation) + ', 1]')
        else:
            f.write('NO')
        f.write('\n* Was TVB Connectome used in the simulation? ')
        if useTVBConnectome == True:
            f.write('YES')
        else:
            f.write('NO')
    print ('\rSimulation Finished.')
    print ('\rOutput data files saved.')
    # NOTE(review): under Python 2 (which dict.has_key() above implies) this
    # prints a two-element tuple rather than a concatenated string — confirm
    # the intended interpreter version.
    print ('\rEnd Time: ', end_time)
def main(argv):
    """Command-line entry point: parse arguments and launch a simulation run.

    Parameters
    ----------
    argv : list of str
        Command-line argument list (typically ``sys.argv[1:]``). It is passed
        explicitly to argparse so that main() can also be driven
        programmatically or from tests.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', required=True, help='Model definition file (txt)')
    parser.add_argument('-w', '--weights', required=True, help='Weights list file (txt)')
    parser.add_argument('-s', '--script', required=True, help='Script file (py)')
    parser.add_argument('-l', '--links', required=False, help='Links to TVB connectome file (txt)')
    # parse the caller-supplied argv rather than the implicit sys.argv
    # (the original ignored its 'argv' parameter entirely); also dropped
    # four dead local assignments (model/weights_list/script/tvb_link)
    # that were never read
    args = parser.parse_args(argv)
    run(args.model, args.weights, args.script, args.links)
# the following is the standard boilerplate that calls the main() function
# only when this file is executed as a script (not when it is imported)
if __name__ == '__main__':
    main(sys.argv[1:])