-
Notifications
You must be signed in to change notification settings - Fork 0
/
publications_annotated.bib
674 lines (647 loc) · 52.3 KB
/
publications_annotated.bib
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
@article{Baum2017_ModularSegregationStructural,
  author       = {Baum, Graham L.
                  and Ciric, Rastko
                  and Roalf, David R.
                  and Betzel, Richard F.
                  and Moore, Tyler M.
                  and Shinohara, Russell T.
                  and Kahn, Ari E.
                  and Vandekar, Simon N.
                  and Rupert, Petra E.
                  and Quarmley, Megan
                  and Cook, Philip A.
                  and Elliott, Mark A.
                  and Ruparel, Kosha
                  and Gur, Raquel E.
                  and Gur, Ruben C.
                  and Bassett, Danielle S.
                  and Satterthwaite, Theodore D.},
  author+an    = {7=highlight},
  date         = {2017},
  doi          = {10.1016/j.cub.2017.04.051},
  eprint       = {28552358},
  eprinttype   = {pmid},
  issn         = {0960-9822},
  journaltitle = {Current Biology},
  keywords     = {adolescence,brain,connectome,development,DTI,executive,module,MRI,network,tractography},
  langid       = {english},
  number       = {11},
  pages        = {1561--1572.e8},
  shortjournal = {Current Biology},
  title        = {Modular Segregation of Structural Brain Networks Supports the Development of Executive Function in Youth},
  url          = {https://www.cell.com/current-biology/abstract/S0960-9822(17)30496-7},
  urldate      = {2019-03-09},
  volume       = {27}
}
@article{Betzel2019_StructuralGeometricGenetic,
  abstract     = {Analyses of interregional neural networks from electrocorticography data from a large database of individuals with medication-resistant epilepsy highlight the structural, geometric and genetic factors that shape network organization.},
  author       = {Betzel, Richard F.
                  and Medaglia, John D.
                  and Kahn, Ari E.
                  and Soffer, Jonathan
                  and Schonhaut, Daniel R.
                  and Bassett, Danielle S.},
  author+an    = {3=highlight},
  date         = {2019},
  doi          = {10.1038/s41551-019-0404-5},
  issn         = {2157-846X},
  journaltitle = {Nature Biomedical Engineering},
  langid       = {english},
  number       = {11},
  pages        = {902--916},
  shortjournal = {Nat Biomed Eng},
  title        = {Structural, Geometric and Genetic Factors Predict Interregional Brain Connectivity Patterns Probed by Electrocorticography},
  url          = {https://www.nature.com/articles/s41551-019-0404-5},
  urldate      = {2019-11-14},
  volume       = {3}
}
@article{comrieHippocampalRepresentationsAlternative2024,
  abstract     = {The cognitive ability to go beyond the present to consider alternative possibilities, including potential futures and counterfactual pasts, can support adaptive decision making. Complex and changing real-world environments, however, have many possible alternatives. Whether and how the brain can select among them to represent alternatives that meet current cognitive needs remains unknown. We therefore examined neural representations of alternative spatial locations in the rat hippocampus during navigation in a complex patch foraging environment with changing reward probabilities. We found representations of multiple alternatives along paths ahead and behind the animal, including in distant alternative patches. Critically, these representations were modulated in distinct patterns across successive trials: alternative paths were represented proportionate to their evolving relative value and predicted subsequent decisions, whereas distant alternatives were prevalent during value updating. These results demonstrate that the brain modulates the generation of alternative possibilities in patterns that meet changing cognitive needs for adaptive behavior.},
  author       = {Comrie, Alison E.
                  and Monroe, Emily J.
                  and Kahn, Ari E.
                  and Denovellis, Eric L.
                  and Joshi, Abhilasha
                  and Guidera, Jennifer A.
                  and Krausz, Timothy A.
                  and Berke, Joshua D.
                  and Daw, Nathaniel D.
                  and Frank, Loren M.},
  author+an    = {3=highlight},
  copyright    = {{\copyright} 2024, Posted by Cold Spring Harbor Laboratory. This pre-print is available under a Creative Commons License (Attribution-NonCommercial-NoDerivs 4.0 International), CC BY-NC-ND 4.0, as described at http://creativecommons.org/licenses/by-nc-nd/4.0/},
  date         = {2024},
  doi          = {10.1101/2024.09.23.613567},
  eprint       = {2024.09.23.613567},
  eprinttype   = {bioRxiv},
  langid       = {english},
  title        = {Hippocampal Representations of Alternative Possibilities Are Flexibly Generated to Meet Cognitive Demands},
  url          = {https://www.biorxiv.org/content/10.1101/2024.09.23.613567v1},
  urldate      = {2024-09-24}
}
@article{Corsi2020_FunctionalDisconnectionAssociative,
  abstract     = {Brain-computer interfaces (BCIs) have been largely developed to allow communication, control, and neurofeedback in human beings. Despite their great potential, BCIs perform inconsistently across individuals and the neural processes that enable humans to achieve good control remain poorly understood. To address this question, we performed simultaneous high-density electroencephalographic (EEG) and magnetoencephalographic (MEG) recordings in a motor imagery-based BCI training involving a group of healthy subjects. After reconstructing the signals at the cortical level, we showed that the reinforcement of motor-related activity during the BCI skill acquisition is paralleled by a progressive disconnection of associative areas which were not directly targeted during the experiments. Notably, these network connectivity changes reflected growing automaticity associated with BCI performance and predicted future learning rate. Altogether, our findings provide new insights into the large-scale cortical organizational mechanisms underlying BCI learning, which have implications for the improvement of this technology in a broad range of real-life applications.},
  author       = {Corsi, Marie-Constance
                  and Chavez, Mario
                  and Schwartz, Denis
                  and George, Nathalie
                  and Hugueville, Laurent
                  and Kahn, Ari E.
                  and Dupont, Sophie
                  and Bassett, Danielle S.
                  and De Vico Fallani, Fabrizio},
  author+an    = {6=highlight},
  date         = {2020},
  doi          = {10.1016/j.neuroimage.2019.116500},
  issn         = {1053-8119},
  journaltitle = {NeuroImage},
  keywords     = {Brain-computer interface,EEG,Learning,MEG,Motor imagery,Network},
  langid       = {english},
  pages        = {116500},
  shortjournal = {NeuroImage},
  title        = {Functional Disconnection of Associative Cortical Areas Predicts Performance during {{BCI}} Training},
  url          = {http://www.sciencedirect.com/science/article/pii/S1053811919310912},
  urldate      = {2020-01-13}
}
@article{Corsi2021_BCILearningInduces,
  abstract     = {Brain-computer interfaces (BCIs) constitute a promising tool for communication and control. However, mastering non-invasive closed-loop systems remains a learned skill that is difficult to develop for a non-negligible proportion of users. The involved learning process induces neural changes associated with a brain network reorganization that remains poorly understood. To address this inter-subject variability, we adopted a multilayer approach to integrate brain network properties from electroencephalographic (EEG) and magnetoencephalographic (MEG) data resulting from a four-session BCI training program followed by a group of healthy subjects. Our method gives access to the contribution of each layer to multilayer network that tends to be equal with time. We show that regardless the chosen modality, a progressive increase in the integration of somatosensory areas in the α band was paralleled by a decrease of the integration of visual processing and working memory areas in the β band. Notably, only brain network properties in multilayer network correlated with future BCI scores in the α2 band: positively in somatosensory and decision-making related areas and negatively in associative areas. Our findings cast new light on neural processes underlying BCI training. Integrating multimodal brain network properties provides new information that correlates with behavioral performance and could be considered as a potential marker of BCI learning.},
  author       = {Corsi, Marie-Constance
                  and Chavez, Mario
                  and Schwartz, Denis
                  and George, Nathalie
                  and Hugueville, Laurent
                  and Kahn, Ari E.
                  and Dupont, Sophie
                  and Bassett, Danielle S.
                  and De Vico Fallani, Fabrizio},
  author+an    = {6=highlight},
  date         = {2021},
  doi          = {10.1088/1741-2552/abef39},
  issn         = {1741-2552},
  journaltitle = {Journal of Neural Engineering},
  langid       = {english},
  shortjournal = {J. Neural Eng.},
  title        = {{{BCI}} Learning Induces Core-Periphery Reorganization in {{M}}/{{EEG}} Multiplex Brain Networks},
  url          = {http://iopscience.iop.org/article/10.1088/1741-2552/abef39},
  urldate      = {2021-03-23}
}
@article{corsiMeasuringNeuronalAvalanches2024,
  abstract     = {Large-scale interactions among multiple brain regions manifest as bursts of activations called neuronal avalanches, which reconfigure according to the task at hand and, hence, might constitute natural candidates to design brain-computer interfaces (BCIs). To test this hypothesis, we used source-reconstructed magneto/electroencephalography during resting state and a motor imagery task performed within a BCI protocol. To track the probability that an avalanche would spread across any two regions, we built an avalanche transition matrix (ATM) and demonstrated that the edges whose transition probabilities significantly differed between conditions hinged selectively on premotor regions in all subjects. Furthermore, we showed that the topology of the ATMs allows task-decoding above the current gold standard. Hence, our results suggest that neuronal avalanches might capture interpretable differences between tasks that can be used to inform brain-computer interfaces.},
  author       = {Corsi, Marie-Constance
                  and Sorrentino, Pierpaolo
                  and Schwartz, Denis
                  and George, Nathalie
                  and Gollo, Leonardo L.
                  and Chevallier, Sylvain
                  and Hugueville, Laurent
                  and Kahn, Ari E.
                  and Dupont, Sophie
                  and Bassett, Danielle S.
                  and Jirsa, Viktor
                  and De Vico Fallani, Fabrizio},
  author+an    = {8=highlight},
  date         = {2024},
  doi          = {10.1016/j.isci.2023.108734},
  issn         = {2589-0042},
  journaltitle = {iScience},
  keywords     = {Computer science,Neuroscience},
  number       = {1},
  pages        = {108734},
  title        = {Measuring Neuronal Avalanches to Inform Brain-Computer Interfaces},
  volume       = {27}
}
@article{Gu2015_ControllabilityStructuralBrain,
  abstract     = {Cognitive function is driven by dynamic interactions between large-scale neural circuits or networks, enabling behaviour. However, fundamental principles constraining these dynamic network processes have remained elusive. Here we use tools from control and network theories to offer a mechanistic explanation for how the brain moves between cognitive states drawn from the network organization of white matter microstructure. Our results suggest that densely connected areas, particularly in the default mode system, facilitate the movement of the brain to many easily reachable states. Weakly connected areas, particularly in cognitive control systems, facilitate the movement of the brain to difficult-to-reach states. Areas located on the boundary between network communities, particularly in attentional control systems, facilitate the integration or segregation of diverse cognitive systems. Our results suggest that structural network differences between cognitive circuits dictate their distinct roles in controlling trajectories of brain network function.},
  author       = {Gu, Shi
                  and Pasqualetti, Fabio
                  and Cieslak, Matthew
                  and Telesford, Qawi K.
                  and Yu, Alfred B.
                  and Kahn, Ari E.
                  and Medaglia, John D.
                  and Vettel, Jean M.
                  and Miller, Michael B.
                  and Grafton, Scott T.
                  and Bassett, Danielle S.},
  author+an    = {6=highlight},
  date         = {2015},
  doi          = {10.1038/ncomms9414},
  issn         = {2041-1723},
  journaltitle = {Nature Communications},
  langid       = {english},
  pages        = {8414},
  title        = {Controllability of Structural Brain Networks},
  url          = {https://www.nature.com/articles/ncomms9414},
  urldate      = {2019-03-09},
  volume       = {6}
}
@article{Kahn2017_StructuralPathwaysSupporting,
  author       = {Kahn, Ari E.
                  and Mattar, Marcelo G.
                  and Vettel, Jean M.
                  and Wymbs, Nicholas F.
                  and Grafton, Scott T.
                  and Bassett, Danielle S.},
  author+an    = {1=highlight},
  date         = {2017},
  doi          = {10.1093/cercor/bhw335},
  issn         = {1047-3211},
  journaltitle = {Cerebral Cortex},
  number       = {1},
  pages        = {173--184},
  publisher    = {Oxford University Press},
  title        = {Structural Pathways Supporting Swift Acquisition of New Visuomotor Skills},
  url          = {https://academic.oup.com/cercor/article/27/1/173/2632738},
  urldate      = {2018-08-01},
  volume       = {27}
}
@article{Kahn2018_NetworkConstraintsLearnability,
  abstract     = {Human learners are adept at grasping the complex relationships underlying incoming sequential input. In the present work, we formalize complex relationships as graph structures derived from temporal associations in motor sequences. Next, we explore the extent to which learners are sensitive to key variations in the topological properties inherent to those graph structures. Participants performed a probabilistic motor sequence task in which the order of button presses was determined by the traversal of graphs with modular, lattice-like, or random organization. Graph nodes each represented a unique button press and edges represented a transition between button presses. Results indicate that learning, indexed here by participants' response times, was strongly mediated by the graph's meso-scale organization, with modular graphs being associated with shorter response times than random and lattice graphs. Moreover, variations in a node's number of connections (degree) and a node's role in mediating long-distance communication (betweeness centrality) impacted graph learning, even after accounting for level of practice on that node. These results demonstrate that the graph architecture underlying temporal sequences of stimuli fundamentally constrains learning, and moreover that tools from network science provide a valuable framework for assessing how learners encode complex, temporally structured information.},
  author       = {Kahn, Ari E.
                  and Karuza, Elisabeth A.
                  and Vettel, Jean M.
                  and Bassett, Danielle S.},
  author+an    = {1=highlight},
  date         = {2018},
  doi          = {10.1038/s41562-018-0463-8},
  issn         = {2397-3374},
  journaltitle = {Nature Human Behaviour},
  langid       = {english},
  number       = {12},
  pages        = {936--947},
  publisher    = {Springer US},
  title        = {Network Constraints on Learnability of Probabilistic Motor Sequences},
  url          = {https://www.nature.com/articles/s41562-018-0463-8},
  volume       = {2}
}
@article{kahnHumansRationallyBalance2023,
  abstract     = {How do people model the world’s dynamics to guide mental simulation and evaluate choices? One prominent approach, the Successor Representation (SR), takes advantage of temporal abstraction of future states: by aggregating trajectory predictions over multiple timesteps, the brain can avoid the costs of iterative, multi-step mental simulation. Human behavior broadly shows signatures of such temporal abstraction, but finer-grained characterization of individuals’ strategies and their dynamic adjustment remains an open question. We developed a novel task to measure SR usage during dynamic, trial-by-trial learning. Using this approach, we find that participants exhibit a mix of SR and model-based learning strategies that varies across individuals. Further, by dynamically manipulating the task contingencies within-subject to favor or disfavor temporal abstraction, we observe evidence of resource-rational reliance on the SR, which decreases when future states are less predictable. Our work adds to a growing body of research showing that the brain arbitrates between approximate decision strategies. The current study extends these ideas from simple habits into usage of more sophisticated approximate predictive models, and demonstrates that individuals dynamically adapt these in response to the predictability of their environment.},
  author       = {Kahn, Ari E.
                  and Daw, Nathaniel D.},
  author+an    = {1=highlight},
  date         = {2024},
  doi          = {10.1101/2023.11.28.569070},
  journaltitle = {Nature Communications Psychology},
  langid       = {english},
  pubstate     = {inpress},
  title        = {Humans Rationally Balance Mental Simulation and Temporally Abstract World Models},
  url          = {https://www.biorxiv.org/content/10.1101/2023.11.28.569070v3},
  urldate      = {2023-11-29}
}
@article{kahnNetworkStructureInfluences2023,
  abstract     = {Human experience is built upon sequences of discrete events. From those sequences, humans build impressively accurate models of their world. This process has been referred to as graph learning, a form of structure learning in which the mental model encodes the graph of event-to-event transition probabilities [1], [2], typically in medial temporal cortex [3]–[6]. Recent evidence suggests that some network structures are easier to learn than others [7]–[9], but the neural properties of this effect remain unknown. Here we use fMRI to show that the network structure of a temporal sequence of stimuli influences the fidelity with which those stimuli are represented in the brain. Healthy adult human participants learned a set of stimulus-motor associations following one of two graph structures. The design of our experiment allowed us to separate regional sensitivity to the structural, stimulus, and motor response components of the task. As expected, whereas the motor response could be decoded from neural representations in postcentral gyrus, the shape of the stimulus could be decoded from lateral occipital cortex. The structure of the graph impacted the nature of neural representations: when the graph was modular as opposed to lattice-like, BOLD representations in visual areas better predicted trial identity in a held-out run and displayed higher intrinsic dimensionality. Our results demonstrate that even over relatively short timescales, graph structure determines the fidelity of event representations as well as the dimensionality of the space in which those representations are encoded. More broadly, our study shows that network context influences the strength of learned neural representations, motivating future work in the design, optimization, and adaptation of network contexts for distinct types of learning over different timescales.},
  author       = {Kahn, Ari E.
                  and Szymula, Karol
                  and Loman, Sophie E.
                  and Haggerty, Edda B.
                  and Nyema, Nathaniel
                  and Aguirre, Geoffrey K.
                  and Bassett, Dani S.},
  author+an    = {1=highlight},
  date         = {2024},
  doi          = {10.1101/2023.01.23.525254},
  journaltitle = {Nature Communications},
  langid       = {english},
  pubstate     = {inpress},
  title        = {Network Structure Influences the Strength of Learned Neural Representations},
  url          = {https://www.biorxiv.org/content/10.1101/2023.01.23.525254v2},
  urldate      = {2023-07-21}
}
@article{Karrer2020_PracticalGuideMethodological,
  abstract     = {Objective. Predicting how the brain can be driven to specific states by means of internal or external control requires a fundamental understanding of the relationship between neural connectivity and activity. Network control theory is a powerful tool from the physical and engineering sciences that can provide insights regarding that relationship; it formalizes the study of how the dynamics of a complex system can arise from its underlying structure of interconnected units. Approach. Given the recent use of network control theory in neuroscience, it is now timely to offer a practical guide to methodological considerations in the controllability of structural brain networks. Here we provide a systematic overview of the framework, examine the impact of modeling choices on frequently studied control metrics, and suggest potentially useful theoretical extensions. We ground our discussions, numerical demonstrations, and theoretical advances in a dataset of high-resolution diffusion imaging with 730 diffusion directions acquired over approximately 1 h of scanning from ten healthy young adults. Main results. Following a didactic introduction of the theory, we probe how a selection of modeling choices affects four common statistics: average controllability, modal controllability, minimum control energy, and optimal control energy. Next, we extend the current state-of-the-art in two ways: first, by developing an alternative measure of structural connectivity that accounts for radial propagation of activity through abutting tissue, and second, by defining a complementary metric quantifying the complexity of the energy landscape of a system. We close with specific modeling recommendations and a discussion of methodological constraints. Significance. 
Our hope is that this accessible account will inspire the neuroimaging community to more fully exploit the potential of network control theory in tackling pressing questions in cognitive, developmental, and clinical neuroscience.},
  author       = {Karrer, Teresa M.
                  and Kim, Jason Z.
                  and Stiso, Jennifer
                  and Kahn, Ari E.
                  and Pasqualetti, Fabio
                  and Habel, Ute
                  and Bassett, Danielle S.},
  author+an    = {4=highlight},
  date         = {2020},
  doi          = {10.1088/1741-2552/ab6e8b},
  issn         = {1741-2552},
  journaltitle = {Journal of Neural Engineering},
  langid       = {english},
  number       = {2},
  pages        = {026031},
  publisher    = {IOP Publishing},
  shortjournal = {J. Neural Eng.},
  title        = {A Practical Guide to Methodological Considerations in the Controllability of Structural Brain Networks},
  volume       = {17}
}
@article{Karuza2017_ProcessRevealsStructure,
  abstract     = {Network science has emerged as a powerful tool through which we can study the higher-order architectural properties of the world around us. How human learners exploit this information remains an essential question. Here, we focus on the temporal constraints that govern such a process. Participants viewed a continuous sequence of images generated by three distinct walks on a modular network. Walks varied along two critical dimensions: their predictability and the density with which they sampled from communities of images. Learners exposed to walks that richly sampled from each community exhibited a sharp increase in processing time upon entry into a new community. This effect was eliminated in a highly regular walk that sampled exhaustively from images in short, successive cycles (i.e., that increasingly minimized uncertainty about the nature of upcoming stimuli). These results demonstrate that temporal organization plays an essential role in learners’ sensitivity to the network architecture underlying sensory input.},
  author       = {Karuza, Elisabeth A.
                  and Kahn, Ari E.
                  and Thompson-Schill, Sharon L.
                  and Bassett, Danielle S.},
  author+an    = {2=highlight},
  date         = {2017},
  doi          = {10.1038/s41598-017-12876-5},
  issn         = {2045-2322},
  journaltitle = {Scientific Reports},
  langid       = {english},
  number       = {1},
  pages        = {12733},
  shorttitle   = {Process Reveals Structure},
  title        = {Process Reveals Structure: {{How}} a Network Is Traversed Mediates Expectations about Its Architecture},
  url          = {https://www.nature.com/articles/s41598-017-12876-5},
  urldate      = {2019-03-07},
  volume       = {7}
}
@article{Karuza2019_HumanSensitivityCommunity,
  author       = {Karuza, Elisabeth A.
                  and Kahn, Ari E.
                  and Bassett, Danielle S.},
  author+an    = {2=highlight},
  date         = {2019},
  doi          = {10.1155/2019/8379321},
  issn         = {1076-2787, 1099-0526},
  journaltitle = {Complexity},
  langid       = {english},
  pages        = {1--8},
  title        = {Human {{Sensitivity}} to {{Community Structure Is Robust}} to {{Topological Variation}}},
  url          = {https://www.hindawi.com/journals/complexity/2019/8379321/},
  urldate      = {2019-03-10},
  volume       = {2019}
}
@article{Khambhati2019_FunctionalControlElectrophysiological,
  abstract     = {Chronically implantable neurostimulation devices are becoming a clinically viable option for treating patients with neurological disease and psychiatric disorders. Neurostimulation offers the ability to probe and manipulate distributed networks of interacting brain areas in dysfunctional circuits. Here, we use tools from network control theory to examine the dynamic reconfiguration of functionally interacting neuronal ensembles during targeted neurostimulation of cortical and subcortical brain structures. By integrating multimodal intracranial recordings and diffusion-weighted imaging from patients with drug-resistant epilepsy, we test hypothesized structural and functional rules that predict altered patterns of synchronized local field potentials. We demonstrate the ability to predictably reconfigure functional interactions depending on stimulation strength and location. Stimulation of areas with structurally weak connections largely modulates the functional hubness of downstream areas and concurrently propels the brain towards more difficult-to-reach dynamical states. By using focal perturbations to bridge large-scale structure, function, and markers of behavior, our findings suggest that stimulation may be tuned to influence different scales of network interactions driving cognition.},
  author       = {Khambhati, Ankit N.
                  and Kahn, Ari E.
                  and Costantini, Julia
                  and Ezzyat, Youssef
                  and Solomon, Ethan A.
                  and Gross, Robert E.
                  and Jobst, Barbara C.
                  and Sheth, Sameer A.
                  and Zaghloul, Kareem A.
                  and Worrell, Gregory
                  and Seger, Sarah
                  and Lega, Bradley C.
                  and Weiss, Shennan
                  and Sperling, Michael R.
                  and Gorniak, Richard
                  and Das, Sandhitsu R.
                  and Stein, Joel M.
                  and Rizzuto, Daniel S.
                  and Kahana, Michael J.
                  and Lucas, Timothy H.
                  and Davis, Kathryn A.
                  and Tracy, Joseph I.
                  and Bassett, Danielle S.},
  author+an    = {2=highlight},
  date         = {2019},
  doi          = {10.1162/netn_a_00089},
  journaltitle = {Network Neuroscience},
  pages        = {1--30},
  shortjournal = {Network Neuroscience},
  title        = {Functional Control of Electrophysiological Network Architecture Using Direct Neurostimulation in Humans}
}
@article{Kim2018_RoleGraphArchitecture,
  abstract     = {Networked systems display complex patterns of interactions between components. In physical networks, these interactions often occur along structural connections that link components in a hard-wired connection topology, supporting a variety of system-wide dynamical behaviours such as synchronization. Although descriptions of these behaviours are important, they are only a first step towards understanding and harnessing the relationship between network topology and system behaviour. Here, we use linear network control theory to derive accurate closed-form expressions that relate the connectivity of a subset of structural connections (those linking driver nodes to non-driver nodes) to the minimum energy required to control networked systems. To illustrate the utility of the mathematics, we apply this approach to high-resolution connectomes recently reconstructed from Drosophila, mouse, and human brains. We use these principles to suggest an advantage of the human brain in supporting diverse network dynamics with small energetic costs while remaining robust to perturbations, and to perform clinically accessible targeted manipulation of the brain’s control performance by removing single edges in the network. Generally, our results ground the expectation of a control system’s behaviour in its network architecture, and directly inspire new directions in network analysis and design via distributed control.},
  author       = {Kim, Jason Z.
                  and Soffer, Jonathan M.
                  and Kahn, Ari E.
                  and Vettel, Jean M.
                  and Pasqualetti, Fabio
                  and Bassett, Danielle S.},
  author+an    = {3=highlight},
  date         = {2018},
  doi          = {10.1038/nphys4268},
  issn         = {1745-2481},
  journaltitle = {Nature Physics},
  langid       = {english},
  number       = {1},
  pages        = {91--98},
  title        = {Role of Graph Architecture in Controlling Dynamical Networks with Applications to Neural Systems},
  url          = {https://www.nature.com/articles/nphys4268},
  urldate      = {2019-03-09},
  volume       = {14}
}
@article{krauszDualCreditAssignment2023a,
  abstract     = {Animals frequently make decisions based on expectations of future reward ("values"). Values are updated by ongoing experience: places and choices that result in reward are assigned greater value. Yet, the specific algorithms used by the brain for such credit assignment remain unclear. We monitored accumbens dopamine as rats foraged for rewards in a complex, changing environment. We observed brief dopamine pulses both at reward receipt (scaling with prediction error) and at novel path opportunities. Dopamine also ramped up as rats ran toward reward ports, in proportion to the value at each location. By examining the evolution of these dopamine place-value signals, we found evidence for two distinct update processes: progressive propagation of value along taken paths, as in temporal difference learning, and inference of value throughout the maze, using internal models. Our results demonstrate that within rich, naturalistic environments dopamine conveys place values that are updated via multiple, complementary learning algorithms.},
  author       = {Krausz, Timothy A.
                  and Comrie, Alison E.
                  and Kahn, Ari E.
                  and Frank, Loren M.
                  and Daw, Nathaniel D.
                  and Berke, Joshua D.},
  author+an    = {3=highlight},
  date         = {2023},
  doi          = {10.1016/j.neuron.2023.07.017},
  eprint       = {37611585},
  eprinttype   = {pmid},
  issn         = {0896-6273},
  journaltitle = {Neuron},
  langid       = {english},
  number       = {21},
  pages        = {3465--3478.e7},
  publisher    = {Elsevier},
  shortjournal = {Neuron},
  title        = {Dual Credit Assignment Processes Underlie Dopamine Signals in a Complex Spatial Environment},
  url          = {https://www.cell.com/neuron/abstract/S0896-6273(23)00550-0},
  urldate      = {2023-11-26},
  volume       = {111}
}
@article{lomanTimeresolvedFunctionalConnectivity2024,
abstract = {Humans naturally attend to patterns that emerge in our perceptual environments, building mental models that allow for future experiences to be processed more effectively and efficiently. Perceptual events and statistical relations can be represented as nodes and edges in a graph, respectively. Recent work in the field of graph learning has shown that human behavior is sensitive to graph topology, but less is known about how that topology might elicit distinct neural responses during learning. Here, we address this gap in knowledge by applying time-resolved network analyses to fMRI data collected during a visuomotor graph learning task to assess neural signatures of learning modular graphs and non-modular lattice graphs. We found that performance on this task was supported by a highly flexible visual system and otherwise relatively stable brain-wide community structure, cohesiveness within the dorsal attention, limbic, default mode, and subcortical systems, and an increasing degree of integration between the visual and ventral attention systems. Additionally, we found that the time-resolved connectivity of the limbic, default mode, temporoparietal, and subcortical systems was associated with enhanced performance for the modular group but not the lattice group. These findings provide evidence for the differential neural processing of statistical structures with distinct topologies and highlight similarities between the neural correlates of graph learning and statistical learning more broadly.},
author = {Loman, Sophie
and Caciagli, Lorenzo
and Kahn, Ari E.
and Szymula, Karol P.
and Nyema, Nathaniel
and Bassett, Dani S.},
author+an = {3=highlight},
date = {2024},
doi = {10.1101/2024.07.04.602005},
eprint = {10.1101/2024.07.04.602005},
eprinttype = {bioRxiv},
langid = {english},
shortjournal = {bioRxiv},
title = {Time-Resolved Functional Connectivity during Visuomotor Graph Learning},
url = {https://www.biorxiv.org/content/10.1101/2024.07.04.602005v1},
urldate = {2024-08-19}
}
@article{Lynn2020_AbstractRepresentationsEvents,
abstract = {Humans are adept at uncovering abstract associations in the world around them, yet the underlying mechanisms remain poorly understood. Intuitively, learning the higher-order structure of statistical relationships should involve complex mental processes. Here we propose an alternative perspective: that higher-order associations instead arise from natural errors in learning and memory. Using the free energy principle, which bridges information theory and Bayesian inference, we derive a maximum entropy model of people’s internal representations of the transitions between stimuli. Importantly, our model (i) affords a concise analytic form, (ii) qualitatively explains the effects of transition network structure on human expectations, and (iii) quantitatively predicts human reaction times in probabilistic sequential motor tasks. Together, these results suggest that mental errors influence our abstract representations of the world in significant and predictable ways, with direct implications for the study and design of optimally learnable information sources.},
author = {Lynn, Christopher W.
and Kahn, Ari E.
and Nyema, Nathaniel
and Bassett, Danielle S.},
author+an = {2=highlight},
date = {2020},
doi = {10.1038/s41467-020-15146-7},
issn = {2041-1723},
journaltitle = {Nature Communications},
langid = {english},
number = {1},
pages = {2313},
publisher = {{Nature Publishing Group}},
title = {Abstract Representations of Events Arise from Mental Errors in Learning and Memory},
url = {https://www.nature.com/articles/s41467-020-15146-7},
urldate = {2020-06-15},
volume = {11}
}
@article{Lynn2020_HumanInformationProcessing,
author = {Lynn, Christopher W. and
Papadopoulos, Lia and
Kahn, Ari E. and
Bassett, Danielle S.},
author+an = {3=highlight},
title = {Human Information Processing in Complex Networks},
journaltitle = {Nature Physics},
publisher = {{Nature Publishing Group}},
date = {2020},
pages = {1--9},
doi = {10.1038/s41567-020-0924-7},
issn = {1745-2481},
langid = {english},
url = {https://www.nature.com/articles/s41567-020-0924-7},
urldate = {2020-06-15},
abstract = {Humans communicate using systems of interconnected stimuli or concepts—from language and music to literature and science—yet it remains unclear how, if at all, the structure of these networks supports the communication of information. Although information theory provides tools to quantify the information produced by a system, traditional metrics do not account for the inefficient ways that humans process this information. Here, we develop an analytical framework to study the information generated by a system as perceived by a human observer. We demonstrate experimentally that this perceived information depends critically on a system’s network topology. Applying our framework to several real networks, we find that they communicate a large amount of information (having high entropy) and do so efficiently (maintaining low divergence from human expectations). Moreover, we show that such efficient communication arises in networks that are simultaneously heterogeneous, with high-degree hubs, and clustered, with tightly connected modules—the two defining features of hierarchical organization. Together, these results suggest that many communication networks are constrained by the pressures of information transmission, and that these pressures select for specific structural features.}
}
@article{mahadevanAlprazolamModulatesPersistence2023,
author = {Mahadevan, Arun S. and
Cornblath, Eli J. and
Lydon-Staley, David M. and
Zhou, Dale and
Parkes, Linden and
Larsen, Bart and
Adebimpe, Azeez and
Kahn, Ari E. and
Gur, Ruben C. and
Gur, Raquel E. and
Satterthwaite, Theodore D. and
Wolf, Daniel H. and
Bassett, Dani S.},
author+an = {8=highlight},
title = {Alprazolam Modulates Persistence Energy during Emotion Processing in First-Degree Relatives of Individuals with Schizophrenia: A Network Control Study},
shorttitle = {Alprazolam Modulates Persistence Energy during Emotion Processing in First-Degree Relatives of Individuals with Schizophrenia},
journaltitle = {Molecular Psychiatry},
shortjournal = {Mol Psychiatry},
publisher = {{Nature Publishing Group}},
date = {2023},
pages = {1--10},
doi = {10.1038/s41380-023-02121-z},
issn = {1476-5578},
keywords = {Neuroscience,Schizophrenia},
langid = {english},
url = {https://www.nature.com/articles/s41380-023-02121-z},
urldate = {2023-07-21},
abstract = {Schizophrenia is marked by deficits in facial affect processing associated with abnormalities in GABAergic circuitry, deficits also found in first-degree relatives. Facial affect processing involves a distributed network of brain regions including limbic regions like amygdala and visual processing areas like fusiform cortex. Pharmacological modulation of GABAergic circuitry using benzodiazepines like alprazolam can be useful for studying this facial affect processing network and associated GABAergic abnormalities in schizophrenia. Here, we use pharmacological modulation and computational modeling to study the contribution of GABAergic abnormalities toward emotion processing deficits in schizophrenia. Specifically, we apply principles from network control theory to model persistence energy – the control energy required to maintain brain activation states – during emotion identification and recall tasks, with and without administration of alprazolam, in a sample of first-degree relatives and healthy controls. Here, persistence energy quantifies the magnitude of theoretical external inputs during the task. We find that alprazolam increases persistence energy in relatives but not in controls during threatening face processing, suggesting a compensatory mechanism given the relative absence of behavioral abnormalities in this sample of unaffected relatives. Further, we demonstrate that regions in the fusiform and occipital cortices are important for facilitating state transitions during facial affect processing. Finally, we uncover spatial relationships (i) between regional variation in differential control energy (alprazolam versus placebo) and (ii) both serotonin and dopamine neurotransmitter systems, indicating that alprazolam may exert its effects by altering neuromodulatory systems. Together, these findings provide a new perspective on the distributed emotion processing network and the effect of GABAergic modulation on this network, in addition to identifying an association between schizophrenia risk and abnormal GABAergic effects on persistence energy during threat processing.}
}
@article{Sizemore2018_CliquesCavitiesHuman,
abstract = {Encoding brain regions and their connections as a network of nodes and edges captures many of the possible paths along which information can be transmitted as humans process and perform complex behaviors. Because cognitive processes involve large, distributed networks of brain areas, principled examinations of multi-node routes within larger connection patterns can offer fundamental insights into the complexities of brain function. Here, we investigate both densely connected groups of nodes that could perform local computations as well as larger patterns of interactions that would allow for parallel processing. Finding such structures necessitates that we move from considering exclusively pairwise interactions to capturing higher order relations, concepts naturally expressed in the language of algebraic topology. These tools can be used to study mesoscale network structures that arise from the arrangement of densely connected substructures called cliques in otherwise sparsely connected brain networks. We detect cliques (all-to-all connected sets of brain regions) in the average structural connectomes of 8 healthy adults scanned in triplicate and discover the presence of more large cliques than expected in null networks constructed via wiring minimization, providing architecture through which brain network can perform rapid, local processing. We then locate topological cavities of different dimensions, around which information may flow in either diverging or converging patterns. These cavities exist consistently across subjects, differ from those observed in null model networks, and – importantly – link regions of early and late evolutionary origin in long loops, underscoring their unique role in controlling brain function. These results offer a first demonstration that techniques from algebraic topology offer a novel perspective on structural connectomics, highlighting loop-like paths as crucial features in the human brain’s structural architecture.},
author = {Sizemore, Ann E.
and Giusti, Chad
and Kahn, Ari
and Vettel, Jean M.
and Betzel, Richard F.
and Bassett, Danielle S.},
author+an = {3=highlight},
date = {2018},
doi = {10.1007/s10827-017-0672-6},
issn = {1573-6873},
journaltitle = {Journal of Computational Neuroscience},
keywords = {Applied topology,Network neuroscience,Persistent homology},
langid = {english},
number = {1},
pages = {115--145},
shortjournal = {J Comput Neurosci},
title = {Cliques and Cavities in the Human Connectome},
volume = {44}
}
@article{Stiso2019_WhiteMatterNetwork,
abstract = {Summary Optimizing direct electrical stimulation for the treatment of neurological disease remains difficult due to an incomplete understanding of its physical propagation through brain tissue. Here, we use network control theory to predict how stimulation spreads through white matter to influence spatially distributed dynamics. We test the theory’s predictions using a unique dataset comprising diffusion weighted imaging and electrocorticography in epilepsy patients undergoing grid stimulation. We find statistically significant shared variance between the predicted activity state transitions and the observed activity state transitions. We then use an optimal control framework to posit testable hypotheses regarding which brain states and structural properties will efficiently improve memory encoding when stimulated. Our work quantifies the role that white matter architecture plays in guiding the dynamics of direct electrical stimulation and offers empirical support for the utility of network control theory in explaining the brain’s response to stimulation.},
author = {Stiso, Jennifer
and Khambhati, Ankit N.
and Menara, Tommaso
and Kahn, Ari E.
and Stein, Joel M.
and Das, Sandhitsu R.
and Gorniak, Richard
and Tracy, Joseph
and Litt, Brian
and Davis, Kathryn A.
and Pasqualetti, Fabio
and Lucas, Timothy H.
and Bassett, Danielle S.},
author+an = {4=highlight},
date = {2019},
doi = {10.1016/j.celrep.2019.08.008},
issn = {2211-1247},
journaltitle = {Cell Reports},
keywords = {brain network,brain stimulation,network control theory},
number = {10},
pages = {2554-2566.e7},
shortjournal = {Cell Reports},
title = {White Matter Network Architecture Guides Direct Electrical Stimulation through Optimal State Transitions},
url = {http://www.sciencedirect.com/science/article/pii/S2211124719310411},
urldate = {2019-09-12},
volume = {28}
}
@article{stisoNeurophysiologicalEvidenceCognitive2022,
abstract = {Humans deftly parse statistics from sequences. Some theories posit that humans learn these statistics by forming cognitive maps, or underlying representations of the latent space which links items in the sequence. Here, an item in the sequence is a node, and the probability of transitioning between two items is an edge. Sequences can then be generated from walks through the latent space, with different spaces giving rise to different sequence statistics. Individual or group differences in sequence learning can be modeled by changing the time scale over which estimates of transition probabilities are built, or in other words, by changing the amount of temporal discounting. Latent space models with temporal discounting bear a resemblance to models of navigation through Euclidean spaces. However, few explicit links have been made between predictions from Euclidean spatial navigation and neural activity during human sequence learning. Here, we use a combination of behavioral modeling and intracranial encephalography (iEEG) recordings to investigate how neural activity might support the formation of space-like cognitive maps through temporal discounting during sequence learning. Specifically, we acquire human reaction times from a sequential reaction time task, to which we fit a model that formulates the amount of temporal discounting as a single free parameter. From the parameter, we calculate each individual’s estimate of the latent space. We find that neural activity reflects these estimates mostly in the temporal lobe, including areas involved in spatial navigation. Similar to spatial navigation, we find that low-dimensional representations of neural activity allow for easy separation of important features, such as modules, in the latent space. Lastly, we take advantage of the high temporal resolution of iEEG data to determine the time scale on which latent spaces are learned. 
We find that learning typically happens within the first 500 trials, and is modulated by the underlying latent space and the amount of temporal discounting characteristic of each participant. Ultimately, this work provides important links between behavioral models of sequence learning and neural activity during the same behavior, and contextualizes these results within a broader framework of domain general cognitive maps.},
author = {Stiso, Jennifer
and Lynn, Christopher W.
and Kahn, Ari E.
and Rangarajan, Vinitha
and Szymula, Karol P.
and Archer, Ryan
and Revell, Andrew
and Stein, Joel M.
and Litt, Brian
and Davis, Kathryn A.
and Lucas, Timothy H.
and Bassett, Dani S.},
author+an = {3=highlight},
date = {2022},
doi = {10.1523/ENEURO.0361-21.2022},
eprint = {35105662},
eprinttype = {pmid},
issn = {2373-2822},
journaltitle = {eNeuro},
number = {2},
pages = {ENEURO.0361-21.2022},
pmcid = {PMC8896554},
shortjournal = {eNeuro},
title = {Neurophysiological Evidence for Cognitive Map Formation during Sequence Learning},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8896554/},
urldate = {2022-08-03},
volume = {9}
}
@article{Tang2017_DevelopmentalIncreasesWhite,
author = {Tang, Evelyn and
Giusti, Chad and
Baum, Graham L. and
Gu, Shi and
Pollock, Eli and
Kahn, Ari E. and
Roalf, David R. and
Moore, Tyler M. and
Ruparel, Kosha and
Gur, Ruben C. and
Gur, Raquel E. and
Satterthwaite, Theodore D. and
Bassett, Danielle S.},
author+an = {6=highlight},
title = {Developmental Increases in White Matter Network Controllability Support a Growing Diversity of Brain Dynamics},
journaltitle = {Nature Communications},
date = {2017},
volume = {8},
number = {1},
pages = {1252},
doi = {10.1038/s41467-017-01254-4},
issn = {2041-1723},
langid = {english},
url = {https://www.nature.com/articles/s41467-017-01254-4},
urldate = {2019-03-09},
abstract = {Human brain development is characterized by an increased control of neural activity, but how this happens is not well understood. Here, authors show that white matter connectivity in 882 youth, aged 8-22, becomes increasingly specialized locally and is optimized for network control.}
}
@article{Tompson2019_IndividualDifferencesLearning,
abstract = {How do people acquire knowledge about which individuals belong to different cliques or communities? And to what extent does this learning process differ from the process of learning higher-order information about complex associations between nonsocial bits of information? Here, the authors use a paradigm in which the order of stimulus presentation forms temporal associations between the stimuli, collectively constituting a complex network. They examined individual differences in the ability to learn community structure of networks composed of social versus nonsocial stimuli. Although participants were able to learn community structure of both social and nonsocial networks, their performance in social network learning was uncorrelated with their performance in nonsocial network learning. In addition, social traits, including social orientation and perspective-taking, uniquely predicted the learning of social community structure but not the learning of nonsocial community structure. Taken together, the results suggest that the process of learning higher-order community structure in social networks is partially distinct from the process of learning higher-order community structure in nonsocial networks. The study design provides a promising approach to identify neurophysiological drivers of social network versus nonsocial network learning, extending knowledge about the impact of individual differences on these learning processes. (PsycINFO Database Record (c) 2019 APA, all rights reserved)},
author = {Tompson, Steven H.
and Kahn, Ari E.
and Falk, Emily B.
and Vettel, Jean M.
and Bassett, Danielle S.},
author+an = {2=highlight},
date = {2019},
doi = {10.1037/xlm0000580},
issn = {0278-7393},
journaltitle = {Journal of Experimental Psychology: Learning, Memory, and Cognition},
keywords = {Individual Differences,Learning,Social Cognition,Social Networks,Statistics,Test Construction},
number = {2},
pages = {253--271},
title = {Individual Differences in Learning Social and Nonsocial Network Structures},
volume = {45}
}
@article{Tompson2020_FunctionalBrainNetwork,
author = {Tompson, Steven H. and
Kahn, Ari E. and
Falk, Emily B. and
Vettel, Jean M. and
Bassett, Danielle S.},
author+an = {2=highlight},
title = {Functional Brain Network Architecture Supporting the Learning of Social Networks in Humans},
journaltitle = {NeuroImage},
shortjournal = {NeuroImage},
date = {2020},
pages = {116498},
doi = {10.1016/j.neuroimage.2019.116498},
issn = {1053-8119},
keywords = {Functional brain networks,Social cognition,Social network learning,Statistical learning},
langid = {english},
url = {http://www.sciencedirect.com/science/article/pii/S1053811919310894},
urldate = {2020-01-13},
abstract = {Most humans have the good fortune to live their lives embedded in richly structured social groups. Yet, it remains unclear how humans acquire knowledge about these social structures to successfully navigate social relationships. Here we address this knowledge gap with an interdisciplinary neuroimaging study drawing on recent advances in network science and statistical learning. Specifically, we collected BOLD MRI data while participants learned the community structure of both social and non-social networks, in order to examine whether the learning of these two types of networks was differentially associated with functional brain network topology. We found that participants learned the community structure of the networks, as evidenced by a slower reaction time when a trial moved between communities than when a trial moved within a community. Learning the community structure of social networks was also characterized by significantly greater functional connectivity of the hippocampus and temporoparietal junction when transitioning between communities than when transitioning within a community. Furthermore, temporoparietal regions of the default mode were more strongly connected to hippocampus, somatomotor, and visual regions for social networks than for non-social networks. Collectively, our results identify neurophysiological underpinnings of social versus non-social network learning, extending our knowledge about the impact of social context on learning processes. More broadly, this work offers an empirical approach to study the learning of social network structures, which could be fruitfully extended to other participant populations, various graph architectures, and a diversity of social contexts in future studies.}
}
@article{xiaHumanLearningHierarchical2024,
abstract = {Humans are exposed to sequences of events in the environment, and the interevent transition probabilities in these sequences can be modeled as a graph or network. Many real-world networks are organized hierarchically and while much is known about how humans learn basic transition graph topology, whether and to what degree humans can learn hierarchical structures in such graphs remains unknown. We probe the mental estimates of transition probabilities via the surprisal effect phenomenon: humans react more slowly to less expected transitions. Using mean-field predictions and numerical simulations, we show that surprisal effects are stronger for finer-level than coarser-level hierarchical transitions, and that surprisal effects at coarser levels are difficult to detect for limited learning times or in small samples. Using a serial response experiment with human participants ({$n=100$}), we replicate our predictions by detecting a surprisal effect at the finer level of the hierarchy but not at the coarser level of the hierarchy. We then evaluate the presence of a trade-off in learning, whereby humans who learned the finer level of the hierarchy better also tended to learn the coarser level worse, and vice versa. This study elucidates the processes by which humans learn sequential events in hierarchical contexts. More broadly, our work charts a road map for future investigation of the neural underpinnings and behavioral manifestations of graph learning.},
author = {Xia, Xiaohuan
and Klishin, Andrei A.
and Stiso, Jennifer
and Lynn, Christopher W.
and Kahn, Ari E.
and Caciagli, Lorenzo
and Bassett, Dani S.},
author+an = {5=highlight},
date = {2024},
doi = {10.1103/PhysRevE.109.044305},
journaltitle = {Physical Review E},
number = {4},
pages = {044305},
publisher = {American Physical Society},
shortjournal = {Phys. Rev. E},
title = {Human Learning of Hierarchical Graphs},
url = {https://link.aps.org/doi/10.1103/PhysRevE.109.044305},
urldate = {2024-08-19},
volume = {109}
}