diff --git a/notebook.ipynb b/notebook.ipynb
index 6034d6b..639cd7b 100644
--- a/notebook.ipynb
+++ b/notebook.ipynb
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -14,32 +14,124 @@
"from jax import jit, grad\n",
"import jax.numpy as jnp\n",
"from functools import partial\n",
+ "from IPython.display import display, HTML, clear_output\n",
+ "import time\n",
+ "\n",
"from src.data import load_subject, make_kfolds\n",
"from src.model import loss_fn, init, apply\n",
- "from src.plots import plot_brain, plot_decoding\n",
- "from src.utils import CONFIG\n",
+ "from src.plots import plot_brain\n",
+ "from src.utils import CONFIG, matrix_to_image\n",
"from src.train import train_folds, hyperparam_fn"
]
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 2,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "2023-09-16 09:15:32.502197: W pjrt_plugin/src/mps_client.cc:535] WARNING: JAX Apple GPU support is experimental and not all JAX functionality is correctly supported!\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Metal device set to: Apple M1 Pro\n",
+ "\n",
+ "systemMemory: 16.00 GB\n",
+ "maxCacheSize: 5.33 GB\n",
+ "\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████| 131/131 [00:07<00:00, 16.66it/s]\n"
+ ]
+ }
+ ],
"source": [
- "subject = load_subject('subj05', image_size=CONFIG['image_size'])"
+ "subject = load_subject('subj05', image_size=CONFIG['image_size'])\n",
+ "hyperparams = hyperparam_fn()\n",
+ "kfolds = make_kfolds(subject, hyperparams)\n",
+ "metrics, params = train_folds(kfolds, hyperparams)"
]
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 38,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+        "[HTML image grid produced by display_image; markup omitted]"
+ ],
+ "text/plain": [
+        "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "ename": "KeyboardInterrupt",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
+ "\u001b[1;32m/Users/syrkis/code/neuroscope/notebook.ipynb Cell 3\u001b[0m line \u001b[0;36m1\n\u001b[1;32m 15\u001b[0m matrix_lst \u001b[39m=\u001b[39m [np\u001b[39m.\u001b[39mrandom\u001b[39m.\u001b[39mrand(\u001b[39m100\u001b[39m, \u001b[39m100\u001b[39m) \u001b[39mfor\u001b[39;00m _ \u001b[39min\u001b[39;00m \u001b[39mrange\u001b[39m(\u001b[39m4\u001b[39m)]\n\u001b[1;32m 16\u001b[0m display_image(matrix_lst)\n\u001b[0;32m---> 17\u001b[0m time\u001b[39m.\u001b[39;49msleep(\u001b[39m1\u001b[39;49m)\n",
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
+ ]
+ }
+ ],
"source": [
- "hyperparams = hyperparam_fn()\n",
- "kfolds = make_kfolds(subject, hyperparams)\n",
- "train_folds(kfolds, hyperparams)"
+ "def display_image(matrix_lst):\n",
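+    "    # build an HTML grid of the matrices; the markup below is a reconstruction and\n",
+    "    # assumes matrix_to_image returns a base64-encoded PNG string\n",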
+    "    html = '<div style=\"display: flex;\">'\n",
+    "    for matrix in matrix_lst:\n",
+    "        image = matrix_to_image(matrix)\n",
+    "        html += f\"\"\"\n",
+    "        <img src=\"data:image/png;base64,{image}\" style=\"width: 24%; margin: 2px;\">\n",
+    "        \"\"\"\n",
+    "\n",
+    "    html += '</div>'\n",
+ " clear_output(wait=True)\n",
+ " display(HTML(html))\n",
+ "\n",
+    "# Example usage: redraw a grid of four random 100x100 matrices once per second, ten times\n",
+ "for i in range(10):\n",
+ " matrix_lst = [np.random.rand(100, 100) for _ in range(4)]\n",
+ " display_image(matrix_lst)\n",
+ " time.sleep(1)"
]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
}
],
"metadata": {
diff --git a/notebooks/alex.ipynb b/notebooks/alex.ipynb
deleted file mode 100644
index 874c22b..0000000
--- a/notebooks/alex.ipynb
+++ /dev/null
@@ -1,150 +0,0 @@
-{
- "cells": [
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Alexnet based feature extractor"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# imports\n",
- "import warnings; warnings.filterwarnings('ignore')\n",
- "import os\n",
- "import torch\n",
- "from multiprocessing import Pool\n",
- "from torchvision.models.feature_extraction import create_feature_extractor\n",
- "from sklearn.decomposition import PCA\n",
- "import numpy as np\n",
- "from tqdm import tqdm\n",
- "from PIL import Image\n",
- "from src.utils import DATA_DIR"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# get data and model\n",
- "subjs = ['subj01', 'subj02', 'subj03', 'subj04', 'subj05', 'subj06', 'subj07', 'subj08']\n",
- "N_SAMPLES = 0\n",
- "model = torch.hub.load('pytorch/vision:v0.10.0', 'alexnet')\n",
- "feature_extractor = create_feature_extractor(model, return_nodes=[\"features.2\"])"
- ]
- },
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Get image data"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def get_img_files(subj):\n",
- " subj_img_dir = os.path.join(DATA_DIR, subj, 'training_split/training_images')\n",
- " subj_img_files = [os.path.join(subj_img_dir, f) for f in os.listdir(subj_img_dir) if f.endswith('.png')]\n",
- " return sorted(subj_img_files)\n",
- "\n",
- "def load_img_files(subj):\n",
- " # images are pngs\n",
- " img_files = get_img_files(subj)\n",
- " img_files = img_files[:N_SAMPLES] if N_SAMPLES else img_files\n",
- " imgs = []\n",
- " for f in tqdm(img_files): # make sure not to have too many files open\n",
- " with Image.open(f) as img:\n",
- " img = img.convert('RGB').resize((224, 224))\n",
- " img = torch.from_numpy(np.array(img))\n",
- " imgs.append(img)\n",
- " imgs = torch.stack(imgs)\n",
- " imgs = imgs / 255.0\n",
- " imgs = imgs.permute(0, 3, 1, 2)\n",
- " imgs = normalize(imgs)\n",
- " return imgs\n",
- "\n",
- "def normalize(imgs):\n",
- " means = [0.485, 0.456, 0.406]\n",
- " stds = [0.229, 0.224, 0.225]\n",
- " imgs = imgs.float()\n",
- " for i in range(3):\n",
- " imgs[:, i, :, :] = (imgs[:, i, :, :] - means[i]) / stds[i]\n",
- " return imgs\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def run_subj(subj):\n",
- " pca = PCA(n_components=100)\n",
- " data = load_img_files(subj)\n",
- " feats = feature_extractor(data)\n",
- " feats = torch.hstack([torch.flatten(l, start_dim=1) for l in feats.values()])\n",
- " feats = feats.detach().numpy()\n",
- " feats = feats.reshape(feats.shape[0], -1)\n",
- " feats = pca.fit_transform(feats)\n",
- " np.save(os.path.join(DATA_DIR, subj, 'training_split', 'alexnet_pca.npy'), feats)\n",
- "\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# with Pool(2) as p:\n",
- "# p.map(run_subj, subjs)\n",
- "# run last 4 subjects in parallel\n",
- "for subj in subjs[4:]:\n",
- " print(f'running {subj}')\n",
- " run_subj(subj)\n",
- " print()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.3"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/data.ipynb b/notebooks/data.ipynb
deleted file mode 100644
index eec9937..0000000
--- a/notebooks/data.ipynb
+++ /dev/null
@@ -1,58 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from matplotlib import pyplot as plt\n",
- "from src.utils import get_args_and_config\n",
- "from src.data import get_data"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "args, config = get_args_and_config()\n",
- "data = get_data(args, config)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "for subject, (folds, test_data) in data.items():\n",
- " print(subject)\n",
- " for img, cat, lh, rh in folds:\n",
- " print(img.shape, cat.shape, lh.shape, rh.shape)\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "name": "python",
- "version": "3.11.3"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/dropout.ipynb b/notebooks/dropout.ipynb
deleted file mode 100644
index 13bf9c2..0000000
--- a/notebooks/dropout.ipynb
+++ /dev/null
@@ -1,98 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import jax\n",
- "import jax.numpy as jnp\n",
- "import haiku as hk"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def network_fn(x, training=True):\n",
- " mlp = hk.nets.MLP([10, 10])\n",
- " # apply dropout if training\n",
- " x = hk.dropout(hk.next_rng_key(), 0.5, x) if training else x\n",
- " x = mlp(x)\n",
- " x = hk.dropout(hk.next_rng_key(), 0.5, x) if training else x\n",
- " return x\n",
- "\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "x = jnp.ones((8, 28*28))\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "forward = hk.transform(network_fn)\n",
- "rng = jax.random.PRNGKey(42)\n",
- "params = forward.init(rng, x)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "for i in range(10):\n",
- " pred = forward.apply(params, rng, x, training=True)\n",
- " print(pred)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.3"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/eval.ipynb b/notebooks/eval.ipynb
deleted file mode 100644
index ae5b8ed..0000000
--- a/notebooks/eval.ipynb
+++ /dev/null
@@ -1,173 +0,0 @@
-{
- "cells": [
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# eval"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 6/6 [00:00<00:00, 41.18it/s]\n",
- "100%|██████████| 6/6 [00:07<00:00, 1.19s/it]\n"
- ]
- }
- ],
- "source": [
- "import numpy as np\n",
- "from sklearn.linear_model import LinearRegression\n",
- "import pickle\n",
- "import yaml\n",
- "from tqdm import tqdm\n",
- "import jax.numpy as jnp\n",
- "from src.utils import get_args_and_config, SUBJECTS\n",
- "from src.data import get_data\n",
- "from src.eval import corr\n",
- "from src.fmri import plot_brain"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 0%| | 0/6 [00:00, ?it/s]"
- ]
- }
- ],
- "source": [
- "args, config = get_args_and_config()\n",
- "data = get_data(args)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 6/6 [00:47<00:00, 7.84s/it]\n"
- ]
- },
- {
- "data": {
- "text/plain": [
- "(0.20836793, 0.20575279)"
- ]
- },
- "execution_count": 3,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "def algonauts_model(subject, train_data, test_data):\n",
- " \"\"\"algonauts_model function\"\"\"\n",
- " train_data_img = np.vstack([x[0] for x in train_data])\n",
- " train_data_lh = np.vstack([x[1] for x in train_data])\n",
- " train_data_rh = np.vstack([x[2] for x in train_data])\n",
- " train_data = (train_data_img, train_data_lh, train_data_rh)\n",
- "\n",
- " lh_model = LinearRegression().fit(train_data_img, train_data_lh)\n",
- " rh_model = LinearRegression().fit(train_data_img, train_data_rh)\n",
- "\n",
- " # save model\n",
- " # pickle.dump(lh_model, open(f'./models/{subject}_lh_algonauts_model.pkl', 'wb'))\n",
- " # pickle.dump(rh_model, open(f'./models/{subject}_rh_algonauts_model.pkl', 'wb'))\n",
- "\n",
- " # test model\n",
- " test_data_img, test_data_lh, test_data_rh, _ = test_data\n",
- " \n",
- " test_lh_pred = lh_model.predict(test_data_img)\n",
- " test_rh_pred = rh_model.predict(test_data_img)\n",
- " test_lh_corr = corr(test_lh_pred, test_data_lh)\n",
- " test_rh_corr = corr(test_rh_pred, test_data_rh)\n",
- " return test_lh_corr, test_rh_corr\n",
- "\n",
- "\n",
- "lh_corrs, rh_corrs = [], []\n",
- "for subject in tqdm(data.keys()):\n",
- " test_lh_corr, test_rh_corr = algonauts_model(subject, data[subject][0], data[subject][1])\n",
- " break\n",
- "\n",
- "plot_brain(test_lh_corr, 'subject', 'lh')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {},
- "outputs": [],
- "source": [
- "data = {}\n",
- "for idx, subj in enumerate(SUBJECTS):\n",
- " data[subj] = {'lh': lh_corrs[idx], 'rh': rh_corrs[idx]}"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {},
- "outputs": [],
- "source": [
- "with open('./corrs.csv', 'w') as f:\n",
- " f.write('subject,lh,rh\\n')\n",
- " for subj in data.keys():\n",
- " f.write(f'{subj},{data[subj][\"lh\"]},{data[subj][\"rh\"]}\\n')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Dockerfile corrs.csv main.py pyproject.toml\t src\n",
- "README.md data\t models report\t\t streamlit_app.py\n",
- "config\t jobs\t notebooks requirements.txt wandb\n",
- "container.sif logs\t poetry.lock setup.py\n"
- ]
- }
- ],
- "source": [
- "!ls"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/fmri.ipynb b/notebooks/fmri.ipynb
deleted file mode 100644
index b2bdc67..0000000
--- a/notebooks/fmri.ipynb
+++ /dev/null
@@ -1,156 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import numpy as np\n",
- "import os\n",
- "import networkx as nx\n",
- "from nilearn import datasets, plotting\n",
- "from tqdm import tqdm\n",
- "from nilearn.connectome import ConnectivityMeasure\n",
- "import numpy as np\n",
- "from jax import numpy as jnp\n",
- "import jraph\n",
- "import yaml\n",
- "from src.plots import plot_graph, plot_regions\n",
- "from src.utils import ROIS, DATA_DIR, SUBJECTS"
- ]
- },
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## fmri"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import os\n",
- "def subject_dir_files(subject):\n",
- " return [\n",
- " os.path.join(DATA_DIR, subject, \"roi_masks\", f)\n",
- " for f in sorted(os.listdir(os.path.join(DATA_DIR, subject, \"roi_masks\")))\n",
- " if f.startswith(\"mapping_\")\n",
- " ]"
- ]
- },
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## connectome"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "subject = \"subj01\"\n",
- "lh_fmri, rh_fmri = get_fmri(subject)\n",
- "# res = sum(map(lambda x: fsaverage_roi_response_to_image(subject, x, 0, \"left\"), rois))\n",
- "plot_regions(subject, rois, 'left', 0)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def connectome_from_roi_response(subject, roi, hem): # this is wrong\n",
- " roi_mask = get_roi_mask(subject, roi, hem, atlas=\"challenge\")\n",
- " fmri = lh_fmri if hem == \"left\" else rh_fmri\n",
- " roi_response = fmri[:, roi_mask]\n",
- " connectivity_measure = ConnectivityMeasure(kind=\"covariance\")\n",
- " connectivity_matrix = connectivity_measure.fit_transform([roi_response])[0]\n",
- " connectome = connectivity_matrix_to_connectome(connectivity_matrix)\n",
- " return connectome\n",
- "\n",
- "\n",
- "def connectivity_matrix_to_connectome(connectivity_matrix):\n",
- " # given a connectivity matrix, return a graph\n",
- " N = connectivity_matrix.shape[0]\n",
- " thresh = np.percentile(\n",
- " np.abs(connectivity_matrix), 100 * (N - (N / 100)) / N\n",
- " ) # consider thresholding differently as n edges increases with nodes ** 2\n",
- " connectivity_matrix[np.abs(connectivity_matrix) < thresh] = 0\n",
- " # set diagonal to 0\n",
- " np.fill_diagonal(connectivity_matrix, 0)\n",
- " graph = nx.from_numpy_array(connectivity_matrix)\n",
- " return graph, connectivity_matrix"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def build_connectome(roi, hem):\n",
- " nx_graph, _ = connectome_from_roi_response(roi, hem)\n",
- " # nx_graph = nx.karate_club_graph() # TODO: replace with real graph\n",
- " nodes = jnp.eye(len(nx_graph.nodes))\n",
- " edges = None # jnp.ones((len(G.edges), 1))\n",
- " senders = jnp.array([e[0] for e in list(nx_graph.edges)])\n",
- " receivers = jnp.array([e[1] for e in list(nx_graph.edges)])\n",
- " n_node = len(nodes)\n",
- " n_edge = len(senders)\n",
- " global_context = None # jnp.array([1.0])\n",
- " graph = jraph.GraphsTuple(\n",
- " nodes=nodes,\n",
- " edges=edges,\n",
- " senders=senders,\n",
- " receivers=receivers,\n",
- " n_node=n_node,\n",
- " n_edge=n_edge,\n",
- " globals=global_context,\n",
- " )\n",
- " return graph\n",
- "\n",
- "\n",
- "graph = build_connectome(\"V1d\", \"left\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.3"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/mango.ipynb b/notebooks/mango.ipynb
deleted file mode 100644
index 90fa9c5..0000000
--- a/notebooks/mango.ipynb
+++ /dev/null
@@ -1,252 +0,0 @@
-{
- "cells": [
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Neuroscape playground"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import jax\n",
- "import jax.numpy as jnp\n",
- "from jax import grad, jit, vmap\n",
- "import optax\n",
- "from tqdm import tqdm\n",
- "import haiku as hk\n",
- "import numpy as np\n",
- "import wandb\n",
- "from functools import partial\n",
- "from src.data import get_data\n",
- "from src.utils import get_args_and_config\n",
- "from src.fmri import plot_brain"
- ]
- },
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## MANGO"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "args, _ = get_args_and_config()\n",
- "data = get_data(args)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "opt = optax.adamw(0.001) # perhaps hyper param search for lr and weight decay"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def forward(x):\n",
- " x_mlp = hk.Sequential([\n",
- " hk.nets.MLP([100] * 1, activation=jnp.tanh),\n",
- " hk.Linear(100),\n",
- " ])\n",
- " lh_ml = hk.Sequential([\n",
- " hk.nets.MLP([100] * 1, activation=jnp.tanh),\n",
- " hk.Linear(19004),\n",
- " ])\n",
- " rh_ml = hk.Sequential([\n",
- " hk.nets.MLP([100] * 1, activation=jnp.tanh),\n",
- " hk.Linear(20544),\n",
- " ])\n",
- " x = x_mlp(x)\n",
- " lh_hat = lh_ml(x)\n",
- " rh_hat = rh_ml(x)\n",
- " return lh_hat, rh_hat\n",
- "\n",
- "init_fn, apply_fn = hk.without_apply_rng(hk.transform(forward))\n",
- "\n",
- "\n",
- "def loss_fn(params, batch, hem):\n",
- " x, lh, rh = batch\n",
- " lh_hat, rh_hat = apply_fn(params, x)\n",
- " return jnp.mean((lh_hat - lh) ** 2) if hem == 'lh' else jnp.mean((rh_hat - rh) ** 2)\n",
- "\n",
- "lh_loss_fn = jit(partial(loss_fn, hem='lh'))\n",
- "rh_loss_fn = jit(partial(loss_fn, hem='rh'))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def get_fold(fold, fold_idx):\n",
- " train_data = [fold for idx, fold in enumerate(fold) if idx != fold_idx]\n",
- " train_data = list(map(jnp.vstack, zip(*train_data)))\n",
- " val_data = fold[fold_idx]\n",
- " return train_data, val_data\n",
- "\n",
- "def get_batch(data, batch_size):\n",
- " while True:\n",
- " perm = np.random.permutation(data[0].shape[0])\n",
- " for i in range(0, data[0].shape[0], batch_size):\n",
- " idx = perm[i:i + batch_size]\n",
- " # x = data[0][idx]\n",
- " x = jnp.concatenate([data[0][idx], data[3][idx]], axis=1)\n",
- " lh = data[1][idx]\n",
- " rh = data[2][idx]\n",
- " yield x, lh, rh\n",
- " \n",
- "def train(model, data, config):\n",
- " group = wandb.util.generate_id()\n",
- " for subject, (folds, test_data) in data.items():\n",
- " train_data = list(map(jnp.vstack, zip(*folds)))\n",
- " with wandb.init(project=\"neuroscope\", entity='syrkis', group=group) as run:\n",
- " train_fold(model, train_data, test_data, config)\n",
- "\n",
- "def train_fold(model, train_data, val_data, config):\n",
- " lh_params = init_fn(jax.random.PRNGKey(42), jnp.ones((1, 180)))\n",
- " rh_params = init_fn(jax.random.PRNGKey(42), jnp.ones((1, 180)))\n",
- " lh_opt_state = opt.init(lh_params)\n",
- " rh_opt_state = opt.init(rh_params)\n",
- " train_batches = get_batch(train_data, config['batch_size'])\n",
- " val_batches = get_batch(val_data, config['batch_size'])\n",
- " for step in tqdm(range(config['n_steps'])):\n",
- " train_batch = next(train_batches)\n",
- " lh_params, lh_opt_state = lh_update(lh_params, train_batch, lh_opt_state)\n",
- " rh_params, rh_opt_state = rh_update(rh_params, train_batch, rh_opt_state)\n",
- " if step % (config['n_steps'] // 100) == 0:\n",
- " metrics = evaluate(lh_params, rh_params, train_batches, val_batches)\n",
- " wandb.log(metrics)\n",
- " metrics = evaluate(lh_params, rh_params, train_batches, val_batches, steps=50)\n",
- " wandb.finish()\n",
- "\n",
- "def evaluate(lh_params, rh_params, train_batches, val_batches, steps=3):\n",
- " train_metrics = evaluate_fold(lh_params, rh_params, train_batches, steps)\n",
- " val_metrics = evaluate_fold(lh_params, rh_params, val_batches, steps)\n",
- " metrics = {f'train_{k}': v for k, v in train_metrics.items()}\n",
- " metrics.update({f'val_{k}': v for k, v in val_metrics.items()})\n",
- " return metrics\n",
- "\n",
- "def evaluate_fold(lh_params, rh_params, batches, steps):\n",
- " metrics = {}\n",
- " for i in range(steps):\n",
- " batch = next(batches)\n",
- " batch_metrics = evaluate_batch(lh_params, rh_params, batch)\n",
- " metrics = {k: metrics.get(k, 0) + v for k, v in batch_metrics.items()}\n",
- " metrics = {k: v / steps for k, v in metrics.items()}\n",
- " return metrics\n",
- " \n",
- "\n",
- "def evaluate_batch(lh_params, rh_params, batch):\n",
- " metrics = {}\n",
- " for hem, params in zip(['lh', 'rh'], [lh_params, rh_params]):\n",
- " mse, corr = evaluate_hem(params, batch, hem)\n",
- " metrics[f'{hem}_mse'] = mse\n",
- " metrics[f'{hem}_corr'] = corr\n",
- " return metrics\n",
- "\n",
- "def evaluate_hem(params, batch, hem):\n",
- " x, lh, rh = batch\n",
- " lh_hat, rh_hat = apply_fn(params, x)\n",
- " mse = jnp.mean((lh_hat - lh) ** 2) if hem == 'lh' else jnp.mean((rh_hat - rh) ** 2)\n",
- " # compute the median collumn wise correlation\n",
- " corr = pearsonr(lh_hat, lh) if hem == 'lh' else pearsonr(rh_hat, rh)\n",
- " return mse, jnp.median(corr)\n",
- "\n",
- "\n",
- "# function for computing pearson's correlation coefficient for each voxel of a subject's fMRI data\n",
- "def pearsonr(pred, target):\n",
- " def _pearsonr(x, y):\n",
- " corr = jnp.corrcoef(x, y)\n",
- " return corr[0, 1]\n",
- " hem_corr = vmap(_pearsonr)(pred.T, target.T)\n",
- " return hem_corr\n",
- "\n",
- "\n",
- "\n",
- "def update(params, batch, opt_state, hem):\n",
- " \"\"\"update function\"\"\"\n",
- " loss_fn = lh_loss_fn if hem == 'lh' else rh_loss_fn\n",
- " grads = grad(loss_fn)(params, batch)\n",
- " updates, opt_state = opt.update(grads, opt_state, params)\n",
- " new_params = optax.apply_updates(params, updates)\n",
- " return new_params, opt_state\n",
- "\n",
- "lh_update = jit(partial(update, hem='lh'))\n",
- "rh_update = jit(partial(update, hem='rh'))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "config = { 'n_steps': 6000, 'batch_size': 32 }"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "train(apply_fn, data, config)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.3"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/train.ipynb b/notebooks/train.ipynb
deleted file mode 100644
index f6991c6..0000000
--- a/notebooks/train.ipynb
+++ /dev/null
@@ -1,262 +0,0 @@
-{
- "cells": [
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Neuroscape playground"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import jax\n",
- "import jax.numpy as jnp\n",
- "from jax import grad, jit, vmap\n",
- "import optax\n",
- "from tqdm import tqdm\n",
- "import haiku as hk\n",
- "import numpy as np\n",
- "import wandb\n",
- "from functools import partial\n",
- "from src.data import get_data\n",
- "from src.utils import get_args_and_config\n",
- "from src.fmri import plot_brain\n",
- "from src.model import network_fn, mse, focal_loss, loss_fn_base"
- ]
- },
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Train with final hyperparameters (and alpha and beta set to 0 (baseline))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "args, _ = get_args_and_config()\n",
- "data = get_data(args)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "opt = optax.adamw(0.001) # perhaps hyper param search for lr and weight decay"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "config = {\n",
- " 'alpha': 0,\n",
- " 'beta': 0,\n",
- " 'n_steps': 6000,\n",
- " 'batch_size': 32,\n",
- " 'n_units': 100,\n",
- " 'n_layers': 2,\n",
- " 'latent_dim': 100,\n",
- " 'dropout': 0.15,\n",
- " }"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "\n",
- "forward = hk.transform(partial(network_fn, config=config))\n",
- "rng = jax.random.PRNGKey(42)\n",
- "\n",
- "\n",
- "def loss_fn(params, rng, batch, hem, config):\n",
- " alpha = config['alpha']\n",
- " beta = config['beta']\n",
- " x, lh, rh, cat = batch\n",
- " lh_hat, rh_hat, cat_hat = forward.apply(params, rng, x)\n",
- " lh_loss = mse(lh_hat, lh)\n",
- " rh_loss = mse(rh_hat, rh)\n",
- " cat_loss = focal_loss(cat_hat, cat)\n",
- " hem_loss = lh_loss if hem == 'lh' else rh_loss\n",
- " not_hem_loss = rh_loss if hem == 'lh' else lh_loss\n",
- " fmri_loss = (1 - beta) * hem_loss + beta * not_hem_loss\n",
- " loss = (1 - alpha) * fmri_loss + alpha * cat_loss\n",
- " return loss\n",
- "\n",
- "lh_loss_fn = jit(partial(loss_fn, hem='lh', config=config))\n",
- "rh_loss_fn = jit(partial(loss_fn, hem='rh', config=config))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def get_fold(fold, fold_idx):\n",
- " train_data = [fold for idx, fold in enumerate(fold) if idx != fold_idx]\n",
- " train_data = list(map(jnp.vstack, zip(*train_data)))\n",
- " val_data = fold[fold_idx]\n",
- " return train_data, val_data\n",
- "\n",
- "def get_batch(data, batch_size):\n",
- " while True:\n",
- " perm = np.random.permutation(data[0].shape[0])\n",
- " for i in range(0, data[0].shape[0], batch_size):\n",
- " idx = perm[i:i + batch_size]\n",
- " x = data[0][idx]\n",
- " lh = data[1][idx]\n",
- " rh = data[2][idx]\n",
- " cat = data[3][idx]\n",
- " yield x, lh, rh, cat\n",
- " \n",
- "def train(data, config):\n",
- " group = wandb.util.generate_id()\n",
- " for subject, (folds, test_data) in data.items():\n",
- " train_data = list(map(jnp.vstack, zip(*folds)))\n",
- " with wandb.init(project=\"neuroscope\", entity='syrkis', group=group, config=config) as run:\n",
- " train_fold(train_data, test_data, config)\n",
- "\n",
- "def train_fold(train_data, val_data, config):\n",
- " lh_params = forward.init(jax.random.PRNGKey(42), jnp.ones((1, 100)))\n",
- " rh_params = forward.init(jax.random.PRNGKey(42), jnp.ones((1, 100)))\n",
- " lh_opt_state = opt.init(lh_params)\n",
- " rh_opt_state = opt.init(rh_params)\n",
- " train_batches = get_batch(train_data, config['batch_size'])\n",
- " val_batches = get_batch(val_data, config['batch_size'])\n",
- " for step in tqdm(range(config['n_steps'])):\n",
- " train_batch = next(train_batches)\n",
- " lh_params, lh_opt_state = lh_update(lh_params, train_batch, lh_opt_state)\n",
- " rh_params, rh_opt_state = rh_update(rh_params, train_batch, rh_opt_state)\n",
- " if step % (config['n_steps'] // 100) == 0:\n",
- " metrics = evaluate(lh_params, rh_params, train_batches, val_batches)\n",
- " wandb.log(metrics)\n",
- " metrics = evaluate(lh_params, rh_params, train_batches, val_batches, steps=50)\n",
- " wandb.finish()\n",
- "\n",
- "def evaluate(lh_params, rh_params, train_batches, val_batches, steps=3):\n",
- " train_metrics = evaluate_fold(lh_params, rh_params, train_batches, steps)\n",
- " val_metrics = evaluate_fold(lh_params, rh_params, val_batches, steps, training=False)\n",
- " metrics = {f'train_{k}': v for k, v in train_metrics.items()}\n",
- " metrics.update({f'val_{k}': v for k, v in val_metrics.items()})\n",
- " return metrics\n",
- "\n",
- "def evaluate_fold(lh_params, rh_params, batches, steps, training=True):\n",
- " metrics = {}\n",
- " for i in range(steps):\n",
- " batch = next(batches)\n",
- " batch_metrics = evaluate_batch(lh_params, rh_params, batch, training)\n",
- " metrics = {k: metrics.get(k, 0) + v for k, v in batch_metrics.items()}\n",
- " metrics = {k: v / steps for k, v in metrics.items()}\n",
- " return metrics\n",
- " \n",
- "\n",
- "def evaluate_batch(lh_params, rh_params, batch, training):\n",
- " metrics = {}\n",
- " for hem, params in zip(['lh', 'rh'], [lh_params, rh_params]):\n",
- " mse, corr = evaluate_hem(params, batch, hem, training)\n",
- " metrics[f'{hem}_mse'] = mse\n",
- " metrics[f'{hem}_corr'] = corr\n",
- " return metrics\n",
- "\n",
- "def evaluate_hem(params, batch, hem, training):\n",
- " x, lh, rh, _ = batch\n",
- " lh_hat, rh_hat, _ = forward.apply(params, rng, x, training=training)\n",
- " mse = jnp.mean((lh_hat - lh) ** 2) if hem == 'lh' else jnp.mean((rh_hat - rh) ** 2)\n",
- " # compute the median collumn wise correlation\n",
- " corr = pearsonr(lh_hat, lh) if hem == 'lh' else pearsonr(rh_hat, rh)\n",
- " return mse, jnp.median(corr)\n",
- "\n",
- "\n",
- "# function for computing pearson's correlation coefficient for each voxel of a subject's fMRI data\n",
- "def pearsonr(pred, target):\n",
- " def _pearsonr(x, y):\n",
- " corr = jnp.corrcoef(x, y)\n",
- " return corr[0, 1]\n",
- " hem_corr = vmap(_pearsonr)(pred.T, target.T)\n",
- " return hem_corr\n",
- "\n",
- "\n",
- "\n",
- "def update(params, batch, opt_state, hem):\n",
- " \"\"\"update function\"\"\"\n",
- " loss_fn = lh_loss_fn if hem == 'lh' else rh_loss_fn\n",
- " grads = grad(loss_fn)(params, rng, batch)\n",
- " updates, opt_state = opt.update(grads, opt_state, params)\n",
- " new_params = optax.apply_updates(params, updates)\n",
- " return new_params, opt_state\n",
- "\n",
- "lh_update = jit(partial(update, hem='lh'))\n",
- "rh_update = jit(partial(update, hem='rh'))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "train(data, config)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.3"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/report/biblio.bib b/report/biblio.bib
deleted file mode 100644
index 7cc1809..0000000
--- a/report/biblio.bib
+++ /dev/null
@@ -1,924 +0,0 @@
-@article{allen2022,
- title = {A Massive {{7T fMRI}} Dataset to Bridge Cognitive Neuroscience and Artificial Intelligence},
- author = {Allen, Emily J. and St-Yves, Ghislain and Wu, Yihan and Breedlove, Jesse L. and Prince, Jacob S. and Dowdle, Logan T. and Nau, Matthias and Caron, Brad and Pestilli, Franco and Charest, Ian and Hutchinson, J. Benjamin and Naselaris, Thomas and Kay, Kendrick},
- date = {2022-01},
- journaltitle = {Nature Neuroscience},
- shortjournal = {Nat Neurosci},
- volume = {25},
- number = {1},
- pages = {116--126},
- publisher = {{Nature Publishing Group}},
- issn = {1546-1726},
- doi = {10.1038/s41593-021-00962-x},
- url = {https://www.nature.com/articles/s41593-021-00962-x},
- urldate = {2023-05-11},
- issue = {1},
- langid = {english}
-}
-
-@article{chang2019,
- title = {{{BOLD5000}}, a Public {{fMRI}} Dataset While Viewing 5000 Visual Images},
- author = {Chang, Nadine and Pyles, John A. and Marcus, Austin and Gupta, Abhinav and Tarr, Michael J. and Aminoff, Elissa M.},
- date = {2019-05-06},
- journaltitle = {Scientific Data},
- shortjournal = {Sci Data},
- volume = {6},
- number = {1},
- pages = {49},
- issn = {2052-4463},
- doi = {10.1038/s41597-019-0052-3},
- url = {https://www.nature.com/articles/s41597-019-0052-3},
- urldate = {2023-06-04},
- langid = {english}
-}
-
-@book{gibbons2012,
- title = {Multimodality, {{Cognition}}, and {{Experimental Literature}}},
- author = {Gibbons, Alison},
- date = {2012-05-22},
- eprint = {pyQ1M6hR_ckC},
- eprinttype = {googlebooks},
- publisher = {{Routledge}},
- isbn = {978-1-136-63221-1},
- langid = {english},
- pagetotal = {276}
-}
-
-@online{gifford2023,
- title = {The {{Algonauts Project}} 2023 {{Challenge}}: {{How}} the {{Human Brain Makes Sense}} of {{Natural Scenes}}},
- shorttitle = {The {{Algonauts Project}} 2023 {{Challenge}}},
- author = {Gifford, A. T. and Lahner, B. and Saba-Sadiya, S. and Vilas, M. G. and Lascelles, A. and Oliva, A. and Kay, K. and Roig, G. and Cichy, R. M.},
- date = {2023-01-10},
- eprint = {2301.03198},
- eprinttype = {arxiv},
- eprintclass = {cs, q-bio},
- url = {http://arxiv.org/abs/2301.03198},
- urldate = {2023-05-10},
- pubstate = {preprint}
-}
-
-@article{gonzalez-casillas2018,
- title = {Towards a Model of Visual Recognition Based on Neurosciences},
- author = {González-Casillas, Adrián and Parra, Luis and Martin, Luis and Avila-Contreras, Cynthia and Ramirez-Pedraza, Raymundo and Vargas, Natividad and family=Valle-Padilla, given=Juan Luis, prefix=del, useprefix=true and Ramos, Félix},
- date = {2018-01-01},
- journaltitle = {Procedia Computer Science},
- shortjournal = {Procedia Computer Science},
- series = {Postproceedings of the 9th {{Annual International Conference}} on {{Biologically Inspired Cognitive Architectures}}, {{BICA}} 2018 ({{Ninth Annual Meeting}} of the {{BICA Society}}), Held {{August}} 22-24, 2018 in {{Prague}}, {{Czech Republic}}},
- volume = {145},
- pages = {214--231},
- issn = {1877-0509},
- doi = {10.1016/j.procs.2018.11.043},
- url = {https://www.sciencedirect.com/science/article/pii/S1877050918323299},
- urldate = {2023-06-03},
- langid = {english}
-}
-
-@article{groen2017,
- title = {Contributions of Low- and High-Level Properties to Neural Processing of Visual Scenes in the Human Brain},
- author = {Groen, Iris I. A. and Silson, Edward H. and Baker, Chris I.},
- date = {2017-02-19},
- journaltitle = {Philosophical Transactions of the Royal Society B: Biological Sciences},
- shortjournal = {Philos Trans R Soc Lond B Biol Sci},
- volume = {372},
- number = {1714},
- eprint = {28044013},
- eprinttype = {pmid},
- pages = {20160102},
- issn = {0962-8436},
- doi = {10.1098/rstb.2016.0102},
- url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5206270/},
- urldate = {2023-06-03},
- pmcid = {PMC5206270}
-}
-
-@article{gu2017,
- title = {Learning {{Joint Multimodal Representation Based}} on {{Multi-fusion Deep Neural Networks}}},
- author = {Gu, Zepeng and Lang, B. and Yue, Tongyu and Huang, Lei},
- date = {2017},
- doi = {10.1007/978-3-319-70096-0_29},
- url = {https://consensus.app/details/also-introduce-learning-features-multimodal-learning-gu/a9f3576d0e4a5d379d40b90ec660e425/},
- urldate = {2023-06-04}
-}
-
-@article{haxby2001,
- title = {Distributed and Overlapping Representations of Faces and Objects in Ventral Temporal Cortex},
- author = {Haxby, J. V. and Gobbini, M. I. and Furey, M. L. and Ishai, A. and Schouten, J. L. and Pietrini, P.},
- date = {2001-09-28},
- journaltitle = {Science (New York, N.Y.)},
- shortjournal = {Science},
- volume = {293},
- number = {5539},
- eprint = {11577229},
- eprinttype = {pmid},
- pages = {2425--2430},
- issn = {0036-8075},
- doi = {10.1126/science.1063736},
- langid = {english}
-}
-
-@article{kell2018,
- title = {A {{Task-Optimized Neural Network Replicates Human Auditory Behavior}}, {{Predicts Brain Responses}}, and {{Reveals}} a {{Cortical Processing Hierarchy}}},
- author = {Kell, Alexander J. E. and Yamins, Daniel L. K. and Shook, Erica N. and Norman-Haignere, Sam V. and McDermott, Josh H.},
- date = {2018-05-02},
- journaltitle = {Neuron},
- shortjournal = {Neuron},
- volume = {98},
- number = {3},
- eprint = {29681533},
- eprinttype = {pmid},
- pages = {630-644.e16},
- issn = {1097-4199},
- doi = {10.1016/j.neuron.2018.03.044},
- langid = {english}
-}
-
-@article{kietzmann2019,
- title = {Recurrence Is Required to Capture the Representational Dynamics of the Human Visual System},
- author = {Kietzmann, Tim C. and Spoerer, Courtney J. and Sörensen, Lynn K. A. and Cichy, Radoslaw M. and Hauk, Olaf and Kriegeskorte, Nikolaus},
- date = {2019-10-22},
- journaltitle = {Proceedings of the National Academy of Sciences of the United States of America},
- shortjournal = {Proc Natl Acad Sci U S A},
- volume = {116},
- number = {43},
- eprint = {31591217},
- eprinttype = {pmid},
- pages = {21854--21863},
- issn = {1091-6490},
- doi = {10.1073/pnas.1905544116},
- langid = {english},
- pmcid = {PMC6815174}
-}
-
-@online{kliger2019,
- type = {preprint},
- title = {The Functional Organization of High-Level Visual Cortex Determines the Representation of Complex Visual Stimuli},
- author = {Kliger, Libi and Yovel, Galit},
- date = {2019-12-23},
- eprinttype = {Neuroscience},
- doi = {10.1101/2019.12.22.852293},
- url = {http://biorxiv.org/lookup/doi/10.1101/2019.12.22.852293},
- urldate = {2023-06-04},
- langid = {english},
- pubstate = {preprint}
-}
-
-@article{koivisto2011,
- title = {Recurrent {{Processing}} in {{V1}}/{{V2 Contributes}} to {{Categorization}} of {{Natural Scenes}}},
- author = {Koivisto, Mika and Railo, Henry and Revonsuo, Antti and Vanni, Simo and Salminen-Vaparanta, Niina},
- date = {2011-02-16},
- journaltitle = {The Journal of Neuroscience},
- shortjournal = {J. Neurosci.},
- volume = {31},
- number = {7},
- pages = {2488--2492},
- issn = {0270-6474, 1529-2401},
- doi = {10.1523/JNEUROSCI.3074-10.2011},
- url = {https://www.jneurosci.org/lookup/doi/10.1523/JNEUROSCI.3074-10.2011},
- urldate = {2023-06-04},
- langid = {english}
-}
-
-@article{kolchinsky2014,
- title = {Multi-Scale Integration and Predictability in Resting State Brain Activity},
- author = {Kolchinsky, Artemy and family=Heuvel, given=Martijn P., prefix=van den, useprefix=true and Griffa, Alessandra and Hagmann, Patric and Rocha, Luis M. and Sporns, Olaf and Goñi, Joaquín},
- date = {2014},
- journaltitle = {Frontiers in Neuroinformatics},
- volume = {8},
- issn = {1662-5196},
- url = {https://www.frontiersin.org/articles/10.3389/fninf.2014.00066},
- urldate = {2023-05-13}
-}
-
-@article{kriegeskorte2015,
- title = {Deep {{Neural Networks}}: {{A New Framework}} for {{Modeling Biological Vision}} and {{Brain Information Processing}}},
- shorttitle = {Deep {{Neural Networks}}},
- author = {Kriegeskorte, Nikolaus},
- date = {2015},
- journaltitle = {Annual Review of Vision Science},
- volume = {1},
- number = {1},
- eprint = {28532370},
- eprinttype = {pmid},
- pages = {417--446},
- doi = {10.1146/annurev-vision-082114-035447},
- url = {https://doi.org/10.1146/annurev-vision-082114-035447},
- urldate = {2023-06-06}
-}
-
-@inproceedings{krizhevsky2012,
- title = {{{ImageNet Classification}} with {{Deep Convolutional Neural Networks}}},
- booktitle = {Advances in {{Neural Information Processing Systems}}},
- author = {Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E},
- date = {2012},
- volume = {25},
- publisher = {{Curran Associates, Inc.}},
- url = {https://papers.nips.cc/paper_files/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html},
- urldate = {2023-06-06}
-}
-
-@article{kumar2020,
- title = {{{BrainIAK}} Tutorials: {{User-friendly}} Learning Materials for Advanced {{fMRI}} Analysis},
- shorttitle = {{{BrainIAK}} Tutorials},
- author = {Kumar, Manoj and Ellis, Cameron T. and Lu, Qihong and Zhang, Hejia and Capotă, Mihai and Willke, Theodore L. and Ramadge, Peter J. and Turk-Browne, Nicholas B. and Norman, Kenneth A.},
- date = {2020-01-15},
- journaltitle = {PLOS Computational Biology},
- shortjournal = {PLOS Computational Biology},
- volume = {16},
- number = {1},
- pages = {e1007549},
- publisher = {{Public Library of Science}},
- issn = {1553-7358},
- doi = {10.1371/journal.pcbi.1007549},
- url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1007549},
- urldate = {2023-05-12},
- langid = {english}
-}
-
-@article{lecun2015,
- title = {Deep Learning},
- author = {LeCun, Yann and Bengio, Yoshua and Hinton, Geoffrey},
- date = {2015-05},
- journaltitle = {Nature},
- volume = {521},
- number = {7553},
- pages = {436--444},
- publisher = {{Nature Publishing Group}},
- issn = {1476-4687},
- doi = {10.1038/nature14539},
- url = {https://www.nature.com/articles/nature14539},
- urldate = {2023-06-06},
- issue = {7553},
- langid = {english}
-}
-
-@online{lee2021,
- title = {A Surrogate Loss Function for Optimization of \${{F}}\_\textbackslash beta\$ Score in Binary Classification with Imbalanced Data},
- author = {Lee, Namgil and Yang, Heejung and Yoo, Hojin},
- date = {2021-04-03},
- eprint = {2104.01459},
- eprinttype = {arxiv},
- eprintclass = {cs, stat},
- url = {http://arxiv.org/abs/2104.01459},
- urldate = {2023-05-16},
- pubstate = {preprint}
-}
-
-@online{lin2015,
- title = {Microsoft {{COCO}}: {{Common Objects}} in {{Context}}},
- shorttitle = {Microsoft {{COCO}}},
- author = {Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Bourdev, Lubomir and Girshick, Ross and Hays, James and Perona, Pietro and Ramanan, Deva and Zitnick, C. Lawrence and Dollár, Piotr},
- date = {2015-02-20},
- eprint = {1405.0312},
- eprinttype = {arxiv},
- eprintclass = {cs},
- url = {http://arxiv.org/abs/1405.0312},
- urldate = {2023-05-08},
- pubstate = {preprint}
-}
-
-@online{lin2022,
- title = {Mind {{Reader}}: {{Reconstructing}} Complex Images from Brain Activities},
- shorttitle = {Mind {{Reader}}},
- author = {Lin, Sikun and Sprague, Thomas and Singh, Ambuj K.},
- date = {2022-09-30},
- url = {https://arxiv.org/abs/2210.01769v1},
- urldate = {2023-05-10},
- langid = {english},
- organization = {{arXiv.org}}
-}
-
-@online{lin2022a,
- title = {Mind {{Reader}}: {{Reconstructing}} Complex Images from Brain Activities},
- shorttitle = {Mind {{Reader}}},
- author = {Lin, Sikun and Sprague, Thomas and Singh, Ambuj K.},
- date = {2022-09-30},
- eprint = {2210.01769},
- eprinttype = {arxiv},
- eprintclass = {cs, eess, q-bio},
- url = {http://arxiv.org/abs/2210.01769},
- urldate = {2023-05-10},
- pubstate = {preprint}
-}
-
-@online{liu2018,
- title = {Learn to {{Combine Modalities}} in {{Multimodal Deep Learning}}},
- author = {Liu, Kuan and Li, Yanen and Xu, Ning and Natarajan, Prem},
- date = {2018-05-29},
- eprint = {1805.11730},
- eprinttype = {arxiv},
- eprintclass = {cs, stat},
- doi = {10.48550/arXiv.1805.11730},
- url = {http://arxiv.org/abs/1805.11730},
- urldate = {2023-06-05},
- pubstate = {preprint}
-}
-
-@online{madaan2022,
- title = {Language {{Models}} of {{Code}} Are {{Few-Shot Commonsense Learners}}},
- author = {Madaan, Aman and Zhou, Shuyan and Alon, Uri and Yang, Yiming and Neubig, Graham},
- date = {2022-12-06},
- eprint = {2210.07128},
- eprinttype = {arxiv},
- eprintclass = {cs},
- url = {http://arxiv.org/abs/2210.07128},
- urldate = {2023-05-16},
- langid = {english},
- pubstate = {preprint}
-}
-
-@article{mehrer2021,
- title = {An Ecologically Motivated Image Dataset for Deep Learning Yields Better Models of Human Vision},
- author = {Mehrer, Johannes and Spoerer, Courtney J. and Jones, Emer C. and Kriegeskorte, Nikolaus and Kietzmann, Tim C.},
- date = {2021-02-23},
- journaltitle = {Proceedings of the National Academy of Sciences},
- shortjournal = {Proc. Natl. Acad. Sci. U.S.A.},
- volume = {118},
- number = {8},
- pages = {e2011417118},
- issn = {0027-8424, 1091-6490},
- doi = {10.1073/pnas.2011417118},
- url = {https://pnas.org/doi/full/10.1073/pnas.2011417118},
- urldate = {2023-06-04},
- langid = {english}
-}
-
-@article{mehrer2021a,
- title = {An Ecologically Motivated Image Dataset for Deep Learning Yields Better Models of Human Vision},
- author = {Mehrer, Johannes and Spoerer, Courtney J. and Jones, Emer C. and Kriegeskorte, Nikolaus and Kietzmann, Tim C.},
- date = {2021-02-23},
- journaltitle = {Proceedings of the National Academy of Sciences},
- shortjournal = {Proc. Natl. Acad. Sci. U.S.A.},
- volume = {118},
- number = {8},
- pages = {e2011417118},
- issn = {0027-8424, 1091-6490},
- doi = {10.1073/pnas.2011417118},
- url = {https://pnas.org/doi/full/10.1073/pnas.2011417118},
- urldate = {2023-06-04},
- langid = {english}
-}
-
-@article{meng2022,
- title = {Decoding {{Visual fMRI Stimuli}} from {{Human Brain Based}} on {{Graph Convolutional Neural Network}}},
- author = {Meng, Lu and Ge, Kang},
- date = {2022-10},
- journaltitle = {Brain Sciences},
- volume = {12},
- number = {10},
- pages = {1394},
- publisher = {{Multidisciplinary Digital Publishing Institute}},
- issn = {2076-3425},
- doi = {10.3390/brainsci12101394},
- url = {https://www.mdpi.com/2076-3425/12/10/1394},
- urldate = {2023-05-09},
- issue = {10},
- langid = {english}
-}
-
-@article{mills2018,
- title = {Correlated {{Gene Expression}} and {{Anatomical Communication Support Synchronized Brain Activity}} in the {{Mouse Functional Connectome}}},
- author = {Mills, Brian D. and Grayson, David S. and Shunmugavel, Anandakumar and Miranda-Dominguez, Oscar and Feczko, Eric and Earl, Eric and Neve, Kim A. and Fair, Damien A.},
- date = {2018-06-20},
- journaltitle = {The Journal of Neuroscience},
- shortjournal = {J Neurosci},
- volume = {38},
- number = {25},
- eprint = {29789379},
- eprinttype = {pmid},
- pages = {5774--5787},
- issn = {0270-6474},
- doi = {10.1523/JNEUROSCI.2910-17.2018},
- url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6010566/},
- urldate = {2023-05-13},
- pmcid = {PMC6010566}
-}
-
-@article{miyawaki2008,
- title = {Visual {{Image Reconstruction}} from {{Human Brain Activity}} Using a {{Combination}} of {{Multiscale Local Image Decoders}}},
- author = {Miyawaki, Yoichi and Uchida, Hajime and Yamashita, Okito and Sato, Masa-aki and Morito, Yusuke and Tanabe, Hiroki C. and Sadato, Norihiro and Kamitani, Yukiyasu},
- date = {2008-12-10},
- journaltitle = {Neuron},
- shortjournal = {Neuron},
- volume = {60},
- number = {5},
- pages = {915--929},
- issn = {0896-6273},
- doi = {10.1016/j.neuron.2008.11.004},
- url = {https://www.sciencedirect.com/science/article/pii/S0896627308009586},
- urldate = {2023-05-08},
- langid = {english}
-}
-
-@article{mocz2023,
- title = {Representing {{Multiple Visual Objects}} in the {{Human Brain}} and {{Convolutional Neural Networks}}},
- author = {Mocz, Viola and Jeong, Su Keun and Chun, Marvin and Xu, Yaoda},
- date = {2023-03-01},
- journaltitle = {bioRxiv},
- shortjournal = {bioRxiv},
- eprint = {36909506},
- eprinttype = {pmid},
- pages = {2023.02.28.530472},
- doi = {10.1101/2023.02.28.530472},
- url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10002658/},
- urldate = {2023-06-04},
- pmcid = {PMC10002658}
-}
-
-@article{naselaris2009,
- title = {Bayesian {{Reconstruction}} of {{Natural Images}} from {{Human Brain Activity}}},
- author = {Naselaris, Thomas and Prenger, Ryan J. and Kay, Kendrick N. and Oliver, Michael and Gallant, Jack L.},
- date = {2009-09-24},
- journaltitle = {Neuron},
- shortjournal = {Neuron},
- volume = {63},
- number = {6},
- pages = {902--915},
- issn = {0896-6273},
- doi = {10.1016/j.neuron.2009.09.006},
- url = {https://www.sciencedirect.com/science/article/pii/S0896627309006850},
- urldate = {2023-05-13},
- langid = {english}
-}
-
-@article{naselaris2011,
- title = {Encoding and Decoding in {{fMRI}}},
- author = {Naselaris, Thomas and Kay, Kendrick N. and Nishimoto, Shinji and Gallant, Jack L.},
- date = {2011-05-15},
- journaltitle = {NeuroImage},
- shortjournal = {Neuroimage},
- volume = {56},
- number = {2},
- eprint = {20691790},
- eprinttype = {pmid},
- pages = {400--410},
- issn = {1095-9572},
- doi = {10.1016/j.neuroimage.2010.07.073},
- langid = {english},
- pmcid = {PMC3037423}
-}
-
-@inproceedings{ngiam2011,
- title = {Multimodal {{Deep Learning}}},
- author = {Ngiam, Jiquan and Khosla, A. and Kim, Mingyu and Nam, Juhan and Lee, Honglak and Ng, A.},
- date = {2011-06-28},
- url = {https://www.semanticscholar.org/paper/Multimodal-Deep-Learning-Ngiam-Khosla/80e9e3fc3670482c1fee16b2542061b779f47c4f},
- urldate = {2023-06-04},
- eventtitle = {International {{Conference}} on {{Machine Learning}}}
-}
-
-@article{nishimoto2011,
- title = {Reconstructing {{Visual Experiences}} from {{Brain Activity Evoked}} by {{Natural Movies}}},
- author = {Nishimoto, Shinji and Vu, An T. and Naselaris, Thomas and Benjamini, Yuval and Yu, Bin and Gallant, Jack L.},
- date = {2011-10-11},
- journaltitle = {Current Biology},
- shortjournal = {Current Biology},
- volume = {21},
- number = {19},
- pages = {1641--1646},
- issn = {0960-9822},
- doi = {10.1016/j.cub.2011.08.031},
- url = {https://www.sciencedirect.com/science/article/pii/S0960982211009377},
- urldate = {2023-05-08},
- langid = {english}
-}
-
-@article{palazzo2018,
- title = {Decoding {{Brain Representations}} by {{Multimodal Learning}} of {{Neural Activity}} and {{Visual Features}}},
- author = {Palazzo, S. and Spampinato, C. and Kavasidis, I. and Giordano, D. and Schmidt, Joseph and Shah, M.},
- date = {2018},
- journaltitle = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
- doi = {10.1109/tpami.2020.2995909},
- url = {https://consensus.app/details/obtained-results-show-features-lead-performance-palazzo/02f9823594c5535f86049beb8b991371/},
- urldate = {2023-06-04}
-}
-
-@online{papale2021,
- type = {preprint},
- title = {The Influence of Objecthood on the Representation of Natural Images in the Visual Cortex},
- author = {Papale, Paolo and Zuiderbaan, Wietske and Teeuwen, Rob R.M. and Gilhuis, Amparo and Self, Matthew W. and Roelfsema, Pieter R. and Dumoulin, Serge O.},
- date = {2021-09-24},
- eprinttype = {Neuroscience},
- doi = {10.1101/2021.09.21.461209},
- url = {http://biorxiv.org/lookup/doi/10.1101/2021.09.21.461209},
- urldate = {2023-06-04},
- langid = {english},
- pubstate = {preprint}
-}
-
-@online{parcalabescu2021,
- title = {What Is {{Multimodality}}?},
- author = {Parcalabescu, Letitia and Trost, Nils and Frank, Anette},
- date = {2021-06-10},
- eprint = {2103.06304},
- eprinttype = {arxiv},
- eprintclass = {cs},
- url = {http://arxiv.org/abs/2103.06304},
- urldate = {2023-06-05},
- pubstate = {preprint}
-}
-
-@article{parks2013,
- title = {Brain {{Connectivity}} and {{Visual Attention}}},
- author = {Parks, Emily L. and Madden, David J.},
- date = {2013-08},
- journaltitle = {Brain Connectivity},
- shortjournal = {Brain Connect},
- volume = {3},
- number = {4},
- eprint = {23597177},
- eprinttype = {pmid},
- pages = {317--338},
- issn = {2158-0014},
- doi = {10.1089/brain.2012.0139},
- url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3749701/},
- urldate = {2023-06-03},
- pmcid = {PMC3749701}
-}
-
-@article{pennick2012,
- title = {Specialization and Integration of Brain Responses to Object Recognition and Location Detection},
- author = {Pennick, Mark R and Kana, Rajesh K},
- date = {2012-01},
- journaltitle = {Brain and Behavior},
- shortjournal = {Brain Behav},
- volume = {2},
- number = {1},
- eprint = {22574269},
- eprinttype = {pmid},
- pages = {6--14},
- issn = {2162-3279},
- doi = {10.1002/brb3.27},
- url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3343293/},
- urldate = {2023-06-03},
- pmcid = {PMC3343293}
-}
-
-@article{qian2023,
- title = {Semantic {{Neural Decoding}} via {{Cross-Modal Generation}}},
- author = {Qian, Xuelin and Wang, Yikai and Fu, Yanwei and Xue, Xiangyang and Feng, Jianfeng},
- date = {2023},
- publisher = {{arXiv}},
- doi = {10.48550/ARXIV.2303.14730},
- url = {https://arxiv.org/abs/2303.14730},
- urldate = {2023-06-04},
- version = {1}
-}
-
-@online{rakhimberdina2021,
- title = {Natural {{Image Reconstruction}} from {{fMRI}} Using {{Deep Learning}}: {{A Survey}}},
- shorttitle = {Natural {{Image Reconstruction}} from {{fMRI}} Using {{Deep Learning}}},
- author = {Rakhimberdina, Zarina and Jodelet, Quentin and Liu, Xin and Murata, Tsuyoshi},
- date = {2021-10-18},
- url = {https://arxiv.org/abs/2110.09006v2},
- urldate = {2023-05-10},
- langid = {english},
- organization = {{arXiv.org}}
-}
-
-@article{rokem2020,
- title = {Fractional Ridge Regression: A Fast, Interpretable Reparameterization of Ridge Regression},
- shorttitle = {Fractional Ridge Regression},
- author = {Rokem, Ariel and Kay, Kendrick},
- date = {2020-11-30},
- journaltitle = {GigaScience},
- shortjournal = {GigaScience},
- volume = {9},
- number = {12},
- pages = {giaa133},
- issn = {2047-217X},
- doi = {10.1093/gigascience/giaa133},
- url = {https://doi.org/10.1093/gigascience/giaa133},
- urldate = {2023-05-11}
-}
-
-@article{roth2022,
- title = {Natural Scene Sampling Reveals Reliable Coarse-Scale Orientation Tuning in Human {{V1}}},
- author = {Roth, Zvi N. and Kay, Kendrick and Merriam, Elisha P.},
- date = {2022-10-29},
- journaltitle = {Nature Communications},
- shortjournal = {Nat Commun},
- volume = {13},
- number = {1},
- pages = {6469},
- publisher = {{Nature Publishing Group}},
- issn = {2041-1723},
- doi = {10.1038/s41467-022-34134-7},
- url = {https://www.nature.com/articles/s41467-022-34134-7},
- urldate = {2023-05-10},
- issue = {1},
- langid = {english}
-}
-
-@online{seeliger2018,
- title = {Generative Adversarial Networks for Reconstructing Natural Images from Brain Activity},
- author = {Seeliger, K. and Güçlü, U. and Ambrogioni, L. and Güçlütürk, Y. and family=Gerven, given=M. A. J., prefix=van, useprefix=false},
- date = {2018-06-29},
- doi = {10.1101/226688},
- url = {https://www.biorxiv.org/content/10.1101/226688v3},
- urldate = {2023-05-13},
- langid = {english},
- pubstate = {preprint}
-}
-
-@article{seeliger2021,
- title = {End-to-End Neural System Identification with Neural Information Flow},
- author = {Seeliger, K. and Ambrogioni, L. and Güçlütürk, Y. and family=Bulk, given=L. M., prefix=van den, useprefix=false and Güçlü, U. and family=Gerven, given=M. A. J., prefix=van, useprefix=false},
- date = {2021-02-04},
- journaltitle = {PLOS Computational Biology},
- shortjournal = {PLOS Computational Biology},
- volume = {17},
- number = {2},
- pages = {e1008558},
- publisher = {{Public Library of Science}},
- issn = {1553-7358},
- doi = {10.1371/journal.pcbi.1008558},
- url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1008558},
- urldate = {2023-06-03},
- langid = {english}
-}
-
-@article{shen2019,
- title = {End-to-{{End Deep Image Reconstruction From Human Brain Activity}}},
- author = {Shen, Guohua and Dwivedi, Kshitij and Majima, Kei and Horikawa, Tomoyasu and Kamitani, Yukiyasu},
- date = {2019},
- journaltitle = {Frontiers in Computational Neuroscience},
- volume = {13},
- issn = {1662-5188},
- url = {https://www.frontiersin.org/articles/10.3389/fncom.2019.00021},
- urldate = {2023-05-09}
-}
-
-@article{shen2019a,
- title = {Deep Image Reconstruction from Human Brain Activity},
- author = {Shen, Guohua and Horikawa, Tomoyasu and Majima, Kei and Kamitani, Yukiyasu},
- date = {2019-01-14},
- journaltitle = {PLOS Computational Biology},
- shortjournal = {PLOS Computational Biology},
- volume = {15},
- number = {1},
- pages = {e1006633},
- publisher = {{Public Library of Science}},
- issn = {1553-7358},
- doi = {10.1371/journal.pcbi.1006633},
- url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006633},
- urldate = {2023-05-08},
- langid = {english}
-}
-
-@article{singer,
- title = {{{The}} Spatiotemporal Neural Dynamics of Object Recognition for Natural Images and Line Drawings},
- author = {Singer, Johannes},
- langid = {english}
-}
-
-@article{sotiropoulos2019,
- title = {Building Connectomes Using Diffusion {{MRI}}: Why, How and But},
- shorttitle = {Building Connectomes Using Diffusion {{MRI}}},
- author = {Sotiropoulos, Stamatios N. and Zalesky, Andrew},
- date = {2019},
- journaltitle = {NMR in Biomedicine},
- volume = {32},
- number = {4},
- pages = {e3752},
- issn = {1099-1492},
- doi = {10.1002/nbm.3752},
- url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/nbm.3752},
- urldate = {2023-05-13},
- langid = {english}
-}
-
-@article{spoerer2017,
- title = {Recurrent Convolutional Neural Networks: {{A}} Better Model of Biological Object Recognition},
- shorttitle = {Recurrent Convolutional Neural Networks},
- author = {Spoerer, Courtney J. and McClure, Patrick and Kriegeskorte, Nikolaus},
- date = {2017},
- journaltitle = {Frontiers in Psychology},
- volume = {8},
- publisher = {{Frontiers Media S.A.}},
- location = {{Switzerland}},
- issn = {1664-1078},
- doi = {10.3389/fpsyg.2017.01551}
-}
-
-@incollection{sporns2016,
- title = {Connectome {{Networks}}: {{From Cells}} to {{Systems}}},
- shorttitle = {Connectome {{Networks}}},
- booktitle = {Micro-, {{Meso-}} and {{Macro-Connectomics}} of the {{Brain}}},
- author = {Sporns, Olaf},
- editor = {Kennedy, Henry and Van Essen, David C. and Christen, Yves},
- date = {2016},
- eprint = {28590678},
- eprinttype = {pmid},
- publisher = {{Springer}},
- location = {{Cham (CH)}},
- url = {http://www.ncbi.nlm.nih.gov/books/NBK435773/},
- urldate = {2023-05-13},
- isbn = {978-3-319-27776-9 978-3-319-27777-6},
- langid = {english}
-}
-
-@article{stettler2018,
- title = {Using a Model of Human Visual Perception to Improve Deep Learning},
- author = {Stettler, Michael and Francis, Gregory},
- date = {2018-08},
- journaltitle = {Neural Networks: The Official Journal of the International Neural Network Society},
- shortjournal = {Neural Netw},
- volume = {104},
- eprint = {29705669},
- eprinttype = {pmid},
- pages = {40--49},
- issn = {1879-2782},
- doi = {10.1016/j.neunet.2018.04.005},
- langid = {english}
-}
-
-@article{swanson2016,
- title = {From {{Cajal}} to {{Connectome}} and {{Beyond}}},
- author = {Swanson, Larry W. and Lichtman, Jeff W.},
- date = {2016},
- journaltitle = {Annual Review of Neuroscience},
- volume = {39},
- number = {1},
- eprint = {27442070},
- eprinttype = {pmid},
- pages = {197--216},
- doi = {10.1146/annurev-neuro-071714-033954},
- url = {https://doi.org/10.1146/annurev-neuro-071714-033954},
- urldate = {2023-05-13}
-}
-
-@article{thomas2023,
- title = {Benchmarking Explanation Methods for Mental State Decoding with Deep Learning Models},
- author = {Thomas, Armin W. and Ré, Christopher and Poldrack, Russell A.},
- date = {2023-06-01},
- journaltitle = {NeuroImage},
- shortjournal = {NeuroImage},
- volume = {273},
- pages = {120109},
- issn = {1053-8119},
- doi = {10.1016/j.neuroimage.2023.120109},
- url = {https://www.sciencedirect.com/science/article/pii/S1053811923002550},
- urldate = {2023-05-10},
- langid = {english}
-}
-
-@article{tong2012,
- title = {Decoding {{Patterns}} of {{Human Brain Activity}}},
- author = {Tong, Frank and Pratte, Michael S.},
- date = {2012},
- journaltitle = {Annual review of psychology},
- shortjournal = {Annu Rev Psychol},
- volume = {63},
- eprint = {21943172},
- eprinttype = {pmid},
- pages = {483--509},
- issn = {0066-4308},
- doi = {10.1146/annurev-psych-120710-100412},
- url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7869795/},
- urldate = {2023-05-13},
- pmcid = {PMC7869795}
-}
-
-@article{tong2022,
- title = {Transdiagnostic Connectome Signatures from Resting-State {{fMRI}} Predict Individual-Level Intellectual Capacity},
- author = {Tong, Xiaoyu and Xie, Hua and Carlisle, Nancy and Fonzo, Gregory A. and Oathes, Desmond J. and Jiang, Jing and Zhang, Yu},
- date = {2022-09-06},
- journaltitle = {Translational Psychiatry},
- shortjournal = {Transl Psychiatry},
- volume = {12},
- number = {1},
- pages = {1--11},
- publisher = {{Nature Publishing Group}},
- issn = {2158-3188},
- doi = {10.1038/s41398-022-02134-2},
- url = {https://www.nature.com/articles/s41398-022-02134-2},
- urldate = {2023-05-13},
- issue = {1},
- langid = {english}
-}
-
-@article{tullo,
- title = {Preferential Signal Pathways during the Perception and Imagery of Familiar Scenes: {{An}} Effective Connectivity Study},
- shorttitle = {Preferential Signal Pathways during the Perception and Imagery of Familiar Scenes},
- author = {Tullo, Maria Giulia and Almgren, Hannes and Van de Steen, Frederik and Boccia, Maddalena and Bencivenga, Federica and Galati, Gaspare},
- journaltitle = {Human Brain Mapping},
- volume = {n/a},
- number = {n/a},
- issn = {1097-0193},
- doi = {10.1002/hbm.26313},
- url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/hbm.26313},
- urldate = {2023-06-03},
- langid = {english}
-}
-
-@article{tymofiyeva2014,
- title = {Brain without {{Anatomy}}: {{Construction}} and {{Comparison}} of {{Fully Network-Driven Structural MRI Connectomes}}},
- shorttitle = {Brain without {{Anatomy}}},
- author = {Tymofiyeva, Olga and Ziv, Etay and Barkovich, A. James and Hess, Christopher P. and Xu, Duan},
- date = {2014-05-01},
- journaltitle = {PLOS ONE},
- shortjournal = {PLOS ONE},
- volume = {9},
- number = {5},
- pages = {e96196},
- publisher = {{Public Library of Science}},
- issn = {1932-6203},
- doi = {10.1371/journal.pone.0096196},
- url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0096196},
- urldate = {2023-05-13},
- langid = {english}
-}
-
-@article{urgen2019,
- title = {Distinct Representations in Occipito-Temporal, Parietal, and Premotor Cortex during Action Perception Revealed by {{fMRI}} and Computational Modeling},
- author = {Urgen, Burcu A. and Pehlivan, Selen and Saygin, Ayse P.},
- date = {2019-04},
- journaltitle = {Neuropsychologia},
- shortjournal = {Neuropsychologia},
- volume = {127},
- pages = {35--47},
- issn = {00283932},
- doi = {10.1016/j.neuropsychologia.2019.02.006},
- url = {https://linkinghub.elsevier.com/retrieve/pii/S0028393219300314},
- urldate = {2023-06-04},
- langid = {english}
-}
-
-@article{vanessen2001,
- title = {Mapping Visual Cortex in Monkeys and Humans Using Surface-Based Atlases},
- author = {Van Essen, David C and Lewis, James W and Drury, Heather A and Hadjikhani, Nouchine and Tootell, Roger B. H and Bakircioglu, Muge and Miller, Michael I},
- date = {2001-05-01},
- journaltitle = {Vision Research},
- shortjournal = {Vision Research},
- volume = {41},
- number = {10},
- pages = {1359--1378},
- issn = {0042-6989},
- doi = {10.1016/S0042-6989(01)00045-1},
- url = {https://www.sciencedirect.com/science/article/pii/S0042698901000451},
- urldate = {2023-06-03},
- langid = {english}
-}
-
-@online{vanhoecke2018,
- title = {Assessment of {{Functional Connectome Construction Strategies}} in {{Neurodegeneration}}},
- author = {Vanhoecke, J. and McColgan, P. and Razi, A. and Gregory, S. and Seunarine, K. and Durr, A. and Roos, R. and Leavitt, B. and Scahill, R. I. and Clark, C. and Tabrizi, S. J. and Rees, G. and Investigators, Track On-HD},
- date = {2018-08-06},
- eprinttype = {bioRxiv},
- eprintclass = {New Results},
- pages = {385385},
- doi = {10.1101/385385},
- url = {https://www.biorxiv.org/content/10.1101/385385v1},
- urldate = {2023-05-13},
- langid = {english},
- pubstate = {preprint}
-}
-
-@article{venugopalan2021,
- title = {Multimodal Deep Learning Models for Early Detection of {{Alzheimer}}’s Disease Stage},
- author = {Venugopalan, Janani and Tong, Li and Hassanzadeh, Hamid Reza and Wang, May D.},
- date = {2021-02-05},
- journaltitle = {Scientific Reports},
- shortjournal = {Sci Rep},
- volume = {11},
- number = {1},
- pages = {3254},
- publisher = {{Nature Publishing Group}},
- issn = {2045-2322},
- doi = {10.1038/s41598-020-74399-w},
- url = {https://www.nature.com/articles/s41598-020-74399-w},
- urldate = {2023-06-05},
- issue = {1},
- langid = {english}
-}
-
-@article{zhang2019,
- title = {A Visual Encoding Model Based on Deep Neural Networks and Transfer Learning for Brain Activity Measured by Functional Magnetic Resonance Imaging},
- author = {Zhang, Chi and Qiao, Kai and Wang, Linyuan and Tong, Li and Hu, Guoen and Zhang, Ru-Yuan and Yan, Bin},
- date = {2019-09-01},
- journaltitle = {Journal of Neuroscience Methods},
- shortjournal = {Journal of Neuroscience Methods},
- volume = {325},
- pages = {108318},
- issn = {0165-0270},
- doi = {10.1016/j.jneumeth.2019.108318},
- url = {https://www.sciencedirect.com/science/article/pii/S0165027019301761},
- urldate = {2023-06-06},
- langid = {english}
-}
-
-@article{zheng2021,
- title = {Unraveling Neural Coding of Dynamic Natural Visual Scenes via Convolutional Recurrent Neural Networks},
- author = {Zheng, Yajing and Jia, Shanshan and Yu, Zhaofei and Liu, Jian K. and Huang, Tiejun},
- date = {2021-10-08},
- journaltitle = {Patterns},
- shortjournal = {Patterns},
- volume = {2},
- number = {10},
- pages = {100350},
- issn = {2666-3899},
- doi = {10.1016/j.patter.2021.100350},
- url = {https://www.sciencedirect.com/science/article/pii/S2666389921002051},
- urldate = {2023-06-03},
- langid = {english}
-}
diff --git a/src/__init__.py b/src/__init__.py
index e8e4624..87ff326 100644
--- a/src/__init__.py
+++ b/src/__init__.py
@@ -1,6 +1,6 @@
from src.data import make_kfolds
from src.model import network_fn, loss_fn, init, apply
-from src.plots import plot_brain, plot_decoding, plot_decoding_progress
+from src.plots import plot_brain
from src.utils import SUBJECTS, ROI_TO_CLASS, load_roi_data, DATA_DIR
__all__ = [
@@ -10,8 +10,6 @@
'init',
'apply',
'plot_brain',
- 'plot_decoding',
- 'plot_decoding_progress',
'SUBJECTS',
'ROI_TO_CLASS',
'load_roi_data',
diff --git a/src/plots.py b/src/plots.py
index f8f84de..b59e415 100644
--- a/src/plots.py
+++ b/src/plots.py
@@ -4,14 +4,22 @@
# imports
import numpy as np
+from IPython.display import display, Image, clear_output
import matplotlib.pyplot as plt
import imageio
import networkx as nx
from nilearn import plotting
from tqdm import tqdm
+from IPython.display import HTML
+import time
+import numpy as np
+import base64
+from PIL import Image as PILImage
+from io import BytesIO
from src.fmri import ATLAS, fsaverage_vec
+
# globals
plt.style.use("dark_background")
@@ -36,9 +44,9 @@ def plot_brain(challenge_vec, subject, hem, roi=None):
return view.resize(height=900, width=1200)
# plot decodings
-def plot_decoding(decodings, n=4):
+def monitor_decoding(decodings, n=3):
"""small multiple gifs of decodings at differnt stages of training"""
- decodings = decodings[:n * n]
+ decodings = decodings[: n * n]
fig, axs = plt.subplots(n, n, figsize=(n * 2, n * 2))
for i, ax in enumerate(axs.flatten()):
ax.imshow(decodings[i])
diff --git a/src/train.py b/src/train.py
index e021928..0035df0 100644
--- a/src/train.py
+++ b/src/train.py
@@ -48,10 +48,10 @@ def train_loop(rng, opt, train_loader, val_loader, plot_batch, hyperparams):
metrics.append(evaluate(params, key, train_loader, val_loader))
# plot_pred = apply(params, key, plot_batch[0])
# plot_decodings(plot_pred)
- return metrics
+ return metrics, params
-def evaluate(params, rng, train_loader, val_loader, n_steps=10):
+def evaluate(params, rng, train_loader, val_loader, n_steps=2):
# each batch is a tuple(lh, rh, img). Connect n_steps batches into 1
train_loss, val_loss = 0, 0
for _ in range(n_steps):
@@ -73,6 +73,6 @@ def train_folds(kfolds, hyperparams, seed=0):
for idx, (train_loader, val_loader) in enumerate(kfolds):
plot_batch = next(train_loader) if plot_batch is None else plot_batch
rng, key = jax.random.split(rng)
- fold_metrics = train_loop(key, opt, train_loader, val_loader, plot_batch, hyperparams)
+ fold_metrics, fold_params = train_loop(key, opt, train_loader, val_loader, plot_batch, hyperparams)
metrics[idx] = fold_metrics
- return metrics
\ No newline at end of file
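+    # note: fold_params is overwritten on each fold, so only the final fold's params are returned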
+ return metrics, fold_params
\ No newline at end of file
diff --git a/src/utils.py b/src/utils.py
index 93f334d..bb1fa7b 100644
--- a/src/utils.py
+++ b/src/utils.py
@@ -15,6 +15,12 @@
import numpy as np
from matplotlib import pyplot as plt
import yaml
+from IPython.display import display, HTML
+import time
+import numpy as np
+import base64
+from PIL import Image as PILImage
+from io import BytesIO
# CONFIG
@@ -79,4 +85,12 @@ def load_roi_data(subject):
for roi_class in CLASS_TO_ROI.keys():
data['challenge'][hem][roi_class] = np.load(os.path.join(roi_dir, f'{hem}.{roi_class}_challenge_space.npy'))
data['fsaverage'][hem][roi_class] = np.load(os.path.join(roi_dir, f'{hem}.{roi_class}_fsaverage_space.npy'))
- return data
\ No newline at end of file
+ return data
+
+
+def matrix_to_image(matrix):
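+    """Encode a float matrix (values assumed in [0, 1]) as a base64 PNG string for inline HTML display."""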
+ image = PILImage.fromarray((matrix * 255).astype(np.uint8))
+ image_bytes = BytesIO()
+ image.save(image_bytes, format='png')
+ encoded_image = base64.b64encode(image_bytes.getvalue()).decode('utf-8')
+ return encoded_image
\ No newline at end of file