New plots #65

Open · wants to merge 12 commits into master
2 changes: 1 addition & 1 deletion bsi_zoo/__init__.py
@@ -16,4 +16,4 @@
# Dev branch marker is: 'X.Y.devN' where N is an integer.
#

__version__ = "1.1a1"
__version__ = 1.1
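Editor's note on the hunk above: packaging tools and downstream code expect `__version__` to be a string (PEP 440); a bare float silently breaks simple consumers. A minimal sketch, assuming nothing beyond the standard library:

# Minimal illustration: string versions support the operations tools rely on.
version_ok = "1.1"
major, minor = version_ok.split(".")  # works: ('1', '1')

version_bad = 1.1
# version_bad.split(".")  # AttributeError: 'float' object has no attribute 'split'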
202,401 changes: 202,401 additions & 0 deletions bsi_zoo/data/final/fixed.csv

Large diffs are not rendered by default.

Binary file added bsi_zoo/data/final/fixed.pkl
Binary file not shown.
29,921 changes: 29,921 additions & 0 deletions bsi_zoo/data/final/fixed_spatialCV.csv

Large diffs are not rendered by default.

Binary file added bsi_zoo/data/final/fixed_spatialCV.pkl
Binary file not shown.
9 changes: 5 additions & 4 deletions bsi_zoo/estimators.py
@@ -139,7 +139,7 @@ def _compute_eloreta_kernel(L, *, lambda2, n_orient, whitener, loose=1.0, max_it
)
G_R_Gt = _this_normalize_R(G, R, G_3)
# extra = " (this may take a while)" if n_orient == 3 else ""
-for kk in range(max_iter):
+for _ in range(max_iter):
# 1. Compute inverse of the weights (stabilized) and C
s, u = eigh(G_R_Gt)
s = abs(s)
Expand Down Expand Up @@ -171,7 +171,7 @@ def _compute_eloreta_kernel(L, *, lambda2, n_orient, whitener, loose=1.0, max_it
if delta < eps:
break
else:
warnings.warn("eLORETA weight fitting did not converge (>= %s)" % eps)
warnings.warn("eLORETA weight fitting did not converge (>= %s)" % eps, UserWarning, stacklevel=2)
del G_R_Gt
G /= source_std # undo our biasing
G_3 = _get_G_3(G, n_orient)
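A note on the warning change above: the explicit `UserWarning` category and `stacklevel=2` attribute the warning to the caller and make it easy to assert in tests. A hypothetical test sketch (assuming pytest, and placeholder `L`/`whitener` fixtures that are not part of this diff):

import pytest

def test_eloreta_nonconvergence_warning(L, whitener):
    # max_iter=1 is a hypothetical way to force early termination
    with pytest.warns(UserWarning, match="did not converge"):
        _compute_eloreta_kernel(L, lambda2=1e-6, n_orient=1,
                                whitener=whitener, max_iter=1)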
@@ -412,6 +412,7 @@ def __init__(self, solver, alpha, cov_type, cov, n_orient, extra_params={}):
self.cov_type = cov_type
self.n_orient = n_orient
self.extra_params = extra_params
+self.classes_ = np.array([0, 0])

def fit(self, L, y):
self.L_ = L
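The hard-coded `classes_` added above is presumably there so that scikit-learn tooling that inspects a fitted estimator's `classes_` attribute (e.g. classifier-oriented CV helpers or scorers) does not raise; the diff itself does not say which code path needs it. A minimal sketch of the pattern, with hypothetical names:

import numpy as np

class DummySolver:
    """Hypothetical stand-in for the wrapped solver class."""
    def __init__(self):
        # placeholder attribute mirroring the diff; not real class labels
        self.classes_ = np.array([0, 0])

est = DummySolver()
assert hasattr(est, "classes_")  # attribute lookups now succeed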
@@ -716,8 +717,8 @@ def eloreta(L, y, cov=1, alpha=1 / 9, n_orient=1):
K = _compute_eloreta_kernel(L, lambda2=alpha, n_orient=n_orient, whitener=whitener)
x = K @ y # get the source time courses with simple dot product

-if n_orient > 1:
-    x = x.reshape((-1, n_orient, x.shape[1]))
+# if n_orient > 1:
+#     x = x.reshape((-1, n_orient, x.shape[1]))
return x
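With the reshape commented out above, `eloreta` now always returns a 2D array of stacked orientations; a caller that still wants the per-orientation view can apply the equivalent reshape downstream. A sketch, assuming `x` has shape `(n_sources * n_orient, n_times)`:

import numpy as np

n_orient = 3
x = np.zeros((2 * n_orient, 10))          # e.g. 2 sources, 10 time points
x3 = x.reshape(-1, n_orient, x.shape[1])  # same reshape as the old code
assert x3.shape == (2, 3, 10)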


1 change: 1 addition & 0 deletions bsi_zoo/run_benchmark.ipynb
@@ -0,0 +1 @@
{"cells":[{"cell_type":"code","execution_count":75,"metadata":{},"outputs":[],"source":["import os\n","import numpy as np\n","import pandas as pd\n","\n","path = \"/home/anuja/tj/bdsg/BSI-Zoo/bsi_zoo/data/free4\"\n","files = os.listdir(path)\n","benchmark_files_free = [i for i in files if 'spatialCV' not in i]\n","benchmark_files_free_with_spatial_cv = [i for i in files if 'spatialCV' in i]\n","\n","\n","eloreta_path = \"/home/anuja/tj/bdsg/BSI-Zoo/bsi_zoo/data/eloreta\"\n","files = os.listdir(eloreta_path)\n","benchmark_files_eloreta = [i for i in files if 'benchmark' not in i]\n","\n","dfs_free = [pd.read_pickle(f'{path}/{file}') for file in benchmark_files_free]\n","df_results_free = pd.concat(dfs_free)\n","df_results_free['alpha'] = 1-df_results_free['alpha'].astype(float)\n","\n","\n","\n","dfs_free_with_spatial_cv = [pd.read_pickle(f'{path}/{file}') for file in benchmark_files_free_with_spatial_cv]\n","eloreta_dfs = [pd.read_pickle(f'{eloreta_path}/{file}') for file in benchmark_files_eloreta]\n","df_results_free_with_spatial_cv = pd.concat(dfs_free_with_spatial_cv)\n","df_results_eloreta = pd.concat(eloreta_dfs)\n","\n","# join with eloreta\n","df_results_free_with_spatial_cv = pd.concat([df_results_eloreta, df_results_free_with_spatial_cv])\n","df_results_free_with_spatial_cv['alpha'] = 1-df_results_free_with_spatial_cv['alpha'].astype(float)"]},{"cell_type":"code","execution_count":92,"metadata":{},"outputs":[],"source":["# save df_results_free as .mat file\n","import scipy.io\n","Output = {}\n","\n","scipy.io.savemat('free_spatialCV.mat', df_results_free_with_spatial_cv.to_dict('list'))"]},{"cell_type":"code","execution_count":95,"metadata":{},"outputs":[{"data":{"text/plain":["dict_keys(['__header__', '__version__', '__globals__', 'estimator', 'euclidean_distance', 'mse', 'emd', 'f1', 'reconstructed_noise', 'alpha', 'cov_type', 'n_sensors', 'n_sources', 'n_times', 'nnz', 'orientation_type', 'path_to_leadfield', 'extra_params', 'estimator__alpha', 'estimator__alpha_cv', 'error'])"]},"execution_count":95,"metadata":{},"output_type":"execute_result"}],"source":["# load .mat file /home/anuja/tj/bdsg/BSI-Zoo/bsi_zoo/data/sent to stefan/fixed.mat\n","import scipy.io\n","\n","d = scipy.io.loadmat('/home/anuja/tj/bdsg/BSI-Zoo/bsi_zoo/data/sent to stefan/free_spatialCV.mat')\n","d.keys()"]},{"cell_type":"code","execution_count":94,"metadata":{},"outputs":[{"data":{"text/plain":["dict_keys(['__header__', '__version__', '__globals__', 'Unnamed: 0', 'estimator', 'euclidean_distance', 'mse', 'emd', 'f1', 'reconstructed_noise', 'alpha', 'cov_type', 'n_times', 'nnz', 'orientation_type', 'path_to_leadfield', 'extra_params', 'estimator__alpha', 'estimator__alpha_cv', 'error'])"]},"execution_count":94,"metadata":{},"output_type":"execute_result"}],"source":["d = scipy.io.loadmat('/home/anuja/tj/bdsg/BSI-Zoo/bsi_zoo/data/sent to stefan/fixed_spatialCV.mat')\n","d.keys()"]}],"metadata":{"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.12"}},"nbformat":4,"nbformat_minor":2}
187 changes: 105 additions & 82 deletions bsi_zoo/run_benchmark.py
@@ -18,10 +18,11 @@
from bsi_zoo.metrics import euclidean_distance, mse, emd, f1, reconstructed_noise
from bsi_zoo.config import get_leadfield_path

-n_jobs = 30
-nruns = 10
-spatial_cv = [False, True]
-subjects = ["CC120166", "CC120313", "CC120264", "CC120313", "CC120309"]
+n_jobs = 12
+nruns = 1
+spatial_cv = [True]
+subjects = ["CC120313", "CC120309", "CC120166", "CC120264"]
metrics = [
euclidean_distance,
mse,
@@ -41,77 +42,88 @@
# 0.13572088,
# 0.2096144,
# ] # logspaced
-estimator_alphas = np.logspace(0, -2, 20)[1:]
+estimator_alphas_I = np.logspace(0, -2, 20)[1:]
+estimator_alphas_II = [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000]
memory = Memory(".")

for do_spatial_cv in spatial_cv:
for subject in subjects:
"""Fixed orientation parameters for the benchmark"""

orientation_type = "fixed"
data_args_I = {
# "n_sensors": [50],
"n_times": [10],
# "n_sources": [200],
"nnz": nnzs,
"cov_type": ["diag"],
"path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
"orientation_type": [orientation_type],
"alpha": alpha_SNR, # this is actually SNR
}

data_args_II = {
# "n_sensors": [50],
"n_times": [10],
# "n_sources": [200],
"nnz": nnzs,
"cov_type": ["full"],
"path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
"orientation_type": [orientation_type],
"alpha": alpha_SNR, # this is actually SNR
}

estimators = [
(fake_solver, data_args_I, {"alpha": estimator_alphas}, {}),
(eloreta, data_args_I, {"alpha": estimator_alphas}, {}),
(iterative_L1, data_args_I, {"alpha": estimator_alphas}, {}),
(iterative_L2, data_args_I, {"alpha": estimator_alphas}, {}),
(iterative_sqrt, data_args_I, {"alpha": estimator_alphas}, {}),
(iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas}, {}),
(iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas}, {}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 1}),
(gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 2}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 3}),
]

df_results = []
for estimator, data_args, estimator_args, estimator_extra_params in estimators:
benchmark = Benchmark(
estimator,
subject,
metrics,
data_args,
estimator_args,
random_state=42,
memory=memory,
n_jobs=n_jobs,
do_spatial_cv=do_spatial_cv,
estimator_extra_params=estimator_extra_params,
)
results = benchmark.run(nruns=nruns)
df_results.append(results)

df_results = pd.concat(df_results, axis=0)

data_path = Path("bsi_zoo/data")
data_path.mkdir(exist_ok=True)
if do_spatial_cv:
FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
else:
FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
df_results.to_pickle(data_path / FILE_NAME)

print(df_results)

# """Fixed orientation parameters for the benchmark"""

# orientation_type = "fixed"
# data_args_I = {
# # "n_sensors": [50],
# "n_times": [10],
# # "n_sources": [200],
# "nnz": nnzs,
# "cov_type": ["diag"],
# "path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
# "orientation_type": [orientation_type],
# "alpha": alpha_SNR, # this is actually SNR
# }

# data_args_II = {
# # "n_sensors": [50],
# "n_times": [10],
# # "n_sources": [200],
# "nnz": nnzs,
# "cov_type": ["full"],
# "path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
# "orientation_type": [orientation_type],
# "alpha": alpha_SNR, # this is actually SNR
# }

# estimators = [
# # (fake_solver, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (eloreta, data_args_I, {"alpha": estimator_alphas_II}, {}),
# # (iterative_L1, data_args_I, {"alpha": estimator_alphas_I}, {}),
# # (iterative_L2, data_args_I, {"alpha": estimator_alphas_I}, {}),
# # (iterative_sqrt, data_args_I, {"alpha": estimator_alphas_I}, {}),
# # (iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# # (iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# # (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 1}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_II}, {"update_mode": 2}),
# # (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 3}),
# ]

# df_results = []
# for estimator, data_args, estimator_args, estimator_extra_params in estimators:
# benchmark = Benchmark(
# estimator,
# subject,
# metrics,
# data_args,
# estimator_args,
# random_state=42,
# memory=memory,
# n_jobs=n_jobs,
# do_spatial_cv=do_spatial_cv,
# estimator_extra_params=estimator_extra_params,
# )
# results = benchmark.run(nruns=nruns)
# df_results.append(results)
# # save results
# data_path = Path("bsi_zoo/data/updated_alpha_grid")
# data_path.mkdir(exist_ok=True)
# if do_spatial_cv:
# FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# else:
# FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# results.to_pickle(data_path / FILE_NAME)


# df_results = pd.concat(df_results, axis=0)

# data_path = Path("bsi_zoo/data/ramen")
# data_path.mkdir(exist_ok=True)
# if do_spatial_cv:
# FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# else:
# FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# df_results.to_pickle(data_path / FILE_NAME)

# print(df_results)

""" Free orientation parameters for the benchmark """

@@ -139,16 +151,16 @@
}

estimators = [
(fake_solver, data_args_I, {"alpha": estimator_alphas}, {}),
(eloreta, data_args_I, {"alpha": estimator_alphas}, {}),
(iterative_L1, data_args_I, {"alpha": estimator_alphas}, {}),
(iterative_L2, data_args_I, {"alpha": estimator_alphas}, {}),
(iterative_sqrt, data_args_I, {"alpha": estimator_alphas}, {}),
(iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas}, {}),
(iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas}, {}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 1}),
(gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 2}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 3}),
# (fake_solver, data_args_I, {"alpha": estimator_alphas_I}, {}),
(eloreta, data_args_I, {"alpha": estimator_alphas_II}, {}),
# (iterative_L1, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_L2, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_sqrt, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# (iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 1}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_II}, {"update_mode": 2}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 3}),
]

df_results = []
@@ -167,10 +179,20 @@
)
results = benchmark.run(nruns=nruns)
df_results.append(results)
# save results
data_path = Path("bsi_zoo/data/eloreta")
data_path.mkdir(exist_ok=True)

if do_spatial_cv:
FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
else:
FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
results.to_pickle(data_path / FILE_NAME)
print(results)
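One caveat in the save block above: `estimator` is a function object, so `f"{estimator}"` embeds its repr (something like `<function eloreta at 0x...>`) in the file name. A hedged alternative sketch, not part of the PR:

# Hypothetical: use the solver's __name__ for a cleaner file-name stem.
est_name = getattr(estimator, "__name__", str(estimator))
FILE_NAME = f"{est_name}_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"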

df_results = pd.concat(df_results, axis=0)

data_path = Path("bsi_zoo/data")
data_path = Path("bsi_zoo/data/eloreta")
data_path.mkdir(exist_ok=True)
if do_spatial_cv:
FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
Expand All @@ -179,3 +201,4 @@
df_results.to_pickle(data_path / FILE_NAME)

print(df_results)
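To recombine the per-estimator pickles written above, a later step (like run_benchmark.ipynb) can glob the output directory; a sketch assuming the `bsi_zoo/data/eloreta` path used here:

import os
import pandas as pd

data_path = "bsi_zoo/data/eloreta"
dfs = [pd.read_pickle(os.path.join(data_path, f))
       for f in os.listdir(data_path) if f.endswith(".pkl")]
df_all = pd.concat(dfs, axis=0)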
