Commit 42f25c3 (0 parents)
Showing 218 changed files with 70,431 additions and 0 deletions.
@@ -0,0 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 420fd699726f92bf93f894befb472e90
tags: 645f666f9bcd5a90fca523b33c5a78b7
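The comment above states the whole mechanism: Sphinx records a hash of the build configuration and triggers a full rebuild when the stored hash no longer matches. A minimal sketch of that idea, using plain hashlib rather than Sphinx's actual implementation and a made-up configuration dict, could look like this:

import hashlib


def config_hash(config: dict) -> str:
    # Hash a stable text representation of the configuration values
    text = ",".join(f"{key}={value}" for key, value in sorted(config.items()))
    return hashlib.md5(text.encode("utf-8")).hexdigest()


stored = "420fd699726f92bf93f894befb472e90"  # value recorded in the file above
current = config_hash({"project": "py_neuromodulation", "html_theme": "example"})  # made-up config
needs_full_rebuild = current != stored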
Empty file.
Binary file not shown.
87 changes: 87 additions & 0 deletions
_downloads/09df217f95985497f45d69e2d4bdc5b1/plot_2_example_add_feature.py
@@ -0,0 +1,87 @@
"""
===================
Adding New Features
===================
"""
# %%
import py_neuromodulation as nm
import numpy as np
from typing import Iterable

# %%
# In this example we will demonstrate how a new feature can be added to the existing feature pipeline.
# This can be done by creating a new feature class that implements the protocol class :class:`~nm_features.NMFeature`
# and registering it with the :func:`~nm_features.AddCustomFeature` function.


# %%
# Let's create a new feature class called `ChannelMean` that calculates the mean signal for each channel.
# We can optionally make it inherit from :class:`~nm_features.NMFeature`, but as long as it has an adequate constructor
# and a `calc_feature` method with the appropriate signatures it will work.
# The `__init__` method should take the settings, channel names and sampling frequency as arguments.
# The `calc_feature` method should take the data and a dictionary of features as arguments and return the updated dictionary.
class ChannelMean:
    def __init__(
        self, settings: nm.NMSettings, ch_names: Iterable[str], sfreq: float
    ) -> None:
        # If required for feature calculation, store the settings,
        # channel names and sampling frequency (optional)
        self.settings = settings
        self.ch_names = ch_names
        self.sfreq = sfreq

        # Here you can add any additional initialization code.
        # For example, you could store parameters for the functions
        # used in the calc_feature method.

        self.feature_name = "channel_mean"

    def calc_feature(self, data: np.ndarray, features_compute: dict) -> dict:
        # Here you can add any feature calculation code.
        # This example simply calculates the mean signal for each channel.
        ch_means = np.mean(data, axis=1)

        # Store the calculated features in the features_compute dictionary.
        # Be careful to use a unique key for each channel and metric you compute.
        for ch_idx, ch in enumerate(self.ch_names):
            features_compute[f"{self.feature_name}_{ch}"] = ch_means[ch_idx]

        # Return the updated features_compute dictionary to the stream
        return features_compute


nm.add_custom_feature("channel_mean", ChannelMean)

# %%
# Now we can instantiate the settings and observe that the new feature has been added to the list of features.
settings = nm.NMSettings()  # Get default settings

settings.features

# %%
# Let's create some artificial data to demonstrate the feature calculation.
N_CHANNELS = 5
N_SAMPLES = 10000  # 10 seconds of random data at 1000 Hz sampling frequency

data = np.random.random([N_CHANNELS, N_SAMPLES])
stream = nm.Stream(
    sfreq=1000,
    data=data,
    settings=settings,
    sampling_rate_features_hz=10,
    verbose=False,
)

feature_df = stream.run()
columns = [col for col in feature_df.columns if "channel_mean" in col]

feature_df[columns]


# %%
# Remove the feature so that it does not interfere with other examples.
nm.remove_custom_feature("channel_mean")
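The same protocol supports any per-segment computation. As a further sketch, a peak-to-peak amplitude feature could be registered the same way; the class name and the "channel_ptp" key below are hypothetical, while the constructor and calc_feature signatures and the add_custom_feature/remove_custom_feature calls are taken from the example above.

import numpy as np
import py_neuromodulation as nm
from typing import Iterable


class ChannelPeakToPeak:
    def __init__(
        self, settings: nm.NMSettings, ch_names: Iterable[str], sfreq: float
    ) -> None:
        # Only the channel names are needed for this computation
        self.ch_names = ch_names
        self.feature_name = "channel_ptp"  # hypothetical feature key

    def calc_feature(self, data: np.ndarray, features_compute: dict) -> dict:
        # Peak-to-peak amplitude per channel for the current data segment
        ptp = np.max(data, axis=1) - np.min(data, axis=1)
        for ch_idx, ch in enumerate(self.ch_names):
            features_compute[f"{self.feature_name}_{ch}"] = ptp[ch_idx]
        return features_compute


nm.add_custom_feature("channel_ptp", ChannelPeakToPeak)
# ... run a stream exactly as above, then clean up:
nm.remove_custom_feature("channel_ptp")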
261 changes: 261 additions & 0 deletions
_downloads/0dc588ba0560fad53b77fac59849f12c/plot_1_example_BIDS.ipynb
@@ -0,0 +1,261 @@
{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "\n# ECoG Movement decoding example\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
"This example notebook read openly accessible data from the publication\n*Electrocorticography is superior to subthalamic local field potentials\nfor movement decoding in Parkinson\u2019s disease*\n(`Merk et al. 2022 <https://elifesciences.org/articles/75126>_`).\nThe dataset is available [here](https://doi.org/10.7910/DVN/IO2FLM).\n\nFor simplicity one example subject is automatically shipped within\nthis repo at the *py_neuromodulation/data* folder, stored in\n[iEEG BIDS](https://www.nature.com/articles/s41597-019-0105-7) format.\n\n" | ||
] | ||
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "from sklearn import metrics, model_selection, linear_model\nimport matplotlib.pyplot as plt\n\nimport py_neuromodulation as nm\nfrom py_neuromodulation import (\n    nm_analysis,\n    nm_decode,\n    nm_define_nmchannels,\n    nm_IO,\n    nm_plots,\n    NMSettings,\n)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Let's read the example using [mne_bids](https://mne.tools/mne-bids/stable/index.html).\nThe resulting raw object is of type [mne.RawArray](https://mne.tools/stable/generated/mne.io.RawArray.html).\nWe can use its properties, such as the sampling frequency, channel names and channel types, to create the *nm_channels* DataFrame:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "(\n    RUN_NAME,\n    PATH_RUN,\n    PATH_BIDS,\n    PATH_OUT,\n    datatype,\n) = nm_IO.get_paths_example_data()\n\n(\n    raw,\n    data,\n    sfreq,\n    line_noise,\n    coord_list,\n    coord_names,\n) = nm_IO.read_BIDS_data(\n    PATH_RUN=PATH_RUN\n)\n\nnm_channels = nm_define_nmchannels.set_channels(\n    ch_names=raw.ch_names,\n    ch_types=raw.get_channel_types(),\n    reference=\"default\",\n    bads=raw.info[\"bads\"],\n    new_names=\"default\",\n    used_types=(\"ecog\", \"dbs\", \"seeg\"),\n    target_keywords=[\"MOV_RIGHT\"],\n)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
"This example contains the grip force movement traces, we'll use the *MOV_RIGHT* channel as a decoding target channel.\nLet's check some of the raw feature and time series traces:\n\n" | ||
] | ||
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "plt.figure(figsize=(12, 4), dpi=300)\nplt.subplot(121)\nplt.plot(raw.times, data[-1, :])\nplt.xlabel(\"Time [s]\")\nplt.ylabel(\"a.u.\")\nplt.title(\"Movement label\")\nplt.xlim(0, 20)\n\nplt.subplot(122)\nfor idx, ch_name in enumerate(nm_channels.query(\"used == 1\").name):\n    plt.plot(raw.times, data[idx, :] + idx * 300, label=ch_name)\nplt.legend(bbox_to_anchor=(1, 0.5), loc=\"center left\")\nplt.title(\"ECoG + STN-LFP time series\")\nplt.xlabel(\"Time [s]\")\nplt.ylabel(\"Voltage a.u.\")\nplt.xlim(0, 20)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
"settings = NMSettings.get_fast_compute()\n\nsettings.features.welch = True\nsettings.features.fft = True\nsettings.features.bursts = True\nsettings.features.sharpwave_analysis = True\nsettings.features.coherence = True\n\nsettings.coherence.channels = [(\"LFP_RIGHT_0-LFP_RIGHT_2\", \"ECOG_RIGHT_0-avgref\")] \n# TONI: this example was failing because the rereferenced channel have different names than originals\n# We need to handle ch_names being changed after reref with settings.coherence.channels validation\n\nsettings.coherence.frequency_bands = [\"high beta\", \"low gamma\"]\nsettings.sharpwave_analysis_settings.estimator[\"mean\"] = []\nsettings.sharpwave_analysis_settings.sharpwave_features.enable_all()\nfor sw_feature in settings.sharpwave_analysis_settings.sharpwave_features.list_all():\n settings.sharpwave_analysis_settings.estimator[\"mean\"].append(sw_feature)" | ||
] | ||
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "stream = nm.Stream(\n    sfreq=sfreq,\n    nm_channels=nm_channels,\n    settings=settings,\n    line_noise=line_noise,\n    coord_list=coord_list,\n    coord_names=coord_names,\n    verbose=True,\n)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "features = stream.run(\n    data=data,\n    out_path_root=PATH_OUT,\n    folder_name=RUN_NAME,\n)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
"## Feature Analysis Movement\nThe obtained performances can now be read and visualized using the :class:`nm_analysis.Feature_Reader`.\n\n" | ||
] | ||
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "# initialize analyzer\nfeature_reader = nm_analysis.FeatureReader(\n    feature_dir=PATH_OUT,\n    feature_file=RUN_NAME,\n)\nfeature_reader.label_name = \"MOV_RIGHT\"\nfeature_reader.label = feature_reader.feature_arr[\"MOV_RIGHT\"]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "feature_reader.feature_arr.iloc[100:108, -6:]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "print(feature_reader.feature_arr.shape)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "feature_reader._get_target_ch()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "feature_reader.plot_target_averaged_channel(\n    ch=\"ECOG_RIGHT_0\",\n    list_feature_keywords=None,\n    epoch_len=4,\n    threshold=0.5,\n    ytick_labelsize=7,\n    figsize_x=12,\n    figsize_y=12,\n)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "feature_reader.plot_all_features(\n    ytick_labelsize=6,\n    clim_low=-2,\n    clim_high=2,\n    ch_used=\"ECOG_RIGHT_0\",\n    time_limit_low_s=0,\n    time_limit_high_s=20,\n    normalize=True,\n    save=True,\n)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "nm_plots.plot_corr_matrix(\n    feature=feature_reader.feature_arr.filter(regex=\"ECOG_RIGHT_0\"),\n    ch_name=\"ECOG_RIGHT_0-avgref\",\n    feature_names=list(\n        feature_reader.feature_arr.filter(regex=\"ECOG_RIGHT_0-avgref\").columns\n    ),\n    feature_file=feature_reader.feature_file,\n    show_plot=True,\n    figsize=(15, 15),\n)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
"## Decoding\n\nThe main focus of the *py_neuromodulation* pipeline is feature estimation.\nNevertheless, the user can also use the pipeline for machine learning decoding.\nIt can be used for regression and classification problems and also dimensionality reduction such as PCA and CCA.\n\nHere, we show an example using the XGBOOST classifier. The used labels came from a continuous grip force movement target, named \"MOV_RIGHT\".\n\nFirst we initialize the :class:`~nm_decode.Decoder` class, which the specified *validation method*, here being a simple 3-fold cross validation,\nthe evaluation metric, used machine learning model, and the channels we want to evaluate performances for.\n\nThere are many more implemented methods, but we will here limit it to the ones presented.\n\n" | ||
] | ||
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "model = linear_model.LinearRegression()\n\nfeature_reader.decoder = nm_decode.Decoder(\n    features=feature_reader.feature_arr,\n    label=feature_reader.label,\n    label_name=feature_reader.label_name,\n    used_chs=feature_reader.used_chs,\n    model=model,\n    eval_method=metrics.r2_score,\n    cv_method=model_selection.KFold(n_splits=3, shuffle=True),\n)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "performances = feature_reader.run_ML_model(\n    estimate_channels=True,\n    estimate_gridpoints=False,\n    estimate_all_channels_combined=True,\n    save_results=True,\n)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "The performances are a dictionary that can be transformed into a DataFrame:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "df_per = feature_reader.get_dataframe_performances(performances)\n\ndf_per"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "ax = nm_plots.plot_df_subjects(\n    df_per,\n    x_col=\"sub\",\n    y_col=\"performance_test\",\n    hue=\"ch_type\",\n    PATH_SAVE=PATH_OUT / RUN_NAME / (RUN_NAME + \"_decoding_performance.png\"),\n    figsize_tuple=(8, 5),\n)\nax.set_ylabel(r\"$R^2$ Correlation\")\nax.set_xlabel(\"Subject 000\")\nax.set_title(\"Performance comparison Movement decoding\")\nplt.tight_layout()"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.10.14"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
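For comparison, the same 3-fold, R^2-scored evaluation can be sketched with plain scikit-learn on the feature DataFrame produced above. This is only a sketch, not part of the notebook: the "time" column name is an assumption, while "MOV_RIGHT", feature_reader.feature_arr and feature_reader.label come from the cells above.

from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold, cross_val_score

# Drop the label and the (assumed) time column to keep only feature columns
X = feature_reader.feature_arr.drop(columns=["MOV_RIGHT", "time"], errors="ignore")
y = feature_reader.label

scores = cross_val_score(
    LinearRegression(),
    X,
    y,
    cv=KFold(n_splits=3, shuffle=True),
    scoring="r2",
)
print("Mean R^2 across folds:", scores.mean())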