diff --git a/docs/build/html/.buildinfo b/docs/build/html/.buildinfo
index 163b109..c977bca 100644
--- a/docs/build/html/.buildinfo
+++ b/docs/build/html/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 1c283aa17fd01622f6bf08681bef7016
+config: 8782ea0dc7d40fb0c18a8dc67655cab3
tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/build/html/_images/edfbrowser_import_annotations.png b/docs/build/html/_images/edfbrowser_import_annotations.png
new file mode 100644
index 0000000..41a30a5
Binary files /dev/null and b/docs/build/html/_images/edfbrowser_import_annotations.png differ
diff --git a/docs/build/html/_images/edfbrowser_with_hypnogram.png b/docs/build/html/_images/edfbrowser_with_hypnogram.png
new file mode 100644
index 0000000..46648b3
Binary files /dev/null and b/docs/build/html/_images/edfbrowser_with_hypnogram.png differ
diff --git a/docs/build/html/_images/spectrogram.png b/docs/build/html/_images/spectrogram.png
new file mode 100644
index 0000000..03c25dd
Binary files /dev/null and b/docs/build/html/_images/spectrogram.png differ
diff --git a/docs/build/html/_images/visbrain.PNG b/docs/build/html/_images/visbrain.PNG
new file mode 100644
index 0000000..aa887d9
Binary files /dev/null and b/docs/build/html/_images/visbrain.PNG differ
diff --git a/docs/build/html/_images/yasa-plot_spectrogram-1.png b/docs/build/html/_images/yasa-plot_spectrogram-1.png
index 3851613..f14dc46 100644
Binary files a/docs/build/html/_images/yasa-plot_spectrogram-1.png and b/docs/build/html/_images/yasa-plot_spectrogram-1.png differ
diff --git a/docs/build/html/_images/yasa-plot_spectrogram-2.png b/docs/build/html/_images/yasa-plot_spectrogram-2.png
index fcc7761..29b856d 100644
Binary files a/docs/build/html/_images/yasa-plot_spectrogram-2.png and b/docs/build/html/_images/yasa-plot_spectrogram-2.png differ
diff --git a/docs/build/html/_images/yasa-topoplot-1.png b/docs/build/html/_images/yasa-topoplot-1.png
index 9b49839..d2cae33 100644
Binary files a/docs/build/html/_images/yasa-topoplot-1.png and b/docs/build/html/_images/yasa-topoplot-1.png differ
diff --git a/docs/build/html/_images/yasa-topoplot-2.png b/docs/build/html/_images/yasa-topoplot-2.png
index bc0e522..2011471 100644
Binary files a/docs/build/html/_images/yasa-topoplot-2.png and b/docs/build/html/_images/yasa-topoplot-2.png differ
diff --git a/docs/build/html/_images/yasa-transition_matrix-1.png b/docs/build/html/_images/yasa-transition_matrix-1.png
index 11ddc13..2fa259f 100644
Binary files a/docs/build/html/_images/yasa-transition_matrix-1.png and b/docs/build/html/_images/yasa-transition_matrix-1.png differ
diff --git a/docs/build/html/_modules/index.html b/docs/build/html/_modules/index.html
index ffc7a92..126d9c1 100644
--- a/docs/build/html/_modules/index.html
+++ b/docs/build/html/_modules/index.html
@@ -3,9 +3,10 @@
[docs] def get_coincidence_matrix(self, scaled=True):
+ """Return the (scaled) coincidence matrix.
+
+ Parameters
+ ----------
+ scaled : bool
+ If True (default), the coincidence matrix is scaled (see Notes).
+
+ Returns
+ -------
+ coincidence : pd.DataFrame
+ A symmetric matrix with the (scaled) coincidence values.
+
+ Notes
+ -----
+ Do spindles occur at the same time? One way to measure this is to
+ calculate the coincidence matrix, which gives, for each pair of
+ channels, the number of samples that were marked as a spindle in both
+ channels. The output is a symmetric matrix, in which the diagonal is
+ simply the number of data points that were marked as a spindle in the
+ channel.
+
+ The coincidence matrix can be scaled (default) by dividing the output
+ by the product of the sum of each individual binary mask, as shown in
+ the example below. It can then be used to define functional
+ networks or quickly find outlier channels.
+
+ Examples
+ --------
+ Calculate the coincidence of two binary masks:
+
+ >>> import numpy as np
+ >>> x = np.array([0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1])
+ >>> y = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1])
+ >>> x * y
+ array([0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1])
+
+ >>> (x * y).sum() # Unscaled coincidence
+ 3
+
+ >>> (x * y).sum() / (x.sum() * y.sum()) # Scaled coincidence
+ 0.12
+
+ References
+ ----------
+ - https://github.com/Mark-Kramer/Sleep-Networks-2021
+ """
+ return super().get_coincidence_matrix(scaled=scaled)
+
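As a hedged aside (illustration only, not part of the YASA source): the channel-by-channel matrix described in the Notes above can be sketched with plain NumPy, assuming a hypothetical binary detection mask of shape (n_chan, n_samples):

>>> import numpy as np
>>> mask = np.array([[0, 1, 1, 0, 0, 1],
...                  [0, 1, 0, 0, 1, 1],
...                  [1, 1, 1, 0, 0, 0]])
>>> counts = mask @ mask.T  # unscaled coincidence: samples marked in both channels
>>> counts / np.outer(mask.sum(axis=1), mask.sum(axis=1))  # scaled coincidence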
[docs] def get_mask(self):
    """Return a boolean array indicating for each sample in data if this
    sample is part of a detected event (True) or not (False).
@@ -1171,7 +1252,7 @@
Source code for yasa.detection
spindles-related sigma signals are defined in ``freq_sw`` and ``freq_sp``, respectively. For more details, please refer to the `Jupyter notebook
- <https://github.com/raphaelvallat/yasa/blob/master/notebooks/12_spindles-SO_coupling.ipynb>`_
+ <https://github.com/raphaelvallat/yasa/blob/master/notebooks/12_SO-sigma_coupling.ipynb>`_
Note that setting ``coupling=True`` may significantly increase computation time.
@@ -1326,7 +1407,7 @@
Source code for yasa.detection
# Negative peaks with value comprised between -40 to -300 uV
idx_neg_peaks, _ = signal.find_peaks(-1 * data_filt[i, :], height=amp_neg)
- # Positive peaks with values comprised between 10 to 150 uV
+ # Positive peaks with values comprised between 10 to 200 uV
idx_pos_peaks, _ = signal.find_peaks(data_filt[i, :], height=amp_pos)
# Intersect with sleep stage vector
idx_neg_peaks = np.intersect1d(idx_neg_peaks, idx_mask,
@@ -1378,19 +1459,24 @@
Source code for yasa.detection
neg_sorted = np.searchsorted(zero_crossings, idx_neg_peaks)
previous_neg_zc = zero_crossings[neg_sorted - 1] - idx_neg_peaks
following_neg_zc = zero_crossings[neg_sorted] - idx_neg_peaks
- neg_phase_dur = (np.abs(previous_neg_zc) + following_neg_zc) / sf
- # Distance (in samples) between the positive peaks and the previous and
+ # Distance between the positive peaks and the previous and
# following zero-crossings
pos_sorted = np.searchsorted(zero_crossings, idx_pos_peaks)
previous_pos_zc = zero_crossings[pos_sorted - 1] - idx_pos_peaks
following_pos_zc = zero_crossings[pos_sorted] - idx_pos_peaks
+
+ # Duration of the negative and positive phases, in seconds
+ neg_phase_dur = (np.abs(previous_neg_zc) + following_neg_zc) / sf
pos_phase_dur = (np.abs(previous_pos_zc) + following_pos_zc) / sf
# We now compute a set of metrics
sw_start = times[idx_neg_peaks + previous_neg_zc]
sw_end = times[idx_pos_peaks + following_pos_zc]
- sw_dur = sw_end - sw_start  # Same as pos_phase_dur + neg_phase_dur
+ # This should be the same as `sw_dur = pos_phase_dur + neg_phase_dur`
+ # We round to avoid floating point error (e.g. 1.9000000002)
+ sw_dur = (sw_end - sw_start).round(4)
+ sw_dur_both_phase = (pos_phase_dur + neg_phase_dur).round(4)
sw_midcrossing = times[idx_neg_peaks + following_neg_zc]
sw_idx_neg = times[idx_neg_peaks]  # Location of negative peak
sw_idx_pos = times[idx_pos_peaks]  # Location of positive peak
@@ -1404,21 +1490,24 @@
Source code for yasa.detection
# And we apply a set of thresholds to remove bad slow waves
good_sw = np.logical_and.reduce((
- # Data edges
- previous_neg_zc != 0,
- following_neg_zc != 0,
- previous_pos_zc != 0,
- following_pos_zc != 0,
- # Duration criteria
- neg_phase_dur > dur_neg[0],
- neg_phase_dur < dur_neg[1],
- pos_phase_dur > dur_pos[0],
- pos_phase_dur < dur_pos[1],
- # Sanity checks
- sw_midcrossing > sw_start,
- sw_midcrossing < sw_end,
- sw_slope > 0,
- ))
+ # Data edges
+ previous_neg_zc != 0,
+ following_neg_zc != 0,
+ previous_pos_zc != 0,
+ following_pos_zc != 0,
+ # Duration criteria
+ sw_dur == sw_dur_both_phase,        # dur = negative + positive
+ sw_dur <= dur_neg[1] + dur_pos[1],  # dur < max(neg) + max(pos)
+ sw_dur >= dur_neg[0] + dur_pos[0],  # dur > min(neg) + min(pos)
+ neg_phase_dur > dur_neg[0],
+ neg_phase_dur < dur_neg[1],
+ pos_phase_dur > dur_pos[0],
+ pos_phase_dur < dur_pos[1],
+ # Sanity checks
+ sw_midcrossing > sw_start,
+ sw_midcrossing < sw_end,
+ sw_slope > 0,
+ ))
if all(~good_sw):
    logger.warning('No SW were found in channel %s.', ch_names[i])
@@ -1583,6 +1672,55 @@
[docs] def get_coincidence_matrix(self, scaled=True):
+ """Return the (scaled) coincidence matrix.
+
+ Parameters
+ ----------
+ scaled : bool
+ If True (default), the coincidence matrix is scaled (see Notes).
+
+ Returns
+ -------
+ coincidence : pd.DataFrame
+ A symmetric matrix with the (scaled) coincidence values.
+
+ Notes
+ -----
+ Do slow-waves occur at the same time? One way to measure this is to
+ calculate the coincidence matrix, which gives, for each pair of
+ channels, the number of samples that were marked as a slow-wave in both
+ channels. The output is a symmetric matrix, in which the diagonal is
+ simply the number of data points that were marked as a slow-wave in
+ the channel.
+
+ The coincidence matrix can be scaled (default) by dividing the output
+ by the product of the sum of each individual binary mask, as shown in
+ the example below. It can then be used to define functional
+ networks or quickly find outlier channels.
+
+ Examples
+ --------
+ Calculate the coincidence of two binary masks:
+
+ >>> import numpy as np
+ >>> x = np.array([0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1])
+ >>> y = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1])
+ >>> x * y
+ array([0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1])
+
+ >>> (x * y).sum() # Coincidence
+ 3
+
+ >>> (x * y).sum() / (x.sum() * y.sum()) # Scaled coincidence
+ 0.12
+
+ References
+ ----------
+ - https://github.com/Mark-Kramer/Sleep-Networks-2021
+ """
+ return super().get_coincidence_matrix(scaled=scaled)
+
[docs] def get_mask(self):
    """Return a boolean array indicating for each sample in data if this
    sample is part of a detected event (True) or not (False).
@@ -2483,7 +2621,7 @@
30 seconds epochs. Using an upsampled hypnogram will result in an incorrect transition matrix. For best results, we recommend using an hypnogram cropped to
- either the time in bed (TIB) or the sleep period time (SPT).
+ either the time in bed (TIB) or the sleep period time (SPT), without
+ any artefact / unscored epochs.

Returns
-------
- counts : array
- Counts transition matrix (number of transitions from stage X to
- stage Y).
- probs : array
+ counts : :py:class:`pandas.DataFrame`
+ Counts transition matrix (number of transitions from stage A to
+ stage B). The pre-transition states are the rows and the
+ post-transition states are the columns.
+ probs : :py:class:`pandas.DataFrame`
Conditional probability transition matrix, i.e.
- given that current state is X, what is the probability that
- the next state is Y.
+ given that current state is A, what is the probability that
+ the next state is B.
``probs`` is a `right stochastic matrix <https://en.wikipedia.org/wiki/Stochastic_matrix>`_, i.e. each row sums to 1.

Examples
--------
+ >>> import numpy as np
>>> from yasa import transition_matrix
- >>> a = [1, 1, 1, 0, 0, 2, 2, 0, 2, 0, 1, 1, 0, 0]
+ >>> a = [0, 0, 0, 1, 1, 0, 1, 2, 2, 3, 3, 2, 3, 3, 0, 2, 2, 1, 2, 2, 3, 3]
>>> counts, probs = transition_matrix(a)
>>> counts
- 0 1 2
+ 0 1 2 3
Stage
- 0 2 1 2
- 1 2 3 0
- 2 2 0 1
+ 0 2 2 1 0
+ 1 1 1 2 0
+ 2 0 1 3 3
+ 3 1 0 1 3
- >>> probs
- 0 1 2
+ >>> probs.round(2)
+ 0 1 2 3
Stage
- 0 0.400000 0.2 0.400000
- 1 0.400000 0.6 0.000000
- 2 0.666667 0.0 0.333333
+ 0 0.40 0.40 0.20 0.00
+ 1 0.25 0.25 0.50 0.00
+ 2 0.00 0.14 0.43 0.43
+ 3 0.20 0.00 0.20 0.60
- We can plot the transition matrix using :py:func:`seaborn.heatmap`:
+ Several metrics of sleep fragmentation can be calculated from the
+ probability matrix. For example, the stability of sleep stages can be
+ calculated by taking the average of the diagonal values (excluding Wake
+ and N1 sleep):
+
+ >>> np.diag(probs.loc[2:, 2:]).mean().round(3)
+ 0.514
+
+ Finally, we can plot the transition matrix using :py:func:`seaborn.heatmap`:

.. plot::
@@ -172,14 +187,14 @@
Source code for yasa.sleepstats
>>> ax.xaxis.set_label_position('top')
    """
x = np.asarray(hypno, dtype=int)
- unique, inverse = np.unique(x, return_inverse=True)
+ unique, inverse = np.unique(x, return_inverse=True)  # unique is sorted
n = unique.size
# Integer transition counts
counts = np.zeros((n, n), dtype=int)
np.add.at(counts, (inverse[:-1], inverse[1:]), 1)
# Conditional probabilities
probs = counts / counts.sum(axis=-1, keepdims=True)
- # Optional, convert to Pandas
+ # Convert to a Pandas DataFrame
counts = pd.DataFrame(counts, index=unique, columns=unique)
probs = pd.DataFrame(probs, index=unique, columns=unique)
counts.index.name = 'Stage'
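For illustration, the resulting ``probs`` DataFrame can be displayed as a heatmap. The snippet below is a minimal, hypothetical sketch (it is not the documentation's own ``.. plot::`` code, and the styling arguments are only suggestions):

import matplotlib.pyplot as plt
import seaborn as sns

# probs: the right stochastic DataFrame returned by yasa.transition_matrix()
ax = sns.heatmap(probs, annot=True, fmt=".2f", cmap="Blues", vmin=0, vmax=1,
                 cbar_kws={"label": "P(next stage | current stage)"})
ax.set_xlabel("To stage")
ax.set_ylabel("From stage")
plt.show()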
@@ -232,7 +247,7 @@
Source code for yasa.sleepstats
* Time in Bed (TIB): total duration of the hypnogram.
* Sleep Period Time (SPT): duration from first to last period of sleep.
* Wake After Sleep Onset (WASO): duration of wake periods within SPT.
- * Total Sleep Time (TST): SPT - WASO.
+ * Total Sleep Time (TST): total duration of N1 + N2 + N3 + REM sleep in SPT.
* Sleep Efficiency (SE): TST / TIB * 100 (%).
* Sleep Maintenance Efficiency (SME): TST / SPT * 100 (%).
* W, N1, N2, N3 and REM: sleep stages duration. NREM = N1 + N2 + N3.
@@ -240,6 +255,11 @@
Source code for yasa.sleepstats
* Latencies: latencies of sleep stages from the beginning of the record.
* Sleep Onset Latency (SOL): Latency to first epoch of any sleep.
+ .. warning::
+ Since YASA 0.5.0, Artefact and Unscored epochs are now excluded from the calculation of the
+ total sleep time (TST). Previously, YASA calculated TST as SPT - WASO, thus including
+ Art and Uns. TST is now calculated as the sum of all REM and NREM sleep in SPT.
+
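For example, a minimal hypothetical call on a toy hypnogram. It assumes YASA's integer staging convention (0 = Wake, 1 = N1, 2 = N2, 3 = N3, 4 = REM) and that ``sf_hyp`` is the sampling frequency of the hypnogram (1/30 Hz for one value per 30-s epoch); check the API reference for the exact signature:

import yasa

# Toy hypnogram, one value per 30-second epoch
hypno = [0, 0, 1, 2, 2, 3, 3, 2, 2, 4, 4, 0, 2, 2, 0, 0]
stats = yasa.sleep_statistics(hypno, sf_hyp=1/30)
print(stats['TST'], stats['SE'])  # total sleep time and sleep efficiency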
References
----------
* Iber, C. (2007). The AASM manual for the scoring of sleep and
@@ -296,7 +316,9 @@
Source code for yasa.sleepstats
hypno_s = hypno[first_sleep:(last_sleep + 1)]
stats['SPT'] = hypno_s.size
stats['WASO'] = hypno_s[hypno_s == 0].size
- stats['TST'] = stats['SPT'] - stats['WASO']
+ # Before YASA v0.5.0, TST was calculated as SPT - WASO, meaning that Art
+ # and Unscored epochs were included. TST is now restrained to sleep stages.
+ stats['TST'] = hypno_s[hypno_s > 0].size

# Duration of each sleep stages
stats['N1'] = hypno[hypno == 1].size
@@ -305,7 +327,7 @@
Source code for yasa.sleepstats
stats['REM'] = hypno[hypno == 4].size
stats['NREM'] = stats['N1'] + stats['N2'] + stats['N3']
- # Sleep stage latencies
+ # Sleep stage latencies -- only relevant if hypno is cropped to TIB
stats['SOL'] = first_sleep
stats['Lat_N1'] = np.where(hypno == 1)[0].min() if 1 in hypno else np.nan
stats['Lat_N2'] = np.where(hypno == 2)[0].min() if 2 in hypno else np.nan
@@ -341,7 +363,7 @@
Notes
-----
+ If you use the SleepStaging module in a publication, please cite the following preprint:
+
+ * A universal, open-source, high-performance tool for automated sleep staging. Raphael Vallat,
+ Matthew P. Walker. bioRxiv 2021.05.28.446165; doi: https://doi.org/10.1101/2021.05.28.446165
+
+ We provide below some key points on the algorithm and its validation. For more details,
+ we refer the reader to the preprint article. If you have any questions,
+ make sure to first check the
+ `FAQ section <https://raphaelvallat.com/yasa/build/html/faq.html>`_ of the documentation.
+ If you did not find the answer to your question, please feel free to open an issue on GitHub.
+
**1. Features extraction**

For each 30-seconds epoch and each channel, the following features are
@@ -159,9 +172,10 @@
Source code for yasa.staging
* Higuchi and Petrosian fractal dimension

In addition, the algorithm also calculates a smoothed and normalized
- version of these features. Specifically, a 5-min centered weighted rolling
- average and a 10 min past rolling average are applied. The resulting
- smoothed features are then normalized using a robust z-score.
+ version of these features. Specifically, a 7.5 min centered
+ triangular-weighted rolling average and a 2 min past rolling average
+ are applied. The resulting smoothed features are then normalized using a
+ robust z-score.

The data are automatically downsampled to 100 Hz for faster computation.
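As a rough, hypothetical sketch of this smoothing step with pandas (window sizes expressed in 30-s epochs: 15 epochs = 7.5 min, 4 epochs = 2 min; the robust z-score is sketched as (x - median) / IQR; YASA's exact implementation may differ):

import numpy as np
import pandas as pd

feat = pd.Series(np.random.rand(960))  # one (hypothetical) feature value per 30-s epoch, ~8 h
# 7.5-min centered, triangular-weighted rolling average (15 epochs)
smooth_c = feat.rolling(15, center=True, win_type="triang").mean()
# 2-min past rolling average (4 epochs)
smooth_p = feat.rolling(4, min_periods=1).mean()
# Robust z-score of the smoothed feature
robust_z = (smooth_c - smooth_c.median()) / (smooth_c.quantile(0.75) - smooth_c.quantile(0.25))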
@@ -231,6 +245,10 @@
Source code for yasa.staging
>>> confidence = proba.max(axis=1)
>>> # Plot the predicted probabilities
>>> sls.plot_predict_proba()
+
+ The sleep scores can then be manually edited in an external graphical user interface
+ (e.g. EDFBrowser), as described in the
+ `FAQ <https://raphaelvallat.com/yasa/build/html/faq.html>`_. """
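Putting the documented usage together, a minimal hypothetical end-to-end script (the file name and channel names are placeholders; age and sex metadata can also be passed, see the class documentation):

import mne
import yasa

raw = mne.io.read_raw_edf("MYEDFFILE.edf", preload=True)
sls = yasa.SleepStaging(raw, eeg_name="C4-A1", eog_name="EOG1", emg_name="EMG1")
hypno_pred = sls.predict()       # one predicted stage per 30-s epoch
proba = sls.predict_proba()      # per-epoch class probabilities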
assert sf > 80, 'Sampling frequency must be at least 80 Hz.'
if sf != 100:
    raw_pick.resample(100, npad="auto")
-     sf = 100
+     sf = raw_pick.info['sfreq']

# Get data and convert to microVolts
data = raw_pick.get_data() * 1e6
@@ -311,25 +329,6 @@
Source code for yasa.staging
(8, 12, 'alpha'), (12, 16, 'sigma'), (16, 30, 'beta')]
- #######################################################################
- # HELPER FUNCTIONS
- #######################################################################
-
- def nzc(x):
- """Calculate the number of zero-crossings along the last axis."""
- return ((x[..., :-1] * x[..., 1:]) < 0).sum(axis=1)
-
- def mobility(x):
- """Calculate Hjorth mobility on the last axis."""
- return np.sqrt(np.diff(x, axis=1).var(axis=1) / x.var(axis=1))
-
- def petrosian(x):
- """Calculate the Petrosian fractal dimension on the last axis."""
- n = x.shape[1]
- ln10 = np.log10(n)
- diff = np.diff(x, axis=1)
- return ln10 / (ln10 + np.log10(n / (n + 0.4 * nzc(diff))))
-
#######################################################################
# CALCULATE FEATURES
#######################################################################
@@ -346,16 +345,16 @@
Source code for yasa.staging
times, epochs = sliding_window(dt_filt, sf=sf, window=30)

# Calculate standard descriptive statistics
- hmob = mobility(epochs)
+ hmob, hcomp = ant.hjorth_params(epochs, axis=1)

feat = {'std': np.std(epochs, ddof=1, axis=1),
        'iqr': sp_stats.iqr(epochs, rng=(25, 75), axis=1),
        'skew': sp_stats.skew(epochs, axis=1),
        'kurt': sp_stats.kurtosis(epochs, axis=1),
- 'nzc': nzc(epochs),
+ 'nzc': ant.num_zerocross(epochs, axis=1),
        'hmob': hmob,
- 'hcomp': mobility(np.diff(epochs, axis=1)) / hmob
+ 'hcomp': hcomp}

# Calculate spectral power features (for EEG + EOG)
@@ -384,7 +383,7 @@
This is a major release with an important bugfix for the slow-waves detection as well as API-breaking changes in the automatic sleep staging module. We recommend that all users upgrade to this version with pip install --upgrade yasa.
+
Slow-waves detection
+
We have fixed a critical bug in yasa.sw_detect() in which the detection could keep slow-waves with invalid duration (e.g. several tens of seconds). We have now added extra safety checks to make sure that the total duration of the slow-waves does not exceed the maximum duration allowed by the dur_neg and dur_pos parameters (default = 2.5 seconds).
+
+
Warning
+
Please make sure to double-check any results obtained with yasa.sw_detect().
+
+
Sleep staging
+
Recently, we have published a preprint article describing YASA’s sleep staging algorithm and its validation across hundreds of polysomnography recordings. In July 2021, we have received comments from three reviewers, which have led us to implement several changes to the sleep staging algorithm.
+The most significant change is that the time lengths of the rolling windows have been updated from 5.5 minutes centered / 5 minutes past to 7.5 minutes centered / 2 min past, leading to slight improvements in accuracy. Furthermore, we have also updated the training database and the parameters of the LightGBM classifier.
+Unfortunately, these changes mean that the new version of the algorithm is no longer compatible with the previous versions (0.4.0 or 0.4.1). Therefore, if you’re running a longitudinal study with YASA’s sleep staging, we recommend either keeping the previous version of YASA, or updating to the new version and reprocessing all your nights with the new algorithm for consistency.
+
Sleep statistics
+
Artefact and Unscored epochs are now excluded from the calculation of the total sleep time (TST) in yasa.sleep_statistics(). Previously, YASA calculated TST as SPT - WASO, thus including Art and Uns. TST is now calculated as the sum of all REM and NREM sleep in SPT.
We have added the yasa.SpindlesResults.get_coincidence_matrix() and yasa.SWResults.get_coincidence_matrix() methods to calculate the (scaled) coincidence matrix.
+The coincidence matrix gives, for each pair of channels, the number of samples that were marked as an event (spindles or slow-waves) in both channels. In other words, it gives an indication of whether events (spindles or slow-waves) are co-occurring for any pair of channels.
+The scaled version of the coincidence matrix can then be used to define functional networks or quickly find outlier channels.
YASA now uses the antropy package to calculate non-linear features in the automatic sleep staging module. Previously, YASA was using EntroPy, which could not be installed using pip.
+
v0.4.0 (November 2020)
This is a major release with several new functions, the biggest of which is the addition of an automatic sleep staging module (yasa.SleepStaging). This means that YASA can now automatically score the sleep stages of your raw EEG data. The classifier was trained and validated on more than 3000 nights from the National Sleep Research Resource (NSRR) website.
-
Briefly, the algorithm works by calculating a set of features for each 30-sec epochs from a central EEG channel (required), as well as an EOG channel (optional) and an EMG channel (optional). For best performance, users can also specify the age and the sex of the participants. Pre-trained classifiers are already included in YASA. The automatic sleep staging algorithm requires the LightGBM and antropy package.
+
Briefly, the algorithm works by calculating a set of features for each 30-sec epochs from a central EEG channel (required), as well as an EOG channel (optional) and an EMG channel (optional). For best performance, users can also specify the age and the sex of the participants. Pre-trained classifiers are already included in YASA. The automatic sleep staging algorithm requires the LightGBM and antropy package.
Other changes
yasa.SpindlesResults() and yasa.SWResults() now have a plot_detection method which allows to interactively display the raw data with an overlay of the detected spindles. For now, this only works with Jupyter and it requires the ipywidgets package.
This is a major release with several API-breaking changes in the spindles, slow-waves and REMs detection.
@@ -147,18 +180,19 @@
v0.3.0 (May 2020)
Other changes
The coupling argument has been removed from the yasa.spindles_detect() function. Instead, slow-oscillations / sigma coupling can only be calculated from the slow-waves detection, which is 1) the most standard way, 2) better because PAC assumptions require a strong oscillatory component in the lower frequency range (slow-oscillations). This also avoids unnecessary confusion between spindles-derived coupling and slow-waves-derived coupling. For more details, refer to the Jupyter notebooks.
-
Downsampling of data in detection functions has been removed. In other words, YASA will no longer downsample the data to 100 / 128 Hz before applying the events detection. If the detection is too slow, we recommend that you manually downsample your data before applying the detection. See for example mne.filter.resample().
+
Downsampling of data in detection functions has been removed. In other words, YASA will no longer downsample the data to 100 / 128 Hz before applying the events detection. If the detection is too slow, we recommend that you manually downsample your data before applying the detection. See for example mne.filter.resample().
yasa.trimbothstd() can now work with multi-dimensional arrays. The trimmed standard deviation will always be calculated on the last axis of the array.
Filtering and Hilbert transform are now applied at once on all channels (instead of looping across individual channels) in the yasa.spindles_detect() and yasa.sw_detect() functions. This should lead to some improvements in computation time.
+
v0.2.0 (April 2020)
This is a major release with several new functions, bugfixes and miscellaneous enhancements in existing functions.
Bugfixes
Sleep efficiency in the yasa.sleep_statistics() is now calculated using time in bed (TIB) as the denominator instead of sleep period time (SPT), in agreement with the AASM guidelines. The old way of computing the efficiency (TST / SPT) has now been renamed Sleep Maintenance Efficiency (SME).
-
The yasa.sliding_window() now always return an array of shape (n_epochs, …, n_samples), i.e. the epochs are now always the first dimension of the epoched array. This is consistent with MNE default shape of mne.Epochs objects.
+
The yasa.sliding_window() now always return an array of shape (n_epochs, …, n_samples), i.e. the epochs are now always the first dimension of the epoched array. This is consistent with MNE default shape of mne.Epochs objects.
New functions
@@ -188,6 +222,7 @@
v0.2.0 (April 2020)
Updated dependencies version for MNE and scikit-learn.
+
v0.1.9 (February 2020)
New functions
@@ -207,6 +242,7 @@
v0.1.9 (February 2020)
Removed Travis and AppVeyor testing for Python 3.5.
+
v0.1.8 (October 2019)
@@ -216,6 +252,7 @@
v0.1.8 (October 2019)
Added a notebook on non-linear features.
+
v0.1.7 (August 2019)
@@ -224,6 +261,7 @@
v0.1.7 (August 2019)
Reorganized code into several sub-files for readability (internal changes with no effect on user experience).
+
v0.1.6 (August 2019)
@@ -231,6 +269,7 @@
v0.1.6 (August 2019)
One can now directly pass a raw MNE object in several multi-channel functions of YASA, instead of manually passing data, sf, and ch_names. YASA will automatically convert MNE data from Volts to uV, and extract the sampling frequency and channel names. Examples of this can be found in the Jupyter notebooks examples.
+
v0.1.5 (August 2019)
@@ -239,12 +278,14 @@
v0.1.5 (August 2019)
Added yasa/spectral.py file, which includes the bandpower_from_psd function to calculate the single or multi-channel spectral power in specified bands from a pre-computed PSD (see example notebook at notebooks/10_bandpower.ipynb)
+
v0.1.4 (May 2019)
Added get_sync_sw function to get the synchronized timings of landmarks timepoints in slow-wave sleep. This can be used in combination with seaborn.lineplot to plot an average template of the detected slow-wave, per channel.
+
v0.1.3 (March 2019)
@@ -256,6 +297,7 @@
v0.1.3 (March 2019)
Travis and AppVeyor test for Python 3.5, 3.6 and 3.7
+
v0.1.2 (February 2019)
@@ -265,6 +307,7 @@
v0.1.2 (February 2019)
Changed some default parameters to optimize behavior
+
v0.1.1 (January 2019)
@@ -273,6 +316,7 @@
v0.1.1 (January 2019)
Added Cz full night in notebooks/
+
v0.1 (December 2018)
Initial release of YASA: basic spindles detection.
If you have polysomnography data in European Data Format (.edf), you can use the MNE package to load and preprocess your data in Python. MNE also supports several other standard formats (e.g. BrainVision, BDF, EEGLab). A simple preprocessing pipeline using MNE is shown below.
+
import mne
+# Load the EDF file
+raw = mne.io.read_raw_edf('MYEDFFILE.edf', preload=True)
+# Downsample the data to 100 Hz
+raw.resample(100)
+# Apply a bandpass filter from 0.1 to 40 Hz
+raw.filter(0.1, 40)
+# Select a subset of EEG channels
+raw.pick_channels(['C4-A1', 'C3-A2'])
+
YASA is a command-line software and does not support data visualization. To scroll through your data, we recommend the free software EDFBrowser (https://www.teuniz.net/edfbrowser/):
The spindles detection is a custom adaptation of the Lacourse et al 2018 method. A step-by-step description of the algorithm can be found in this notebook.
There are several parameters that can be adjusted in the spindles / slow-waves / artefact detection. While the default parameters should work reasonably well on most data, they might not be adequate for your data, especially if you’re working with specific populations (e.g. older adults, kids, patients with certain disorders, etc).
+
For the sake of example, let’s say that you have 100 recordings and you want to apply YASA to automatically detect the spindles. However, you’d like to fine-tune the parameters to your data. We recommend the following approach:
+
+
Grab a few representative recordings (e.g. 5 or 10 out of 100) and manually annotate the sleep spindles. You can use EDFBrowser to manually score the sleep spindles. Ideally, the manual scoring should be high-quality, so you may also ask a few other trained individuals to score the same data until you reach a consensus.
+
Apply YASA on the same recordings, first with the default parameters and then by slightly varying each parameter. For example, you may want to use a different detection threshold each time you run the algorithm, or a different frequency band for the filtering. In other words, you loop across several possible combinations of parameters. Save the resulting detection dataframe.
+
Finally, find the combination of parameters that give you the results that are the most similar to your own scoring. For example, you can use the combination of parameters that maximize the F1-score of the detected spindles against your own visual detection.
+
Use the “winning” combination to score the remaining recordings in your database.
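A minimal sketch of the comparison in steps 3 and 4, using a sample-wise F1-score from scikit-learn (``manual_mask`` and ``candidate_masks`` are hypothetical: the first is your own sample-wise scoring as a boolean array, the second maps each tested parameter combination to the boolean mask returned by the detection's get_mask() method):

from sklearn.metrics import f1_score

# Score every candidate parameter combination against the manual scoring
scores = {params: f1_score(manual_mask, mask) for params, mask in candidate_masks.items()}
best_params = max(scores, key=scores.get)
print(best_params, scores[best_params])  # use this combination on the remaining recordings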
YASA does not currently support visual editing of the detected events. However, you can import the events as annotations in EDFBrowser and edit the events from there. If you simply want to visualize the detected events (no editing), you can also use the plot_detection method.
YASA was trained and evaluated on a large and heterogeneous database of thousands of polysomnography recordings, including healthy individuals and patients with sleep disorders. Overall, the results show that YASA matches human inter-rater agreement, with an accuracy of ~85% against expert consensus scoring. The full validation of YASA can be found in the preprint article:
+
+
Raphael Vallat and Matthew P. Walker (2021). A universal, open-source, high-performance tool for automated sleep staging. bioRxiv 2021.05.28.446165; doi: https://doi.org/10.1101/2021.05.28.446165
+
+
However, our recommendation is that YASA should not replace human scoring, but rather serve as a starting point to speed up sleep staging. If possible, you should always have a trained sleep scorer visually check the predictions of YASA, with a particular emphasis on low-confidence epochs and/or N1 sleep epochs, as these are the epochs most often misclassified by the algorithm.
+Finally, users can also leverage the yasa.plot_spectrogram() function to plot the predicted hypnogram on top of the full-night spectrogram. Such plots are very useful to quickly identify blatant errors in the hypnogram.
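For instance, a minimal hypothetical call (``data``, ``sf`` and ``hypno`` are placeholders for a central EEG channel in uV, its sampling frequency and the predicted hypnogram; the keyword names of the upsampling helper are assumptions, check the API reference):

import yasa

# Upsample the epoch-wise hypnogram to one value per data sample, then overlay it
hypno_up = yasa.hypno_upsample_to_data(hypno, sf_hypno=1/30, data=data, sf_data=sf)
yasa.plot_spectrogram(data, sf, hypno=hypno_up)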
YASA does not come with a graphical user interface (GUI) and therefore editing the predicted hypnogram is not currently possible. The simplest way is therefore to export the hypnogram in CSV format and then open the file — together with the corresponding polysomnography data — in an external GUI, as shown below.
+
+
EDFBrowser
+
EDFBrowser is a free software for visualizing polysomnography data in European Data Format (.edf), which also provides a module for visualizing and editing hypnograms.
+
The code below shows how to export the hypnogram in an EDFBrowser-compatible format. It assumes that you have already run the algorithm and stored the predicted hypnogram in an array named hypno.
+
# Export to a CSV file compatible with EDFBrowser
+import numpy as np
+import pandas as pd
+hypno_export = pd.DataFrame({
+    "onset": np.arange(len(hypno)) * 30,
+    "label": hypno,
+    "duration": 30})
+hypno_export.to_csv("my_hypno_EDFBrowser.csv", index=False)
+
+
+
You can then import the hypnogram in EDFBrowser by clicking on “Import annotations/events” in the “Tools” menu. Then, select the “ASCII/CSV” tab and change the parameters as follows:
+
+
+
+
Click “Import”. Once it’s done, the hypnogram can be enabled via the “Window” menu. A dialog will appear where you can set up the labels for the different sleep stages and the mapping to the annotations in the file. The default parameters should work.
+When using the Annotation editor, the hypnogram will be updated in real time when adding, moving or deleting annotations. Once you’re done editing, you can export the edited hypnogram with “Export annotations/events” in the “Tools” menu.
+
+
+
+
+
SpiSOP
+
SpiSOP is an open-source Matlab toolbox for the analysis and visualization of polysomnography sleep data. It comes with a sleep scoring GUI.
+As explained in the documentation, the hypnogram should be a tab-separated text file with two columns (no headers). The first column has the sleep stages (0: Wake, 1: N1, 2: N2, 3: N3, 5: REM) and the second column indicates whether the current epoch should be marked as artefact (1) or valid (0).
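For illustration, a minimal sketch of such an export from a YASA hypnogram (0 = Wake, 1 = N1, 2 = N2, 3 = N3, 4 = REM), remapping REM from 4 to 5 and marking every epoch as valid; the output file name is hypothetical:

import numpy as np
import pandas as pd

hypno = np.asarray(hypno)                # hypothetical YASA hypnogram, one value per epoch
stage = np.where(hypno == 4, 5, hypno)   # SpiSOP codes REM as 5 instead of 4
artefact = np.zeros_like(stage)          # 0 = valid epoch, 1 = artefact
pd.DataFrame({"stage": stage, "artefact": artefact}).to_csv(
    "my_hypno_SpiSOP.txt", sep="\t", header=False, index=False)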
YASA was only designed for human scalp data and as such will not work with animal data or intracranial data. Adding support for such data would require the two following steps:
+
+
Modifying (some of) the features. For example, rodent sleep does not have the same temporal dynamics as human sleep, and therefore one could modify the length of the smoothing window to better capture these dynamics.
+
Re-training the classifier using a large database of previously-scored data.
+
+
Despite these required changes, one advantage of YASA is that it provides a useful framework for implementing such sleep staging algorithms. For example, one can save a huge amount of time by simply re-using and adapting the built-in yasa.SleepStaging class.
+In addition, all the code used to train YASA is freely available at https://github.com/raphaelvallat/yasa_classifier and can be re-used to re-train the classifier on non-human data.
YASA uses outdated, a Python package that automatically checks if a newer version of YASA is available upon loading. Alternatively, you can click “Watch” on the GitHub page of YASA.
+Whenever a new release is out, you can upgrade your version by typing the following line in a terminal window:
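pip install --upgrade yasa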
There are many ways to contribute to YASA, even if you are not a programmer: for example, reporting bugs or results that are inconsistent with other software, improving the documentation and examples, or even buying the developers a coffee!
To cite YASA, please use the preprint publication:
+
+
Raphael Vallat and Matthew P. Walker (2021). A universal, open-source, high-performance tool for automated sleep staging. bioRxiv 2021.05.28.446165; doi: https://doi.org/10.1101/2021.05.28.446165
+
+
BibTeX:
+
@article {Vallat2021.05.28.446165,
+ author = {Vallat, Raphael and Walker, Matthew P.},
+ title = {A universal, open-source, high-performance tool for automated sleep staging},
+ elocation-id = {2021.05.28.446165},
+ year = {2021},
+ doi = {10.1101/2021.05.28.446165},
+ publisher = {Cold Spring Harbor Laboratory},
+ abstract = {The creation of a completely automated sleep-scoring system that is highly accurate, flexible, well validated, free and simple to use by anyone has yet to be accomplished. In part, this is due to the difficulty of use of existing algorithms, algorithms having been trained on too small samples, and paywall demotivation. Here we describe a novel algorithm trained and validated on +27,000 hours of polysomnographic sleep recordings across heterogeneous populations around the world. This tool offers high sleep-staging accuracy matching or exceeding human accuracy and interscorer agreement no matter the population kind. The software is easy to use, computationally low-demanding, open source, and free. Such software has the potential to facilitate broad adoption of automated sleep staging with the hope of becoming an industry standard.Competing Interest StatementThe authors have declared no competing interest.AbbreviationsAHIapnea-hypopnea indexBMIbody mass indexEEGelectroencephalogramEOGelectrooculogramEMGelectromyogramOSAobstructive sleep apneaPSGpolysomnographyMCCMatthews correlation coefficientNREMnon rapid eye movement (sleep)REMrapid eye movement (sleep)},
+ URL = {https://www.biorxiv.org/content/early/2021/05/28/2021.05.28.446165},
+ eprint = {https://www.biorxiv.org/content/early/2021/05/28/2021.05.28.446165.full.pdf},
+ journal = {bioRxiv}
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/build/html/generated/yasa-plot_spectrogram-1.png b/docs/build/html/generated/yasa-plot_spectrogram-1.png
index 3851613..f14dc46 100644
Binary files a/docs/build/html/generated/yasa-plot_spectrogram-1.png and b/docs/build/html/generated/yasa-plot_spectrogram-1.png differ
diff --git a/docs/build/html/generated/yasa-plot_spectrogram-2.png b/docs/build/html/generated/yasa-plot_spectrogram-2.png
index fcc7761..29b856d 100644
Binary files a/docs/build/html/generated/yasa-plot_spectrogram-2.png and b/docs/build/html/generated/yasa-plot_spectrogram-2.png differ
diff --git a/docs/build/html/generated/yasa-topoplot-1.png b/docs/build/html/generated/yasa-topoplot-1.png
index 9b49839..d2cae33 100644
Binary files a/docs/build/html/generated/yasa-topoplot-1.png and b/docs/build/html/generated/yasa-topoplot-1.png differ
diff --git a/docs/build/html/generated/yasa-topoplot-2.png b/docs/build/html/generated/yasa-topoplot-2.png
index bc0e522..2011471 100644
Binary files a/docs/build/html/generated/yasa-topoplot-2.png and b/docs/build/html/generated/yasa-topoplot-2.png differ
diff --git a/docs/build/html/generated/yasa-transition_matrix-1.png b/docs/build/html/generated/yasa-transition_matrix-1.png
index 11ddc13..2fa259f 100644
Binary files a/docs/build/html/generated/yasa-transition_matrix-1.png and b/docs/build/html/generated/yasa-transition_matrix-1.png differ
diff --git a/docs/build/html/generated/yasa.REMResults.html b/docs/build/html/generated/yasa.REMResults.html
index aa9bd7a..daa0906 100644
--- a/docs/build/html/generated/yasa.REMResults.html
+++ b/docs/build/html/generated/yasa.REMResults.html
@@ -3,9 +3,10 @@
- yasa.REMResults — yasa 0.4.1 documentation
+ yasa.REMResults — yasa 0.5.0 documentation
+
@@ -37,13 +38,14 @@
yasa
- 0.4.1
+ 0.5.0
Return a summary of the REM detection, optionally grouped across stage.
@@ -168,13 +173,13 @@
yasa.REMResults
filt : tuple
Optional filtering to apply to data. For instance, filt=(1,30)
will apply a 1 to 30 Hz bandpass filter, and filt=(None,40)
will apply a 40 Hz lowpass filter. Filtering is done using default
-parameters in the mne.filter.filter_data() function.
Optional filtering to apply to data. For instance, filt=(1,30)
will apply a 1 to 30 Hz bandpass filter, and filt=(None,40)
will apply a 40 Hz lowpass filter. Filtering is done using default
-parameters in the mne.filter.filter_data() function.
If True (default), the coincidence matrix is scaled (see Notes).
+
+
+
+
Returns
+
+
coincidence : pd.DataFrame
A symmetric matrix with the (scaled) coincidence values.
+
+
+
+
+
Notes
+
Do slow-waves occur at the same time? One way to measure this is to
+calculate the coincidence matrix, which gives, for each pair of
+channels, the number of samples that were marked as a slow-wave in both
+channels. The output is a symmetric matrix, in which the diagonal is
+simply the number of data points that were marked as a slow-wave in
+the channel.
+
The coincidence matrix can be scaled (default) by dividing the output
+by the product of the sum of each individual binary mask, as shown in
+the example below. It can then be used to define functional
+networks or quickly find outlier channels.
Optional filtering to apply to data. For instance, filt=(1,30)
will apply a 1 to 30 Hz bandpass filter, and filt=(None,40)
will apply a 40 Hz lowpass filter. Filtering is done using default
-parameters in the mne.filter.filter_data() function.
Optional filtering to apply to data. For instance, filt=(1,30)
will apply a 1 to 30 Hz bandpass filter, and filt=(None,40)
will apply a 40 Hz lowpass filter. Filtering is done using default
-parameters in the mne.filter.filter_data() function.
The name of the EEG channel in raw. Preferentially a central
electrode referenced either to the mastoids (C4-M1, C3-M2) or to the
@@ -124,6 +126,16 @@
yasa.SleepStaging
Notes
+
If you use the SleepStaging module in a publication, please cite the following preprint:
+
+
A universal, open-source, high-performance tool for automated sleep staging. Raphael Vallat,
+Matthew P. Walker. bioRxiv 2021.05.28.446165; doi: https://doi.org/10.1101/2021.05.28.446165
+
+
We provide below some key points on the algorithm and its validation. For more details,
+we refer the reader to the preprint article. If you have any questions,
+make sure to first check the
+FAQ section of the documentation.
+If you did not find the answer to your question, please feel free to open an issue on GitHub.
1. Features extraction
For each 30-seconds epoch and each channel, the following features are
calculated:
@@ -140,9 +152,10 @@
yasa.SleepStaging
Higuchi and Petrosian fractal dimension
In addition, the algorithm also calculates a smoothed and normalized
-version of these features. Specifically, a 5-min centered weighted rolling
-average and a 10 min past rolling average are applied. The resulting
-smoothed features are then normalized using a robust z-score.
+version of these features. Specifically, a 7.5 min centered
+triangular-weighted rolling average and a 2 min past rolling average
+are applied. The resulting smoothed features are then normalized using a
+robust z-score.
The data are automatically downsampled to 100 Hz for faster
computation.
2. Sleep stages prediction
@@ -209,6 +222,9 @@
yasa.SleepStaging
>>> sls.plot_predict_proba()
+
The sleep scores can then be manually edited in an external graphical user interface
+(e.g. EDFBrowser), as described in the
+FAQ.
If True (default), the coincidence matrix is scaled (see Notes).
+
+
+
+
Returns
+
+
coincidence : pd.DataFrame
A symmetric matrix with the (scaled) coincidence values.
+
+
+
+
+
Notes
+
Do spindles occur at the same time? One way to measure this is to
+calculate the coincidence matrix, which gives, for each pair of
+channels, the number of samples that were marked as a spindle in both
+channels. The output is a symmetric matrix, in which the diagonal is
+simply the number of data points that were marked as a spindle in the
+channel.
+
The coincidence matrix can be scaled (default) by dividing the output
+by the product of the sum of each individual binary mask, as shown in
+the example below. It can then be used to define functional
+networks or quickly find outlier channels.
Optional filtering to apply to data. For instance, filt=(1,30)
will apply a 1 to 30 Hz bandpass filter, and filt=(None,40)
will apply a 40 Hz lowpass filter. Filtering is done using default
-parameters in the mne.filter.filter_data() function.
Optional filtering to apply to data. For instance, filt=(1,30)
will apply a 1 to 30 Hz bandpass filter, and filt=(None,40)
will apply a 40 Hz lowpass filter. Filtering is done using default
-parameters in the mne.filter.filter_data() function.
Single or multi-channel EEG data.
Unit must be uV and shape (n_chan, n_samples).
-Can also be a mne.io.BaseRaw, in which case data
+Can also be a mne.io.BaseRaw, in which case data
and sf will be automatically extracted,
and data will also be automatically converted from Volts (MNE)
to micro-Volts (YASA).
@@ -109,7 +111,7 @@
yasa.art_detect
sf : float
Sampling frequency of the data in Hz.
-Can be omitted if data is a mne.io.BaseRaw object.
The window length (= resolution) for artifact rejection, in seconds.
Default to 5 seconds. Shorter windows (e.g. 1 or 2-seconds) will
@@ -207,7 +209,7 @@
yasa.art_detect
of the log-transformed standard deviation of each channel and each epoch.
The main idea of this approach is to estimate a reference covariance
matrix \(\bar{C}\) (for each sleep stage separately if hypno is
@@ -273,7 +275,7 @@
1D or 2D EEG data. Can also be a mne.io.BaseRaw, in which
case data, sf, and ch_names will be automatically
extracted, and data will also be converted from Volts (MNE default)
to micro-Volts (YASA).
sf : float
The sampling frequency of data AND the hypnogram.
-Can be omitted if data is a mne.io.BaseRaw.
List of channel names, e.g. [‘Cz’, ‘F3’, ‘F4’, …]. If None,
channels will be labelled [‘CHAN000’, ‘CHAN001’, …].
-Can be omitted if data is a mne.io.BaseRaw.
Sleep stage (hypnogram). If the hypnogram is loaded, the
bandpower will be extracted for each sleep stage defined in
@@ -145,20 +147,20 @@
yasa.bandpower
bandpass : boolean
If True, apply a standard FIR bandpass filter using the minimum and
maximum frequencies in bands. For more details, refer to
-mne.filter.filter_data().
1D or 2D EEG data. Can also be a mne.io.BaseRaw, in which
case data, sf, and ch_names will be automatically
extracted, and data will also be converted from Volts (MNE default)
to micro-Volts (YASA).
sf : float
The sampling frequency of data AND the hypnogram.
-Can be omitted if data is a mne.io.BaseRaw.
List of channel names, e.g. [‘Cz’, ‘F3’, ‘F4’, …]. If None,
channels will be labelled [‘CHAN000’, ‘CHAN001’, …].
-Can be omitted if data is a mne.io.BaseRaw.
The default unit of mne.io.BaseRaw is Volts.
+Therefore, if passing data from a mne.io.BaseRaw,
you need to multiply the data by 1e6 to convert to micro-Volts
(1 V = 1,000,000 uV), e.g.:
>>> data = raw.get_data() * 1e6  # Make sure that data is in uV
@@ -171,7 +173,7 @@
This will give a pandas.DataFrame where each row is a
detected REM and each column is a parameter (= property).
To get the average parameters per sleep stage:
Latencies: latencies of sleep stages from the beginning of the record.
Sleep Onset Latency (SOL): Latency to first epoch of any sleep.
+
+
Warning
+
Since YASA 0.5.0, Artefact and Unscored epochs are now excluded from the calculation of the
+total sleep time (TST). Previously, YASA calculated TST as SPT - WASO, thus including
+Art and Uns. TST is now calculated as the sum of all REM and NREM sleep in SPT.
+
References
Iber, C. (2007). The AASM manual for the scoring of sleep and
@@ -201,7 +209,7 @@
Single or multi-channel data. Unit must be uV and shape (n_samples) or
-(n_chan, n_samples). Can also be a mne.io.BaseRaw,
+(n_chan, n_samples). Can also be a mne.io.BaseRaw,
in which case data, sf, and ch_names will be automatically
extracted, and data will also be automatically converted from
Volts (MNE) to micro-Volts (YASA).
sf : float
Sampling frequency of the data in Hz.
-Can be omitted if data is a mne.io.BaseRaw.
If the detection is taking too long, make sure to downsample
your data to 100 Hz (or 128 Hz). For more details, please refer to
-mne.filter.resample().
This will give a pandas.DataFrame where each row is a
detected spindle and each column is a parameter (= feature or property)
of this spindle. To get the average spindles parameters per channel and
sleep stage:
Single or multi-channel data. Unit must be uV and shape (n_samples) or
-(n_chan, n_samples). Can also be a mne.io.BaseRaw,
+(n_chan, n_samples). Can also be a mne.io.BaseRaw,
in which case data, sf, and ch_names will be automatically
extracted, and data will also be automatically converted from
Volts (MNE) to micro-Volts (YASA).
sf : float
Sampling frequency of the data in Hz.
-Can be omitted if data is a mne.io.BaseRaw.
If the detection is taking too long, make sure to downsample
your data to 100 Hz (or 128 Hz). For more details, please refer to
-mne.filter.resample().
Sleep stage (hypnogram). If the hypnogram is loaded, the
detection will only be applied to the value defined in
@@ -190,7 +192,7 @@
yasa.sw_detect
The lower and upper frequencies for the slow-waves and
spindles-related sigma signals are defined in freq_sw and
freq_sp, respectively.
-For more details, please refer to the Jupyter notebook
This will give a pandas.DataFrame where each row is a
detected slow-wave and each column is a parameter (= property).
To get the average SW parameters per channel and sleep stage:
Hypnogram. The dtype of hypno must be integer
@@ -102,18 +104,20 @@
yasa.transition_matrix
30 seconds epochs. Using an upsampled hypnogram will result in an
incorrect transition matrix.
For best results, we recommend using an hypnogram cropped to
-either the time in bed (TIB) or the sleep period time (SPT).
+either the time in bed (TIB) or the sleep period time (SPT), without
+any artefact / unscored epochs.
Returns
-
-
counts : array
Counts transition matrix (number of transitions from stage X to
-stage Y).
Counts transition matrix (number of transitions from stage A to
+stage B). The pre-transition states are the rows and the
+post-transition states are the columns.
-
probs : array
Conditional probability transition matrix, i.e.
-given that current state is X, what is the probability that
-the next state is Y.
+
Conditional probability transition matrix, i.e.
+given that current state is A, what is the probability that
+the next state is B.
probs is a right stochastic matrix,
i.e. each row sums to 1.
Several metrics of sleep fragmentation can be calculated from the
+probability matrix. For example, the stability of sleep stages can be
+calculated by taking the average of the diagonal values (excluding Wake
+and N1 sleep):
Some basic knowledge of Python, especially the NumPy, Pandas and MNE packages.
A Python editor: YASA works best with Jupyter Lab, a web-based interactive user interface.
-
Some sleep EEG data and optionally a sleep staging file (hypnogram) to perform calculations on specific sleep stages. To facilitate masking and indexing operations, the data and hypnogram must have the same sampling frequency and number of samples. YASA provide some convenient functions to load and upsample hypnogram data to the desired shape.
+
Some sleep EEG data and optionally a sleep staging file (hypnogram).
I have sleep EEG data in European Data Format (.edf), how do I load the data in Python?
If you have sleep EEG data in standard formats (e.g. EDF or BrainVision), you can use the MNE package to load and preprocess your data in Python. A simple preprocessing pipeline using MNE is shown below:
import mne
-# Load the EDF file, excluding the EOGs and EKG channels
-raw = mne.io.read_raw_edf('MYEDFFILE.edf', preload=True, exclude=['EOG1', 'EOG2', 'EKG'])
-raw.resample(100)  # Downsample the data to 100 Hz
-raw.filter(0.1, 40)  # Apply a bandpass filter from 0.1 to 40 Hz
-raw.pick_channels(['C4-A1', 'C3-A2'])  # Select a subset of EEG channels
+# Load the EDF file
+raw = mne.io.read_raw_edf('MYEDFFILE.edf', preload=True)
+# Downsample the data to 100 Hz
+raw.resample(100)
+# Apply a bandpass filter from 0.1 to 40 Hz
+raw.filter(0.1, 40)
+# Select a subset of EEG channels
+raw.pick_channels(['C4-A1', 'C3-A2'])
How do I get started with YASA?
-
If you want to dive right in, you can simply go to the main documentation (API reference) and try to apply YASA’s functions on your own EEG data. However, for most users, we strongly recommend that you first try running the examples Jupyter notebooks to get a sense of how YASA works and what it can do! The advantage is that the notebooks also come with example datasets so they should work right out of the box as long as you’ve installed YASA first. The notebooks and datasets can be found on GitHub (make sure that you download the whole notebooks/ folder). A short description of all notebooks is provided below:
-
Spindles detection
-
-
01_spindles_detection: single-channel spindles detection and step-by-step description of the spindles detection algorithm.
run_visbrain: interactive display of the detected spindles using the Visbrain visualization software in Python.
-
-
Slow-waves detection
+
If you want to dive right in, you can simply go to the main documentation (API reference) and try to apply YASA’s functions on your own EEG data.
+However, for most users, we strongly recommend that you first try running the examples Jupyter notebooks to get a sense of how YASA works and what it can do!
+The notebooks also come with example datasets so they should work right out of the box as long as you’ve installed YASA first.
+The notebooks and datasets can be found on GitHub (make sure that you download the whole notebooks/ folder). A short description of all notebooks is provided below:
+
Automatic sleep staging
-
05_sw_detection: single-channel slow-waves detection and step-by-step description of the slow-waves detection algorithm.
YASA was created and is maintained by Raphael Vallat. Contributions are more than welcome so feel free to contact me, open an issue or submit a pull request!
+
YASA was created and is maintained by Raphael Vallat, a postdoctoral researcher in Matthew Walker’s lab at UC Berkeley. Contributions are more than welcome so feel free to contact me, open an issue or submit a pull request!
To see the code or report a bug, please visit the GitHub repository.
Note that this program is provided with NO WARRANTY OF ANY KIND.
Citation
-
To cite YASA, please use the Zenodo DOI:
-
+
To cite YASA, please use the preprint publication:
+
+
Raphael Vallat and Matthew P. Walker (2021). A universal, open-source, high-performance tool for automated sleep staging. bioRxiv 2021.05.28.446165; doi: https://doi.org/10.1101/2021.05.28.446165
diff --git a/docs/build/html/searchindex.js b/docs/build/html/searchindex.js
index 17c4354..e165d0a 100644
--- a/docs/build/html/searchindex.js
+++ b/docs/build/html/searchindex.js
@@ -1 +1 @@
-Search.setIndex({docnames:["api","changelog","contributing","generated/yasa.REMResults","generated/yasa.SWResults","generated/yasa.SleepStaging","generated/yasa.SpindlesResults","generated/yasa.art_detect","generated/yasa.bandpower","generated/yasa.bandpower_from_psd","generated/yasa.bandpower_from_psd_ndarray","generated/yasa.hypno_int_to_str","generated/yasa.hypno_str_to_int","generated/yasa.hypno_upsample_to_data","generated/yasa.hypno_upsample_to_sf","generated/yasa.irasa","generated/yasa.load_profusion_hypno","generated/yasa.moving_transform","generated/yasa.plot_spectrogram","generated/yasa.rem_detect","generated/yasa.sleep_statistics","generated/yasa.sliding_window","generated/yasa.spindles_detect","generated/yasa.stft_power","generated/yasa.sw_detect","generated/yasa.topoplot","generated/yasa.transition_matrix","index"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":2,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["api.rst","changelog.rst","contributing.rst","generated/yasa.REMResults.rst","generated/yasa.SWResults.rst","generated/yasa.SleepStaging.rst","generated/yasa.SpindlesResults.rst","generated/yasa.art_detect.rst","generated/yasa.bandpower.rst","generated/yasa.bandpower_from_psd.rst","generated/yasa.bandpower_from_psd_ndarray.rst","generated/yasa.hypno_int_to_str.rst","generated/yasa.hypno_str_to_int.rst","generated/yasa.hypno_upsample_to_data.rst","generated/yasa.hypno_upsample_to_sf.rst","generated/yasa.irasa.rst","generated/yasa.load_profusion_hypno.rst","generated/yasa.moving_transform.rst","generated/yasa.plot_spectrogram.rst","generated/yasa.rem_detect.rst","generated/yasa.sleep_statistics.rst","generated/yasa.sliding_window.rst","generated/yasa.spindles_detect.rst","generated/yasa.stft_power.rst","generated/yasa.sw_detect.rst","generated/yasa.topoplot.rst","generated/yasa.transition_matrix.rst","index.rst"],objects:{"yasa.REMResults":{__init__:[3,1,1,""],get_mask:[3,1,1,""],get_sync_events:[3,1,1,""],plot_average:[3,1,1,""],summary:[3,1,1,""]},"yasa.SWResults":{__init__:[4,1,1,""],get_mask:[4,1,1,""],get_sync_events:[4,1,1,""],plot_average:[4,1,1,""],plot_detection:[4,1,1,""],summary:[4,1,1,""]},"yasa.SleepStaging":{__init__:[5,1,1,""],fit:[5,1,1,""],get_features:[5,1,1,""],plot_predict_proba:[5,1,1,""],predict:[5,1,1,""],predict_proba:[5,1,1,""]},"yasa.SpindlesResults":{__init__:[6,1,1,""],get_mask:[6,1,1,""],get_sync_events:[6,1,1,""],plot_average:[6,1,1,""],plot_detection:[6,1,1,""],summary:[6,1,1,""]},yasa:{REMResults:[3,0,1,""],SWResults:[4,0,1,""],SleepStaging:[5,0,1,""],SpindlesResults:[6,0,1,""],art_detect:[7,2,1,""],bandpower:[8,2,1,""],bandpower_from_psd:[9,2,1,""],bandpower_from_psd_ndarray:[10,2,1,""],hypno_int_to_str:[11,2,1,""],hypno_str_to_int:[12,2,1,""],hypno_upsample_to_data:[13,2,1,""],hypno_upsample_to_sf:[14,2,1,""],irasa:[15,2,1,""],load_profusion_hypno:[16,2,1,""],moving_transform:[17,2,1,""],plot_spectrogram:[18,2,1,""],rem_detect:[19,2,1,""],sleep_statistics:[20,2,1,""],sliding_window:[21,2,1,""],spindles_detect:[22,2,1,""],stft_power:[23,2,1,""],sw_detect:[24,2,1,""],topoplot:[25,2,1,""],transition_matrix:[26,2,1,""]}},objnames:{"0":["py","class","Python class"],"1":["py","method","Python method"],"2":["py","function","Python 
function"]},objtypes:{"0":"py:class","1":"py:method","2":"py:function"},terms:{"009ddc":5,"01_spindles_detect":[22,27],"02_spindles_detection_multi":[22,27],"03_spindles_detection_nrem_onli":[22,27],"04_spindles_slow_fast":[22,27],"05_sw_detect":[24,27],"06_sw_detect":1,"06_sw_detection_multi":27,"07_rems_detect":[19,27],"08_bandpow":[8,27],"09_irasa":[15,27],"10_bandpow":1,"10_spectrogram":27,"11_nonlinear_featur":27,"12_spindl":27,"13_artifact_reject":[7,27],"14_automatic_sleep_stag":[5,27],"15_topoplot":[1,27],"16hz":1,"1e6":19,"1hz":1,"299859v1":15,"2bfz":18,"2bpz":18,"5hz":22,"99d7f1":5,"barth\u00e9lemi":7,"boolean":[3,4,5,6,8,9,10,15,17,19,22,23,24],"break":1,"case":[5,7,8,13,15,22,24],"class":[1,3,4,5,6,7],"default":[1,3,4,5,6,7,8,9,10,15,16,17,18,19,20,21,22,23,24],"export":5,"float":[3,4,6,7,8,13,14,15,16,17,18,19,20,21,22,23,24,25],"function":[1,2,3,4,6,7,8,10,11,12,15,17,21,27],"import":[5,18,20,21,24,25,26,27],"int":[7,8,15,17,18,19,21,22,23,24,25],"long":[3,4,6,22,24,27],"new":[2,4,5,6,7,8,9,10,11,12,13,14,15,18,19,20,21,22,24,25,26],"public":[7,20],"return":[1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26],"short":[7,22,27],"switch":1,"true":[1,3,4,5,6,7,8,9,10,15,16,17,18,19,22,23,24,26,27],"try":27,AND:[8,15,18],Added:1,Eye:27,For:[1,3,4,5,6,7,8,15,16,18,19,22,24,25,26,27],One:1,RMS:22,The:[1,2,5,7,8,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27],Then:7,There:2,These:5,Uns:11,Use:[2,24],Using:26,With:21,__init__:[3,4,5,6],_ch_name:[3,4,6],_data:[3,4,6],_data_filt:[3,4,6],_event:[3,4,6],_hypno:[3,4,6],_sf:[3,4,6],aasm:[1,20],about:[1,2],abov:[7,17,22],absolut:[1,5,19,22,24],abspow:22,academi:20,accompani:2,account:[1,7],accur:[3,4,5,6,7],accuraci:5,across:[1,3,4,6,27],activ:[4,6],adapt:7,added:[1,24],adding:[2,15],addit:[1,5],address:1,adult:20,advanc:27,advantag:[1,27],after:[2,3,4,6,20],agarw:19,age:[1,5],aggfunc:[1,3,4,6],agreement:[1,5],algorithm:[1,5,19,22,24,27],all:[1,2,7,19,20,22,23,25,27],allow:1,alpha:[8,9,10],alreadi:[1,20],also:[1,2,5,7,8,13,15,20,22,24,27],alwai:[1,5],american:[5,20],amount:18,amp_neg:24,amp_po:24,amp_ptp:24,amplitud:[1,3,4,6,17,19,22,24,27],anaconda:27,analys:27,analysi:[15,27],ancoli:20,andreev:7,ani:[1,7,20,24,27],annot:[16,26],anoth:27,antropi:[1,5],aperiod:[15,27],api:[1,27],appli:[1,3,4,5,6,7,8,19,22,24,27],approach:[7,19],approxim:15,appveyor:1,arai:10,arang:21,argument:[1,3,4,6,8,15,25],arithmet:17,around:[1,7,11,12,21,24,25],arrai:[1,3,4,6,7,9,10,11,12,13,14,16,17,18,21,26],array_lik:[3,4,6,7,8,9,11,12,13,14,17,18,19,20,22,23,24,26],art:[11,12],art_detect:1,art_epoch:7,artefact:[1,7,8,18,19,20,22,24],artifact:[7,27],artifect:7,as_strid:21,associ:[5,15,20],assum:[5,20],assumpt:1,attribut:[3,4,6,15],auto:[5,15],automat:[1,5,7,8,13,15,19,22,24,27],avail:[1,4,6,17],averag:[1,3,4,5,6,8,9,15,19,22,24,27],avoid:1,awai:15,axi:[1,5,21],bad:7,band:[1,5,8,9,10,15,22,23,24,27],bandpa:24,bandpass:[1,3,4,6,8,24,27],bandpow:[1,9,10,27],bandpower_from_psd:[1,10],bandpower_from_psd_ndarrai:1,bandwidth:24,bandwis:23,bar:7,barach:7,barakat:24,base:[1,7,19,22,24,25,27],baseraw:[5,7,8,13,15,19,22,24],basic:[1,27],beaudri:22,becaus:[1,5],bed:[1,20,26],been:[1,24],befor:[1,2,3,4,6],begin:[1,19,20,22,24],behavior:[1,22],below:[1,7,27],best:[1,2,7,26,27],bet:2,beta:[5,8,9,10],better:[1,19,22,24],between:[1,7,8,9,10,15,17,19,21,22,24],biggest:1,bin:23,bio:19,biologi:7,biorxiv:15,bivari:23,blink:7,blob:[5,7,8,15,19,22,24],blue:5,bodi:7,bonnet:20,bool:[3,4,6,7,16,19,22,23,24],both:[1,15,18,19],bottom:27,box:27,brain:15,brainvis:27,briefli:1,bro
ad:[15,22,23],broadband:22,bug:[2,27],bugfix:1,bui:2,bytesio:18,calcul:[1,5,7,8,15,18,19,20,21,22,24,26,27],call:[1,24],can:[1,2,4,5,6,7,8,13,15,22,24,25,26,27],canthi:19,care:5,carrier:24,cbar:26,cbar_ax:26,cbar_kw:26,cbar_tick:25,cbar_titl:25,center:[1,3,4,5,6,24],central:[1,5],ch_name:[1,3,4,6,8,9,15,22,24],chan000:[8,9,15],chan001:[8,9,15],chang:[1,2,24],channel:[1,3,4,5,6,7,8,9,15,17,18,19,21,22,23,24,25,27],check:[1,5,27],chin:5,choic:[7,19,22,24],chokroverti:20,circ_mean:24,circ_r:24,circular:24,cite:27,classif:27,classifi:[1,5],clinic:20,close:22,closer:22,cluster:7,cmap:[18,25,26],code:[1,5,27],codebas:2,coeffici:25,coffe:2,color:[4,6,25],color_palett:25,colorbar:25,colormap:[18,25],column:[1,8,9,19,22,24],com:[5,7,8,15,16,17,18,19,22,24],combin:1,come:[5,27],command:27,common:5,comodulogram:27,compat:1,complex:5,compli:[1,2],compon:[1,15,27],compumed:[1,16],comput:[1,5,7,9,10,15,17,19,20,23,24],concaten:22,concret:5,condit:26,confid:5,confus:1,congedo:7,conserv:7,consid:[2,7],consist:[1,21],contact:27,contain:[1,7,8,9,10],contamin:7,content:15,continu:19,contrast:18,contribut:[1,27],conveni:27,convert:[1,5,7,8,11,12,15,19,22,24],copi:5,corr:[17,22],correctli:2,correl:[17,22,25],correspond:[11,12,13,17,21],could:1,count:[1,26],coupl:[1,24,27],covar:[7,17],covari:[7,17],creat:[26,27],critic:[7,19,22,24],crop:[13,20,26],cross:[5,24],cubic:[17,23],current:[5,7,13,14,24,26],damberg:20,data:[1,3,4,5,6,7,8,9,10,13,14,15,17,18,19,21,22,23,24,25,27],data_filt:[3,4,6],data_full_6hrs_100hz_cz:18,data_full_6hrs_100hz_hypno_30:18,datafram:[1,3,4,5,6,8,9,15,19,22,24],dataset:[1,27],dean:5,debug:[7,19,22,24],defin:[1,7,8,9,10,18,19,22,24,25],definit:7,deflect:24,degre:23,delfrat:22,delta:[5,8,9,10],demonstr:27,denni:5,denomin:1,denot:1,densiti:[1,9,10,15],depend:1,deprec:1,deriv:[1,5,22],describ:15,descript:[7,27],desir:[14,27],detail:[1,8,9,10,16,18,22,24,25,27],detect:[1,3,4,5,6,7,19,22,24,27],detrend:[1,22],deviat:[1,5,7,22,24],df_sync:[3,4,6],dict:[3,4,5,6,8,11,12,15,20,22,25],dictionari:5,dictionnari:[11,12],differ:[1,4,5,6,7,15],difficult:1,dimens:[1,5,21],dimension:[1,10],direct:24,directli:[1,18],directori:2,disabl:[1,22],discoveri:5,discret:25,discuss:2,disord:5,displai:[1,27],distanc:7,distribut:[7,15,18],dive:27,divid:[8,9,10],doc:2,docstr:2,document:[1,27],doi:[15,27],done:[2,3,4,6,23,26],doubl:5,download:27,downsampl:[1,5,22,24,27],dpi:25,drastic:7,driven:27,dtype:26,duggan:19,dur_neg:24,dur_po:24,durat:[1,19,20,22,24],dure:[7,22,24],each:[1,3,4,5,6,7,8,9,10,13,14,17,19,20,21,22,23,24,26],edf:[5,16,27],edit:2,editor:[16,27],eeg:[1,4,5,6,7,8,9,13,14,15,18,21,22,27],eeg_nam:5,effect:[1,7,8,19,22,24],effici:[1,20],eigenvalu:7,either:[4,5,6,26],ekg:[7,27],electrod:[5,7,25],element:25,emg1:5,emg2:5,emg:[1,5,7],emg_nam:5,emul:22,end:[18,19,22,24],engin:[7,19],enhanc:1,ensembl:[1,19,22,24],ensur:[2,17,19,22,23,24],enter:27,entir:27,entropi:[1,5],eog1:27,eog2:27,eog:[1,3,5,7,19,27],eog_nam:5,epoch:[1,5,7,16,17,20,21,24,26,27],equal:23,error:[7,19,22,24],especi:[5,27],essenti:[7,11,12],estim:[7,15],etc:2,ethnic:5,euclidean:7,european:[24,27],even:1,event:[1,3,4,6,20,27],everi:[7,15,17,23],exact:[8,13,18],exampl:[1,2,5,7,8,15,18,19,20,21,22,24,25,26,27],exceed:7,except:[16,20,22],exclud:[7,27],exclus:[1,5],exist:[1,2],expect:5,experi:[1,7],experiment:1,expert:22,explain:1,exponenti:15,express:[1,20,22,24],extract:[1,5,7,8,13,15,22,24],eye:[7,19,27],facilit:27,factor:15,fall:19,fals:[3,4,5,6,7,8,17,19,22,23,24,26],far:7,fast:27,faster:[1,5,7,23],fastest:[17,23],featur:[1,5,22,27],feedback:1,fe
el:27,fell:7,femal:5,ferrarelli:24,field:7,fieldtrip:15,fig:[18,25],figsiz:[3,4,6,25,26],figur:[3,4,6,18,25],file:[1,5,27],filenam:16,filipini:24,filt:[3,4,6],filter:[1,3,4,6,8,19,22,24,27],filter_data:[3,4,6,8],find:1,fir:[1,8,22,24],first:[1,7,19,20,22,24,25,27],fit:[5,13,15],fit_param:15,flake8:2,fmax:18,fmin:18,fmt:26,fname:16,focus:1,folder:27,follow:[2,5,15,20,22,24],font:25,fontsiz:25,fooof:15,fore:8,forest:1,format:[1,2,3,4,6,7,8,16,18,19,20,22,24,27],found:[1,5,16,25,27],fourier:22,fpz:5,fractal:[5,15,27],fraction:26,free:[19,22,24,27],freq:[9,10,15],freq_broad:22,freq_rem:19,freq_so:1,freq_sp:[1,22,24],freq_sw:[1,24],frequenc:[1,3,4,5,6,7,8,9,10,13,14,15,16,17,18,19,20,21,22,23,24,26,27],from:[1,5,7,8,15,18,19,20,21,22,24,25,26,27],full:[1,5,16,18,19,22,24,27],fundament:15,futur:1,gamma:[8,9,10],gender:5,generaliz:2,geometr:15,geometri:7,get:[1,5,18,19,22,24],get_bool_vector:1,get_centered_indic:1,get_data:19,get_featur:5,get_mask:[1,3,4,6],get_sync_ev:[1,3,4,6],get_sync_sw:1,github:[2,5,7,8,15,16,17,18,19,22,24,27],githubusercont:18,give:[19,22,24],given:[7,9,13,14,26],global:25,goal:15,good:[7,17,23],gotman:19,grand:1,greater:7,grid:23,grid_kw:26,gridspec_kw:26,grigg:20,group:[1,3,4,6],grp_chan:[1,4,6,22,24],grp_stage:[1,3,4,6,19,22,24],guidanc:2,guidelin:[1,20],guo:5,halfwai:22,ham:[8,15],handl:1,harmon:15,has:[1,7,8,17,19,22,24],have:[1,7,8,13,18,19,20,22,24,27],hdeeg:7,health:5,heatmap:26,height_ratio:26,help:[3,4,5,6],higher:[7,17,18,23],highest:1,highli:2,higuchi:5,hilbert:[1,22],hill:24,hirshkowitz:20,hjorth:5,homogen:1,horizont:26,how:[1,7,8,15,19,24],howev:[7,27],hset:15,hspace:26,html:[1,2,7,25],http:[4,5,6,7,8,15,16,17,18,19,22,24,25],huber:24,hue:[1,4,6],human:[5,22],hypno:[1,3,4,5,6,7,8,11,12,13,14,16,18,19,20,22,24,26],hypno_upsample_to_data:[7,8,18,19,22,24],hypnogram:[1,3,4,6,7,8,11,12,13,14,15,16,18,19,20,22,24,26,27],iber:20,idea:7,ideal:[8,15,22],ident:16,idxchannel:[3,4,6],ieee:[7,19],impact:18,implement:[1,7,10,22,24],importantli:24,improv:[1,2],inch:[3,4,6],includ:[1,2,7,8,19,22,24,27],incorrect:26,increas:[7,24],increment:15,inde:1,independ:15,index:[3,4,6,7,25,27],indic:[1,3,4,6,22,25],individu:1,inf:24,influenc:7,info:[7,19,22,24],informat:5,initi:[1,3,4,5,6],input:[1,15,17,23],inspect:[5,7],inspir:17,instal:[1,2,5],instanc:[1,3,4,5,6,21],instantan:22,instead:[1,22],integ:[7,8,11,12,15,16,18,19,20,22,24,26],inter:[5,7],interact:[1,4,6,27],intercept:15,interest:[1,8,9,10,15,24],interfac:27,intern:1,interp2d:23,interp:[17,23],interpol:[17,23],interquartil:5,invers:[8,15],involv:5,ipynb:[1,5,7,8,15,19,22,24],ipywidget:[1,4,6],irasa:[1,27],irregular:15,isol:1,isolationforest:[1,19,22,24],israel:20,issu:[1,2,7,27],iter:7,its:15,jcsm:20,joblib:5,joint:7,journal:[5,19,20,22,24],jupyt:[1,4,5,6,18,22,24,27],just:22,kapen:20,keenan:20,kei:[3,4,5,6],kept:22,keyword:[1,8,15],kind:27,know:1,knowledg:27,kryger:20,kurtosi:5,kwarg:[3,4,6,25],kwargs_welch:[8,15],lab:27,label:[8,9,15,26],lacours:22,lafortun:24,lambda_i:7,landmark:[1,3,4,6],larg:7,laroch:19,last:[1,20,21],lat_n1:20,lat_n2:20,lat_n3:20,lat_rem:20,latenc:[1,20],latest:[1,4,6,7],law:15,lead:1,learn:[1,2,19],least:[1,8,15,17,18,22],left:[5,7,19,27],length:[1,7,8,15,18,21,24],less:7,let:1,level:[7,19,22,24],lgbmclassifi:5,lib:21,life:24,light:20,lightgbm:[1,5],like:[1,2],limit:27,line:[4,6,18,27],linear:[1,15,27],lineplot:[1,3,4,6],list:[3,4,6,7,8,9,10,15,19,22,24,25],liu:15,load:[1,5,8,16,18,19,22,24,27],load_profusion_hypno:1,loadtxt:18,loc:[1,3,5,19],locabsfallslop:19,locabsriseslop:19,locabsvalpeak:19,local:
2,locat:[19,22,24],lock:[1,27],log10:22,log:[7,15,19,22,24],longer:[1,13,18],loop:1,low:5,lower:[1,8,9,10,15,18,23,24],lowercas:12,lowest:[1,5],lowpass:[3,4,6],lspopt:1,machin:19,mai:[7,24],main:[5,7,27],maintain:27,mainten:[1,20],major:[1,5,7,27],majority_onli:5,make:[1,2,4,6,7,19,22,24,27],make_standard_montag:25,male:5,mani:2,manipul:27,manual:[1,20],map:[11,12,16],mapping_dict:[11,12],mark:[7,16],martin:24,mask:[1,8,19,22,24,25,27],massimini:24,master:[5,7,8,15,18,19,22,24],mastoid:5,matplotlib:[4,6,18,25,26],matric:7,matrix:[1,7,21,26,27],max:[5,8,9,10,13,17,18,24,25],maximum:[8,17,19,22,24,25],mayaud:7,mcdevitt:19,mean:[1,3,4,6,7,8,15,17,18,19,22,24],mean_direct:24,median:[3,4,6,8,15,22],medic:[5,19],medicin:[5,7,20],mednick:19,merg:22,messag:[7,19,22,24],metadata:5,method:[1,3,4,5,6,7,15,17,19,22,27],micro:[7,8,15,19,22,24],midcross:24,middl:[17,24,27],min:[5,8,9,10,17,18,25],min_dist:22,minimum:[8,17,19,22,24,25],minor:1,minut:20,miscellan:1,misclassif:5,miss:1,mne:[1,3,4,5,6,7,8,13,15,19,22,24,25,27],mobil:5,mode:[4,6],model:15,modifi:1,modul:1,montag:25,more:[1,2,7,8,9,10,16,18,19,22,24,25,27],most:[1,5,6,7,19,22,24,27],move:[1,17,22],movement:[7,8,18,19,20,22,24,27],much:[7,23],multi:[1,4,6,7,18,22,24,27],multi_onli:22,multipl:15,multipli:19,multitap:18,must:[2,5,7,8,9,10,18,19,22,24,25,26,27],myedffil:27,myfil:5,n_band:10,n_chan:[1,3,4,6,7,10,22,24],n_chan_reject:7,n_channel:[9,21],n_color:25,n_epoch:[1,7,10,16,21],n_freq:[1,9,10],n_sampl:[1,3,4,6,7,21,22,24],name:[1,3,4,5,6,8,9,10,15,22,24,25],nap:5,nation:[1,5,16],nativ:16,ndarrai:[5,10,15,18,23],ndpac:[1,24],need:[1,2,13,19,27],neg:[1,4,19,24],negpeak:[1,4,24],neural:7,neurophysiolog:15,neurosci:[19,22,24],newer:1,next:[18,26],night:[1,5,7,18,27],niknazar:19,nois:7,non:[1,5,15,27],none:[3,4,5,6,7,8,9,13,15,17,18,19,21,22,23,24,25],norm:23,normal:[1,5,18,22,23,24],note:[5,7,8,11,12,15,16,17,18,19,20,21,22,23,24,27],notebook:[1,5,7,8,15,18,19,22,24,25,27],now:1,nperseg:[17,23],npz:18,nrem:[20,22,24],nsrr:[1,16],number:[3,4,5,6,7,8,13,18,19,22,24,25,26,27],numpi:[1,2,5,10,15,18,21,23,26,27],numpydoc:2,nutshel:15,object:[1,7],occur:[4,6,22],ocular:19,off:7,offer:15,offici:20,offset:15,ojeda:7,old:1,omit:[7,8,13,15,22,24],onc:1,one:[1,7,16,17,20,21,22,24,26],onli:[1,4,5,6,7,10,15,17,19,22,24],onlin:7,onset:[1,20],open:[2,27],oper:27,optim:[1,5],option:[1,3,4,5,6,8,15,17,18,27],org:[15,25],orient:26,origin:[6,15,22,26],oscil:[1,22,24,27],oscillatori:[1,15],other:[1,25],ouput:[3,4],out:[1,17,20,27],outdat:1,outlier:[1,19,22,24],output:[1,3,4,6,17,19,22,23,24],over:21,overlai:[1,4,6,27],overlap:[17,21,23],own:27,pac:[1,24],packag:[1,2,4,5,6,7,17,27],pad:13,pair:15,palett:[5,25],panda:[1,3,4,5,6,8,9,11,12,15,19,22,24,25,27],panel:27,paquet:24,paramet:[1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26],part:[3,4,6],particip:[1,5],pass:[1,2,3,4,6,7,8,15,19,25],past:5,path:[5,16],path_to_model:5,peak:[1,3,4,6,17,19,22,24],pearson:25,penzel:20,pep8:2,peppard:22,per:[1,13,14,16,20,22,24,26],percentag:20,percentil:[1,18],perform:[1,5,7,17,22,23,24,27],period:[1,15,20,26],permut:5,petrosian:5,phase:[1,24,27],phaseatsigmapeak:[1,24],physic:1,pick_channel:27,pingouin:24,pip:[1,2,27],pipelin:[1,27],place:[19,22,24],pleas:[1,2,5,7,8,15,16,18,19,22,24,25,27],plot:[1,3,4,5,6,18,25,26,27],plot_averag:[1,3,4,6],plot_detect:[1,4,6],plot_predict_proba:5,plot_spectrogram:1,plot_topomap:[1,25],plt:26,point:[7,22,23,24],pointwis:23,poirier:24,polysomnographi:[5,27],posit:[7,22,24,25],pospeak:24,possibl:1,post:1,potato:7,potenti:[2,5],pow
er:[1,5,8,9,10,15,22,23,27],pre:[1,5,7,9],precis:[17,23],pred:5,predict:5,predict_proba:5,preferenti:5,preload:[5,27],preprocess:27,prerequisit:27,present:[7,22],pressman:20,previous:[1,24],print:[1,7,19,22,24],prior:7,prob:26,proba:5,probabl:[5,26,27],process:1,produc:[4,6],product:19,profus:[1,16],program:27,promin:[1,6,22],prompt:27,prop_above_zero:17,properli:2,properti:[19,22,24],proport:[17,22],provid:[3,4,6,7,19,22,24,27],psd:[1,8,9,10,15,18],psd_aperiod:15,psd_oscillatori:15,psg:5,ptp:[17,24],pull:27,purpl:5,put:22,pydata:25,pyplot:26,pyriemann:[1,7],pytest:2,python:[1,2,7,17,27],qiang:5,qualiti:7,quickli:1,radian:[1,24],randint:21,random:[19,21,22,24],rang:[1,5,15,19,22,23,24,25],raphael:27,raphaelvallat:[5,7,8,15,18,19,22,24],rapid:[19,27],rater:5,ratio:[5,22],raw:[1,3,4,5,6,18,19,22,27],rdbu_r:18,read:18,read_raw_edf:[5,27],readabl:1,readm:1,readthedoc:[4,6,7],real:27,reason:5,reciproc:15,recommend:[1,2,7,26,27],record:[1,5,20,27],rectangular:23,rectbivariatesplin:23,redistribut:15,refer:[1,5,7,8,15,16,18,19,20,22,24,25,27],referenc:5,regress:17,rehabilit:7,reject:[7,27],rel:[1,5,8,9,10,22],rel_pow:22,relat:24,relationship:7,releas:1,relev:24,relpow:22,rem:[1,3,7,8,12,16,18,19,20,22,24,27],rem_detect:1,remain:15,remov:[1,7,19,22,24],remove_outli:[19,22,24],remresult:[1,19],renam:1,render:2,reorgan:1,replac:16,report:[2,27],repositori:27,represent:27,reproduc:[19,22,24,27],request:[18,27],requir:[1,4,6,19],resampl:[1,15,22,24,27],research:[1,5,16],resolut:[7,23,25],resourc:[1,5,16],respect:[13,24],restructuredtext:2,result:[1,5,7,13,15,19,21,22,24,26],return_fit:15,rich:5,riemannian:7,right:[7,19,26,27],rise:19,rms:[17,22],robillard:24,robust:5,roc:[1,3,19],rocabsfallslop:19,rocabsriseslop:19,rocabsvalpeak:19,roll:5,root:[1,17,22],row:[8,9,19,21,22,24,26,27],rule:20,run:[1,2,4,5,6,7,19,24,27],run_visbrain:27,s10548:15,same:[7,8,13,15,17,18,19,22,23,24,27],sampl:[1,3,4,6,7,8,13,14,15,16,17,18,19,20,21,22,23,24,26,27],sattari:19,scale:5,scientif:5,scikit:1,scipi:[8,9,10,15,23],score:[1,5,7,20,22,24],scorer:5,seaborn:[1,3,4,6,25,26],sec:[1,5,17,18,24],second:[1,3,4,5,6,7,8,13,14,15,16,17,18,19,20,21,22,23,24,26,27],section:1,see:[1,2,3,4,5,6,7,9,10,27],seed:[19,21,22,24],select:[1,22,27],self:[3,4,5,6],semilog:15,sens:27,sensit:7,separ:[7,15,27],sequenti:8,seri:[11,12,17,25],set:[1,5,21,24],set_label_posit:26,set_xlabel:26,set_ylabel:26,sever:[1,27],sex:[1,5],sf_data:[13,14],sf_hyp:[16,20],sf_hypno:[13,14],shape:[1,3,4,6,7,9,10,13,21,22,23,24,27],shorter:[7,13],should:[1,5,8,15,20,22,24,27],show:27,shown:27,sigma:[1,6,8,9,10,22,24],sigmapeak:[1,24],signal:[1,4,6,7,8,9,10,15,17,19,21,22,24],signatur:[3,4,5,6,7],signific:25,significantli:24,silber:20,similarli:1,simpl:[1,27],simpler:1,simplest:2,simpli:[7,15,27],sinc:24,singl:[1,7,17,18,22,23,24,27],situat:2,size:[3,4,6,17,21,23,25],skew:5,sklearn:[1,19,22,24],sleep:[1,3,4,5,6,7,8,11,12,13,14,16,18,19,20,22,24,26,27],sleep_statist:1,sleepstag:1,slide:[8,15,18,21],sliding_window:1,slightli:1,slope:[15,17,19,24],slow:[1,4,24,27],slower:[17,23],slowest:[17,23],sls:5,sme:[1,20],smooth:5,sns:26,so_coupl:27,societi:7,softwar:27,sol:20,sole:15,some:[1,7,27],soon:7,sophas:22,sort:[3,4,6],sourc:[3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26],space:15,specest:15,specif:[1,3,4,5,6,7,20,24,27],specifi:[1,8,9,23,24],spectra:15,spectral:[1,8,9,10,15,27],spectral_r:18,spectrogram:[18,27],spectrum:[15,27],sphinx:2,sphinx_bootstrap_them:2,spindl:[1,6,22,24,27],spindles_detect:1,spindles_detect_multi:1,spindlesresult:[1,22],spline:23,
spt:[1,20,26],squar:[1,17,22,26],stabl:1,stage:[1,3,4,5,6,7,8,11,12,13,14,18,19,20,22,24,26,27],standard:[1,2,5,7,8,20,22,24,27],standard_1020:25,start:[2,19,21,22,24,26],stat:20,state:[1,26],statist:[1,15,20,24,27],statu:5,std:7,step:[15,17,19,21,22,23,24,27],stft:23,stochast:26,str:[3,4,5,6,7,16,17,18,19,22,24,25],straightforward:7,stratifi:27,stream:18,stride:21,stride_trick:21,string:[11,12],strong:1,strongli:27,studi:19,style:2,sub:[1,5],submit:27,subplot:26,subset:27,sum:[23,26],sum_i:7,summari:[1,3,4,6,19,22,24],sunflow:5,support:[1,5,7,10],sure:[1,2,4,6,7,19,22,24,27],sw_detect:1,sw_detect_multi:1,swresult:[1,24],sxx:23,symmetr:7,symmetri:22,synchron:[1,3,4,6],system:[5,7],take:[7,15,22,24],taken:1,takeuchi:19,taper:[18,27],technic:20,templat:[1,27],tensorpac:1,term:22,termin:27,terminolog:20,test:[1,2],text:15,than:[1,7,13,18,19,22,23,24,27],thei:[2,22,27],them:2,therefor:[5,7,13,19],theta:[8,9,10],thi:[1,2,3,4,5,6,7,8,10,11,12,15,17,18,19,21,22,24,25,27],think:1,those:2,three:1,thresh:22,threshold:[1,7,19,22,24],through:[1,22,24],tib:[1,20,26],tick:25,tick_top:26,time:[1,3,4,6,7,8,15,17,19,20,21,22,23,24,26,27],time_aft:[1,3,4,6],time_befor:[1,3,4,6],timepoint:[1,3,4,6],timestamp:1,titl:25,tobi:7,tononi:24,too:[1,7,22,24],tool:[7,15],toolbox:27,top:[18,26,27],topograph:27,topographi:15,topoplot:[1,27],total:[1,5,8,9,10,20,22],totalabspow:1,toward:5,train:[1,5],transact:[7,19],transform:[1,7,17,22],transform_sign:17,transit:[1,22,24,26,27],transition_matrix:1,translat:16,travel:24,travi:1,trim:[1,18],trimbothstd:1,trimperc:18,trough:24,tst:[1,20],tupl:[3,4,6,7,8,9,10,15,19,22,23,24,25],turn:5,tutori:[24,25,27],twice:18,twilight:5,two:[1,3,8,15,16,17,22],txt:18,type:[3,4,5,6],typic:7,unecessari:1,unit:[1,7,19,22,24],uns:12,unscor:[1,7,8,18,19,20,22,24],updat:[1,2],upgrad:[2,27],upon:1,upper:[1,8,9,10,18,24],upsampl:[1,7,8,13,14,18,19,22,24,26,27],use:[1,2,3,4,5,6,7,8,15,17,19,22,23,24,25,27],used:[1,3,4,5,6,8,15,17,18],useful:1,user:[1,7,19,22,24,27],userwarn:13,uses:[1,2,7,19,22,24],using:[1,3,4,5,6,7,8,15,19,22,23,24,26,27],usual:[17,23],valid:[1,5,25],vallat:27,valnegpeak:24,valpospeak:24,valu:[1,7,8,13,14,15,16,17,18,19,20,22,23,24,25,26],vandewal:24,vari:15,variabl:[4,6],varianc:7,variou:2,various:15,vector:[1,3,4,6,7,8,15,17,18,19,20,21,22,23,24],vector_length:24,verbos:[1,7,19,22,24],veri:[5,7],version:[1,4,5,6,7,8,9,10,11,12,13,14,15,18,19,20,21,22,24,25,26],versu:27,via:[1,23],vien:24,visbrain:[1,27],visit:27,visual:[5,7,20,27],viz:[1,25],vmax:[25,26],vmin:[25,26],volt:[1,5,7,8,15,19,22,24],wai:[1,2],wake:[5,7,8,12,16,18,19,20,22,24],walkthrough:22,want:[1,2,24,27],warbi:22,warn:[1,7,13,19,22,24],warranti:27,waso:20,wave:[1,4,24,27],web:27,websit:[1,16],weight:5,welch:[8,9,10,15],welcom:27,well:[1,5,8,9,10],wen:15,were:5,what:[26,27],when:[1,2,3,4,5,6,7,8,18,19,22,24],where:[3,7,8,15,18,19,20,21,22,24],wherea:15,which:[1,4,5,6,7,8,9,10,13,15,16,21,22,23,24],whitehurst:19,whole:27,wide:[5,24],widget:[4,6],wiki:16,win_sec:[8,15,18],window:[7,8,15,17,18,21,23],within:[2,20,24],wonambi:17,word:1,work:[1,4,5,6,7,27],workshop:7,wrapper:[1,7,11,12,21,25],www:15,xaxi:26,xkcd:5,xml:[1,16],yasa:1,yasa_classifi:5,year:[5,24],yet:27,yetton:19,ylorrd:26,you:[1,2,5,7,19,22,24,27],your:[1,2,7,8,18,19,22,24,27],zenodo:27,zero:[5,17,24],zhang:5,zscore:7},titles:["API reference","What\u2019s new","Contribute to 
YASA","yasa.REMResults","yasa.SWResults","yasa.SleepStaging","yasa.SpindlesResults","yasa.art_detect","yasa.bandpower","yasa.bandpower_from_psd","yasa.bandpower_from_psd_ndarray","yasa.hypno_int_to_str","yasa.hypno_str_to_int","yasa.hypno_upsample_to_data","yasa.hypno_upsample_to_sf","yasa.irasa","yasa.load_profusion_hypno","yasa.moving_transform","yasa.plot_spectrogram","yasa.rem_detect","yasa.sleep_statistics","yasa.sliding_window","yasa.spindles_detect","yasa.stft_power","yasa.sw_detect","yasa.topoplot","yasa.transition_matrix","Installation"],titleterms:{"new":1,analys:0,api:0,april:1,art_detect:7,august:1,automat:0,bandpow:8,bandpower_from_psd:9,bandpower_from_psd_ndarrai:10,build:2,check:2,citat:27,code:2,contribut:2,decemb:1,detect:0,dev:[],develop:27,document:2,event:0,februari:1,galleri:27,get:27,guidelin:2,how:27,hypno_int_to_str:11,hypno_str_to_int:12,hypno_upsample_to_data:13,hypno_upsample_to_sf:14,hypnogram:0,instal:27,irasa:15,januari:1,load_profusion_hypno:16,mai:1,march:1,moving_transform:17,novemb:1,octob:1,plot:0,plot_spectrogram:18,refer:0,rem_detect:19,remresult:3,sleep:0,sleep_statist:20,sleepstag:5,sliding_window:21,spectral:0,spindles_detect:22,spindlesresult:6,stage:0,start:27,statist:0,stft_power:23,sw_detect:24,swresult:4,tool:0,topoplot:25,transition_matrix:26,what:1,yasa:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27]}})
\ No newline at end of file
+Search.setIndex({docnames:["api","changelog","contributing","faq","generated/yasa.REMResults","generated/yasa.SWResults","generated/yasa.SleepStaging","generated/yasa.SpindlesResults","generated/yasa.art_detect","generated/yasa.bandpower","generated/yasa.bandpower_from_psd","generated/yasa.bandpower_from_psd_ndarray","generated/yasa.hypno_int_to_str","generated/yasa.hypno_str_to_int","generated/yasa.hypno_upsample_to_data","generated/yasa.hypno_upsample_to_sf","generated/yasa.irasa","generated/yasa.load_profusion_hypno","generated/yasa.moving_transform","generated/yasa.plot_spectrogram","generated/yasa.rem_detect","generated/yasa.sleep_statistics","generated/yasa.sliding_window","generated/yasa.spindles_detect","generated/yasa.stft_power","generated/yasa.sw_detect","generated/yasa.topoplot","generated/yasa.transition_matrix","index"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":2,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["api.rst","changelog.rst","contributing.rst","faq.rst","generated/yasa.REMResults.rst","generated/yasa.SWResults.rst","generated/yasa.SleepStaging.rst","generated/yasa.SpindlesResults.rst","generated/yasa.art_detect.rst","generated/yasa.bandpower.rst","generated/yasa.bandpower_from_psd.rst","generated/yasa.bandpower_from_psd_ndarray.rst","generated/yasa.hypno_int_to_str.rst","generated/yasa.hypno_str_to_int.rst","generated/yasa.hypno_upsample_to_data.rst","generated/yasa.hypno_upsample_to_sf.rst","generated/yasa.irasa.rst","generated/yasa.load_profusion_hypno.rst","generated/yasa.moving_transform.rst","generated/yasa.plot_spectrogram.rst","generated/yasa.rem_detect.rst","generated/yasa.sleep_statistics.rst","generated/yasa.sliding_window.rst","generated/yasa.spindles_detect.rst","generated/yasa.stft_power.rst","generated/yasa.sw_detect.rst","generated/yasa.topoplot.rst","generated/yasa.transition_matrix.rst","index.rst"],objects:{"yasa.REMResults":{__init__:[4,1,1,""],get_mask:[4,1,1,""],get_sync_events:[4,1,1,""],plot_average:[4,1,1,""],summary:[4,1,1,""]},"yasa.SWResults":{__init__:[5,1,1,""],get_coincidence_matrix:[5,1,1,""],get_mask:[5,1,1,""],get_sync_events:[5,1,1,""],plot_average:[5,1,1,""],plot_detection:[5,1,1,""],summary:[5,1,1,""]},"yasa.SleepStaging":{__init__:[6,1,1,""],fit:[6,1,1,""],get_features:[6,1,1,""],plot_predict_proba:[6,1,1,""],predict:[6,1,1,""],predict_proba:[6,1,1,""]},"yasa.SpindlesResults":{__init__:[7,1,1,""],get_coincidence_matrix:[7,1,1,""],get_mask:[7,1,1,""],get_sync_events:[7,1,1,""],plot_average:[7,1,1,""],plot_detection:[7,1,1,""],summary:[7,1,1,""]},yasa:{REMResults:[4,0,1,""],SWResults:[5,0,1,""],SleepStaging:[6,0,1,""],SpindlesResults:[7,0,1,""],art_detect:[8,2,1,""],bandpower:[9,2,1,""],bandpower_from_psd:[10,2,1,""],bandpower_from_psd_ndarray:[11,2,1,""],hypno_int_to_str:[12,2,1,""],hypno_str_to_int:[13,2,1,""],hypno_upsample_to_data:[14,2,1,""],hypno_upsample_to_sf:[15,2,1,""],irasa:[16,2,1,""],load_profusion_hypno:[17,2,1,""],moving_transform:[18,2,1,""],plot_spectrogram:[19,2,1,""],rem_detect:[20,2,1,""],sleep_statistics:[21,2,1,""],sliding_window:[22,2,1,""],spindles_detect:[23,2,1,""],stft_power:[24,2,1,""],sw_detect:[25,2,1,""],topoplot:[26,2,1,""],transition_matrix:[27,2,1,""]}},objnames:{"0":["py","class","Python class"],"1":["py","method","Python 
method"],"2":["py","function","Python function"]},objtypes:{"0":"py:class","1":"py:method","2":"py:function"},terms:{"009ddc":6,"01_spindles_detect":23,"02_spindles_detection_multi":23,"03_spindles_detection_nrem_onli":23,"04_spindles_slow_fast":23,"05_sw_detect":25,"06_sw_detect":1,"07_rems_detect":20,"08_bandpow":9,"09_irasa":16,"10_bandpow":1,"13_artifact_reject":8,"14_automatic_sleep_stag":6,"15_topoplot":1,"16hz":1,"1e6":20,"1hz":1,"299859v1":16,"2bfz":19,"2bpz":19,"5hz":23,"99d7f1":6,"abstract":3,"barth\u00e9lemi":8,"boolean":[4,5,6,7,9,10,11,16,18,20,23,24,25],"break":1,"case":[6,8,9,14,16,23,25],"class":[1,3,4,5,6,7,8],"default":[1,3,4,5,6,7,8,9,10,11,16,17,18,19,20,21,22,23,24,25],"export":[3,6],"final":[3,27],"float":[4,5,7,8,9,14,15,16,17,18,19,20,21,22,23,24,25,26],"function":[1,2,3,4,5,7,8,9,11,12,13,16,18,22,28],"import":[1,3,5,6,7,19,21,22,25,26,27,28],"int":[8,9,16,18,19,20,22,23,24,25,26],"long":[4,5,7,23,25,28],"new":[2,3,5,6,7,8,9,10,11,12,13,14,15,16,19,20,21,22,23,25,26,27],"public":[3,6,8,21,28],"return":[1,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27],"short":[8,23,28],"switch":1,"true":[1,3,4,5,6,7,8,9,10,11,16,17,18,19,20,23,24,25,27,28],"try":28,"while":3,AND:[9,16,19],Added:1,Adding:3,For:[1,3,4,5,6,7,8,9,16,17,19,20,23,25,26,27,28],One:[1,5,7],RMS:23,Such:3,The:[1,2,3,5,6,7,8,9,12,13,14,15,16,17,19,20,21,22,23,24,25,26,27,28],Then:[3,8],There:[2,3],These:6,Uns:[1,12,21],Use:[2,3,25],Using:27,With:22,__init__:[4,5,6,7],_ch_name:[4,5,7],_data:[4,5,7],_data_filt:[4,5,7],_event:[4,5,7],_hypno:[4,5,7],_sf:[4,5,7],aasm:[1,21],abbreviationsahiapnea:3,about:[1,2],abov:[8,18,23],absolut:[1,6,20,23,25],abspow:23,academi:21,accept:3,accompani:2,accomplish:3,account:[1,8],accur:[3,4,5,6,7,8],accuraci:[1,3,6],across:[1,3,4,5,7,28],activ:[5,7],adapt:[3,8],add:3,added:[1,25],adding:[2,3,16],addit:[1,3,6],address:1,adequ:3,adjust:3,adopt:3,adult:[3,21],advantag:[1,3],after:[2,4,5,7,21],against:3,agarw:20,age:[1,6],aggfunc:[1,4,5,7],agreement:[1,3,6],algorithm:[1,3,6,20,23,25,28],all:[1,2,3,8,20,21,23,24,26,28],allow:1,alpha:[9,10,11],alreadi:[1,3,21],also:[1,2,3,6,8,9,14,16,21,23,25,28],altern:3,alwai:[1,3,6],american:[6,21],amount:[3,19],amp_neg:25,amp_po:25,amp_ptp:25,amplitud:[1,4,5,7,18,20,23,25,28],anaconda:28,analys:28,analysi:[3,16,28],ancoli:21,andreev:8,ani:[1,6,8,21,25,27,28],anim:3,annot:[3,17,27],anot:3,anoth:28,answer:6,antropi:[1,6],anyon:3,aperiod:[16,28],api:[1,28],apneapsgpolysomnographymccmatthew:3,appear:3,appli:[1,3,4,5,6,7,8,9,20,23,25,28],approach:[3,8,20],approxim:16,appveyor:1,arai:11,arang:[3,22],argument:[1,4,5,7,9,16,26],arithmet:18,around:[1,3,8,12,13,22,25,26],arrai:[1,3,4,5,7,8,10,11,12,13,14,15,17,18,19,22],array_lik:[4,5,7,8,9,10,12,13,14,15,18,19,20,21,23,24,25,27],art:[1,12,13,21],art_detect:1,art_epoch:8,artefact:[1,3,8,9,19,20,21,23,25,27,28],articl:[1,3,6,28],artifact:[8,28],artifact_reject:28,artifect:8,as_strid:22,ascii:3,ask:3,associ:[6,16,21],assum:[3,6,21],assumpt:1,attribut:[4,5,7,16],author:3,auto:[6,16],autom:[3,6,28],automat:[1,3,6,8,9,14,16,20,23,25,28],automatic_stag:28,avail:[1,3,5,7,18],averag:[1,4,5,6,7,9,10,16,20,23,25,27,28],avoid:1,awai:16,axi:[1,6,22],bad:8,band:[1,3,6,9,10,11,16,23,24,25,28],bandpa:25,bandpass:[1,3,4,5,7,9,25,28],bandpow:[1,10,11,28],bandpower_from_psd:[1,11],bandpower_from_psd_ndarrai:1,bandwidth:25,bandwis:24,bar:8,barach:8,barakat:25,base:[1,8,20,23,25,26,28],baseraw:[6,8,9,14,16,20,23,25],basic:[1,28],bdf:3,beaudri:23,becaus:[1,6],becom:3,bed:[1,21,27],been:[1,3,25],befor:[1,2,4,5,7
],begin:[1,20,21,23,25],behavior:[1,23],below:[1,3,5,6,7,8,28],berkelei:28,best:[1,2,8,27,28],bet:2,beta:[6,9,10,11],better:[1,3,20,23,25],between:[1,8,9,10,11,16,18,20,22,23,25],bibtex:3,biggest:1,bin:24,binari:[5,7],bio:20,biologi:8,biorxiv:[3,6,16,28],bivari:24,blatant:3,blink:8,blob:[6,8,9,16,20,23,25],blue:6,bodi:8,bonnet:21,bool:[4,5,7,8,17,20,23,24,25],both:[1,3,5,7,16,19,20],bottom:28,box:28,brain:16,brainvis:[3,28],brand:1,briefli:1,broad:[3,16,23,24],broadband:23,bug:[1,2,3,28],bugfix:1,bui:[2,3],build:1,built:3,bytesio:19,calcul:[1,5,6,7,8,9,16,19,20,21,22,23,25,27,28],call:[1,25],can:[1,2,3,5,6,7,8,9,14,16,23,25,26,27,28],canthi:20,captur:3,care:6,carrier:[3,25],cbar:27,cbar_ax:27,cbar_kw:27,cbar_tick:26,cbar_titl:26,center:[1,4,5,6,7,25],central:[1,6],certain:3,ch_name:[1,4,5,7,9,10,16,23,25],chan000:[9,10,16],chan001:[9,10,16],chang:[1,2,3,25],channel:[1,3,4,5,6,7,8,9,10,16,18,19,20,22,23,24,25,26,28],charact:1,check:[1,3,6,28],chin:6,choic:[8,20,23,25],chokroverti:21,circ_mean:25,circ_r:25,circular:25,cite:[3,6,28],classif:28,classifi:[1,3,6],click:3,clinic:21,close:23,closer:23,cluster:8,cmap:[19,26,27],code:[1,3,6,28],codebas:2,coeffici:26,coefficientnremnon:3,coffe:[2,3],coincid:[1,5,7],cold:3,color:[5,7,26],color_palett:26,colorbar:26,colormap:[19,26],column:[1,3,9,10,20,23,25,27],com:[1,3,5,6,7,8,9,16,17,18,19,20,23,25],combin:[1,3],come:[3,6,28],command:[3,28],comment:[1,3],common:6,comodulogram:28,compat:[1,3],compet:3,complet:3,complex:6,compli:[1,2],compon:[1,16,28],compumed:[1,17],comput:[1,6,8,10,11,16,18,20,21,24,25],computation:3,concaten:23,concret:6,condit:27,confid:[3,6],conflict:1,confus:1,congedo:8,consensu:3,conserv:8,consid:[2,8],consist:[1,22],contact:28,contain:[1,8,9,10,11],contamin:8,content:[3,16],continu:20,contrast:19,contribut:[1,3,28],convert:[1,6,8,9,12,13,16,20,23,25],copi:6,corr:[18,23],correctli:2,correl:[3,18,23,26],correspond:[3,12,13,14,18,22],could:[1,3],count:[1,27],coupl:[1,25,28],covar:[8,18],covari:[8,18],creat:[27,28],creation:3,critic:[1,8,20,23,25],crop:[14,21,27],cross:[6,25],csv:3,cubic:[18,24],current:[3,6,8,14,15,25,27],custom:3,damberg:21,data:[1,4,5,6,7,8,9,10,11,14,15,16,18,19,20,22,23,24,25,26,28],data_filt:[4,5,7],data_full_6hrs_100hz_cz:19,data_full_6hrs_100hz_hypno_30:19,databas:[1,3],datafram:[1,3,4,5,6,7,9,10,16,20,23,25,27],dataset:[1,28],dean:6,debug:[8,20,23,25],declar:3,defin:[1,5,7,8,9,10,11,19,20,23,25,26],definit:8,deflect:25,degre:24,delet:3,delfrat:23,delimit:3,delta:[6,9,10,11],demand:3,demonstr:28,demotiv:3,denni:6,denomin:1,denot:1,densiti:[1,10,11,16],depend:1,deprec:1,deriv:[1,6,23],describ:[1,3,6,16],descript:[3,8,28],design:3,desir:15,despit:3,detail:[1,6,9,10,11,17,19,23,25,26,28],detect:[1,4,5,6,7,8,20,23,25,28],detrend:[1,23],develop:2,developp:3,deviat:[1,6,8,23,25],df_sync:[4,5,7],diag:27,diagon:[5,7,27],dialog:3,dict:[4,5,6,7,9,12,13,16,21,23,26],dictionari:6,dictionnari:[12,13],did:6,differ:[1,3,5,6,7,8,16],difficult:1,difficulti:3,dimens:[1,6,22],dimension:[1,11],direct:25,directli:[1,19],directori:2,disabl:[1,23],discoveri:6,discret:26,discuss:2,disord:[3,6],displai:[1,28],distanc:8,distribut:[8,16,19],dive:28,divid:[5,7,9,10,11],doc:2,docstr:2,document:[1,3,6,28],doe:[1,3],doi:[3,6,16,28],done:[2,3,4,5,7,24,27],doubl:[1,6],download:28,downsampl:[1,3,6,23,25,28],dpi:26,drastic:8,driven:28,dtype:27,due:3,duggan:20,dur_neg:[1,25],dur_po:[1,25],durat:[1,3,20,21,23,25],dure:[8,23,25],dynam:3,each:[1,3,4,5,6,7,8,9,10,11,14,15,18,20,21,22,23,24,25,27],earli:3,easi:3,edf:[3,6,17,28],edfbrows:[3,6],edit
:[2,3,6],editor:[3,17,28],eeg:[1,3,5,6,7,8,9,10,14,15,16,19,22,23,28],eeg_nam:6,eeglab:3,effect:[1,8,9,20,23,25],effici:[1,21],eigenvalu:8,either:[1,5,6,7,27],ekg:8,elan:3,electrod:[6,8,26],element:26,eloc:3,emg1:6,emg2:6,emg:[1,6,8],emg_nam:6,emphasi:3,emul:23,enabl:3,encod:3,end:[19,20,23,25],engin:[8,20],enhanc:1,ensembl:[1,20,23,25],ensur:[2,18,20,23,24,25],enter:28,entir:28,entropi:[1,6],eog:[1,4,6,8,20],eog_nam:6,epoch:[1,3,6,8,17,18,21,22,25,27,28],eprint:3,equal:24,error:[3,8,20,23,25],especi:[3,6,28],essenti:[8,12,13],estim:[8,16],etc:[2,3],ethnic:6,euclidean:8,european:[3,25,28],evalu:3,even:[1,3],event:[1,4,5,7,21,28],everi:[8,16,18,24],exact:[9,14,19],exampl:[1,2,3,5,6,7,8,9,16,19,20,21,22,23,25,26,27,28],exce:1,exceed:[3,8],except:[17,21,23],exclud:[1,8,21,27],exclus:[1,6],exist:[1,2,3],expect:6,experi:[1,8],experiment:1,expert:[3,23],explain:[1,3],exponenti:16,express:[1,21,23,25],extens:3,extern:[3,6],extra:1,extract:[1,6,8,9,14,16,23,25],eye:[3,8,20,28],facilit:3,factor:16,fall:20,fals:[3,4,5,6,7,8,9,18,20,23,24,25,27],faq:[1,6,28],far:8,fast:28,faster:[1,6,8,24],fastest:[18,24],featur:[1,3,6,23,28],feedback:1,feel:[6,28],fell:8,femal:6,ferrarelli:25,few:3,field:8,fieldtrip:16,fig:[19,26],figsiz:[4,5,7,26,27],figur:[4,5,7,19,26],file:[1,3,6,28],filenam:17,filipini:25,filt:[4,5,7],filter:[1,3,4,5,7,9,20,23,25,28],filter_data:[4,5,7,9],find:[1,3,5,6,7],fine:3,fir:[1,9,23,25],first:[1,3,6,8,20,21,23,25,26,28],fit:[6,14,16],fit_param:16,fix:1,flake8:[1,2],flexibl:3,fmax:19,fmin:19,fmt:[3,27],fname:17,focus:1,folder:28,follow:[2,3,6,16,21,23,25],font:26,fontsiz:26,fooof:16,fore:9,forest:1,format:[1,2,3,4,5,7,8,9,17,19,20,21,23,25,28],found:[1,3,6,17,26,28],fourier:23,fpz:6,fractal:[6,16,28],fraction:27,fragment:27,framework:3,free:[3,6,20,23,25,28],freeli:3,freq:[10,11,16],freq_broad:23,freq_rem:20,freq_so:1,freq_sp:[1,23,25],freq_sw:[1,25],frequenc:[1,3,4,5,6,7,8,9,10,11,14,15,16,17,18,19,20,21,22,23,24,25,27,28],from:[1,3,6,8,9,16,19,20,21,22,23,25,26,27,28],full:[1,3,6,17,19,20,23,25,28],fundament:16,furthermor:1,futur:1,gamma:[9,10,11],gender:6,generaliz:2,geometr:16,geometri:8,get:[1,6,19,20,23,25],get_bool_vector:1,get_centered_indic:1,get_coincidence_matrix:[1,5,7],get_data:20,get_featur:6,get_mask:[1,4,5,7],get_sync_ev:[1,4,5,7],get_sync_sw:1,github:[2,3,5,6,7,8,9,16,17,18,19,20,23,25,28],githubusercont:19,give:[1,3,5,7,20,23,25],given:[8,10,14,15,27],global:26,goal:16,good:[8,18,24],gotman:20,grab:3,grand:1,graphic:[3,6],greater:8,grid:24,grid_kw:27,gridspec_kw:27,grigg:21,group:[1,4,5,7],grp_chan:[1,5,7,23,25],grp_stage:[1,4,5,7,20,23,25],gui:3,guidanc:2,guidelin:[1,21],guo:6,halfwai:23,ham:[9,16],handl:1,harbor:3,harmon:16,has:[1,3,8,9,18,20,23,25],have:[1,3,6,8,9,14,19,20,21,23,25,28],hdeeg:8,header:3,health:6,healthi:3,heatmap:27,height_ratio:27,help:[4,5,6,7],here:3,heterogen:3,high:[3,6,28],higher:[8,18,19,24],highest:1,highli:[2,3],higuchi:6,hilbert:[1,23],hill:25,hirshkowitz:21,hjorth:6,homogen:1,hope:3,horizont:27,hour:3,how:[1,3,8,9,16,20,25],howev:[3,8,28],hset:16,hspace:27,html:[1,2,8,26],http:[1,3,5,6,7,8,9,16,17,18,19,20,23,25,26,28],huber:25,hue:[1,5,7],huge:3,human:[3,6,23],hundr:1,hyp:3,hypno:[1,3,4,5,6,7,8,9,12,13,14,15,17,19,20,21,23,25,27],hypno_export:3,hypno_int:3,hypno_upsample_to_data:[8,9,19,20,23,25],hypnogram:[1,3,4,5,7,8,9,12,13,14,15,16,17,19,20,21,23,25,27,28],hypopnea:3,iber:21,idea:8,ideal:[3,9,16,23],ident:17,identifi:3,idxchannel:[4,5,7],ieee:[8,20],impact:19,implement:[1,3,8,11,23,25],importantli:25,improv:[1,2,3],inch:[4,5,7],includ:[1
,2,3,8,9,20,21,23,25],inconsist:3,incorrect:27,increas:[8,25],increment:16,inde:1,independ:16,index:[3,4,5,7,8,26],indexbmibodi:3,indexeegelectroencephalogrameogelectrooculogramemgelectromyogramosaobstruct:3,indic:[1,3,4,5,7,23,26],individu:[1,3,5,7],industri:3,inf:25,influenc:8,info:[8,20,23,25],informat:6,initi:[1,4,5,6,7],input:[1,16,18,24],inspect:[6,8],inspir:18,instal:[1,2,3,6],instanc:[1,4,5,6,7,22],instantan:23,instead:[1,23],integ:[8,9,12,13,16,17,19,20,21,23,25,27],inter:[3,6,8],interact:[1,5,7,28],intercept:16,interest:[1,3,9,10,11,16,25],interfac:[3,6,28],intern:1,interp2d:24,interp:[18,24],interpol:[18,24],interquartil:6,interscor:3,intracrani:3,invalid:1,invers:[9,16],involv:6,ipynb:[1,6,8,9,16,20,23,25],ipywidget:[1,5,7],irasa:[1,28],irregular:16,isol:1,isolationforest:[1,20,23,25],israel:21,issu:[1,2,6,8,28],iter:8,its:[1,6,16],jcsm:21,joblib:6,joint:8,journal:[3,6,20,21,23,25],juli:1,jupyt:[1,5,6,7,19,23,25,28],just:23,kapen:21,keenan:21,keep:1,kei:[4,5,6,7],kept:23,keyword:[1,9,16],kid:3,kind:[3,28],know:1,knowledg:28,kramer:[5,7],kryger:21,kurtosi:6,kwarg:[4,5,7,26],kwargs_welch:[9,16],lab:28,label:[3,9,10,16,27],laboratori:3,lacours:[3,23],lafortun:25,lambda_i:8,landmark:[1,4,5,7],larg:[3,8],laroch:20,last:[1,21,22],lat_n1:21,lat_n2:21,lat_n3:21,lat_rem:21,latenc:[1,21],latest:[1,5,7,8],law:16,lead:1,learn:[1,2,20],least:[1,9,16,18,19,23],led:1,left:[6,8,20,28],len:3,length:[1,3,8,9,16,19,22,25],less:8,let:[1,3],level:[8,20,23,25],leverag:3,lgbmclassifi:6,lib:22,life:25,light:21,lightgbm:[1,6],like:[1,2,3],limit:28,line:[1,3,5,7,19,28],linear:[1,16,28],lineplot:[1,4,5,7],list:[4,5,7,8,9,10,11,16,20,23,25,26],liu:16,load:[1,6,9,17,19,20,23,25,28],load_profusion_hypno:1,loadtxt:19,loc:[1,4,6,20,27],locabsfallslop:20,locabsriseslop:20,locabsvalpeak:20,local:2,locat:[20,23,25],lock:1,log10:23,log:[8,16,20,23,25],longer:[1,14,19],longitudin:1,loop:[1,3],low:[3,6],lower:[1,9,10,11,16,19,24,25],lowercas:13,lowest:[1,6],lowpass:[4,5,7],lspopt:1,machin:20,mai:[3,8,25],main:[6,8,28],maintain:28,mainten:[1,21],major:[1,6,8],majority_onli:6,make:[1,2,5,6,7,8,20,23,25,28],make_standard_montag:26,male:6,mani:[2,3],manual:[1,3,6,21],map:[3,12,13,17],mapping_dict:[12,13],mark:[1,3,5,7,8,17],martin:25,mask:[1,5,7,9,20,23,25,26],mass:3,massimini:[3,25],master:[6,8,9,16,19,20,23,25],mastoid:6,match:3,matlab:3,matplotlib:[5,7,19,26,27],matric:8,matrix:[1,5,7,8,22,27,28],matter:3,matthew:[3,6,28],max:[1,6,9,10,11,14,18,19,25,26],maxim:3,maximum:[1,9,18,20,23,25,26],mayaud:8,mcdevitt:20,mean:[1,4,5,7,8,9,16,18,19,20,23,25,27],mean_direct:25,measur:[5,7],median:[4,5,7,9,16,23],medic:[6,20],medicin:[6,8,21],mednick:20,menu:3,merg:23,messag:[8,20,23,25],metadata:6,method:[1,3,4,5,6,7,8,16,18,20,23,28],metric:27,micro:[8,9,16,20,23,25],midcross:25,middl:[18,25,28],might:3,min:[1,6,9,10,11,18,19,26],min_dist:23,minimum:[9,18,20,23,25,26],minor:1,minut:[1,21],miscellan:1,misclassif:6,misclassifi:3,miss:1,mne:[1,3,4,5,6,7,8,9,14,16,20,23,25,26,28],mobil:6,mode:[5,7],model:16,modifi:[1,3],modul:[1,3,6],montag:26,more:[1,2,6,8,9,10,11,17,19,20,23,25,26,28],most:[1,3,6,7,8,20,23,25,28],move:[1,3,18,23],movement:[3,8,9,19,20,21,23,25,28],much:[8,24],multi:[1,5,7,8,19,23,25,28],multi_onli:23,multipl:16,multipli:20,multitap:19,must:[2,6,8,9,10,11,19,20,23,25,26,27],my_hypno_edfbrows:3,my_hypno_spisop:3,my_hypno_visbrain:3,myedffil:[3,28],myfil:6,n_band:11,n_chan:[1,4,5,7,8,11,23,25],n_chan_reject:8,n_channel:[10,22],n_color:26,n_epoch:[1,8,11,17,22],n_freq:[1,10,11],n_sampl:[1,4,5,7,8,22,23,25],naiv:28,na
me:[1,3,4,5,6,7,9,10,11,16,23,25,26],nap:6,nation:[1,6,17],nativ:17,ndarrai:[6,11,16,19,24],ndpac:[1,25],need:[1,2,14,20,28],neg:[1,5,20,25],negpeak:[1,5,25],nepoch_list:3,nepoch_nb:3,net:3,network:[1,5,7],neural:8,neurophysiolog:16,neurosci:[20,23,25],newer:[1,3],newlin:3,next:[3,19,27],night:[1,3,6,8,19,28],niknazar:20,nois:8,non:[1,3,6,16,28],none:[4,5,6,7,8,9,10,14,16,18,19,20,22,23,24,25,26],nonlinear_featur:28,norm:24,normal:[1,6,19,23,24,25],note:[5,6,7,8,9,12,13,16,17,18,19,20,21,22,23,24,25,28],notebook:[1,3,6,8,9,16,19,20,23,25,26,28],notifi:3,novel:3,now:[1,21],nperseg:[18,24],npz:19,nrem:[1,21,23,25],nsampling_period:3,nsrr:[1,17],number:[1,4,5,6,7,8,9,14,19,20,23,25,26,27],numpi:[1,2,3,5,6,7,11,16,19,22,24,27,28],numpydoc:2,nutshel:16,object:[1,8],obtain:1,occur:[1,5,7,23],ocular:20,off:8,offer:[3,16],offici:21,offset:16,often:3,ojeda:8,old:1,older:3,omit:[8,9,14,16,23,25],onc:[1,3],one:[1,3,8,17,18,21,22,23,25,27],onli:[1,3,5,6,7,8,11,16,18,20,23,25],onlin:[1,8],onset:[1,3,21],open:[2,3,6,28],optim:[1,3,6],option:[1,4,5,6,7,9,16,18,19,28],org:[3,6,16,26,28],orient:27,origin:[7,16,23,27],oscil:[1,23,25,28],oscillatori:[1,16],other:[1,26],ouput:[4,5],our:3,out:[1,3,18,21,28],outdat:[1,3],outlier:[1,5,7,20,23,25],output:[1,4,5,7,18,20,23,24,25],over:22,overal:3,overlai:[1,5,7,28],overlap:[18,22,24],own:[3,28],pac:[1,25],packag:[1,2,3,5,6,7,8,18,28],pad:14,pair:[1,5,7,16],palett:[6,26],panda:[1,3,4,5,6,7,9,10,12,13,16,20,23,25,26,27,28],panel:28,paquet:25,paramet:[1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27],part:[3,4,5,7],particip:[1,6],particular:3,pass:[1,2,4,5,7,8,9,16,20,26],past:[1,6],path:[6,17],path_to_model:6,patient:3,paywal:3,pdf:3,peak:[1,4,5,7,18,20,23,25],pearson:26,penzel:21,pep8:2,peppard:23,per:[1,14,15,17,21,23,25,27],percentag:21,percentil:[1,19],perform:[1,3,6,8,18,23,24,25,28],period:[1,16,21,27],permut:6,petrosian:6,phase:[1,25,28],phaseatsigmapeak:[1,25],physic:1,pick_channel:[3,28],pingouin:[3,25],pip:[1,2,3,28],pipelin:[1,3,28],place:[20,23,25],pleas:[1,2,3,6,8,9,16,17,19,20,23,25,26,28],plot:[1,3,4,5,6,7,19,26,27,28],plot_averag:[1,4,5,7],plot_detect:[1,3,5,7],plot_predict_proba:6,plot_spectrogram:[1,3],plot_topomap:[1,26],plt:27,point:[3,5,6,7,8,23,24,25],pointwis:24,poirier:25,polysomnograph:3,polysomnographi:[1,6,28],popul:3,posit:[8,23,25,26],pospeak:25,possibl:[1,3],post:[1,27],postdoctor:28,potato:8,potenti:[2,3,6],power:[1,6,9,10,11,16,23,24,28],pre:[1,6,8,10,27],precis:[18,24],pred:6,predict:[3,6],predict_proba:6,preferenti:6,preload:[3,6,28],preprint:[1,3,6,28],preprocess:[3,28],prerequisit:28,present:[8,23],pressman:21,previou:1,previous:[1,3,21,25],print:[1,8,20,23,25],prior:8,prob:27,proba:6,probabl:[6,27,28],process:1,produc:[5,7],product:[5,7,20],profus:[1,17],program:28,programm:3,promin:[1,7,23],prompt:28,prop_above_zero:18,properli:2,properti:[20,23,25],proport:[18,23],propos:3,provid:[3,4,5,6,7,8,20,23,25,28],psd:[1,9,10,11,16,19],psd_aperiod:16,psd_oscillatori:16,psg:6,ptp:[18,25],publish:[1,3],pull:28,purpl:6,put:23,pydata:26,pyplot:27,pyriemann:[1,8],pytest:2,python:[1,2,3,8,18,28],qiang:6,qualiti:[3,8],question:[3,6],quickli:[1,3,5,7],radian:[1,25],randint:22,random:[20,22,23,25],rang:[1,6,16,20,23,24,25,26],raphael:[3,6,28],raphaelvallat:[1,3,6,8,9,16,19,20,23,25],rapid:[3,20,28],rater:[3,6],rather:3,ratio:[6,23],raw:[1,3,4,5,6,7,19,20,23,28],rdbu_r:19,reach:3,read:[19,28],read_raw_edf:[3,6,28],readabl:1,reader:6,readm:1,readthedoc:[5,7,8],real:28,realtim:3,reason:[3,6],receiv:1,recent:1,reciproc:16,recomme
nd:[1,2,3,8,27,28],record:[1,3,6,21,28],rectangular:24,rectbivariatesplin:24,redistribut:16,refer:[1,5,6,7,8,9,16,17,19,20,21,23,25,26,28],referenc:6,regress:18,rehabilit:8,reject:[8,28],rel:[1,6,9,10,11,23],rel_pow:23,relat:25,relationship:8,releas:[1,3],relev:25,relpow:23,rem:[1,3,4,8,9,13,17,19,20,21,23,25,28],rem_detect:1,remain:[3,16],remov:[1,3,8,20,23,25],remove_outli:[20,23,25],remrapid:3,remresult:[1,20],rems_detect:28,renam:1,render:2,reorgan:1,replac:[3,17],report:[2,3,28],repositori:28,repres:3,represent:28,reprocess:1,reproduc:[20,23,25,28],request:[19,28],requir:[1,3,5,7,20],resampl:[1,3,16,23,25,28],research:[1,6,17,28],resolut:[8,24,26],resourc:[1,6,17],respect:[14,25],restructuredtext:2,result:[1,3,6,8,14,16,20,22,23,25,27],return_fit:16,review:1,rich:6,riemannian:8,right:[8,20,27,28],rise:20,rms:[18,23],robillard:25,robust:6,roc:[1,4,20],rocabsfallslop:20,rocabsriseslop:20,rocabsvalpeak:20,rodent:3,roll:[1,6],root:[1,18,23],round:27,row:[9,10,20,22,23,25,27,28],rule:21,run:[1,2,3,5,6,7,8,20,25,28],run_visbrain:28,s10548:16,safeti:1,sai:3,sake:3,same:[3,5,7,8,9,14,16,18,19,20,23,24,25],sampl:[1,3,4,5,7,8,9,14,15,16,17,18,19,20,21,22,23,24,25,27],sattari:20,save:3,savetxt:3,scale:[1,5,6,7],scalp:3,scientif:6,scikit:1,scipi:[9,10,11,16,24],score:[1,3,6,8,21,23,25],scorer:[3,6],screenshot:3,scroll:3,seaborn:[1,4,5,7,26,27],sec:[1,6,18,19,25],second:[1,3,4,5,6,7,8,9,14,15,16,17,18,19,20,21,22,23,24,25,27,28],section:[1,6],see:[1,2,3,4,5,6,7,8,10,11,28],seed:[20,22,23,25],select:[1,3,23,28],self:[4,5,6,7],semilog:16,sens:28,sensit:8,sep:3,separ:[3,8,16,28],sequenti:9,seri:[3,12,13,18,26],serv:3,set:[1,6,22,25],set_label_posit:27,set_xlabel:27,set_ylabel:27,setup:3,sever:[1,3,27],sex:[1,6],sf_data:[14,15],sf_hyp:[17,21],sf_hypno:[14,15],shape:[1,4,5,7,8,10,11,14,22,23,24,25],shorter:[8,14],should:[1,3,6,9,16,21,23,25,28],show:[3,28],shown:[3,5,7,28],sigma:[1,7,9,10,11,23,25],sigma_coupl:28,sigmapeak:[1,25],signal:[1,5,7,8,9,10,11,16,18,20,22,23,25],signatur:[4,5,6,7,8],signific:[1,26],significantli:25,silber:21,similar:3,similarli:1,simpl:[1,3,28],simpler:1,simplest:[2,3],simpli:[3,5,7,8,16,28],sinc:[21,25],singl:[1,8,18,19,23,24,25,28],situat:2,size:[4,5,7,18,22,24,26],skew:6,sklearn:[1,20,23,25],sleep:[1,4,5,6,7,8,9,12,13,14,15,17,19,20,21,23,25,27,28],sleep_statist:1,sleepstag:[1,3],slide:[9,16,19,22],sliding_window:1,slight:1,slightli:[1,3],slope:[16,18,20,25,28],slow:[1,3,5,25,28],slower:[18,24],slowest:[18,24],sls:6,small:3,sme:[1,21],smooth:[3,6],sns:27,societi:8,softwar:[3,28],sol:21,sole:16,solv:1,some:[1,3,6,8,28],soon:8,sophas:23,sort:[4,5,7],sourc:[3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28],space:16,specest:16,specif:[1,3,4,5,6,7,8,21,25,28],specifi:[1,9,10,24,25],spectra:16,spectral:[1,9,10,11,16,28],spectral_r:19,spectrogram:[3,19,28],spectrum:[16,28],speed:[1,3],sphinx:2,sphinx_bootstrap_them:2,spindl:[1,3,7,23,25,28],spindles_detect:[1,28],spindles_detect_multi:1,spindles_detection_multi:28,spindles_detection_nrem_onli:28,spindles_slow_fast:28,spindlesresult:[1,23],spisop:3,spline:24,spring:3,spt:[1,21,27],squar:[1,18,23,27],stabil:27,stabl:1,stage:[1,4,5,6,7,8,9,12,13,14,15,19,20,21,23,25,27,28],standard:[1,2,3,6,8,9,21,23,25,28],standard_1020:26,start:[2,3,20,22,23,25,27],stat:21,state:[1,27],statementth:3,statist:[1,16,21,25,28],statu:6,std:8,step:[3,16,18,20,22,23,24,25,28],stft:24,stochast:27,store:3,str:[4,5,6,7,8,17,18,19,20,23,25,26],straightforward:8,stratifi:28,stream:19,stride:22,stride_trick:22,string:[12,13],strong:1,stron
gli:28,studi:[1,20],style:2,sub:[1,6],submit:28,subplot:27,subset:[3,28],sum:[1,5,7,21,24,27],sum_i:8,summari:[1,4,5,7,20,23,25],sunflow:6,support:[1,3,6,8,11],sure:[1,2,5,6,7,8,20,23,25,28],sw_detect:[1,28],sw_detect_multi:1,sw_detection_multi:28,swresult:[1,25],sxx:24,symmetr:[5,7,8],symmetri:23,synchron:[1,4,5,7],system:[3,6,8],tab:3,take:[8,16,23,25,27],taken:1,takeuchi:20,taper:[19,28],technic:21,templat:[1,28],tempor:3,ten:1,tensorpac:1,term:23,termin:[3,28],terminolog:21,test:[1,2],teuniz:3,text:[3,16],than:[1,8,14,19,20,23,24,25,28],thei:[2,3,23,28],them:2,therefor:[1,3,6,8,14,20],theta:[9,10,11],thi:[1,2,3,4,5,6,7,8,9,11,12,13,16,18,19,20,22,23,25,26,28],think:1,those:2,thousand:3,three:1,thresh:23,threshold:[1,3,8,20,23,25],through:[1,3,23,25],thu:[1,21],tib:[1,21,27],tick:26,tick_top:27,time:[1,3,4,5,7,8,9,16,18,20,21,22,23,24,25,27,28],time_aft:[1,4,5,7],time_bas:3,time_befor:[1,4,5,7],timepoint:[1,4,5,7],timestamp:1,titl:[3,26],to_csv:3,to_numpi:3,tobi:8,togeth:3,tononi:25,too:[1,3,8,23,25],tool:[3,6,8,16,28],toolbox:[3,28],top:[3,19,27,28],topograph:28,topographi:16,topoplot:[1,28],total:[1,6,9,10,11,21,23],totalabspow:1,toward:6,train:[1,3,6],tranisit:28,transact:[8,20],transform:[1,8,18,23],transform_sign:18,transit:[1,23,25,27,28],transition_matrix:1,translat:17,travel:25,travi:1,triangular:6,trim:[1,19],trimbothstd:1,trimperc:19,trough:25,tst:[1,21],tune:3,tupl:[4,5,7,8,9,10,11,16,20,23,24,25,26],turn:6,tutori:[25,26,28],twice:19,twilight:6,two:[1,3,4,5,7,9,16,17,18,23],txt:[3,19],type:[3,4,5,6,7],typic:8,unecessari:1,unfortun:1,unit:[1,8,20,23,25],univers:[3,6,28],uns:13,unscal:7,unscor:[1,8,9,19,20,21,23,25,27],until:3,updat:[1,2,3],upgrad:[1,2,3,28],upon:[1,3],upper:[1,9,10,11,19,25],upsampl:[1,8,9,14,15,19,20,23,25,27],url:3,use:[1,2,3,4,5,6,7,8,9,16,18,20,23,24,25,26,28],used:[1,3,4,5,6,7,9,16,18,19],useful:[1,3],user:[1,3,6,8,20,23,25,28],userwarn:14,uses:[1,2,3,8,20,23,25],using:[1,3,4,5,6,7,8,9,16,20,23,24,25,27,28],usual:[18,24],utf:3,valid:[1,3,6,26],vallat2021:3,vallat:[3,6,28],valnegpeak:25,valpospeak:25,valu:[1,5,7,8,9,14,15,16,17,18,19,20,21,23,24,25,26,27],vandewal:25,vari:[3,16],variabl:[5,7],varianc:8,variou:2,various:16,vector:[1,4,5,7,8,9,16,18,19,20,21,22,23,24,25],vector_length:25,verbos:[1,8,20,23,25],veri:[3,6,8],version:[1,3,5,6,7,8,9,10,11,12,13,14,15,16,19,20,21,22,23,25,26,27],versu:28,via:[1,3,24],vien:25,visbrain:[1,3,28],visit:28,visual:[6,8,21,28],viz:[1,26],vmax:[26,27],vmin:[26,27],volt:[1,6,8,9,16,20,23,25],wai:[1,2,3,5,7],wake:[3,6,8,9,13,17,19,20,21,23,25,27],walker:[3,6,28],walkthrough:23,want:[1,2,3,25,28],warbi:23,warn:[1,8,14,20,23,25],warranti:28,waso:[1,21],watch:3,wave:[1,3,5,25,28],web:28,websit:[1,17],weight:6,welch:[9,10,11,16],welcom:28,well:[1,3,6,9,10,11],wen:16,were:[1,5,6,7],what:[27,28],when:[1,2,3,4,5,6,7,8,9,19,20,23,25],whenev:3,where:[3,4,8,9,16,19,20,21,22,23,25],wherea:16,whether:[1,3],which:[1,3,5,6,7,8,9,10,11,14,16,17,22,23,24,25],whitehurst:20,whole:28,wide:[6,25],widget:[5,7],wiki:17,win:3,win_sec:[9,16,19],window:[1,3,8,9,16,18,19,22,24],within:[2,21,25],without:27,wonambi:18,word:[1,3],work:[1,3,5,6,7,8,28],workshop:8,world:3,would:3,wrapper:[1,8,12,13,22,26],www:[3,16],xaxi:27,xkcd:6,xml:[1,17],yasa:[1,3],yasa_classifi:[3,6],year:[3,6,25],yet:[3,28],yetton:20,ylorrd:27,you:[1,2,3,6,8,20,23,25,28],your:[1,2,3,6,8,9,19,20,23,25,28],zero:[6,18,25],zhang:6,zscore:8},titles:["API reference","What\u2019s new","Contribute to 
YASA","FAQ","yasa.REMResults","yasa.SWResults","yasa.SleepStaging","yasa.SpindlesResults","yasa.art_detect","yasa.bandpower","yasa.bandpower_from_psd","yasa.bandpower_from_psd_ndarray","yasa.hypno_int_to_str","yasa.hypno_str_to_int","yasa.hypno_upsample_to_data","yasa.hypno_upsample_to_sf","yasa.irasa","yasa.load_profusion_hypno","yasa.moving_transform","yasa.plot_spectrogram","yasa.rem_detect","yasa.sleep_statistics","yasa.sliding_window","yasa.spindles_detect","yasa.stft_power","yasa.sw_detect","yasa.topoplot","yasa.transition_matrix","Installation"],titleterms:{"new":1,analys:0,api:0,april:1,art_detect:8,august:1,automat:0,bandpow:9,bandpower_from_psd:10,bandpower_from_psd_ndarrai:11,build:2,check:2,citat:28,code:2,contribut:2,data:3,decemb:1,detect:[0,3],develop:28,document:2,event:[0,3],faq:3,februari:1,galleri:28,get:28,guidelin:2,how:28,hypno_int_to_str:12,hypno_str_to_int:13,hypno_upsample_to_data:14,hypno_upsample_to_sf:15,hypnogram:0,instal:28,irasa:16,januari:1,load:3,load_profusion_hypno:17,mai:1,march:1,moving_transform:18,novemb:1,octob:1,other:3,plot:0,plot_spectrogram:19,polysomnographi:3,refer:0,rem_detect:20,remresult:4,sleep:[0,3],sleep_statist:21,sleepstag:6,sliding_window:22,spectral:0,spindles_detect:23,spindlesresult:7,stage:[0,3],start:28,statist:0,stft_power:24,sw_detect:25,swresult:5,tool:0,topoplot:26,transition_matrix:27,visual:3,what:1,yasa:[2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28]}})
\ No newline at end of file
diff --git a/docs/build/plot_directive/generated/yasa-plot_spectrogram-1.png b/docs/build/plot_directive/generated/yasa-plot_spectrogram-1.png
index 3851613..f14dc46 100644
Binary files a/docs/build/plot_directive/generated/yasa-plot_spectrogram-1.png and b/docs/build/plot_directive/generated/yasa-plot_spectrogram-1.png differ
diff --git a/docs/build/plot_directive/generated/yasa-plot_spectrogram-2.png b/docs/build/plot_directive/generated/yasa-plot_spectrogram-2.png
index fcc7761..29b856d 100644
Binary files a/docs/build/plot_directive/generated/yasa-plot_spectrogram-2.png and b/docs/build/plot_directive/generated/yasa-plot_spectrogram-2.png differ
diff --git a/docs/build/plot_directive/generated/yasa-topoplot-1.png b/docs/build/plot_directive/generated/yasa-topoplot-1.png
index 9b49839..d2cae33 100644
Binary files a/docs/build/plot_directive/generated/yasa-topoplot-1.png and b/docs/build/plot_directive/generated/yasa-topoplot-1.png differ
diff --git a/docs/build/plot_directive/generated/yasa-topoplot-2.png b/docs/build/plot_directive/generated/yasa-topoplot-2.png
index bc0e522..2011471 100644
Binary files a/docs/build/plot_directive/generated/yasa-topoplot-2.png and b/docs/build/plot_directive/generated/yasa-topoplot-2.png differ
diff --git a/docs/build/plot_directive/generated/yasa-transition_matrix-1.png b/docs/build/plot_directive/generated/yasa-transition_matrix-1.png
index 11ddc13..2fa259f 100644
Binary files a/docs/build/plot_directive/generated/yasa-transition_matrix-1.png and b/docs/build/plot_directive/generated/yasa-transition_matrix-1.png differ
diff --git a/docs/changelog.rst b/docs/changelog.rst
index 89503d5..6cf36e3 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -6,19 +6,36 @@ What's new
v0.5.0 (August 2021)
--------------------
-This is a major release with an important bugfix for the slow-waves detection as well as API-breaking changes in the SleepStaging module. We recommend all users to upgrade to this version with `pip install --upgrade yasa`.
+This is a major release with an important bugfix for the slow-waves detection as well as API-breaking changes in the automatic sleep staging module. We recommend that all users upgrade to this version with `pip install --upgrade yasa`.
-**Bugfix**
+**Slow-waves detection**
-a. Fixed a bug in :py:func:`yasa.sw_detect` in which the detection could return event with very long duration (e.g. several tens of seconds). We have now added extra safety checks to make sure that the duration of slow-waves duration does not exceed the maximum duration allowed by the ``dur_neg`` and ``dur_pos`` parameters (e.g. 2.5 seconds). Please make sure to double check your results.
-b. Artefact and Unscored epochs are now excluded from the calculation of the total sleep time (TST) in :py:func:`yasa.sleep_statistics`. Previously, YASA calculated TST as SPT - WASO, thus including Art and Uns. TST is now calculated as the sum of all REM and NREM sleep in SPT.
+We have fixed a critical bug in :py:func:`yasa.sw_detect` in which the detection could keep slow-waves with invalid duration (e.g. several tens of seconds). We have now added extra safety checks to make sure that the total duration of the slow-waves does not exceed the maximum duration allowed by the ``dur_neg`` and ``dur_pos`` parameters (default = 2.5 seconds).
-**New functions**
+.. warning::
+ Please make sure to double-check any results obtained with :py:func:`yasa.sw_detect`.
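+
+As a quick sketch of where these bounds live (``data`` and ``sf`` are assumed to be a single-channel EEG array in µV and its sampling frequency; the values shown correspond to the 2.5-second maximum mentioned above):
+
+.. code-block:: python
+
+    import yasa
+    # data, sf: assumed inputs (single-channel EEG in µV and its sampling rate in Hz)
+    # Negative phase capped at 1.5 s and positive phase at 1.0 s,
+    # i.e. a maximum total slow-wave duration of 2.5 s
+    sw = yasa.sw_detect(data, sf, dur_neg=(0.3, 1.5), dur_pos=(0.1, 1.0))
+    events = sw.summary()  # one row per detected slow-wave; see the 'Duration' column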
-a. Added the :py:meth:`yasa.SpindlesResults.get_coincidence_matrix` and :py:meth:`yasa.SWResults.get_coincidence_matrix` methods to calculate the (scaled) coincidence matrix.
-The coincidence matrix gives, for each pair of channel, the number of samples that were marked as an event (spindles or slow-waves) in both channels. In other words, it gives an indication of whether events (spindles or slow-waves) are co-occuring for any pair of channel. The scaled version of the coincidence matrix can then be used to define functional networks or quickly find outlier channels.
+**Sleep staging**
-**Enhancements**
+Recently, we have published a `preprint article `_ describing YASA's sleep staging algorithm and its validation across hundreds of polysomnography recordings. In July 2021, we received comments from three reviewers, which led us to implement several changes to the sleep staging algorithm.
+The most significant change is that the lengths of the rolling windows have been updated from 5.5 minutes (centered) / 5 minutes (past) to 7.5 minutes (centered) / 2 minutes (past), leading to slight improvements in accuracy. We have also updated the training database and the parameters of the LightGBM classifier.
+Unfortunately, these changes mean that the new version of the algorithm is no longer compatible with the previous version (0.4.0 or 0.4.1). Therefore, if you're running a longitudinal study with YASA's sleep staging, we recommend either keeping the previous version of YASA, or updating to the new version and reprocessing all your nights with the new algorithm for consistency.
+
+**Sleep statistics**
+
+Artefact and Unscored epochs are now excluded from the calculation of the total sleep time (TST) in :py:func:`yasa.sleep_statistics`. Previously, YASA calculated TST as SPT - WASO, thus including Art and Uns. TST is now calculated as the sum of all REM and NREM sleep in SPT.
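+
+A minimal sketch of the updated behavior (assuming ``hypno`` is a standard YASA integer hypnogram with one value per 30-sec epoch):
+
+.. code-block:: python
+
+    import yasa
+    # hypno: assumed input, one value per 30-sec epoch
+    # (convention: -2 = Unscored, -1 = Artefact, 0 = Wake, 1 = N1, 2 = N2, 3 = N3, 4 = REM)
+    stats = yasa.sleep_statistics(hypno, sf_hyp=1/30)
+    # TST is now the sum of all REM and NREM epochs within SPT,
+    # i.e. Art and Uns epochs no longer count towards TST
+    print(stats['SPT'], stats['WASO'], stats['TST'])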
+
+**New FAQ**
+
+The online documentation now has a brand new FAQ section! Make sure to check it out at https://raphaelvallat.com/yasa/build/html/faq.html
+
+**New function: coincidence matrix**
+
+We have added the :py:meth:`yasa.SpindlesResults.get_coincidence_matrix` and :py:meth:`yasa.SWResults.get_coincidence_matrix` methods to calculate the (scaled) coincidence matrix.
+The coincidence matrix gives, for each pair of channels, the number of samples that were marked as an event (spindles or slow-waves) in both channels. In other words, it gives an indication of whether events (spindles or slow-waves) are co-occurring for any pair of channels.
+The scaled version of the coincidence matrix can then be used to define functional networks or quickly find outlier channels.
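+
+A minimal sketch (assuming ``data`` is a channels-by-samples NumPy array in µV, with ``sf`` the sampling frequency and ``ch_names`` the list of channel labels):
+
+.. code-block:: python
+
+    import yasa
+    # data, sf, ch_names: assumed inputs for a multi-channel recording
+    sp = yasa.spindles_detect(data, sf, ch_names=ch_names)
+    # Symmetric channels-by-channels DataFrame; larger values = more co-occurring spindles
+    coincidence = sp.get_coincidence_matrix(scaled=True)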
+
+**Minor enhancements**
a. Minor speed improvements in :py:class:`yasa.SleepStaging`.
b. Updated dependency to pyRiemann>=0.2.7, which solves the version conflict with scikit-learn (see `issue 33 `_).
@@ -48,7 +65,7 @@ v0.4.0 (November 2020)
This is a major release with several new functions, the biggest of which is the addition of an **automatic sleep staging module** (:py:class:`yasa.SleepStaging`). This means that YASA can now automatically score the sleep stages of your raw EEG data. The classifier was trained and validated on more than 3000 nights from the `National Sleep Research Resource (NSRR) `_ website.
-Briefly, the algorithm works by calculating a set of features for each 30-sec epochs from a central EEG channel (required), as well as an EOG channel (optional) and an EMG channel (optional). For best performance, users can also specify the age and the sex of the participants. Pre-trained classifiers are already included in YASA. The automatic sleep staging algorithm requires the `LightGBM `_ and `antropy `_ package.
+Briefly, the algorithm works by calculating a set of features for each 30-sec epoch from a central EEG channel (required), as well as an EOG channel (optional) and an EMG channel (optional). For best performance, users can also specify the age and the sex of the participants. Pre-trained classifiers are already included in YASA. The automatic sleep staging algorithm requires the `LightGBM `_ and `antropy `_ packages.
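+
+A minimal usage sketch (the channel names, age and sex below are illustrative placeholders):
+
+.. code-block:: python
+
+    import mne
+    import yasa
+    raw = mne.io.read_raw_edf('MYEDFFILE.edf', preload=True)
+    # Replace the channel names and metadata with your own
+    sls = yasa.SleepStaging(raw, eeg_name='C4-A1', eog_name='EOG1', emg_name='EMG1',
+                            metadata=dict(age=35, male=True))
+    hypno_pred = sls.predict()   # one stage label per 30-sec epoch
+    proba = sls.predict_proba()  # stage probabilities for each epoch
+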
**Other changes**
@@ -60,7 +77,7 @@ d. The :py:func:`yasa.sw_detect` now also returns the timestamp of the sigma pea
**Dependencies**
a. Switch to latest version of `TensorPAC `_.
-b. Added `ipywidgets `_, `LightGBM `_ and `entropy `_ to dependencies.
+b. Added `ipywidgets `_, `LightGBM `_ and `entropy `_ to dependencies.
----------------------------------------------------------------------------------------
diff --git a/docs/conf.py b/docs/conf.py
index 363d5ca..93a6f27 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -72,7 +72,7 @@
# General information about the project.
project = 'yasa'
author = 'Raphael Vallat'
-copyright = u'2018-{}, Raphael Vallat'.format(time.strftime("%Y"))
+copyright = u'2018-{}, Dr. Raphael Vallat, Center for Human Sleep Science, UC Berkeley'.format(time.strftime("%Y"))
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -119,6 +119,7 @@
'navbar_class': "navbar",
'navbar_links': [
("Functions", "api"),
+ ("FAQ", "faq"),
("What's new", "changelog"),
("Contribute", "contributing")],
}
@@ -180,7 +181,7 @@
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'yasa', 'yasa Documentation',
- author, 'yasa', 'Sleep microstructure analysis.',
+ author, 'yasa', 'Sleep analysis.',
'Miscellaneous'),
]
diff --git a/docs/contributing.rst b/docs/contributing.rst
index 300deaa..25da5a5 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -5,12 +5,12 @@ Contribute to YASA
There are many ways to contribute to YASA: reporting bugs, adding new functions, improving the documentation, etc...
-If you like YASA, you can also consider `buying me a coffee `_!
+If you like YASA, you can also consider `buying the developers a coffee `_!
Code guidelines
---------------
-*Before starting new code*, we highly recommend opening an issue on `GitHub `_ to discuss potential changes.
+Before starting new code, we highly recommend opening an issue on `GitHub `_ to discuss potential changes.
* Please use standard `pep8 `_ and `flake8 `_ Python style guidelines. To test that your code complies with those, you can run:
diff --git a/docs/faq.rst b/docs/faq.rst
new file mode 100644
index 0000000..166f507
--- /dev/null
+++ b/docs/faq.rst
@@ -0,0 +1,388 @@
+.. _faq:
+
+FAQ
+===
+
+Loading and visualizing polysomnography data
+--------------------------------------------
+
+.. ----------------------------- LOAD EDF -----------------------------
+.. raw:: html
+
+
+
+If you have polysomnography data in European Data Format (.edf), you can use the `MNE package `_ to load and preprocess your data in Python. MNE also supports several other standard formats (e.g. BrainVision, BDF, EEGLab). A simple preprocessing pipeline using MNE is shown below.
+
+.. code-block:: python
+
+ import mne
+ # Load the EDF file
+ raw = mne.io.read_raw_edf('MYEDFFILE.edf', preload=True)
+ # Downsample the data to 100 Hz
+ raw.resample(100)
+ # Apply a bandpass filter from 0.1 to 40 Hz
+ raw.filter(0.1, 40)
+ # Select a subset of EEG channels
+ raw.pick_channels(['C4-A1', 'C3-A2'])
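+
+If you then want to pass the data to YASA's detection functions, a common next step is to extract a NumPy array and the sampling frequency from the MNE Raw object (a sketch; note that most YASA functions expect the data in µV):
+
+.. code-block:: python
+
+    # Extract the data as a (n_chan, n_samples) NumPy array
+    data = raw.get_data() * 1e6   # MNE stores the data in Volts: convert to µV
+    sf = raw.info["sfreq"]        # sampling frequency, in Hz
+    chan = raw.ch_names           # channel names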
+
+.. ----------------------------- VISUALIZE -----------------------------
+.. raw:: html
+
+
+
+YASA is a command-line toolbox and does not support data visualization. To scroll through your data, we recommend the free software EDFBrowser (https://www.teuniz.net/edfbrowser/):
+
+.. figure:: /pictures/edfbrowser_with_hypnogram.png
+ :align: center
+
+.. .. ----------------------------- HYPNOGRAM -----------------------------
+.. .. raw:: html
+
+..
+
+The **spindles** detection is a custom adaptation of the `Lacourse et al 2018 `_ method. A step-by-step description of the algorithm can be found in `this notebook `_.
+
+The **slow-waves detection** combines the methods proposed in `Massimini et al 2004 `_ and `Carrier et al 2011 `_. A step-by-step description of the algorithm can be found `here `_.
+
+.. important::
+ Both algorithms have parameters that can (and should) be fine-tuned to your data, as explained in the next question.
+
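+For example, a minimal call with the default parameters (a sketch, assuming ``data`` is a NumPy array in µV and ``sf`` the sampling frequency in Hz):
+
+.. code-block:: python
+
+    import yasa
+    sp = yasa.spindles_detect(data, sf)   # spindles detection
+    sw = yasa.sw_detect(data, sf)         # slow-waves detection
+    # Each detection returns a results class; .summary() gives one row per detected event
+    sp.summary()
+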
+.. ----------------------------- PARAMETERS -----------------------------
+.. raw:: html
+
+
+
+There are several parameters that can be adjusted in the spindles / slow-waves / artefact detection. While the default parameters should work reasonably well on most data, they might not be adequate for your data, especially if you're working with specific populations (e.g. older adults, kids, patients with certain disorders, etc).
+
+For the sake of example, let's say that you have 100 recordings and you want to apply YASA to automatically detect the spindles. However, you'd like to fine-tune the parameters to your data. **We recommend the following approach:**
+
+1. Grab a few representative recordings (e.g. 5 or 10 out of 100) and manually annotate the sleep spindles. You can use `EDFBrowser `_ to manually score the sleep spindles. Ideally, the manual scoring should be high-quality, so you may also ask a few other trained individuals to score the same data until you reach a consensus.
+2. Apply YASA on the same recordings, first with the default parameters and then by slightly varying each parameter. For example, you may want to use a different detection threshold each time you run the algorithm, or a different frequency band for the filtering. In other words, you loop across several possible combinations of parameters. Save the resulting detection dataframe for each combination.
+3. Finally, find the combination of parameters that gives results most similar to your own scoring. For example, you can pick the combination that maximizes the `F1-score `_ of the detected spindles against your own visual detection (a minimal sketch of steps 2 and 3 is shown after this list).
+4. Use the "winning" combination to score the remaining recordings in your database.
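+
+The sketch below illustrates steps 2 and 3 for the spindles detection, using a simple sample-by-sample F1-score as a proxy for agreement. The names ``data``, ``sf`` and ``manual_mask`` (a boolean array marking your manually-scored spindles, sample by sample) are assumptions, and the parameter grid is only an example:
+
+.. code-block:: python
+
+    import numpy as np
+    import yasa
+    from sklearn.metrics import f1_score
+
+    scores = {}
+    for rel_pow in [0.1, 0.2, 0.3]:
+        for corr in [0.60, 0.65, 0.70]:
+            sp = yasa.spindles_detect(data, sf,
+                                      thresh={"rel_pow": rel_pow, "corr": corr, "rms": 1.5})
+            # spindles_detect returns None when no spindle is detected
+            mask = sp.get_mask().squeeze() if sp is not None else np.zeros_like(manual_mask)
+            scores[(rel_pow, corr)] = f1_score(manual_mask, mask)
+
+    # "Winning" combination of parameters
+    best_rel_pow, best_corr = max(scores, key=scores.get)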
+
+.. ----------------------------- MANUAL EDITING -----------------------------
+.. raw:: html
+
+
+
+YASA does not currently support visual editing of the detected events. However, you can import the events as annotations in `EDFBrowser `_ and edit the events from there. If you simply want to visualize the detected events (no editing), you can also use the `plot_detection `_ method.
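+
+For example, a minimal sketch of such an export (``sp`` is the output of the spindles detection; the column names are only an example and can be mapped to the right fields in EDFBrowser's import dialog):
+
+.. code-block:: python
+
+    import pandas as pd
+    events = sp.summary()   # one row per detected spindle, Start/Duration in seconds
+    annots = pd.DataFrame({
+        "onset": events["Start"],
+        "description": "Spindle",
+        "duration": events["Duration"]})
+    annots.to_csv("my_spindles_EDFBrowser.csv", index=False)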
+
+.. raw:: html
+
+
+
+YASA was trained and evaluated on a large and heterogeneous database of thousands of polysomnography recordings, including healthy individuals and patients with sleep disorders. Overall, the results show that **YASA matches human inter-rater agreement, with an accuracy of ~85% against expert consensus scoring**. The full validation of YASA can be found in the preprint article:
+
+* Raphael Vallat and Matthew P. Walker (2021). *A universal, open-source, high-performance tool for automated sleep staging*. bioRxiv 2021.05.28.446165; doi: https://doi.org/10.1101/2021.05.28.446165
+
+However, our recommendation is that **YASA should not replace human scoring, but rather serve as a starting point to speed up sleep staging**. If possible, you should always have a trained sleep scorer visually check the predictions of YASA, with a particular emphasis on low-confidence epochs and/or N1 sleep epochs, as these are the epochs most often misclassified by the algorithm.
+Finally, users can also leverage the :py:func:`yasa.plot_spectrogram` function to plot the predicted hypnogram on top of the full-night spectrogram. Such plots are very useful to quickly identify blatant errors in the hypnogram.
+
+.. figure:: /pictures/spectrogram.png
+ :align: center
+
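+For example, a minimal sketch (the channel names and the ``raw`` MNE object are placeholders; the predicted hypnogram is converted to integers and upsampled to the data before plotting):
+
+.. code-block:: python
+
+    import yasa
+    # `raw` is an MNE Raw object containing the polysomnography data
+    sls = yasa.SleepStaging(raw, eeg_name="C4-A1", eog_name="EOG1", emg_name="EMG1")
+    hypno_pred = sls.predict()                     # one stage per 30-sec epoch
+    confidence = sls.predict_proba().max(axis=1)   # per-epoch confidence, useful for review
+
+    # Plot the predicted hypnogram on top of the full-night spectrogram
+    data = raw.get_data(picks=["C4-A1"]).ravel() * 1e6   # µV
+    sf = raw.info["sfreq"]
+    hypno_int = yasa.hypno_str_to_int(hypno_pred)
+    hypno_up = yasa.hypno_upsample_to_data(hypno_int, sf_hypno=1/30, data=data, sf_data=sf)
+    yasa.plot_spectrogram(data, sf, hypno_up)
+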
+.. raw:: html
+
+
+
+
+
+.. ----------------------------- EDITING -----------------------------
+.. raw:: html
+
+
+
+YASA does not come with a graphical user interface (GUI), so editing the predicted hypnogram within YASA is not currently possible. The simplest workaround is to export the hypnogram in CSV format and then open it, together with the corresponding polysomnography data, in an external GUI, as shown below.
+
+----------
+
+**EDFBrowser**
+
+`EDFBrowser `_ is a free software for visualizing polysomnography data in European Data Format (.edf), which also provides a module for visualizing and editing hypnograms.
+
+The code below shows how to export the hypnogram in an EDFBrowser-compatible format. It assumes that you have already run the algorithm and stored the predicted hypnogram in an array named ``hypno``.
+
+.. code-block:: python
+
+ # Export to a CSV file compatible with EDFBrowser
+ import numpy as np
+ import pandas as pd
+ hypno_export = pd.DataFrame({
+ "onset": np.arange(len(hypno)) * 30,
+ "label": hypno,
+ "duration": 30})
+ hypno_export.to_csv("my_hypno_EDFBrowser.csv", index=False)
+
+You can then import the hypnogram in EDFBrowser by clicking on "Import annotations/events" in the "Tools" menu. Then, select the "ASCII/CSV" tab and change the parameters as follows:
+
+.. figure:: /pictures/edfbrowser_import_annotations.png
+ :align: center
+
+Click "Import". Once it's done, the hypnogram can be enabled via the "Window" menu. A dialog will appear where you can setup the labels for the different sleep stages and the mapping to the annotations in the file. The default parameters should work.
+When using the Annotation editor, the hypnogram will be updated realtime when adding, moving or deleting annotations. Once you're done editing, you can export the edited hypnogram with "Export anotations/events" in the "Tools" menu.
+
+.. figure:: /pictures/edfbrowser_with_hypnogram.png
+ :align: center
+
+----------
+
+**SpiSOP**
+
+`SpiSOP `_ is an open-source Matlab toolbox for the analysis and visualization of polysomnography sleep data. It comes with a sleep scoring GUI.
+As explained in `the documentation `_, the hypnogram should be a tab-separated text file with two columns (no headers). The first column has the sleep stages (0: Wake, 1: N1, 2: N2, 3: N3, 5: REM) and the second column indicates whether the current epoch should be marked as artefact (1) or valid (0).
+
+.. code-block:: python
+
+    # `hypno` is the string-coded hypnogram predicted by YASA (one value per 30-sec epoch)
+    import pandas as pd
+    hypno_int = pd.Series(hypno).map({"W": 0, "N1": 1, "N2": 2, "N3": 3, "R": 5}).to_numpy()
+    hypno_export = pd.DataFrame({"label": hypno_int, "artefact": 0})
+    hypno_export.to_csv("my_hypno_SpiSOP.txt", sep="\t", header=False, index=False)
+
+----------
+
+**Visbrain**
+
+`Visbrain `_ is an open-source Python toolbox that includes a module for visualizing polysomnography sleep data and scoring sleep (see screenshot below).
+
+.. figure:: /pictures/visbrain.PNG
+ :align: center
+
+Visbrain accepts several `formats for the hypnogram `_. The code below shows how to export the hypnogram in the `Elan software format `_ (i.e. a text file with the *.hyp* extension):
+
+.. code-block:: python
+
+    import numpy as np
+    import pandas as pd
+    # `hypno` is the string-coded hypnogram predicted by YASA (one value per 30-sec epoch)
+    hypno_int = pd.Series(hypno).map({"W": 0, "N1": 1, "N2": 2, "N3": 3, "R": 5}).to_numpy()
+    header = "time_base 30\nsampling_period 1/30\nepoch_nb %i\nepoch_list" % len(hypno_int)
+    np.savetxt("my_hypno_Visbrain.txt", hypno_int, fmt='%s', delimiter=',', newline='\n',
+               header=header, comments="", encoding="utf-8")
+
+.. raw:: html
+
+
+
+
+
+.. ----------------------------- ANIMAL DATA -----------------------------
+.. raw:: html
+
+
+
+YASA was only designed for human scalp data and as such will not work with animal data or intracranial data. Adding support for such data would require the two following steps:
+
+1. Modifying (some of) the features. For example, rodent sleep does not have the same temporal dynamics as human sleep, and therefore one could modify the length of the smoothing window to better capture these dynamics.
+2. Re-training the classifier using a large database of previously-scored data.
+
+Despite these required changes, one advantage of YASA is that it provides a useful framework for implementing such sleep staging algorithms. For example, one can save a huge amount of time by simply re-using and adapting the built-in :py:class:`yasa.SleepStaging` class.
+In addition, all the code used to train YASA is freely available at https://github.com/raphaelvallat/yasa_classifier and can be re-used to re-train the classifier on non-human data.
+
+.. raw:: html
+
+
+
+YASA uses `outdated `_, a Python package that automatically checks whether a newer version of YASA is available upon loading. Alternatively, you can click "Watch" on YASA's `GitHub `_ repository.
+Whenever a new release is out, you can upgrade your version by typing the following line in a terminal window:
+
+.. code-block:: shell
+
+ pip install --upgrade yasa
+
+.. ----------------------------- DONATION -----------------------------
+.. raw:: html
+
+
+
+There are many ways to contribute to YASA, even if you are not a programmer: for example, reporting bugs or results that are inconsistent with other software, improving the documentation and examples, or even `buying the developers a coffee `_!
+
+.. ----------------------------- CITING YASA -----------------------------
+.. raw:: html
+
+
+
+To cite YASA, please use the preprint publication:
+
+* Raphael Vallat and Matthew P. Walker (2021). *A universal, open-source, high-performance tool for automated sleep staging*. bioRxiv 2021.05.28.446165; doi: https://doi.org/10.1101/2021.05.28.446165
+
+BibTeX:
+
+.. code-block:: latex
+
+ @article {Vallat2021.05.28.446165,
+ author = {Vallat, Raphael and Walker, Matthew P.},
+ title = {A universal, open-source, high-performance tool for automated sleep staging},
+ elocation-id = {2021.05.28.446165},
+ year = {2021},
+ doi = {10.1101/2021.05.28.446165},
+ publisher = {Cold Spring Harbor Laboratory},
+ abstract = {The creation of a completely automated sleep-scoring system that is highly accurate, flexible, well validated, free and simple to use by anyone has yet to be accomplished. In part, this is due to the difficulty of use of existing algorithms, algorithms having been trained on too small samples, and paywall demotivation. Here we describe a novel algorithm trained and validated on +27,000 hours of polysomnographic sleep recordings across heterogeneous populations around the world. This tool offers high sleep-staging accuracy matching or exceeding human accuracy and interscorer agreement no matter the population kind. The software is easy to use, computationally low-demanding, open source, and free. Such software has the potential to facilitate broad adoption of automated sleep staging with the hope of becoming an industry standard.Competing Interest StatementThe authors have declared no competing interest.AbbreviationsAHIapnea-hypopnea indexBMIbody mass indexEEGelectroencephalogramEOGelectrooculogramEMGelectromyogramOSAobstructive sleep apneaPSGpolysomnographyMCCMatthews correlation coefficientNREMnon rapid eye movement (sleep)REMrapid eye movement (sleep)},
+ URL = {https://www.biorxiv.org/content/early/2021/05/28/2021.05.28.446165},
+ eprint = {https://www.biorxiv.org/content/early/2021/05/28/2021.05.28.446165.full.pdf},
+ journal = {bioRxiv}
+ }
+
+.. ----------------------------- END -----------------------------
+.. raw:: html
+
+
+
+
diff --git a/docs/index.rst b/docs/index.rst
index 0cf19ce..6e64e74 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -25,16 +25,16 @@
.. figure:: /pictures/yasa_logo.png
:align: center
-**YASA** (*Yet Another Spindle Algorithm*) is a sleep analysis toolbox in Python. YASA includes several fast and convenient command-line functions to:
+**YASA** (*Yet Another Spindle Algorithm*) is a command-line sleep analysis toolbox in Python. The main functions of YASA are:
-* Perform automatic sleep staging.
-* Detect sleep spindles, slow-waves, and rapid eye movements on single and multi-channel EEG data.
-* Reject major artifacts on single or multi-channel EEG data.
-* Perform advanced spectral analyses: spectral bandpower, phase-amplitude coupling, event-locked analyses, 1/f, and more!
-* Manipulate hypnogram and calculate sleep statistics.
+* Automatic sleep staging of polysomnography data (see `preprint article `_).
+* Event detection: sleep spindles, slow-waves and rapid eye movements, on single or multi-channel EEG data.
+* Artefact rejection, on single or multi-channel EEG data.
+* Spectral analyses: bandpower, phase-amplitude coupling, 1/f slope, and more!
+* Hypnogram analysis: sleep statistics and stage transitions.
-For more details, check out the `API documentation `_ or try the
-`tutorial (Jupyter notebooks) `_.
+For more details, check out the `API documentation `_, try the
+`tutorial (Jupyter notebooks) `_ or read the `FAQ `_.
**********
@@ -53,7 +53,7 @@ To use YASA, all you need is:
- Some basic knowledge of Python, especially the `NumPy `_, `Pandas `_ and `MNE `_ packages.
- A Python editor: YASA works best with `Jupyter Lab `_, a web-based interactive user interface.
-- Some sleep EEG data and optionally a sleep staging file (hypnogram) to perform calculations on specific sleep stages. To facilitate masking and indexing operations, the data and hypnogram must have the same sampling frequency and number of samples. YASA provide some convenient functions to load and upsample hypnogram data to the desired shape.
+- Some sleep EEG data and optionally a sleep staging file (hypnogram).
**I have sleep EEG data in European Data Format (.edf), how do I load the data in Python?**
@@ -62,89 +62,49 @@ If you have sleep EEG data in standard formats (e.g. EDF or BrainVision), you ca
.. code-block:: python
import mne
- # Load the EDF file, excluding the EOGs and EKG channels
- raw = mne.io.read_raw_edf('MYEDFFILE.edf', preload=True, exclude=['EOG1', 'EOG2', 'EKG'])
- raw.resample(100) # Downsample the data to 100 Hz
- raw.filter(0.1, 40) # Apply a bandpass filter from 0.1 to 40 Hz
- raw.pick_channels(['C4-A1', 'C3-A2']) # Select a subset of EEG channels
+ # Load the EDF file
+ raw = mne.io.read_raw_edf('MYEDFFILE.edf', preload=True)
+ # Downsample the data to 100 Hz
+ raw.resample(100)
+ # Apply a bandpass filter from 0.1 to 40 Hz
+ raw.filter(0.1, 40)
+ # Select a subset of EEG channels
+ raw.pick_channels(['C4-A1', 'C3-A2'])
**********
How do I get started with YASA?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If you want to dive right in, you can simply go to the main documentation (:ref:`api_ref`) and try to apply YASA's functions on your own EEG data. However, for most users, we strongly recommend that you first try running the examples Jupyter notebooks to get a sense of how YASA works and what it can do! The advantage is that the notebooks also come with example datasets so they should work right out of the box as long as you've installed YASA first. The notebooks and datasets can be found on `GitHub `_ (make sure that you download the whole *notebooks/* folder). A short description of all notebooks is provided below:
-
-**Spindles detection**
-
-* `01_spindles_detection `_: single-channel spindles detection and step-by-step description of the spindles detection algorithm.
-* `02_spindles_detection_multi `_: multi-channel spindles detection.
-* `03_spindles_detection_NREM_only `_: how to limit the spindles detection on specific sleep stages using an hypnogram.
-* `04_spindles_slow_fast `_: slow versus fast spindles.
-* `run_visbrain `_: interactive display of the detected spindles using the Visbrain visualization software in Python.
-
-**Slow-waves detection**
-
-* `05_sw_detection `_: single-channel slow-waves detection and step-by-step description of the slow-waves detection algorithm.
-* `06_sw_detection_multi `_: multi-channel slow-waves detection.
-
-**Rapid Eye Movements (REMs) detection**
-
-* `07_REMs_detection `_: REMs detection.
-
-**Spectral analysis**
-
-* `08_bandpower `_: calculate spectral band power, optionally averaged across channels and sleep stages.
-* `09_IRASA `_: separate the aperiodic (= fractal = 1/f) components of the EEG power spectrum using the IRASA method.
-* `10_spectrogram `_: plot a multi-taper full-night spectrogram on single-channel EEG data with the hypnogram on top.
-* `11_nonlinear_features `_: calculate non-linear EEG features on 30-seconds epochs and perform sleep stage classification.
-* `12_SO-sigma_coupling `_: slow-oscillations/spindles phase-amplitude coupling and data-driven comodulogram.
-* `15_topoplot `_: topoplot.
-
-**Artifact rejection**
-
-* `13_artifact_rejection `_: automatic artifact rejection on single and multi-channel EEG data.
+If you want to dive right in, you can simply go to the main documentation (:ref:`api_ref`) and try to apply YASA's functions on your own EEG data.
+However, for most users, we strongly recommend that you first try running the example Jupyter notebooks to get a sense of how YASA works and what it can do!
+The notebooks also come with example datasets so they should work right out of the box as long as you've installed YASA first.
+The notebooks and datasets can be found on `GitHub `_ (make sure that you download the whole *notebooks/* folder). A short description of all notebooks is provided below:
**Automatic sleep staging**
-* `14_automatic_sleep_staging `_: automatic sleep staging of polysomnography data.
-
-
-.. Typical use: spindles detection
-.. -------------------------------
-
-.. .. code-block:: python
-
-.. import yasa
+* `automatic_staging `_: Automatic sleep staging of polysomnography data.
-.. # 1) Single-channel spindles detection, in its simplest form.
-.. # There are many optional arguments that you can change to customize the detection.
-.. sp = yasa.spindles_detect(data, sf)
-.. # The output of the the detection (`sp`) is a class that has several attributes and methods.
-.. # For instance, to get the full detection dataframe, one can simply use:
-.. sp.summary()
-.. # To plot an average template of all the detected spindles,
-.. # centered around the most prominent peak (+/- 1 second)
-.. sp.plot_average(center='Peak', time_before=1, time_after=1)
-.. # To interactively inspect the detected spindles
-.. sp.plot_detection()
+**Event detection**
-.. # 2) Multi-channels spindles detection limited to N2/N3 sleep, with automatic outlier rejection
-.. sp = yasa.spindles_detect(data, sf, ch_names, hypno=hypno, include=(2, 3), remove_outliers=True)
-.. # Return spindles count / density and parameters averaged across channels and sleep stages
-.. sp.summary(grp_stage=True, grp_chan=True)
-
-.. The output of ``sp.summary()`` is a `pandas DataFrame `_ where each row is a detected spindle and each column a parameter of this event, including the start and end timestamps (in seconds from the beginning of the data), duration, amplitude, etc.
+* `spindles_detection `_: single-channel spindles detection and step-by-step description of the spindles detection algorithm.
+* `spindles_detection_multi `_: multi-channel spindles detection.
+* `spindles_detection_NREM_only `_: how to limit the spindles detection to specific sleep stages using a hypnogram.
+* `spindles_slow_fast `_: slow versus fast spindles.
+* `sw_detection `_: single-channel slow-waves detection and step-by-step description of the slow-waves detection algorithm.
+* `sw_detection_multi `_: multi-channel slow-waves detection.
+* `artifact_rejection `_: automatic artifact rejection on single and multi-channel EEG data.
+* `REMs_detection `_: REMs detection.
+* `run_visbrain `_: interactive display of the detected spindles using the Visbrain visualization software in Python.
-.. .. table::
-.. :widths: auto
+**Spectral analysis**
-.. ======= ===== ========== =========== ===== ========== ========== =========== ============== ==========
-.. Start End Duration Amplitude RMS AbsPower RelPower Frequency Oscillations Symmetry
-.. ======= ===== ========== =========== ===== ========== ========== =========== ============== ==========
-.. 3.32 4.06 0.74 81.80 19.65 2.72 0.49 12.85 10 0.67
-.. 13.26 13.85 0.59 99.30 24.49 2.82 0.24 12.15 7 0.25
-.. ======= ===== ========== =========== ===== ========== ========== =========== ============== ==========
+* `bandpower `_: calculate spectral band power, optionally averaged across channels and sleep stages.
+* `IRASA `_: separate the aperiodic (= fractal = 1/f) components of the EEG power spectrum using the IRASA method.
+* `spectrogram `_: plot a multi-taper full-night spectrogram on single-channel EEG data with the hypnogram on top.
+* `nonlinear_features `_: calculate non-linear EEG features on 30-second epochs and perform a naive sleep stage classification.
+* `SO-sigma_coupling `_: slow-oscillations/spindles phase-amplitude coupling and data-driven comodulogram.
+* `topoplot `_: topoplot.
**********
@@ -163,7 +123,7 @@ Below some plots demonstrating the functionalities of YASA. To reproduce these,
Development
~~~~~~~~~~~
-YASA was created and is maintained by `Raphael Vallat `_. Contributions are more than welcome so feel free to contact me, open an issue or submit a pull request!
+YASA was created and is maintained by `Raphael Vallat `_, a postdoctoral researcher in `Matthew Walker's lab `_ at UC Berkeley. Contributions are more than welcome so feel free to contact me, open an issue or submit a pull request!
To see the code or report a bug, please visit the `GitHub repository `_.
@@ -174,13 +134,8 @@ Note that this program is provided with NO WARRANTY OF ANY KIND.
Citation
~~~~~~~~
-To cite YASA, please use the Zenodo DOI:
-
-.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.2370600.svg
- :target: https://doi.org/10.5281/zenodo.2370600
-
-If you use the automatic sleep staging module, please cite the preprint article:
+To cite YASA, please use the preprint publication:
-* A universal, open-source, high-performance tool for automated sleep staging. Raphael Vallat, Matthew P. Walker. bioRxiv 2021.05.28.446165; doi: https://doi.org/10.1101/2021.05.28.446165
+* Raphael Vallat and Matthew P. Walker (2021). *A universal, open-source, high-performance tool for automated sleep staging*. bioRxiv 2021.05.28.446165; doi: https://doi.org/10.1101/2021.05.28.446165
|
diff --git a/docs/pictures/edfbrowser_import_annotations.png b/docs/pictures/edfbrowser_import_annotations.png
new file mode 100644
index 0000000..41a30a5
Binary files /dev/null and b/docs/pictures/edfbrowser_import_annotations.png differ
diff --git a/docs/pictures/edfbrowser_with_hypnogram.png b/docs/pictures/edfbrowser_with_hypnogram.png
new file mode 100644
index 0000000..46648b3
Binary files /dev/null and b/docs/pictures/edfbrowser_with_hypnogram.png differ
diff --git a/docs/pictures/spectrogram.png b/docs/pictures/spectrogram.png
new file mode 100644
index 0000000..03c25dd
Binary files /dev/null and b/docs/pictures/spectrogram.png differ