From f58afa7e68ee5500709a57b0f75341e11e96fecd Mon Sep 17 00:00:00 2001
From: Unknown
Date: Sat, 25 Aug 2018 09:50:42 -0700
Subject: [PATCH] MindReading toolbox for SDF and latency detection

---
 .../Latency/decrease_dictionary_size.py          | 21 +++++
 swdb_2018_tools/Latency/examine_dataset.py       | 36 ++++++++
 .../filter_spikes_by_region_stimulus.py          | 38 ++++++++
 swdb_2018_tools/Latency/get_all_regions.py       | 27 ++++++
 swdb_2018_tools/Latency/get_hist_sdf.py          | 39 ++++++++
 .../Latency/get_sdf_from_spike_train.py          | 11 +++
 swdb_2018_tools/Latency/latency_per_region.py    | 58 ++++++++++++
 .../Latency/latency_per_region_from_pkl.py       | 78 ++++++++++++++++
 .../Latency/plot_latency_for_region.py           | 30 ++++++
 swdb_2018_tools/Latency/plot_spike_train.py      | 19 ++++
 .../Latency/plot_spike_train_psth.py             | 19 ++++
 .../plot_spike_train_psth_with_latency.py        | 84 +++++++++++++++++
 .../plot_spike_train_sdf_with_latency.py         | 91 +++++++++++++++++++
 swdb_2018_tools/Latency/print_info.py            | 20 ++++
 14 files changed, 571 insertions(+)
 create mode 100644 swdb_2018_tools/Latency/decrease_dictionary_size.py
 create mode 100644 swdb_2018_tools/Latency/examine_dataset.py
 create mode 100644 swdb_2018_tools/Latency/filter_spikes_by_region_stimulus.py
 create mode 100644 swdb_2018_tools/Latency/get_all_regions.py
 create mode 100644 swdb_2018_tools/Latency/get_hist_sdf.py
 create mode 100644 swdb_2018_tools/Latency/get_sdf_from_spike_train.py
 create mode 100644 swdb_2018_tools/Latency/latency_per_region.py
 create mode 100644 swdb_2018_tools/Latency/latency_per_region_from_pkl.py
 create mode 100644 swdb_2018_tools/Latency/plot_latency_for_region.py
 create mode 100644 swdb_2018_tools/Latency/plot_spike_train.py
 create mode 100644 swdb_2018_tools/Latency/plot_spike_train_psth.py
 create mode 100644 swdb_2018_tools/Latency/plot_spike_train_psth_with_latency.py
 create mode 100644 swdb_2018_tools/Latency/plot_spike_train_sdf_with_latency.py
 create mode 100644 swdb_2018_tools/Latency/print_info.py

diff --git a/swdb_2018_tools/Latency/decrease_dictionary_size.py b/swdb_2018_tools/Latency/decrease_dictionary_size.py
new file mode 100644
index 0000000..5c576d1
--- /dev/null
+++ b/swdb_2018_tools/Latency/decrease_dictionary_size.py
@@ -0,0 +1,21 @@
+import pickle
+
+region = 'VISp'
+
+# Load the full per-region spike dictionary (pickled inside a one-element list).
+with open(region + '_spikes.pkl', 'rb') as f:
+    region_spikes = pickle.load(f)
+region_spikes = region_spikes[0]
+
+# Keep only the first 200 entries so the file is small enough to share.
+new_region_spikes = {}
+for ind, c_key in enumerate(region_spikes.keys()):
+    if ind >= 200:
+        break
+    new_region_spikes[c_key] = region_spikes[c_key]
+region_spikes = new_region_spikes
+
+print('Saving region file to disk: ' + region)
+with open('Small_' + region + '_spikes.pkl', 'wb') as f:
+    pickle.dump([region_spikes], f)
+print('File saved')
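A quick round-trip check for the trimmed pickle (a minimal sketch; it assumes the script above has already produced Small_VISp_spikes.pkl in the working directory):

    import pickle

    with open('Small_VISp_spikes.pkl', 'rb') as f:
        small = pickle.load(f)[0]  # payload is wrapped in a one-element list
    assert len(small) <= 200
    print(len(small))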
diff --git a/swdb_2018_tools/Latency/examine_dataset.py b/swdb_2018_tools/Latency/examine_dataset.py
new file mode 100644
index 0000000..cd3f8af
--- /dev/null
+++ b/swdb_2018_tools/Latency/examine_dataset.py
@@ -0,0 +1,36 @@
+# AWS
+basic_path = 'F:\\'
+drive_path = basic_path + 'visual_coding_neuropixels'
+
+# We need to import these modules to get started
+import os
+import sys
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+
+# Import NWB_adapter
+sys.path.append(basic_path + 'resources/swdb_2018_neuropixels')
+from swdb_2018_neuropixels.ephys_nwb_adapter import NWB_adapter
+from print_info import print_info
+
+# Provide path to manifest file
+manifest_file = os.path.join(drive_path, 'ephys_manifest.csv')
+
+# Create a dataframe of experiment metadata
+expt_info_df = pd.read_csv(manifest_file)
+
+# Make a new dataframe by selecting only multi-probe experiments
+multi_probe_expt_info = expt_info_df[expt_info_df.experiment_type == 'multi_probe']
+
+# Look at the first multi-probe experiment
+multi_probe_example = 0
+multi_probe_filename = multi_probe_expt_info.iloc[multi_probe_example]['nwb_filename']
+
+# Specify full path to the .nwb file
+nwb_file = os.path.join(drive_path, multi_probe_filename)
+
+data_set = NWB_adapter(nwb_file)
+
+print_info(data_set.unit_df)

diff --git a/swdb_2018_tools/Latency/filter_spikes_by_region_stimulus.py b/swdb_2018_tools/Latency/filter_spikes_by_region_stimulus.py
new file mode 100644
index 0000000..a9833be
--- /dev/null
+++ b/swdb_2018_tools/Latency/filter_spikes_by_region_stimulus.py
@@ -0,0 +1,38 @@
+basic_path = 'F:\\'
+drive_path = basic_path + 'visual_coding_neuropixels'
+
+import os
+import sys
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+
+# Import NWB_adapter
+sys.path.append(basic_path + 'resources/swdb_2018_neuropixels')
+from swdb_2018_neuropixels.ephys_nwb_adapter import NWB_adapter
+
+
+def filter_spikes_by_region_stimulus(multi_probe_expt_info, region, stimulus):
+    """Collect stimulus-aligned spike trains for every unit in `region`.
+
+    Returns a dict keyed by <nwb_filename>_<probe>_<unit_id>_<frame>_<depth>;
+    each value is a list of per-presentation spike-time arrays in seconds
+    relative to stimulus onset, including 100 ms of pre-stimulus baseline.
+    """
+    spike_trains = {}
+    pre_stimulus_time = 0.1
+    for multi_probe_example in range(len(multi_probe_expt_info)):
+        multi_probe_filename = multi_probe_expt_info.iloc[multi_probe_example]['nwb_filename']
+
+        # Specify full path to the .nwb file
+        nwb_file = os.path.join(drive_path, multi_probe_filename)
+        data_set = NWB_adapter(nwb_file)
+
+        for c_probe in np.unique(data_set.unit_df['probe']):
+            region_units = data_set.unit_df[(data_set.unit_df['structure'] == region) & (data_set.unit_df['probe'] == c_probe)]
+            all_units = data_set.spike_times[c_probe]
+            for index, region_unit in region_units.iterrows():
+                spike_train = all_units[region_unit['unit_id']]
+                # Align to the stimulus table requested by the caller.
+                for ind, stim_row in data_set.stim_tables[stimulus].iterrows():
+                    current_train = spike_train[(spike_train > stim_row['start'] - pre_stimulus_time) & (spike_train < stim_row['end'])] - stim_row['start']
+                    train_id = multi_probe_filename + '_' + c_probe + '_' + region_unit['unit_id'] + '_' + str(int(stim_row['frame'])) + '_' + str(region_unit['depth'])
+                    if train_id not in spike_trains:
+                        spike_trains[train_id] = []
+                    spike_trains[train_id].append(current_train)
+    return spike_trains

diff --git a/swdb_2018_tools/Latency/get_all_regions.py b/swdb_2018_tools/Latency/get_all_regions.py
new file mode 100644
index 0000000..d63d101
--- /dev/null
+++ b/swdb_2018_tools/Latency/get_all_regions.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Aug 23 21:52:27 2018
+
+@author: Stav
+"""
+
+
+def get_all_regions(multi_probe_expt_info):
+    """Return the structures recorded across the multi-probe experiments.
+
+    This list was precomputed by opening every NWB file and collecting
+    np.unique(data_set.unit_df['structure']); it is hard-coded here so we
+    do not have to reload every dataset on each run.
+    """
+    all_regions = ['VISp', 'VISrl', 'DG', 'CA', 'VISal', 'VISam', 'SCs', 'TH', 'VISpm', 'VISl']
+    return all_regions
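For reference, a sketch of how the returned dictionary is meant to be consumed (hypothetical; it assumes multi_probe_expt_info was built as in examine_dataset.py and that 'natural_scenes' is a valid stim_tables key):

    region_spikes = filter_spikes_by_region_stimulus(multi_probe_expt_info, 'VISp', 'natural_scenes')
    some_id = list(region_spikes.keys())[0]
    # Keys pack metadata as <nwb_filename>_<probe>_<unit_id>_<frame>_<depth>
    print(some_id)
    print(len(region_spikes[some_id]))  # presentations collected for this unit/frame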
diff --git a/swdb_2018_tools/Latency/get_hist_sdf.py b/swdb_2018_tools/Latency/get_hist_sdf.py
new file mode 100644
index 0000000..f28aae6
--- /dev/null
+++ b/swdb_2018_tools/Latency/get_hist_sdf.py
@@ -0,0 +1,39 @@
+import numpy as np
+
+from get_sdf_from_spike_train import get_sdf_from_spike_train
+
+
+def spike_times_to_arr(spike_times, start_point, end_point):
+    """Convert spike times (seconds) to a binary array sampled at 1 kHz."""
+    arr_size = int((end_point - start_point) * 1000)
+    spike_arr = np.zeros(arr_size)
+    for spike_time in spike_times:
+        spike_index = int((spike_time - start_point) * 1000)
+        if 0 <= spike_index < arr_size:
+            spike_arr[spike_index] = 1
+    return spike_arr
+
+
+def get_hist_sdf(spike_times):
+    """Spike density function of one presentation, -100 ms to +250 ms.
+
+    A direct Gaussian convolution (np.convolve with a sigma = 10 ms kernel)
+    was also prototyped; the kernel-matrix SDF below is the one in use.
+    """
+    spike_times_arr = spike_times_to_arr(spike_times, -0.1, 0.25)
+    sdf = get_sdf_from_spike_train(spike_times_arr, 10)
+    return sdf

diff --git a/swdb_2018_tools/Latency/get_sdf_from_spike_train.py b/swdb_2018_tools/Latency/get_sdf_from_spike_train.py
new file mode 100644
index 0000000..9cd9873
--- /dev/null
+++ b/swdb_2018_tools/Latency/get_sdf_from_spike_train.py
@@ -0,0 +1,11 @@
+import numpy as np
+
+
+def kernel_fn(x, h):
+    # Gaussian-shaped kernel with bandwidth h, in samples (unnormalized).
+    return (1. / h) * np.exp(-(x ** 2) / h ** 2)
+
+
+def get_sdf_from_spike_train(spike_train, h=10):
+    """Kernel spike density function of a binary 1 kHz spike train.
+
+    Each output sample is a kernel-weighted average over the whole train;
+    the factor 1000 converts spikes/sample into spikes/s.
+    """
+    n = len(spike_train)
+    idx = np.mgrid[0:n, 0:n][0]
+    dist = np.abs(idx - idx.T)  # |i - j| between every pair of samples
+    sdf = 1000 * np.mean(kernel_fn(dist, h) * spike_train, axis=1)
+    return sdf

diff --git a/swdb_2018_tools/Latency/latency_per_region.py b/swdb_2018_tools/Latency/latency_per_region.py
new file mode 100644
index 0000000..c72781c
--- /dev/null
+++ b/swdb_2018_tools/Latency/latency_per_region.py
@@ -0,0 +1,58 @@
+# AWS
+basic_path = 'F:\\'
+drive_path = basic_path + 'visual_coding_neuropixels'
+
+# We need to import these modules to get started
+import os
+import sys
+import pickle
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+
+# Import NWB_adapter
+sys.path.append(basic_path + 'resources/swdb_2018_neuropixels')
+from swdb_2018_neuropixels.ephys_nwb_adapter import NWB_adapter
+
+from filter_spikes_by_region_stimulus import filter_spikes_by_region_stimulus
+from get_all_regions import get_all_regions
+
+# Name of the stimulus table to align to (a key of data_set.stim_tables)
+current_stimulus = 'natural_scenes'
+
+# Provide path to manifest file
+manifest_file = os.path.join(drive_path, 'ephys_manifest.csv')
+
+# Create a dataframe
+expt_info_df = pd.read_csv(manifest_file)
+
+# Make a new dataframe by selecting only multi-probe experiments
+multi_probe_expt_info = expt_info_df[expt_info_df.experiment_type == 'multi_probe']
+
+all_regions = get_all_regions(multi_probe_expt_info)
+print('All regions: ' + str(all_regions))
+
+output_path = 'Latency_results/'
+if not os.path.exists(output_path):
+    os.makedirs(output_path)
+
+for region in all_regions:
+    region_spikes = filter_spikes_by_region_stimulus(multi_probe_expt_info, region, current_stimulus)
+
+    print('Saving region file to disk: ' + region)
+    with open(region + '_spikes.pkl', 'wb') as f:
+        pickle.dump([region_spikes], f)
+    print('File saved')
+
+# Raster/PSTH plotting and latency estimation run on the pickled files;
+# see latency_per_region_from_pkl.py.
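The SDF of a single impulse is just the kernel centred on that spike, which gives a quick correctness check for the two functions above (toy data, not from the dataset):

    import numpy as np
    from get_sdf_from_spike_train import get_sdf_from_spike_train

    # One spike 120 ms into a 350-sample window sampled at 1 kHz.
    train = np.zeros(350)
    train[120] = 1
    sdf = get_sdf_from_spike_train(train, h=10)
    print(sdf.argmax())  # the peak sits at the spike index, 120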
diff --git a/swdb_2018_tools/Latency/latency_per_region_from_pkl.py b/swdb_2018_tools/Latency/latency_per_region_from_pkl.py
new file mode 100644
index 0000000..dcc2359
--- /dev/null
+++ b/swdb_2018_tools/Latency/latency_per_region_from_pkl.py
@@ -0,0 +1,78 @@
+# AWS
+basic_path = 'F:\\'
+drive_path = basic_path + 'visual_coding_neuropixels'
+
+# We need to import these modules to get started
+import os
+import sys
+import pickle
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+
+# Import NWB_adapter
+sys.path.append(basic_path + 'resources/swdb_2018_neuropixels')
+from swdb_2018_neuropixels.ephys_nwb_adapter import NWB_adapter
+
+from plot_spike_train import plot_spike_train
+from plot_spike_train_psth_with_latency import plot_spike_train_psth_with_latency
+from get_all_regions import get_all_regions
+
+# Provide path to manifest file
+manifest_file = os.path.join(drive_path, 'ephys_manifest.csv')
+
+# Create a dataframe
+expt_info_df = pd.read_csv(manifest_file)
+
+# Make a new dataframe by selecting only multi-probe experiments
+multi_probe_expt_info = expt_info_df[expt_info_df.experiment_type == 'multi_probe']
+
+all_regions = get_all_regions(multi_probe_expt_info)
+print('All regions: ' + str(all_regions))
+
+output_path = 'Latency_results/'
+input_path = '../../../Resources/'
+if not os.path.exists(output_path):
+    os.makedirs(output_path)
+
+region_latency = {}
+
+for region in all_regions:
+    region_latency[region] = []
+
+    # Load the trimmed spike dictionary written by decrease_dictionary_size.py
+    # (the payload is pickled inside a one-element list).
+    with open(input_path + 'Small_' + region + '_spikes.pkl', 'rb') as f:
+        region_spikes = pickle.load(f)
+    region_spikes = region_spikes[0]
+
+    print('Loaded spikes file from region: ' + region)
+
+    c_output_path = output_path + region + '/'
+    if not os.path.exists(c_output_path):
+        os.makedirs(c_output_path)
+
+    # Cap the per-region loop to keep runtime manageable (the counter steps
+    # by 2 with a cutoff of 50, i.e. roughly the first 26 units).
+    temp_ind = 0
+    for key, val in region_spikes.items():
+        file_name = c_output_path + key
+        plot_spike_train(val, file_name + '.png')
+        c_latency = plot_spike_train_psth_with_latency(val, file_name + '_psth.png')
+        region_latency[region].append(c_latency)
+        if temp_ind > 50:
+            break
+        temp_ind += 2
+
+    print(region_latency)
+
+with open(input_path + 'region_latency.pkl', 'wb') as f:
+    pickle.dump([region_latency], f)
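The pickled result maps region -> list of per-unit latencies in ms, with NaN where no threshold crossing was found. A minimal inspection sketch (assumes the same relative Resources path used above):

    import pickle
    import numpy as np

    with open('../../../Resources/region_latency.pkl', 'rb') as f:
        region_latency = pickle.load(f)[0]
    for region in region_latency:
        valid = [x for x in region_latency[region] if not np.isnan(x)]
        print(region + ': ' + str(len(valid)) + ' units with a detected latency')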
diff --git a/swdb_2018_tools/Latency/plot_latency_for_region.py b/swdb_2018_tools/Latency/plot_latency_for_region.py
new file mode 100644
index 0000000..154ac56
--- /dev/null
+++ b/swdb_2018_tools/Latency/plot_latency_for_region.py
@@ -0,0 +1,30 @@
+import pickle
+
+import numpy as np
+from scipy.stats import sem
+import matplotlib.pyplot as plt
+
+input_path = '../../../Resources/'
+with open(input_path + 'region_latency.pkl', 'rb') as f:
+    region_latency = pickle.load(f)
+region_latency = region_latency[0]
+
+# Mean and standard error of the valid (non-NaN) latencies per region.
+region_names = []
+region_means = []
+region_sems = []
+for key, latency_list in region_latency.items():
+    clean_list = [x for x in latency_list if not np.isnan(x)]
+    clean_arr = np.asarray(clean_list)
+    region_names.append(key)
+    region_means.append(clean_arr.mean())
+    region_sems.append(sem(clean_arr))
+
+fig, ax = plt.subplots(1, 1, figsize=(6, 3))
+ax.plot(region_means, '.')
+ax.errorbar(range(len(region_means)), region_means, yerr=region_sems)
+# Set tick positions before labels so each label lands on its tick.
+ax.set_xticks(range(len(region_means)))
+ax.set_xticklabels(region_names, fontsize=14)
+ax.set_xlabel('Region', fontsize=16)
+ax.set_ylabel('Mean response time (ms)', fontsize=16)
+ax.set_title('Response time for natural scenes', fontsize=18)
+
+plt.show()

diff --git a/swdb_2018_tools/Latency/plot_spike_train.py b/swdb_2018_tools/Latency/plot_spike_train.py
new file mode 100644
index 0000000..d55be29
--- /dev/null
+++ b/swdb_2018_tools/Latency/plot_spike_train.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Aug 23 21:37:00 2018
+
+@author: Stav
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+def plot_spike_train(spike_trains, fig_path):
+    """Raster plot: one row per presentation; gray band marks the baseline."""
+    fig, ax = plt.subplots(1, 1, figsize=(6, 3))
+    for r_ind, row in enumerate(spike_trains):
+        ax.plot(row, r_ind * np.ones_like(row), '|', color='b')
+    ax.invert_yaxis()
+    ax.set_xlim([-0.1, 0.25])
+    ax.axvspan(-0.1, 0, color='gray', alpha=0.2)
+
+    fig.savefig(fig_path)
+    plt.close(fig)  # called once per unit, so free the figure

diff --git a/swdb_2018_tools/Latency/plot_spike_train_psth.py b/swdb_2018_tools/Latency/plot_spike_train_psth.py
new file mode 100644
index 0000000..5d0aee4
--- /dev/null
+++ b/swdb_2018_tools/Latency/plot_spike_train_psth.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Aug 23 21:41:41 2018
+
+@author: Stav
+"""
+
+import matplotlib.pyplot as plt
+
+
+def plot_spike_train_psth(spike_train, fig_path):
+    """PSTH across presentations with 5 ms bins from -100 ms to +250 ms."""
+    fig, ax = plt.subplots(1, 1, figsize=(6, 3))
+    spike_times = []
+    for row in spike_train:
+        spike_times.extend(list(row))
+    ax.hist(spike_times, bins=[x * 0.005 for x in range(-20, 51)])
+    # Shade the 100 ms pre-stimulus baseline to match the data window.
+    ax.axvspan(-0.1, 0, color='gray', alpha=0.2)
+    ax.set_xlim([-0.1, 0.25])
+
+    fig.savefig(fig_path)
+    plt.close(fig)
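A synthetic smoke test for the two plotting helpers (hypothetical data and output filenames; the burst at ~60 ms should be visible in both figures):

    import numpy as np
    from plot_spike_train import plot_spike_train
    from plot_spike_train_psth import plot_spike_train_psth

    # 20 fake presentations: background spikes plus a burst ~60 ms after onset.
    rng = np.random.RandomState(0)
    trains = [np.sort(np.append(rng.uniform(-0.1, 0.25, 5), [0.06, 0.065]))
              for _ in range(20)]
    plot_spike_train(trains, 'demo_raster.png')
    plot_spike_train_psth(trains, 'demo_psth.png')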
diff --git a/swdb_2018_tools/Latency/plot_spike_train_psth_with_latency.py b/swdb_2018_tools/Latency/plot_spike_train_psth_with_latency.py
new file mode 100644
index 0000000..3fc7e18
--- /dev/null
+++ b/swdb_2018_tools/Latency/plot_spike_train_psth_with_latency.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Aug 23 21:41:41 2018
+
+@author: Stav
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+def plot_spike_train_psth_with_latency(spike_train, fig_path):
+    """Plot a PSTH and estimate response latency.
+
+    Latency is the first post-stimulus bin (at least 50 ms after onset) whose
+    count leaves the band mean +/- 3 std of the 100 ms pre-stimulus baseline.
+    Returns the latency in ms, or NaN if no bin crosses either threshold.
+    """
+    number_of_std = 3
+    window_in_ms = 10.0
+    first_possible_response = 50  # ms; ignore crossings earlier than this
+    start_window_in_ms = -100.0
+    end_window_in_ms = 250.0
+    window_in_secs = window_in_ms / 1000.0
+    min_range = int(start_window_in_ms / window_in_ms)
+    max_range = int(end_window_in_ms / window_in_ms + 1)
+
+    fig, ax = plt.subplots(1, 1, figsize=(6, 3))
+    spike_times = []
+    for row in spike_train:
+        spike_times.extend(list(row))
+    vals, bins, patches = ax.hist(spike_times, bins=[x * window_in_secs for x in range(min_range, max_range)])
+
+    # Baseline statistics from the pre-stimulus bins.
+    stimulus_ind = np.argmax(np.array(bins) >= 0)
+    pre_stimulus_vals = vals[:stimulus_ind]
+    start_gap = int(first_possible_response / window_in_ms)
+    post_stimulus_vals = vals[stimulus_ind + start_gap:]
+    std_val = np.std(pre_stimulus_vals)
+    mean_val = np.mean(pre_stimulus_vals)
+    positive_threshold = mean_val + number_of_std * std_val
+    negative_threshold = mean_val - number_of_std * std_val
+    post_np_arr = np.array(post_stimulus_vals)
+
+    # Find the first bin crossing either threshold.
+    first_occur = -1
+    pos_first_occur = -1
+    neg_first_occur = -1
+    if post_np_arr.max() > positive_threshold:
+        pos_first_occur = np.argmax(post_np_arr > positive_threshold)
+    if post_np_arr.min() < negative_threshold:
+        neg_first_occur = np.argmax(post_np_arr < negative_threshold)
+
+    # Take whichever crossing happens first; green = excitation, red = suppression.
+    if pos_first_occur > -1 and (neg_first_occur == -1 or pos_first_occur < neg_first_occur):
+        first_occur = pos_first_occur
+        ax.axvline(x=bins[first_occur + stimulus_ind + start_gap], color='green', alpha=0.5)
+    elif neg_first_occur > -1:
+        first_occur = neg_first_occur
+        ax.axvline(x=bins[first_occur + stimulus_ind + start_gap], color='red', alpha=0.5)
+
+    ax.axvspan(start_window_in_ms / 1000.0, 0, color='gray', alpha=0.2)
+    ax.set_xlim([start_window_in_ms / 1000.0, end_window_in_ms / 1000.0])
+    fig.savefig(fig_path)
+    plt.close(fig)
+
+    # Convert the crossing bin edge to ms relative to stimulus onset.
+    response_time = float('nan')
+    if first_occur > -1:
+        response_time = bins[first_occur + stimulus_ind + start_gap] * 1000
+
+    return response_time
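The crossing rule is mean +/- 3 std over the ten 10 ms baseline bins; a tiny worked example with toy counts:

    import numpy as np

    pre = np.array([2., 3., 2., 4., 3., 2., 3., 2., 3., 2.])  # baseline bins
    post = np.array([3., 2., 4., 12., 6.])                    # post-stimulus bins
    thr = pre.mean() + 3 * pre.std()   # 2.6 + 3 * 0.66 ~= 4.6
    print(np.argmax(post > thr))       # -> 3, the first bin above threshold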
diff --git a/swdb_2018_tools/Latency/plot_spike_train_sdf_with_latency.py b/swdb_2018_tools/Latency/plot_spike_train_sdf_with_latency.py
new file mode 100644
index 0000000..658ab74
--- /dev/null
+++ b/swdb_2018_tools/Latency/plot_spike_train_sdf_with_latency.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Aug 23 21:41:41 2018
+
+@author: Stav
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from get_hist_sdf import get_hist_sdf
+
+
+def plot_spike_train_sdf_with_latency(spike_train, fig_path):
+    """Plot the trial-averaged spike density function for one unit.
+
+    Thresholding on the SDF (mirroring plot_spike_train_psth_with_latency)
+    is not implemented yet, so this always returns NaN; callers can treat
+    the return value exactly like the PSTH version's.
+    """
+    start_window_in_ms = -100.0
+    end_window_in_ms = 250.0
+
+    # One x value per 1 ms SDF sample, in seconds.
+    x_axis_values = [x / 1000.0 for x in range(int(start_window_in_ms), int(end_window_in_ms))]
+
+    fig, ax = plt.subplots(1, 1, figsize=(6, 3))
+
+    # Average the per-presentation SDFs (350 samples each at 1 kHz).
+    sdfs = np.zeros((len(spike_train), int(end_window_in_ms - start_window_in_ms)))
+    for row_ind, row in enumerate(spike_train):
+        sdf = get_hist_sdf(row)
+        sdfs[row_ind, :] = sdf[:350]
+    mean_sdf = sdfs.mean(axis=0)
+    ax.plot(x_axis_values, mean_sdf, color='yellow')
+
+    ax.axvspan(start_window_in_ms / 1000.0, 0, color='gray', alpha=0.2)
+    ax.set_xlim([start_window_in_ms / 1000.0, end_window_in_ms / 1000.0])
+    ax.set_ylim([0, 0.2])
+    fig.savefig(fig_path)
+    plt.close(fig)
+
+    return float('nan')  # SDF-based latency detection is still TODO

diff --git a/swdb_2018_tools/Latency/print_info.py b/swdb_2018_tools/Latency/print_info.py
new file mode 100644
index 0000000..3388613
--- /dev/null
+++ b/swdb_2018_tools/Latency/print_info.py
@@ -0,0 +1,20 @@
+import numpy as np
+import pandas as pd
+
+
+def print_info(some_var):
+    """Print the type of `some_var` followed by a type-appropriate summary."""
+    print(type(some_var))
+    if isinstance(some_var, np.ndarray):
+        print(some_var.shape)
+    elif isinstance(some_var, dict):
+        print(list(some_var.keys()))
+    elif isinstance(some_var, pd.DataFrame):
+        print(some_var.columns)
+    elif isinstance(some_var, list):
+        print(len(some_var))
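Usage of print_info, for reference (each call prints the type followed by a type-appropriate summary):

    import numpy as np
    import pandas as pd
    from print_info import print_info

    print_info(np.zeros((5, 5)))                  # type, then (5, 5)
    print_info({'a': 1, 'b': 2})                  # type, then the keys
    print_info(pd.DataFrame(columns=['A', 'B']))  # type, then the columns
    print_info(['1', '2', '3'])                   # type, then the length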