21 changes: 21 additions & 0 deletions swdb_2018_tools/Latency/decrease_dictionary_size.py
@@ -0,0 +1,21 @@
import pickle

region = 'VISp'

# Open in binary mode for pickle
with open(region + '_spikes.pkl', 'rb') as f:
    region_spikes = pickle.load(f)

region_spikes = region_spikes[0]

new_region_spikes = {}
ind = 0
# Copy only the first 200 units
for c_key in region_spikes.keys():
    if ind < 200:
        new_region_spikes[c_key] = region_spikes[c_key]
    ind += 1

region_spikes = new_region_spikes
print('Saving region file to disk: ' + region)
with open('Small_' + region + '_spikes.pkl', 'wb') as f:
    pickle.dump([region_spikes], f)
print('File saved')
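
Reviewer-style note: the counting loop above could be collapsed with itertools.islice. A minimal sketch of that refactor, not part of this PR (same 200-unit cap, same pickle layout):

    import itertools
    import pickle

    with open(region + '_spikes.pkl', 'rb') as f:
        region_spikes = pickle.load(f)[0]
    # Take the first 200 (key, value) pairs without a manual counter
    small = dict(itertools.islice(region_spikes.items(), 200))
    with open('Small_' + region + '_spikes.pkl', 'wb') as f:
        pickle.dump([small], f)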
36 changes: 36 additions & 0 deletions swdb_2018_tools/Latency/examine_dataset.py
@@ -0,0 +1,36 @@
# AWS
basic_path = 'F:\\'
drive_path = basic_path + 'visual_coding_neuropixels'

# We need to import these modules to get started
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt

# Import NWB_adapter (os is already imported above)
import sys
sys.path.append(basic_path + 'resources/swdb_2018_neuropixels')
from swdb_2018_neuropixels.ephys_nwb_adapter import NWB_adapter
from print_info import print_info

# Provide path to manifest file
manifest_file = os.path.join(drive_path,'ephys_manifest.csv')

# Create a dataframe
expt_info_df = pd.read_csv(manifest_file)

# Make a new dataframe by selecting only multi-probe experiments
multi_probe_expt_info = expt_info_df[expt_info_df.experiment_type == 'multi_probe']

multi_probe_example = 0

multi_probe_filename = multi_probe_expt_info.iloc[multi_probe_example]['nwb_filename']

# Specify full path to the .nwb file
nwb_file = os.path.join(drive_path,multi_probe_filename)

data_set = NWB_adapter(nwb_file)

print_info(data_set.unit_df)
38 changes: 38 additions & 0 deletions swdb_2018_tools/Latency/filter_spikes_by_region_stimulus.py
@@ -0,0 +1,38 @@
basic_path = 'F:\\'
drive_path = basic_path + 'visual_coding_neuropixels'

import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt

# Import NWB_adapter (os is already imported above)
import sys
sys.path.append(basic_path + 'resources/swdb_2018_neuropixels')
from swdb_2018_neuropixels.ephys_nwb_adapter import NWB_adapter

def filter_spikes_by_region_stimulus(multi_probe_expt_info, region, stimulus):
    spike_trains = {}
    pre_stimulus_time = 0.1
    for multi_probe_example in range(len(multi_probe_expt_info)):
        multi_probe_filename = multi_probe_expt_info.iloc[multi_probe_example]['nwb_filename']

        # Specify full path to the .nwb file
        nwb_file = os.path.join(drive_path, multi_probe_filename)

        data_set = NWB_adapter(nwb_file)

        for c_probe in np.unique(data_set.unit_df['probe']):
            region_units = data_set.unit_df[(data_set.unit_df['structure'] == region) & (data_set.unit_df['probe'] == c_probe)]
            all_units = data_set.spike_times[c_probe]
            for index, region_unit in region_units.iterrows():
                spike_train = all_units[region_unit['unit_id']]
                # Note: the stim table key is hardcoded to 'natural_scenes';
                # the `stimulus` argument is currently unused
                for ind, stim_row in data_set.stim_tables['natural_scenes'].iterrows():
                    current_train = spike_train[(spike_train > stim_row['start'] - pre_stimulus_time) & (spike_train < stim_row['end'])] - stim_row['start']
                    train_id = multi_probe_filename + '_' + c_probe + '_' + region_unit['unit_id'] + '_' + str(int(stim_row['frame'])) + '_' + str(region_unit['depth'])
                    if train_id not in spike_trains:
                        spike_trains[train_id] = []
                    spike_trains[train_id].append(current_train)
    return spike_trains

27 changes: 27 additions & 0 deletions swdb_2018_tools/Latency/get_all_regions.py
@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 21:52:27 2018

@author: Stav
"""

def get_all_regions(multi_probe_expt_info):
    # The hardcoded list below was originally computed by scanning every
    # multi-probe file:
    #
    # all_regions = []
    # for multi_probe_example in range(len(multi_probe_expt_info)):
    #     multi_probe_filename = multi_probe_expt_info.iloc[multi_probe_example]['nwb_filename']
    #     # Specify full path to the .nwb file
    #     nwb_file = os.path.join(drive_path, multi_probe_filename)
    #     data_set = NWB_adapter(nwb_file)
    #     unique_regions = np.unique(data_set.unit_df['structure'])
    #     all_regions.extend(list(unique_regions))
    # all_regions = list(set(all_regions))

    all_regions = ['VISp', 'VISrl', 'DG', 'CA', 'VISal', 'VISam', 'SCs', 'TH', 'VISpm', 'VISl']

    return all_regions
39 changes: 39 additions & 0 deletions swdb_2018_tools/Latency/get_hist_sdf.py
@@ -0,0 +1,39 @@
import numpy as np
Contributor review comment:
There are a lot of constants hardcoded in this script that could be made into default parameter values. Instead of:

def my_function(x):
    return 5 * x

write:

def my_function(x, other_number=5):
    return other_number * x

This does the same thing, but is clearer and more flexible.
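
Applied to this file, a minimal sketch of that suggestion (default values taken from the constants used below; not part of the PR as submitted):

    def get_hist_sdf(spike_times, start_point=-0.1, end_point=0.25, h=10):
        # Same behavior as the hardcoded version, but the window and
        # kernel width are now overridable defaults
        spike_times_arr = spike_times_to_arr(spike_times, start_point, end_point)
        return get_sdf_from_spike_train(spike_times_arr, h)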

import matplotlib.mlab as mlab
from get_sdf_from_spike_train import get_sdf_from_spike_train

def spike_times_to_arr(spike_times, start_point, end_point):
    # Bin spike times into a binary array with 1 ms resolution
    arr_size = int((end_point - start_point) * 1000)
    spike_arr = np.zeros(arr_size)
    for spike_time in spike_times:
        spike_index = int((spike_time - start_point) * 1000)
        if spike_index < arr_size:
            spike_arr[spike_index] = 1

    return spike_arr

def get_hist_sdf(spike_times):
    spike_times_arr = spike_times_to_arr(spike_times, -0.1, 0.25)
    sdf = get_sdf_from_spike_train(spike_times_arr, 10)

    # print(spike_times_arr)
    # print(sdf)

    return sdf

    # Unreachable alternative below: smooth the binned train directly with
    # a Gaussian kernel instead of calling get_sdf_from_spike_train
    # sigma = 0.045
    sigma = 0.01
    dx = 0.001
    # start = -3*sigma
    # stop = 3*sigma
    # step = 0.001
    # edges = np.arange(start, stop+step, step)
    # kernel = mlab.normpdf(edges, 0, sigma)
    # kernel *= 0.001
    gx = np.arange(-3*sigma, 3*sigma + dx, dx)
    gaussian = np.exp(-(gx / sigma)**2 / 2)
    conv_data = np.convolve(spike_times_arr, gaussian, mode='full')

    return conv_data
    # center = np.ceil(float(len(edges)) / float(2))
11 changes: 11 additions & 0 deletions swdb_2018_tools/Latency/get_sdf_from_spike_train.py
@@ -0,0 +1,11 @@
import numpy as np

def kernel_fn(x, h):
    # Unnormalized Gaussian kernel with bandwidth h
    return (1. / h) * np.exp(-(x**2) / h**2)

def get_sdf_from_spike_train(spike_train, h=None):
    n = len(spike_train)
    # Matrix of pairwise bin distances |i - j|
    out = np.abs(np.mgrid[0:n, 0:n][0] - np.mgrid[0:n, 0:n][0].T)
    # Kernel-weighted average of the spike train, scaled to spikes/s (1 ms bins)
    sdf = 1000 * np.mean(kernel_fn(out, h) * spike_train, axis=1)
    return sdf
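
A minimal usage sketch for the function above (synthetic input; assumes 1 ms bins, as in get_hist_sdf):

    import numpy as np

    # 350 ms binary spike train with spikes at 120 ms and 180 ms
    train = np.zeros(350)
    train[[120, 180]] = 1
    sdf = get_sdf_from_spike_train(train, h=10)
    # The smoothed density should peak near the spike bins
    print(sdf.argmax(), round(float(sdf.max()), 2))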
58 changes: 58 additions & 0 deletions swdb_2018_tools/Latency/latency_per_region.py
@@ -0,0 +1,58 @@
# AWS
basic_path = 'F:\\'
drive_path = basic_path + 'visual_coding_neuropixels'

# We need to import these modules to get started
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt

# Import NWB_adapter (os is already imported above)
import sys
sys.path.append(basic_path + 'resources/swdb_2018_neuropixels')
from swdb_2018_neuropixels.ephys_nwb_adapter import NWB_adapter

from filter_spikes_by_region_stimulus import filter_spikes_by_region_stimulus
from plot_spike_train import plot_spike_train
from plot_spike_train_psth import plot_spike_train_psth
from plot_spike_train_psth_with_latency import plot_spike_train_psth_with_latency
from get_all_regions import get_all_regions
from print_info import print_info
import pickle

current_stimulus = ['natural_images']

# Provide path to manifest file
manifest_file = os.path.join(drive_path,'ephys_manifest.csv')

# Create a dataframe
expt_info_df = pd.read_csv(manifest_file)

# Make a new dataframe by selecting only multi-probe experiments
multi_probe_expt_info = expt_info_df[expt_info_df.experiment_type == 'multi_probe']

all_regions = get_all_regions(multi_probe_expt_info)
print('All regions: ' + str(all_regions))

output_path = 'Latency_results/'
if not os.path.exists(output_path):
    os.makedirs(output_path)

for region in all_regions:
    region_spikes = filter_spikes_by_region_stimulus(multi_probe_expt_info, region, current_stimulus)

    print('Saving region file to disk: ' + region)
    with open(region + '_spikes.pkl', 'wb') as f:
        pickle.dump([region_spikes], f)
    print('File saved')

    # c_output_path = output_path + region + '/'
    # if not os.path.exists(c_output_path):
    #     os.makedirs(c_output_path)

    # for key, val in region_spikes.items():
    #     file_name = c_output_path + key
    #     plot_spike_train(val, file_name + '.png')
    #     plot_spike_train_psth_with_latency(val, file_name + '_psth.png')
78 changes: 78 additions & 0 deletions swdb_2018_tools/Latency/latency_per_region_from_pkl.py
@@ -0,0 +1,78 @@
# AWS
basic_path = 'F:\\'
drive_path = basic_path + 'visual_coding_neuropixels'

# We need to import these modules to get started
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt

# Import NWB_adapter (os is already imported above)
import sys
sys.path.append(basic_path + 'resources/swdb_2018_neuropixels')
from swdb_2018_neuropixels.ephys_nwb_adapter import NWB_adapter

from filter_spikes_by_region_stimulus import filter_spikes_by_region_stimulus
from plot_spike_train import plot_spike_train
from plot_spike_train_psth import plot_spike_train_psth
from plot_spike_train_psth_with_latency import plot_spike_train_psth_with_latency
from get_all_regions import get_all_regions
from print_info import print_info
import pickle

current_stimulus = ['natural_images']

# Provide path to manifest file
manifest_file = os.path.join(drive_path,'ephys_manifest.csv')

# Create a dataframe
expt_info_df = pd.read_csv(manifest_file)

# Make a new dataframe by selecting only multi-probe experiments
multi_probe_expt_info = expt_info_df[expt_info_df.experiment_type == 'multi_probe']

all_regions = get_all_regions(multi_probe_expt_info)
print('All regions: ' + str(all_regions))

output_path = 'Latency_results/'
input_path = '../../../Resources/'
if not os.path.exists(output_path):
    os.makedirs(output_path)

region_latency = {}

for region in all_regions:
    region_latency[region] = []
    # region_spikes = filter_spikes_by_region_stimulus(multi_probe_expt_info, region, current_stimulus)

    # with open('region_spikes.pkl', 'wb') as f:
    #     pickle.dump([region_spikes], f)

    with open(input_path + 'Small_' + region + '_spikes.pkl', 'rb') as f:
        region_spikes = pickle.load(f)

    region_spikes = region_spikes[0]

    print('Loaded spikes file from region: ' + region)

    c_output_path = output_path + region + '/'
    if not os.path.exists(c_output_path):
        os.makedirs(c_output_path)

    # Only plot a few dozen trains per region for a quick pass
    temp_ind = 0
    for key, val in region_spikes.items():
        file_name = c_output_path + key
        plot_spike_train(val, file_name + '.png')
        c_latency = plot_spike_train_psth_with_latency(val, file_name + '_psth.png')
        region_latency[region].append(c_latency)
        if temp_ind > 50:
            break
        temp_ind += 2

print(region_latency)

with open(input_path + 'region_latency.pkl', 'wb') as f:
    pickle.dump([region_latency], f)
30 changes: 30 additions & 0 deletions swdb_2018_tools/Latency/plot_latency_for_region.py
@@ -0,0 +1,30 @@
import pickle
import numpy as np
from scipy.stats import sem
import matplotlib.pyplot as plt

input_path = '../../../Resources/'
with open(input_path + 'region_latency.pkl', 'rb') as f:
    region_latency = pickle.load(f)
region_latency = region_latency[0]

region_means = []
region_sems = []
for key, latency_list in region_latency.items():
    # Drop NaN latencies before averaging
    clean_list = [x for x in latency_list if not np.isnan(x)]
    clean_arr = np.asarray(clean_list)
    region_means.append(clean_arr.mean())
    region_sems.append(sem(clean_arr))

fig, ax = plt.subplots(1, 1, figsize=(6, 3))
ax.plot(region_means, '.')
ax.errorbar(range(len(region_means)), region_means, yerr=region_sems)
# Set tick positions before the labels so they line up
ax.set_xticks(range(len(region_means)))
ax.set_xticklabels(region_latency.keys(), fontsize=14)
ax.set_xlabel('Region', fontsize=16)
ax.set_ylabel('Mean response time (ms)', fontsize=16)
ax.set_title('Response time for natural scenes', fontsize=18)

plt.show()
19 changes: 19 additions & 0 deletions swdb_2018_tools/Latency/plot_spike_train.py
@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 21:37:00 2018

@author: Stav
"""

import numpy as np
import matplotlib.pyplot as plt

def plot_spike_train(spike_trains, fig_path):
    fig, ax = plt.subplots(1, 1, figsize=(6, 3))
    # One raster row per trial; vertical ticks mark spike times
    for r_ind, row in enumerate(spike_trains):
        ax.plot(row, r_ind * np.ones_like(row), '|', color='b')
    ax.invert_yaxis()
    ax.set_xlim([-0.1, 0.25])
    # Shade the pre-stimulus window
    ax.axvspan(-0.1, 0, color='gray', alpha=0.2)

    fig.savefig(fig_path)
19 changes: 19 additions & 0 deletions swdb_2018_tools/Latency/plot_spike_train_psth.py
@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 21:41:41 2018

@author: Stav
"""

import matplotlib.pyplot as plt

def plot_spike_train_psth(spike_train, fig_path):
    fig, ax = plt.subplots(1, 1, figsize=(6, 3))
    # Pool spikes across trials, then bin at 5 ms
    spike_times = []
    for row in spike_train:
        spike_times.extend(list(row))
    ax.hist(spike_times, bins=[x * 0.005 for x in range(-20, 51)])
    # Shade the pre-stimulus window
    ax.axvspan(-0.2, 0, color='gray', alpha=0.2)
    ax.set_xlim([-0.1, 0.25])

    fig.savefig(fig_path)