diff --git a/corticalmapping.egg-info/PKG-INFO b/corticalmapping.egg-info/PKG-INFO new file mode 100644 index 0000000..05a998d --- /dev/null +++ b/corticalmapping.egg-info/PKG-INFO @@ -0,0 +1,20 @@ +Metadata-Version: 2.1 +Name: corticalmapping +Version: 2.0.0 +Summary: cortical mapping tools +Home-page: http://stash.corp.alleninstitute.org/users/junz/repos/corticalmapping/ +Author: Jun Zhuang +Author-email: junz@alleninstitute.org +License: UNKNOWN +Description: corticalmapping + + by Jun Zhuang @ 2014 + + contains basic visual stimulation, imaging analysis, plotting, cell visual response properties analysis functionalities. + also contains a relatively mature code base of retinotopic mapping (visual stimulation and analysis) +Platform: any +Classifier: Programming Language :: Python +Classifier: Development Status :: 4 - Beta +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Provides-Extra: testing diff --git a/corticalmapping.egg-info/SOURCES.txt b/corticalmapping.egg-info/SOURCES.txt new file mode 100644 index 0000000..1eb2ef8 --- /dev/null +++ b/corticalmapping.egg-info/SOURCES.txt @@ -0,0 +1,38 @@ +README.md +setup.py +corticalmapping/CaimanTools.py +corticalmapping/CamstimTools.py +corticalmapping/DatabaseTools.py +corticalmapping/HighLevel.py +corticalmapping/MotionCorrection.py +corticalmapping/NwbTools.py +corticalmapping/ResponseAnalysis.py +corticalmapping/RetinotopicMapping.py +corticalmapping/SingleCellAnalysis.py +corticalmapping/VasculatureMapMatching.py +corticalmapping/VisualStim.py +corticalmapping/__init__.py +corticalmapping/setup.py +corticalmapping.egg-info/PKG-INFO +corticalmapping.egg-info/SOURCES.txt +corticalmapping.egg-info/dependency_links.txt +corticalmapping.egg-info/requires.txt +corticalmapping.egg-info/top_level.txt +corticalmapping/core/DataAnalysis.py +corticalmapping/core/FileTools.py +corticalmapping/core/ImageAnalysis.py +corticalmapping/core/PlottingTools.py +corticalmapping/core/TimingAnalysis.py +corticalmapping/core/__init__.py +corticalmapping/core/tifffile.py +corticalmapping/ephys/KilosortWrapper.py +corticalmapping/ephys/OpenEphysWrapper.py +corticalmapping/ephys/__init__.py +corticalmapping/ipython_lizard/__init__.py +corticalmapping/ipython_lizard/export_notebook_functions.py +corticalmapping/ipython_lizard/html_widgets.py +corticalmapping/ipython_lizard/ipython_filedialog.py +corticalmapping/ipython_lizard/patchplot_ipywidgets.py +corticalmapping/ipython_lizard/wrapped_retinotopic_mapping.py +corticalmapping/ipython_lizard/utils/__init__.py +corticalmapping/ipython_lizard/utils/progress_bar.py \ No newline at end of file diff --git a/corticalmapping.egg-info/dependency_links.txt b/corticalmapping.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/corticalmapping.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/corticalmapping.egg-info/requires.txt b/corticalmapping.egg-info/requires.txt new file mode 100644 index 0000000..23446c1 --- /dev/null +++ b/corticalmapping.egg-info/requires.txt @@ -0,0 +1,8 @@ +numpy +scipy +PyDAQmx +scikit-image +tifffile + +[testing] +pytest diff --git a/corticalmapping.egg-info/top_level.txt b/corticalmapping.egg-info/top_level.txt new file mode 100644 index 0000000..23ab74b --- /dev/null +++ b/corticalmapping.egg-info/top_level.txt @@ -0,0 +1 @@ +corticalmapping diff --git a/corticalmapping/.gitignore b/corticalmapping/.gitignore new file mode 100644 index 0000000..0be5d04 --- /dev/null +++ b/corticalmapping/.gitignore 
@@ -0,0 +1,58 @@ +# Created by .ignore support plugin (hsz.mobi) +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm + +#*.iml +*.pyc + + +## Directory-based project format: +.idea/ +.cache/ + + +# if you remove the above rule, at least ignore the following: + +# User-specific stuff: +# .idea/workspace.xml +# .idea/tasks.xml +# .idea/dictionaries + +# Sensitive or high-churn files: +# .idea/dataSources.ids +# .idea/dataSources.xml +# .idea/sqlDataSources.xml +# .idea/dynamic.xml +# .idea/uiDesigner.xml + +# Gradle: +# .idea/gradle.xml +# .idea/libraries + +# Mongo Explorer plugin: +# .idea/mongoSettings.xml + +## File-based project format: +*.ipr +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties + + +/corticalmapping/ipython_lizard/res/test_pkls +.ipynb_checkpoints + diff --git a/corticalmapping/DatabaseTools.py b/corticalmapping/DatabaseTools.py index afaa3a6..01abe68 100644 --- a/corticalmapping/DatabaseTools.py +++ b/corticalmapping/DatabaseTools.py @@ -1717,80 +1717,6 @@ def get_axon_dgcrm_from_clu_f(clu_f, plane_n, axon_n, trace_type): return dgcrm -def get_axon_morphology(clu_f, nwb_f, plane_n, axon_n): - - axon_morph = {} - - mc_grp = nwb_f['processing/motion_correction/MotionCorrection/{}/corrected'.format(plane_n)] - pixel_size = mc_grp['pixel_size'].value - # print(pixel_size) - pixel_size_mean = np.mean(pixel_size) - - bout_ns = clu_f['axons/{}'.format(axon_n)].value - # print(bout_ns) - bout_num = len(bout_ns) - axon_morph['bouton_num'] = bout_num - - if bout_num == 1: - axon_roi = get_roi(nwb_f=nwb_f, plane_n=plane_n, roi_n=bout_ns[0]) - else: - axon_roi = get_axon_roi_from_clu_f(clu_f=clu_f, axon_n=axon_n) - axon_roi = ia.WeightedROI(axon_roi.get_weighted_mask(), pixelSize=pixel_size, - pixelSizeUnit=mc_grp['pixel_size_unit'].value) - - # plt.imshow(axon_roi.get_binary_mask(), interpolation='nearest') - # plt.show() - - axon_morph['axon_row_range'] = (np.max(axon_roi.pixels[0]) - - np.min(axon_roi.pixels[0])) * pixel_size[0] * 1e6 - axon_morph['axon_col_range'] = (np.max(axon_roi.pixels[1]) - - np.min(axon_roi.pixels[1])) * pixel_size[1] * 1e6 - - axon_morph['axon_area'] = axon_roi.get_pixel_area() * 1e12 - - axon_qhull = spatial.ConvexHull(np.array(axon_roi.pixels).transpose()) - # print(axon_qhull.volume) - axon_morph['axon_qhull_area'] = axon_qhull.volume * axon_roi.pixelSizeX * axon_roi.pixelSizeY * 1e12 - - bout_rois = [] - for bout_n in bout_ns: - bout_rois.append(get_roi(nwb_f=nwb_f, plane_n=plane_n, roi_n=bout_n)) - - bout_areas = [r.get_pixel_area() for r in bout_rois] - bout_area_mean = np.mean(bout_areas) - axon_morph['bouton_area_mean'] = bout_area_mean * 1e12 - - if bout_num == 1: - bout_area_std = np.nan - else: - bout_area_std = np.std(bout_areas) - axon_morph['bouton_area_std'] = bout_area_std * 1e12 - - bout_coords = np.array([r.get_center() for r in bout_rois]) # [[y0, x0], [y1, x1], ... 
, [yn, xn]] - if bout_num == 1: - axon_morph['bouton_row_std'] = np.nan - axon_morph['bouton_col_std'] = np.nan - axon_morph['bouton_dis_mean'] = np.nan - axon_morph['bouton_dis_std'] = np.nan - axon_morph['bouton_dis_median'] = np.nan - axon_morph['bouton_dis_max'] = np.nan - else: - axon_morph['bouton_row_std'] = np.std(bout_coords[:, 0]) * pixel_size_mean * 1e6 - axon_morph['bouton_col_std'] = np.std(bout_coords[:, 1]) * pixel_size_mean * 1e6 - - bout_dis = spatial.distance.pdist(bout_coords) * pixel_size_mean - axon_morph['bouton_dis_mean'] = np.mean(bout_dis) * 1e6 - axon_morph['bouton_dis_median'] = np.median(bout_dis) * 1e6 - axon_morph['bouton_dis_max'] = np.max(bout_dis) * 1e6 - - if bout_num == 2: - axon_morph['bouton_dis_std'] = np.nan - else: - axon_morph['bouton_dis_std'] = np.std(bout_dis) * 1e6 - - return axon_morph - - def get_everything_from_axon(nwb_f, clu_f, plane_n, axon_n, params=ANALYSIS_PARAMS, verbose=False): """ @@ -3819,28 +3745,14 @@ def get_sta(arr, arr_ts, trigger_ts, frame_start, frame_end): if __name__ == '__main__': # =================================================================================================== - nwb_f = h5py.File(r"G:\bulk_LGN_database\nwbs\190221_M426525_110_repacked.nwb", 'r') - clu_f = h5py.File(r"G:\bulk_LGN_database\intermediate_results\bouton_clustering" - r"\AllStimuli_DistanceThr_1.30\190221_M426525_plane0_axon_grouping.hdf5", 'r') - plane_n = 'plane0' - axon_n = 'axon_0007' - axon_morph = get_axon_morphology(clu_f=clu_f, nwb_f=nwb_f, plane_n=plane_n, axon_n=axon_n) - - keys = axon_morph.keys() - keys.sort() - for key in keys: - print('{}: {}'.format(key, axon_morph[key])) - # =================================================================================================== - - # =================================================================================================== - # nwb_f = h5py.File(r"G:\bulk_LGN_database\nwbs\190404_M439939_110_repacked.nwb") - # uc_inds, _ = get_UC_ts_mask(nwb_f=nwb_f, plane_n='plane0') - # plt.plot(uc_inds) - # plt.show() - # - # dgc_spont_inds, _ = get_DGC_spont_ts_mask(nwb_f=nwb_f, plane_n='plane0') - # plt.plot(dgc_spont_inds) - # plt.show() + nwb_f = h5py.File(r"G:\bulk_LGN_database\nwbs\190404_M439939_110_repacked.nwb") + uc_inds, _ = get_UC_ts_mask(nwb_f=nwb_f, plane_n='plane0') + plt.plot(uc_inds) + plt.show() + + dgc_spont_inds, _ = get_DGC_spont_ts_mask(nwb_f=nwb_f, plane_n='plane0') + plt.plot(dgc_spont_inds) + plt.show() # =================================================================================================== # =================================================================================================== @@ -3894,6 +3806,4 @@ def get_sta(arr, arr_ts, trigger_ts, frame_start, frame_end): # plot_roi_retinotopy(coords_roi=coords_roi, coords_rf=coords_rf, ax_alt=ax_alt, ax_azi=ax_azi, # cmap='viridis', canvas_shape=(512, 512), edgecolors='#000000', linewidths=0.5) # plt.show() - # =================================================================================================== - - print('for debug ...') \ No newline at end of file + # =================================================================================================== \ No newline at end of file diff --git a/corticalmapping/HighLevel.py b/corticalmapping/HighLevel.py index cb118ac..2d79bfc 100644 --- a/corticalmapping/HighLevel.py +++ b/corticalmapping/HighLevel.py @@ -7,26 +7,157 @@ import itertools import pandas as pd import scipy.stats as stats -import scipy.ndimage as ni import scipy.sparse as 
sparse -import scipy.interpolate as ip +import scipy.ndimage as ni import matplotlib.pyplot as plt import tifffile as tf -from toolbox.misc import BinarySlicer -import allensdk_internal.brain_observatory.mask_set as mask_set +from toolbox.misc.slicer import BinarySlicer +#import allensdk_internal.brain_observatory.mask_set as mask_set import corticalmapping.core.ImageAnalysis as ia import corticalmapping.core.TimingAnalysis as ta import corticalmapping.core.PlottingTools as pt import corticalmapping.core.FileTools as ft -import corticalmapping.SingleCellAnalysis as sca +#import corticalmapping.SingleCellAnalysis as sca import corticalmapping.RetinotopicMapping as rm -try: - # from r_neuropil import NeuropilSubtract as NS - from allensdk.brain_observatory.r_neuropil import NeuropilSubtract as NS -except Exception as e: - print 'fail to import neural pil subtraction module ...' - print e +#try: +# # from r_neuropil import NeuropilSubtract as NS +# from allensdk.brain_observatory.r_neuropil import NeuropilSubtract as NS +#except Exception as e: + #print('fail to import neural pil subtraction module ...') + #print(e) + + + +def get_masks_from_caiman(spatial_com, dims, thr=0, thr_method='nrg', swap_dim=False): + """ + Gets masks of spatial component results generated by the CaImAn segmentation + + this function is stripped out from the caiman.utils.visualization.get_contours(). only works for 2d spatial + components. + + Args: + spatial_com: np.ndarray or sparse matrix, mostly will be the caiman.source_extraction.cnmf.estimates.A + 2d Matrix of Spatial components, each row is a flattened pixel (order 'F'), each column + is a spatial component + dims: tuple of ints + Spatial dimensions of movie (row, col) + thr: scalar between 0 and 1 + Energy threshold for computing contours (default 0) + if thr_method is 'nrg': higher thr will make a bigger hole inside the mask + if thr_method is 'max': (usually does not work very well), higher thr will make a smaller mask + near the center. + thr_method: [optional] string + Method of thresholding: + 'max' sets to zero pixels that have value less than a fraction of the max value + 'nrg' keeps the pixels that contribute up to a specified fraction of the energy + swap_dim: if True, flattened 2d array will be reshaped by order 'C', otherwise with order 'F'. + Returns: + masks: 3d array, dtype=np.float, spatial component x row x col + """ + + if 'csc_matrix' not in str(type(spatial_com)): + spatial_com = sparse.csc_matrix(spatial_com) + + if len(spatial_com.shape) != 2: + raise ValueError('input "spatial_com" should be a 2d array or 2d sparse matrix.') + + n_mask = spatial_com.shape[1] + + if len(dims) != 2: + raise ValueError("input 'dims' should have two entries: (num_row, num_col).") + + if dims[0] * dims[1] != spatial_com.shape[0]: + raise ValueError("the product of dims[0] and dims[1] ({} x {}) should be equal to the first dimension " + "of the input 'spatial_com'.".format(dims[0], dims[1], spatial_com.shape[0])) + + masks = [] + + # # get the center of mass of neurons( patches ) + # cm = com(A, *dims) + + # for each patches + for i in range(n_mask): + # we compute the cumulative sum of the energy of the Ath component that has been ordered from least to highest + patch_data = spatial_com.data[spatial_com.indptr[i]:spatial_com.indptr[i + 1]] + indx = np.argsort(patch_data)[::-1] + if thr_method == 'nrg': + cumEn = np.cumsum(patch_data[indx] ** 2) + # we work with normalized values + cumEn /= cumEn[-1] + Bvec = np.ones(spatial_com.shape[0]) + # we put it in a similar matrix + Bvec[spatial_com.indices[spatial_com.indptr[i]:spatial_com.indptr[i + 1]][indx]] = cumEn + else: + if thr_method != 'max': + print('Unknown threshold method {}. should be either "max" or "nrg". ' + 'Choosing "max".'.format(thr_method)) + Bvec = np.zeros(spatial_com.shape[0]) + Bvec[spatial_com.indices[spatial_com.indptr[i]: + spatial_com.indptr[i + 1]]] = patch_data / patch_data.max() + if swap_dim: + Bmat = np.reshape(Bvec, dims, order='C') + mask = np.array(spatial_com[:, i].todense().reshape(dims, order='C')) + else: + Bmat = np.reshape(Bvec, dims, order='F') + mask = np.array(spatial_com[:, i].todense().reshape(dims, order='F')) + + Bmat[Bmat >= thr] = 1. + Bmat[Bmat < thr] = 0. + + masks.append(mask * Bmat) + + return np.array(masks) + + +def threshold_mask_by_energy(mask, sigma=1., thr_high=0.0, thr_low=0.1): + """ + threshold a weighted mask by reversed cumulative energy. Use this to treat masks spit out by caiman + segmentation. + :param mask: 2d array + :param sigma: float, 2d gaussian filter sigma + :param thr_high: float, 0 - 1, bigger thr_high will make a bigger hole inside the roi + :param thr_low: float, 0 - 1, bigger thr_low will make a smaller roi around the center + :return: dict of WeightedROI objects, one per connected patch of the thresholded mask + """ + + if len(mask.shape) != 2: + raise ValueError('input "mask" should be a 2d array.') + + if sigma is not None: + mask = ni.gaussian_filter(mask, sigma=sigma) + + mask = ia.array_nor(mask) + mask_s = mask.flatten() + + indx_low = np.argsort(mask_s) + cum_eng_low = np.cumsum(mask_s[indx_low] ** 2) + cum_eng_low /= cum_eng_low[-1] + mask_eng_low = np.ones(mask_s.shape, dtype=np.float) + mask_eng_low[indx_low] = cum_eng_low + mask_eng_low = mask_eng_low.reshape(mask.shape) + + indx_high = np.argsort(mask_s)[::-1] + cum_eng_high = np.cumsum(mask_s[indx_high] ** 2) + cum_eng_high /= cum_eng_high[-1] + mask_eng_high = np.ones(mask_s.shape, dtype=np.float) + mask_eng_high[indx_high] = cum_eng_high + mask_eng_high = mask_eng_high.reshape(mask.shape) + + mask_bin = np.ones(mask.shape) + + mask_bin[mask_eng_high < thr_high] = 0. + mask_bin[mask_eng_low < thr_low] = 0.
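The two helpers being added here are meant to be chained: get_masks_from_caiman unpacks CaImAn's sparse component matrix into per-component weighted masks, and threshold_mask_by_energy (whose body concludes just below) then splits each mask into connected WeightedROI objects. A minimal sketch of that chain on synthetic data — the toy gaussian components and all parameter values are illustrative assumptions, not part of this patch:

    import numpy as np
    import scipy.sparse as sparse
    import corticalmapping.HighLevel as hl

    # two gaussian-blob components on a 64 x 64 field, stored the way CaImAn
    # stores estimates.A: one flattened (order 'F') component per column
    dims = (64, 64)
    yy, xx = np.mgrid[0:dims[0], 0:dims[1]]
    comp0 = np.exp(-((yy - 20.) ** 2 + (xx - 20.) ** 2) / 18.)
    comp1 = np.exp(-((yy - 45.) ** 2 + (xx - 40.) ** 2) / 18.)
    A = sparse.csc_matrix(np.stack([comp0.flatten(order='F'),
                                    comp1.flatten(order='F')], axis=1))

    masks = hl.get_masks_from_caiman(spatial_com=A, dims=dims, thr=0., thr_method='nrg')

    rois = {}
    for mi, m in enumerate(masks):
        # split each weighted component into connected WeightedROIs
        for rn, roi in hl.threshold_mask_by_energy(m, sigma=1., thr_high=0.0, thr_low=0.1).items():
            rois['component{:04d}_{}'.format(mi, rn)] = roi
    print('{} ROIs extracted'.format(len(rois)))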
+ + mask_labeled, mask_num = ni.label(mask_bin, structure=[[1,1,1], [1,1,1], [1,1,1]]) + mask_dict = ia.get_masks(labeled=mask_labeled, keyPrefix='', labelLength=5) + + for key, value in mask_dict.items(): + mask_w = value * mask + mask_w = mask_w / np.amax(mask_w) + mask_dict[key] = ia.WeightedROI(mask_w) + + return mask_dict def translateMovieByVasculature(mov, parameterPath, matchingDecimation=2, referenceDecimation=2, verbose=True): @@ -46,17 +177,17 @@ def translateMovieByVasculature(mov, parameterPath, matchingDecimation=2, refere referenceDecimation = float(referenceDecimation) if matchingParams[ - 'Xoffset'] % matchingDecimation != 0: print 'Original Xoffset is not divisble by movDecimation. Taking the floor integer.' + 'Xoffset'] % matchingDecimation != 0: print('Original Xoffset is not divisible by movDecimation. Taking the floor integer.') if matchingParams[ - 'Yoffset'] % matchingDecimation != 0: print 'Original Yoffset is not divisble by movDecimation. Taking the floor integer.' + 'Yoffset'] % matchingDecimation != 0: print('Original Yoffset is not divisible by movDecimation. Taking the floor integer.') offset = [int(matchingParams['Xoffset'] / matchingDecimation), int(matchingParams['Yoffset'] / matchingDecimation)] if matchingParams[ - 'ReferenceMapHeight'] % matchingDecimation != 0: print 'Original ReferenceMapHeight is not divisble by movDecimation. Taking the floor integer.' + 'ReferenceMapHeight'] % matchingDecimation != 0: print('Original ReferenceMapHeight is not divisible by movDecimation. Taking the floor integer.') if matchingParams[ - 'ReferenceMapWidth'] % matchingDecimation != 0: print 'Original ReferenceMapWidth is not divisble by movDecimation. Taking the floor integer.' + 'ReferenceMapWidth'] % matchingDecimation != 0: print('Original ReferenceMapWidth is not divisible by movDecimation. Taking the floor integer.') outputShape = [int(matchingParams['ReferenceMapHeight'] / matchingDecimation), int(matchingParams['ReferenceMapHeight'] / matchingDecimation)] @@ -67,7 +198,7 @@ def translateMovieByVasculature(mov, parameterPath, matchingDecimation=2, refere if matchingDecimation / referenceDecimation != 1: movT = ia.rigid_transform_cv2(movT, zoom=matchingDecimation / referenceDecimation) - if verbose: print 'shape of output movie:', movT.shape + if verbose: print('shape of output movie:', movT.shape) return movT @@ -92,22 +223,22 @@ def translateHugeMovieByVasculature(inputPath, outputPath, parameterPath, output if outputDtype is None: outputDtype = inputMov.dtype.str - if len(inputMov.shape) != 3: raise ValueError, 'Input movie should be 3-d!'
+ if len(inputMov.shape) != 3: raise ValueError('Input movie should be 3-d!') frameNum = inputMov.shape[0] if outputPath[-4:] != '.npy': outputPath += '.npy' - if verbose: print '\nInput movie shape:', inputMov.shape + if verbose: print('\nInput movie shape:', inputMov.shape) chunkNum = frameNum // chunkLength if frameNum % chunkLength == 0: if verbose: - print 'Translating in chunks: ' + str(chunkNum) + ' x ' + str(chunkLength) + ' frame(s)' + print('Translating in chunks: ' + str(chunkNum) + ' x ' + str(chunkLength) + ' frame(s)') else: chunkNum += 1 - if verbose: print 'Translating in chunks: ' + str(chunkNum - 1) + ' x ' + str( - chunkLength) + ' frame(s)' + ' + ' + str(frameNum % chunkLength) + ' frame(s)' + if verbose: print('Translating in chunks: ' + str(chunkNum - 1) + ' x ' + str( + chunkLength) + ' frame(s)' + ' + ' + str(frameNum % chunkLength) + ' frame(s)') frameT1 = translateMovieByVasculature(inputMov[0, :, :], parameterPath=parameterPath, matchingDecimation=matchingDecimation, @@ -115,7 +246,7 @@ def translateHugeMovieByVasculature(inputPath, outputPath, parameterPath, output plt.imshow(frameT1, cmap='gray') plt.show() - if verbose: print 'Output movie shape:', (frameNum, frameT1.shape[0], frameT1.shape[1]), '\n' + if verbose: print('Output movie shape:', (frameNum, frameT1.shape[0], frameT1.shape[1]), '\n') with open(outputPath, 'wb') as f: np.lib.format.write_array_header_1_0(f, {'descr': outputDtype, 'fortran_order': False, @@ -126,8 +257,8 @@ def translateHugeMovieByVasculature(inputPath, outputPath, parameterPath, output indEnd = (i + 1) * chunkLength if indEnd > frameNum: indEnd = frameNum currMov = inputMov[indStart:indEnd, :, :] - if verbose: print 'Translating frame ' + str(indStart) + ' to frame ' + str(indEnd) + '.\t' + str( - i * 100. / chunkNum) + '%' + if verbose: print('Translating frame ' + str(indStart) + ' to frame ' + str(indEnd) + '.\t' + str( + i * 100. / chunkNum) + '%') currMovT = translateMovieByVasculature(currMov, parameterPath=parameterPath, matchingDecimation=matchingDecimation, referenceDecimation=referenceDecimation, verbose=False) @@ -147,38 +278,26 @@ def segmentPhotodiodeSignal(pd, digitizeThr=0.9, filterSize=0.01, segmentThr=0.0 :return: ''' - # plot_r = [2000., 3000.] - # - # plt.plot(pd[int(plot_r[0]*Fs):int(plot_r[1]*Fs)]) - # plt.title('pd raw') - # plt.show() - pdDigitized = np.array(pd) + # plt.plot(pdDigitized[0: 100 * 30000]) + # plt.show() - pdDigitized[pd < digitizeThr] = 0. + pdDigitized[pd < digitizeThr] = 0.; pdDigitized[pd >= digitizeThr] = 5. 
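As a quick orientation to the digitize-smooth-differentiate pipeline being reworked in this hunk, here is a minimal sketch of driving segmentPhotodiodeSignal on a synthetic trace. The full default list is truncated in the hunk header above, so the segmentThr value and the Fs keyword name are assumptions inferred from the function body, not confirmed by this diff:

    import numpy as np
    import corticalmapping.HighLevel as hl

    # synthetic 30 kHz photodiode trace: 0 V baseline with three 0.5 s
    # display periods at 5 V starting at 1 s, 3 s and 5 s
    Fs = 30000.
    pd = np.zeros(int(7 * Fs))
    for onset_sec in (1., 3., 5.):
        start = int(onset_sec * Fs)
        pd[start: start + int(0.5 * Fs)] = 5.

    # digitize -> gaussian filter -> diff -> detect rising onsets
    onsets = hl.segmentPhotodiodeSignal(pd, digitizeThr=0.9, filterSize=0.01,
                                        segmentThr=0.02, Fs=Fs)
    print('detected display onsets (sec):', onsets)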
- - # plt.plot(pdDigitized[int(plot_r[0]*Fs):int(plot_r[1]*Fs)]) - # plt.title('pd digitized') + # plt.plot(pdDigitized[0: 100 * 30000]) # plt.show() filterDataPoint = int(filterSize * Fs) - # print filterDataPoint pdFiltered = ni.filters.gaussian_filter(pdDigitized, filterDataPoint) - - # plt.plot(pdFiltered[int(plot_r[0]*Fs):int(plot_r[1]*Fs)]) - # plt.title('pd filtered') - # plt.show() - pdFilteredDiff = np.diff(pdFiltered) pdFilteredDiff = np.hstack(([0], pdFilteredDiff)) pdSignal = np.multiply(pdDigitized, pdFilteredDiff) - - # plt.plot(pdSignal[int(plot_r[0]*Fs):int(plot_r[1]*Fs)]) - # plt.title('pd signal') + # plt.plot(pdSignal[0: 100 * 30000]) # plt.show() + # plt.plot(pdSignal[:1000000]) + # plt.show() displayOnsets = ta.get_onset_timeStamps(pdSignal, Fs, threshold=segmentThr, onsetType='raising') trueDisplayOnsets = [] @@ -191,13 +310,13 @@ def segmentPhotodiodeSignal(pd, digitizeThr=0.9, filterSize=0.01, segmentThr=0.0 trueDisplayOnsets.append(displayOnset) currOnset = displayOnset - print '\nNumber of photodiode onsets:', len(trueDisplayOnsets) + print('\nNumber of photodiode onsets:', len(trueDisplayOnsets)) if verbose: - print '\nDisplay onsets (sec):' - print '\n'.join([str(o) for o in trueDisplayOnsets]) + print('\nDisplay onsets (sec):') + print('\n'.join([str(o) for o in trueDisplayOnsets])) - print '\n' + print('\n') return np.array(trueDisplayOnsets) @@ -242,11 +361,11 @@ def findLogPath(date, # string if (dateTime[0:6] == date) and (mouseID in mouse) and (stimulus in stim) and (userID in user) and ( fileNumber == fileNum) and (ext == '.pkl'): logPathList.append(os.path.join(displayFolder, f)) - print '\n' + '\n'.join(logPathList) + '\n' + print('\n' + '\n'.join(logPathList) + '\n') if len(logPathList) == 0: - raise LookupError, 'Can not find visual display Log.' + raise LookupError('Can not find visual display Log.') elif len(logPathList) > 1: - raise LookupError, 'Find more than one visual display Log!' + raise LookupError('Find more than one visual display Log!') return logPathList[0] @@ -356,26 +475,34 @@ def analysisMappingDisplayLog(display_log): else: raise ValueError('log should be either dictionary or a path string!') + def convert(data): + if isinstance(data, bytes): return data.decode('ascii') + if isinstance(data, dict): return dict(map(convert, data.items())) + if isinstance(data, tuple): return map(convert, data) + return data + + log = convert(log) # convert bytestrings to strings (a py2to3 issue) + # check display order - if log['presentation']['displayOrder'] == -1: raise ValueError, 'Display order is -1 (should be 1)!' 
+ if log['presentation']['displayOrder'] == -1: raise ValueError('Display order is -1 (should be 1)!') refreshRate = float(log['monitor']['refreshRate']) # check display visual frame interval interFrameInterval = np.mean(np.diff(log['presentation']['timeStamp'])) - if interFrameInterval > (1.01 / refreshRate): raise ValueError, 'Mean visual display too long: ' + str( - interFrameInterval) + 'sec' # check display - if interFrameInterval < (0.99 / refreshRate): raise ValueError, 'Mean visual display too short: ' + str( - interFrameInterval) + 'sec' # check display + if interFrameInterval > (1.01 / refreshRate): raise ValueError('Mean visual display too long: ' + str( + interFrameInterval) + 'sec') # check display + if interFrameInterval < (0.99 / refreshRate): raise ValueError('Mean visual display too short: ' + str( + interFrameInterval) + 'sec') # check display # get sweep start time relative to display onset try: startTime = -1 * log['stimulation']['preGapDur'] except KeyError: startTime = -1 * log['stimulation']['preGapFrameNum'] / log['monitor']['refreshRate'] - print 'Movie chunk start time relative to sweep onset:', startTime, 'sec' - displayInfo['B2U']['startTime'] = startTime + print('Movie chunk start time relative to sweep onset:', startTime, 'sec') + displayInfo['B2U']['startTime'] = startTime; displayInfo['U2B']['startTime'] = startTime - displayInfo['L2R']['startTime'] = startTime + displayInfo['L2R']['startTime'] = startTime; displayInfo['R2L']['startTime'] = startTime # get basic information @@ -383,44 +510,44 @@ def analysisMappingDisplayLog(display_log): displayIter = log['presentation']['displayIteration'] sweepTable = log['stimulation']['sweepTable'] dirList = [] - B2Uframes = [] - U2Bframes = [] - L2Rframes = [] + B2Uframes = []; + U2Bframes = []; + L2Rframes = []; R2Lframes = [] # parcel frames for each direction for frame in frames: currDir = frame[4] if currDir not in dirList: dirList.append(currDir) - if currDir == 'B2U': + if currDir == b'B2U': B2Uframes.append(frame) - elif currDir == 'U2B': + elif currDir == b'U2B': U2Bframes.append(frame) - elif currDir == 'L2R': + elif currDir == b'L2R': L2Rframes.append(frame) - elif currDir == 'R2L': + elif currDir == b'R2L': R2Lframes.append(frame) # get sweep order indices for each direction dirList = dirList * displayIter - displayInfo['B2U']['ind'] = [ind for ind, dir in enumerate(dirList) if dir == 'B2U'] - print 'B2U sweep order indices:', displayInfo['B2U']['ind'] - displayInfo['U2B']['ind'] = [ind for ind, dir in enumerate(dirList) if dir == 'U2B'] - print 'U2B sweep order indices:', displayInfo['U2B']['ind'] - displayInfo['L2R']['ind'] = [ind for ind, dir in enumerate(dirList) if dir == 'L2R'] - print 'L2R sweep order indices:', displayInfo['L2R']['ind'] - displayInfo['R2L']['ind'] = [ind for ind, dir in enumerate(dirList) if dir == 'R2L'] - print 'R2L sweep order indices:', displayInfo['R2L']['ind'] + displayInfo['B2U']['ind'] = [ind for ind, adir in enumerate(dirList) if adir == b'B2U'] + print('B2U sweep order indices:', displayInfo['B2U']['ind']) + displayInfo['U2B']['ind'] = [ind for ind, adir in enumerate(dirList) if adir == b'U2B'] + print('U2B sweep order indices:', displayInfo['U2B']['ind']) + displayInfo['L2R']['ind'] = [ind for ind, adir in enumerate(dirList) if adir == b'L2R'] + print('L2R sweep order indices:', displayInfo['L2R']['ind']) + displayInfo['R2L']['ind'] = [ind for ind, adir in enumerate(dirList) if adir == b'R2L'] + print('R2L sweep order indices:', displayInfo['R2L']['ind']) # get sweep 
duration for each direction displayInfo['B2U']['sweepDur'] = len(B2Uframes) / refreshRate - print 'Chunk duration for B2U sweeps:', displayInfo['B2U']['sweepDur'], 'sec' + print('Chunk duration for B2U sweeps:', displayInfo['B2U']['sweepDur'], 'sec') displayInfo['U2B']['sweepDur'] = len(U2Bframes) / refreshRate - print 'Chunk duration for U2B sweeps:', displayInfo['U2B']['sweepDur'], 'sec' + print('Chunk duration for U2B sweeps:', displayInfo['U2B']['sweepDur'], 'sec') displayInfo['L2R']['sweepDur'] = len(L2Rframes) / refreshRate - print 'Chunk duration for L2R sweeps:', displayInfo['L2R']['sweepDur'], 'sec' + print('Chunk duration for L2R sweeps:', displayInfo['L2R']['sweepDur'], 'sec') displayInfo['R2L']['sweepDur'] = len(R2Lframes) / refreshRate - print 'Chunk duration for R2L sweeps:', displayInfo['R2L']['sweepDur'], 'sec' + print('Chunk duration for R2L sweeps:', displayInfo['R2L']['sweepDur'], 'sec') # get phase position slopes and intercepts for each direction displayInfo['B2U']['slope'], displayInfo['B2U']['intercept'] = rm.getPhasePositionEquation2(B2Uframes, sweepTable) @@ -475,7 +602,7 @@ def analyzeSparseNoiseDisplayLog(logPath): def getAverageDfMovie(movPath, frameTS, onsetTimes, chunkDur, startTime=0., temporalDownSampleRate=1, - is_load_all=False): + is_load_all=True): ''' :param movPath: path to the image movie :param frameTS: the timestamps for each frame of the raw movie @@ -491,14 +618,14 @@ def getAverageDfMovie(movPath, frameTS, onsetTimes, chunkDur, startTime=0., temp elif temporalDownSampleRate > 1: frameTS_real = frameTS[::temporalDownSampleRate] else: - raise ValueError, 'temporal downsampling rate can not be less than 1!' + raise ValueError('temporal downsampling rate cannot be less than 1!') if is_load_all: if movPath[-4:] == '.npy': try: mov = np.load(movPath) except ValueError: - print 'Cannot load the entire npy file into memroy. Trying BinarySlicer...' + print('Cannot load the entire npy file into memory. Trying BinarySlicer...') mov = BinarySlicer(movPath) elif movPath[-4:] == '.tif': mov = tf.imread(movPath) @@ -537,7 +664,7 @@ def getAverageDfMovieFromH5Dataset(dset, frameTS, onsetTimes, chunkDur, startTim elif temporalDownSampleRate > 1: frameTS_real = frameTS[::temporalDownSampleRate] else: - raise ValueError, 'temporal downsampling rate can not be less than 1!' + raise ValueError('temporal downsampling rate cannot be less than 1!') aveMov, n = ia.get_average_movie(dset, frameTS_real, onsetTimes + startTime, chunkDur, isReturnN=True) @@ -578,17 +705,17 @@ def getMappingMovies(movPath, frameTS, displayOnsets, displayInfo, temporalDownS elif FFTmode == 'valley': isReverse = True else: - raise LookupError, 'FFTmode should be either "peak" or "valley"!' + raise LookupError('FFTmode should be either "peak" or "valley"!') for dir in ['B2U', 'U2B', 'L2R', 'R2L']: - print '\nAnalyzing sweeps with direction:', dir + print('\nAnalyzing sweeps with direction:', dir) onsetInd = list(displayInfo[dir]['ind']) for ind in displayInfo[dir]['ind']: if ind >= len(displayOnsets): - print 'Visual Stimulation Direction:' + dir + ' index:' + str( - ind) + ' was not displayed. Remove from averageing.' + print('Visual Stimulation Direction:' + dir + ' index:' + str( + ind) + ' was not displayed. Remove from averaging.') onsetInd.remove(ind) aveMov, aveMovNor = getAverageDfMovie(movPath=movPath, @@ -599,6 +726,9 @@ def getMappingMovies(movPath, frameTS, displayOnsets, displayInfo, temporalDownS temporalDownSampleRate=temporalDownSampleRate, is_load_all=is_load_all) + print('aveMov.shape = ' + str(aveMov.shape)) + print('aveMovNor.shape = ' + str(aveMovNor.shape)) + if isRectify: aveMovNorRec = np.array(aveMovNor) aveMovNorRec[aveMovNorRec < 0.] = 0. @@ -651,7 +781,7 @@ def regression_detrend(mov, roi, verbose=True): """ if len(mov.shape) != 3: - raise (ValueError, 'Input movie should be 3-dimensional!') + raise ValueError('Input movie should be 3-dimensional!') roi = ia.WeightedROI(roi) trend = roi.get_weighted_trace(mov) @@ -664,7 +794,7 @@ def regression_detrend(mov, roi, verbose=True): n = 0 - for i, j in itertools.product(range(mov.shape[1]), range(mov.shape[2])): + for i, j in itertools.product(list(range(mov.shape[1])), list(range(mov.shape[2]))): pixel_trace = mov[:, i, j] slope, intercept, r_value, p_value, stderr = stats.linregress(trend, pixel_trace) slopes[i, j] = slope @@ -673,7 +803,7 @@ def regression_detrend(mov, roi, verbose=True): if verbose: if n % (pixel_num // 10) == 0: - print 'progress:', int(round(float(n) * 100 / pixel_num)), '%' + print('progress:', int(round(float(n) * 100 / pixel_num)), '%') n += 1 return mov_new, trend, slopes, rvalues @@ -715,7 +845,7 @@ def neural_pil_subtraction(trace_center, trace_surround, lam=0.05): ns.fit() # ns.fit_block_coordinate_desc() - return ns.r, ns.error, trace_center - (ns.r * trace_surround) + return ns.r, ns.error, trace_center - ns.r * trace_surround def get_lfp(trace, fs=30000., notch_base=60., notch_bandwidth=1., notch_harmonics=4, notch_order=2, @@ -770,7 +900,7 @@ def array_to_rois(input_folder, overlap_threshold=0.9, neuropil_limit=(5, 10), i center_masks.update(curr_masks) center_mask_array = [] - for mask in center_masks.values(): + for mask in list(center_masks.values()): center_mask_array.append(mask) center_mask_array = np.array(center_mask_array, dtype=np.uint8) @@ -779,7 +909,7 @@ def array_to_rois(input_folder, overlap_threshold=0.9, neuropil_limit=(5, 10), i duplicates = ms.detect_duplicates(overlap_threshold=overlap_threshold) # print 'number of duplicates:', len(duplicates) if len(duplicates) > 0: - inds = duplicates.keys() + inds = list(duplicates.keys()) center_mask_array = np.array([center_mask_array[i] for i in range(len(center_mask_array)) if i not in inds]) # removing unions @@ -787,7 +917,7 @@ def array_to_rois(input_folder, overlap_threshold=0.9, neuropil_limit=(5, 10), i unions = ms.detect_unions() # print 'number of unions:', len(unions) if len(unions) > 0: - inds = unions.keys() + inds = list(unions.keys()) center_mask_array = np.array([center_mask_array[i] for i in range(len(center_mask_array)) if i not in inds]) # get total mask @@ -888,7 +1018,7 @@ def concatenate_nwb_files(path_list, save_path, gap_dur=100., roi_path=None, is_ for i, curr_path in enumerate(path_list): - print('\n\nprocessing ' + curr_path + '...') + print(('\n\nprocessing ' + curr_path + '...')) curr_f = h5py.File(curr_path, 'r') @@ -913,7 +1043,7 @@ def concatenate_nwb_files(path_list, save_path, gap_dur=100., roi_path=None, is_ pixel_size_unit = None roi_grp = curr_f[roi_path] - for roi_n in roi_grp.keys(): + for roi_n in list(roi_grp.keys()): if roi_n[0: 4] == 'roi_' and roi_n != 'roi_list': roi_dict_center.update({roi_n: ia.ROI(roi_grp[roi_n]['img_mask'].value, pixelSize=pixel_size, pixelSizeUnit=pixel_size_unit)}) @@ -922,12 +1052,12 @@ def
concatenate_nwb_files(path_list, save_path, gap_dur=100., roi_path=None, is_ pixelSizeUnit=pixel_size_unit)}) else: # print('avoid loading {}/{} as an roi.'.format(roi_path, roi_n)) - print('avoid loading {} as an roi.'.format(roi_n)) + print(('avoid loading {} as an roi.'.format(roi_n))) # check analog start time all_chs_grp = curr_f['acquisition/timeseries'] - for curr_chn, curr_ch_grp in all_chs_grp.items(): - if 'starting_time' in curr_ch_grp.keys(): + for curr_chn, curr_ch_grp in list(all_chs_grp.items()): + if 'starting_time' in list(curr_ch_grp.keys()): total_analog_sample_count = curr_ch_grp['num_samples'].value if curr_ch_grp['starting_time'].value != 0: raise ValueError('starting time of analog channel: {} is not 0.'.format(curr_chn)) @@ -972,7 +1102,7 @@ def concatenate_nwb_files(path_list, save_path, gap_dur=100., roi_path=None, is_ except Exception: pass for curr_unit_n in curr_unit_ns: - if curr_unit_n in curr_ug_dict.keys(): + if curr_unit_n in list(curr_ug_dict.keys()): curr_ug_dict[curr_unit_n].append(curr_ug_grp[curr_unit_n]['times'].value + next_start) else: curr_ug_dict[curr_unit_n] = [curr_ug_grp[curr_unit_n]['times'].value + next_start] @@ -995,7 +1125,7 @@ def concatenate_nwb_files(path_list, save_path, gap_dur=100., roi_path=None, is_ print('\nsaving rois ...') roi_grp = save_f.create_group('rois') if roi_dict_center: - for curr_roi_n_c, curr_roi_c in roi_dict_center.items(): + for curr_roi_n_c, curr_roi_c in list(roi_dict_center.items()): curr_roi_grp = roi_grp.create_group(curr_roi_n_c) curr_roi_grp_c = curr_roi_grp.create_group('center') curr_roi_c.to_h5_group(curr_roi_grp_c) @@ -1006,26 +1136,26 @@ def concatenate_nwb_files(path_list, save_path, gap_dur=100., roi_path=None, is_ curr_roi_grp_s = curr_roi_grp.create_group('surround') curr_roi_s.to_h5_group(curr_roi_grp_s) except Exception as e: - print('error in saving surrounding roi: {}. \nError message: {}'.format(curr_roi_n_c, e)) + print(('error in saving surrounding roi: {}. 
\nError message: {}'.format(curr_roi_n_c, e))) if analog_chs or is_save_running: print('\nsaving analog channels ...') - for curr_analog_n, curr_analog_trace_list in analog_dict.items(): - print curr_analog_n + for curr_analog_n, curr_analog_trace_list in list(analog_dict.items()): + print(curr_analog_n) curr_analog_trace_all = np.concatenate(curr_analog_trace_list, axis=0) save_f['analog_' + curr_analog_n] = curr_analog_trace_all if unit_groups: for unit_group in unit_groups: - print('\nsaving ephys units for unit group: {}'.format(unit_group)) + print(('\nsaving ephys units for unit group: {}'.format(unit_group))) save_units_grp = save_f.create_group(unit_group) - for unit_n, unit_ts in unit_dict[unit_group].items(): + for unit_n, unit_ts in list(unit_dict[unit_group].items()): save_unit_grp = save_units_grp.create_group(unit_n) save_unit_grp.create_dataset('timestamps', data=np.concatenate(unit_ts, axis=0)) if time_series_paths: print('\nsaving other time series ...') - for save_ts_n, save_ts_dict in time_series_dict.items(): + for save_ts_n, save_ts_dict in list(time_series_dict.items()): print(save_ts_n) save_ts_grp = save_f.create_group(save_ts_n) curr_ts_data = save_ts_dict['data'] @@ -1138,7 +1268,7 @@ def get_drifting_grating_dataframe(dgr_grp, sweep_dur): bl_bins = t <= 0 res_bins = (t > 0) & (t <= sweep_dur) - gratings = dgr_grp.keys() + gratings = list(dgr_grp.keys()) dg_df = pd.DataFrame(columns=['n', 'sf', 'tf', 'dir', 'con', 'radius', 'baseline', 'baseline_std', 'F0', 'F0_std', 'F1', 'F1_std', 'F2', 'F2_std']) @@ -1258,8 +1388,7 @@ def generate_strf_from_timestamps(unit_ts, squares_ts_grp, unit_n='', sta_start= :param unit_ts: 1d array, spike timestamps, should be monotonic increasing :param squares_ts_grp: h5py group object, containing the timestamps of each square displayed, this should be the - output of corticalmapping.NwbTools.RecordedFile.analyze_visual_stimuli_corticalmapping() - function + output of corticalmapping.NwbTools.RecordedFile.analyze_visual_stimuli() function :param unit_n: str, name of the unit :param sta_start: float, stimulus triggered average start time relative to stimulus onset :param sta_end: float, stimulus triggered average end time relative to stimulus onset @@ -1271,7 +1400,7 @@ def generate_strf_from_timestamps(unit_ts, squares_ts_grp, unit_n='', sta_start= t = np.arange((sta_start + bin_width / 2), (sta_end + bin_width / 2), bin_width) t = np.round(t * 100000) / 100000 - all_squares = squares_ts_grp.keys() + all_squares = list(squares_ts_grp.keys()) traces = [] locations = [] signs = [] @@ -1302,8 +1431,7 @@ def generate_strf_from_continuous(continuous, continuous_ts, squares_ts_grp, roi :param continuous_ts: 1d array, timestamp series for the continuous, monotonically increasing, should have same size as continuous :param squares_ts_grp: h5py group object, containing the timestamps of each square displayed, this should be the - output of corticalmapping.NwbTools.RecordedFile.analyze_visual_stimuli_corticalmapping() - function + output of corticalmapping.NwbTools.RecordedFile.analyze_visual_stimuli() function :param roi_n: str, name of the roi :param sta_start: float, stimulus triggered average start time relative to stimulus onset :param sta_end: float, stimulus triggered average end time relative to stimulus onset @@ -1316,7 +1444,7 @@ def generate_strf_from_continuous(continuous, continuous_ts, squares_ts_grp, roi chunk_frame_start = int(np.floor(sta_start / mean_frame_dur)) t = (np.arange(chunk_frame_dur) + chunk_frame_start) * 
mean_frame_dur - all_squares = squares_ts_grp.keys() + all_squares = list(squares_ts_grp.keys()) traces = [] # square x trial x t locations = [] @@ -1345,321 +1473,7 @@ def generate_strf_from_continuous(continuous, continuous_ts, squares_ts_grp, roi return sca.SpatialTemporalReceptiveField(locations, signs, traces, t, name=roi_n, trace_data_type='df/f') -def get_drifting_grating_response_nwb(nwb_path, plane_ns, grating_onsets_path, time_window): - """ - extract and response table for drifting_gratings from a nwb file. The response table will be saved in the /analysis - group. - - :param nwb_path: str, path to the nwb file - :param plane_ns: list of strings, plane names for multi-plane imaging - :param grating_onsets_path: str, hdf5 group path to the grating_onset timestamps - :param time_window: tuple/list of two floats, start and end time relative to grating onset - :return: None - """ - - def get_sta(arr, arr_ts, trigger_ts, frame_start, frame_end): - - sta_arr = [] - - for trig in trigger_ts: - trig_ind = ta.find_nearest(arr_ts, trig) - curr_sta = arr[:, (trig_ind + frame_start) : (trig_ind + frame_end)] - sta_arr.append(curr_sta.reshape((curr_sta.shape[0], 1, curr_sta.shape[1]))) - - sta_arr = np.concatenate(sta_arr, axis=1) - return sta_arr - - - if time_window[0] >= time_window[1]: - raise ValueError('time window should be from early time to late time.') - - nwb_f = h5py.File(nwb_path) - - res_grp = nwb_f['analysis'].create_group('response_table_drifting_grating') - - - grating_ns = nwb_f[grating_onsets_path].keys() - grating_ns.sort() - - for plane_n in plane_ns: - print(plane_n) - - res_grp_plane = res_grp.create_group(plane_n) - - trace_ts = nwb_f['processing/motion_correction/MotionCorrection/' + plane_n + '/corrected/timestamps'] - - traces = {} - traces['global_dff_center'] = nwb_f['processing/rois_and_traces_' + plane_n + '/DfOverF/dff_center/data'].value - traces['f_center_demixed'] = nwb_f['processing/rois_and_traces_' + plane_n + '/Fluorescence/f_center_demixed/data'].value - traces['f_center_raw'] = nwb_f['processing/rois_and_traces_' + plane_n + '/Fluorescence/f_center_raw/data'].value - traces['f_center_subtracted'] = nwb_f['processing/rois_and_traces_' + plane_n + '/Fluorescence/f_center_subtracted/data'].value - traces['f_surround_raw'] = nwb_f['processing/rois_and_traces_' + plane_n + '/Fluorescence/f_surround_raw/data'].value - - frame_dur = np.mean(np.diff(trace_ts)) - frame_start = int(time_window[0] // frame_dur) - frame_end = int(time_window[1] // frame_dur) - t_axis = np.arange(frame_end - frame_start) * frame_dur + time_window[0] - - res_grp_plane.attrs['sta_timestamps'] = t_axis - - for grating_n in grating_ns: - - onsets_grating_grp = nwb_f[grating_onsets_path + '/' + grating_n] - - curr_grating_grp = res_grp_plane.create_group(grating_n) - for key, value in onsets_grating_grp.items(): - if key not in ['data', 'num_samples', 'timestamps']: - curr_grating_grp.attrs[key] = value.value - curr_grating_grp.attrs['sta_traces_dimenstion'] = 'roi x trial x timepoint' - - grating_onsets = onsets_grating_grp['timestamps'].value - for trace_n, trace in traces.items(): - sta = get_sta(arr=trace, arr_ts=trace_ts, trigger_ts=grating_onsets, frame_start=frame_start, - frame_end=frame_end) - curr_grating_grp.create_dataset('sta_' + trace_n, data=sta) - - -def plot_roi_traces_three_planes(nwb_f, roi0=None, roi1=None, roi2=None, trace_type='f_center_raw'): - - f = plt.figure(figsize=(10, 10)) - - ax0_img = f.add_axes([0.04, 0.67, 0.32, 0.32]) - ax0_img.set_xticks([]) - 
ax0_img.set_yticks([]) - ax0_img.set_ylabel('plane0, {}'.format(roi0), fontsize=15) - bg0 = nwb_f['processing/rois_and_traces_plane0/ImageSegmentation/imaging_plane' \ - '/reference_images/max_projection/data'].value - bg0 = ia.array_nor(bg0) - ax0_img.imshow(bg0, vmin=0, vmax=0.8, cmap='gray', interpolation='nearest') - - ax0_trace0 = f.add_axes([0.37, 0.88, 0.62, 0.1]) - ax0_trace0.set_axis_off() - - ax0_trace1 = f.add_axes([0.37, 0.78, 0.62, 0.1]) - ax0_trace1.set_axis_off() - - ax0_trace2 = f.add_axes([0.37, 0.68, 0.62, 0.1]) - ax0_trace2.set_axis_off() - - if roi0 is not None: - roi0_mask = nwb_f['processing/rois_and_traces_plane0/ImageSegmentation/imaging_plane' \ - '/{}/img_mask'.format(roi0)].value - pt.plot_mask_borders(mask=roi0_mask, plotAxis=ax0_img, lw=0.5) - - roi0_ind = int(roi0[-4:]) - roi0_trace = nwb_f['processing/rois_and_traces_plane0/Fluorescence/{}' \ - '/data'.format(trace_type)][roi0_ind, :] - chunk_len = len(roi0_trace) // 3 - ax0_trace0.plot(roi0_trace[0:chunk_len]) - ax0_trace1.plot(roi0_trace[chunk_len:2*chunk_len]) - ax0_trace2.plot(roi0_trace[2*chunk_len:3*chunk_len]) - - ax1_img = f.add_axes([0.04, 0.34, 0.32, 0.32]) - ax1_img.set_xticks([]) - ax1_img.set_yticks([]) - ax1_img.set_ylabel('plane1, {}'.format(roi1), fontsize=15) - bg1 = nwb_f['processing/rois_and_traces_plane1/ImageSegmentation/imaging_plane' \ - '/reference_images/max_projection/data'].value - bg1 = ia.array_nor(bg1) - ax1_img.imshow(bg1, vmin=0, vmax=0.8, cmap='gray', interpolation='nearest') - - ax1_trace0 = f.add_axes([0.37, 0.55, 0.62, 0.1]) - ax1_trace0.set_axis_off() - - ax1_trace1 = f.add_axes([0.37, 0.45, 0.62, 0.1]) - ax1_trace1.set_axis_off() - - ax1_trace2 = f.add_axes([0.37, 0.35, 0.62, 0.1]) - ax1_trace2.set_axis_off() - - if roi1 is not None: - roi1_mask = nwb_f['processing/rois_and_traces_plane1/ImageSegmentation/imaging_plane' \ - '/{}/img_mask'.format(roi1)].value - pt.plot_mask_borders(mask=roi1_mask, plotAxis=ax1_img, lw=0.5) - - roi1_ind = int(roi1[-4:]) - roi1_trace = nwb_f['processing/rois_and_traces_plane1/Fluorescence/{}' \ - '/data'.format(trace_type)][roi1_ind, :] - chunk_len = len(roi1_trace) // 3 - ax1_trace0.plot(roi1_trace[0:chunk_len]) - ax1_trace1.plot(roi1_trace[chunk_len:2 * chunk_len]) - ax1_trace2.plot(roi1_trace[2 * chunk_len:3 * chunk_len]) - - ax2_img = f.add_axes([0.04, 0.01, 0.32, 0.32]) - ax2_img.set_xticks([]) - ax2_img.set_yticks([]) - ax2_img.set_ylabel('plane2, {}'.format(roi2), fontsize=15) - bg2 = nwb_f['processing/rois_and_traces_plane2/ImageSegmentation/imaging_plane' \ - '/reference_images/max_projection/data'].value - bg2 = ia.array_nor(bg2) - ax2_img.imshow(bg2, vmin=0, vmax=0.8, cmap='gray', interpolation='nearest') - - ax2_trace0 = f.add_axes([0.37, 0.22, 0.62, 0.1]) - ax2_trace0.set_axis_off() - - ax2_trace1 = f.add_axes([0.37, 0.12, 0.62, 0.1]) - ax2_trace1.set_axis_off() - - ax2_trace2 = f.add_axes([0.37, 0.02, 0.62, 0.1]) - ax2_trace2.set_axis_off() - - if roi2 is not None: - roi2_mask = nwb_f['processing/rois_and_traces_plane2/ImageSegmentation/imaging_plane' \ - '/{}/img_mask'.format(roi2)].value - pt.plot_mask_borders(mask=roi2_mask, plotAxis=ax2_img, lw=0.5) - - roi2_ind = int(roi2[-4:]) - roi2_trace = nwb_f['processing/rois_and_traces_plane2/Fluorescence/{}' \ - '/data'.format(trace_type)][roi2_ind, :] - chunk_len = len(roi2_trace) // 3 - ax2_trace0.plot(roi2_trace[0:chunk_len]) - ax2_trace1.plot(roi2_trace[chunk_len:2 * chunk_len]) - ax2_trace2.plot(roi2_trace[2 * chunk_len:3 * chunk_len]) - - # plt.show() - - return f - - -def 
get_masks_from_caiman(spatial_com, dims, thr=0, thr_method='nrg', swap_dim=False): - """ - Gets masks of spatial components results generated the by the CaImAn segmentation - - this function is stripped out from the caiman.utils.visualization.get_contours(). only works for 2d spatial - components. - - Args: - spatial_com: np.ndarray or sparse matrix, mostly will be the caiman.source_extraction.cnmf.estimates.A - 2d Matrix of Spatial components, each row is a flattened pixel (order 'F'), each column - is a spatial component - dims: tuple of ints - Spatial dimensions of movie (row, col) - thr: scalar between 0 and 1 - Energy threshold for computing contours (default 0.9) - if thr_method is 'nrg': higher thr will make bigger hole inside the mask - if thr_method is 'max': (usually does not work very well), higher thr will make smaller mask - near the center. - thr_method: [optional] string - Method of thresholding: - 'max' sets to zero pixels that have value less than a fraction of the max value - 'nrg' keeps the pixels that contribute up to a specified fraction of the energy - swap_dim: if True, flattened 2d array will be reshaped by order 'C', otherwise with order 'F'. - Returns: - masks: 3d array, dtype=np.float, spatial component x row x col - """ - - if 'csc_matrix' not in str(type(spatial_com)): - spatial_com = sparse.csc_matrix(spatial_com) - - if len(spatial_com.shape) != 2: - raise ValueError('input "spatial_com" should be a 2d array or 2d sparse matrix.') - - n_mask = spatial_com.shape[1] - - if len(dims) != 2: - raise ValueError("input 'dims' should have two entries: (num_row, num_col).") - - if dims[0] * dims[1] != spatial_com.shape[0]: - raise ValueError("the product of dims[0] and dims[1] ({} x {}) should be equal to the first dimension " - "of the input 'spatial_com'.".format(dims[0], dims[1], spatial_com.shape[0])) - - masks = [] - - # # get the center of mass of neurons( patches ) - # cm = com(A, *dims) - - # for each patches - for i in range(n_mask): - # we compute the cumulative sum of the energy of the Ath component that has been ordered from least to highest - patch_data = spatial_com.data[spatial_com.indptr[i]:spatial_com.indptr[i + 1]] - indx = np.argsort(patch_data)[::-1] - if thr_method == 'nrg': - cumEn = np.cumsum(patch_data[indx] ** 2) - # we work with normalized values - cumEn /= cumEn[-1] - Bvec = np.ones(spatial_com.shape[0]) - # we put it in a similar matrix - Bvec[spatial_com.indices[spatial_com.indptr[i]:spatial_com.indptr[i + 1]][indx]] = cumEn - else: - if thr_method != 'max': - print('Unknown threshold method {}. should be either "max" or "nrg". ' - 'Choosing "max".'.format(thr_method)) - Bvec = np.zeros(spatial_com.shape[0]) - Bvec[spatial_com.indices[spatial_com.indptr[i]: - spatial_com.indptr[i + 1]]] = patch_data / patch_data.max() - if swap_dim: - Bmat = np.reshape(Bvec, dims, order='C') - mask = np.array(spatial_com[:, i].todense().reshape(dims, order='C')) - else: - Bmat = np.reshape(Bvec, dims, order='F') - mask = np.array(spatial_com[:, i].todense().reshape(dims, order='F')) - - Bmat[Bmat >= thr] = 1. - Bmat[Bmat < thr] = 0. - - masks.append(mask * Bmat) - - return np.array(masks) - - -def threshold_mask_by_energy(mask, sigma=1., thr_high=0.0, thr_low=0.1): - """ - threshold a weighted mask by reversed accumulative energy. Use this to treat masks spit out by caiman - segmentation. 
- :param mask: 2d array - :param sigma: float, 2d gaussian filter sigma - :param thr_high: float, 0 - 1, bigger thr_high will make bigger hole inside the roi - :param thr_low: float, 0 - 1, bigger thr_low will make smaller roi around the center - :return: 2d array thresholded mask - """ - - if len(mask.shape) != 2: - raise ValueError('input "mask" should be a 2d array.') - - if sigma is not None: - mask = ni.gaussian_filter(mask, sigma=sigma) - - mask = ia.array_nor(mask) - mask_s = mask.flatten() - - indx_low = np.argsort(mask_s) - cum_eng_low = np.cumsum(mask_s[indx_low] ** 2) - cum_eng_low /= cum_eng_low[-1] - mask_eng_low = np.ones(mask_s.shape, dtype=np.float) - mask_eng_low[indx_low] = cum_eng_low - mask_eng_low = mask_eng_low.reshape(mask.shape) - - indx_high = np.argsort(mask_s)[::-1] - cum_eng_high = np.cumsum(mask_s[indx_high] ** 2) - cum_eng_high /= cum_eng_high[-1] - mask_eng_high = np.ones(mask_s.shape, dtype=np.float) - mask_eng_high[indx_high] = cum_eng_high - mask_eng_high = mask_eng_high.reshape(mask.shape) - - mask_bin = np.ones(mask.shape) - - mask_bin[mask_eng_high < thr_high] = 0. - mask_bin[mask_eng_low < thr_low] = 0. - - mask_labeled, mask_num = ni.label(mask_bin, structure=[[1,1,1], [1,1,1], [1,1,1]]) - mask_dict = ia.get_masks(labeled=mask_labeled, keyPrefix='', labelLength=5) - - for key, value in mask_dict.items(): - mask_w = value * mask - mask_w = mask_w / np.amax(mask_w) - mask_dict[key] = ia.WeightedROI(mask_w) - - return mask_dict - - if __name__ == '__main__': - - # =========================================================================== - nwb_f = h5py.File(r"Z:\chandelier_cell_project\M447219\2019-06-25-deepscope\190625_M447219_110.nwb", 'r') - plot_roi_traces_three_planes(nwb_f=nwb_f, roi0='roi_0000', roi1='roi_0004', roi2='roi_0003') - nwb_f.close() - # =========================================================================== - + if __name__ == '__main__': # =========================================================================== # dateRecorded = '150930' # mouseID = '187474' @@ -1841,14 +1655,12 @@ def threshold_mask_by_energy(mask, sigma=1., thr_high=0.0, thr_low=0.1): # =========================================================================== # =========================================================================== - # input_folder = r"\\aibsdata2\nc-ophys\CorticalMapping\IntrinsicImageData" \ - # r"\170404-M302706\2p_movies\for_segmentation\tempdir" - # c, s = array_to_rois(input_folder=input_folder, overlap_threshold=0.9, neuropil_limit=(5, 10), is_plot=True) - # print c.shape - # print s.shape + input_folder = r"\\aibsdata2\nc-ophys\CorticalMapping\IntrinsicImageData" \ + r"\170404-M302706\2p_movies\for_segmentation\tempdir" + c, s = array_to_rois(input_folder=input_folder, overlap_threshold=0.9, neuropil_limit=(5, 10), is_plot=True) + print(c.shape) + print(s.shape) # =========================================================================== - print 'for debug...' - - + print('for debug...') diff --git a/corticalmapping/MotionCorrection.py b/corticalmapping/MotionCorrection.py new file mode 100644 index 0000000..cf3869a --- /dev/null +++ b/corticalmapping/MotionCorrection.py @@ -0,0 +1,336 @@ +__author__ = 'junz' + +import os +import numpy as np +import tifffile as tf +from .core import ImageAnalysis as ia +from .core import FileTools as ft +import matplotlib.pyplot as plt + + +try: import cv2; from .core.ImageAnalysis import rigid_transform_cv2 as rigid_transform +except ImportError as e: print(e); from .core.ImageAnalysis import rigid_transform as rigid_transform + + +plt.ioff() + +def iamstupid(imgMat, imgRef, maxDisplacement=10, normFunc=ia.array_diff): + ''' + + align two images with rigid transformation + + :param imgRef: reference image + :param imgMat: matching image + :param maxDisplacement: maximum displacement, if single value, it will be applied to both rows and columns. if two + values, it will be [rowMaxDisplacement, columnMaxDisplacement] + :param normFunc: the function to calculate the distance between two images + :return: + : offSet: final offSet + : hitLimitFlag: the Flag to mark if the maxDisplacement limit was hit for row and column + ''' + + try: + rowMaxDisplacement = int(abs(maxDisplacement[0])) + columnMaxDisplacement = int(abs(maxDisplacement[1])) + except TypeError: rowMaxDisplacement = columnMaxDisplacement = int(abs(maxDisplacement)) + + #temporary code + # imgMat[imgMat<150]=0 + # imgRef[imgRef<150]=0 + # imgMat[imgMat>400]=400 + # imgRef[imgRef>400]=400 + + prevDis = normFunc(imgRef,imgMat) + prevOffset = [0, 0] + hitLimitFlag = [0, 0] + tryList = [[-1, 0], [0, -1], [1, 0], [0, 1]] + currDisList = np.array([normFunc(imgRef, rigid_transform(imgMat, offset=o, outputShape=imgRef.shape)) for o in tryList]) + currDis = np.min(currDisList) + minInd = np.where(currDisList == currDis)[0] + if len(minInd)>0: minInd = minInd[0] + currOffset = tryList[minInd] + + while currDis < prevDis: + prevDis = currDis; prevOffset = currOffset + + tryList = [] + if abs(prevOffset[0]) != rowMaxDisplacement: + if prevOffset[0] < 0: tryList.append([prevOffset[0]-1, prevOffset[1]]) + elif prevOffset[0] > 0: tryList.append([prevOffset[0]+1, prevOffset[1]]) + else: tryList += [[1, prevOffset[1]],[-1, prevOffset[1]]] + else: hitLimitFlag[0] = 1 + + if abs(prevOffset[1]) != columnMaxDisplacement: + if prevOffset[1] < 0: tryList.append([prevOffset[0], prevOffset[1]-1]) + elif prevOffset[1] > 0: tryList.append([prevOffset[0], prevOffset[1]+1]) + else: tryList += [[prevOffset[0], 1],[prevOffset[0], -1]] + else: hitLimitFlag[1] = 1 + + if len(tryList)>0: + currDisList = np.array([normFunc(imgRef, rigid_transform(imgMat, offset=o, outputShape=imgRef.shape)) for o in tryList]) + currDis = np.min(currDisList) + currOffset = tryList[np.where(currDisList == currDis)[0][0]] + else: break + + return np.array(prevOffset,dtype=np.int), hitLimitFlag + + +def getDistanceList(img, imgRef, normFunc=ia.array_diff, isPlot = False): + ''' + get the list of distances from each frame in img to the reference image, imgRef + normFunc is the function to calculate distance between two frames + ''' + + distanceList = np.zeros(img.shape[0]) + for i in range(img.shape[0]): + distanceList[i] = normFunc(img[i,:,:], imgRef) + if isPlot: + disMedian = np.median(distanceList) + disStd = np.std(distanceList) + f = plt.figure(figsize=(15,8)) + ax1 = f.add_subplot(211); ax1.plot(distanceList), ax1.set_ylim([0,200]);ax1.set_title('distance from mean for each frame') + ax2 = f.add_subplot(212); _ = ax2.hist(distanceList, bins=50, range=(disMedian-3*disStd,disMedian+3*disStd)); ax2.set_title('distribution of distances') + return distanceList, f + else: return distanceList + + +def alignSingleMovie(mov, imgRef, badFrameDistanceThr=100, maxDisplacement=10, normFunc=ia.array_diff, verbose=False, alignOrder=1): + ''' + align the frames in a single
+
+
+def getDistanceList(img, imgRef, normFunc=ia.array_diff, isPlot=False):
+    '''
+    get the list of distances from each frame in img to the reference image, imgRef;
+    normFunc is the function to calculate the distance between two frames
+
+    returns (distanceList, figure) if isPlot is True, otherwise just distanceList
+    '''
+
+    distanceList = np.zeros(img.shape[0])
+    for i in range(img.shape[0]):
+        distanceList[i] = normFunc(img[i, :, :], imgRef)
+    if isPlot:
+        disMedian = np.median(distanceList)
+        disStd = np.std(distanceList)
+        f = plt.figure(figsize=(15, 8))
+        ax1 = f.add_subplot(211)
+        ax1.plot(distanceList)
+        ax1.set_ylim([0, 200])
+        ax1.set_title('distance from mean for each frame')
+        ax2 = f.add_subplot(212)
+        _ = ax2.hist(distanceList, bins=50, range=(disMedian - 3 * disStd, disMedian + 3 * disStd))
+        ax2.set_title('distribution of distances')
+        return distanceList, f
+    else:
+        return distanceList
+
+
+def alignSingleMovie(mov, imgRef, badFrameDistanceThr=100, maxDisplacement=10, normFunc=ia.array_diff,
+                     verbose=False, alignOrder=1):
+    '''
+    align the frames in a single movie to the imgRef
+
+    frames whose distance from the reference image is larger than badFrameDistanceThr will not be used to update
+    the current offset, nor will they be included in the calculation of the new mean projection
+
+    if alignOrder is 1: alignment goes from the first frame to the last
+    if alignOrder is -1: alignment goes from the last frame to the first; this is faster in the
+                         alignSingleMovieLoop function
+
+    return: offsetList, alignedMov, meanFrame
+    '''
+
+    dataType = mov.dtype
+    currOffset = np.array([0, 0]).astype(int)
+    offsetList = []
+    alignedMov = np.empty(mov.shape, dtype=dataType)
+    validFrameNum = []
+
+    if alignOrder == 1:
+        iterFrames = list(range(mov.shape[0]))
+    elif alignOrder == -1:
+        iterFrames = list(range(mov.shape[0]))[::-1]
+    else:
+        raise ValueError('alignOrder should be either 1 or -1!')
+
+    for i in iterFrames:
+        if normFunc(mov[i, :, :], imgRef) <= badFrameDistanceThr:
+            if np.array_equal(currOffset, np.array([0, 0])):
+                initCurrFrame = mov[i, :, :]
+            else:
+                initCurrFrame = rigid_transform(mov[i, :, :], offset=currOffset, outputShape=imgRef.shape)
+            additionalOffset, hitFlag = iamstupid(initCurrFrame, imgRef, maxDisplacement=maxDisplacement,
+                                                  normFunc=normFunc)
+            currOffset = currOffset + additionalOffset
+            alignedMov[i, :, :] = rigid_transform(mov[i, :, :], offset=currOffset, outputShape=imgRef.shape)
+            offsetList.append(currOffset)
+            validFrameNum.append(i)
+            if verbose:
+                print('Frame' + ft.int2str(i, 5) + '\tdistance:' + str(normFunc(mov[i, :, :], imgRef)) +
+                      '\tgood Frame' + '\tOffset:' + str(currOffset))
+        else:
+            alignedMov[i, :, :] = rigid_transform(mov[i, :, :], offset=currOffset, outputShape=imgRef.shape)
+            offsetList.append(currOffset)
+            if verbose:
+                print('Frame' + ft.int2str(i, 5) + '\tdistance:' + str(normFunc(mov[i, :, :], imgRef)) +
+                      '\tbad Frame' + '\tOffset:' + str(currOffset))
+
+    meanFrame = np.mean(alignedMov[np.array(validFrameNum), :, :], axis=0)
+    if alignOrder == -1:
+        offsetList = offsetList[::-1]
+    return offsetList, alignedMov, meanFrame
+
+
+def alignSingleMovieLoop(mov, iterations=2, badFrameDistanceThr=100, maxDisplacement=10, normFunc=ia.array_diff,
+                         verbose=False):
+    '''
+    align a single movie iteratively; each iteration uses the mean frame from the previous iteration as imgRef
+
+    every iteration calls the MotionCorrection.alignSingleMovie function
+
+    the imgRef for the first iteration is the last frame of the movie
+    '''
+
+    if iterations < 1:
+        raise ValueError('iterations should be an integer larger than 0!')
+
+    offsetList, alignedMov, meanFrame = alignSingleMovie(mov, mov[-1, :, :], badFrameDistanceThr=badFrameDistanceThr,
+                                                         maxDisplacement=maxDisplacement, normFunc=normFunc,
+                                                         verbose=verbose, alignOrder=-1)
+
+    if iterations == 1:
+        return offsetList, alignedMov, meanFrame
+    else:
+        allOffsetList = np.array(offsetList)
+        for i in range(iterations - 1):
+            offsetList, alignedMov, meanFrame = alignSingleMovie(alignedMov, meanFrame,
+                                                                 badFrameDistanceThr=badFrameDistanceThr,
+                                                                 maxDisplacement=maxDisplacement,
+                                                                 normFunc=normFunc, verbose=verbose)
+            allOffsetList += offsetList
+        return allOffsetList, alignedMov, meanFrame
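Typical use of the iterative aligner, as a minimal sketch (the tif paths are hypothetical, and tifffile is assumed importable as tf, as elsewhere in this module):

import numpy as np
import tifffile as tf
from corticalmapping.MotionCorrection import alignSingleMovieLoop

mov = tf.imread('test_movie.tif')  # hypothetical path; shape (frames, rows, columns)
offsets, aligned, mean_frame = alignSingleMovieLoop(mov, iterations=2,
                                                    badFrameDistanceThr=100,
                                                    maxDisplacement=10)
tf.imsave('test_movie_corrected.tif', aligned)                    # hypothetical output path
tf.imsave('test_movie_mean.tif', mean_frame.astype(np.float32))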
+def alignMultipleTiffs(paths,
+                       iterations=2,
+                       badFrameDistanceThr=100,
+                       maxDisplacement=10,
+                       normFunc=ia.array_diff,
+                       verbose=True,
+                       output=False,
+                       saveFolder=None,
+                       fileNameSurfix='corrected',
+                       cameraBias=0):
+    '''
+    motion correction of multiple tif files using rigid plane transformation. Motion correction will be applied
+    both within and across tif files
+
+    paths: paths of input tif files
+    iterations: number of iterations to perform motion correction
+    badFrameDistanceThr: the threshold of distance to define a good or bad frame; if a frame's distance from the
+                         reference frame is bigger than this value, it will be defined as a bad frame and will not
+                         be included in the mean frame calculation
+    normFunc: function to calculate distance between two frames.
+              options: corticalmapping.core.ImageAnalysis.array_diff (mean of absolute difference across all pixels)
+                       corticalmapping.core.ImageAnalysis.distance (Frobenius distance or Euclidean norm)
+    verbose: if True, print alignment information for each frame
+    output: if True, generate and save motion corrected tif files
+    saveFolder: if None, corrected files will be saved in the same folder as the original data
+    fileNameSurfix: suffix appended to corrected file names
+    cameraBias: value subtracted from every pixel of the saved corrected movies
+    '''
+
+    if saveFolder is not None:
+        fileNameList = [os.path.split(p)[1] for p in paths]
+        if len(set(fileNameList)) < len(fileNameList):
+            raise ValueError('Files in the input path list should have distinct file names!')
+
+    # align each movie within itself and collect per-file offsets and mean projections
+    offsets = []
+    meanFrames = []
+    for path in paths:
+        print('\nAligning file:', path)
+        currMov = tf.imread(path)
+        currOffsets, _, currMeanFrame = alignSingleMovieLoop(currMov, iterations=iterations,
+                                                             badFrameDistanceThr=badFrameDistanceThr,
+                                                             maxDisplacement=maxDisplacement,
+                                                             normFunc=normFunc, verbose=verbose)
+        offsets.append(np.array(currOffsets))
+        meanFrames.append(currMeanFrame)
+    meanFrames = np.array(meanFrames)
+
+    if len(paths) > 1:
+        print('\nPlotting distance distribution across mean frames of each file ...')
+        _, f = getDistanceList(meanFrames, meanFrames[0, :, :], normFunc=normFunc, isPlot=True)
+        f.suptitle('Distances across files')
+        plt.show()
+        print('Start alignment across files...')
+        fileOffset, allMeanFrames, aveMeanFrame = alignSingleMovieLoop(meanFrames, iterations=5,
+                                                                       badFrameDistanceThr=65535,
+                                                                       maxDisplacement=maxDisplacement,
+                                                                       normFunc=normFunc, verbose=verbose)
+        print('Plotting mean frame of each file before and after cross file alignment ...')
+        tf.imshow(np.dstack((np.array(meanFrames), np.array(allMeanFrames))), photometric='miniswhite', cmap='gray')
+        plt.show()
+
+        for i, path in enumerate(paths):
+            offsets[i] = offsets[i] + fileOffset[i, :]
+            print('Saving motion correction results for file:', path)
+            fileFolder, fileName = os.path.split(path)
+            newFileName = os.path.splitext(fileName)[0] + '_correction_results.pkl'
+            if saveFolder is not None:
+                savePath = os.path.join(saveFolder, newFileName)
+            else:
+                savePath = os.path.join(fileFolder, newFileName)
+            ft.saveFile(savePath, {'offset': offsets[i],
+                                   'meanFrame': allMeanFrames[i, :, :].astype(np.float32),
+                                   'path': path,
+                                   'status': 'cross_files'})
+        print('End of cross file alignment.\n')
+    else:
+        print('\nThere is only one file in the list. No need to align across files\n')
+        aveMeanFrame = meanFrames[0]
+
+    if output:
+        for i, path in enumerate(paths):
+            print('Generating output image file for ' + path)
+            fileFolder, fileName = os.path.split(path)
+            newFileName = ('_' + fileNameSurfix).join(os.path.splitext(fileName))
+            if saveFolder is None:
+                newPath = os.path.join(fileFolder, newFileName)
+            else:
+                newPath = os.path.join(saveFolder, newFileName)
+            mov = tf.imread(path)
+            for j in range(mov.shape[0]):
+                if not np.array_equal(offsets[i][j, :], np.array([0, 0])):
+                    mov[j, :, :] = rigid_transform(mov[j, :, :], offset=offsets[i][j, :])
+            tf.imsave(newPath, mov - cameraBias)
+
+    return offsets, aveMeanFrame
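The '_correction_results.pkl' files written above can be consumed downstream to re-apply the saved offsets; a minimal sketch, assuming the hypothetical file name below and that ft.saveFile produces an ordinary pickle, as its use above suggests:

import pickle
import numpy as np
import tifffile as tf
from corticalmapping.core.ImageAnalysis import rigid_transform

with open('test_001_correction_results.pkl', 'rb') as pkl_f:  # hypothetical file name
    results = pickle.load(pkl_f)  # keys: 'offset', 'meanFrame', 'path', 'status'

# re-apply the per-frame offsets to the raw movie referenced by the result file
mov = tf.imread(results['path'])
for j, frame_offset in enumerate(results['offset']):
    if not np.array_equal(frame_offset, np.array([0, 0])):
        mov[j, :, :] = rigid_transform(mov[j, :, :], offset=frame_offset)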
+
+
+if __name__ == '__main__':
+
+    #======================================================================================================
+    # img_orig = tf.imread(r"C:\JunZhuang\labwork\data\python_temp_folder\motion_correction\original.tif")
+    # img_move = tf.imread(r"C:\JunZhuang\labwork\data\python_temp_folder\motion_correction\moved.tif")
+    #
+    # offset, _ = iamstupid(img_orig,img_move)
+    # assert(offset == [-4,-7])
+    #======================================================================================================
+
+
+    #======================================================================================================
+    # imgPath = r'Z:\Jun\150610-M160809\KSStim_B2U_10Sweeps\KSStim_B2U_10sweeps_001_001.tif'
+    # imgPath = r"E:\data2\2015-06-11-python-2P-analysis-test\motion_correction_test\for_Jun\test_001.tif"
+    # img = tf.imread(imgPath)
+    # distanceList = getDistanceList(img,img[0,:,:],isPlot=True)
+    #======================================================================================================
+
+    #======================================================================================================
+    # imgPath = r"E:\data2\2015-06-11-python-2P-analysis-test\motion_correction_test\for_Jun\test_001.tif"
+    # img = tf.imread(imgPath)
+    # offset, _ = iamstupid(img[1,:,:],img[2,:,:])
+    # print(offset)
+    #======================================================================================================
+
+    #======================================================================================================
+    # imgPath = r"E:\data2\2015-06-11-python-2P-analysis-test\motion_correction_test\for_Jun\test_001.tif"
+    # img = tf.imread(imgPath)
+    # offsetList, alignedMov, meanFrame = alignSingleMovie(img,img[0,:,:],badFrameDistanceThr=110)
+    #
+    # tf.imshow(np.dstack((img,alignedMov)),cmap='gray')
+    # plt.show()
+    #======================================================================================================
+
+    #======================================================================================================
+    # imgPath = r"E:\data2\2015-06-11-python-2P-analysis-test\motion_correction_test\for_Jun\test_001.tif"
+    # img = tf.imread(imgPath)
+    # offsetList, alignedMov, meanFrame = alignSingleMovie(img,img[0,:,:],badFrameDistanceThr=110)
+    #
+    # img2Path = r"E:\data2\2015-06-11-python-2P-analysis-test\motion_correction_test\for_Jun\test_002.tif"
+    # img2 = tf.imread(img2Path)
+    # offsetList2, alignedMov2, meanFrame2 = alignSingleMovie(img2,img2[0,:,:],badFrameDistanceThr=110)
+    #
+    # print(np.hstack((offsetList,offsetList2)))
+    #======================================================================================================
+
+    #======================================================================================================
+    # imgPath = r"E:\data2\2015-06-11-python-2P-analysis-test\motion_correction_test\for_Jun\test_001.tif"
r"E:\data2\2015-06-11-python-2P-analysis-test\motion_correction_test\for_Jun\test_001.tif" + # img = tf.imread(imgPath) + # offsetList, alignedMov, meanFrame = alignSingleMovieLoop(img,badFrameDistanceThr=110) + # print offsetList + #====================================================================================================== + + #====================================================================================================== + # imgPath = r"E:\data2\2015-06-11-python-2P-analysis-test\motion_correction_test\for_Jun\test_001.tif" + # img = tf.imread(imgPath) + # offsetList, alignedMov, meanFrame = alignSingleMovie(img,img[-1,:,:],badFrameDistanceThr=110,alignOrder=1) + # offsetList2, alignedMov2, meanFrame2 = alignSingleMovie(img,img[-1,:,:],badFrameDistanceThr=110,alignOrder=-1) + # print np.hstack((offsetList,offsetList2)) + # tf.imshow(np.dstack((img,alignedMov,alignedMov2)),cmap='gray') + # plt.show() + #====================================================================================================== + + #====================================================================================================== + # paths=[ + # r"E:\data2\2015-06-11-python-2P-analysis-test\motion_correction_test\for_Jun\test_001.tif", + # r"E:\data2\2015-06-11-python-2P-analysis-test\motion_correction_test\for_Jun\test_002.tif" + # ] + # offsets, meanFrame = alignMultipleTiffs(paths, + # iterations=2, + # badFrameDistanceThr=100, + # maxDisplacement=10, + # normFunc=ia.array_diff, + # verbose=False, + # output=True, + # saveFolder=None, + # fileNameSurfix='corrected', + # cameraBias=0) + # + # print offsets[0]-offsets[1] + #====================================================================================================== + + print('for debug...') \ No newline at end of file diff --git a/corticalmapping/NwbTools.py b/corticalmapping/NwbTools.py index 6d1e2be..f0742bb 100644 --- a/corticalmapping/NwbTools.py +++ b/corticalmapping/NwbTools.py @@ -8,58 +8,55 @@ import corticalmapping.core.FileTools as ft import corticalmapping.core.TimingAnalysis as ta import corticalmapping.core.PlottingTools as pt -import corticalmapping.CamstimTools as ct - try: from nwb.nwb import NWB except ImportError: - print 'no Allen Institute NWB API. get this from ' \ - 'http://stimash.corp.alleninstitute.org/projects/INF/repos/ainwb/browse' + print('no Allen Institute NWB API. 
get this from ' \ + 'http://stimash.corp.alleninstitute.org/projects/INF/repos/ainwb/browse') DEFAULT_GENERAL = { - 'session_id': '', - 'experimenter': '', - 'institution': 'Allen Institute for Brain Science', - # 'lab': '', - # 'related_publications': '', - 'notes': '', - 'experiment_description': '', - # 'data_collection': '', - 'stimulus': '', - # 'pharmacology': '', - # 'surgery': '', - # 'protocol': '', - 'subject': { - 'subject_id': '', - # 'description': '', - 'species': 'Mus musculus', - 'genotype': '', - 'sex': '', - 'age': '', - # 'weight': '', - }, - # 'virus': '', - # 'slices': '', - 'extracellular_ephys': { - 'electrode_map': '', - 'sampling_rate': 30000., - # 'electrode_group': [], - # 'impedance': [], - # 'filtering': [] - }, - 'optophysiology': { - # 'indicator': '', - # 'excitation_lambda': '', - # 'imaging_rate': '', - # 'location': '', - # 'device': '', - }, - # 'optogenetics': {}, - 'devices': {} -} + 'session_id': '', + 'experimenter': '', + 'institution': 'Allen Institute for Brain Science', + # 'lab': '', + # 'related_publications': '', + 'notes': '', + 'experiment_description': '', + # 'data_collection': '', + 'stimulus': '', + # 'pharmacology': '', + # 'surgery': '', + # 'protocol': '', + 'subject': { + 'subject_id': '', + # 'description': '', + 'species': 'Mus musculus', + 'genotype': '', + 'sex': '', + 'age': '', + # 'weight': '', + }, + # 'virus': '', + # 'slices': '', + 'extracellular_ephys': { + 'electrode_map': '', + 'sampling_rate': 30000., + # 'electrode_group': [], + # 'impedance': [], + # 'filtering': [] + }, + 'optophysiology': { + # 'indicator': '', + # 'excitation_lambda': '', + # 'imaging_rate': '', + # 'location': '', + # 'device': '', + }, + # 'optogenetics': {}, + 'devices': {} + } SPIKE_WAVEFORM_TIMEWINDOW = (-0.002, 0.002) - def plot_waveforms(waveforms, ch_locations=None, stds=None, waveforms_filtered=None, stds_filtered=None, f=None, ch_ns=None, axes_size=(0.2, 0.2), **kwargs): """ @@ -113,7 +110,7 @@ def plot_waveforms(waveforms, ch_locations=None, stds=None, waveforms_filtered=N curr_wf_f = waveforms_filtered[:, j] if stds_filtered is not None: curr_std_f = stds_filtered[:, j] - ax.fill_between(range(waveforms_filtered.shape[0]), curr_wf_f - curr_std_f, + ax.fill_between(list(range(waveforms_filtered.shape[0])), curr_wf_f - curr_std_f, curr_wf_f + curr_std_f, color='#888888', alpha=0.5, edgecolor='none') ax.plot(curr_wf_f, '-', color='#555555', label='filtered', **kwargs) @@ -121,8 +118,8 @@ def plot_waveforms(waveforms, ch_locations=None, stds=None, waveforms_filtered=N curr_wf = waveforms[:, j] if stds is not None: curr_std = stds[:, j] - ax.fill_between(range(waveforms.shape[0]), curr_wf - curr_std, curr_wf + curr_std, - color='#8888ff', alpha=0.5, edgecolor='none') + ax.fill_between(list(range(waveforms.shape[0])), curr_wf - curr_std, curr_wf + curr_std, + color='#8888ff',alpha=0.5, edgecolor='none') ax.plot(curr_wf, '-', color='#3333ff', label='unfiltered', **kwargs) # plot title @@ -151,16 +148,16 @@ def __init__(self, filename, is_manual_check=False, **kwargs): if is_manual_check: keyboard_input = '' while keyboard_input != 'y' and keyboard_input != 'n': - keyboard_input = raw_input('\nthe path "' + filename + '" already exists. Modify it? (y/n) \n') + keyboard_input = input('\nthe path "' + filename + '" already exists. Modify it? 
(y/n) \n') if keyboard_input == 'y': super(RecordedFile, self).__init__(filename=filename, modify=True, **kwargs) elif keyboard_input == 'n': raise IOError('file already exists.') else: - print('\nModifying existing nwb file: ' + filename) + print(('\nModifying existing nwb file: ' + filename)) super(RecordedFile, self).__init__(filename=filename, modify=True, **kwargs) else: - print('\nCreating a new nwb file: ' + filename) + print(('\nCreating a new nwb file: ' + filename)) super(RecordedFile, self).__init__(filename=filename, modify=False, **kwargs) def add_general(self, general=DEFAULT_GENERAL, is_overwrite=True): @@ -170,194 +167,6 @@ def add_general(self, general=DEFAULT_GENERAL, is_overwrite=True): slf = self.file_pointer ft.write_dictionary_to_h5group_recursively(target=slf['general'], source=general, is_overwrite=is_overwrite) - def add_sync_data(self, f_path, analog_downsample_rate=None, by_label=True, digital_labels=None, - analog_labels=None): - - sync_dict = ft.read_sync(f_path=f_path, analog_downsample_rate=analog_downsample_rate, - by_label=by_label, digital_labels=digital_labels, - analog_labels=analog_labels) - - # add digital channel - if 'digital_channels' in sync_dict.keys(): - - digital_channels = sync_dict['digital_channels'] - - # get channel names - for d_chn, d_ch in digital_channels.items(): - if ft.is_integer(d_chn): - curr_chn = 'digital_CH_' + ft.int2str(d_chn, 3) - else: - curr_chn = 'digital_' + d_chn - - curr_rise = d_ch['rise'] - ch_series_rise = self.create_timeseries('TimeSeries', curr_chn + '_rise', 'acquisition') - ch_series_rise.set_data([], unit='', conversion=np.nan, resolution=np.nan) - if len(curr_rise) == 0: - curr_rise = np.array([np.nan]) - ch_series_rise.set_time(curr_rise) - ch_series_rise.set_value('num_samples', 0) - else: - ch_series_rise.set_time(curr_rise) - ch_series_rise.set_description('timestamps of rise cross of digital channel: ' + curr_chn) - ch_series_rise.set_source('sync program') - ch_series_rise.set_comments('digital') - ch_series_rise.finalize() - - curr_fall = d_ch['fall'] - ch_series_fall = self.create_timeseries('TimeSeries', curr_chn + '_fall', 'acquisition') - ch_series_fall.set_data([], unit='', conversion=np.nan, resolution=np.nan) - if len(curr_fall) == 0: - curr_fall = np.array([np.nan]) - ch_series_fall.set_time(curr_fall) - ch_series_fall.set_value('num_samples', 0) - else: - ch_series_fall.set_time(curr_fall) - ch_series_fall.set_description('timestamps of fall cross of digital channel: ' + curr_chn) - ch_series_fall.set_source('sync program') - ch_series_fall.set_comments('digital') - ch_series_fall.finalize() - - # add analog channels - if 'analog_channels' in sync_dict.keys(): - - analog_channels = sync_dict['analog_channels'] - analog_fs = sync_dict['analog_sample_rate'] - - # get channel names - for a_chn, a_ch in analog_channels.items(): - if ft.is_integer(a_chn): - curr_chn = 'analog_CH_' + ft.int2str(a_chn, 3) - else: - curr_chn = 'analog_' + a_chn - - ch_series = self.create_timeseries('TimeSeries', curr_chn, 'acquisition') - ch_series.set_data(a_ch, unit='voltage', conversion=1., resolution=1.) 
- ch_series.set_time_by_rate(time_zero=0.0, rate=analog_fs) - ch_series.set_value('num_samples', len(a_ch)) - ch_series.set_comments('continuous') - ch_series.set_description('analog channel recorded by sync program') - ch_series.set_source('sync program') - ch_series.finalize() - - def add_acquisition_image(self, name, img, format='array', description=''): - """ - add arbitrarily recorded image into acquisition group, mostly surface vasculature image - :param name: - :param img: - :param format: - :param description: - :return: - """ - img_dset = self.file_pointer['acquisition/images'].create_dataset(name, data=img) - img_dset.attrs['format'] = format - img_dset.attrs['description'] = description - - def get_analog_data(self, ch_n): - """ - :param ch_n: string, analog channel name - :return: 1-d array, analog data, data * conversion - 1-d array, time stamps - """ - grp = self.file_pointer['acquisition/timeseries'][ch_n] - data = grp['data'].value - if not np.isnan(grp['data'].attrs['conversion']): - data = data.astype(np.float32) * grp['data'].attrs['conversion'] - if 'timestamps' in grp.keys(): - t = grp['timestamps'] - elif 'starting_time' in grp.keys(): - fs = grp['starting_time'].attrs['rate'] - sample_num = grp['num_samples'].value - t = np.arange(sample_num) / fs + grp['starting_time'].value - else: - raise ValueError('can not find timing information of channel:' + ch_n) - return data, t - - def _check_display_order(self, display_order=None): - """ - check display order make sure each presentation has a unique position, and move from increment order. - also check the given display_order is of the next number - """ - stimuli = self.file_pointer['stimulus/presentation'].keys() - - print('\nExisting visual stimuli:') - print('\n'.join(stimuli)) - - stimuli = [int(s[0:s.find('_')]) for s in stimuli] - stimuli.sort() - if stimuli != range(len(stimuli)): - raise ValueError('display order is not incremental.') - - if display_order is not None: - - if display_order != len(stimuli): - raise ValueError('input display order not the next display.') - - # ===========================photodiode related===================================================================== - def add_photodiode_onsets(self, photodiode_ch_path='acquisition/timeseries/photodiode', - digitizeThr=0.9, filterSize=0.01, segmentThr=0.01, smallestInterval=0.03, - expected_onsets_number=None): - """ - intermediate processing step for analysis of visual display. Containing the information about the onset of - photodiode signal. Timestamps are extracted from photodiode signal, should be aligned to the master clock. - extraction is done by corticalmapping.HighLevel.segmentPhotodiodeSignal() function. The raw signal - was first digitized by the digitize_threshold, then filtered by a gaussian fileter with filter_size. Then - the derivative of the filtered signal was calculated by numpy.diff. The derivative signal was then timed - with the digitized signal. Then the segmentation_threshold was used to detect rising edge of the resulting - signal. Any onset with interval from its previous onset smaller than smallest_interval will be discarded. - the resulting timestamps of photodiode onsets will be saved in 'analysis/photodiode_onsets' timeseries - - :param digitizeThr: float - :param filterSize: float - :param segmentThr: float - :param smallestInterval: float - :param expected_onsets_number: int, expected number of photodiode onsets, may extract from visual display - log. 
if extracted onset number does not match this number, the process will - be abort. If None, no such check will be performed. - :return: - """ - - pd_grp = self.file_pointer[photodiode_ch_path] - - fs = pd_grp['starting_time'].attrs['rate'] - - pd = pd_grp['data'].value * pd_grp['data'].attrs['conversion'] - - pd_onsets = hl.segmentPhotodiodeSignal(pd, digitizeThr=digitizeThr, filterSize=filterSize, - segmentThr=segmentThr, Fs=fs, smallestInterval=smallestInterval) - - if pd_onsets.shape[0] == 0: - return - - if expected_onsets_number is not None: - if len(pd_onsets) != expected_onsets_number: - raise ValueError('The number of photodiode onsets (' + str(len(pd_onsets)) + ') and the expected ' - 'number of sweeps ' + str( - expected_onsets_number) + ' do not match. Abort.') - - pd_ts = self.create_timeseries('TimeSeries', 'photodiode_onsets', modality='other') - pd_ts.set_time(pd_onsets) - pd_ts.set_data([], unit='', conversion=np.nan, resolution=np.nan) - pd_ts.set_description('intermediate processing step for analysis of visual display. ' - 'Containing the information about the onset of photodiode signal. Timestamps ' - 'are extracted from photodiode signal, should be aligned to the master clock.' - 'extraction is done by corticalmapping.HighLevel.segmentPhotodiodeSignal()' - 'function. The raw signal was first digitized by the digitize_threshold, then ' - 'filtered by a gaussian fileter with filter_size. Then the derivative of the filtered ' - 'signal was calculated by numpy.diff. The derivative signal was then timed with the ' - 'digitized signal. Then the segmentation_threshold was used to detect rising edge of ' - 'the resulting signal. Any onset with interval from its previous onset smaller than ' - 'smallest_interval will be discarded.') - pd_ts.set_path('/analysis/PhotodiodeOnsets') - pd_ts.set_value('digitize_threshold', digitizeThr) - pd_ts.set_value('fileter_size', filterSize) - pd_ts.set_value('segmentation_threshold', segmentThr) - pd_ts.set_value('smallest_interval', smallestInterval) - pd_ts.finalize() - - # ===========================photodiode related===================================================================== - - - # ===========================ephys related========================================================================== def add_open_ephys_data(self, folder, prefix, digital_channels=()): """ add open ephys raw data to self, in acquisition group, less useful, because the digital events needs to be @@ -369,7 +178,7 @@ def add_open_ephys_data(self, folder, prefix, digital_channels=()): """ output = oew.pack_folder_for_nwb(folder=folder, prefix=prefix, digital_channels=digital_channels) - for key, value in output.items(): + for key, value in list(output.items()): if 'CH' in key: # analog channel for electrode recording ch_ind = int(key[key.find('CH') + 2:]) @@ -407,10 +216,10 @@ def add_open_ephys_data(self, folder, prefix, digital_channels=()): else: # digital events - for key2, value2 in value.items(): + for key2, value2 in list(value.items()): ch_rise_ts = value2['rise'] - ch_series_rise = self.create_timeseries('TimeSeries', key2 + '_rise', 'acquisition') + ch_series_rise = self.create_timeseries('TimeSeries', key2+'_rise', 'acquisition') ch_series_rise.set_data([], unit='', conversion=np.nan, resolution=np.nan) if len(ch_rise_ts) == 0: ch_rise_ts = np.array([np.nan]) @@ -447,7 +256,7 @@ def add_open_ephys_continuous_data(self, folder, prefix): """ output = oew.pack_folder_for_nwb(folder=folder, prefix=prefix) - for key, value in output.items(): + for 
key, value in list(output.items()): if 'CH' in key: # analog channel for electrode recording ch_ind = int(key[key.find('CH') + 2:]) @@ -483,6 +292,53 @@ def add_open_ephys_continuous_data(self, folder, prefix): ch_series.set_source('open ephys') ch_series.finalize() + def add_acquisition_image(self, name, img, format='array', description=''): + """ + add arbitrarily recorded image into acquisition group, mostly surface vasculature image + :param name: + :param img: + :param format: + :param description: + :return: + """ + img_dset = self.file_pointer['acquisition/images'].create_dataset(name, data=img) + img_dset.attrs['format'] = format + img_dset.attrs['description'] = description + + def add_acquired_image_series_as_remote_link(self, name, image_file_path, dataset_path, timestamps, + description='', comments='', data_format='zyx', pixel_size=np.nan, + pixel_size_unit=''): + """ + add a required image series in to acquisition field as a link to an external hdf5 file. + :param name: str, name of the image series + :param image_file_path: str, the full file system path to the hdf5 file containing the raw image data + :param dataset_path: str, the path within the hdf5 file pointing to the raw data. the object should have at + least 3 attributes: 'conversion', resolution, unit + :param timestamps: 1-d array, the length of this array should be the same as number of frames in the image data + :param data_format: str, required field for ImageSeries object + :param pixel_size: array, size of pixel + :param pixel_size_unit: str, unit of pixel size + :return: + """ + + img_file = h5py.File(image_file_path) + img_data = img_file[dataset_path] + if timestamps.shape[0] != img_data.shape[0]: + raise ValueError('Number of frames does not equal to the length of timestamps!') + img_series = self.create_timeseries(ts_type='ImageSeries', name=name, modality='acquisition') + img_series.set_data_as_remote_link(image_file_path, dataset_path) + img_series.set_time(timestamps) + img_series.set_description(description) + img_series.set_comments(comments) + img_series.set_value('bits_per_pixel', img_data.dtype.itemsize * 8) + img_series.set_value('format', data_format) + img_series.set_value('dimension', img_data.shape) + img_series.set_value('image_file_path', image_file_path) + img_series.set_value('image_data_path_within_file', dataset_path) + img_series.set_value('pixel_size', pixel_size) + img_series.set_value('pixel_size_unit', pixel_size_unit) + img_series.finalize() + def add_phy_template_clusters(self, folder, module_name, ind_start=None, ind_end=None, is_add_artificial_unit=False, artificial_unit_firing_rate=2., spike_sorter=None): @@ -559,7 +415,7 @@ def add_phy_template_clusters(self, folder, module_name, ind_start=None, ind_end # create UnitTimes interface unit_times = mod.create_interface('UnitTimes') - for unit in spike_ind.keys(): + for unit in list(spike_ind.keys()): # get timestamps of current unit curr_ts = spike_ind[unit] @@ -657,10 +513,9 @@ def add_phy_template_clusters(self, folder, module_name, ind_start=None, ind_end unit_times.finalize() mod.finalize() - def add_external_LFP(self, traces, fs=30000., module_name=None, notch_base=60., notch_bandwidth=1., - notch_harmonics=4, + def add_external_LFP(self, traces, fs=30000., module_name=None, notch_base=60., notch_bandwidth=1., notch_harmonics=4, notch_order=2, lowpass_cutoff=300., lowpass_order=5, resolution=0, conversion=0, unit='', - comments='', source=''): + comments='', source=''): """ add LFP of raw arbitrary electrical traces into LFP 
module into /procession field. the trace will be filtered by corticalmapping.HighLevel.get_lfp() function. All filters are butterworth digital filters @@ -681,12 +536,12 @@ def add_external_LFP(self, traces, fs=30000., module_name=None, notch_base=60., :param source: str, interface source """ - if module_name is None or module_name == '': + if module_name is None or module_name=='': module_name = 'external_LFP' lfp = {} - for tn, trace in traces.items(): - curr_lfp = hl.get_lfp(trace, fs=fs, notch_base=notch_base, notch_bandwidth=notch_bandwidth, + for tn, trace in list(traces.items()): + curr_lfp = hl.get_lfp(trace,fs=fs, notch_base=notch_base, notch_bandwidth=notch_bandwidth, notch_harmonics=notch_harmonics, notch_order=notch_order, lowpass_cutoff=lowpass_cutoff, lowpass_order=lowpass_order) lfp.update({tn: curr_lfp}) @@ -695,9 +550,9 @@ def add_external_LFP(self, traces, fs=30000., module_name=None, notch_base=60., lfp_mod.set_description('LFP from external traces') lfp_interface = lfp_mod.create_interface('LFP') lfp_interface.set_value('description', 'LFP of raw arbitrary electrical traces. The traces were filtered by ' - 'corticalmapping.HighLevel.get_lfp() function. First, the powerline contamination at ' - 'multiplt harmonics were filtered out by a notch filter. Then the resulting traces were' - ' filtered by a lowpass filter. All filters are butterworth digital filters') + 'corticalmapping.HighLevel.get_lfp() function. First, the powerline contamination at ' + 'multiplt harmonics were filtered out by a notch filter. Then the resulting traces were' + ' filtered by a lowpass filter. All filters are butterworth digital filters') lfp_interface.set_value('comments', comments) lfp_interface.set_value('notch_base', notch_base) lfp_interface.set_value('notch_bandwidth', notch_bandwidth) @@ -706,7 +561,7 @@ def add_external_LFP(self, traces, fs=30000., module_name=None, notch_base=60., lfp_interface.set_value('lowpass_cutoff', lowpass_cutoff) lfp_interface.set_value('lowpass_order', lowpass_order) lfp_interface.set_source(source) - for tn, t_lfp in lfp.items(): + for tn, t_lfp in list(lfp.items()): curr_ts = self.create_timeseries('ElectricalSeries', tn, modality='other') curr_ts.set_data(t_lfp, conversion=conversion, resolution=resolution, unit=unit) curr_ts.set_time_by_rate(time_zero=0., rate=fs) @@ -738,7 +593,7 @@ def add_internal_LFP(self, continuous_channels, module_name=None, notch_base=60. :param source: str, interface source """ - if module_name is None or module_name == '': + if module_name is None or module_name=='': module_name = 'LFP' lfp_mod = self.create_module(module_name) @@ -759,7 +614,8 @@ def add_internal_LFP(self, continuous_channels, module_name=None, notch_base=60. lfp_interface.set_source(source) for channel in continuous_channels: - print '\n', channel, ': start adding LFP ...' + + print('\n', channel, ': start adding LFP ...') trace = self.file_pointer['acquisition/timeseries'][channel]['data'].value fs = self.file_pointer['acquisition/timeseries'][channel]['starting_time'].attrs['rate'] @@ -769,7 +625,7 @@ def add_internal_LFP(self, continuous_channels, module_name=None, notch_base=60. unit = self.file_pointer['acquisition/timeseries'][channel]['data'].attrs['unit'] ts_source = self.file_pointer['acquisition/timeseries'][channel].attrs['source'] - print channel, ': calculating LFP ...' 
+ print(channel, ': calculating LFP ...') t_lfp = hl.get_lfp(trace, fs=fs, notch_base=notch_base, notch_bandwidth=notch_bandwidth, notch_harmonics=notch_harmonics, notch_order=notch_order, lowpass_cutoff=lowpass_cutoff, @@ -782,1190 +638,228 @@ def add_internal_LFP(self, continuous_channels, module_name=None, notch_base=60. curr_ts.set_value('electrode_idx', int(channel.split('_')[1])) curr_ts.set_source(ts_source) lfp_interface.add_timeseries(curr_ts) - print channel, ': finished adding LFP.' + print(channel, ': finished adding LFP.') lfp_interface.finalize() lfp_mod.finalize() - def plot_spike_waveforms(self, modulen, unitn, is_plot_filtered=False, fig=None, axes_size=(0.2, 0.2), **kwargs): + def add_visual_stimulation(self, log_path, display_order=0): """ - plot spike waveforms - - :param modulen: str, name of the module containing ephys recordings - :param unitn: str, name of ephys unit, should be in '/processing/ephys_units/UnitTimes' - :param is_plot_filtered: bool, plot unfiltered waveforms or not - :param channel_names: list of strs, channel names in continuous recordings, should be in '/acquisition/timeseries' - :param fig: matplotlib figure object - :param t_range: tuple of two floats, time range to plot along spike time stamps - :param kwargs: inputs to matplotlib.axes.plot() function - :return: fig + load visual stimulation given saved display log pickle file + :param log_path: the path to the display log generated by corticalmapping.VisualStim + :param display_order: int, in case there is more than one visual display in the file. + This value records the order of the displays + :return: """ - if modulen not in self.file_pointer['processing'].keys(): - raise LookupError('Can not find module for ephys recording: ' + modulen + '.') - - if unitn not in self.file_pointer['processing'][modulen]['UnitTimes'].keys(): - raise LookupError('Can not find ephys unit: ' + unitn + '.') + self._check_display_order(display_order) - ch_ns = self._get_channel_names() + log_dict = ft.loadFile(log_path) - unit_grp = self.file_pointer['processing'][modulen]['UnitTimes'][unitn] - waveforms = unit_grp['template'].value + stim_name = log_dict['stimulation']['stimName'] - if 'template_std' in unit_grp.keys(): - stds = unit_grp['template_std'].value - else: - stds = None + display_frames = log_dict['presentation']['displayFrames'] + time_stamps = log_dict['presentation']['timeStamp'] - if is_plot_filtered: - if 'template_filtered' in unit_grp.keys(): - waveforms_f = unit_grp['template_filtered'].value - if 'template_std_filtered' in unit_grp.keys(): - stds_f = unit_grp['template_std_filtered'].value - else: - stds_f = None - else: - print('can not find unfiltered spike waveforms for unit: ' + unitn) - waveforms_f = None - stds_f = None - else: - waveforms_f = None - stds_f = None + if len(display_frames) != len(time_stamps): + print(('\nWarning: {}'.format(log_path))) + print(('Unequal number of displayFrames ({}) and timeStamps ({}).'.format(len(display_frames), + len(time_stamps)))) - if 'channel_xpos' in self.file_pointer['processing'][modulen].keys(): - ch_xpos = self.file_pointer['processing'][modulen]['channel_xpos'] - ch_ypos = self.file_pointer['processing'][modulen]['channel_ypos'] - ch_locations = zip(ch_xpos, ch_ypos) + if stim_name == 'SparseNoise': + self._add_sparse_noise_stimulation(log_dict, display_order=display_order) + elif stim_name == 'FlashingCircle': + self._add_flashing_circle_stimulation(log_dict, display_order=display_order) + elif stim_name == 'UniformContrast': + 
self._add_uniform_contrast_stimulation(log_dict, display_order=display_order) + elif stim_name == 'DriftingGratingCircle': + self._add_drifting_grating_circle_stimulation(log_dict, display_order=display_order) + elif stim_name == 'KSstimAllDir': + self._add_drifting_checker_board_stimulation(log_dict, display_order=display_order) else: - ch_locations = None + raise ValueError('stimulation name {} unrecognizable!'.format(stim_name)) - fig = plot_waveforms(waveforms, ch_locations=ch_locations, stds=stds, waveforms_filtered=waveforms_f, - stds_filtered=stds_f, f=fig, ch_ns=ch_ns, axes_size=axes_size, **kwargs) + def add_visual_stimulations(self, log_paths): - fig.suptitle(self.file_pointer['identifier'].value + ' : ' + unitn) + exist_stimuli = list(self.file_pointer['stimulus/presentation'].keys()) - return fig + for i, log_path in enumerate(log_paths): + self.add_visual_stimulation(log_path, i + len(exist_stimuli)) - def generate_dat_file_for_kilosort(self, output_folder, output_name, ch_ns, is_filtered=True, cutoff_f_low=300., - cutoff_f_high=6000.): + @staticmethod + def _analyze_sparse_noise_frames(sn_grp): """ - generate .dat file for kilolsort: "https://github.com/cortex-lab/KiloSort", it is binary raw code, with - structure: ch0_t0, ch1_t0, ch2_t0, ...., chn_t0, ch0_t1, ch1_t1, ch2_t1, ..., chn_t1, ..., ch0_tm, ch1_tm, - ch2_tm, ..., chn_tm + analyze sparse noise display frames saved in '/stimulus/presentation', extract information about onset of + each displayed square: - :param output_folder: str, path to output directory - :param output_name: str, output file name, an extension of '.dat' will be automatically added. - :param ch_ns: list of strings, name of included analog channels - :param is_filtered: bool, if Ture, another .dat file with same size will be generated in the output folder. - this file will contain temporally filtered data (filter done by - corticalmapping.core.TimingAnalysis.butter_... functions). '_filtered' will be attached - to the filtered file name. - :param cutoff_f_low: float, low cutoff frequency, Hz. if None, it will be low-pass - :param cutoff_f_high: float, high cutoff frequency, Hz, if None, it will be high-pass - :return: None + return: all_squares: 2-d array, each line is a displayed square in sparse noise, each column is a feature of + a particular square, squares follow display order + data_format: str, description of the column structure of each square + description: str, + pooled_squares: dict, squares with same location and sign are pooled together. + keys: 'square_00000', 'square_00001', 'square_00002' ... each represents a unique + square. + values: dict, { + 'azi': , + 'alt': , + 'sign': , + 'onset_ind': list of indices of the appearances of current square in + in "all_squares", to be aligned with to photodiode onset + timestamps + } """ - save_path = os.path.join(output_folder, output_name + '.dat') - if os.path.isfile(save_path): - raise IOError('Output file already exists.') - - data_lst = [] - for ch_n in ch_ns: - data_lst.append(self.file_pointer['acquisition/timeseries'][ch_n]['data'].value) - - dtype = data_lst[0].dtype - data = np.array(data_lst, dtype=dtype).flatten(order='F') - data.tofile(save_path) - - if is_filtered: - - if cutoff_f_low is None and cutoff_f_high is None: - print ('both low cutoff frequency and high cutoff frequency are None. 
Do nothing.') - return - - save_path_f = os.path.join(output_folder, output_name + '_filtered.dat') - if os.path.isfile(save_path_f): - raise IOError('Output file for filtered data already existes.') - - fs = self.file_pointer['general/extracellular_ephys/sampling_rate'].value - data_lst_f = [] - for data_r in data_lst: - if cutoff_f_high is None: - data_lst_f.append(ta.butter_lowpass(data_r, fs=fs, cutoff=cutoff_f_low).astype(dtype)) - elif cutoff_f_low is None: - data_lst_f.append(ta.butter_highpass(data_r, fs=fs, cutoff=cutoff_f_high).astype(dtype)) - else: - data_lst_f.append(ta.butter_bandpass(data_r, - fs=fs, - cutoffs=(cutoff_f_low, cutoff_f_high)).astype(dtype)) - data_f = np.array(data_lst_f, dtype=dtype).flatten(order='F') - data_f.tofile(save_path_f) - - def _get_channel_names(self): - """ - :return: sorted list of channel names, each channel name should have prefix 'ch_' - """ - analog_chs = self.file_pointer['acquisition/timeseries'].keys() - channel_ns = [cn for cn in analog_chs if cn[0:3] == 'ch_'] - channel_ns.sort() - return channel_ns - - # ===========================ephys related========================================================================== - - - # ===========================2p movie related======================================================================= - def add_acquired_image_series_as_remote_link(self, name, image_file_path, dataset_path, timestamps, - description='', comments='', data_format='zyx', pixel_size=np.nan, - pixel_size_unit=''): - """ - add a required image series in to acquisition field as a link to an external hdf5 file. - :param name: str, name of the image series - :param image_file_path: str, the full file system path to the hdf5 file containing the raw image data - :param dataset_path: str, the path within the hdf5 file pointing to the raw data. 
the object should have at - least 3 attributes: 'conversion', resolution, unit - :param timestamps: 1-d array, the length of this array should be the same as number of frames in the image data - :param data_format: str, required field for ImageSeries object - :param pixel_size: array, size of pixel - :param pixel_size_unit: str, unit of pixel size - :return: - """ - - img_file = h5py.File(image_file_path) - img_data = img_file[dataset_path] - if timestamps.shape[0] != img_data.shape[0]: - raise ValueError('Number of frames does not equal to the length of timestamps!') - img_series = self.create_timeseries(ts_type='ImageSeries', name=name, modality='acquisition') - img_series.set_data_as_remote_link(image_file_path, dataset_path) - img_series.set_time(timestamps) - img_series.set_description(description) - img_series.set_comments(comments) - img_series.set_value('bits_per_pixel', img_data.dtype.itemsize * 8) - img_series.set_value('format', data_format) - img_series.set_value('dimension', img_data.shape) - img_series.set_value('image_file_path', image_file_path) - img_series.set_value('image_data_path_within_file', dataset_path) - img_series.set_value('pixel_size', pixel_size) - img_series.set_value('pixel_size_unit', pixel_size_unit) - img_series.finalize() - - def add_motion_correction_module(self, module_name, original_timeseries_path, corrected_file_path, - corrected_dataset_path, xy_translation_offsets, interface_name='MotionCorrection', - mean_projection=None, max_projection=None, description='', comments='', - source=''): - """ - add a motion corrected image series in to processing field as a module named 'motion_correction' and create a - link to an external hdf5 file which contains the images. - :param module_name: str, module name to be created - :param interface_name: str, interface name of the image series - :param original_timeseries_path: str, the path to the timeseries of the original images - :param corrected_file_path: str, the full file system path to the hdf5 file containing the raw image data - :param corrected_dataset_path: str, the path within the hdf5 file pointing to the motion corrected data. 
- the object should have at least 3 attributes: 'conversion', resolution, unit - :param xy_translation_offsets: 2d array with two columns, - :param mean_projection: 2d array, mean_projection of corrected image, if None, no dataset will be - created - :param max_projection: 2d array, max_projection of corrected image, if None, no dataset will be - created - :return: - """ - - orig = self.file_pointer[original_timeseries_path] - timestamps = orig['timestamps'].value - - img_file = h5py.File(corrected_file_path) - img_data = img_file[corrected_dataset_path] - if timestamps.shape[0] != img_data.shape[0]: - raise ValueError('Number of frames does not equal to the length of timestamps!') - - if xy_translation_offsets.shape[0] != timestamps.shape[0]: - raise ValueError('Number of offsets does not equal to the length of timestamps!') - - corrected = self.create_timeseries(ts_type='ImageSeries', name='corrected', modality='other') - corrected.set_data_as_remote_link(corrected_file_path, corrected_dataset_path) - corrected.set_time_as_link(original_timeseries_path) - corrected.set_description(description) - corrected.set_comments(comments) - corrected.set_source(source) - for value_n in orig.keys(): - if value_n not in ['image_data_path_within_file', 'image_file_path', 'data', 'timestamps']: - corrected.set_value(value_n, orig[value_n].value) - - xy_translation = self.create_timeseries(ts_type='TimeSeries', name='xy_translation', modality='other') - xy_translation.set_data(xy_translation_offsets, unit='pixel', conversion=np.nan, resolution=np.nan) - xy_translation.set_time_as_link(original_timeseries_path) - xy_translation.set_value('num_samples', xy_translation_offsets.shape[0]) - xy_translation.set_description('Time series of x, y shifts applied to create motion stabilized image series') - xy_translation.set_value('feature_description', ['x_motion', 'y_motion']) - - mc_mod = self.create_module(module_name) - mc_interf = mc_mod.create_interface("MotionCorrection") - mc_interf.add_corrected_image(interface_name, orig=original_timeseries_path, xy_translation=xy_translation, - corrected=corrected) - - if mean_projection is not None: - mc_interf.set_value('mean_projection', mean_projection) - - if max_projection is not None: - mc_interf.set_value('max_projection', max_projection) - - mc_interf.finalize() - mc_mod.finalize() - - def add_muliple_dataset_to_motion_correction_module(self, input_parameters, module_name='motion_correction', - temporal_downsample_rate=1): - """ - add multiple motion corrected datasets into a motion correction module. Designed for adding multiplane - imaging datasets at once. The motion correction module will contain multiple interfaces each corresponding - to one imaging plane. - - :param input_parameters: list of dictionaries, each dictionary in the list represents one imaging plane - the dictionary should contain the following keys: - 'field_name': str, name of the hdf5 group for the motion correction information - 'original_timeseries_path': str, the path to the timeseries of the original images - 'corrected_file_path': str, the full file system path to the hdf5 file - containing the corrected image data - 'corrected_dataset_path': str, the path within the hdf5 file pointing to the motion - corrected data. 
the object should have at least 3 - attributes: 'conversion', resolution and unit - 'xy_translation_offsets': 2d array with two columns - 'mean_projection': optional, 2d array, mean_projection of corrected image, - if not existing, no dataset will be created - 'max_projection': optional, 2d array, max_projection of corrected image, - if not existing, no dataset will be created - 'description': optional, str, if not existing, it will be set as '' - 'comments': optional, str, if not existing, it will be set as '' - 'source': optional, str, if not existing, it will be set as '' - :param module_name: str, module name to be created - :param temporal_downsample_rate: int, >0, in case the movie was motion corrected before temporal downsample, - use only a subset of offsets. - """ - - mc_mod = self.create_module(module_name) - mc_interf = mc_mod.create_interface('MotionCorrection') - - for mov_dict in input_parameters: - if 'description' not in mov_dict.keys(): - mov_dict['description'] = '' - - if 'comments' not in mov_dict.keys(): - mov_dict['comment'] = '' - - if 'source' not in mov_dict.keys(): - mov_dict['source'] = '' - - orig = self.file_pointer[mov_dict['original_timeseries_path']] - timestamps = orig['timestamps'].value - # print(timestamps.shape) - - img_file = h5py.File(mov_dict['corrected_file_path'], 'r') - img_data = img_file[mov_dict['corrected_dataset_path']] - # print(img_data.shape) - if timestamps.shape[0] != img_data.shape[0]: - raise ValueError('Number of frames does not equal to the length of timestamps!') - - offsets = mov_dict['xy_translation_offsets'] - offsets = offsets[::temporal_downsample_rate, :] - # print(offsets.shape) - if offsets.shape[0] != timestamps.shape[0]: - raise ValueError('Number of offsets does not equal to the length of timestamps!') - - corrected = self.create_timeseries(ts_type='ImageSeries', name='corrected', modality='other') - corrected.set_data_as_remote_link(mov_dict['corrected_file_path'], - mov_dict['corrected_dataset_path']) - corrected.set_time_as_link(mov_dict['original_timeseries_path']) - corrected.set_description(mov_dict['description']) - corrected.set_comments(mov_dict['comments']) - corrected.set_source(mov_dict['source']) - - if 'mean_projection' in mov_dict.keys() and mov_dict['mean_projection'] is not None: - corrected.set_value('mean_projection', mov_dict['mean_projection']) - - if 'max_projection' in mov_dict.keys() and mov_dict['max_projection'] is not None: - corrected.set_value('max_projection', mov_dict['max_projection']) - - for value_n in orig.keys(): - if value_n not in ['image_data_path_within_file', 'image_file_path', 'data', 'timestamps']: - corrected.set_value(value_n, orig[value_n].value) - - xy_translation = self.create_timeseries(ts_type='TimeSeries', name='xy_translation', modality='other') - xy_translation.set_data(offsets, unit='pixel', conversion=np.nan, - resolution=np.nan) - xy_translation.set_time_as_link(mov_dict['original_timeseries_path']) - xy_translation.set_value('num_samples', offsets.shape[0]) - xy_translation.set_description('Time series of x, y shifts applied to create motion ' - 'stabilized image series') - xy_translation.set_value('feature_description', ['x_motion', 'y_motion']) - - mc_interf.add_corrected_image(mov_dict['field_name'], orig=mov_dict['original_timeseries_path'], - xy_translation=xy_translation, - corrected=corrected) - - mc_interf.finalize() - mc_mod.finalize() - - # ===========================2p movie related======================================================================= - 
- - # ===========================camstim visual stimuli related========================================================= - def add_display_frame_ts_camstim(self, pkl_dict, max_mismatch=0.1, verbose=True, refresh_rate=60., - allowed_jitter=0.01): - - ts_pd_fall = self.file_pointer['acquisition/timeseries/digital_photodiode_fall/timestamps'].value - ts_display_rise = self.file_pointer['acquisition/timeseries/digital_vsync_visual_rise/timestamps'].value - - ts_display_real, display_lag = ct.align_visual_display_time(pkl_dict=pkl_dict, ts_pd_fall=ts_pd_fall, - ts_display_rise=ts_display_rise, - max_mismatch=max_mismatch, - verbose=verbose, refresh_rate=refresh_rate, - allowed_jitter=allowed_jitter) - - frame_ts = self.create_timeseries('TimeSeries', 'FrameTimestamps', modality='other') - frame_ts.set_time(ts_display_rise) - frame_ts.set_data([], unit='', conversion=np.nan, resolution=np.nan) - frame_ts.set_description('onset timestamps of each display frames after correction for display lag. ' - 'Used corticalmapping.HighLevel.align_visual_display_time() function to ' - 'calculate display lag.') - frame_ts.set_path('/processing/visual_display') - frame_ts.set_value('max_mismatch_sec', max_mismatch) - frame_ts.set_value('refresh_rate_hz', refresh_rate) - frame_ts.set_value('allowed_jitter_sec', allowed_jitter) - frame_ts.finalize() - - display_lag_ts = self.create_timeseries('TimeSeries', 'DisplayLag', modality='other') - display_lag_ts.set_time(display_lag[:, 0]) - display_lag_ts.set_data(display_lag[:, 1], unit='second', conversion=np.nan, resolution=np.nan) - display_lag_ts.set_path('/processing/visual_display') - display_lag_ts.set_value('mean_display_lag_sec', np.mean(display_lag[:, 1])) - display_lag_ts.finalize() - - def _add_drifting_grating_stimulation_camstim(self, stim_dict): - - dgts = self.create_timeseries(ts_type='TimeSeries', - name=stim_dict['stim_name'], - modality='stimulus') - - dgts.set_time(stim_dict['sweep_onset_frames']) - dgts.set_data(stim_dict['sweeps'], unit='', conversion=np.nan, resolution=np.nan) - dgts.set_source(stim_dict['source']) - dgts.set_comments(stim_dict['comments']) - dgts.set_description(stim_dict['description']) - for fn, fv in stim_dict.items(): - if fn not in ['sweep_onset_frames', 'sweeps', 'sources', 'comments', 'description']: - dgts.set_value(fn, fv) - dgts.finalize() - - def _add_locally_sparse_noise_stimulation_camstim(self, stim_dict): - - lsnts = self.create_timeseries(ts_type='TimeSeries', - name=stim_dict['stim_name'], - modality='stimulus') - lsnts.set_time(stim_dict['global_frame_ind']) - lsnts.set_data(stim_dict['probes'], unit='', conversion=np.nan, resolution=np.nan) - lsnts.set_source(stim_dict['source']) - lsnts.set_comments(stim_dict['comments']) - lsnts.set_description(stim_dict['description']) - for fn, fv in stim_dict.items(): - if fn not in ['probes', 'global_frame_ind', 'sources', 'comments', 'description']: - lsnts.set_value(fn, fv) - lsnts.finalize() - - def add_visual_stimuli_camstim(self, stim_dict_lst): - - for stim_dict in stim_dict_lst: - if stim_dict['stim_type'] == 'drifting_grating_camstim': - print('adding stimulus: {} to nwb.'.format(stim_dict['stim_name'])) - self._add_drifting_grating_stimulation_camstim(stim_dict=stim_dict) - elif stim_dict['stim_type'] == 'locally_sparse_noise_camstim': - print('adding stimulus: {} to nwb.'.format(stim_dict['stim_name'])) - self._add_locally_sparse_noise_stimulation_camstim(stim_dict=stim_dict) - else: - pass - - # ===========================camstim visual stimuli 
related=========================================================
+        if sn_grp['stim_name'].value != 'SparseNoise':
+            raise NameError('The input stimulus should be "SparseNoise".')
+        frames = sn_grp['data'].value
+        frames = [tuple(x) for x in frames]
+        dtype = [('isDisplay', int), ('azimuth', float), ('altitude', float), ('sign', int), ('isOnset', int)]
+        frames = np.array(frames, dtype=dtype)
-    # ===========================retinotopic_mapping visual stimuli related (indexed display)===========================
-    def add_visual_display_log_retinotopic_mapping(self, stim_log):
-        """
-        add visual display log into nwb.
+        all_squares = []
+        for i in range(len(frames)):
+            if frames[i]['isDisplay'] == 1 and \
+                    (i == 0 or (frames[i - 1]['isOnset'] == -1 and frames[i]['isOnset'] == 1)):
+                all_squares.append(np.array((i, frames[i]['azimuth'], frames[i]['altitude'], frames[i]['sign']),
+                                            dtype=np.float32))
-        :param stim_log: retinotopic_mapping.DisplayLogAnalysis.DisplayLogAnalyzer instance
-        :return: None
-        """
+        all_squares = np.array(all_squares)
-        stim_dict = stim_log.get_stim_dict()
-        stim_ns = stim_dict.keys()
-        stim_ns.sort()
-        for stim_n in stim_ns:
-            curr_stim_dict = stim_dict[stim_n]
-
-            print('\nadding {} to nwb ...'.format(stim_n))
-
-            if stim_n[-35:] == 'StimulusSeparatorRetinotopicMapping':
-                self._add_stimulus_separator_retinotopic_mapping(curr_stim_dict)
-            elif stim_n[-33:] == 'UniformContrastRetinotopicMapping':
-                self._add_uniform_contrast_retinotopic_mapping(curr_stim_dict)
-            elif stim_n[-32:] == 'FlashingCircleRetinotopicMapping':
-                self._add_flashing_circle_retinotopic_mapping(curr_stim_dict)
-            elif stim_n[-39:] == 'DriftingGratingCircleRetinotopicMapping':
-                self._add_drifting_grating_circle_retinotopic_mapping(curr_stim_dict)
-            elif stim_n[-37:] == 'StaticGratingCircleRetinotopicMapping':
-                self._add_static_grating_circle_retinotopic_mapping(curr_stim_dict)
-            elif stim_n[-30:] == '_SparseNoiseRetinotopicMapping':
-                self._add_sparse_noise_retinotopic_mapping(curr_stim_dict)
-            elif stim_n[-36:] == 'LocallySparseNoiseRetinotopicMapping':
-                self._add_locally_sparse_noise_retinotopic_mapping(curr_stim_dict)
-            elif stim_n[-30:] == 'StaticImagesRetinotopicMapping':
-                self._add_static_images_retinotopic_mapping(curr_stim_dict)
-            elif stim_n[-37:] == 'SinusoidalLuminanceRetinotopicMapping':
-                self._add_sinusoidal_luminance_retinotopic_mapping(curr_stim_dict)
-            else:
-                raise ValueError('Do not understand stimulus name: {}.'.format(stim_n))
+        pooled_squares = {}
+        unique_squares = list(set([tuple(x[1:]) for x in all_squares]))
+        for i, unique_square in enumerate(unique_squares):
+            curr_square_n = 'square_' + ft.int2str(i, 5)
+            curr_azi = unique_square[0]
+            curr_alt = unique_square[1]
+            curr_sign = unique_square[2]
+            curr_onset_ind = []
+            for j, given_square in enumerate(all_squares):
+                if np.array_equal(given_square[1:], unique_square):
+                    curr_onset_ind.append(j)
+            pooled_squares.update({curr_square_n: {'azi': curr_azi,
+                                                   'alt': curr_alt,
+                                                   'sign': curr_sign,
+                                                   'onset_ind': curr_onset_ind}})
+        data_format = ['display frame indices for the onset of each square', 'azimuth of each square',
+                       'altitude of each square', 'sign of each square']
+        description = 'TimeSeries of sparse noise square onsets. Stimulus generated by ' \
+                      'corticalmapping.VisualStim.SparseNoise class.'
+        return all_squares, data_format, description, pooled_squares
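For orientation, a small, hypothetical illustration (values invented) of the two structures this helper returns: each row of all_squares is [onset display-frame index, azimuth, altitude, sign], and pooled_squares groups the appearances of the same square:

import numpy as np

# hypothetical output of _analyze_sparse_noise_frames for a three-onset session
all_squares = np.array([[12., 30., -10., 1.],    # frame 12: square at azi 30, alt -10, ON
                        [45., 30., -10., 1.],    # frame 45: the same square displayed again
                        [78., -20., 5., -1.]])   # frame 78: a different, OFF square
pooled_squares = {'square_00000': {'azi': 30.0, 'alt': -10.0, 'sign': 1.0, 'onset_ind': [0, 1]},
                  'square_00001': {'azi': -20.0, 'alt': 5.0, 'sign': -1.0, 'onset_ind': [2]}}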
-    def get_display_delay_retinotopic_mapping(self, stim_log, indicator_color_thr=0.5, ccg_t_range=(0., 0.1),
-                                              ccg_bins=100, is_plot=True, pd_onset_ts_path=None,
-                                              vsync_frame_ts_path=None):
+    @staticmethod
+    def _analyze_driftig_grating_frames(dg_grp):
+        """
+        analyze drifting grating display frames saved in '/stimulus/presentation', extract information about onset of
+        each displayed grating:
-        :param stim_log: retinotopic_mapping.DisplayLogAnalysis.DisplayLogAnalyzer instance
-        :param indicator_color_thr: float, [-1., 1.]
-        :param ccg_t_range:
-        :param ccg_bins:
-        :param is_plot:
-        :param pd_onset_ts_path: str, path to the timeseries of photodiode onsets in seconds
-        :return:
+        return: all_gratings: 2-d array, each line is a displayed grating, each column is a feature of
+                              a particular grating, gratings follow display order
+                data_format: str, description of the column structure of each grating
+                description: str,
+                pooled_gratings: dict, gratings with same parameters are pooled together.
+                                 keys: 'grating_00000', 'grating_00001', 'grating_00002' ... each represents a unique
+                                       grating.
+                                 values: dict, {
+                                                'sf': ,
+                                                'tf': ,
+                                                'dir': ,
+                                                'con': ,
+                                                'r': ,
+                                                'onset_ind': list of indices of the appearances of the current
+                                                             grating in "all_gratings", to be aligned to photodiode
+                                                             onset timestamps
+                                                }
+        """
+        if dg_grp['stim_name'].value != 'DriftingGratingCircle':
+            raise NameError('The input stimulus should be "DriftingGratingCircle".')
-        # get photodiode onset timestamps (after display)
-        if pd_onset_ts_path is None:
-            if 'acquisition/timeseries/digital_photodiode_rise' in self.file_pointer:
-                pd_ts_pd = self.file_pointer['acquisition/timeseries/digital_photodiode_rise/timestamps'].value
-            elif 'analysis/PhotodiodeOnsets' in self.file_pointer:
-                pd_ts_pd = self.file_pointer['analysis/PhotodiodeOnsets/timestamps'].value
-            else:
-                raise LookupError('Cannot find photodiode onset timeseries.')
-        else:
-            pd_ts_pd = self.file_pointer[pd_onset_ts_path + '/timestamps'].value
-
-        # get vsync TTL timestamps for displayed frames
-        if vsync_frame_ts_path is None:
-            if 'acquisition/timeseries/digital_vsync_stim_rise' in self.file_pointer:
-                vsync_ts = self.file_pointer['acquisition/timeseries/digital_vsync_stim_rise/timestamps'].value
-            elif 'acquisition/timeseries/digital_vsync_visual_rise' in self.file_pointer:
-                vsync_ts = self.file_pointer['acquisition/timeseries/digital_vsync_visual_rise/timestamps'].value
-            else:
-                raise LookupError('Cannot find vsync TTL signal for displayed frames.')
-        else:
-            vsync_ts = self.file_pointer[vsync_frame_ts_path + '/timestamps'].value
-
-        # check vsync_stim number and total frame number
-        print('\nnumber of total frames in log file: {}'.format(stim_log.num_frame_tot))
-        print('number of vsync_stim TTL rise events: {}'.format(len(vsync_ts)))
-        if stim_log.num_frame_tot != len(vsync_ts):
-            raise ValueError('number of vsync_stim TTL rise events does not equal number of total frames in log file!')
-
-        # get photodiode onset timestamps from vsync_stim (before display)
-        stim_dict = stim_log.get_stim_dict()
-        pd_onsets_seq = stim_log.analyze_photodiode_onsets_sequential(stim_dict=stim_dict, pd_thr=indicator_color_thr)
-
-        pd_ts_vsync = []
-        for pd_onset in pd_onsets_seq:
-
-            if pd_onset['global_frame_ind'] < len(vsync_ts):
-                pd_ts_vsync.append(vsync_ts[pd_onset['global_frame_ind']])
-
-
-        # calculate display delay as the weighted average of pd_ccg
-        print('Total number of detected photodiode onsets: {}'.format(len(pd_ts_pd)))
-        print('calculating photodiode cross-correlogram ...')
-        pd_ccg = ta.discrete_cross_correlation(pd_ts_vsync, pd_ts_pd, t_range=ccg_t_range, bins=ccg_bins,
-                                               isPlot=is_plot)
-        if is_plot:
-            plt.show()
+        frames = dg_grp['data'].value
-        display_delay = np.sum(pd_ccg[0] * pd_ccg[1]) / np.sum(pd_ccg[1])
-        print('calculated display delay: {} second.'.format(display_delay))
-        self.file_pointer['analysis/visual_display_delay_sec'] = display_delay
+        all_gratings = []
+        for i in range(len(frames)):
+            if frames[i][8] == 1 and (i == 0 or (frames[i - 1][8] == -1)):
+                all_gratings.append(np.array((i, frames[i][2], frames[i][3], frames[i][4], frames[i][5], frames[i][6]),
+                                             dtype=np.float32))
-        return display_delay
+        all_gratings = np.array(all_gratings)
-    def add_photodiode_onsets_combined_retinotopic_mapping(self, pd_onsets_com, display_delay,
-                                                           vsync_frame_path='acquisition/timeseries/digital_vsync_stim_rise'):
-        """
-        add combined photodiode onsets to self, currently the field is 'analysis/photodiode_onsets'
+        pooled_gratings = {}
+        unique_gratings = list(set([tuple(x[1:]) for x in all_gratings]))
+        for i, unique_grating in enumerate(unique_gratings):
+            curr_grating_n = 'grating_' + ft.int2str(i, 5)
+            curr_sf = unique_grating[0]
+            curr_tf = unique_grating[1]
+            curr_dir = unique_grating[2]
+            curr_con = unique_grating[3]
+            curr_r = unique_grating[4]
+            curr_onset_ind = []
+            for j, given_grating in enumerate(all_gratings):
+                if np.array_equal(given_grating[1:], unique_grating):
+                    curr_onset_ind.append(j)
+            pooled_gratings.update({curr_grating_n: {'sf': curr_sf,
+                                                     'tf': curr_tf,
+                                                     'dir': curr_dir,
+                                                     'con': curr_con,
+                                                     'r': curr_r,
+                                                     'onset_ind': curr_onset_ind}})
+        data_format = ['display frame indices for the onset of each grating', 'spatial frequency (cyc/deg)',
+                       'temporal frequency (Hz)', 'moving direction (rad)', 'contrast (%)', 'radius (deg)']
+        description = 'TimeSeries of drifting grating circle onsets. Stimulus generated by ' \
+                      'corticalmapping.VisualStim.DriftingGratingCircle class.'
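+        # For example, the moving direction of the k-th displayed grating is
+        # all_gratings[k, 3] (columns follow data_format above), while
+        # pooled_gratings['grating_00000']['onset_ind'] lists the rows of
+        # all_gratings sharing one parameter combination.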
+ return all_gratings, data_format, description, pooled_gratings - :param pd_onsets_com: dictionary, product of - retinotopic_mapping.DisplayLogAnalysis.DisplayLogAnalyzer.analyze_photodiode_onsets_combined() - function - :param display_delay: float, display delay in seconds - :param vsync_frame_path: str, hdf5 path to digital timeseries of digital_vsync_frame_rise - :return: None + @staticmethod + def _analyze_flashing_circle_frames(fc_grp): """ + analyze flashing circle display frames saved in '/stimulus/presentation', extract information about onset of + each displayed circle: - vsync_stim_ts = self.file_pointer[vsync_frame_path]['timestamps'].value + display_delay - - stim_ns = pd_onsets_com.keys() - stim_ns.sort() - - pd_grp = self.file_pointer['analysis'].create_group('photodiode_onsets') - for stim_n in stim_ns: - stim_grp = pd_grp.create_group(stim_n) - pd_onset_ns = pd_onsets_com[stim_n].keys() - pd_onset_ns.sort() - for pd_onset_n in pd_onset_ns: - pd_onset_grp = stim_grp.create_group(pd_onset_n) - pd_onset_grp['global_pd_onset_ind'] = pd_onsets_com[stim_n][pd_onset_n]['global_pd_onset_ind'] - pd_onset_grp['global_frame_ind'] = pd_onsets_com[stim_n][pd_onset_n]['global_frame_ind'] - - try: - pd_onset_grp['pd_onset_ts_sec'] = vsync_stim_ts[pd_onsets_com[stim_n][pd_onset_n]['global_frame_ind']] - except IndexError: - pd_onset_ts_sec = [] - for gfi in pd_onsets_com[stim_n][pd_onset_n]['global_frame_ind']: - if gfi < len(vsync_stim_ts): - pd_onset_ts_sec.append(vsync_stim_ts[gfi]) - pd_onset_grp['pd_onset_ts_sec'] = pd_onset_ts_sec - - - def get_drifting_grating_response_table_retinotopic_mapping(self, stim_name, time_window=(-1, 2.5)): - - def get_sta(arr, arr_ts, trigger_ts, frame_start, frame_end): - - sta_arr = [] - - for trig in trigger_ts: - trig_ind = ta.find_nearest(arr_ts, trig) - - if trig_ind + frame_end < arr.shape[1]: - curr_sta = arr[:, (trig_ind + frame_start): (trig_ind + frame_end)] - # print(curr_sta.shape) - sta_arr.append(curr_sta.reshape((curr_sta.shape[0], 1, curr_sta.shape[1]))) - - sta_arr = np.concatenate(sta_arr, axis=1) - return sta_arr - - if time_window[0] >= time_window[1]: - raise ValueError('time window should be from early time to late time.') - - grating_onsets_path = 'analysis/photodiode_onsets/{}'.format(stim_name) - grating_ns = self.file_pointer[grating_onsets_path].keys() - grating_ns.sort() - # print('\n'.join(grating_ns)) - - rois_and_traces_names = self.file_pointer['processing'].keys() - rois_and_traces_names = [n for n in rois_and_traces_names if n[0:15] == 'rois_and_traces'] - rois_and_traces_names.sort() - # print('\n'.join(rois_and_traces_paths)) - - res_grp = self.file_pointer['analysis'].create_group('response_table_{}'.format(stim_name)) - for curr_trace_name in rois_and_traces_names: - - print('\nadding drifting grating response table for {} ...'.format(curr_trace_name)) - - curr_plane_n = curr_trace_name[16:] - - res_grp_plane = res_grp.create_group(curr_plane_n) - - # get trace time stamps - trace_ts = self.file_pointer['processing/motion_correction/MotionCorrection' \ - '/{}/corrected/timestamps'.format(curr_plane_n)] - # get traces - traces = {} - if 'processing/{}/DfOverF/dff_center'.format(curr_trace_name) in self.file_pointer: - traces['global_dff_center'] = self.file_pointer[ - 'processing/{}/DfOverF/dff_center/data'.format(curr_trace_name)].value - if 'processing/{}/Fluorescence'.format(curr_trace_name) in self.file_pointer: - f_types = self.file_pointer['processing/{}/Fluorescence'.format(curr_trace_name)].keys() - for 
f_type in f_types: - traces[f_type] = self.file_pointer['processing/{}/Fluorescence/{}/data' - .format(curr_trace_name, f_type)].value - # print(traces.keys()) - - # frame_dur = np.mean(np.diff(trace_ts)) - # frame_start = int(time_window[0] // frame_dur) - # frame_end = int(time_window[1] // frame_dur) - # t_axis = np.arange(frame_end - frame_start) * frame_dur + time_window[0] - - frame_dur = np.mean(np.diff(trace_ts)) - frame_start = int(np.floor(time_window[0] / frame_dur)) - frame_end = int(np.ceil(time_window[1] / frame_dur)) - - t_axis = np.arange(frame_end - frame_start) * frame_dur + (frame_start * frame_dur) - res_grp_plane.attrs['sta_timestamps'] = t_axis - - for grating_n in grating_ns: - - onsets_grating_grp = self.file_pointer['{}/{}'.format(grating_onsets_path, grating_n)] - - curr_grating_grp = res_grp_plane.create_group(grating_n) - - grating_onsets = onsets_grating_grp['pd_onset_ts_sec'].value - - curr_grating_grp.attrs['global_trigger_timestamps'] = grating_onsets - curr_grating_grp.attrs['sta_traces_dimenstion'] = 'roi x trial x timepoint' - - for trace_n, trace in traces.items(): - sta = get_sta(arr=trace, arr_ts=trace_ts, trigger_ts=grating_onsets, frame_start=frame_start, - frame_end=frame_end) - curr_grating_grp.create_dataset('sta_' + trace_n, data=sta, compression='lzf') - - def get_spatial_temporal_receptive_field_retinotopic_mapping(self, stim_name, time_window=(-0.5, 2.), - verbose=True): - - def get_sta(arr, arr_ts, trigger_ts, frame_start, frame_end): - - sta_arr = [] - - for trig in trigger_ts: - trig_ind = ta.find_nearest(arr_ts, trig) - - if trig_ind + frame_end < arr.shape[1]: - curr_sta = arr[:, (trig_ind + frame_start): (trig_ind + frame_end)] - # print(curr_sta.shape) - sta_arr.append(curr_sta.reshape((curr_sta.shape[0], 1, curr_sta.shape[1]))) - - sta_arr = np.concatenate(sta_arr, axis=1) - return sta_arr - - if time_window[0] >= time_window[1]: - raise ValueError('time window should be from early time to late time.') - - probe_onsets_path = 'analysis/photodiode_onsets/{}'.format(stim_name) - probe_ns = self.file_pointer[probe_onsets_path].keys() - probe_ns.sort() - # print('\n'.join(probe_ns)) - - rois_and_traces_names = self.file_pointer['processing'].keys() - rois_and_traces_names = [n for n in rois_and_traces_names if n[0:15] == 'rois_and_traces'] - rois_and_traces_names.sort() - # print('\n'.join(rois_and_traces_paths)) - - strf_grp = self.file_pointer['analysis'].create_group('strf_{}'.format(stim_name)) - for curr_trace_name in rois_and_traces_names: - - if verbose: - print('\nadding strfs for {} ...'.format(curr_trace_name)) - - curr_plane_n = curr_trace_name[16:] - - strf_grp_plane = strf_grp.create_group(curr_plane_n) - - # get trace time stamps - trace_ts = self.file_pointer['processing/motion_correction/MotionCorrection' \ - '/{}/corrected/timestamps'.format(curr_plane_n)] - # get traces - traces = {} - if 'processing/{}/DfOverF/dff_center'.format(curr_trace_name) in self.file_pointer: - traces['global_dff_center'] = self.file_pointer[ - 'processing/{}/DfOverF/dff_center/data'.format(curr_trace_name)].value - if 'processing/{}/Fluorescence'.format(curr_trace_name) in self.file_pointer: - f_types = self.file_pointer['processing/{}/Fluorescence'.format(curr_trace_name)].keys() - for f_type in f_types: - traces[f_type] = self.file_pointer['processing/{}/Fluorescence/{}/data' - .format(curr_trace_name, f_type)].value - # print(traces.keys()) - - frame_dur = np.mean(np.diff(trace_ts)) - frame_start = int(np.floor(time_window[0] / frame_dur)) - 
frame_end = int(np.ceil(time_window[1] / frame_dur)) - t_axis = np.arange(frame_end - frame_start) * frame_dur + (frame_start * frame_dur) - # t_axis = np.arange(frame_end - frame_start) * frame_dur + time_window[0] - - strf_grp_plane.attrs['sta_timestamps'] = t_axis - - for probe_i, probe_n in enumerate(probe_ns): - - if verbose: - print('\tprocessing probe {} / {}'.format(probe_i+1, len(probe_ns))) - - onsets_probe_grp = self.file_pointer['{}/{}'.format(probe_onsets_path, probe_n)] - - curr_probe_grp = strf_grp_plane.create_group(probe_n) - - probe_onsets = onsets_probe_grp['pd_onset_ts_sec'].value - - curr_probe_grp['global_trigger_timestamps'] = h5py.SoftLink('/{}/{}/pd_onset_ts_sec' - .format(probe_onsets_path, probe_n)) - curr_probe_grp.attrs['sta_traces_dimenstion'] = 'roi x trial x timepoint' - - for trace_n, trace in traces.items(): - sta = get_sta(arr=trace, arr_ts=trace_ts, trigger_ts=probe_onsets, frame_start=frame_start, - frame_end=frame_end) - curr_probe_grp.create_dataset('sta_' + trace_n, data=sta, compression='lzf') - - def _add_stimulus_separator_retinotopic_mapping(self, ss_dict): - - stim_name = ss_dict['stim_name'] - - if stim_name[-35:] != 'StimulusSeparatorRetinotopicMapping': - raise ValueError('stimulus should be "StimulusSeparatorRetinotopicMapping" (StimulusSeparator from ' - 'retinotopic_mapping package). ') - - # add template - template_ts = self.create_timeseries('TimeSeries', stim_name, 'template') - template_ts.set_data(ss_dict['frames_unique'], unit='', conversion=np.nan, resolution=np.nan) - template_ts.set_value('num_samples', len(ss_dict['frames_unique'])) - template_ts.set_source(ss_dict['source']) - template_ts.finalize() - - # add stimulus - stim_ts = self.create_timeseries('IndexSeries', stim_name, 'stimulus') - stim_ts.set_time(ss_dict['timestamps'], dtype='u8') - stim_ts.set_data(ss_dict['index_to_display'], unit='frame', conversion=1, resolution=1, dtype='u4') - stim_ts.set_value_as_link('indexed_timeseries', '/stimulus/templates/{}'.format(stim_name)) - stim_ts.set_comments('The "timestamps" of this TimeSeries are indices (64-bit unsigned integer, hacked the ' - 'original ainwb code) referencing the entire display sequence. It should match hardware ' - 'vsync TTL (see "/acquisition/timeseries/digital_vsync_stim/rise"). The "data" of this ' - 'TimeSeries are indices referencing the frames template saved in the "indexed_timeseries" ' - 'field.') - stim_ts.set_description('stimulus separator displayed by retinotopic_mapping package') - stim_ts.set_source(ss_dict['source']) - for key in ['frame_config', 'stim_name', 'pregap_dur', 'postgap_dur', 'coordinate', 'background']: - stim_ts.set_value(key, ss_dict[key]) - stim_ts.set_value('indicator_on_frame_num', ss_dict['indicator_on_frame_num']) - stim_ts.set_value('indicator_off_frame_num', ss_dict['indicator_off_frame_num']) - stim_ts.set_value('cycle_num', ss_dict['cycle_num']) - - stim_ts.finalize() - - def _add_uniform_contrast_retinotopic_mapping(self, uc_dict): - stim_name = uc_dict['stim_name'] - - if stim_name[-33:] != 'UniformContrastRetinotopicMapping': - raise ValueError('stimulus should be "UniformContrastRetinotopicMapping" (UniformContrast from ' - 'retinotopic_mapping package). 
') - - # add template - template_ts = self.create_timeseries('TimeSeries', stim_name, 'template') - template_ts.set_data(uc_dict['frames_unique'], unit='', conversion=np.nan, resolution=np.nan) - template_ts.set_value('num_samples', len(uc_dict['frames_unique'])) - template_ts.set_source(uc_dict['source']) - template_ts.finalize() - - # add stimulus - stim_ts = self.create_timeseries('IndexSeries', stim_name, 'stimulus') - stim_ts.set_time(uc_dict['timestamps'], dtype='u8') - stim_ts.set_data(uc_dict['index_to_display'], unit='frame', conversion=1, resolution=1, dtype='u4') - stim_ts.set_value_as_link('indexed_timeseries', '/stimulus/templates/{}'.format(stim_name)) - stim_ts.set_comments('The "timestamps" of this TimeSeries are indices (64-bit unsigned integer, hacked the ' - 'original ainwb code) referencing the entire display sequence. It should match hardware ' - 'vsync TTL (see "/acquisition/timeseries/digital_vsync_stim/rise"). The "data" of this ' - 'TimeSeries are indices referencing the frames template saved in the "indexed_timeseries" ' - 'field.') - stim_ts.set_description('uniform contrast displayed by retinotopic_mapping package') - stim_ts.set_source(uc_dict['source']) - for key in ['frame_config', 'stim_name', 'pregap_dur', 'postgap_dur', 'coordinate', 'background']: - stim_ts.set_value(key, uc_dict[key]) - stim_ts.set_value('duration', uc_dict['duration']) - stim_ts.set_value('color', uc_dict['color']) - stim_ts.finalize() - - def _add_flashing_circle_retinotopic_mapping(self, fc_dict): - stim_name = fc_dict['stim_name'] - - if stim_name[-32:] != 'FlashingCircleRetinotopicMapping': - raise ValueError('stimulus should be "FlashingCircleRetinotopicMapping" (FlashingCircle from ' - 'retinotopic_mapping package). ') - - # add template - template_ts = self.create_timeseries('TimeSeries', stim_name, 'template') - template_ts.set_data(fc_dict['frames_unique'], unit='', conversion=np.nan, resolution=np.nan) - template_ts.set_value('num_samples', len(fc_dict['frames_unique'])) - template_ts.set_source(fc_dict['source']) - template_ts.finalize() - - # add stimulus - stim_ts = self.create_timeseries('IndexSeries', stim_name, 'stimulus') - stim_ts.set_time(fc_dict['timestamps'], dtype='u8') - stim_ts.set_data(fc_dict['index_to_display'], unit='frame', conversion=1, resolution=1, dtype='u4') - stim_ts.set_value_as_link('indexed_timeseries', '/stimulus/templates/{}'.format(stim_name)) - stim_ts.set_comments('The "timestamps" of this TimeSeries are indices (64-bit unsigned integer, hacked the ' - 'original ainwb code) referencing the entire display sequence. It should match hardware ' - 'vsync TTL (see "/acquisition/timeseries/digital_vsync_stim/rise"). 
The "data" of this ' - 'TimeSeries are indices referencing the frames template saved in the "indexed_timeseries" ' - 'field.') - stim_ts.set_description('flashing circle displayed by retinotopic_mapping package') - stim_ts.set_source(fc_dict['source']) - for key in ['frame_config', 'stim_name', 'pregap_dur', 'postgap_dur', 'coordinate', 'background']: - stim_ts.set_value(key, fc_dict[key]) - stim_ts.set_value('is_smooth_edge', fc_dict['is_smooth_edge']) - stim_ts.set_value('smooth_width_ratio', fc_dict['smooth_width_ratio']) - stim_ts.set_value('center', fc_dict['center']) - stim_ts.set_value('radius', fc_dict['radius']) - stim_ts.set_value('flash_frame_num', fc_dict['flash_frame_num']) - stim_ts.set_value('midgap_dur', fc_dict['midgap_dur']) - stim_ts.set_value('iteration', fc_dict['iteration']) - stim_ts.set_value('color', fc_dict['color']) - stim_ts.finalize() - - def _add_drifting_grating_circle_retinotopic_mapping(self, dgc_dict): - stim_name = dgc_dict['stim_name'] - - if stim_name[-39:] != 'DriftingGratingCircleRetinotopicMapping': - raise ValueError('stimulus should be "DriftingGratingCircleRetinotopicMapping" (DriftingGratingCircle from ' - 'retinotopic_mapping package). ') - - # add template - template_ts = self.create_timeseries('TimeSeries', stim_name, 'template') - frames_unique = dgc_dict['frames_unique'] - frames_template = [] - for frame in frames_unique: - - # temporally fix a bug - if frame == (1, 1, 0., 0., 0., 0., 0., 1.): - frame = (1, 1, 0., 0., 0., 0., 0., 0., 1.) - - if frame == (1, 1, 0., 0., 0., 0., 0., 0.): - frame = (1, 1, 0., 0., 0., 0., 0., 0., 0.) - - curr_frame = np.array(frame) - # print(curr_frame) - curr_frame[curr_frame == None] = np.nan - frames_template.append(np.array(curr_frame, dtype=np.float32)) - frames_template = np.array(frames_template) - template_ts.set_data(frames_template, unit='', conversion=np.nan, resolution=np.nan) - template_ts.set_value('num_samples', frames_template.shape[0]) - template_ts.set_source(dgc_dict['source']) - template_ts.finalize() - - # add stimulus - stim_ts = self.create_timeseries('IndexSeries', stim_name, 'stimulus') - stim_ts.set_time(dgc_dict['timestamps'], dtype='u8') - stim_ts.set_data(dgc_dict['index_to_display'], unit='frame', conversion=1, resolution=1, dtype='u4') - stim_ts.set_value_as_link('indexed_timeseries', '/stimulus/templates/{}'.format(stim_name)) - stim_ts.set_comments('The "timestamps" of this TimeSeries are indices (64-bit unsigned integer, hacked the ' - 'original ainwb code) referencing the entire display sequence. It should match hardware ' - 'vsync TTL (see "/acquisition/timeseries/digital_vsync_stim/rise"). 
The "data" of this ' - 'TimeSeries are indices referencing the frames template saved in the "indexed_timeseries" ' - 'field.') - stim_ts.set_description('drifting grating circle displayed by retinotopic_mapping package') - stim_ts.set_source(dgc_dict['source']) - for key in ['frame_config', 'stim_name', 'pregap_dur', 'postgap_dur', 'coordinate', 'background']: - stim_ts.set_value(key, dgc_dict[key]) - stim_ts.set_value('is_smooth_edge', dgc_dict['is_smooth_edge']) - stim_ts.set_value('smooth_width_ratio', dgc_dict['smooth_width_ratio']) - stim_ts.set_value('center', dgc_dict['center']) - stim_ts.set_value('iteration', dgc_dict['iteration']) - stim_ts.set_value('dire_list', dgc_dict['dire_list']) - stim_ts.set_value('radius_list', dgc_dict['radius_list']) - stim_ts.set_value('con_list', dgc_dict['con_list']) - stim_ts.set_value('sf_list', dgc_dict['sf_list']) - stim_ts.set_value('tf_list', dgc_dict['tf_list']) - stim_ts.set_value('block_dur', dgc_dict['block_dur']) - stim_ts.set_value('midgap_dur', dgc_dict['midgap_dur']) - stim_ts.finalize() - - def _add_static_grating_circle_retinotopic_mapping(self, sgc_dict): - stim_name = sgc_dict['stim_name'] - - if stim_name[-37:] != 'StaticGratingCircleRetinotopicMapping': - raise ValueError('stimulus should be "StaticGratingCircleRetinotopicMapping" (StaticGratingCircle from ' - 'retinotopic_mapping package). ') - - # add template - template_ts = self.create_timeseries('TimeSeries', stim_name, 'template') - frames_unique = sgc_dict['frames_unique'] - frames_template = [] - for frame in frames_unique: - curr_frame = np.array(frame) - curr_frame[curr_frame == None] = np.nan - frames_template.append(np.array(curr_frame, dtype=np.float32)) - frames_template = np.array(frames_template) - template_ts.set_data(frames_template, unit='', conversion=np.nan, resolution=np.nan) - template_ts.set_value('num_samples', frames_template.shape[0]) - template_ts.set_source(sgc_dict['source']) - template_ts.finalize() - - # add stimulus - stim_ts = self.create_timeseries('IndexSeries', stim_name, 'stimulus') - stim_ts.set_time(sgc_dict['timestamps'], dtype='u8') - stim_ts.set_data(sgc_dict['index_to_display'], unit='frame', conversion=1, resolution=1, dtype='u4') - stim_ts.set_value_as_link('indexed_timeseries', '/stimulus/templates/{}'.format(stim_name)) - stim_ts.set_comments('The "timestamps" of this TimeSeries are indices (64-bit unsigned integer, hacked the ' - 'original ainwb code) referencing the entire display sequence. It should match hardware ' - 'vsync TTL (see "/acquisition/timeseries/digital_vsync_stim/rise"). 
The "data" of this ' - 'TimeSeries are indices referencing the frames template saved in the "indexed_timeseries" ' - 'field.') - stim_ts.set_description('static grating circle displayed by retinotopic_mapping package') - stim_ts.set_source(sgc_dict['source']) - for key in ['frame_config', 'stim_name', 'pregap_dur', 'postgap_dur', 'coordinate', 'background']: - stim_ts.set_value(key, sgc_dict[key]) - stim_ts.set_value('is_smooth_edge', sgc_dict['is_smooth_edge']) - stim_ts.set_value('smooth_width_ratio', sgc_dict['smooth_width_ratio']) - stim_ts.set_value('center', sgc_dict['center']) - stim_ts.set_value('iteration', sgc_dict['iteration']) - stim_ts.set_value('ori_list', sgc_dict['ori_list']) - stim_ts.set_value('radius_list', sgc_dict['radius_list']) - stim_ts.set_value('con_list', sgc_dict['con_list']) - stim_ts.set_value('sf_list', sgc_dict['sf_list']) - stim_ts.set_value('phase_list', sgc_dict['phase_list']) - stim_ts.set_value('display_dur', sgc_dict['display_dur']) - stim_ts.set_value('midgap_dur', sgc_dict['midgap_dur']) - stim_ts.finalize() - - def _add_sparse_noise_retinotopic_mapping(self, sn_dict): - stim_name = sn_dict['stim_name'] - - if stim_name[-30:] != '_SparseNoiseRetinotopicMapping': - raise ValueError('stimulus should be "SparseNoiseRetinotopicMapping" (SparseNoise from ' - 'retinotopic_mapping package). ') - - # add template - template_ts = self.create_timeseries('TimeSeries', stim_name, 'template') - frames_template = [] - probes = [] - for frame in sn_dict['frames_unique']: - - frames_template.append(np.array([frame[0], frame[3]], dtype=np.float32)) - - if frame[1] is None: - probes.append(np.array([np.nan, np.nan, np.nan], dtype=np.float32)) - else: - # print([frame[1][0], frame[1][1], frame[2]]) - probes.append(np.array([frame[1][0], frame[1][1], frame[2]], dtype=np.float32)) - - frames_template = np.array(frames_template) - probes = np.array(probes) - template_ts.set_data(frames_template, unit='', conversion=np.nan, resolution=np.nan) - template_ts.set_value('probes', probes) - template_ts.set_value('num_samples', frames_template.shape[0]) - template_ts.set_source(sn_dict['source']) - template_ts.set_description('The "data" field saved modified frame configuration: ' - '[is_display, indicator color)]. While the "probe" field saved modified probe ' - 'configuration: [altitude, azimuth, polarity]. These two fields have one-to-one ' - 'relationship. Together they define an unique display frame of sparse noise ' - 'stimulus. The order of these two fields should not be changed.') - template_ts.finalize() - - # add stimulus - stim_ts = self.create_timeseries('IndexSeries', stim_name, 'stimulus') - stim_ts.set_time(sn_dict['timestamps'], dtype='u8') - stim_ts.set_data(sn_dict['index_to_display'], unit='frame', conversion=1, resolution=1, dtype='u4') - stim_ts.set_value_as_link('indexed_timeseries', '/stimulus/templates/{}'.format(stim_name)) - stim_ts.set_comments('The "timestamps" of this TimeSeries are indices (64-bit unsigned integer, hacked the ' - 'original ainwb code) referencing the entire display sequence. It should match hardware ' - 'vsync TTL (see "/acquisition/timeseries/digital_vsync_stim/rise"). 
The "data" of this ' - 'TimeSeries are indices referencing the frames template saved in the "indexed_timeseries" ' - 'field.') - stim_ts.set_description('sparse noise displayed by retinotopic_mapping package') - stim_ts.set_source(sn_dict['source']) - for key in ['stim_name', 'pregap_dur', 'postgap_dur', 'coordinate', 'background']: - stim_ts.set_value(key, sn_dict[key]) - stim_ts.set_value('frame_config', ['is_display', 'indicator color[-1., 1.]']) # modified frame config - stim_ts.set_value('probe_config', ['altitude (deg)', 'azimuth (deg)', 'polarity']) # modified probe config - stim_ts.set_value('is_include_edge', sn_dict['is_include_edge']) - stim_ts.set_value('probe_frame_num', sn_dict['probe_frame_num']) - stim_ts.set_value('subregion', sn_dict['subregion']) - stim_ts.set_value('iteration', sn_dict['iteration']) - stim_ts.set_value('grid_space', sn_dict['grid_space']) - stim_ts.set_value('probe_orientation', sn_dict['probe_orientation']) - stim_ts.set_value('sign', sn_dict['sign']) - stim_ts.set_value('probe_size', sn_dict['probe_size']) - stim_ts.finalize() - - def _add_locally_sparse_noise_retinotopic_mapping(self, lsn_dict): - stim_name = lsn_dict['stim_name'] - - if stim_name[-36:] != 'LocallySparseNoiseRetinotopicMapping': - raise ValueError('stimulus should be "LocallySparseNoiseRetinotopicMapping" (LocallySparseNoise from ' - 'retinotopic_mapping package). ') - - # add template - template_ts = self.create_timeseries('TimeSeries', stim_name, 'template') - - max_probe_num = 0 # get max probe number in a single frame - for frame in lsn_dict['frames_unique']: - if frame[1] is not None: - max_probe_num = max([max_probe_num, len(frame[1])]) - - frames_template = np.empty((len(lsn_dict['frames_unique']), 2), dtype=np.float32) - probes = np.empty((len(lsn_dict['frames_unique']), max_probe_num, 3), dtype=np.float64) - probes[:] = np.nan - - for frame_ind, frame in enumerate(lsn_dict['frames_unique']): - - frames_template[frame_ind] = np.array([frame[0], frame[3]], dtype=np.float32) - - if frame[1] is not None: - for curr_probe_i, curr_probe in enumerate(frame[1]): - probes[frame_ind, curr_probe_i, :] = np.array([curr_probe], dtype=np.float64) - - template_ts.set_data(frames_template, unit='', conversion=np.nan, resolution=np.nan) - template_ts.set_value('probes', probes, dtype='float64') - template_ts.set_value('num_samples', frames_template.shape[0]) - template_ts.set_source(lsn_dict['source']) - template_ts.set_description('The "data" field saved modified frame configuration: ' - '[is_display, indicator color)]. While the "probe" field saved modified probe ' - 'configuration. It is a 3-d array with axis: [frame_num, probe_num, probe_info], ' - 'the probe info is specified as: [altitude, azimuth, polarity]. The frame ' - 'dimension of these two fields have one-to-one relationship. ' - 'Together they define an unique display frame of locally sparse noise stimulus. ' - 'The order of these two fields should not be changed.') - template_ts.finalize() - - # add stimulus - stim_ts = self.create_timeseries('IndexSeries', stim_name, 'stimulus') - stim_ts.set_time(lsn_dict['timestamps'], dtype='u8') - stim_ts.set_data(lsn_dict['index_to_display'], unit='frame', conversion=1, resolution=1, dtype='u4') - stim_ts.set_value_as_link('indexed_timeseries', '/stimulus/templates/{}'.format(stim_name)) - stim_ts.set_comments('The "timestamps" of this TimeSeries are indices (64-bit unsigned integer, hacked the ' - 'original ainwb code) referencing the entire display sequence. 
It should match hardware ' - 'vsync TTL (see "/acquisition/timeseries/digital_vsync_stim/rise"). The "data" of this ' - 'TimeSeries are indices referencing the frames template saved in the "indexed_timeseries" ' - 'field.') - stim_ts.set_description('locally sparse noise displayed by retinotopic_mapping package') - stim_ts.set_source(lsn_dict['source']) - for key in ['stim_name', 'pregap_dur', 'postgap_dur', 'coordinate', 'background']: - stim_ts.set_value(key, lsn_dict[key]) - stim_ts.set_value('frame_config', ['is_display', 'indicator color[-1., 1.]']) # modified frame config - stim_ts.set_value('probe_config', ['frame_num x probe_num x probe_info (altitude_deg, azimuth_deg, polarity)']) # modified probe config - stim_ts.set_value('is_include_edge', lsn_dict['is_include_edge']) - stim_ts.set_value('probe_frame_num', lsn_dict['probe_frame_num']) - stim_ts.set_value('subregion', lsn_dict['subregion']) - stim_ts.set_value('iteration', lsn_dict['iteration']) - stim_ts.set_value('grid_space', lsn_dict['grid_space']) - stim_ts.set_value('probe_orientation', lsn_dict['probe_orientation']) - stim_ts.set_value('sign', lsn_dict['sign']) - stim_ts.set_value('probe_size', lsn_dict['probe_size']) - stim_ts.set_value('min_distance', lsn_dict['min_distance']) - stim_ts.set_value('repeat', lsn_dict['repeat']) - stim_ts.finalize() - - def _add_static_images_retinotopic_mapping(self, si_dict): - stim_name = si_dict['stim_name'] - - if stim_name[-30:] != 'StaticImagesRetinotopicMapping': - raise ValueError('stimulus should be "StaticImagesRetinotopicMapping" (StaticImages from ' - 'retinotopic_mapping package). ') - - # add template - template_ts = self.create_timeseries('TimeSeries', stim_name, 'template') - frames_unique = si_dict['frames_unique'] - frames_template = [] - for frame in frames_unique: - curr_frame = np.array(frame) - curr_frame[curr_frame == None] = np.nan - frames_template.append(np.array(curr_frame, dtype=np.float32)) - frames_template = np.array(frames_template) - template_ts.set_data(frames_template, unit='', conversion=np.nan, resolution=np.nan) - template_ts.set_value('num_samples', frames_template.shape[0]) - template_ts.set_value('images_wrapped', si_dict['images_wrapped']) - template_ts.set_value('images_dewrapped', si_dict['images_dewrapped']) - template_ts.set_source(si_dict['source']) - template_ts.finalize() - - # add stimulus - stim_ts = self.create_timeseries('IndexSeries', stim_name, 'stimulus') - stim_ts.set_time(si_dict['timestamps'], dtype='u8') - stim_ts.set_data(si_dict['index_to_display'], unit='frame', conversion=1, resolution=1, dtype='u4') - stim_ts.set_value_as_link('indexed_timeseries', '/stimulus/templates/{}'.format(stim_name)) - stim_ts.set_comments('The "timestamps" of this TimeSeries are indices (64-bit unsigned integer, hacked the ' - 'original ainwb code) referencing the entire display sequence. It should match hardware ' - 'vsync TTL (see "/acquisition/timeseries/digital_vsync_stim/rise"). 
The "data" of this ' - 'TimeSeries are indices referencing the frames template saved in the "indexed_timeseries" ' - 'field.') - stim_ts.set_description('static images displayed by retinotopic_mapping package') - stim_ts.set_source(si_dict['source']) - for key in ['frame_config', 'stim_name', 'pregap_dur', 'postgap_dur', 'coordinate', 'background']: - stim_ts.set_value(key, si_dict[key]) - stim_ts.set_value('altitude_dewrapped', si_dict['altitude_dewrapped']) - stim_ts.set_value('azimuth_dewrapped', si_dict['azimuth_dewrapped']) - stim_ts.set_value('img_center', si_dict['img_center']) - stim_ts.set_value('midgap_dur', si_dict['midgap_dur']) - stim_ts.set_value('display_dur', si_dict['display_dur']) - stim_ts.set_value('iteration', si_dict['iteration']) - stim_ts.set_value('deg_per_pixel_azi', si_dict['deg_per_pixel_azi']) - stim_ts.set_value('deg_per_pixel_alt', si_dict['deg_per_pixel_alt']) - stim_ts.finalize() - - def _add_sinusoidal_luminance_retinotopic_mapping(self, sl_dict): - stim_name = sl_dict['stim_name'] - - if stim_name[-37:] != 'SinusoidalLuminanceRetinotopicMapping': - raise ValueError('stimulus should be "SinusoidalLuminanceRetinotopicMapping" (StaticImages from ' - 'retinotopic_mapping package). ') - - # add template - template_ts = self.create_timeseries('TimeSeries', stim_name, 'template') - frames_unique = sl_dict['frames_unique'] - frames_template = [] - for frame in frames_unique: - curr_frame = np.array(frame) - curr_frame[curr_frame == None] = np.nan - frames_template.append(np.array(curr_frame, dtype=np.float32)) - frames_template = np.array(frames_template) - template_ts.set_data(frames_template, unit='', conversion=np.nan, resolution=np.nan) - template_ts.set_value('num_samples', frames_template.shape[0]) - template_ts.set_source(sl_dict['source']) - template_ts.finalize() - - # add stimulus - stim_ts = self.create_timeseries('IndexSeries', stim_name, 'stimulus') - stim_ts.set_time(sl_dict['timestamps'], dtype='u8') - stim_ts.set_data(sl_dict['index_to_display'], unit='frame', conversion=1, resolution=1, dtype='u4') - stim_ts.set_value_as_link('indexed_timeseries', '/stimulus/templates/{}'.format(stim_name)) - stim_ts.set_comments('The "timestamps" of this TimeSeries are indices (64-bit unsigned integer, hacked the ' - 'original ainwb code) referencing the entire display sequence. It should match hardware ' - 'vsync TTL (see "/acquisition/timeseries/digital_vsync_stim/rise"). 
The "data" of this '
-                             'TimeSeries are indices referencing the frames template saved in the "indexed_timeseries" '
-                             'field.')
-        stim_ts.set_description('sinusoidal luminance displayed by retinotopic_mapping package')
-        stim_ts.set_source(sl_dict['source'])
-        for key in ['frame_config', 'stim_name', 'pregap_dur', 'postgap_dur', 'coordinate', 'background']:
-            stim_ts.set_value(key, sl_dict[key])
-        stim_ts.set_value('cycle_num', sl_dict['cycle_num'])
-        stim_ts.set_value('max_level', sl_dict['max_level'])
-        stim_ts.set_value('start_phase', sl_dict['start_phase'])
-        stim_ts.set_value('midgap_dur', sl_dict['midgap_dur'])
-        stim_ts.set_value('frequency', sl_dict['frequency'])
-        stim_ts.finalize()
-    # ===========================retinotopic_mapping visual stimuli related (indexed display)===========================
-
-
-    # ===========================corticalmapping visual stimuli related (non-indexed display)===========================
-    def add_visual_stimulus_corticalmapping(self, log_path, display_order=0):
-        """
-        load visual stimulation given saved display log pickle file
-        :param log_path: the path to the display log generated by corticalmapping.VisualStim
-        :param display_order: int, in case there is more than one visual display in the file.
-                              This value records the order of the displays
-        :return:
+        return: all_circles: 2-d array, each line is the onset of displayed circle, each column is a feature of
+                             that circle, circles follow the display order
+                data_format: str, description of the column structure of each circle
+                description: str,
+                pooled_circles: None
         """
-        self._check_display_order(display_order)
-        log_dict = ft.loadFile(log_path)
-
-        stim_name = log_dict['stimulation']['stimName']
+        if fc_grp['stim_name'].value != 'FlashingCircle':
+            raise NameError('The input stimulus should be "FlashingCircle".')
-        display_frames = log_dict['presentation']['displayFrames']
-        time_stamps = log_dict['presentation']['timeStamp']
+        frames = fc_grp['data'][:, 0]
+        azi = fc_grp['center_azimuth_deg'].value
+        alt = fc_grp['center_altitude_deg'].value
+        color_c = fc_grp['center_color'].value
+        color_b = fc_grp['background_color'].value
+        radius = fc_grp['radius_deg'].value
-        if len(display_frames) != len(time_stamps):
-            print ('\nWarning: {}'.format(log_path))
-            print('Unequal number of displayFrames ({}) and timeStamps ({}).'.format(len(display_frames),
-                                                                                     len(time_stamps)))
+        all_circles = []
+        for i in range(len(frames)):
+            if frames[i] == 1 and (i == 0 or (frames[i - 1] == 0)):
+                all_circles.append(np.array((i, azi, alt, color_c, color_b, radius), dtype=np.float32))
-        if stim_name == 'SparseNoise':
-            self._add_sparse_noise_stimulus_corticalmapping(log_dict, display_order=display_order)
-        elif stim_name == 'FlashingCircle':
-            self._add_flashing_circle_stimulus_corticalmapping(log_dict, display_order=display_order)
-        elif stim_name == 'UniformContrast':
-            self._add_uniform_contrast_stimulus_corticalmapping(log_dict, display_order=display_order)
-        elif stim_name == 'DriftingGratingCircle':
-            self._add_drifting_grating_circle_stimulus_corticalmapping(log_dict, display_order=display_order)
-        elif stim_name == 'KSstimAllDir':
-            self._add_drifting_checker_board_stimulus_corticalmapping(log_dict, display_order=display_order)
-        else:
-            raise ValueError('stimulation name {} unrecognizable!'.format(stim_name))
+        all_circles = np.array(all_circles)
+        data_format = ['display frame indices for the onset of each circle', 'center_azimuth_deg',
+                       'center_altitude_deg', 'center_color', 'background_color', 'radius_deg']
+        description = 'TimeSeries of flashing circle onsets. Stimulus generated by ' \
+                      'corticalmapping.VisualStim.FlashingCircle class.'
+        return all_circles, data_format, description, None
-    def add_visual_stimuli_corticalmapping(self, log_paths):
+    @staticmethod
+    def _analyze_uniform_contrast_frames(uc_grp):
-        exist_stimuli = self.file_pointer['stimulus/presentation'].keys()
+        if uc_grp['stim_name'].value != 'UniformContrast':
+            raise NameError('The input stimulus should be "UniformContrast".')
-        for i, log_path in enumerate(log_paths):
-            self.add_visual_stimulus_corticalmapping(log_path, i + len(exist_stimuli))
+        onset_array = np.array([])
+        data_format = ''
+        description = 'TimeSeries of uniform contrast stimulus. No onset information. Stimulus generated by ' \
+                      'corticalmapping.VisualStim.UniformContrast class.'
+        return onset_array, data_format, description, {}
-    def analyze_visual_stimuli_corticalmapping(self, onsets_ts=None):
+    def analyze_visual_stimuli(self, onsets_ts=None):
         """
         add stimuli onset timestamps of all saved stimulus presentations to 'processing/stimulus_onsets' module
@@ -1974,10 +868,10 @@ def analyze_visual_stimuli_corticalmapping(self, onsets_ts=None):
         """
         if onsets_ts is None:
-            print 'input onsets_ts is None, try to use photodiode onsets as onsets_ts.'
+            print('input onsets_ts is None, try to use photodiode onsets as onsets_ts.')
             onsets_ts = self.file_pointer['processing/PhotodiodeOnsets/photodiode_onsets/timestamps'].value
-        stim_ns = self.file_pointer['stimulus/presentation'].keys()
+        stim_ns = list(self.file_pointer['stimulus/presentation'].keys())
         stim_ns.sort()
         total_onsets = 0
@@ -1989,13 +883,13 @@ def analyze_visual_stimuli_corticalmapping(self, onsets_ts=None):
             curr_stim_grp = self.file_pointer['stimulus/presentation'][stim_n]
             if curr_stim_grp['stim_name'].value == 'SparseNoise':
-                _ = self._analyze_sparse_noise_frames_corticalmapping(curr_stim_grp)
+                _ = self._analyze_sparse_noise_frames(curr_stim_grp)
             elif curr_stim_grp['stim_name'].value == 'FlashingCircle':
-                _ = self._analyze_flashing_circle_frames_corticalmapping(curr_stim_grp)
+                _ = self._analyze_flashing_circle_frames(curr_stim_grp)
             elif curr_stim_grp['stim_name'].value == 'DriftingGratingCircle':
-                _ = self._analyze_driftig_grating_frames_corticalmapping(curr_stim_grp)
+                _ = self._analyze_drifting_grating_frames(curr_stim_grp)
             elif curr_stim_grp['stim_name'].value == 'UniformContrast':
-                _ = self._analyze_uniform_contrast_frames_corticalmapping(curr_stim_grp)
+                _ = self._analyze_uniform_contrast_frames(curr_stim_grp)
             else:
                 raise LookupError('Do not understand stimulus type: {}.'.format(stim_n))
@@ -2013,13 +907,13 @@ def analyze_visual_stimuli_corticalmapping(self, onsets_ts=None):
             curr_stim_grp = self.file_pointer['stimulus/presentation'][stim_n]
             if curr_stim_grp['stim_name'].value == 'SparseNoise':
-                _ = self._analyze_sparse_noise_frames_corticalmapping(curr_stim_grp)
+                _ = self._analyze_sparse_noise_frames(curr_stim_grp)
             elif curr_stim_grp['stim_name'].value == 'FlashingCircle':
-                _ = self._analyze_flashing_circle_frames_corticalmapping(curr_stim_grp)
+                _ = self._analyze_flashing_circle_frames(curr_stim_grp)
             elif curr_stim_grp['stim_name'].value == 'DriftingGratingCircle':
-                _ = self._analyze_driftig_grating_frames_corticalmapping(curr_stim_grp)
+                _ = self._analyze_drifting_grating_frames(curr_stim_grp)
             elif curr_stim_grp['stim_name'].value == 'UniformContrast':
-                _ = self._analyze_uniform_contrast_frames_corticalmapping(curr_stim_grp)
+                _ = self._analyze_uniform_contrast_frames(curr_stim_grp)
             else:
                 raise LookupError('Do not understand stimulus type: {}.'.format(stim_n))
@@ -2056,7 +950,7 @@ def analyze_visual_stimuli_corticalmapping(self, onsets_ts=None):
                     curr_onset.set_value('sign', curr_stim_grp['sign'].value)
                     curr_onset.set_value('subregion_deg', curr_stim_grp['subregion_deg'].value)
                     curr_onset.finalize()
-                for curr_sn, curr_sd in pooled_onsets.items():
+                for curr_sn, curr_sd in list(pooled_onsets.items()):
                     curr_s_ts = self.create_timeseries('TimeSeries', curr_sn, modality='other')
                     curr_s_ts.set_data([], unit='', conversion=np.nan, resolution=np.nan)
                     curr_s_ts.set_time(curr_onset_ts[curr_sd['onset_ind']])
@@ -2077,7 +971,7 @@ def analyze_visual_stimuli_corticalmapping(self, onsets_ts=None):
                     curr_onset.set_value('spatial_frequency_list', curr_stim_grp['spatial_frequency_list'].value)
                     curr_onset.set_value('temporal_frequency_list', curr_stim_grp['temporal_frequency_list'].value)
                     curr_onset.finalize()
-                for curr_gn, curr_gd in pooled_onsets.items():
+                for curr_gn, curr_gd in list(pooled_onsets.items()):
                     curr_g_ts = self.create_timeseries('TimeSeries', curr_gn, modality='other')
                     curr_g_ts.set_data([], unit='', conversion=np.nan, resolution=np.nan)
                     curr_g_ts.set_time(curr_onset_ts[curr_gd['onset_ind']])
@@ -2091,180 +985,288 @@ def analyze_visual_stimuli_corticalmapping(self, onsets_ts=None):
             curr_onset_start_ind = curr_onset_start_ind + curr_onset_arr.shape[0]
-    @staticmethod
-    def _analyze_sparse_noise_frames_corticalmapping(sn_grp):
+    def add_photodiode_onsets(self, digitizeThr=0.9, filterSize=0.01, segmentThr=0.01, smallestInterval=0.03,
+                              expected_onsets_number=None):
         """
-        analyze sparse noise display frames saved in '/stimulus/presentation', extract information about onset of
-        each displayed square:
+        intermediate processing step for analysis of visual display, containing the information about the onsets of
+        the photodiode signal. Timestamps are extracted from the photodiode signal and should be aligned to the
+        master clock. Extraction is done by the corticalmapping.HighLevel.segmentPhotodiodeSignal() function. The
+        raw signal is first digitized by the digitize_threshold and then filtered by a Gaussian filter with the
+        filter_size. The derivative of the filtered signal is calculated by numpy.diff and multiplied with the
+        digitized signal. The segmentation_threshold is then used to detect rising edges of the resulting signal.
+        Any onset whose interval from the previous onset is smaller than the smallest_interval is discarded.
+        The resulting timestamps of photodiode onsets are saved in the 'processing/photodiode_onsets' timeseries.
-        return: all_squares: 2-d array, each line is a displayed square in sparse noise, each column is a feature of
-                             a particular square, squares follow display order
-                data_format: str, description of the column structure of each square
-                description: str,
-                pooled_squares: dict, squares with same location and sign are pooled together.
-                                keys: 'square_00000', 'square_00001', 'square_00002' ... each represents a unique
-                                      square.
-                                values: dict, {
-                                               'azi': ,
-                                               'alt': ,
-                                               'sign': ,
-                                               'onset_ind': list of indices of the appearances of current square in
-                                                            in "all_squares", to be aligned with to photodiode onset
-                                                            timestamps
-                                               }
+        :param digitizeThr: float
+        :param filterSize: float
+        :param segmentThr: float
+        :param smallestInterval: float
+        :param expected_onsets_number: int, expected number of photodiode onsets, may be extracted from the visual
+                                       display log. If the extracted onset number does not match this number, the
+                                       process will be aborted. If None, no such check will be performed.
+        :return:
+        """
+        fs = self.file_pointer['acquisition/timeseries/photodiode/starting_time'].attrs['rate']
+        pd = self.file_pointer['acquisition/timeseries/photodiode/data'].value * \
+             self.file_pointer['acquisition/timeseries/photodiode/data'].attrs['conversion']
+
+        pd_onsets = hl.segmentPhotodiodeSignal(pd, digitizeThr=digitizeThr, filterSize=filterSize,
+                                               segmentThr=segmentThr, Fs=fs, smallestInterval=smallestInterval)
+
+        if pd_onsets.shape[0] == 0:
+            return
+
+        if expected_onsets_number is not None:
+            if len(pd_onsets) != expected_onsets_number:
+                raise ValueError('The number of photodiode onsets (' + str(len(pd_onsets)) + ') and the expected '
+                                 'number of sweeps ' + str(expected_onsets_number) + ' do not match. Abort.')
+
+        pd_ts = self.create_timeseries('TimeSeries', 'photodiode_onsets', modality='other')
+        pd_ts.set_time(pd_onsets)
+        pd_ts.set_data([], unit='', conversion=np.nan, resolution=np.nan)
+        pd_ts.set_description('intermediate processing step for analysis of visual display, containing the '
+                              'information about the onsets of the photodiode signal. Timestamps are extracted '
+                              'from the photodiode signal and should be aligned to the master clock. Extraction '
+                              'is done by the corticalmapping.HighLevel.segmentPhotodiodeSignal() function. The '
+                              'raw signal is first digitized by the digitize_threshold and then filtered by a '
+                              'Gaussian filter with the filter_size. The derivative of the filtered signal is '
+                              'calculated by numpy.diff and multiplied with the digitized signal. The '
+                              'segmentation_threshold is then used to detect rising edges of the resulting '
+                              'signal. Any onset whose interval from the previous onset is smaller than the '
+                              'smallest_interval is discarded.')
+        pd_ts.set_path('/processing/PhotodiodeOnsets')
+        pd_ts.set_value('digitize_threshold', digitizeThr)
+        pd_ts.set_value('filter_size', filterSize)
+        pd_ts.set_value('segmentation_threshold', segmentThr)
+        pd_ts.set_value('smallest_interval', smallestInterval)
+        pd_ts.finalize()
+
+    def plot_spike_waveforms(self, modulen, unitn, is_plot_filtered=False, fig=None, axes_size=(0.2, 0.2), **kwargs):
+        """
+        plot spike waveforms
+
+        :param modulen: str, name of the module containing ephys recordings
+        :param unitn: str, name of ephys unit, should be in '/processing/ephys_units/UnitTimes'
+        :param is_plot_filtered: bool, whether to also plot the filtered waveforms
+        :param channel_names: list of strs, channel names in continuous recordings, should be in '/acquisition/timeseries'
+        :param fig: matplotlib figure object
+        :param t_range: tuple of two floats, time range to plot along spike time stamps
+        :param kwargs: inputs to matplotlib.axes.plot() function
+        :return: fig
+        """
+        if modulen not in list(self.file_pointer['processing'].keys()):
+            raise LookupError('Can not find module for ephys recording: ' + modulen + '.')
+
+        if unitn not in list(self.file_pointer['processing'][modulen]['UnitTimes'].keys()):
+            raise LookupError('Can not find ephys unit: ' + unitn + '.')
+
+        ch_ns = self._get_channel_names()
+
+        unit_grp = self.file_pointer['processing'][modulen]['UnitTimes'][unitn]
+        waveforms = unit_grp['template'].value
+
+        if 'template_std' in list(unit_grp.keys()):
+            stds = unit_grp['template_std'].value
+        else:
+            stds = None
+
+        if is_plot_filtered:
+            if 'template_filtered' in list(unit_grp.keys()):
+                waveforms_f = unit_grp['template_filtered'].value
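+                # note: the filtered template std may be absent from a file;
+                # fall back to None below in that case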
+                if 'template_std_filtered' in list(unit_grp.keys()):
+                    stds_f = unit_grp['template_std_filtered'].value
+                else:
+                    stds_f = None
+            else:
+                print(('can not find filtered spike waveforms for unit: ' + unitn))
+                waveforms_f = None
+                stds_f = None
+        else:
+            waveforms_f = None
+            stds_f = None
+
+        if 'channel_xpos' in list(self.file_pointer['processing'][modulen].keys()):
+            ch_xpos = self.file_pointer['processing'][modulen]['channel_xpos']
+            ch_ypos = self.file_pointer['processing'][modulen]['channel_ypos']
+            ch_locations = list(zip(ch_xpos, ch_ypos))
+        else:
+            ch_locations = None
+
+        fig = plot_waveforms(waveforms, ch_locations=ch_locations, stds=stds, waveforms_filtered=waveforms_f,
+                             stds_filtered=stds_f, f=fig, ch_ns=ch_ns, axes_size=axes_size, **kwargs)
+
+        fig.suptitle(self.file_pointer['identifier'].value + ' : ' + unitn)
+
+        return fig
+
+    def add_motion_correction_module(self, module_name, original_timeseries_path, corrected_file_path,
+                                     corrected_dataset_path, xy_translation_offsets, interface_name='MotionCorrection',
+                                     mean_projection=None, max_projection=None, description='', comments='',
+                                     source=''):
+        """
+        add a motion corrected image series into the processing field as a module with the given name and create a
+        link to an external hdf5 file which contains the images.
+        :param module_name: str, module name to be created
+        :param interface_name: str, interface name of the image series
+        :param original_timeseries_path: str, the path to the timeseries of the original images
+        :param corrected_file_path: str, the full file system path to the hdf5 file containing the raw image data
+        :param corrected_dataset_path: str, the path within the hdf5 file pointing to the raw data. the object should
+                                       have at least 3 attributes: 'conversion', 'resolution' and 'unit'
+        :param xy_translation_offsets: 2d array with two columns, x and y offsets (pixels) for each frame
+        :param mean_projection: 2d array, mean projection of the corrected image, if None, no dataset will be
+                                created
+        :param max_projection: 2d array, max projection of the corrected image, if None, no dataset will be
+                               created
+        :return:
+        """
+
+        orig = self.file_pointer[original_timeseries_path]
+        timestamps = orig['timestamps'].value
+
+        img_file = h5py.File(corrected_file_path, 'r')
+        img_data = img_file[corrected_dataset_path]
+        if timestamps.shape[0] != img_data.shape[0]:
+            raise ValueError('Number of frames does not equal to the length of timestamps!')
+
+        if xy_translation_offsets.shape[0] != timestamps.shape[0]:
+            raise ValueError('Number of offsets does not equal to the length of timestamps!')
-        if sn_grp['stim_name'].value != 'SparseNoise':
-            raise NameError('The input stimulus should be "SparseNoise".')
+        corrected = self.create_timeseries(ts_type='ImageSeries', name='corrected', modality='other')
+        corrected.set_data_as_remote_link(corrected_file_path, corrected_dataset_path)
+        corrected.set_time_as_link(original_timeseries_path)
+        corrected.set_description(description)
+        corrected.set_comments(comments)
+        corrected.set_source(source)
+        for value_n in list(orig.keys()):
+            if value_n not in ['image_data_path_within_file', 'image_file_path', 'data', 'timestamps']:
+                corrected.set_value(value_n, orig[value_n].value)
-        frames = sn_grp['data'].value
-        frames = [tuple(x) for x in frames]
-        dtype = [('isDisplay', int), ('azimuth', float), ('altitude', float), ('sign', int), ('isOnset', int)]
-        frames = np.array(frames, dtype=dtype)
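+        # keep the per-frame (x, y) offsets alongside the corrected movie so the
+        # applied motion correction can be inspected or re-applied later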
+        xy_translation = self.create_timeseries(ts_type='TimeSeries', name='xy_translation', modality='other')
+        xy_translation.set_data(xy_translation_offsets, unit='pixel', conversion=np.nan, resolution=np.nan)
+        xy_translation.set_time_as_link(original_timeseries_path)
+        xy_translation.set_value('num_samples', xy_translation_offsets.shape[0])
+        xy_translation.set_description('Time series of x, y shifts applied to create motion stabilized image series')
+        xy_translation.set_value('feature_description', ['x_motion', 'y_motion'])
-        all_squares = []
-        for i in range(len(frames)):
-            if frames[i]['isDisplay'] == 1 and \
-                    (i == 0 or (frames[i - 1]['isOnset'] == -1 and frames[i]['isOnset'] == 1)):
-                all_squares.append(np.array((i, frames[i]['azimuth'], frames[i]['altitude'], frames[i]['sign']),
-                                            dtype=np.float32))
+        mc_mod = self.create_module(module_name)
+        mc_interf = mc_mod.create_interface("MotionCorrection")
+        mc_interf.add_corrected_image(interface_name, orig=original_timeseries_path, xy_translation=xy_translation,
+                                      corrected=corrected)
-        all_squares = np.array(all_squares)
+        if mean_projection is not None:
+            mc_interf.set_value('mean_projection', mean_projection)
-        pooled_squares = {}
-        unique_squares = list(set([tuple(x[1:]) for x in all_squares]))
-        for i, unique_square in enumerate(unique_squares):
-            curr_square_n = 'square_' + ft.int2str(i, 5)
-            curr_azi = unique_square[0]
-            curr_alt = unique_square[1]
-            curr_sign = unique_square[2]
-            curr_onset_ind = []
-            for j, give_square in enumerate(all_squares):
-                if np.array_equal(give_square[1:], unique_square):
-                    curr_onset_ind.append(j)
-            pooled_squares.update({curr_square_n: {'azi': curr_azi,
-                                                   'alt': curr_alt,
-                                                   'sign': curr_sign,
-                                                   'onset_ind': curr_onset_ind}})
-        all_squares = np.array(all_squares)
-        data_format = ['display frame indices for the onset of each square', 'azimuth of each square',
-                       'altitude of each square', 'sign of each square']
-        description = 'TimeSeries of sparse noise square onsets. Stimulus generated by ' \
-                      'corticalmapping.VisualStim.SparseNoise class.'
-        return all_squares, data_format, description, pooled_squares
+        if max_projection is not None:
+            mc_interf.set_value('max_projection', max_projection)
-    @staticmethod
-    def _analyze_driftig_grating_frames_corticalmapping(dg_grp):
+        mc_interf.finalize()
+        mc_mod.finalize()
+
+    def generate_dat_file_for_kilosort(self, output_folder, output_name, ch_ns, is_filtered=True, cutoff_f_low=300.,
+                                       cutoff_f_high=6000.):
         """
-        analyze drifting grating display frames saved in '/stimulus/presentation', extract information about onset of
-        each displayed grating:
+        generate .dat file for kilosort: "https://github.com/cortex-lab/KiloSort". It is a raw binary file with
+        structure: ch0_t0, ch1_t0, ch2_t0, ...., chn_t0, ch0_t1, ch1_t1, ch2_t1, ..., chn_t1, ..., ch0_tm, ch1_tm,
+        ch2_tm, ..., chn_tm
-        return: all_gratings: 2-d array, each line is a displayed square in sparse noise, each column is a feature of
-                              a particular square, squares follow display order
-                data_format: str, description of the column structure of each grating
-                description: str,
-                pooled_squares: dict, gratings with same parameters are pooled together.
-                                keys: 'grating_00000', 'grating_00001', 'grating_00002' ... each represents a unique
-                                      grating.
-                                values: dict, {
-                                               'sf': ,
-                                               'tf': ,
-                                               'direction': ,
-                                               'contrast': ,
-                                               'radius': ,
-                                               'azi':
-                                               'alt':
-                                               'onset_ind': list of indices of the appearances of current square in
-                                                            in "all_squares", to be aligned with to photodiode onset
-                                                            timestamps
-                                               }
+        :param output_folder: str, path to output directory
+        :param output_name: str, output file name, an extension of '.dat' will be automatically added.
+        :param ch_ns: list of strings, names of included analog channels
+        :param is_filtered: bool, if True, another .dat file of the same size will be generated in the output folder.
+                            this file will contain temporally filtered data (filtering done by
+                            corticalmapping.core.TimingAnalysis.butter_... functions). '_filtered' will be attached
+                            to the filtered file name.
+        :param cutoff_f_low: float, low cutoff frequency, Hz. if None, it will be low-pass
+        :param cutoff_f_high: float, high cutoff frequency, Hz, if None, it will be high-pass
+        :return: None
         """
-        if dg_grp['stim_name'].value != 'DriftingGratingCircle':
-            raise NameError('The input stimulus should be "DriftingGratingCircle".')
-        frames = dg_grp['data'].value
+        save_path = os.path.join(output_folder, output_name + '.dat')
+        if os.path.isfile(save_path):
+            raise IOError('Output file already exists.')
-        all_gratings = []
-        for i in range(len(frames)):
-            if frames[i][8] == 1 and (i == 0 or (frames[i - 1][8] == -1)):
-                all_gratings.append(np.array((i, frames[i][2], frames[i][3], frames[i][4], frames[i][5], frames[i][6]),
-                                             dtype=np.float32))
+        data_lst = []
+        for ch_n in ch_ns:
+            data_lst.append(self.file_pointer['acquisition/timeseries'][ch_n]['data'].value)
-        all_gratings = np.array(all_gratings)
+        dtype = data_lst[0].dtype
+        data = np.array(data_lst, dtype=dtype).flatten(order='F')
+        data.tofile(save_path)
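+        # e.g. for two channels A and B this writes A[0], B[0], A[1], B[1], ...:
+        # flatten(order='F') on the (n_channels, n_samples) array interleaves
+        # samples across channels, which is the layout KiloSort reads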
-        pooled_gratings = {}
-        unique_gratings = list(set([tuple(x[1:]) for x in all_gratings]))
-        for i, unique_grating in enumerate(unique_gratings):
-            curr_grating_n = 'grating_' + ft.int2str(i, 5)
-            curr_sf = unique_grating[0]
-            curr_tf = unique_grating[1]
-            curr_dir = unique_grating[2]
-            curr_con = unique_grating[3]
-            curr_r = unique_grating[4]
-            curr_onset_ind = []
-            for j, given_grating in enumerate(all_gratings):
-                if np.array_equal(given_grating[1:], unique_grating):
-                    curr_onset_ind.append(j)
-            pooled_gratings.update({curr_grating_n: {'sf': curr_sf,
-                                                     'tf': curr_tf,
-                                                     'dir': curr_dir,
-                                                     'con': curr_con,
-                                                     'r': curr_r,
-                                                     'onset_ind': curr_onset_ind}})
-        data_format = ['display frame indices for the onset of each square', 'spatial frequency (cyc/deg)',
-                       'temporal frequency (Hz)', 'moving direction (arc)', 'contrast (%)', 'radius (deg)']
-        description = 'TimeSeries of drifting grating circle onsets. Stimulus generated by ' \
-                      'corticalmapping.VisualStim.SparseNoise class.'
-        return all_gratings, data_format, description, pooled_gratings
+        if is_filtered:
-    @staticmethod
-    def _analyze_flashing_circle_frames_corticalmapping(fc_grp):
-        """
-        analyze flashing circle display frames saved in '/stimulus/presentation', extract information about onset of
-        each displayed circle:
+            if cutoff_f_low is None and cutoff_f_high is None:
+                print('both low cutoff frequency and high cutoff frequency are None. Do nothing.')
+                return
-        return: all_circles: 2-d array, each line is the onset of displayed circle, each column is a feature of
-                             that circle, circles follow the display order
-                data_format: str, description of the column structure of each circle
-                description: str,
-                pooled_circles: None
-        """
+            save_path_f = os.path.join(output_folder, output_name + '_filtered.dat')
+            if os.path.isfile(save_path_f):
+                raise IOError('Output file for filtered data already exists.')
-        if fc_grp['stim_name'].value != 'FlashingCircle':
-            raise NameError('The input stimulus should be "FlashingCircle".')
+            fs = self.file_pointer['general/extracellular_ephys/sampling_rate'].value
+            data_lst_f = []
+            for data_r in data_lst:
+                if cutoff_f_high is None:
+                    data_lst_f.append(ta.butter_lowpass(data_r, fs=fs, cutoff=cutoff_f_low).astype(dtype))
+                elif cutoff_f_low is None:
+                    data_lst_f.append(ta.butter_highpass(data_r, fs=fs, cutoff=cutoff_f_high).astype(dtype))
+                else:
+                    data_lst_f.append(ta.butter_bandpass(data_r,
+                                                         fs=fs,
+                                                         cutoffs=(cutoff_f_low, cutoff_f_high)).astype(dtype))
+            data_f = np.array(data_lst_f, dtype=dtype).flatten(order='F')
+            data_f.tofile(save_path_f)
-        frames = fc_grp['data'][:, 0]
-        azi = fc_grp['center_azimuth_deg'].value
-        alt = fc_grp['center_altitude_deg'].value
-        color_c = fc_grp['center_color'].value
-        color_b = fc_grp['background_color'].value
-        radius = fc_grp['radius_deg'].value
+    def _get_channel_names(self):
+        """
+        :return: sorted list of channel names, each channel name should have prefix 'ch_'
+        """
+        analog_chs = list(self.file_pointer['acquisition/timeseries'].keys())
+        channel_ns = [cn for cn in analog_chs if cn[0:3] == 'ch_']
+        channel_ns.sort()
+        return channel_ns
-        all_cirlces = []
-        for i in range(len(frames)):
-            if frames[i] == 1 and (i == 0 or (frames[i - 1] == 0)):
-                all_cirlces.append(np.array((i, azi, alt, color_c, color_b, radius), dtype=np.float32))
+    def get_analog_data(self, ch_n):
+        """
+        :param ch_n: string, analog channel name
+        :return: 1-d array, analog data, data * conversion
+                 1-d array, time stamps
+        """
+        grp = self.file_pointer['acquisition/timeseries'][ch_n]
+        data = grp['data'].value
+        if not np.isnan(grp['data'].attrs['conversion']):
+            data = data.astype(np.float32) * grp['data'].attrs['conversion']
+        if 'timestamps' in list(grp.keys()):
+            t = grp['timestamps']
+        elif 'starting_time' in list(grp.keys()):
+            fs = self.file_pointer['general/extracellular_ephys/sampling_rate'].value
+            sample_num = grp['num_samples'].value
+            t = np.arange(sample_num) / fs + grp['starting_time'].value
+        else:
+            raise ValueError('can not find timing information of channel: ' + ch_n)
+        return data, t
-        all_cirlces = np.array(all_cirlces)
-        data_format = ['display frame indices for the onset of each circle', 'center_azimuth_deg',
-                       'center_altitude_deg', 'center_color', 'background_color', 'radius_deg']
-        description = 'TimeSeries of flashing circle onsets. Stimulus generated by ' \
-                      'corticalmapping.VisualStim.SparseNoise class.'
-        return all_cirlces, data_format, description, None
+    def _check_display_order(self, display_order=None):
+        """
+        check display order to make sure each presentation has a unique position and that positions increase
+    def _check_display_order(self, display_order=None):
+        """
+        check display order, making sure each presentation has a unique index and the indices increase one by one;
+        also check that the given display_order is the next number in the sequence
+        """
+        stimuli = list(self.file_pointer['stimulus/presentation'].keys())
-    @staticmethod
-    def _analyze_uniform_contrast_frames_corticalmapping(uc_grp):
+        print('\nExisting visual stimuli:')
+        print(('\n'.join(stimuli)))
-        if uc_grp['stim_name'].value != 'UniformContrast':
-            raise NameError('The input stimulus should be "UniformContrast".')
+        stimuli = [int(s[0:s.find('_')]) for s in stimuli]
+        stimuli.sort()
+        if stimuli != list(range(len(stimuli))):
+            raise ValueError('display order is not incremental.')
-        onset_array = np.array([])
-        data_format = ''
-        description = 'TimeSeries of uniform contrast stimulus. No onset information. Stimulus generated by ' \
-                      'corticalmapping.VisualStim.UniformContrast class.'
-        return onset_array, data_format, description, {}
+        if display_order is not None:
-    def _add_sparse_noise_stimulus_corticalmapping(self, log_dict, display_order):
+            if display_order != len(stimuli):
+                raise ValueError('input display order is not the next display.')
+
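+    # Example of the convention _check_display_order enforces (group names
+    # hypothetical): with existing stimuli '000_SparseNoise' and
+    # '001_FlashingCircle', the integer prefixes parse to [0, 1], so the next
+    # stimulus must be added with display_order=2; any other value raises.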
+    def _add_sparse_noise_stimulation(self, log_dict, display_order):

         stim_name = log_dict['stimulation']['stimName']
@@ -2312,7 +1314,7 @@ def _add_sparse_noise_stimulus_corticalmapping(self, log_dict, display_order):
                         'corticalmapping.VisualStim.DisplaySequence for display')
         stim.finalize()

-    def _add_flashing_circle_stimulus_corticalmapping(self, log_dict, display_order):
+    def _add_flashing_circle_stimulation(self, log_dict, display_order):

         stim_name = log_dict['stimulation']['stimName']
@@ -2352,7 +1354,7 @@ def _add_flashing_circle_stimulus_corticalmapping(self, log_dict, display_order)
         stim.set_value('iteration', log_dict['stimulation']['iteration'])
         stim.finalize()

-    def _add_uniform_contrast_stimulus_corticalmapping(self, log_dict, display_order):
+    def _add_uniform_contrast_stimulation(self, log_dict, display_order):

         stim_name = log_dict['stimulation']['stimName']
@@ -2383,7 +1385,7 @@ def _add_uniform_contrast_stimulus_corticalmapping(self, log_dict, display_order
         stim.set_value('stim_name', log_dict['stimulation']['stimName'])
         stim.finalize()

-    def _add_drifting_grating_circle_stimulus_corticalmapping(self, log_dict, display_order):
+    def _add_drifting_grating_circle_stimulation(self, log_dict, display_order):

         stim_name = log_dict['stimulation']['stimName']
@@ -2401,9 +1403,9 @@ def _add_drifting_grating_circle_stimulus_corticalmapping(self, log_dict, displa
                             'stimulus')
         stim.set_time(time_stamps)
         stim.set_data(frame_array, unit='', conversion=np.nan, resolution=np.nan)
-        stim.set_comments('the timestamps of displayed frames (saved in data) are referenced to the start of '
-                          'this particular display, not the master time clock. For more useful timestamps, check '
-                          '"/processing" for aligned photodiode onset timestamps.')
+        stim.set_comments('the timestamps of displayed frames (saved in data) are referenced to the start of '
+                          'this particular display, not the master time clock. For more useful timestamps, check '
+                          '"/processing" for aligned photodiode onset timestamps.')
         stim.set_description('data formatting: [isDisplay (0:gap; 1:display), '
                              'firstFrameInCycle (first frame in cycle:1, rest display frames: 0), '
                              'spatialFrequency (cyc/deg), '
@@ -2435,7 +1437,7 @@ def _add_drifting_grating_circle_stimulus_corticalmapping(self, log_dict, displa
         stim.set_value('background_color', log_dict['stimulation']['background'])
         stim.finalize()

-    def _add_drifting_checker_board_stimulus_corticalmapping(self, log_dict, display_order):
+    def _add_drifting_checker_board_stimulation(self, log_dict, display_order):

         stim_name = log_dict['stimulation']['stimName']
@@ -2474,8 +1476,7 @@ def _add_drifting_checker_board_stimulus_corticalmapping(self, log_dict, display
                              'indicatorColor (for photodiode, from -1 to 1)]. '
                              'direction (B2U: 0, U2B: 1, L2R: 2, R2L: 3), '
                              'for gap frames, the 2ed to 3th elements should be np.nan.')
-        stim.set_value('data_formatting',
-                       ['isDisplay', 'squarePolarity', 'sweepIndex', 'indicatorColor', 'sweepDirection'])
+        stim.set_value('data_formatting', ['isDisplay', 'squarePolarity', 'sweepIndex', 'indicatorColor', 'sweepDirection'])
         stim.set_source('corticalmapping.VisualStim.KSstimAllDir for stimulus; '
                         'corticalmapping.VisualStim.DisplaySequence for display')
         stim.set_value('background_color', log_dict['stimulation']['background'])
@@ -2490,7 +1491,7 @@ def _add_drifting_checker_board_stimulus_corticalmapping(self, log_dict, display
         display_grp.attrs['description'] = 'This group saves the useful infomation about the retiotopic mapping visual' \
                                            'stimulation (drifting checker board sweeps in all directions). Generated ' \
                                            'by the corticalmapping.HighLevel.analysisMappingDisplayLog() function.'
-        for direction, value in display_info.items():
+        for direction, value in list(display_info.items()):
             dir_grp = display_grp.create_group(direction)
             dir_grp.attrs['description'] = 'group containing the relative information about all sweeps in a particular' \
                                            'sweep direction. B: bottom, U: up, L: nasal, R: temporal (for stimulus to' \
@@ -2506,95 +1507,38 @@ def _add_drifting_checker_board_stimulus_corticalmapping(self, log_dict, display
                                         'degree = phase * slope + intercept'
         equ_dset.attrs['data_format'] = ['slope', 'intercept']

-    # ===========================corticalmapping visual stimuli related (non-indexed display)===========================
+    def add_sync_data(self):
+        # not for now
+        pass

-
-    # ============================================eye tracking related==================================================
-    def add_eyetracking_data(self, ts_path='', pupil_x=None, pupil_y=None, pupil_area=None, module_name='eye_tracking',
-                             unit='unknown', side='leftright_unknown', comments='', description='', source='',
-                             pupil_shape=None, pupil_shape_meta=None):
+    def add_kilosort_clusters(self, folder, module_name, ind_start=None, ind_end=None):
         """
-        add eyetrackin data as a module named 'eye_tracking'
-        :param ts_path: str, timestamp path in the nwb file
-        :param pupil_x: 1-d array, horizontal position of pupil center
-        :param pupil_y: 1-d array, vertical position of pupil center
-        :param pupil_area: 1-d array, area of detected pupil
-        :param module_name: str, module name to be created
-        :param unit: str, the unit of pupil_x, pupil_y, the unit of pupil_area should be ^2
-        :param side: str, side of the eye, 'left' or 'right'
-        :param comments: str
-        :param description: str
-        :param source: str
+        expects spike_clusters.npy, spike_templates.npy, and spike_times.npy in the folder.
use only for the direct outputs of kilosort, + that haven't been modified with phy-template. + :param folder: :return: """ - if ts_path not in self.file_pointer['acquisition/timeseries'].keys(): - print('Cannot find field "{}" in "acquisition/timeseries".'.format(ts_path)) - return - else: - ts = self.file_pointer['acquisition/timeseries'][ts_path]['timestamps'].value - - ts_num = len(ts) - print('number of eyet racking timestamps: {}'.format(ts.shape)) - - ts_num_min = ts_num + # if ind_start == None: + # ind_start = 0 + # + # if ind_end == None: + # ind_end = self.file_pointer['acquisition/timeseries/photodiode/num_samples'].value + # + # if ind_start >= ind_end: + # raise ValueError('ind_end should be larger than ind_start.') + # + # spike_clusters = np.load(os.path.join(folder, 'spike_clusters.npy')) + # spike_templates = np.load(os.path.join(folder, 'spike_templates.npy')) + # spikes_times = np.load(os.path.join(folder, 'spike_times.npy')) + # templates = np.load(os.path.join(folder, 'templates.npy')) - if pupil_x is not None: - if pupil_x.shape[0] != ts_num: - print('length of pupil_x ({}) is different from the number' - ' of timestamps ({}).'.format(pupil_x.shape[0], ts_num)) - ts_num_min = min([ts_num_min, pupil_x.shape[0]]) - - if pupil_y is not None: - if pupil_y.shape[0] != ts_num: - print('length of pupil_y ({}) is different from the number' - ' of timestamps ({}).'.format(pupil_area.shape[0], ts_num)) - ts_num_min = min([ts_num_min, pupil_y.shape[0]]) - - if pupil_area is not None: - if pupil_area.shape[0] != ts_num: - print('length of pupil_area ({}) is different from the number' - ' of timestamps ({}).'.format(pupil_area.shape[0], ts_num)) - ts_num_min = min([ts_num_min, pupil_area.shape[0]]) - - ts_to_add = ts[0:ts_num_min] - - pupil_series = self.create_timeseries('TimeSeries', name='eyetracking', modality='other') - pupil_series.set_data([], unit='', conversion=np.nan, resolution=np.nan) - pupil_series.set_time(ts_to_add) - - if pupil_x is not None: - pupil_series.set_value('pupil_x', pupil_x[0:ts_num_min]) - - if pupil_y is not None: - pupil_series.set_value('pupil_y', pupil_y[0:ts_num_min]) - - if pupil_area is not None: - pupil_series.set_value('pupil_area', pupil_area[0:ts_num_min]) - - if pupil_shape is not None: - pupil_series.set_value('pupil_shape', pupil_shape[0:ts_num_min, :]) - - if pupil_shape_meta is not None: - pupil_series.set_value('pupil_shape_meta', pupil_shape_meta) - - pupil_series.set_value('unit', 'pupil_x: {}; pupil_y: {}; pupil_area: {} ^ 2'.format(unit, unit, unit)) - pupil_series.set_value('side', side) - pupil_series.set_comments(comments) - pupil_series.set_description(description) - pupil_series.set_source(source) - - et_mod = self.create_module('{}_{}'.format(module_name, side)) - et_interf = et_mod.create_interface("PupilTracking") - et_interf.add_timeseries(pupil_series) - pupil_series.finalize() - et_interf.finalize() - et_mod.finalize() - - # ============================================eye tracking related================================================== + # not for now + pass if __name__ == '__main__': + # ========================================================================================================= # tmp_path = r"E:\data\python_temp_folder\test.nwb" # open_ephys_folder = r"E:\data\2016-07-19-160719-M256896\100_spontaneous_2016-07-19_09-45-06_Jun" @@ -2638,7 +1582,7 @@ def add_eyetracking_data(self, ts_path='', pupil_x=None, pupil_y=None, pupil_are # log_path = 
r"E:\data\2016-06-29-160610-M240652-Ephys\101_160610172256-SparseNoise-M240652-Jun-0-" \ # r"notTriggered-complete.pkl" # rf = RecordedFile(tmp_path) - # rf.add_visual_stimulation_corticalmapping(log_path) + # rf.add_visual_stimulation(log_path) # rf.close() # ========================================================================================================= @@ -2647,7 +1591,7 @@ def add_eyetracking_data(self, ts_path='', pupil_x=None, pupil_y=None, pupil_are # log_path = r"\\aibsdata2\nc-ophys\CorticalMapping\IntrinsicImageData\161017-M274376-FlashingCircle" \ # r"\161017162026-FlashingCircle-M274376-Sahar-101-Triggered-complete.pkl" # rf = RecordedFile(tmp_path) - # rf.add_visual_stimulation_corticalmapping(log_path, display_order=1) + # rf.add_visual_stimulation(log_path, display_order=1) # rf.close() # ========================================================================================================= @@ -2656,7 +1600,7 @@ def add_eyetracking_data(self, ts_path='', pupil_x=None, pupil_y=None, pupil_are # log_paths = [r"\\aibsdata2\nc-ophys\CorticalMapping\IntrinsicImageData\161017-M274376-FlashingCircle\161017162026-FlashingCircle-M274376-Sahar-101-Triggered-complete.pkl", # r"E:\data\2016-06-29-160610-M240652-Ephys\101_160610172256-SparseNoise-M240652-Jun-0-notTriggered-complete.pkl",] # rf = RecordedFile(tmp_path) - # rf.add_visual_stimuli_corticalmapping(log_paths) + # rf.add_visual_stimulations(log_paths) # rf.close() # ========================================================================================================= @@ -2664,7 +1608,7 @@ def add_eyetracking_data(self, ts_path='', pupil_x=None, pupil_y=None, pupil_are # tmp_path = r"E:\data\python_temp_folder\test.nwb" # log_paths = [r"C:\data\sequence_display_log\161018164347-UniformContrast-MTest-Jun-255-notTriggered-complete.pkl"] # rf = RecordedFile(tmp_path) - # rf.add_visual_stimuli_corticalmapping(log_paths) + # rf.add_visual_stimulations(log_paths) # rf.close() # ========================================================================================================= @@ -2673,7 +1617,7 @@ def add_eyetracking_data(self, ts_path='', pupil_x=None, pupil_y=None, pupil_are # # log_paths = [r"C:\data\sequence_display_log\160205131514-ObliqueKSstimAllDir-MTest-Jun-255-notTriggered-incomplete.pkl"] # log_paths = [r"C:\data\sequence_display_log\161018174812-DriftingGratingCircle-MTest-Jun-255-notTriggered-complete.pkl"] # rf = RecordedFile(tmp_path) - # rf.add_visual_stimuli_corticalmapping(log_paths) + # rf.add_visual_stimulations(log_paths) # rf.close() # ========================================================================================================= @@ -2696,16 +1640,18 @@ def add_eyetracking_data(self, ts_path='', pupil_x=None, pupil_y=None, pupil_are # ========================================================================================================= # ========================================================================================================= - # rf = RecordedFile(r"D:\data2\thalamocortical_project\method_development\2017-02-25-ephys-software-development" - # r"\test_folder\170302_M292070_100_SparseNoise.nwb") - # unit = 'unit_00065' - # wfs = rf.file_pointer['processing/tetrode/UnitTimes'][unit]['template'].value - # stds = rf.file_pointer['processing/tetrode/UnitTimes'][unit]['template_std'].value - # x_pos = rf.file_pointer['processing/tetrode/channel_xpos'].value - # y_pos = rf.file_pointer['processing/tetrode/channel_ypos'].value - # rf.close() - # plot_waveforms(wfs, 
zip(x_pos, y_pos), stds, axes_size=(0.3, 0.3)) - # plt.show() + rf = RecordedFile(r"D:\data2\thalamocortical_project\method_development\2017-02-25-ephys-software-development" + r"\test_folder\170302_M292070_100_SparseNoise.nwb") + unit = 'unit_00065' + wfs = rf.file_pointer['processing/tetrode/UnitTimes'][unit]['template'].value + stds = rf.file_pointer['processing/tetrode/UnitTimes'][unit]['template_std'].value + x_pos = rf.file_pointer['processing/tetrode/channel_xpos'].value + y_pos = rf.file_pointer['processing/tetrode/channel_ypos'].value + rf.close() + plot_waveforms(wfs, list(zip(x_pos, y_pos)), stds, axes_size=(0.3, 0.3)) + plt.show() # ========================================================================================================= - print('for debug ...') + + + print('for debug ...') \ No newline at end of file diff --git a/corticalmapping/README.md b/corticalmapping/README.md new file mode 100644 index 0000000..18c9e1b --- /dev/null +++ b/corticalmapping/README.md @@ -0,0 +1,6 @@ +corticalmapping + +by Jun Zhuang @ 2014 + +contains basic visual stimulation, imaging analysis, plotting, cell visual response properties analysis functionalities. +also contains a relatively mature code base of retinotopic mapping (visual stimulation and analysis) \ No newline at end of file diff --git a/corticalmapping/RetinotopicMapping.py b/corticalmapping/RetinotopicMapping.py index 5a77143..fcea62f 100644 --- a/corticalmapping/RetinotopicMapping.py +++ b/corticalmapping/RetinotopicMapping.py @@ -11,19 +11,15 @@ from operator import itemgetter import skimage.morphology as sm import skimage.transform as tsfm - +import cv2 import matplotlib.colors as col from matplotlib import cm -import core.FileTools as ft -import core.ImageAnalysis as ia -import core.PlottingTools as pt -import tifffile as tf +import corticalmapping.core.FileTools as ft +import corticalmapping.core.ImageAnalysis as ia +import corticalmapping.core.PlottingTools as pt +import imaging_behavior.core.tifffile as tf -try: - import cv2 -except ImportError as e: - print e def loadTrial(trialPath): @@ -38,22 +34,20 @@ def loadTrial(trialPath): except KeyError: traces = [] - trial = RetinotopicMappingTrial(mouseID=trialDict['mouseID'], # str, mouseID - dateRecorded=trialDict['dateRecorded'], # int, date recorded, yearmonthday - trialNum=trialDict['trialNum'], # str, number of the trail on that day - mouseType=trialDict['mouseType'], # str, mouse Genotype - visualStimType=trialDict['visualStimType'], # str, stimulation type - visualStimBackground=trialDict['visualStimBackground'], - # str, background of visual stimulation - imageExposureTime=trialDict['imageExposureTime'], - # float, exposure time of image file - altPosMap=trialDict['altPosMap'], # altitude position map - aziPosMap=trialDict['aziPosMap'], # azimuth position map - altPowerMap=trialDict['altPowerMap'], # altitude power map - aziPowerMap=trialDict['aziPowerMap'], # azimuth power map - vasculatureMap=trialDict['vasculatureMap'], # vasculature map - params=trialDict['params'], - isAnesthetized=trialDict['isAnesthetized']) + trial = RetinotopicMappingTrial(mouseID = trialDict['mouseID'], # str, mouseID + dateRecorded = trialDict['dateRecorded'], # int, date recorded, yearmonthday + trialNum = trialDict['trialNum'], # str, number of the trail on that day + mouseType = trialDict['mouseType'], # str, mouse Genotype + visualStimType = trialDict['visualStimType'], # str, stimulation type + visualStimBackground = trialDict['visualStimBackground'], # str, background of visual 
stimulation + imageExposureTime = trialDict['imageExposureTime'], # float, exposure time of image file + altPosMap = trialDict['altPosMap'], # altitude position map + aziPosMap = trialDict['aziPosMap'], # azimuth position map + altPowerMap = trialDict['altPowerMap'], # altitude power map + aziPowerMap = trialDict['aziPowerMap'], # azimuth power map + vasculatureMap = trialDict['vasculatureMap'], # vasculature map + params = trialDict['params'], + isAnesthetized = trialDict['isAnesthetized']) try: trial.altPosMapf = trialDict['altPosMapf'] @@ -76,28 +70,22 @@ def loadTrial(trialPath): pass try: - if isinstance(trialDict['finalPatches'].values()[0], dict): + if isinstance(list(trialDict['finalPatches'].values())[0],dict): trial.finalPatches = {} - for area, patchDict in trialDict['finalPatches'].iteritems(): - try: - trial.finalPatches.update({area: Patch(patchDict['array'], patchDict['sign'])}) - except KeyError: - trial.finalPatches.update({area: Patch(patchDict['sparseArray'], patchDict['sign'])}) - else: - pass + for area,patchDict in trialDict['finalPatches'].items(): + try:trial.finalPatches.update({area:Patch(patchDict['array'],patchDict['sign'])}) + except KeyError:trial.finalPatches.update({area:Patch(patchDict['sparseArray'],patchDict['sign'])}) + else: pass except KeyError: pass try: - if isinstance(trialDict['finalPatchesMarked'].values()[0], dict): + if isinstance(list(trialDict['finalPatchesMarked'].values())[0],dict): trial.finalPatchesMarked = {} - for area, patchDict in trialDict['finalPatchesMarked'].iteritems(): - try: - trial.finalPatchesMarked.update({area: Patch(patchDict['array'], patchDict['sign'])}) - except KeyError: - trial.finalPatchesMarked.update({area: Patch(patchDict['sparseArray'], patchDict['sign'])}) - else: - pass + for area,patchDict in trialDict['finalPatchesMarked'].items(): + try:trial.finalPatchesMarked.update({area:Patch(patchDict['array'],patchDict['sign'])}) + except KeyError:trial.finalPatchesMarked.update({area:Patch(patchDict['sparseArray'],patchDict['sign'])}) + else: pass except KeyError: pass @@ -139,13 +127,13 @@ def findPhaseIndex(trace, harmonic=1): :return: index of the peak of the given harmonic ''' traceF = np.fft.fft(trace) - angle = (-1 * np.angle(traceF[harmonic])) % (2 * np.pi) - index = len(trace) * angle / (2 * np.pi) + angle = (-1*np.angle(traceF[harmonic]))%(2*np.pi) + index = len(trace) * angle / (2*np.pi) return index -def generatePhaseMap(movie, cycles=1, isReverse=False, isFilter=False, sigma=3., isplot=False): +def generatePhaseMap(movie,cycles = 1,isReverse = False,isFilter = False,sigma = 3.,isplot = False): ''' generating phase map of a 3-d movie, on the frequency defined by cycles. @@ -164,36 +152,36 @@ def generatePhaseMap(movie, cycles=1, isReverse=False, isFilter=False, sigma=3., if isReverse: movie = np.amax(movie) - movie - spectrumMovie = np.fft.fft(movie, axis=0) + spectrumMovie = np.fft.fft(movie,axis=0) - # generate power movie + #generate power movie powerMovie = (np.abs(spectrumMovie) * 2.) 
/ np.size(movie, 0) - powerMap = np.abs(powerMovie[cycles, :, :]) + powerMap = np.abs(powerMovie[cycles,:,:]) - # generate phase movie + #generate phase movie phaseMovie = np.angle(spectrumMovie) - # phaseMap = phaseMovie[cycles,:,:] - phaseMap = -1 * phaseMovie[cycles, :, :] + #phaseMap = phaseMovie[cycles,:,:] + phaseMap = -1 * phaseMovie[cycles,:,:] - # remove pixels with not enough power in the ideal frequency + #remove pixels with not enough power in the ideal frequency if isFilter == True: - meanPower = np.mean(powerMovie, axis=0) - stdPower = np.std(powerMovie, axis=0) + meanPower = np.mean(powerMovie, axis = 0) + stdPower = np.std(powerMovie, axis = 0) for i in np.arange(powerMap.shape[0]): for j in np.arange(powerMap.shape[1]): - if powerMap[i, j] < meanPower[i, j] + sigma * stdPower[i, j]: - phaseMap[i, j] = np.nan + if powerMap[i,j] < meanPower[i,j] + sigma * stdPower[i,j]: + phaseMap[i,j] = np.nan if isplot == True: plt.figure() plotMap = 180 * (phaseMap / np.pi) - plt.imshow(plotMap, cmap='hsv', aspect='equal') + plt.imshow(plotMap,aspect='equal') plt.colorbar() - return phaseMap % (2 * np.pi), powerMap # value from -pi to pi + return phaseMap % (2*np.pi), powerMap #value from -pi to pi -def generatePhaseMap2(movie, cycles, isReverse=False, isPlot=False): +def generatePhaseMap2(movie,cycles,isReverse = False,isPlot = False): ''' generating phase map of a 3-d movie, on the frequency defined by cycles. the movie should have the same length of 'cycles' number of cycles. @@ -203,14 +191,14 @@ def generatePhaseMap2(movie, cycles, isReverse=False, isPlot=False): spectrumMovie = np.fft.fft(movie, axis=0) - # generate power movie + #generate power movie powerMovie = (np.abs(spectrumMovie) * 2.) / np.size(movie, 0) - powerMap = np.abs(powerMovie[cycles, :, :]) + powerMap = np.abs(powerMovie[cycles,:,:]) - # generate phase movie + #generate phase movie phaseMovie = np.angle(spectrumMovie) - # phaseMap = phaseMovie[cycles,:,:] - phaseMap = -1 * phaseMovie[cycles, :, :] + #phaseMap = phaseMovie[cycles,:,:] + phaseMap = -1 * phaseMovie[cycles,:,:] phaseMap = phaseMap % (2 * np.pi) if isPlot == True: @@ -218,15 +206,13 @@ def generatePhaseMap2(movie, cycles, isReverse=False, isPlot=False): plotMap = 180 * (phaseMap / np.pi) plt.imshow(plotMap, aspect='equal', - cmap='hsv', - vmax=360, - vmin=0, - interpolation='nearest') + cmap = 'hsv', + vmax = 360, + vmin = 0, + interpolation = 'nearest') plt.colorbar() return phaseMap, powerMap - - # # def generatePhaseMap3(movie, filter_size=None, isReverse=False, isPlot=False): # ''' @@ -267,14 +253,14 @@ def generatePhaseMap2(movie, cycles, isReverse=False, isPlot=False): # return phaseMap, powerMap -def getPhase(trace, cycles, isReverse=False): +def getPhase(trace,cycles,isReverse = False): ''' return phase and power of a certain trace, the trace should have 'cycles' number of cycles the returned phase can be plugged direct into equation generated by getPhasePositionEquation() function to calculate retinotopic location of a pertical trace ''' - if trace.ndim != 1: raise ValueError, 'input trace should be a 1-d array!' + if trace.ndim != 1: raise ValueError('input trace should be a 1-d array!') if isReverse: trace = np.amax(trace) - trace @@ -292,36 +278,36 @@ def phasePosition(phaseMap, displayLog): log ''' - if not ('KSstim' in displayLog['stimulation']['stimName']): - raise TypeError, 'The stimulation is not KSstim!' 
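A quick numeric sanity check of the phase convention used by findPhaseIndex and getPhase above (a minimal sketch on synthetic data, not part of the original module): a cosine delayed by a quarter cycle should come back with a phase of about pi/2, i.e. an index a quarter of the way through the trace.

    import numpy as np
    n, cycles = 1000, 2
    t = np.arange(n)
    trace = np.cos(2 * np.pi * cycles * t / n - np.pi / 2)
    phase = (-1 * np.angle(np.fft.fft(trace)[cycles])) % (2 * np.pi)
    print(phase)                     # ~ pi / 2
    print(n * phase / (2 * np.pi))   # ~ 250, i.e. n / 4, as findPhaseIndex reports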
+ if not('KSstim' in displayLog['stimulation']['stimName']): + raise TypeError('The stimulation is not KSstim!') sweepTable = displayLog['stimulation']['sweepTable'] frames = displayLog['stimulation']['frames'] - phase = np.linspace(0, 2 * np.pi, len(frames), endpoint=False) + phase = np.linspace(0,2*np.pi,len(frames),endpoint=False) phaseIndexStart = np.nan phaseIndexEnd = np.nan phaseStart = np.nan phaseEnd = np.nan - for i in range(0, len(frames) - 1): - if (frames[i][2] == None) & (frames[i + 1][2] != None): - phaseStart = phase[i + 1] - phaseIndexStart = int(i + 1) - # print phaseIndexStart, phaseStart + for i in range(0,len(frames)-1): + if (frames[i][2] == None) & (frames[i+1][2] != None): + phaseStart = phase[i+1] + phaseIndexStart = int(i+1) + #print phaseIndexStart, phaseStart - if (frames[i][2] != None) & (frames[i + 1][2] == None): + if (frames[i][2] != None) & (frames[i+1][2] == None): phaseEnd = phase[i] phaseIndexEnd = int(i) - # print phaseIndexEnd, phaseEnd + #print phaseIndexEnd, phaseEnd if np.isnan(phaseIndexStart): - print 'no gap in the front.' + print('no gap in the front.') phaseIndexStart = 0 phaseStart = phase[0] if np.isnan(phaseIndexEnd): - print 'no gap in the end.' + print('no gap in the end.') phaseIndexEnd = len(phase) phaseEnd = phase[-1] @@ -330,7 +316,7 @@ def phasePosition(phaseMap, displayLog): for i, framei in enumerate(frames): if framei[2] != None: - position[i] = (sweepTable[framei[2]][1] + sweepTable[framei[2]][2]) / 2 + position[i] = (sweepTable[framei[2]][1]+sweepTable[framei[2]][2])/2 stiDirection = displayLog['stimulation']['direction'] @@ -338,17 +324,17 @@ def phasePosition(phaseMap, displayLog): position = position[::-1] stiDirection = stiDirection[::-1] - print '\nStimulus direction:', stiDirection + print('\nStimulus direction:', stiDirection) - slope, intercept, r_value, p_value, stderr = stats.linregress(phase[phaseIndexStart:(phaseIndexEnd + 1)], - position[phaseIndexStart:(phaseIndexEnd + 1)]) + slope, intercept, r_value, p_value, stderr = stats.linregress(phase[phaseIndexStart:(phaseIndexEnd+1)], + position[phaseIndexStart:(phaseIndexEnd+1)]) positionMap = phaseMap * slope + intercept - # print slope, intercept, phaseMap[74,66], phaseMap2[74,66], positionMap[74,66] + #print slope, intercept, phaseMap[74,66], phaseMap2[74,66], positionMap[74,66] - # positionMap[phaseMapphaseEnd] = np.nan + #positionMap[phaseMapphaseEnd] = np.nan return positionMap @@ -359,41 +345,40 @@ def getPhasePositionEquation(displayLog): display of KSStim ''' - if not ('KSstim' in displayLog['stimulation']['stimName']): - raise TypeError, 'The stimulation is not KSstim!' 
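The fitted line from this function is what downstream code applies pixel-wise; a usage sketch, assuming it returns the fitted (slope, intercept) pair like getPhasePositionEquation2 below and that displayLog is a loaded KSstim display log:

    slope, intercept = getPhasePositionEquation(displayLog)
    positionMap = phaseMap * slope + intercept   # visual-field position in degrees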
+ if not('KSstim' in displayLog['stimulation']['stimName']): + raise TypeError('The stimulation is not KSstim!') sweepTable = displayLog['stimulation']['sweepTable'] frames = displayLog['stimulation']['frames'] - phase = np.linspace(0, 2 * np.pi, len(frames), endpoint=False) + phase = np.linspace(0,2*np.pi,len(frames),endpoint=False) phaseIndexStart = np.nan phaseIndexEnd = np.nan - for i in range(0, len(frames) - 1): - if (frames[i][2] == None) & (frames[i + 1][2] != None): phaseIndexStart = int(i + 1) - if (frames[i][2] != None) & (frames[i + 1][2] == None): phaseIndexEnd = int(i) + for i in range(0,len(frames)-1): + if (frames[i][2] == None) & (frames[i+1][2] != None):phaseIndexStart = int(i+1) + if (frames[i][2] != None) & (frames[i+1][2] == None):phaseIndexEnd = int(i) - if np.isnan(phaseIndexStart): print 'no gap in the front.'; phaseIndexStart = 0 + if np.isnan(phaseIndexStart):print('no gap in the front.'); phaseIndexStart = 0 - if np.isnan(phaseIndexEnd): print 'no gap in the end.'; phaseIndexEnd = len(phase) + if np.isnan(phaseIndexEnd):print('no gap in the end.'); phaseIndexEnd = len(phase) position = np.zeros(len(frames)) position[:] = np.nan for i, framei in enumerate(frames): if framei[2] is not None: - position[i] = (sweepTable[framei[2]][1] + sweepTable[framei[2]][2]) / 2 + position[i] = (sweepTable[framei[2]][1]+sweepTable[framei[2]][2])/2 stiDirection = displayLog['stimulation']['direction'] if displayLog['presentation']['displayOrder'] == -1: - position = position[::-1]; - stiDirection = stiDirection[::-1] + position = position[::-1]; stiDirection = stiDirection[::-1] # print '\nStimulus direction:', stiDirection - slope, intercept, r_value, p_value, stderr = stats.linregress(phase[phaseIndexStart:(phaseIndexEnd + 1)], - position[phaseIndexStart:(phaseIndexEnd + 1)]) + slope, intercept, r_value, p_value, stderr = stats.linregress(phase[phaseIndexStart:(phaseIndexEnd+1)], + position[phaseIndexStart:(phaseIndexEnd+1)]) # print 'slope: \t'+str(slope) # print 'intercept: \t'+str(intercept) @@ -411,28 +396,28 @@ def getPhasePositionEquation2(frames, sweepTable): Assuming displayOrder is 1 when display this stimulus ''' - phase = np.linspace(0, 2 * np.pi, len(frames), endpoint=False) + phase = np.linspace(0,2*np.pi,len(frames),endpoint=False) phaseIndexStart = np.nan phaseIndexEnd = np.nan - for i in range(0, len(frames) - 1): - if (frames[i][2] == None) & (frames[i + 1][2] != None): phaseIndexStart = int(i + 1) - if (frames[i][2] != None) & (frames[i + 1][2] == None): phaseIndexEnd = int(i) + for i in range(0,len(frames)-1): + if (frames[i][2] == None) & (frames[i+1][2] != None):phaseIndexStart = int(i+1) + if (frames[i][2] != None) & (frames[i+1][2] == None):phaseIndexEnd = int(i) - if np.isnan(phaseIndexStart): print 'no gap in the front.'; phaseIndexStart = 0 - if np.isnan(phaseIndexEnd): print 'no gap in the end.'; phaseIndexEnd = len(phase) + if np.isnan(phaseIndexStart):print('no gap in the front.'); phaseIndexStart = 0 + if np.isnan(phaseIndexEnd):print('no gap in the end.'); phaseIndexEnd = len(phase) position = np.zeros(len(frames)) position[:] = np.nan for i, framei in enumerate(frames): if framei[2] is not None: - position[i] = (sweepTable[framei[2]][1] + sweepTable[framei[2]][2]) / 2 + position[i] = (sweepTable[framei[2]][1]+sweepTable[framei[2]][2])/2 # print '\nStimulus direction:', stiDirection - slope, intercept, r_value, p_value, stderr = stats.linregress(phase[phaseIndexStart:(phaseIndexEnd + 1)], - position[phaseIndexStart:(phaseIndexEnd + 1)]) + slope, 
intercept, r_value, p_value, stderr = stats.linregress(phase[phaseIndexStart:(phaseIndexEnd+1)], + position[phaseIndexStart:(phaseIndexEnd+1)]) # print 'slope: \t'+str(slope) # print 'intercept: \t'+str(intercept) @@ -443,13 +428,13 @@ def getPhasePositionEquation2(frames, sweepTable): return slope, intercept -def visualSignMap(phasemap1, phasemap2): +def visualSignMap(phasemap1,phasemap2): ''' calculate visual sign map from two orthogonally oriented phase maps ''' if phasemap1.shape != phasemap2.shape: - raise LookupError, "'phasemap1' and 'phasemap2' should have same size." + raise LookupError("'phasemap1' and 'phasemap2' should have same size.") gradmap1 = np.gradient(phasemap1) gradmap2 = np.gradient(phasemap2) @@ -463,96 +448,98 @@ def visualSignMap(phasemap1, phasemap2): graddir2 = np.zeros(np.shape(gradmap2[0])) # gradmag2 = np.zeros(np.shape(gradmap2[0])) - for i in range(phasemap1.shape[0]): + for i in range(phasemap1.shape[0]): for j in range(phasemap2.shape[1]): - graddir1[i, j] = math.atan2(gradmap1[1][i, j], gradmap1[0][i, j]) - graddir2[i, j] = math.atan2(gradmap2[1][i, j], gradmap2[0][i, j]) + + graddir1[i,j] = math.atan2(gradmap1[1][i,j],gradmap1[0][i,j]) + graddir2[i,j] = math.atan2(gradmap2[1][i,j],gradmap2[0][i,j]) # gradmag1[i,j] = np.sqrt((gradmap1[1][i,j]**2)+(gradmap1[0][i,j]**2)) # gradmag2[i,j] = np.sqrt((gradmap2[1][i,j]**2)+(gradmap2[0][i,j]**2)) - vdiff = np.multiply(np.exp(1j * graddir1), np.exp(-1j * graddir2)) + vdiff = np.multiply(np.exp(1j * graddir1),np.exp(-1j * graddir2)) areamap = np.sin(np.angle(vdiff)) return areamap -def dilationPatches(rawPatches, smallPatchThr=5, borderWidth=1): # pixel width of the border after dilation +def dilationPatches(rawPatches,smallPatchThr = 5,borderWidth = 1): #pixel width of the border after dilation ''' dilation patched in a given area untill the border between them are as narrow as defined by 'borderWidth'. 
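    For example, with borderWidth=1 neighboring patches are each grown until only
    a one-pixel-wide skeletonized border separates them; resulting patches that do
    not overlap the original patches, or that are smaller than 'smallPatchThr'
    pixels, are discarded.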
''' - # get patch borders + #get patch borders total_area = sm.convex_hull_image(rawPatches) patchBorder = np.multiply(-1 * (rawPatches - 1), total_area) - # thinning patch borders + #thinning patch borders patchBorder = sm.skeletonize(patchBorder) - # thicking patch borders + #thicking patch borders if borderWidth > 1: - patchBorder = ni.binary_dilation(patchBorder, iterations=borderWidth - 1).astype(np.int) + patchBorder = ni.binary_dilation(patchBorder, iterations = borderWidth - 1).astype(np.int) - # genertating new patches + #genertating new patches newPatches = np.multiply(-1 * (patchBorder - 1), total_area) - # removing small edges + #removing small edges labeledPatches, patchNum = ni.label(newPatches) - for i in xrange(1, patchNum + 1): + for i in range(1, patchNum + 1): currPatch = np.array(labeledPatches) currPatch[currPatch != i] = 0 currPatch = currPatch / i if (np.sum(np.multiply(currPatch, rawPatches)[:]) == 0) or (np.sum(currPatch[:]) < smallPatchThr): - # revCurrPatch = -1 * (currPatch - 1) - # newPatches = np.multiply(newPatches, revCurrPatch) + #revCurrPatch = -1 * (currPatch - 1) + #newPatches = np.multiply(newPatches, revCurrPatch) newPatches[currPatch == 1] = 0 else: currPatch = ni.binary_closing(currPatch, - structure=np.ones((borderWidth + 2, borderWidth + 2))).astype(np.int) + structure = np.ones((borderWidth+2,borderWidth+2))).astype(np.int) newPatches[currPatch == 1] = 1 return newPatches -def dilationPatches2(rawPatches, dilationIter=20, borderWidth=1): # pixel width of the border after dilation +def dilationPatches2(rawPatches,dilationIter = 20,borderWidth = 1): #pixel width of the border after dilation ''' dilation patched in a given area untill the border between them are as narrow as defined by 'borderWidth'. ''' - total_area = ni.binary_dilation(rawPatches, iterations=dilationIter).astype(np.int) + + total_area = ni.binary_dilation(rawPatches, iterations = dilationIter).astype(np.int) patchBorder = total_area - rawPatches - # thinning patch borders + #thinning patch borders patchBorder = sm.skeletonize(patchBorder) - # thickening patch borders + #thickening patch borders if borderWidth > 1: - patchBorder = ni.binary_dilation(patchBorder, iterations=borderWidth - 1).astype(np.int) + patchBorder = ni.binary_dilation(patchBorder, iterations = borderWidth - 1).astype(np.int) - # genertating new patches + #genertating new patches newPatches = np.multiply(-1 * (patchBorder - 1), total_area) - # removing small edges + #removing small edges labeledPatches, patchNum = ni.label(newPatches) - newPatches2 = np.zeros(newPatches.shape, dtype=np.int) + newPatches2 = np.zeros(newPatches.shape, dtype = np.int) - for i in xrange(1, patchNum + 1): - currPatch = np.zeros(labeledPatches.shape, dtype=np.int) + for i in range(1, patchNum + 1): + currPatch = np.zeros(labeledPatches.shape, dtype = np.int) currPatch[labeledPatches == i] = 1 currPatch[labeledPatches != i] = 0 if (np.sum(np.multiply(currPatch, rawPatches)[:]) > 0): - # currPatch = ni.binary_closing(currPatch, - # structure = np.ones((borderWidth+2,borderWidth+2))).astype(np.int) +# currPatch = ni.binary_closing(currPatch, +# structure = np.ones((borderWidth+2,borderWidth+2))).astype(np.int) newPatches2[currPatch == 1] = 1 return newPatches2 @@ -566,21 +553,21 @@ def labelPatches(patchmap, signMap, connectivity=4): labeledPatches, patchNum = ni.label(patchmap) - # list of area of every patch, first column: patch label, second column: area - patchArea = np.zeros((patchNum, 2), dtype=np.int) + #list of area of every 
patch, first column: patch label, second column: area + patchArea = np.zeros((patchNum,2),dtype=np.int) - for i in range(1, patchNum + 1): - currPatch = np.zeros(labeledPatches.shape, dtype=np.int) + for i in range(1, patchNum+1): + currPatch = np.zeros(labeledPatches.shape, dtype = np.int) currPatch[labeledPatches == i] = 1 currPatch[labeledPatches != i] = 0 - patchArea[i - 1] = [i, np.sum(currPatch[:])] + patchArea[i-1] = [i, np.sum(currPatch[:])] - # sort patches by the area, from largest to the smallest - sortArea = patchArea[patchArea[:, 1].argsort(axis=0)][::-1, :] + #sort patches by the area, from largest to the smallest + sortArea=patchArea[patchArea[:,1].argsort(axis=0)][::-1,:] patches = {} - for i, ind in enumerate(sortArea[:, 0]): - currPatch = np.zeros(labeledPatches.shape, dtype=np.int) + for i, ind in enumerate(sortArea[:,0]): + currPatch = np.zeros(labeledPatches.shape, dtype = np.int) currPatch[labeledPatches == ind] = 1 currPatch[labeledPatches != ind] = 0 currSignPatch = np.multiply(currPatch, signMap) @@ -590,19 +577,19 @@ def labelPatches(patchmap, signMap, connectivity=4): elif np.sum(currSignPatch[:]) < 0: currSign = -1 else: - raise LookupError, 'This patch has no visual Sign!!' + raise LookupError('This patch has no visual Sign!!') patchname = 'patch' + ft.int2str(i, 2) - patches.update({patchname: Patch(currPatch, currSign)}) + patches.update({patchname : Patch(currPatch, currSign)}) return patches def phaseFilter(phaseMap, - filterType='Gaussian', # 'Gaussian' of 'uniform' - filterSize=3, - isPositive=True): # if Ture return phase [0 2pi], if False return phase [-pi, pi] + filterType = 'Gaussian', # 'Gaussian' of 'uniform' + filterSize = 3, + isPositive = True): #if Ture return phase [0 2pi], if False return phase [-pi, pi] ''' smooth phaseMap in a circular fashion ''' @@ -641,8 +628,8 @@ def plotVisualSpace(visualSpace, altAxis, aziAxis, tickSpace=10, plotAxis=None, pt.plot_mask_borders(visualSpace, plotAxis=ax, color=lineColor, borderWidth=lineWidth) ax.set_aspect('equal') - altTickInds = range(len(altAxis))[::tickSpace] - aziTickInds = range(len(aziAxis))[::tickSpace] + altTickInds = list(range(len(altAxis)))[::tickSpace] + aziTickInds = list(range(len(aziAxis)))[::tickSpace] altTickLabels = [str(int(round(altAxis[altTickInd]))) for altTickInd in altTickInds] aziTickLabels = [str(int(round(aziAxis[aziTickInd]))) for aziTickInd in aziTickInds] @@ -656,6 +643,7 @@ def plotVisualSpace(visualSpace, altAxis, aziAxis, tickSpace=10, plotAxis=None, def localMin(eccMap, binSize): + ''' find local minimum of eccenticity map (in degree), with binning by binSize in degree @@ -677,51 +665,53 @@ def localMin(eccMap, binSize): marker, NumOfMin = ni.measurements.label(marker) i = i + 1 - # if NumOfMin == 1: - # print 'Only one local minumum was found!!!' - # elif NumOfMin == 0: - # print 'No local minumum was found!!!' - # else: - # print str(NumOfMin) + ' local minuma were found!!!' - # - # if NumOfMin > 1: - # plt.figure() - # plt.imshow(marker,vmin=np.amin(marker), vmax=np.amax(marker),cmap='jet',interpolation='nearest') - # plt.colorbar() - # plt.title('marker from local min') +# if NumOfMin == 1: +# print 'Only one local minumum was found!!!' +# elif NumOfMin == 0: +# print 'No local minumum was found!!!' +# else: +# print str(NumOfMin) + ' local minuma were found!!!' 
+# +# if NumOfMin > 1: +# plt.figure() +# plt.imshow(marker,vmin=np.amin(marker), vmax=np.amax(marker),cmap='jet',interpolation='nearest') +# plt.colorbar() +# plt.title('marker from local min') return marker -def adjacentPairs(patches, borderWidth=2): +def adjacentPairs(patches,borderWidth = 2): + ''' return all the patch pairs with same visual sign and sharing border ''' - keyList = patches.keys() + keyList = list(patches.keys()) pairKeyList = [] for pair in combinations(keyList, 2): patch1 = patches[pair[0]] patch2 = patches[pair[1]] - if (ia.is_adjacent(patch1.array, patch2.array, borderWidth=borderWidth)) and (patch1.sign == patch2.sign): + if (ia.is_adjacent(patch1.array, patch2.array, borderWidth = borderWidth)) and (patch1.sign == patch2.sign): + pairKeyList.append(pair) return pairKeyList -def mergePatches(array1, array2, borderWidth=2): +def mergePatches(array1, array2, borderWidth = 2): ''' merge two binary patches with borderWidth no greater than borderWidth ''' sp = array1 + array2 - spc = ni.binary_closing(sp, iterations=(borderWidth)).astype(np.int8) + spc = ni.binary_closing(sp, iterations = (borderWidth)).astype(np.int8) _, patchNum = ni.measurements.label(spc) if patchNum > 1: - raise LookupError, 'this two patches are too far apart!!!' + raise LookupError('this two patches are too far apart!!!') else: return spc @@ -743,20 +733,20 @@ def eccentricityMap(altMap, aziMap, altCenter, aziCenter): eccMap = np.zeros(altMap.shape) eccMap[:] = np.nan - # for i in xrange(altMap.shape[0]): - # for j in xrange(altMap.shape[1]): - # alt = altMap2[i,j] - # azi = aziMap2[i,j] - # eccMap[i,j] = np.arctan(np.sqrt(np.tan(alt-altCenter2)**2 + ((np.tan(azi-aziCenter2)**2)/(np.cos(alt-altCenter2)**2)))) +# for i in xrange(altMap.shape[0]): +# for j in xrange(altMap.shape[1]): +# alt = altMap2[i,j] +# azi = aziMap2[i,j] +# eccMap[i,j] = np.arctan(np.sqrt(np.tan(alt-altCenter2)**2 + ((np.tan(azi-aziCenter2)**2)/(np.cos(alt-altCenter2)**2)))) eccMap = np.arctan( - np.sqrt( - np.square(np.tan(altMap2 - altCenter2)) - + - np.square(np.tan(aziMap2 - aziCenter2)) / np.square(np.cos(altMap2 - altCenter2)) - ) - ) - - eccMap = eccMap * 180 / np.pi + np.sqrt( + np.square(np.tan(altMap2-altCenter2)) + + + np.square(np.tan(aziMap2-aziCenter2))/np.square(np.cos(altMap2-altCenter2)) + ) + ) + + eccMap = eccMap*180 / np.pi return eccMap @@ -768,51 +758,51 @@ def sortPatches(patchDict): patches = [] newPatchDict = {} - for key, value in patchDict.iteritems(): - patches.append((value, value.getArea())) + for key, value in patchDict.items(): + patches.append((value,value.getArea())) - patches = sorted(patches, key=lambda a: a[1], reverse=True) + patches = sorted(patches, key=lambda a:a[1], reverse=True) for i, item in enumerate(patches): + patchName = 'patch' + ft.int2str(i + 1, 2) - newPatchDict.update({patchName: item[0]}) + newPatchDict.update({patchName:item[0]}) return newPatchDict -def plotPatches(patches, plotaxis=None, zoom=1, alpha=0.5, markersize=5): +def plotPatches(patches,plotaxis = None,zoom = 1,alpha = 0.5,markersize = 5): ''' plot a patches in a patch dictionary ''' if plotaxis == None: f = plt.figure() - plotaxis = f.add_axes([1, 1, 1, 1]) + plotaxis = f.add_axes([1,1,1,1]) imageHandle = {} - for key, value in patches.iteritems(): + for key, value in patches.items(): if zoom > 1: - currPatch = Patch(ni.zoom(value.array, zoom, order=0), value.sign) + currPatch = Patch(ni.zoom(value.array, zoom, order = 0),value.sign) else: currPatch = value - h = plotaxis.imshow(currPatch.getSignedMask(), 
cmap='jet', vmax=1, vmin=-1, interpolation='nearest', - alpha=alpha) - plotaxis.plot(currPatch.getCenter()[1], currPatch.getCenter()[0], '.k', markersize=markersize * zoom) - imageHandle.update({'handle_' + key: h}) + h = plotaxis.imshow(currPatch.getSignedMask(),vmax=1,vmin=-1,interpolation='nearest',alpha=alpha) + plotaxis.plot(currPatch.getCenter()[1], currPatch.getCenter()[0],'.k', markersize = markersize * zoom) + imageHandle.update({'handle_'+key:h}) - plotaxis.set_xlim([0, currPatch.array.shape[1] - 1]) - plotaxis.set_ylim([currPatch.array.shape[0] - 1, 0]) + plotaxis.set_xlim([0, currPatch.array.shape[1]-1]) + plotaxis.set_ylim([currPatch.array.shape[0]-1, 0]) # plotaxis.set_axis_off() return imageHandle -def plotPatchBorders(patches, plotaxis=None, borderWidth=2, color='#ff0000', zoom=1, isPlotCenter=True, isCenter=True, - rotationAngle=0): # rotation of map in degrees, counter-clockwise +def plotPatchBorders(patches,plotaxis = None,borderWidth = 2,color='#ff0000',zoom = 1,isPlotCenter = True,isCenter = True, + rotationAngle = 0 ):# rotation of map in degrees, counter-clockwise - # generating plot axis + #generating plot axis if plotaxis == None: f = plt.figure() plotaxis = f.add_subplot(111) @@ -822,41 +812,41 @@ def plotPatchBorders(patches, plotaxis=None, borderWidth=2, color='#ff0000', zoo borderArray = [] - # initiating center and area + #initiating center and area center = None area = 0 - for key, value in patches.iteritems(): + for key, value in patches.items(): if zoom > 1: - currPatch = Patch(ni.zoom(value.array, zoom, order=0), value.sign) + currPatch = Patch(ni.zoom(value.array, zoom, order = 0),value.sign) currBorderWidth = borderWidth * zoom else: currPatch = value currBorderWidth = borderWidth - # updating center + #updating center currArea = currPatch.getArea() currCenter = currPatch.getCenter() if currArea > area: center = currCenter area = np.int(currArea) - # print 'currArea:', currArea, ' currCenter:', currCenter, ' center:', center + #print 'currArea:', currArea, ' currCenter:', currCenter, ' center:', center - # generating border array for the current patch - currBorder = currPatch.getBorder(borderWidth=currBorderWidth) + #generating border array for the current patch + currBorder = currPatch.getBorder(borderWidth = currBorderWidth) - # adding center of current patches to the border array + #adding center of current patches to the border array if isPlotCenter: - currBorder[currCenter[0] - currBorderWidth - 1:currCenter[0] + currBorderWidth + 1, - currCenter[1] - currBorderWidth - 1:currCenter[1] + currBorderWidth + 1] = 1 + currBorder[currCenter[0]-currBorderWidth-1:currCenter[0]+currBorderWidth+1, + currCenter[1]-currBorderWidth-1:currCenter[1]+currBorderWidth+1] = 1 currBorder[np.isnan(currBorder)] = 0 borderArray.append(currBorder) - # binarize border array - borderArray = np.sum(np.array(borderArray), axis=0) + #binarize border array + borderArray = np.sum(np.array(borderArray),axis=0) borderArray[borderArray >= 1] = 1 # centering and expanding border array @@ -879,43 +869,45 @@ def plotPatchBorders(patches, plotaxis=None, borderWidth=2, color='#ff0000', zoo expandE = maxDis - center[1] expandW = maxDis - (borderArray.shape[1] - center[1]) - borderArray = np.concatenate((np.zeros((expandN, borderArray.shape[1])), borderArray), axis=0) - borderArray = np.concatenate((borderArray, np.zeros((expandS, borderArray.shape[1]))), axis=0) - borderArray = np.concatenate((np.zeros((borderArray.shape[0], expandE)), borderArray), axis=1) - borderArray = 
np.concatenate((borderArray, np.zeros((borderArray.shape[0], expandW))), axis=1) + borderArray = np.concatenate((np.zeros((expandN,borderArray.shape[1])),borderArray),axis = 0) + borderArray = np.concatenate((borderArray,np.zeros((expandS,borderArray.shape[1]))),axis = 0) + borderArray = np.concatenate((np.zeros((borderArray.shape[0],expandE)),borderArray),axis = 1) + borderArray = np.concatenate((borderArray,np.zeros((borderArray.shape[0],expandW))),axis = 1) + # rotating border array borderArrayR = tsfm.rotate(borderArray, rotationAngle) - # binarize rotated border array - borderArrayR[borderArrayR > 0] = 1 + #binarize rotated border array + borderArrayR[borderArrayR > 0]=1 - # thinning rotated border array - # borderArrayR = sm.binary_opening(borderArrayR,np.array([[0,1,0],[1,1,1],[0,1,0]])) + #thinning rotated border array + #borderArrayR = sm.binary_opening(borderArrayR,np.array([[0,1,0],[1,1,1],[0,1,0]])) borderArrayR = sm.skeletonize(borderArrayR) - # dilating rotated border array + #dilating rotated border array borderArrayR = sm.binary_dilation(borderArrayR, sm.square(currBorderWidth)) - # clear unwanted pixels + #clear unwanted pixels borderR = np.array(borderArrayR).astype(np.float32) borderR[borderArrayR == 0] = np.nan - # plotting - imageHandle = plotaxis.imshow(borderR, vmin=0, vmax=1, cmap='temp', interpolation='nearest') + #plotting + imageHandle = plotaxis.imshow(borderR, vmin=0, vmax=1, cmap='temp', interpolation = 'nearest') return imageHandle def plotPatchBorders2(patches, - plotAxis=None, - plotSize=None, # size of plotting area - borderWidth=2, - zoom=1, - centerPatch=1, - rotationAngle=0, # rotation of map in degrees, counter-clockwise - markerSize=2, # size of center dot - closingIteration=None # open iteration for patch borders + plotAxis = None, + plotSize = None, # size of plotting area + borderWidth = 2, + zoom = 1, + centerPatch = 1, + rotationAngle = 0, # rotation of map in degrees, counter-clockwise + markerSize = 2, # size of center dot + closingIteration = None # open iteration for patch borders ): + ''' plot rotated and centered patch borders @@ -925,17 +917,19 @@ def plotPatchBorders2(patches, ... 
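    (In brief: patches are sorted by area, the plot is centered on the
    'centerPatch'-th largest patch, everything is rotated by 'rotationAngle',
    and borders are drawn color-coded by visual sign: red for +1, blue for -1.)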
''' - # generating plot axis + #generating plot axis if plotAxis == None: f = plt.figure() plotAxis = f.add_subplot(111) - # generating list for plotting - # for each patch: first item: center, second item: area, third item: patch array, forth item: sign - forPlotting = [] + #generating list for plotting + #for each patch: first item: center, second item: area, third item: patch array, forth item: sign + forPlotting=[] + + for key, value in patches.items(): - for key, value in patches.iteritems(): - currPatch = Patch(ni.zoom(value.array, zoom, order=0), value.sign) + + currPatch = Patch(ni.zoom(value.array, zoom, order = 0),value.sign) forPlotting.append([currPatch.getCenter(), currPatch.getArea(), @@ -946,13 +940,13 @@ def plotPatchBorders2(patches, forPlotting = sorted(forPlotting, key=lambda a: a[1], reverse=True) # get the plotting center - center = forPlotting[centerPatch - 1][0] + center = forPlotting[centerPatch-1][0] # width and height of original plot width = forPlotting[0][2].shape[1] height = forPlotting[0][2].shape[0] - # coordinate of four corners + #coordinate of four corners NW = np.array([0, 0]) NE = np.array([0, width]) SW = np.array([height, 0]) @@ -973,55 +967,54 @@ def plotPatchBorders2(patches, for ind, value in enumerate(forPlotting): - # expanding border map for each patch - value[2] = np.concatenate((np.zeros((expandN, value[2].shape[1])), value[2]), axis=0) - value[2] = np.concatenate((value[2], np.zeros((expandS, value[2].shape[1]))), axis=0) - value[2] = np.concatenate((np.zeros((value[2].shape[0], expandE)), value[2]), axis=1) - value[2] = np.concatenate((value[2], np.zeros((value[2].shape[0], expandW))), axis=1) + #expanding border map for each patch + value[2] = np.concatenate((np.zeros((expandN,value[2].shape[1])),value[2]),axis = 0) + value[2] = np.concatenate((value[2],np.zeros((expandS,value[2].shape[1]))),axis = 0) + value[2] = np.concatenate((np.zeros((value[2].shape[0],expandE)),value[2]),axis = 1) + value[2] = np.concatenate((value[2],np.zeros((value[2].shape[0],expandW))),axis = 1) - value[2][value[2] == 0] = np.nan + value[2][value[2]==0] = np.nan - # rotate border map for each patch + #rotate border map for each patch value[2] = tsfm.rotate(value[2], rotationAngle) # #binarize current border map # value[2][value[2]<0.9]=np.nan # value[2][value[2]>=0.9]=1 - # ploting current border + #ploting current border if value[3] == -1: - pt.plot_mask(value[2], plotAxis=plotAxis, color='#0000ff', borderWidth=borderWidth, - closingIteration=closingIteration) + pt.plot_mask(value[2], plotAxis=plotAxis, color='#0000ff', borderWidth = borderWidth, closingIteration = closingIteration) elif value[3] == 1: - pt.plot_mask(value[2], plotAxis=plotAxis, color='#ff0000', borderWidth=borderWidth, - closingIteration=closingIteration) + pt.plot_mask(value[2], plotAxis=plotAxis, color='#ff0000', borderWidth = borderWidth, closingIteration = closingIteration) # expanding center coordinate for each patch value[0][0] = value[0][0] + expandN value[0][1] = value[0][1] + expandE - # rotate center coordinate for each patch + #rotate center coordinate for each patch x = value[0][1] - maxDis y = maxDis - value[0][0] - xx = x * np.cos(rotationAngle * np.pi / 180) - y * np.sin(rotationAngle * np.pi / 180) - yy = y * np.cos(rotationAngle * np.pi / 180) + x * np.sin(rotationAngle * np.pi / 180) + xx = x*np.cos(rotationAngle*np.pi/180) - y*np.sin(rotationAngle*np.pi/180) + yy = y*np.cos(rotationAngle*np.pi/180) + x*np.sin(rotationAngle*np.pi/180) value[0][0] = int(np.round(maxDis - yy)) 
value[0][1] = int(np.round(maxDis + xx)) - # ploting current center + #ploting current center if value[3] == -1: - plotAxis.plot(value[0][1], value[0][0], '.b', markersize=markerSize) + plotAxis.plot(value[0][1],value[0][0], '.b', markersize = markerSize) elif value[3] == 1: - plotAxis.plot(value[0][1], value[0][0], '.r', markersize=markerSize) + plotAxis.plot(value[0][1],value[0][0], '.r', markersize = markerSize) + if plotSize: - plotAxis.set_xlim([maxDis - plotSize / 2, maxDis + plotSize / 2]) - plotAxis.set_ylim([maxDis + plotSize / 2, maxDis - plotSize / 2]) + plotAxis.set_xlim([maxDis-plotSize/2, maxDis+plotSize/2]) + plotAxis.set_ylim([maxDis+plotSize/2, maxDis-plotSize/2]) else: - plotAxis.set_xlim([0, 2 * maxDis]) - plotAxis.set_ylim([2 * maxDis, 0]) + plotAxis.set_xlim([0,2*maxDis]) + plotAxis.set_ylim([2*maxDis,0]) plotAxis.get_xaxis().set_visible(False) plotAxis.get_yaxis().set_visible(False) @@ -1031,14 +1024,14 @@ def plotPatchBorders2(patches, def plotPatchBorders3(patches, altPosMap, aziPosMap, - plotAxis=None, - plotSize=None, # size of plotting area - borderWidth=2, - zoom=1, - centerPatchKey='patch01', # center at the largest patch by default - markerSize=2, # size of center dot - closingIteration=None, # open iteration for patch borders - arrowLength=10 # length of arrow of gradiant + plotAxis = None, + plotSize = None, # size of plotting area + borderWidth = 2, + zoom = 1, + centerPatchKey = 'patch01', # center at the largest patch by default + markerSize = 2, # size of center dot + closingIteration = None, # open iteration for patch borders + arrowLength = 10 # length of arrow of gradiant ): ''' plot patch border centered and rotated by a certain patch defined by 'centerPatch' @@ -1046,17 +1039,18 @@ def plotPatchBorders3(patches, also plot vetors of altitude gradiant and azimuth gradiant ''' - # generating plot axis + + #generating plot axis if plotAxis == None: f = plt.figure() plotAxis = f.add_subplot(111) - # calculat rotation angle and center + #calculat rotation angle and center try: centerPatchObj = patches[centerPatchKey] except KeyError: - area = [] - for key, value in patches.iteritems(): + area=[] + for key, value in patches.items(): area.append([key, value.getArea()]) area = sorted(area, key=lambda a: a[1], reverse=True) @@ -1072,18 +1066,19 @@ def plotPatchBorders3(patches, aziGradMapX = np.sum(aziGradMap[0] * centerPatchObj.array) aziGradMapY = np.sum(aziGradMap[1] * centerPatchObj.array) - rotationAngle = -(np.arctan2(-aziGradMapX, aziGradMapY) % (2 * np.pi)) * 180 / np.pi + rotationAngle = -(np.arctan2(-aziGradMapX,aziGradMapY)%(2*np.pi))*180/np.pi # rotationAngle = 0 # print (np.arctan2(-altGradMapX,altGradMapY)%(2*np.pi))*180/np.pi - zoomedCenterPatch = Patch(ni.zoom(centerPatchObj.array, zoom, order=0), centerPatchObj.sign) + zoomedCenterPatch = Patch(ni.zoom(centerPatchObj.array, zoom, order = 0),centerPatchObj.sign) center = zoomedCenterPatch.getCenter() + # width and height of original plot width = zoomedCenterPatch.array.shape[1] height = zoomedCenterPatch.array.shape[0] - # coordinate of four corners + #coordinate of four corners NW = np.array([0, 0]) NE = np.array([0, width]) SW = np.array([height, 0]) @@ -1102,61 +1097,59 @@ def plotPatchBorders3(patches, expandE = maxDis - center[1] expandW = maxDis - (width - center[1]) - for key, currPatch in patches.iteritems(): + for key, currPatch in patches.items(): - zoomedArray = ni.zoom(currPatch.array, zoom, order=0) + zoomedArray = ni.zoom(currPatch.array, zoom, order = 0) - # expanding border map 
for each patch - zoomedArray = np.concatenate((np.zeros((expandN, zoomedArray.shape[1])), zoomedArray), axis=0) - zoomedArray = np.concatenate((zoomedArray, np.zeros((expandS, zoomedArray.shape[1]))), axis=0) - zoomedArray = np.concatenate((np.zeros((zoomedArray.shape[0], expandE)), zoomedArray), axis=1) - zoomedArray = np.concatenate((zoomedArray, np.zeros((zoomedArray.shape[0], expandW))), axis=1) + #expanding border map for each patch + zoomedArray = np.concatenate((np.zeros((expandN,zoomedArray.shape[1])),zoomedArray),axis = 0) + zoomedArray = np.concatenate((zoomedArray,np.zeros((expandS,zoomedArray.shape[1]))),axis = 0) + zoomedArray = np.concatenate((np.zeros((zoomedArray.shape[0],expandE)),zoomedArray),axis = 1) + zoomedArray = np.concatenate((zoomedArray,np.zeros((zoomedArray.shape[0],expandW))),axis = 1) - # rotate border map for each patch + #rotate border map for each patch zoomedArray = tsfm.rotate(zoomedArray, rotationAngle) - # get center - zoomedCenter = np.round(np.mean(np.argwhere(zoomedArray).astype(np.float32), axis=0)).astype(np.int) + #get center + zoomedCenter = np.round(np.mean(np.argwhere(zoomedArray).astype(np.float32),axis=0)).astype(np.int) - # binarize current border map - zoomedArray[zoomedArray < 0.9] = np.nan - zoomedArray[zoomedArray >= 0.9] = 1 - # ploting current border + #binarize current border map + zoomedArray[zoomedArray<0.9]=np.nan + zoomedArray[zoomedArray>=0.9]=1 + + #ploting current border if currPatch.sign == -1: - pt.plot_mask(zoomedArray, plotAxis=plotAxis, color='#0000ff', borderWidth=borderWidth, - closingIteration=closingIteration) - plotAxis.plot(zoomedCenter[1], zoomedCenter[0], '.b', markersize=markerSize) + pt.plot_mask(zoomedArray, plotAxis=plotAxis, color='#0000ff', borderWidth = borderWidth, closingIteration = closingIteration) + plotAxis.plot(zoomedCenter[1],zoomedCenter[0], '.b', markersize = markerSize) elif currPatch.sign == 1: - pt.plot_mask(zoomedArray, plotAxis=plotAxis, color='#ff0000', borderWidth=borderWidth, - closingIteration=closingIteration) - plotAxis.plot(zoomedCenter[1], zoomedCenter[0], '.r', markersize=markerSize) + pt.plot_mask(zoomedArray, plotAxis=plotAxis, color='#ff0000', borderWidth = borderWidth, closingIteration = closingIteration) + plotAxis.plot(zoomedCenter[1],zoomedCenter[0], '.r', markersize = markerSize) - # get gradiant vectors for current patch + #get gradiant vectors for current patch currAltGradMapX = np.sum(altGradMap[0] * currPatch.array) currAltGradMapY = np.sum(altGradMap[1] * currPatch.array) - currAltAngle = np.arctan2(-currAltGradMapX, currAltGradMapY) % (2 * np.pi) + (rotationAngle * np.pi / 180) + currAltAngle = np.arctan2(-currAltGradMapX,currAltGradMapY)%(2*np.pi)+(rotationAngle*np.pi/180) currAziGradMapX = np.sum(aziGradMap[0] * currPatch.array) currAziGradMapY = np.sum(aziGradMap[1] * currPatch.array) - currAziAngle = np.arctan2(-currAziGradMapX, currAziGradMapY) % (2 * np.pi) + (rotationAngle * np.pi / 180) + currAziAngle = np.arctan2(-currAziGradMapX,currAziGradMapY)%(2*np.pi)+(rotationAngle*np.pi/180) # if key == centerPatchKey: # print currAltAngle*180/np.pi # print np.sin(currAltAngle) # print np.cos(currAltAngle) - # plotting arrow for the current patch - plotAxis.arrow(zoomedCenter[1], zoomedCenter[0], arrowLength * zoom * np.cos(currAltAngle), - -arrowLength * zoom * np.sin(currAltAngle), color='#ff00ff', linewidth=2, width=0.5) - plotAxis.arrow(zoomedCenter[1], zoomedCenter[0], arrowLength * zoom * np.cos(currAziAngle), - -arrowLength * zoom * np.sin(currAziAngle), 
color='#00ffff', linewidth=2, width=0.5) + #plotting arrow for the current patch + plotAxis.arrow(zoomedCenter[1],zoomedCenter[0],arrowLength*zoom*np.cos(currAltAngle),-arrowLength*zoom*np.sin(currAltAngle),color='#ff00ff',linewidth=2,width=0.5) + plotAxis.arrow(zoomedCenter[1],zoomedCenter[0],arrowLength*zoom*np.cos(currAziAngle),-arrowLength*zoom*np.sin(currAziAngle),color='#00ffff',linewidth=2,width=0.5) + if plotSize: - plotAxis.set_xlim([maxDis - plotSize * zoom / 2, maxDis + plotSize * zoom / 2]) - plotAxis.set_ylim([maxDis + plotSize * zoom / 2, maxDis - plotSize * zoom / 2]) + plotAxis.set_xlim([maxDis-plotSize*zoom/2, maxDis+plotSize*zoom/2]) + plotAxis.set_ylim([maxDis+plotSize*zoom/2, maxDis-plotSize*zoom/2]) else: - plotAxis.set_xlim([0, 2 * maxDis]) - plotAxis.set_ylim([2 * maxDis, 0]) + plotAxis.set_xlim([0,2*maxDis]) + plotAxis.set_ylim([2*maxDis,0]) plotAxis.get_xaxis().set_visible(False) plotAxis.get_yaxis().set_visible(False) @@ -1167,22 +1160,23 @@ def plotPairedPatches(patch1, altMap, aziMap, title, - pixelSize=1, - closeIter=None): - visualSpace1, _, _ = patch1.getVisualSpace(altMap=altMap, - aziMap=aziMap, - pixelSize=pixelSize, - closeIter=closeIter) + pixelSize = 1, + closeIter = None): + + visualSpace1, _, _ = patch1.getVisualSpace(altMap = altMap, + aziMap = aziMap, + pixelSize = pixelSize, + closeIter = closeIter) area1 = np.sum(visualSpace1[:]) * (pixelSize ** 2) - visualSpace2, _, _ = patch2.getVisualSpace(altMap=altMap, - aziMap=aziMap, - pixelSize=pixelSize, - closeIter=closeIter) + visualSpace2, _, _ = patch2.getVisualSpace(altMap = altMap, + aziMap = aziMap, + pixelSize = pixelSize, + closeIter = closeIter) area2 = np.sum(visualSpace2[:]) * (pixelSize ** 2) - visualSpace1 = np.array(visualSpace1, dtype=np.float32) - visualSpace2 = np.array(visualSpace2, dtype=np.float32) + visualSpace1 = np.array(visualSpace1, dtype = np.float32) + visualSpace2 = np.array(visualSpace2, dtype = np.float32) visualSpace1[visualSpace1 == 0] = np.nan visualSpace2[visualSpace2 == 0] = np.nan @@ -1190,13 +1184,13 @@ def plotPairedPatches(patch1, f = plt.figure() f.suptitle(title) f_121 = f.add_subplot(121) - patchPlot1 = f_121.imshow(patch1.getMask(), cmap='jet', interpolation='nearest', alpha=0.5, vmax=2, vmin=1) - patchPlot2 = f_121.imshow(patch2.getMask() * 2, cmap='jet', interpolation='nearest', alpha=0.5, vmax=2, vmin=1) + patchPlot1 = f_121.imshow(patch1.getMask(), interpolation = 'nearest', alpha = 0.5, vmax=2, vmin=1) + patchPlot2 = f_121.imshow(patch2.getMask()*2, interpolation = 'nearest', alpha = 0.5, vmax=2, vmin=1) f_121.set_title('patch1: blue, patch2: red') f_122 = f.add_subplot(122) - areaPlot1 = f_122.imshow(visualSpace1, cmap='jet', interpolation='nearest', alpha=0.5, vmax=2, vmin=1) - areaPlot2 = f_122.imshow(visualSpace2 * 2, cmap='jet', interpolation='nearest', alpha=0.5, vmax=2, vmin=1) + areaPlot1 = f_122.imshow(visualSpace1, interpolation = 'nearest', alpha = 0.5, vmax=2, vmin=1) + areaPlot2 = f_122.imshow(visualSpace2*2, interpolation = 'nearest', alpha = 0.5, vmax=2, vmin=1) f_122.set_title('area1: %.1f, area2: %.1f (deg^2)' % (area1, area2)) f_122.invert_yaxis() @@ -1204,8 +1198,8 @@ def plotPairedPatches(patch1, # reorganize visual space axis label altRange = np.array([np.amin(altMap), np.amax(altMap)]) aziRange = np.array([np.amin(aziMap), np.amax(aziMap)]) - xlist = np.arange(aziRange[0], aziRange[1], pixelSize) - ylist = np.arange(altRange[0], altRange[1], pixelSize) + xlist = np.arange(aziRange[0],aziRange[1],pixelSize) + ylist = 
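
# ---------------------------------------------------------------------
# The area numbers reported by plotPairedPatches are simply pixel counts
# in visual space scaled by the pixel area; a sketch, assuming a binary
# visual-space mask (getVisualSpace itself is a Patch method defined
# earlier in this module and is not reimplemented here).
import numpy as np

def visual_area_deg2(visual_space_mask, pixel_size_deg):
    # area in deg^2 covered by a binary visual-space mask
    return np.sum(visual_space_mask) * pixel_size_deg ** 2

m = np.zeros((200, 200), dtype=bool)
m[50:100, 60:90] = True                    # 1500 pixels
assert visual_area_deg2(m, 0.5) == 375.0   # 1500 * 0.5**2
# ---------------------------------------------------------------------
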
np.arange(altRange[0],altRange[1],pixelSize) xtick = [] xticklabel = [] @@ -1216,7 +1210,7 @@ def plotPairedPatches(patch1, xticklabel.append(str(int(np.floor(xlist[i])))) i = int(i + 9 / pixelSize) else: - i = i + 1 + i=i+1 ytick = [] yticklabel = [] @@ -1227,50 +1221,50 @@ def plotPairedPatches(patch1, yticklabel.append(str(int(np.floor(ylist[i])))) i = int(i + 9 / pixelSize) else: - i = i + 1 + i=i+1 f_122.set_xticks(xtick) f_122.set_xticklabels(xticklabel) f_122.set_yticks(ytick) f_122.set_yticklabels(yticklabel) - def getPatchDict(patch): - return {'sparseArray': patch.sparseArray, 'sign': patch.sign} + return {'sparseArray':patch.sparseArray,'sign':patch.sign} class RetinotopicMappingTrial(object): + def __init__(self, - mouseID, # str, mouseID - dateRecorded, # int, date recorded, yearmonthday - trialNum, # str, number of the trail on that day - mouseType, # str, mouse Genotype - visualStimType, # str, stimulation type - visualStimBackground, # str, background of visual stimulation - imageExposureTime, # float, exposure time of image file - altPosMap, # altitude position map - aziPosMap, # azimuth position map - altPowerMap, # altitude power map - aziPowerMap, # azimuth power map - vasculatureMap, # vasculature map - params={ - 'phaseMapFilterSigma': 1, - 'signMapFilterSigma': 10, - 'signMapThr': 0.3, - 'eccMapFilterSigma': 5., - 'splitLocalMinCutStep': 10., - 'mergeOverlapThr': 0.05, - 'closeIter': 3, - 'openIter': 3, - 'dilationIter': 20, - 'borderWidth': 1, - 'smallPatchThr': 200, - 'visualSpacePixelSize': 0.5, - 'visualSpaceCloseIter': 15, - 'splitOverlapThr': 1.1 - }, - isAnesthetized=False, + mouseID, # str, mouseID + dateRecorded, # int, date recorded, yearmonthday + trialNum, # str, number of the trail on that day + mouseType, # str, mouse Genotype + visualStimType, # str, stimulation type + visualStimBackground, # str, background of visual stimulation + imageExposureTime, # float, exposure time of image file + altPosMap, # altitude position map + aziPosMap, # azimuth position map + altPowerMap, # altitude power map + aziPowerMap, # azimuth power map + vasculatureMap, # vasculature map + params ={ + 'phaseMapFilterSigma':1, + 'signMapFilterSigma':10, + 'signMapThr':0.3, + 'eccMapFilterSigma':5., + 'splitLocalMinCutStep':10., + 'mergeOverlapThr':0.05, + 'closeIter':3, + 'openIter':3, + 'dilationIter':20, + 'borderWidth':1, + 'smallPatchThr':200, + 'visualSpacePixelSize':0.5, + 'visualSpaceCloseIter':15, + 'splitOverlapThr':1.1 + }, + isAnesthetized = False, ): self.mouseID = mouseID @@ -1289,14 +1283,15 @@ def __init__(self, self.isAnesthetized = isAnesthetized + def getName(self): - trialName = str(self.dateRecorded) + \ - '_M' + str(self.mouseID) + \ + trialName = str(self.dateRecorded)+\ + '_M'+str(self.mouseID)+\ '_Trial' + str(self.trialNum) - # '_'+self.mouseType.split('-')[0]+';'+self.mouseType.split(';')[-1][0:4] - # '_' + str(self.visualStimType)+\ - # '_' + str(self.visualStimBackground) + #'_'+self.mouseType.split('-')[0]+';'+self.mouseType.split(';')[-1][0:4] + #'_' + str(self.visualStimType)+\ + #'_' + str(self.visualStimBackground) # if self.isAnesthetized: # trialName += '_Anesth' @@ -1305,24 +1300,23 @@ def getName(self): return trialName - def _getSignMap(self, isReverse=False, isPlot=False, isFixedRange=True): - altPosMapf = ni.filters.gaussian_filter(self.altPosMap, - self.params['phaseMapFilterSigma']) - aziPosMapf = ni.filters.gaussian_filter(self.aziPosMap, - self.params['phaseMapFilterSigma']) + def _getSignMap(self, isReverse = False, isPlot = False, 
isFixedRange = True): + + altPosMapf = ni.filters.gaussian_filter(self.altPosMap, + self.params['phaseMapFilterSigma']) + aziPosMapf = ni.filters.gaussian_filter(self.aziPosMap, + self.params['phaseMapFilterSigma']) if self.altPowerMap is not None: - altPowerMapf = ni.filters.gaussian_filter(self.altPowerMap, - self.params['phaseMapFilterSigma']) - else: - altPowerMapf = None + altPowerMapf = ni.filters.gaussian_filter(self.altPowerMap, + self.params['phaseMapFilterSigma']) + else: altPowerMapf = None if self.aziPowerMap is not None: - aziPowerMapf = ni.filters.gaussian_filter(self.aziPowerMap, - self.params['phaseMapFilterSigma']) - else: - aziPowerMapf = None + aziPowerMapf = ni.filters.gaussian_filter(self.aziPowerMap, + self.params['phaseMapFilterSigma']) + else: aziPowerMapf = None signMap = visualSignMap(altPosMapf, aziPosMapf) @@ -1332,20 +1326,16 @@ def _getSignMap(self, isReverse=False, isPlot=False, isFixedRange=True): self.params['signMapFilterSigma']) if isPlot: - f1 = plt.figure(figsize=(18, 9)) + f1=plt.figure(figsize=(18,9)) f1_231 = f1.add_subplot(231) - if isFixedRange: - currfig = f1_231.imshow(self.altPosMap, vmin=-40, vmax=60, cmap='hsv', interpolation='nearest') - else: - currfig = f1_231.imshow(self.altPosMap, cmap='hsv', interpolation='nearest') + if isFixedRange: currfig = f1_231.imshow(self.altPosMap, vmin=-40, vmax=60, cmap='hsv', interpolation='nearest') + else: currfig = f1_231.imshow(self.altPosMap, cmap='hsv', interpolation='nearest') f1.colorbar(currfig) f1_231.set_axis_off() f1_231.set_title('alt position') f1_232 = f1.add_subplot(232) - if isFixedRange: - currfig = f1_232.imshow(self.aziPosMap, vmin=-0, vmax=120, cmap='hsv', interpolation='nearest') - else: - currfig = f1_232.imshow(self.aziPosMap, cmap='hsv', interpolation='nearest') + if isFixedRange: currfig = f1_232.imshow(self.aziPosMap, vmin=-0, vmax=120, cmap='hsv', interpolation='nearest') + else: currfig = f1_232.imshow(self.aziPosMap, cmap='hsv', interpolation='nearest') f1.colorbar(currfig) f1_232.set_axis_off() f1_232.set_title('azi position') @@ -1355,18 +1345,14 @@ def _getSignMap(self, isReverse=False, isPlot=False, isFixedRange=True): f1_233.set_axis_off() f1_233.set_title('sign map') f1_234 = f1.add_subplot(234) - if isFixedRange: - currfig = f1_234.imshow(altPosMapf, vmin=-40, vmax=60, cmap='hsv', interpolation='nearest') - else: - currfig = f1_234.imshow(altPosMapf, cmap='hsv', interpolation='nearest') + if isFixedRange: currfig = f1_234.imshow(altPosMapf, vmin=-40, vmax=60, cmap='hsv', interpolation='nearest') + else: currfig = f1_234.imshow(altPosMapf, cmap='hsv', interpolation='nearest') f1.colorbar(currfig) f1_234.set_axis_off() f1_234.set_title('alt position filtered') f1_235 = f1.add_subplot(235) - if isFixedRange: - currfig = f1_235.imshow(aziPosMapf, vmin=0, vmax=120, cmap='hsv', interpolation='nearest') - else: - currfig = f1_235.imshow(aziPosMapf, cmap='hsv', interpolation='nearest') + if isFixedRange: currfig = f1_235.imshow(aziPosMapf, vmin=0, vmax=120, cmap='hsv', interpolation='nearest') + else: currfig = f1_235.imshow(aziPosMapf, cmap='hsv', interpolation='nearest') f1.colorbar(currfig) plt.axis('off') f1_235.set_title('azi position filtered') @@ -1376,22 +1362,21 @@ def _getSignMap(self, isReverse=False, isPlot=False, isFixedRange=True): plt.axis('off') f1_236.set_title('sign map filtered') - f2 = plt.figure(figsize=(12, 4)) + f2=plt.figure(figsize=(12,4)) f2_121 = f2.add_subplot(121) if altPowerMapf is not None: - currfig = f2_121.imshow(ia.array_nor(self.altPowerMap), 
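
# ---------------------------------------------------------------------
# _getSignMap delegates to visualSignMap, defined earlier in this module.
# For reference, a sketch of the standard field-sign computation it is
# assumed to implement: the sine of the angle between the altitude and
# azimuth gradient directions, positive for non-mirrored patches and
# negative for mirrored ones.
import numpy as np

def visual_sign_sketch(alt_pos_map, azi_pos_map):
    d_alt = np.gradient(alt_pos_map)
    d_azi = np.gradient(azi_pos_map)
    angle_alt = np.arctan2(d_alt[0], d_alt[1])   # per-pixel gradient direction
    angle_azi = np.arctan2(d_azi[0], d_azi[1])
    return np.sin(angle_azi - angle_alt)
# ---------------------------------------------------------------------
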
cmap='hot', vmin=0, vmax=1, - interpolation='nearest') + currfig = f2_121.imshow(ia.array_nor(self.altPowerMap), cmap ='hot', vmin = 0, vmax=1, interpolation='nearest') f2.colorbar(currfig) f2_121.set_title('alt power map') f2_121.set_axis_off() f2_122 = f2.add_subplot(122) if aziPowerMapf is not None: - currfig = f2_122.imshow(ia.array_nor(self.aziPowerMap), cmap='hot', vmin=0, vmax=1, - interpolation='nearest') + currfig = f2_122.imshow(ia.array_nor(self.aziPowerMap), cmap ='hot', vmin = 0, vmax=1, interpolation='nearest') f2.colorbar(currfig) f2_122.set_title('azi power map') f2_122.set_axis_off() + self.altPosMapf = altPosMapf self.aziPosMapf = aziPosMapf self.altPowerMapf = altPowerMapf @@ -1401,7 +1386,8 @@ def _getSignMap(self, isReverse=False, isPlot=False, isFixedRange=True): return altPosMapf, aziPosMapf, altPowerMapf, aziPowerMapf, signMap, signMapf - def _getRawPatchMap(self, isPlot=False): + + def _getRawPatchMap(self, isPlot = False): if not hasattr(self, 'signMapf'): _ = self._getSignMap() @@ -1411,20 +1397,21 @@ def _getRawPatchMap(self, isPlot=False): openIter = self.params['openIter'] closeIter = self.params['closeIter'] - # thresholding filtered signmap + #thresholding filtered signmap patchmap = np.zeros(signMapf.shape) patchmap[signMapf >= signMapThr] = 1 patchmap[signMapf <= -1 * signMapThr] = 1 patchmap[(signMapf < signMapThr) & (signMapf > -1 * signMapThr)] = 0 - patchmap = ni.binary_opening(np.abs(patchmap), iterations=openIter).astype(np.int) + patchmap = ni.binary_opening(np.abs(patchmap), iterations = openIter).astype(np.int) patches, patchNum = ni.label(patchmap) - # closing each patch, then put them together - patchmap2 = np.zeros(patchmap.shape).astype(np.int) + + #closing each patch, then put them together + patchmap2=np.zeros(patchmap.shape).astype(np.int) for i in range(patchNum): currPatch = np.zeros(patches.shape).astype(np.int) - currPatch[patches == i + 1] = 1 - currPatch = ni.binary_closing(currPatch, iterations=closeIter).astype(np.int) + currPatch[patches==i+1]=1 + currPatch = ni.binary_closing(currPatch, iterations = closeIter).astype(np.int) patchmap2 = patchmap2 + currPatch if isPlot: @@ -1438,7 +1425,8 @@ def _getRawPatchMap(self, isPlot=False): return patchmap2 - def _getRawPatches(self, isPlot=False): + + def _getRawPatches(self, isPlot = False): if not hasattr(self, 'rawPatchMap'): _ = self._getRawPatchMap() @@ -1452,22 +1440,22 @@ def _getRawPatches(self, isPlot=False): patchMapDilated = dilationPatches2(rawPatchMap, dilationIter=dilationIter, borderWidth=borderWidth) - # generate raw patch dictionary - rawPatches = labelPatches(patchMapDilated, signMapf, connectivity=4) + #generate raw patch dictionary + rawPatches = labelPatches(patchMapDilated, signMapf, connectivity = 4) rawPatches2 = dict(rawPatches) - # remove small patches - for key, value in rawPatches2.iteritems(): + #remove small patches + for key, value in rawPatches2.items(): if (value.getArea() < smallPatchThr): rawPatches.pop(key) - # remove isolated Patches + #remove isolated Patches rawPatches2 = dict(rawPatches) - for key in rawPatches2.iterkeys(): + for key in rawPatches2.keys(): isTouching = 0 - for key2 in rawPatches2.iterkeys(): + for key2 in rawPatches2.keys(): if key != key2: - if rawPatches2[key].isTouching(rawPatches2[key2], borderWidth * 2): + if rawPatches2[key].isTouching(rawPatches2[key2], borderWidth*2): isTouching = 1 break @@ -1484,10 +1472,10 @@ def _getRawPatches(self, isPlot=False): f = plt.figure() f_axis = f.add_subplot(111) try: - 
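
# ---------------------------------------------------------------------
# The patch-map logic of _getRawPatchMap as one self-contained sketch:
# threshold the absolute filtered sign map, despeckle with binary opening,
# then close each connected component separately so neighboring patches do
# not fuse. Default values mirror self.params; names are illustrative.
import numpy as np
import scipy.ndimage as ni

def raw_patch_map_sketch(sign_map_f, sign_thr=0.3, open_iter=3, close_iter=3):
    patch_map = (np.abs(sign_map_f) >= sign_thr).astype(int)
    patch_map = ni.binary_opening(patch_map, iterations=open_iter)
    labeled, n = ni.label(patch_map)
    out = np.zeros(patch_map.shape, dtype=int)
    for i in range(1, n + 1):
        out += ni.binary_closing(labeled == i, iterations=close_iter).astype(int)
    return out
# ---------------------------------------------------------------------
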
f_axis.imshow(vasculatureMap, cmap='gray', interpolation='nearest') + f_axis.imshow(vasculatureMap, cmap = 'gray', interpolation = 'nearest') except: pass - _ = plotPatches(rawPatches, plotaxis=f_axis, zoom=zoom) + _ = plotPatches(rawPatches, plotaxis = f_axis, zoom = zoom) f_axis.set_title('raw patches') plt.gca().set_axis_off() del _ @@ -1496,7 +1484,8 @@ def _getRawPatches(self, isPlot=False): return rawPatches - def _getDeterminantMap(self, isPlot=False): + + def _getDeterminantMap(self, isPlot = False): if not hasattr(self, 'altPosMapf') or not hasattr(self, 'aziPosMapf'): _ = self._getSignMap() @@ -1510,12 +1499,13 @@ def _getDeterminantMap(self, isPlot=False): detMap = np.array([[gradAltMap[0], gradAltMap[1]], [gradAziMap[0], gradAziMap[1]]]) - detMap = detMap.transpose(2, 3, 0, 1) + detMap = detMap.transpose(2,3,0,1) detMap = np.abs(np.linalg.det(detMap)) + if isPlot: plt.figure() - plt.imshow(detMap, vmin=0, vmax=1, cmap='hsv', interpolation='nearest') + plt.imshow(detMap, vmin = 0, vmax = 1,cmap='hsv', interpolation='nearest') plt.colorbar() plt.title('determinant map') plt.gca().set_axis_off() @@ -1524,7 +1514,8 @@ def _getDeterminantMap(self, isPlot=False): return detMap - def _getEccentricityMap(self, isPlot=False): + + def _getEccentricityMap(self, isPlot = False): if not hasattr(self, 'rawPatches'): _ = self._getRawPatches() @@ -1539,17 +1530,18 @@ def _getEccentricityMap(self, isPlot=False): eccMap[:] = np.nan eccMapf[:] = np.nan - for key, value in patches.iteritems(): - patchAltC, patchAziC = value.getPixelVisualCenter(altPosMapf, aziPosMapf) + for key, value in patches.items(): + + patchAltC, patchAziC = value.getPixelVisualCenter(altPosMapf,aziPosMapf) patchEccMap = eccentricityMap(altPosMapf, aziPosMapf, patchAltC, patchAziC) - patchEccMapf = ni.filters.uniform_filter(patchEccMap, eccMapFilterSigma) + patchEccMapf = ni.filters.uniform_filter(patchEccMap, eccMapFilterSigma) eccMap[value.array == 1] = patchEccMap[value.array == 1] eccMapf[value.array == 1] = patchEccMapf[value.array == 1] if isPlot: plt.figure() - plt.imshow(eccMapf, cmap='hsv', interpolation='nearest') + plt.imshow(eccMapf, interpolation='nearest') plt.colorbar() plt.title('filtered eccentricity map') plt.gca().set_axis_off() @@ -1559,7 +1551,8 @@ def _getEccentricityMap(self, isPlot=False): return eccMap, eccMapf - def _splitPatches(self, isPlot=False): + + def _splitPatches(self,isPlot = False): if not hasattr(self, 'eccentricityMapf'): _ = self._getEccentricityMap() @@ -1569,7 +1562,7 @@ def _splitPatches(self, isPlot=False): altPosMapf = self.altPosMapf aziPosMapf = self.aziPosMapf - # eccMap = self.eccentricityMapf +# eccMap = self.eccentricityMapf eccMapf = self.eccentricityMapf patches = dict(self.rawPatches) detMap = self.determinantMap @@ -1583,17 +1576,17 @@ def _splitPatches(self, isPlot=False): overlapPatches = [] newPatchesDict = {} - for key, value in patches.iteritems(): + for key, value in patches.items(): visualSpace, _, _ = value.getVisualSpace(altPosMapf, aziPosMapf, - pixelSize=visualSpacePixelSize, - closeIter=visualSpaceCloseIter) + pixelSize = visualSpacePixelSize, + closeIter = visualSpaceCloseIter) AU = np.sum(visualSpace[:]) * (visualSpacePixelSize ** 2) AS = value.getSigmaArea(detMap) - print key, 'AU=' + str(AU), ' AS=' + str(AS), ' ratio=' + str(AS / AU) + print(key, 'AU='+str(AU), ' AS='+str(AS), ' ratio='+str(AS/AU)) - if AS / AU >= splitOverlapThr: + if AS/AU >= splitOverlapThr: patchEccMapf = eccMapf * value.getMask() patchEccMapf[value.array == 0] = np.nan @@ -1602,54 
+1595,52 @@ def _splitPatches(self, isPlot=False):
                 NumOfMin = np.amax(minMarker)
 
                 if NumOfMin == 1:
-                    print 'Only one local minumum was found!!!'
+                    print('Only one local minimum was found!!!')
                 elif NumOfMin == 0:
-                    print 'No local minumum was found!!!'
+                    print('No local minimum was found!!!')
                 else:
-                    print str(NumOfMin) + ' local minuma were found!!!'
+                    print(str(NumOfMin) + ' local minima were found!!!')
 
                     overlapPatches.append(key)
 
                     newPatches = value.split2(patchEccMapf,
-                                              patchName=key,
-                                              cutStep=splitLocalMinCutStep,
-                                              borderWidth=borderWidth,
-                                              isplot=False)
+                                              patchName = key,
+                                              cutStep = splitLocalMinCutStep,
+                                              borderWidth = borderWidth,
+                                              isplot = False)
 
-                    # plotting splitted patches
+                    #plotting split patches
                     if len(newPatches) > 1:
-                        f = plt.figure()
+                        f=plt.figure()
                         f121 = f.add_subplot(121)
                         f121.set_title(key)
                         f122 = f.add_subplot(122)
                         f122.set_title('visual space')
                         currPatchValue = 0
-                        for key2, value2 in newPatches.iteritems():
+                        for key2, value2 in newPatches.items():
                             currPatchValue += 1
                             currArray = np.array(value2.array, dtype=np.float32)
-                            currArray[currArray == 0] = np.nan
-                            currArray[currArray == 1] = currPatchValue
-                            f121.imshow(currArray, cmap='jet', interpolation='nearest', vmin=0,
-                                        vmax=len(newPatches.keys()))
+                            currArray[currArray==0]=np.nan
+                            currArray[currArray==1]=currPatchValue
+                            f121.imshow(currArray,interpolation='nearest', vmin=0, vmax=len(list(newPatches.keys())))
                             f121.set_axis_off()
 
                             currVisualSpace, _, _ = value2.getVisualSpace(altPosMapf, aziPosMapf,
-                                                                          pixelSize=visualSpacePixelSize,
-                                                                          closeIter=visualSpaceCloseIter)
-                            currVisualSpace = currVisualSpace.astype(np.float32)
-                            currVisualSpace[currVisualSpace == 0] = np.nan
-                            currVisualSpace[currVisualSpace == 1] = currPatchValue
-                            f122.imshow(currVisualSpace, cmap='jet', interpolation='nearest', alpha=0.5, vmin=0,
-                                        vmax=len(newPatches.keys()))
+                                                                          pixelSize = visualSpacePixelSize,
+                                                                          closeIter = visualSpaceCloseIter)
+                            currVisualSpace=currVisualSpace.astype(np.float32)
+                            currVisualSpace[currVisualSpace==0]=np.nan
+                            currVisualSpace[currVisualSpace==1]=currPatchValue
+                            f122.imshow(currVisualSpace,interpolation='nearest',alpha=0.5, vmin=0, vmax=len(list(newPatches.keys())))
 
-                        xlabel = np.arange(-20, 120, visualSpacePixelSize)
-                        ylabel = np.arange(60, -40, -visualSpacePixelSize)
+                        xlabel = np.arange(-20,120,visualSpacePixelSize)
+                        ylabel = np.arange(60,-40,-visualSpacePixelSize)
 
-                        indSpace = int(10.
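
# ---------------------------------------------------------------------
# Context for the AS/AU split test above (a sketch; getSigmaArea and
# getVisualSpace are Patch methods from earlier in this module). The
# determinant map is the per-pixel |Jacobian| of the cortex -> visual-space
# mapping, equivalent to _getDeterminantMap without the transpose step:
import numpy as np

def determinant_map_sketch(alt_pos_map_f, azi_pos_map_f):
    d_alt = np.gradient(alt_pos_map_f)
    d_azi = np.gradient(azi_pos_map_f)
    return np.abs(d_alt[0] * d_azi[1] - d_alt[1] * d_azi[0])

# AS (area integrated from the determinant map) is compared with AU (unique
# visual area actually covered); a ratio well above 1 means the patch folds
# over itself in visual space and is split at eccentricity-map local minima.
def needs_split(AS, AU, split_overlap_thr=1.1):
    return AS / AU >= split_overlap_thr
# ---------------------------------------------------------------------
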
/ visualSpacePixelSize) + indSpace = int(10./visualSpacePixelSize) - xtickInd = range(0, len(xlabel), indSpace) - ytickInd = range(0, len(ylabel), indSpace) + xtickInd = list(range(0,len(xlabel),indSpace)) + ytickInd = list(range(0,len(ylabel),indSpace)) xtickLabel = [str(int(xlabel[x])) for x in xtickInd] ytickLabel = [str(int(ylabel[y])) for y in ytickInd] @@ -1671,21 +1662,23 @@ def _splitPatches(self, isPlot=False): zoom = self.vasculatureMap.shape[0] / self.altPosMap.shape[0] except: zoom = 1 - f2 = plt.figure() - f2_111 = f2.add_subplot(111) + f2=plt.figure() + f2_111=f2.add_subplot(111) try: - f2_111.imshow(self.vasculatureMap, cmap='gray', interpolation='nearest') + f2_111.imshow(self.vasculatureMap, cmap = 'gray', interpolation = 'nearest') except: pass - h = plotPatches(patches, plotaxis=f2_111, zoom=zoom) + h = plotPatches(patches, plotaxis = f2_111, zoom = zoom) f2_111.set_axis_off() f2_111.set_title('patches after split') f2_111.set_axis_off() + self.patchesAfterSplit = patches return patches + def _mergePatches(self, isPlot=False): if not hasattr(self, 'patchesAfterSplit'): @@ -1701,7 +1694,7 @@ def _mergePatches(self, isPlot=False): mergeOverlapThr = self.params['mergeOverlapThr'] smallPatchThr = self.params['smallPatchThr'] - # merging non-overlaping patches + #merging non-overlaping patches mergeIter = 1 # pairs of patches that meet the criterion of merging @@ -1715,75 +1708,62 @@ def _mergePatches(self, isPlot=False): while (mergeIter == 1) or (len(mergePairs) > 0): - print 'merge iteration: ' + str(mergeIter) + print('merge iteration: ' + str(mergeIter)) mergePairs = [] - # get adjacent pairs - adjPairs = adjacentPairs(patches, borderWidth=borderWidth + 1) + #get adjacent pairs + adjPairs = adjacentPairs(patches, borderWidth = borderWidth+1) - for ind, pair in enumerate(adjPairs): # for every adjacent pair + for ind, pair in enumerate(adjPairs): #for every adjacent pair patch1 = patches[pair[0]] patch2 = patches[pair[1]] try: - # merge these two patches - currMergedPatch = Patch(mergePatches(patch1.array, patch2.array, borderWidth=borderWidth), - sign=patch1.sign) + #merge these two patches + currMergedPatch = Patch(mergePatches(patch1.array,patch2.array,borderWidth=borderWidth), + sign = patch1.sign) - # calculate unique area of the merged patch + #calculate unique area of the merged patch visualSpace, _, _ = currMergedPatch.getVisualSpace(altPosMapf, aziPosMapf, - pixelSize=visualSpacePixelSize, - closeIter=visualSpaceCloseIter) + pixelSize = visualSpacePixelSize, + closeIter = visualSpaceCloseIter) AU = np.sum(visualSpace[:]) * (visualSpacePixelSize ** 2) - # calculate the visual space and unique area of the first patch - visualSpace1, _, _ = patch1.getVisualSpace(altPosMapf, - aziPosMapf, - pixelSize=visualSpacePixelSize, - closeIter=visualSpaceCloseIter) - visualSpace1 = visualSpace1.astype(np.uint8) + #calculate the visual space and unique area of the first patch + visualSpace1, _, _ =patch1.getVisualSpace(altPosMapf, + aziPosMapf, + pixelSize = visualSpacePixelSize, + closeIter = visualSpaceCloseIter) AU1 = np.sum(visualSpace1[:]) * (visualSpacePixelSize ** 2) - # calculate the visual space and unique area of the second patch - visualSpace2, _, _ = patch2.getVisualSpace(altPosMapf, - aziPosMapf, - pixelSize=visualSpacePixelSize, - closeIter=visualSpaceCloseIter) - visualSpace2 = visualSpace2.astype(np.uint8) + #calculate the visual space and unique area of the second patch + visualSpace2, _, _ =patch2.getVisualSpace(altPosMapf, + aziPosMapf, + pixelSize = 
visualSpacePixelSize,
+                                                              closeIter = visualSpaceCloseIter)
                     AU2 = np.sum(visualSpace2[:]) * (visualSpacePixelSize ** 2)
 
-                    # calculate the overlapping area of these two patches
+                    #calculate the overlapping area of these two patches
                     sumSpace = visualSpace1 + visualSpace2
-                    overlapSpace = np.zeros(sumSpace.shape, dtype=np.int)
+                    overlapSpace = np.zeros(sumSpace.shape, dtype = np.int)
                     overlapSpace[sumSpace == 2] = 1
-
-                    # f = plt.figure()
-                    # ax1 = f.add_subplot(141)
-                    # ax1.imshow(visualSpace1)
-                    # ax2 = f.add_subplot(142)
-                    # ax2.imshow(visualSpace2)
-                    # ax3 = f.add_subplot(143)
-                    # ax3.imshow(sumSpace)
-                    # ax4 = f.add_subplot(144)
-                    # ax4.imshow(overlapSpace)
-                    # plt.show()
-
                     Aoverlap = np.sum(overlapSpace[:]) * (visualSpacePixelSize ** 2)
 
-                    # calculate the ratio of overlaping area to the unique area of each patch
+                    #calculate the ratio of overlapping area to the unique area of each patch
                     overlapRatio1 = Aoverlap / AU1
                     overlapRatio2 = Aoverlap / AU2
 
-                    # if both ratios are small than merge overlap threshold definded at the beginning of the file
+                    #if both ratios are smaller than the merge overlap threshold defined at the beginning of the file
                     if (overlapRatio1 <= mergeOverlapThr) and (overlapRatio2 <= mergeOverlapThr):
-                        # put this pair and related information to mergePairs list
+
+                        #put this pair and related information to mergePairs list
                         mergePairs.append([pair[0], pair[1],
                                            currMergedPatch,
-                                           np.max([overlapRatio1, overlapRatio2]),
+                                           np.max([overlapRatio1,overlapRatio2]),
                                            (-1 * AU)])
 
                     del visualSpace1, visualSpace2, AU1, AU2, sumSpace, overlapSpace, Aoverlap
@@ -1796,41 +1776,42 @@ def _mergePatches(self, isPlot=False):
                 del patch1, patch2
 
         if len(mergePairs) > 0:
-            # for each identified patch pair to merge sort them with the sum of two
-            # overlap ratios, from smallest to biggest and then sort them with the
-            # unique area of merged patches from biggest to smallest
-            mergePairs.sort(key=itemgetter(3, 4))
+            #for each identified patch pair to merge sort them with the sum of two
+            #overlap ratios, from smallest to biggest and then sort them with the
+            #unique area of merged patches from biggest to smallest
+            mergePairs.sort(key = itemgetter(3,4))
 
-            for ind, value in enumerate(mergePairs):  # for each of these pairs
+            for ind, value in enumerate(mergePairs): #for each of these pairs
 
                 patch1 = value[0]
                 patch2 = value[1]
 
                 # if both of these two patches are still in the 'patches' dictionary
-                if (patch1 in patches.keys()) and (patch2 in patches.keys()):
-                    # plot these patches and their visual space
+                if (patch1 in list(patches.keys())) and (patch2 in list(patches.keys())):
+
+                    #plot these patches and their visual space
                     plotPairedPatches(patches[patch1],
                                       patches[patch2],
                                       altPosMapf,
                                       aziPosMapf,
-                                      title='merge iteation:' + str(
-                                          mergeIter) + ' patch1:' + patch1 + ' patch2:' + patch2,
-                                      pixelSize=visualSpacePixelSize,
-                                      closeIter=visualSpaceCloseIter)
+                                      title = 'merge iteration:'+str(mergeIter)+' patch1:'+patch1+' patch2:'+patch2,
+                                      pixelSize = visualSpacePixelSize,
+                                      closeIter = visualSpaceCloseIter)
 
-                    # remove these two patches from the 'patches' dictionary
+                    #remove these two patches from the 'patches' dictionary
                     patches.pop(patch1)
                     patches.pop(patch2)
 
-                    # add merged patches into the 'patches' dictionary
+                    #add merged patches into the 'patches' dictionary
                     patches.update({patch1 + '+' + patch2[5:]: value[2]})
 
-                    print 'merging: ' + patch1 + ' & ' + patch2 + ', overlap ratio: ' + str(value[3])
+                    print('merging: '+patch1+' & '+patch2 + ', overlap ratio: ' + 
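
# ---------------------------------------------------------------------
# The merge criterion above, condensed into a sketch: two adjacent patches
# merge only when their visual-space overlap is tiny relative to each
# patch's own coverage (mergeOverlapThr, 0.05 by default). Inputs are the
# binary visual-space masks computed above; names are illustrative.
import numpy as np

def merge_overlap_ratios(vs1, vs2, pixel_size):
    overlap = np.sum((vs1 + vs2) == 2) * pixel_size ** 2
    return (overlap / (np.sum(vs1) * pixel_size ** 2),
            overlap / (np.sum(vs2) * pixel_size ** 2))

r1, r2 = merge_overlap_ratios(np.eye(4, dtype=int), np.eye(4, dtype=int), 0.5)
assert r1 == r2 == 1.0   # identical coverage fully overlaps: never merged
# ---------------------------------------------------------------------
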
str(value[3])) mergeIter = mergeIter + 1 - # remove small patches + + #remove small patches patches2 = dict(patches) - for key, value in patches2.iteritems(): + for key, value in patches2.items(): if (value.getArea() < smallPatchThr): patches.pop(key) @@ -1845,13 +1826,13 @@ def _mergePatches(self, isPlot=False): zoom = self.vasculatureMap.shape[0] / self.altPosMap.shape[0] except: zoom = 1 - f = plt.figure() - f111 = f.add_subplot(111) + f=plt.figure() + f111=f.add_subplot(111) try: - f111.imshow(self.vasculatureMap, cmap='gray', interpolation='nearest') + f111.imshow(self.vasculatureMap, cmap = 'gray', interpolation = 'nearest') except: pass - h = plotPatches(finalPatches, plotaxis=f111, zoom=zoom) + h = plotPatches(finalPatches, plotaxis = f111, zoom = zoom) f111.set_axis_off() f111.set_title('final Patches') @@ -1859,26 +1840,28 @@ def _mergePatches(self, isPlot=False): return patches, finalPatches + def getTraces(self, moviePath, - resampleFrequency=10, # at which frequency the traces are resampled - centerPatch='patch01', # the patch to get ROI - ROIcenters=([30., 0.], [60., 0.], [90., 0.]), # visual space centers of ROIs - ROIsearchRange=0.5, # range to search pixels in ROI - ROIsize=10, # ROI size (pixel) - ROIcolor=('#ff0000', '#00ff00', '#0000ff'), # color for each ROI - isPlot=False, + resampleFrequency = 10, # at which frequency the traces are resampled + centerPatch = 'patch01', # the patch to get ROI + ROIcenters = ([30.,0.],[60.,0.],[90.,0.]), # visual space centers of ROIs + ROIsearchRange = 0.5, #range to search pixels in ROI + ROIsize = 10, # ROI size (pixel) + ROIcolor = ('#ff0000','#00ff00','#0000ff'),#color for each ROI + isPlot = False, ): + if not hasattr(self, 'finalPatches'): self.processTrial() if type(ROIsearchRange) == int or type(ROIsearchRange) == float: - ROIsearchRanges = [] + ROIsearchRanges=[] for i in range(len(ROIcenters)): ROIsearchRanges.append(ROIsearchRange) elif type(ROIsearchRange) == list: - ROIsearchRanges = ROIsearchRange + ROIsearchRanges = ROIsearchRange if not hasattr(self, 'altPosMapf'): self._getSignMap() @@ -1894,29 +1877,25 @@ def getTraces(self, for i, ROIcenter in enumerate(ROIcenters): try: - altIndex = np.logical_and(altPosMapf > ROIcenter[1] - ROIsearchRanges[i], - altPosMapf < ROIcenter[1] + ROIsearchRanges[i]) - aziIndex = np.logical_and(aziPosMapf > ROIcenter[0] - ROIsearchRanges[i], - aziPosMapf < ROIcenter[0] + ROIsearchRanges[i]) + altIndex = np.logical_and(altPosMapf > ROIcenter[1]-ROIsearchRanges[i], altPosMapf < ROIcenter[1]+ROIsearchRanges[i]) + aziIndex = np.logical_and(aziPosMapf > ROIcenter[0]-ROIsearchRanges[i], aziPosMapf < ROIcenter[0]+ROIsearchRanges[i]) index = np.logical_and(altIndex, aziIndex) index = np.where(np.logical_and(index, centerPatchArray) == True) - pos = int(len(index[0]) / 2) - print ROIcenter, index - print 'ROI' + str(i) + ' center: [' + str(index[0][pos]) + ',' + str( - index[1][pos]) + ']; visual space: [' + str(altPosMapf[index[0][pos], index[1][pos]]) + ',' + str( - aziPosMapf[index[0][pos], index[1][pos]]) + ']' + pos = int(len(index[0])/2) + print(ROIcenter, index) + print('ROI'+str(i)+' center: ['+str(index[0][pos])+','+str(index[1][pos])+']; visual space: ['+str(altPosMapf[index[0][pos],index[1][pos]])+','+str(aziPosMapf[index[0][pos],index[1][pos]])+']') mask = ia.generate_rectangle_mask(mov, (index[0][pos], index[1][pos]), ROIsize, ROIsize) trace = ia.get_trace(mov, mask) t = np.arange(len(trace)) * self.imageExposureTime traceP = findPhaseIndex(trace) * self.imageExposureTime - t2, y2 = 
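
# ---------------------------------------------------------------------
# The ROI-center search in getTraces, condensed into a sketch: find pixels
# inside the center patch whose retinotopic position falls within a
# tolerance of the requested visual-space target, then take the middle hit,
# as the loop above does. Note that ROIcenter is read as (azimuth,
# altitude); names here are illustrative.
import numpy as np

def find_roi_center(alt_map, azi_map, patch_mask, center_deg, tol_deg=0.5):
    azi0, alt0 = center_deg
    hit = (np.abs(alt_map - alt0) < tol_deg) & (np.abs(azi_map - azi0) < tol_deg)
    rows, cols = np.where(hit & (patch_mask > 0))
    i = len(rows) // 2                       # middle matching pixel
    return rows[i], cols[i]
# ---------------------------------------------------------------------
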
ia.resample(t, trace, 1. / resampleFrequency) + t2, y2 = ia.resample(t, trace, 1./resampleFrequency) traces.append({'position': ROIcenter, 'mask': mask, - 'trace': [t2, y2], + 'trace': [t2,y2], 'tracePhaseTime': traceP, - 'ROIcolor': ROIcolor[i]}) + 'ROIcolor':ROIcolor[i]}) except: pass @@ -1928,34 +1907,33 @@ def getTraces(self, except: zoom = 1 - f = plt.figure(figsize=(15, 5)) - ax1 = f.add_axes([0.1, 0.1, 0.2, 0.8]) - ax2 = f.add_axes([0.4, 0.1, 0.5, 0.8]) + f = plt.figure(figsize=(15,5)) + ax1 = f.add_axes([0.1,0.1,0.2,0.8]) + ax2 = f.add_axes([0.4,0.1,0.5,0.8]) try: - ax1.imshow(vasculatureMap, cmap='gray', interpolation='nearest', aspect='equal') + ax1.imshow(vasculatureMap, cmap='gray', interpolation='nearest',aspect='equal') except: pass ROIlegend = [] for i in range(len(traces)): try: - pt.plot_mask(traces[i]['mask'], plotAxis=ax1, borderWidth=5, zoom=zoom, color=traces[i]['ROIcolor']) - currT = traces[i]['trace'][0] - currTrace = traces[i]['trace'][1] - ax2.plot(currT, currTrace - np.mean(currTrace), '-', color=traces[i]['ROIcolor'], lw=2) - ROIlegend.append( - ['center:' + str(traces[i]['position']) + ', baseline:' + '{0:.0f}'.format(np.mean(currTrace))]) + pt.plot_mask(traces[i]['mask'], plotAxis = ax1, borderWidth=5, zoom=zoom, color=traces[i]['ROIcolor']) + currT=traces[i]['trace'][0] + currTrace=traces[i]['trace'][1] + ax2.plot(currT,currTrace-np.mean(currTrace), '-', color=traces[i]['ROIcolor'], lw=2) + ROIlegend.append(['center:'+str(traces[i]['position'])+', baseline:' + '{0:.0f}'.format(np.mean(currTrace))]) except: pass - ax2.legend(ROIlegend, prop={'size': 8}) - ymin = np.floor(ax2.yaxis.get_data_interval()[0] / 10) * 10 - ymax = np.ceil(ax2.yaxis.get_data_interval()[1] / 10) * 10 + ax2.legend(ROIlegend,prop={'size':8}) + + ymin = np.floor(ax2.yaxis.get_data_interval()[0]/10)*10 + ymax = np.ceil(ax2.yaxis.get_data_interval()[1]/10)*10 for i in range(len(traces)): try: - ax2.plot([traces[i]['tracePhaseTime'], traces[i]['tracePhaseTime']], [ymin, ymax], '--', - color=traces[i]['ROIcolor'], lw=2) + ax2.plot([traces[i]['tracePhaseTime'],traces[i]['tracePhaseTime']],[ymin,ymax],'--',color=traces[i]['ROIcolor'],lw=2) except: pass @@ -1966,91 +1944,62 @@ def getTraces(self, return traces + def cleanMaps(self): - try: - del self.altPosMapf - except AttributeError: - pass + try:del self.altPosMapf + except AttributeError:pass - try: - del self.aziPosMapf - except AttributeError: - pass + try:del self.aziPosMapf + except AttributeError:pass - try: - del self.altPowerMapf - except AttributeError: - pass + try:del self.altPowerMapf + except AttributeError:pass - try: - del self.aziPowerMapf - except AttributeError: - pass + try:del self.aziPowerMapf + except AttributeError:pass - try: - del self.signMap - except AttributeError: - pass + try:del self.signMap + except AttributeError:pass - try: - del self.signMapf - except AttributeError: - pass + try:del self.signMapf + except AttributeError:pass - try: - del self.rawPatchMap - except AttributeError: - pass + try:del self.rawPatchMap + except AttributeError:pass - try: - del self.rawPatches - except AttributeError: - pass + try:del self.rawPatches + except AttributeError:pass - try: - del self.eccentricityMap - except AttributeError: - pass + try:del self.eccentricityMap + except AttributeError:pass - try: - del self.eccentricityMapf - except AttributeError: - pass + try:del self.eccentricityMapf + except AttributeError:pass - try: - del self.determinantMap - except AttributeError: - pass + try:del self.determinantMap + except 
AttributeError:pass - try: - del self.patchesAfterSplit - except AttributeError: - pass + try:del self.patchesAfterSplit + except AttributeError:pass - try: - del self.patchesAfterMerge - except AttributeError: - pass + try:del self.patchesAfterMerge + except AttributeError:pass - try: - del self.finalPatches - except AttributeError: - pass + try:del self.finalPatches + except AttributeError:pass + + try:del self.finalPatchesMarked + except AttributeError:pass - try: - del self.finalPatchesMarked - except AttributeError: - pass def cleanTraces(self): - try: - del self.traces - except AttributeError: - pass + try:del self.traces + except AttributeError:pass + - def processTrial(self, isPlot=False): + def processTrial(self, isPlot = False): self.cleanMaps() _ = self._getSignMap(isPlot=isPlot) if isPlot: plt.show() @@ -2067,15 +2016,16 @@ def processTrial(self, isPlot=False): _ = self._mergePatches(isPlot=isPlot) if isPlot: plt.show() + def refresh(self, - moviePath=None, - imageExposureTime=None, - centerPatch='patch01', - ROIcenters=[[30, 0], [60, 0], [90, 0]], # visual space centers of ROIs - ROIsearchRange=0.5, - ROIsize=10, # ROI size (pixel) - ROIcolor=['#ff0000', '#00ff00', '#0000ff'], # color for each ROI - isPlot=False): + moviePath = None, + imageExposureTime = None, + centerPatch = 'patch01', + ROIcenters = [[30,0],[60,0],[90,0]], # visual space centers of ROIs + ROIsearchRange = 0.5, + ROIsize = 10, # ROI size (pixel) + ROIcolor = ['#ff0000','#00ff00','#0000ff'], #color for each ROI + isPlot = False): self.cleanSelf() @@ -2088,108 +2038,112 @@ def refresh(self, self._mergePatches(isPlot=isPlot) try: - _ = self.getTraces(moviePath=moviePath, - imageExposureTime=imageExposureTime, - centerPatch=centerPatch, - ROIcenters=ROIcenters, - ROIsearchRange=ROIsearchRange, - ROIsize=ROIsize, - ROIcolor=ROIcolor, - isPlot=isPlot) + _ = self.getTraces(moviePath = moviePath, + imageExposureTime = imageExposureTime, + centerPatch = centerPatch, + ROIcenters = ROIcenters, + ROIsearchRange = ROIsearchRange, + ROIsize = ROIsize, + ROIcolor = ROIcolor, + isPlot = isPlot) except: pass + def generateTrialDict(self, - keysToRetain=('altPosMap', - 'aziPosMap', - 'signMap', - 'altPosMapf', - 'aziPosMapf', - 'signMapf', - 'rawPatchMap', - 'eccentricityMapf', - 'finalPatches', - 'finalPatchesMarked', - 'mouseID', - 'dateRecorded', - 'trialNum', - 'mouseType', - 'visualStimType', - 'visualStimBackground', - 'imageExposureTime', - 'altPowerMap', - 'altPowerMapf', - 'aziPowerMap', - 'aziPowerMapf', - 'vasculatureMap', - 'params', - 'isAnesthetized' - ) + keysToRetain = ('altPosMap', + 'aziPosMap', + 'signMap', + 'altPosMapf', + 'aziPosMapf', + 'signMapf', + 'rawPatchMap', + 'eccentricityMapf', + 'finalPatches', + 'finalPatchesMarked', + 'mouseID', + 'dateRecorded', + 'trialNum', + 'mouseType', + 'visualStimType', + 'visualStimBackground', + 'imageExposureTime', + 'altPowerMap', + 'altPowerMapf', + 'aziPowerMap', + 'aziPowerMapf', + 'vasculatureMap', + 'params', + 'isAnesthetized' + ) ): + trialDict = {} keysLeft = list(keysToRetain) - for key in self.__dict__.iterkeys(): + for key in self.__dict__.keys(): if key in keysToRetain: - if key == 'finalPatches': + if key=='finalPatches': finalPatches = {} - for area, patch in self.finalPatches.iteritems(): - finalPatches.update({area: getPatchDict(patch)}) - trialDict.update({'finalPatches': finalPatches}) + for area,patch in self.finalPatches.items(): + finalPatches.update({area:getPatchDict(patch)}) + trialDict.update({'finalPatches':finalPatches}) 
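
# ---------------------------------------------------------------------
# Typical end-to-end use of this class (a sketch: alt and azi stand for
# position maps produced by the upstream phase analysis and are not defined
# here; random arrays will not yield meaningful patches). processTrial
# chains the _getSignMap / patch / split / merge steps above, and
# generateTrialDict packages the results for pickling.
import pickle

trial = RetinotopicMappingTrial(mouseID='000001', dateRecorded=20140101,
                                trialNum='1', mouseType='C57BL/6',
                                visualStimType='KSstim',
                                visualStimBackground='gray',
                                imageExposureTime=0.05,
                                altPosMap=alt, aziPosMap=azi,
                                altPowerMap=None, aziPowerMap=None,
                                vasculatureMap=None)
trial.processTrial(isPlot=False)
with open(trial.getName() + '.pkl', 'wb') as f:
    pickle.dump(trial.generateTrialDict(), f)
# ---------------------------------------------------------------------
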
keysLeft.remove('finalPatches')
             elif key == 'finalPatchesMarked':
                 finalPatchesMarked = {}
-                for area, patch in self.finalPatchesMarked.iteritems():
-                    finalPatchesMarked.update({area: getPatchDict(patch)})
-                trialDict.update({'finalPatchesMarked': finalPatchesMarked})
+                for area,patch in self.finalPatchesMarked.items():
+                    finalPatchesMarked.update({area:getPatchDict(patch)})
+                trialDict.update({'finalPatchesMarked':finalPatchesMarked})
                 keysLeft.remove('finalPatchesMarked')
             else:
                 try:
-                    trialDict.update({key: self.__dict__[key]})
+                    trialDict.update({key:self.__dict__[key]})
                     keysLeft.remove(key)
                 except AttributeError:
                     pass
 
         if keysLeft:
-            print 'Can not find wanted key(s): ' + str(keysLeft)
+            print('Cannot find wanted key(s): ' + str(keysLeft))
 
         return trialDict
 
+
     def generatePosOverlay(self):
 
         if (not hasattr(self, 'altPosMapf')) or (not hasattr(self, 'aziPosMapf')):
             self._getSignMap()
 
-        vasMap = self.vasculatureMap
+        vasMap=self.vasculatureMap
         altPosMap = self.altPosMapf
         aziPosMap = self.aziPosMapf
 
-        zoom = vasMap.shape[0] / altPosMap.shape[0]
+        zoom = vasMap.shape[0]/altPosMap.shape[0]
 
-        altPosMap = ni.zoom(altPosMap, zoom)
-        aziPosMap = ni.zoom(aziPosMap, zoom)
+        altPosMap = ni.zoom(altPosMap,zoom)
+        aziPosMap = ni.zoom(aziPosMap,zoom)
 
-        f = plt.figure(figsize=(20, 5))
-        ax1 = f.add_subplot(121)
-        ax1.imshow(vasMap, cmap='gray', interpolation='nearest')
-        currfig = ax1.imshow(altPosMap, cmap='jet', interpolation='nearest', vmin=-30, vmax=50, alpha=0.5)
+        f=plt.figure(figsize=(20,5))
+        ax1=f.add_subplot(121)
+        ax1.imshow(vasMap,cmap='gray',interpolation='nearest')
+        currfig=ax1.imshow(altPosMap,cmap='jet',interpolation='nearest',vmin=-30,vmax=50,alpha=0.5)
         f.colorbar(currfig)
         ax1.axis('off')
         ax1.set_title('altitude position')
 
-        ax2 = f.add_subplot(122)
-        ax2.imshow(vasMap, cmap='gray', interpolation='nearest')
-        currfig = ax2.imshow(aziPosMap, cmap='jet', interpolation='nearest', vmin=0, vmax=100, alpha=0.5)
+        ax2=f.add_subplot(122)
+        ax2.imshow(vasMap,cmap='gray',interpolation='nearest')
+        currfig=ax2.imshow(aziPosMap,cmap='jet',interpolation='nearest',vmin=0,vmax=100,alpha=0.5)
         f.colorbar(currfig)
         ax2.axis('off')
         ax2.set_title('azimuth position')
 
-    def generateStandardOutput(self, traces=None, isSave=False, saveFolder=None):
+
+    def generateStandardOutput(self,traces = None,isSave = False,saveFolder = None):
 
         if not hasattr(self, 'finalPatches'):
             self.processTrial()
@@ -2198,13 +2152,13 @@ def generateStandardOutput(self, traces=None, isSave=False, saveFolder=None):
             self._getSignMap()
 
         try:
-            zoom = self.vasculatureMap.shape[0] / self.altPosMap.shape[0]
+            zoom = self.vasculatureMap.shape[0]/self.altPosMap.shape[0]
         except:
             zoom = 1
 
         trialName = self.getName()
 
-        f = plt.figure(figsize=(15, 10))
+        f = plt.figure(figsize=(15,10))
         f.suptitle(trialName)
 
         f_331 = f.add_subplot(331)
@@ -2215,35 +2169,33 @@ def generateStandardOutput(self, traces=None, isSave=False, saveFolder=None):
         f_331.set_axis_off()
 
         if traces:
-            f_3389 = f.add_axes([0.4, 0.1, 0.5, 0.23])
-            ROIlegend = []
+            f_3389 = f.add_axes([0.4,0.1,0.5,0.23])
+            ROIlegend=[]
             for i in range(len(traces)):
-                pt.plot_mask(traces[i]['mask'], plotAxis=f_331, borderWidth=5, zoom=zoom, color=traces[i]['ROIcolor'])
+                pt.plot_mask(traces[i]['mask'], plotAxis = f_331, borderWidth=5, zoom=zoom, color=traces[i]['ROIcolor'])
 
-                currT = traces[i]['trace'][0]
-                currTrace = traces[i]['trace'][1]
-                f_3389.plot(currT, currTrace - np.mean(currTrace), '-', color=traces[i]['ROIcolor'], lw=2)
+                currT=traces[i]['trace'][0]
+                currTrace=traces[i]['trace'][1]
+ 
f_3389.plot(currT,currTrace-np.mean(currTrace), '-', color=traces[i]['ROIcolor'], lw=2) - ROIlegend.append( - ['center:' + str(traces[i]['position']) + ', baseline:' + '{0:.0f}'.format(np.mean(currTrace))]) + ROIlegend.append(['center:'+str(traces[i]['position'])+', baseline:' + '{0:.0f}'.format(np.mean(currTrace))]) - f_3389.legend(ROIlegend, prop={'size': 8}) + f_3389.legend(ROIlegend,prop={'size':8}) - ymin = np.floor(f_3389.yaxis.get_data_interval()[0] / 10) * 10 - ymax = np.ceil(f_3389.yaxis.get_data_interval()[1] / 10) * 10 + ymin = np.floor(f_3389.yaxis.get_data_interval()[0]/10)*10 + ymax = np.ceil(f_3389.yaxis.get_data_interval()[1]/10)*10 for i in range(len(traces)): - f_3389.plot([traces[i]['tracePhaseTime'], traces[i]['tracePhaseTime']], [ymin, ymax], '--', - color=traces[i]['ROIcolor'], lw=2) + f_3389.plot([traces[i]['tracePhaseTime'],traces[i]['tracePhaseTime']],[ymin,ymax],'--',color=traces[i]['ROIcolor'], lw=2) f_3389.set_ylim([ymin, ymax]) f_3389.set_xlabel('time (sec)') f_3389.set_ylabel('normalized count') + f_332 = f.add_subplot(332) - currfig = f_332.imshow(np.mean([self.altPowerMap, self.aziPowerMap], axis=0), cmap='hot', - interpolation='nearest') + currfig = f_332.imshow(np.mean([self.altPowerMap,self.aziPowerMap],axis=0), cmap='hot', interpolation='nearest') f.colorbar(currfig) f_332.set_axis_off() @@ -2252,11 +2204,11 @@ def generateStandardOutput(self, traces=None, isSave=False, saveFolder=None): vm = self.vasculatureMap.astype(np.float) vmin = np.amin(vm) vmax = np.amax(vm) - vmin = vmin - 0.5 * (vmax - vmin) + vmin = vmin - 0.5*(vmax-vmin) f_333.imshow(self.vasculatureMap, vmin=vmin, vmax=vmax, cmap='gray', interpolation='nearest') except: pass - plotPatches(self.finalPatches, plotaxis=f_333, zoom=zoom, markersize=3) + plotPatches(self.finalPatches,plotaxis=f_333,zoom=zoom, markersize=3) f_333.set_axis_off() f_334 = f.add_subplot(334) @@ -2280,25 +2232,24 @@ def generateStandardOutput(self, traces=None, isSave=False, saveFolder=None): plotPatchBorders3(self.finalPatches, self.altPosMapf, self.aziPosMapf, - plotAxis=f_337, - plotSize=500, - borderWidth=2, - zoom=1, - centerPatchKey='patch01', - markerSize=5, - closingIteration=1, - arrowLength=15) + plotAxis = f_337, + plotSize = 500, + borderWidth = 2, + zoom = 1, + centerPatchKey = 'patch01', + markerSize = 5, + closingIteration = 1, + arrowLength = 15) f_337.get_xaxis().set_visible(False) f_337.get_yaxis().set_visible(False) if isSave: - f.savefig(os.path.join(saveFolder, trialName + '.pdf'), format='pdf', dpi=300, orientation='landscape', - papertype='a4') - f.savefig(os.path.join(saveFolder, trialName + '.png'), format='png', dpi=300, orientation='landscape', - papertype='a4') + f.savefig(os.path.join(saveFolder,trialName+'.pdf'), format='pdf', dpi = 300, orientation='landscape', papertype='a4') + f.savefig(os.path.join(saveFolder,trialName+'.png'), format='png', dpi = 300, orientation='landscape', papertype='a4') del f - def generateNormalizedMaps(self, centerPatchKey='patch01', mapSize=512, isPlot=False, borderValue=0.): + + def generateNormalizedMaps(self,centerPatchKey='patch01',mapSize = 512,isPlot = False,borderValue=0.): if not hasattr(self, 'finalPatches'): self.processTrial() @@ -2317,16 +2268,15 @@ def generateNormalizedMaps(self, centerPatchKey='patch01', mapSize=512, isPlot=F aziPosMapC = ia.center_image(self.aziPosMap, centerPixel=centerPixel, newSize=mapSize, borderValue=borderValue) aziPosMapNor = ia.rotate_image(aziPosMapC, rotationAngle, borderValue=borderValue) + if hasattr(self, 
'altPowerMap') and self.altPowerMap is not None: - altPowerMapC = ia.center_image(self.altPowerMap, centerPixel=centerPixel, newSize=mapSize, - borderValue=borderValue) + altPowerMapC = ia.center_image(self.altPowerMap, centerPixel=centerPixel, newSize=mapSize, borderValue=borderValue) altPowerMapNor = ia.rotate_image(altPowerMapC, rotationAngle, borderValue=borderValue) else: altPowerMapNor = None if hasattr(self, 'aziPowerMap') and self.aziPowerMap is not None: - aziPowerMapC = ia.center_image(self.aziPowerMap, centerPixel=centerPixel, newSize=mapSize, - borderValue=borderValue) + aziPowerMapC = ia.center_image(self.aziPowerMap, centerPixel=centerPixel, newSize=mapSize, borderValue=borderValue) aziPowerMapNor = ia.rotate_image(aziPowerMapC, rotationAngle, borderValue=borderValue) else: aziPowerMapNor = None @@ -2338,43 +2288,45 @@ def generateNormalizedMaps(self, centerPatchKey='patch01', mapSize=512, isPlot=F signMapfNor = ia.rotate_image(signMapfC, rotationAngle, borderValue=borderValue) if isPlot: + trialName = self.getName() - f = plt.figure(figsize=(15, 8)) - f.suptitle('normalized maps for' + trialName) + f = plt.figure(figsize=(15,8)) + f.suptitle('normalized maps for'+trialName) f_231 = f.add_subplot(231) - currfig = f_231.imshow(altPosMapNor, vmin=-30, vmax=50, cmap='hsv', interpolation='nearest') + currfig = f_231.imshow(altPosMapNor,vmin=-30,vmax=50, cmap = 'hsv', interpolation='nearest') f.colorbar(currfig) f_231.set_axis_off() f_231.set_title('normalized altitude position') f_232 = f.add_subplot(232) - currfig = f_232.imshow(aziPosMapNor, vmin=0, vmax=120, cmap='hsv', interpolation='nearest') + currfig = f_232.imshow(aziPosMapNor,vmin=0,vmax=120, cmap = 'hsv', interpolation='nearest') f.colorbar(currfig) f_232.set_axis_off() f_232.set_title('normalized altitude position') f_233 = f.add_subplot(233) - currfig = f_233.imshow(signMapfNor, vmin=-1, vmax=1, cmap='jet', interpolation='nearest') + currfig = f_233.imshow(signMapfNor,vmin=-1,vmax=1,cmap = 'jet', interpolation='nearest') f.colorbar(currfig) f_233.set_axis_off() f_233.set_title('normalized sign map') f_234 = f.add_subplot(234) - currfig = f_234.imshow(altPowerMapNor, cmap='hot', interpolation='nearest') + currfig = f_234.imshow(altPowerMapNor,cmap = 'hot', interpolation='nearest') f.colorbar(currfig) f_234.set_axis_off() f_234.set_title('normalized altitude power') f_235 = f.add_subplot(235) - currfig = f_235.imshow(aziPowerMapNor, cmap='hot', interpolation='nearest') + currfig = f_235.imshow(aziPowerMapNor,cmap = 'hot', interpolation='nearest') f.colorbar(currfig) f_235.set_axis_off() f_235.set_title('normalized azimuth power') return altPosMapNor, aziPosMapNor, altPowerMapNor, aziPowerMapNor, signMapNor, signMapfNor - def generateNormalizedTrial(self, centerPatchKey='patch01', mapSize=512, isPlot=False, borderValue=0.): + + def generateNormalizedTrial(self,centerPatchKey='patch01', mapSize=512, isPlot=False, borderValue=0.): if not hasattr(self, 'finalPatches'): self.processTrial() @@ -2393,15 +2345,13 @@ def generateNormalizedTrial(self, centerPatchKey='patch01', mapSize=512, isPlot= # aziPosMapNor = aziPosMapNor - aziPosOrigin if hasattr(self, 'altPowerMap') and self.altPowerMap is not None: - altPowerMapC = ia.center_image(self.altPowerMap, centerPixel=centerPixel, newSize=mapSize, - borderValue=borderValue) + altPowerMapC = ia.center_image(self.altPowerMap, centerPixel=centerPixel, newSize=mapSize, borderValue=borderValue) altPowerMapNor = ia.rotate_image(altPowerMapC, rotationAngle, borderValue=borderValue) 
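
# ---------------------------------------------------------------------
# The center/rotate normalization used throughout this block, sketched with
# scipy.ndimage stand-ins for ia.center_image and ia.rotate_image (the real
# helpers also resize the canvas to mapSize, which is omitted here):
import numpy as np
import scipy.ndimage as ni

def normalize_map_sketch(m, center_pixel, angle_deg, border=0.):
    shift = (m.shape[0] / 2. - center_pixel[0],
             m.shape[1] / 2. - center_pixel[1])
    centered = ni.shift(m, shift, cval=border, order=0)   # recenter the patch
    return ni.rotate(centered, angle_deg, reshape=False, cval=border, order=0)
# ---------------------------------------------------------------------
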
else: altPowerMapNor = None if hasattr(self, 'aziPowerMap') and self.aziPowerMap is not None: - aziPowerMapC = ia.center_image(self.aziPowerMap, centerPixel=centerPixel, newSize=mapSize, - borderValue=borderValue) + aziPowerMapC = ia.center_image(self.aziPowerMap, centerPixel=centerPixel, newSize=mapSize, borderValue=borderValue) aziPowerMapNor = ia.rotate_image(aziPowerMapC, rotationAngle, borderValue=borderValue) else: aziPowerMapNor = None @@ -2425,8 +2375,8 @@ def generateNormalizedTrial(self, centerPatchKey='patch01', mapSize=512, isPlot= trialName = self.getName() - f = plt.figure(figsize=(15, 8)) - f.suptitle('normalized maps for' + trialName) + f = plt.figure(figsize=(15,8)) + f.suptitle('normalized maps for'+trialName) f_231 = f.add_subplot(231) currfig = f_231.imshow(self.altPosMap, vmin=-30, vmax=50, cmap='hsv', interpolation='nearest') @@ -2471,7 +2421,8 @@ def generateNormalizedTrial(self, centerPatchKey='patch01', mapSize=512, isPlot= altPosMapNor, aziPosMapNor, altPosMapNor, aziPowerMapNor, vasMapNor, params={}, isAnesthetized=self.isAnesthetized) - def getNormalizeTransform(self, centerPatchKey='patch01'): + + def getNormalizeTransform(self,centerPatchKey = 'patch01'): try: centerPatchObj = self.finalPatchesMarked[centerPatchKey] @@ -2485,11 +2436,12 @@ def getNormalizeTransform(self, centerPatchKey='patch01'): aziGradMap = np.gradient(self.aziPosMapf) aziGradMapX = np.sum(aziGradMap[0] * centerPatchObj.array) aziGradMapY = np.sum(aziGradMap[1] * centerPatchObj.array) - rotationAngle = -(np.arctan2(-aziGradMapX, aziGradMapY) % (2 * np.pi)) * 180 / np.pi + rotationAngle = -(np.arctan2(-aziGradMapX,aziGradMapY)%(2*np.pi))*180/np.pi return centerPixel, rotationAngle - def normalize(self, centerPatchKey='patch01', mapSize=800, isPlot=False, borderValue=0.): + + def normalize(self,centerPatchKey = 'patch01',mapSize = 800,isPlot = False,borderValue=0.): ''' Generate normalized vasculature map and normalized final patches @@ -2512,10 +2464,10 @@ def normalize(self, centerPatchKey='patch01', mapSize=800, isPlot=False, borderV try: vasMap = self.vasculatureMap.astype(np.float) - zoom = int(float(vasMap.shape[0]) / float(self.aziPosMapf.shape[0])) + zoom = int(float(vasMap.shape[0])/float(self.aziPosMapf.shape[0])) except AttributeError as e: - print 'Can not find vasculature map!!\n\n' - print e + print('Can not find vasculature map!!\n\n') + print(e) zoom = 1 mapSize = mapSize * zoom @@ -2528,43 +2480,43 @@ def normalize(self, centerPatchKey='patch01', mapSize=800, isPlot=False, borderV pass patchesNor = {} - for key, patch in patches.iteritems(): + for key, patch in patches.items(): patchArray = patch.array.astype(np.float) - patchArrayNor = ni.zoom(patchArray, zoom=zoom) - patchArrayNor = ia.center_image(patchArrayNor, centerPixel=centerPixel, newSize=mapSize, - borderValue=borderValue) + patchArrayNor = ni.zoom(patchArray,zoom=zoom) + patchArrayNor = ia.center_image(patchArrayNor, centerPixel=centerPixel, newSize=mapSize, borderValue=borderValue) patchArrayNor = ia.rotate_image(patchArrayNor, rotationAngle, borderValue=borderValue) patchArrayNor = np.round(patchArrayNor).astype(np.int8) - newPatch = Patch(patchArrayNor, patch.sign) - patchesNor.update({key: newPatch}) + newPatch = Patch(patchArrayNor,patch.sign) + patchesNor.update({key:newPatch}) if isPlot: - f = plt.figure(figsize=(12, 5)) + f = plt.figure(figsize=(12,5)) ax1 = f.add_subplot(121) ax1.set_title('original') try: - ax1.imshow(vasMap, cmap='gray', interpolation='nearest') + ax1.imshow(vasMap, cmap = 'gray', 
interpolation = 'nearest') except: pass - h = plotPatches(patches, plotaxis=ax1, zoom=zoom) + h = plotPatches(patches, plotaxis = ax1, zoom = zoom) ax1.set_axis_off() ax2 = f.add_subplot(122) ax2.set_title('normalized') try: - ax2.imshow(vasMapNor, cmap='gray', interpolation='nearest') + ax2.imshow(vasMapNor, cmap = 'gray', interpolation = 'nearest') except: pass - h = plotPatches(patchesNor, plotaxis=ax2, zoom=1) + h = plotPatches(patchesNor, plotaxis = ax2, zoom = 1) ax2.set_axis_off() + return vasMapNor, patchesNor - def plotNormalizedPatchCenter(self, centerPatchKey='patch01', mapSize=512, plotAxis=None, markerSize=5., - markerEdgeWidth=2.): + + def plotNormalizedPatchCenter(self,centerPatchKey = 'patch01',mapSize = 512,plotAxis = None,markerSize = 5.,markerEdgeWidth = 2.): if not plotAxis: - f = plt.figure() + f=plt.figure() plotAxis = f.add_subplot(111) if not hasattr(self, 'finalPatches'): @@ -2582,9 +2534,9 @@ def plotNormalizedPatchCenter(self, centerPatchKey='patch01', mapSize=512, plotA aziGradMap = np.gradient(aziPosMapf) aziGradMapX = np.sum(aziGradMap[0] * centerPatchObj.array) aziGradMapY = np.sum(aziGradMap[1] * centerPatchObj.array) - rotationAngle = -(np.arctan2(-aziGradMapX, aziGradMapY) % (2 * np.pi)) * 180 / np.pi + rotationAngle = -(np.arctan2(-aziGradMapX,aziGradMapY)%(2*np.pi))*180/np.pi - for key, patch in self.finalPatches.iteritems(): + for key, patch in self.finalPatches.items(): patchArray = patch.array.astype(np.float32) patchSign = patch.sign @@ -2597,27 +2549,28 @@ def plotNormalizedPatchCenter(self, centerPatchKey='patch01', mapSize=512, plotA if patchSign == 1: plotAxis.plot(center[1], - mapSize - center[0], + mapSize-center[0], 'o', mfc='none', - mec='r', - markersize=markerSize, - mew=markerEdgeWidth) + mec = 'r', + markersize = markerSize, + mew = markerEdgeWidth) elif patchSign == -1: plotAxis.plot(center[1], - mapSize - center[0], + mapSize-center[0], 'o', mfc='none', - mec='b', - markersize=markerSize, - mew=markerEdgeWidth) + mec = 'b', + markersize = markerSize, + mew = markerEdgeWidth) plotAxis.set_xlim([0, mapSize]) plotAxis.set_ylim([0, mapSize]) plotAxis.set_axis_off() - def plotTrial(self, isSave=False, saveFolder=None): + + def plotTrial(self,isSave = False,saveFolder = None): if not hasattr(self, 'finalPatches'): self.processTrial() @@ -2632,8 +2585,8 @@ def plotTrial(self, isSave=False, saveFolder=None): trialName = self.getName() - # plot figure 1 - f1 = plt.figure(figsize=(18, 9)) + #plot figure 1 + f1=plt.figure(figsize=(18,9)) f1.suptitle(trialName) f1_231 = f1.add_subplot(231) currfig = f1_231.imshow(self.altPosMapf, vmin=-40, vmax=60, cmap='hsv', interpolation='nearest') @@ -2657,116 +2610,113 @@ def plotTrial(self, isSave=False, saveFolder=None): f1_234.set_title('filtered visual sign map') f1_235 = f1.add_subplot(235) try: - currfig = f1_235.imshow(np.mean([self.altPowerMap, self.aziPowerMap], axis=0), cmap='hot', - interpolation='nearest') + currfig = f1_235.imshow(np.mean([self.altPowerMap,self.aziPowerMap],axis=0), cmap='hot', interpolation='nearest') f1.colorbar(currfig) plt.axis('off') f1_235.set_title('power map') - except TypeError: - pass + except TypeError: pass f1_236 = f1.add_subplot(236) currfig = f1_236.imshow(self.rawPatchMap, vmin=0, vmax=1, cmap='jet', interpolation='nearest') f1.colorbar(currfig) plt.axis('off') f1_236.set_title('raw patchmap') - # plot figure 2 - f2 = plt.figure(figsize=(10, 8)) + + #plot figure 2 + f2 = plt.figure(figsize=(10,8)) f2.suptitle(trialName) f2_221 = f2.add_subplot(221) - for key, value 
in self.rawPatches.iteritems(): - currfig = f2_221.imshow(self.altPosMapf * value.getMask(), cmap='jet', vmin=-40, vmax=60, - interpolation='nearest') + for key, value in self.rawPatches.items(): + currfig = f2_221.imshow(self.altPosMapf * value.getMask(), vmin=-40, vmax=60, interpolation='nearest') f2.colorbar(currfig) plt.tick_params( - axis='both', # changes apply to the x-axis - which='both', # both major and minor ticks are affected - bottom='off', # ticks along the bottom edge are off - top='off', # ticks along the top edge are off - left='off', - right='off', - labelbottom='off', - labelleft='off') + axis='both', # changes apply to the x-axis + which='both', # both major and minor ticks are affected + bottom='off', # ticks along the bottom edge are off + top='off', # ticks along the top edge are off + left='off', + right='off', + labelbottom='off', + labelleft='off') f2_221.set_title('patches with altitude postion') f2_222 = f2.add_subplot(222) - for key, value in self.rawPatches.iteritems(): - currfig = f2_222.imshow(self.aziPosMapf * value.getMask(), cmap='jet', vmin=-10, vmax=120, - interpolation='nearest') + for key, value in self.rawPatches.items(): + currfig = f2_222.imshow(self.aziPosMapf * value.getMask(), vmin=-10, vmax=120, interpolation='nearest') f2.colorbar(currfig) plt.tick_params( - axis='both', # changes apply to the x-axis - which='both', # both major and minor ticks are affected - bottom='off', # ticks along the bottom edge are off - top='off', # ticks along the top edge are off - left='off', - right='off', - labelbottom='off', - labelleft='off') + axis='both', # changes apply to the x-axis + which='both', # both major and minor ticks are affected + bottom='off', # ticks along the bottom edge are off + top='off', # ticks along the top edge are off + left='off', + right='off', + labelbottom='off', + labelleft='off') f2_222.set_title('patches with azimuth postion') f2_223 = f2.add_subplot(223) try: - f2_223.imshow(self.vasculatureMap, cmap='gray', interpolation='nearest') + f2_223.imshow(self.vasculatureMap, cmap = 'gray', interpolation = 'nearest') except: pass - h = plotPatches(self.rawPatches, plotaxis=f2_223, zoom=zoom) - f2.colorbar(h[h.keys()[0]]) + h = plotPatches(self.rawPatches, plotaxis = f2_223, zoom = zoom) + f2.colorbar(h[list(h.keys())[0]]) plt.axis('off') plt.title('patches with center and sign') + f2_224 = f2.add_subplot(224) - currfig = f2_224.imshow(self.eccentricityMapf, cmap='jet', interpolation='nearest') + currfig = f2_224.imshow(self.eccentricityMapf, interpolation='nearest') plt.tick_params( - axis='both', # changes apply to the x-axis - which='both', # both major and minor ticks are affected - bottom='off', # ticks along the bottom edge are off - top='off', # ticks along the top edge are off - left='off', - right='off', - labelbottom='off') + axis='both', # changes apply to the x-axis + which='both', # both major and minor ticks are affected + bottom='off', # ticks along the bottom edge are off + top='off', # ticks along the top edge are off + left='off', + right='off', + labelbottom='off') f2_224.set_title('patches with patch eccentricity') f2.colorbar(currfig) - # plot figure 3 - f3 = plt.figure(figsize=(18, 7)) + #plot figure 3 + f3=plt.figure(figsize=(18,7)) f3.suptitle(trialName) f3_131 = f3.add_subplot(131) try: - f3_131.imshow(self.vasculatureMap, cmap='gray', interpolation='nearest') + f3_131.imshow(self.vasculatureMap, cmap = 'gray', interpolation = 'nearest') except: pass - h = plotPatches(self.rawPatches, plotaxis=f3_131, 
zoom=zoom) + h = plotPatches(self.rawPatches, plotaxis = f3_131, zoom = zoom) f3_131.set_axis_off() f3_131.set_title('original patches') f3_132 = f3.add_subplot(132) try: - f3_132.imshow(self.vasculatureMap, cmap='gray', interpolation='nearest') + f3_132.imshow(self.vasculatureMap, cmap = 'gray', interpolation = 'nearest') except: pass - h = plotPatches(self.patchesAfterSplit, plotaxis=f3_132, zoom=zoom) + h = plotPatches(self.patchesAfterSplit, plotaxis = f3_132, zoom = zoom) f3_132.set_axis_off() f3_132.set_title('patches after split') f3_133 = f3.add_subplot(133) try: - f3_133.imshow(self.vasculatureMap, cmap='gray', interpolation='nearest') + f3_133.imshow(self.vasculatureMap, cmap = 'gray', interpolation = 'nearest') except: pass - h = plotPatches(self.patchesAfterMerge, plotaxis=f3_133, zoom=zoom) + h = plotPatches(self.patchesAfterMerge, plotaxis = f3_133, zoom = zoom) f3_133.set_axis_off() f3_133.set_title('patches after merge') if isSave: - f1.savefig(os.path.join(saveFolder, trialName + '_SignMap.pdf'), format='pdf', dpi=600, - orientation='landscape', papertype='a4') - f2.savefig(os.path.join(saveFolder, trialName + '_RawPatches.pdf'), format='pdf', dpi=600, - orientation='landscape', papertype='a4') - f3.savefig(os.path.join(saveFolder, trialName + '_SplitMerge.pdf'), format='pdf', dpi=600, - orientation='landscape', papertype='a4') + f1.savefig(os.path.join(saveFolder,trialName+'_SignMap.pdf'), format='pdf', dpi = 600, orientation='landscape', papertype='a4') + f2.savefig(os.path.join(saveFolder,trialName+'_RawPatches.pdf'), format='pdf', dpi = 600, orientation='landscape', papertype='a4') + f3.savefig(os.path.join(saveFolder,trialName+'_SplitMerge.pdf'), format='pdf', dpi = 600, orientation='landscape', papertype='a4') + + + def plotFinalPatches(self,plotAxis = None): - def plotFinalPatches(self, plotAxis=None): if not hasattr(self, 'finalPatches'): self.processTrial() @@ -2779,121 +2729,99 @@ def plotFinalPatches(self, plotAxis=None): name = self.getName() if not plotAxis: - f = plt.figure(figsize=(10, 10)) - plotAxis = f.add_subplot(111) + f=plt.figure(figsize=(10,10)) + plotAxis=f.add_subplot(111) try: - plotAxis.imshow(self.vasculatureMap, cmap='gray', interpolation='nearest') + plotAxis.imshow(self.vasculatureMap, cmap = 'gray', interpolation = 'nearest') except: pass - h = plotPatches(self.finalPatches, plotaxis=plotAxis, zoom=zoom) + h = plotPatches(self.finalPatches, plotaxis = plotAxis, zoom = zoom) plotAxis.set_axis_off() plotAxis.set_title(name) - def plotFinalPatchBorders(self, plotAxis=None, plotName=True, plotVasMap=True, isTitle=True, isColor=True, - borderWidth=2, fontSize=15, interpolation='bilinear'): - if hasattr(self, 'finalPatchesMarked'): - finalPatches = self.finalPatchesMarked - elif hasattr(self, 'finalPatches'): - finalPatches = self.finalPatches - else: - self.processTrial();finalPatches = self.finalPatches + def plotFinalPatchBorders(self,plotAxis = None,plotName=True,plotVasMap=True,isTitle=True,isColor=True, + borderWidth=2,fontSize=15,interpolation='bilinear'): - try: - zoom = self.vasculatureMap.shape[0] / self.altPosMap.shape[0] - except AttributeError: - zoom = 1 + if hasattr(self,'finalPatchesMarked'):finalPatches=self.finalPatchesMarked + elif hasattr(self, 'finalPatches'):finalPatches=self.finalPatches + else:self.processTrial();finalPatches=self.finalPatches + + try:zoom = self.vasculatureMap.shape[0] / self.altPosMap.shape[0] + except AttributeError:zoom = 1 name = self.getName() if not plotAxis: - f = plt.figure(figsize=(10, 10)) - 
plotAxis = f.add_subplot(111) + f=plt.figure(figsize=(10,10)) + plotAxis=f.add_subplot(111) if plotVasMap: - try: - plotAxis.imshow(self.vasculatureMap, cmap='gray', interpolation='nearest') - except AttributeError: - pass + try:plotAxis.imshow(self.vasculatureMap, cmap = 'gray', interpolation = 'nearest') + except AttributeError:pass - for key, patch in finalPatches.iteritems(): + for key, patch in finalPatches.items(): mask = patch.getMask() if isColor: - if patch.sign == 1: - plotColor = '#ff0000' - elif patch.sign == -1: - plotColor = '#0000ff' - else: - plotColor = '#000000' - else: - plotColor = '#000000' + if patch.sign == 1:plotColor='#ff0000' + elif patch.sign == -1:plotColor='#0000ff' + else:plotColor='#000000' + else:plotColor='#000000' im = pt.plot_mask(mask, plotAxis=plotAxis, color=plotColor, zoom=zoom, borderWidth=borderWidth) im.set_interpolation(interpolation) if plotName: - center = patch.getCenter() - plotAxis.text(center[1] * zoom, center[0] * zoom, key, verticalalignment='center', - horizontalalignment='center', color=plotColor, fontsize=fontSize) + center=patch.getCenter() + plotAxis.text(center[1]*zoom,center[0]*zoom,key,verticalalignment='center', horizontalalignment='center',color=plotColor,fontsize=fontSize) plotAxis.set_axis_off() - if isTitle: plotAxis.set_title(name) + if isTitle:plotAxis.set_title(name) return plotAxis.get_figure() + def plotFinalPatchBorders2(self, plotAxis=None, plotName=True, plotVasMap=True, isTitle=True, isColor=True, positiveColor='#ff0000', negativeColor='#0000ff', borderWidth=2, fontSize=15): - if hasattr(self, 'finalPatchesMarked'): - finalPatches = self.finalPatchesMarked - elif hasattr(self, 'finalPatches'): - finalPatches = self.finalPatches - else: - self.processTrial();finalPatches = self.finalPatches + if hasattr(self,'finalPatchesMarked'):finalPatches=self.finalPatchesMarked + elif hasattr(self, 'finalPatches'):finalPatches=self.finalPatches + else:self.processTrial();finalPatches=self.finalPatches - try: - zoom = self.vasculatureMap.shape[0] / self.altPosMap.shape[0] - except AttributeError: - zoom = 1 + try:zoom = self.vasculatureMap.shape[0] / self.altPosMap.shape[0] + except AttributeError:zoom = 1 name = self.getName() if not plotAxis: - f = plt.figure(figsize=(10, 10)) - plotAxis = f.add_subplot(111) + f=plt.figure(figsize=(10,10)) + plotAxis=f.add_subplot(111) if (plotVasMap) and (self.vasculatureMap is not None): - try: - plotAxis.imshow(self.vasculatureMap, cmap='gray', interpolation='nearest') - except AttributeError: - plotAxis.invert_yaxis();pass - else: - plotAxis.invert_yaxis() + try:plotAxis.imshow(self.vasculatureMap, cmap = 'gray', interpolation = 'nearest') + except AttributeError: plotAxis.invert_yaxis();pass + else: plotAxis.invert_yaxis() - for key, patch in finalPatches.iteritems(): + for key, patch in finalPatches.items(): if isColor: - if patch.sign == 1: - plotColor = positiveColor - elif patch.sign == -1: - plotColor = negativeColor - else: - plotColor = '#000000' - else: - plotColor = '#000000' + if patch.sign == 1:plotColor=positiveColor + elif patch.sign == -1:plotColor=negativeColor + else:plotColor='#000000' + else:plotColor='#000000' - currArray = ni.binary_erosion(patch.array, iterations=1) + currArray = ni.binary_erosion(patch.array,iterations=1) im = pt.plot_mask_borders(currArray, plotAxis=plotAxis, color=plotColor, zoom=zoom, borderWidth=borderWidth) if plotName: - center = patch.getCenter() - plotAxis.text(center[1] * zoom, center[0] * zoom, key, verticalalignment='center', - 
horizontalalignment='center', color=plotColor, fontsize=fontSize)
+ center=patch.getCenter()
+ plotAxis.text(center[1]*zoom,center[0]*zoom,key,verticalalignment='center', horizontalalignment='center',color=plotColor,fontsize=fontSize)
plotAxis.set_axis_off()
- if isTitle: plotAxis.set_title(name)
+ if isTitle:plotAxis.set_title(name)
return plotAxis.get_figure()
+
def getBaselineFluorscence(self):
''' get mean baseline fluorescence of each visual area
@@ -2906,42 +2834,44 @@ def getBaselineFluorscence(self):
vasMap = ia.array_nor(self.vasculatureMap)
- # get V1 mean fluorscence
+ #get V1 mean fluorescence
try:
- V1 = finalPatches['V1']
+ V1 = finalPatches['V1']
except KeyError:
- V1 = finalPatches['patch01']
+ V1 = finalPatches['patch01']
V1array = V1.array
- zoom = vasMap.shape[-1] / V1array.shape[-1]
+ zoom = vasMap.shape[-1]/V1array.shape[-1]
- if zoom != 1:
- V1array = ni.zoom(V1array, zoom)
+ if zoom!=1:
+ V1array = ni.zoom(V1array,zoom)
V1array = ia.binarize(V1array, 0.5)
V1area = np.sum(V1array).astype(np.float)
- V1totalF = np.sum(V1array * vasMap).astype(np.float)
- V1meanF = V1totalF / V1area
+ V1totalF = np.sum(V1array*vasMap).astype(np.float)
+ V1meanF = V1totalF/V1area
- # get fluorscence for all visual areas normalized by V1
+ #get fluorescence for all visual areas normalized by V1
baselineDict = {}
- for key, patch in finalPatches.iteritems():
+ for key, patch in finalPatches.items():
array = patch.array
- if zoom != 1:
- array = ni.zoom(array, zoom)
+ if zoom!=1:
+ array = ni.zoom(array,zoom)
array = ia.binarize(array, 0.5)
area = np.sum(array).astype(np.float)
- totalF = np.sum(array * vasMap).astype(np.float)
+ totalF = np.sum(array*vasMap).astype(np.float)
+
+ meanFnor = (totalF/area)/V1meanF
- meanFnor = (totalF / area) / V1meanF
+ baselineDict.update({key:meanFnor})
- baselineDict.update({key: meanFnor})
return baselineDict
+
def getMeanPowerAmplitude(self):
''' get mean response power amplitude of each visual area
@@ -2953,39 +2883,40 @@ def getMeanPowerAmplitude(self):
finalPatches = self.finalPatches
try:
- powerMap = ia.array_nor(np.mean([self.altPowerMapf, self.aziPowerMapf], axis=0))
+ powerMap=ia.array_nor(np.mean([self.altPowerMapf, self.aziPowerMapf], axis=0))
except AttributeError:
- _ = self._getSignMap()
- powerMap = ia.array_nor(np.mean([self.altPowerMapf, self.aziPowerMapf], axis=0))
+ _=self._getSignMap()
+ powerMap=ia.array_nor(np.mean([self.altPowerMapf, self.aziPowerMapf], axis=0))
- # get V1 mean fluorscence
+ #get V1 mean power
try:
- V1 = finalPatches['V1']
+ V1 = finalPatches['V1']
except KeyError:
- V1 = finalPatches['patch01']
+ V1 = finalPatches['patch01']
V1array = V1.array
V1area = np.sum(V1array).astype(np.float)
- V1totalPower = np.sum(V1array * powerMap).astype(np.float)
- V1meanPower = V1totalPower / V1area
+ V1totalPower = np.sum(V1array*powerMap).astype(np.float)
+ V1meanPower = V1totalPower/V1area
- # get mean power amplitude for all visual areas normalized by V1
+ #get mean power amplitude for all visual areas normalized by V1
meanPowerDict = {}
- for key, patch in finalPatches.iteritems():
+ for key, patch in finalPatches.items():
array = patch.array
area = np.sum(array).astype(np.float)
- totalPower = np.sum(array * powerMap).astype(np.float)
+ totalPower = np.sum(array*powerMap).astype(np.float)
- meanPowerNor = (totalPower / area) / V1meanPower
+ meanPowerNor = (totalPower/area)/V1meanPower
- meanPowerDict.update({key: meanPowerNor})
+ meanPowerDict.update({key:meanPowerNor})
return meanPowerDict
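# A usage sketch for the two normalized-response helpers above ('trial' is a hypothetical,
# already-processed RetinotopicMappingTrial); both return {areaName: value} dictionaries
# normalized to V1, so V1 itself should come back as 1.0:
#
# baseline_f = trial.getBaselineFluorscence()
# mean_power = trial.getMeanPowerAmplitude()
# for area_n in sorted(baseline_f.keys()):
#     print('{}: baseline {:.2f}, power {:.2f}'.format(area_n, baseline_f[area_n], mean_power[area_n]))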
- def getCorticalArea(self, pixelSize=0.0129):
+
+ def getCorticalArea(self,pixelSize = 0.0129):
''' get area of each visual area (mm^2) unit of pixelSize is mm '''
@@ -2996,25 +2927,27 @@ def getCorticalArea(self, pixelSize=0.0129):
except AttributeError:
finalPatches = self.finalPatches
- # get mean power amplitude for all visual areas normalized by V1
+ #get cortical area for each visual area
areaDict = {}
- for key, patch in finalPatches.iteritems():
- area = patch.getArea().astype(np.float) * (pixelSize ** 2)
+ for key, patch in finalPatches.items():
+
+ area = patch.getArea().astype(np.float)*(pixelSize**2)
- areaDict.update({key: area})
+ areaDict.update({key:area})
return areaDict
- def getMagnification(self, pixelSize=0.0129, isFilter=False, erodeIter=None, ):
+
+ def getMagnification(self,pixelSize = 0.0129, isFilter = False,erodeIter = None,):
''' get magnification of each visual area (mm^2/deg^2) unit of pixelSize is mm '''
- if not hasattr(self, 'determinantMap'):
+ if not hasattr(self,'determinantMap'):
_ = self._getDeterminantMap()
- if hasattr(self, 'finalPathesMarked'):
+ if hasattr(self,'finalPatchesMarked'):
finalPatches = self.finalPatchesMarked
elif hasattr(self, 'finalPatches'):
finalPatches = self.finalPatches
@@ -3022,27 +2955,28 @@ def getMagnification(self, pixelSize=0.0129, isFilter=False, erodeIter=None, ):
self.processTrial()
finalPatches = self.finalPatches
- magMap = 1 / self.determinantMap
+ magMap = 1/self.determinantMap
if isFilter:
- magMap = ni.filters.gaussian_filter(magMap, self.params['signMapFilterSigma'])
+ magMap = ni.filters.gaussian_filter(magMap,self.params['signMapFilterSigma'])
- # get mean power amplitude for all visual areas normalized by V1
+ #get magnification for each visual area
magDict = {}
- for key, patch in finalPatches.iteritems():
+ for key, patch in finalPatches.items():
array = patch.array.astype(np.float)
if erodeIter:
- array = ni.binary_erosion(array, iterations=erodeIter)
+ array = ni.binary_erosion(array,iterations=erodeIter)
area = np.sum(array)
- totalMag = np.sum(array * magMap)
+ totalMag = np.sum(array*magMap)
- magDict.update({key: (pixelSize ** 2) * totalMag / area})
+ magDict.update({key:(pixelSize**2)*totalMag/area})
return magDict
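# A sketch combining the two geometry helpers above (pixelSize in mm; 'trial' is hypothetical):
#
# areas = trial.getCorticalArea(pixelSize=0.0129)   # mm^2 per visual area
# mags = trial.getMagnification(pixelSize=0.0129)   # mm^2/deg^2 per visual area
# # dividing area by magnification gives a rough visual-field coverage estimate in deg^2
# coverage_deg2 = {k: areas[k] / mags[k] for k in areas if k in mags}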
+
def getVisualFieldOrigin(self):
''' get the visual field origin as the retinotopic coordinates at the pixels
@@ -3052,49 +2986,50 @@ def getVisualFieldOrigin(self):
mean retinotopic locations of all overlap pixels '''
- if not hasattr(self, 'finalPatchesMarked'):
- raise LookupError, 'Please mark the final patches first!!'
+ if not hasattr(self,'finalPatchesMarked'):
+ raise LookupError('Please mark the final patches first!!')
- if not hasattr(self, 'altPosMapf'):
- _ = self._getSignMap()
+ if not hasattr(self,'altPosMapf'):
+ _=self._getSignMap()
try:
V1 = self.finalPatchesMarked['V1'].array.astype(np.float)
LM = self.finalPatchesMarked['LM'].array.astype(np.float)
RL = self.finalPatchesMarked['RL'].array.astype(np.float)
- overlap = 0 # number of overlaping pixels
- iterNum = 1 # number of iteration
- while overlap < 1:
- # print 'Iteration number for finding overlapping pixel:', iterNum
- V1 = ni.morphology.binary_dilation(V1, iterations=1).astype(np.float)
- LM = ni.morphology.binary_dilation(LM, iterations=1).astype(np.float)
- RL = ni.morphology.binary_dilation(RL, iterations=1).astype(np.float)
- totalField = V1 + LM + RL
- # plt.imshow(totalField)
- overlap = len(np.argwhere(totalField == 3))
+ overlap=0 #number of overlapping pixels
+ iterNum = 1 #number of iterations
+ while overlap<1:
+ # print 'Iteration number for finding overlapping pixel:', iterNum
+ V1=ni.morphology.binary_dilation(V1,iterations=1).astype(np.float)
+ LM=ni.morphology.binary_dilation(LM,iterations=1).astype(np.float)
+ RL=ni.morphology.binary_dilation(RL,iterations=1).astype(np.float)
+ totalField = V1+LM+RL
+ # plt.imshow(totalField)
+ overlap = len(np.argwhere(totalField==3))
iterNum += 1
- # print 'Number of overlapping pixels:', overlap
- # plt.show()
+ # print 'Number of overlapping pixels:', overlap
+ # plt.show()
- altPosOrigin = np.mean(self.altPosMapf[totalField == 3], axis=0)
- aziPosOrigin = np.mean(self.aziPosMapf[totalField == 3], axis=0)
+ altPosOrigin = np.mean(self.altPosMapf[totalField==3],axis=0)
+ aziPosOrigin = np.mean(self.aziPosMapf[totalField==3],axis=0)
except KeyError:
- print 'Can not find necessary visual areas (V1, LM, RL) for normalization. \nSetting origins to 0 ...'
+ print('Cannot find necessary visual areas (V1, LM, RL) for normalization. \nSetting origins to 0 ...')
altPosOrigin = 0.
aziPosOrigin = 0.
return altPosOrigin, aziPosOrigin
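# Usage sketch (hypothetical 'trial' with V1, LM and RL marked): the origin returned here is
# what plotContours subtracts from the position maps when isNormalize is True:
#
# alt0, azi0 = trial.getVisualFieldOrigin()
# alt_map_centered = trial.altPosMapf - alt0
# azi_map_centered = trial.aziPosMapf - azi0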
- def plotMagnificationMap(self, pixelSize=0.0129, plotAxis=None, isFilter=False):
+
+ def plotMagnificationMap(self,pixelSize = 0.0129,plotAxis = None,isFilter = False):
''' param pixelSize: mm '''
- if not hasattr(self, 'determinantMap'):
+ if not hasattr(self,'determinantMap'):
_ = self._getDeterminantMap()
- if hasattr(self, 'finalPathesMarked'):
+ if hasattr(self,'finalPatchesMarked'):
finalPatches = self.finalPatchesMarked
elif hasattr(self, 'finalPatches'):
finalPatches = self.finalPatches
@@ -3103,24 +3038,25 @@ def plotMagnificationMap(self, pixelSize=0.0129, plotAxis=None, isFilter=False):
finalPatches = self.finalPatches
name = self.getName()
- magMap = (pixelSize ** 2) / self.determinantMap
+ magMap = (pixelSize**2)/self.determinantMap
if isFilter:
- magMap = ni.filters.gaussian_filter(magMap, self.params['signMapFilterSigma'])
+ magMap = ni.filters.gaussian_filter(magMap,self.params['signMapFilterSigma'])
if not plotAxis:
- f = plt.figure(figsize=(10, 10))
+ f=plt.figure(figsize=(10,10))
ax = f.add_subplot(111)
else:
ax = plotAxis
- for key, patch in finalPatches.iteritems():
- currMagMap = patch.getMask() * magMap
- ax.imshow(currMagMap, cmap='hot_r', vmin=0, vmax=0.015, interpolation='nearest')
+ for key, patch in finalPatches.items():
+ currMagMap = patch.getMask()*magMap
+ ax.imshow(currMagMap,cmap='hot_r',vmin = 0, vmax = 0.015,interpolation='nearest')
ax.set_aspect(1)
ax.set_title(name)
+
def _generateTotalMask(self):
''' generate a single mask (0s and 1s) of the entire visual areas '''
mask = np.zeros(self.altPosMap.shape)
- for patch in self.finalPatches.itervalues():
+ for patch in self.finalPatches.values():
mask = mask + patch.array.astype(np.float)
mask = ni.binary_closing(mask,
- structure=np.array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]),
- iterations=self.params['borderWidth'])
+ structure = np.array([[1.,1.,1.],[1.,1.,1.],[1.,1.,1.]]),
+ iterations = self.params['borderWidth'])
+
return mask.astype(np.int8)
- def plotRetinotopicLocation(self, plotAxis=None, location=(0., 50.), color='#ff0000', searchRange=3., borderWidth=1,
- closeIter=3, openIter=3):
+
+ def plotRetinotopicLocation(self,plotAxis=None,location=(0.,50.),color='#ff0000',searchRange=3.,borderWidth=1,closeIter=3,openIter=3):
if not plotAxis:
f = plt.figure()
@@ -3147,45 +3084,47 @@ def plotRetinotopicLocation(self, plotAxis=None, location=(0., 50.), color='#ff0
altPosMap = self.altPosMapf
aziPosMap = self.aziPosMapf
- altMin = location[0] - np.abs(searchRange)
- altMax = location[0] + np.abs(searchRange)
+ altMin = location[0]-np.abs(searchRange)
+ altMax = location[0]+np.abs(searchRange)
- aziMin = location[1] - np.abs(searchRange)
- aziMax = location[1] + np.abs(searchRange)
+ aziMin = location[1]-np.abs(searchRange)
+ aziMax = location[1]+np.abs(searchRange)
- altMask = np.logical_and(altPosMap >= altMin, altPosMap <= altMax)
- aziMask = np.logical_and(aziPosMap >= aziMin, aziPosMap <= aziMax)
+ altMask = np.logical_and(altPosMap>=altMin,altPosMap<=altMax)
+ aziMask = np.logical_and(aziPosMap>=aziMin,aziPosMap<=aziMax)
- mask = np.logical_and(altMask, aziMask).astype(np.float)
+ mask = np.logical_and(altMask,aziMask).astype(np.float)
totalMask = self._generateTotalMask()
- mask = (mask * totalMask).astype(np.int)
+ mask = (mask*totalMask).astype(np.int)
- mask = ni.binary_closing(mask, iterations=closeIter)
+ mask = ni.binary_closing(mask,iterations=closeIter)
- mask = ni.binary_opening(mask,
iterations=openIter) + mask = ni.binary_opening(mask,iterations=openIter) - mask = mask.astype(np.float) - mask[mask == 0] = np.nan + mask=mask.astype(np.float) + mask[mask==0]=np.nan pt.plot_mask(mask, plotAxis=plotAxis, color=color, borderWidth=borderWidth) - def plotPatchesWithName(self, patchDict, plotAxis=None): - if not hasattr(self, - patchDict): raise LookupError, 'This RetinotopicMappingTrial object does not have "' + patchDict + '" attribute!' + def plotPatchesWithName(self,patchDict,plotAxis=None): + + if not hasattr(self,patchDict): raise LookupError('This RetinotopicMappingTrial object does not have "' + patchDict + '" attribute!') patchesForPlotting = self.__dict__[patchDict] if plotAxis is None: f = plt.figure(); plotAxis = f.add_subplot(111) plotAxis.figure.suptitle(self.getName()) - plotPatches(patchesForPlotting, plotaxis=plotAxis, markersize=0) + plotPatches(patchesForPlotting,plotaxis=plotAxis,markersize=0) + + for key,patch in patchesForPlotting.items(): - for key, patch in patchesForPlotting.iteritems(): center = patch.getCenter() - plotAxis.text(center[1], center[0], key, verticalalignment='center', horizontalalignment='center') + plotAxis.text(center[1],center[0],key,verticalalignment='center', horizontalalignment='center') return plotAxis.figure + def plotVisualCoverage(self, is_normalize=False, altRange=(-40., 60.), aziRange=(-20., 100.)): ''' plot the visual coverage of each visual area in a compact way @@ -3193,9 +3132,9 @@ def plotVisualCoverage(self, is_normalize=False, altRange=(-40., 60.), aziRange= if is_normalize is True, the retinotopy will correct for visual origin ''' - if hasattr(self, 'finalPatchesMarked'): + if hasattr(self,'finalPatchesMarked'): finalPatches = self.finalPatchesMarked - elif hasattr(self, 'finalPatches'): + elif hasattr(self,'finalPatches'): finalPatches = self.finalPatches else: self.processTrial() @@ -3206,23 +3145,23 @@ def plotVisualCoverage(self, is_normalize=False, altRange=(-40., 60.), aziRange= else: visualFieldOrigin = None - figList, axList = pt.grid_axis(3, 4, len(finalPatches.keys()), figsize=(12, 10)) + figList, axList = pt.grid_axis(3, 4, len(list(finalPatches.keys())), figsize=(12, 10)) i = 0 pixelSize = self.params['visualSpacePixelSize'] closeIter = self.params['visualSpaceCloseIter'] - for key, patch in finalPatches.iteritems(): + for key, patch in finalPatches.items(): currAx = axList[i] - visualSpace, altAxis, aziAxis = patch.getVisualSpace(self.altPosMapf, - self.aziPosMapf, - altRange=altRange, - aziRange=aziRange, - visualFieldOrigin=visualFieldOrigin, - pixelSize=pixelSize, - closeIter=closeIter, - isplot=False) + visualSpace, altAxis, aziAxis=patch.getVisualSpace(self.altPosMapf, + self.aziPosMapf, + altRange=altRange, + aziRange=aziRange, + visualFieldOrigin=visualFieldOrigin, + pixelSize = pixelSize, + closeIter = closeIter, + isplot = False) if patch.sign == 1: plotColor = '#ff0000' @@ -3241,26 +3180,27 @@ def plotVisualCoverage(self, is_normalize=False, altRange=(-40., 60.), aziRange= currAx.set_title(key) - i = i + 1 + i=i+1 return figList, axList + def plotContours(self, - isNormalize=True, # is resetting the origin of visual field - altLevels=np.arange(-30., 50., 5.), - aziLevels=np.arange(0., 120., 5.), + isNormalize = True, #is resetting the origin of visual field + altLevels = np.arange(-30.,50.,5.), + aziLevels = np.arange(0.,120.,5.), isPlottingBorder=True, - inline=False, - lineWidth=3, - figSize=(12, 12), - fontSize=15, - altAxis=None, - aziAxis=None): + inline = False, + lineWidth = 3, + 
figSize = (12,12),
+ fontSize = 15,
+ altAxis = None,
+ aziAxis = None):
''' plot contours of altitude position and azimuth position '''
- if not hasattr(self, 'altPosMapf'):
+ if not hasattr(self,'altPosMapf'):
self._getSignMap()
altPosMap = self.altPosMapf
@@ -3271,24 +3211,25 @@ def plotContours(self,
altPosMap = altPosMap - altPosOrigin
aziPosMap = aziPosMap - aziPosOrigin
- if hasattr(self, 'vasculatureMap') and type(self.vasculatureMap) != type(None) and isPlottingBorder:
- zoom = self.vasculatureMap.shape[0] / altPosMap.shape[0]
- altPosMap = ni.zoom(altPosMap, zoom)
- aziPosMap = ni.zoom(aziPosMap, zoom)
- totalMask = ni.zoom(self._generateTotalMask().astype(np.float32), zoom)
- altPosMap[totalMask < 0.5] = np.nan
- aziPosMap[totalMask < 0.5] = np.nan
+ if hasattr(self,'vasculatureMap') and type(self.vasculatureMap)!=type(None) and isPlottingBorder:
+ zoom = self.vasculatureMap.shape[0]/altPosMap.shape[0]
+ altPosMap = ni.zoom(altPosMap,zoom)
+ aziPosMap = ni.zoom(aziPosMap,zoom)
+ totalMask = ni.zoom(self._generateTotalMask().astype(np.float32),zoom)
+ altPosMap[totalMask<0.5]=np.nan
+ aziPosMap[totalMask<0.5]=np.nan
else:
totalMask = self._generateTotalMask()
- altPosMap[totalMask == 0] = np.nan
- aziPosMap[totalMask == 0] = np.nan
+ altPosMap[totalMask==0]=np.nan
+ aziPosMap[totalMask==0]=np.nan
+
+ X,Y = np.meshgrid(np.arange(altPosMap.shape[1]),
+ np.arange(altPosMap.shape[0]))
- X, Y = np.meshgrid(np.arange(altPosMap.shape[1]),
- np.arange(altPosMap.shape[0]))
# plotting altitude contours
if not altAxis:
- altf = plt.figure(figsize=figSize, facecolor='#ffffff')
+ altf=plt.figure(figsize=figSize,facecolor='#ffffff')
altAxis = altf.add_subplot(111)
altContour = altAxis.contour(X,
@@ -3299,7 +3240,7 @@ def plotContours(self,
linewidths=lineWidth)
if inline:
- altContour.clabel(inline=inline, fontsize=fontSize, fmt='%1.1f')
+ altContour.clabel(inline=inline, fontsize=fontSize,fmt='%1.1f')
else:
altAxis.get_figure().colorbar(altContour)
@@ -3313,9 +3254,10 @@ def plotContours(self,
altAxis.set_title('Altitude Positions')
+
# plotting azimuth contours
if not aziAxis:
- azif = plt.figure(figsize=figSize, facecolor='#ffffff')
+ azif=plt.figure(figsize=figSize,facecolor='#ffffff')
aziAxis = azif.add_subplot(111)
aziContour = aziAxis.contour(X,
@@ -3325,7 +3267,7 @@ def plotContours(self,
levels=aziLevels,
linewidths=lineWidth)
if inline:
- aziContour.clabel(inline=1, fontsize=fontSize, fmt='%1.1f')
+ aziContour.clabel(inline=1, fontsize=fontSize,fmt='%1.1f')
else:
aziAxis.get_figure().colorbar(aziContour)
@@ -3337,6 +3279,7 @@ def plotContours(self,
borderWidth=lineWidth,
interpolation='bilinear')
+
aziAxis.set_title('Azimuth Positions')
return altAxis, aziAxis
@@ -3344,20 +3287,17 @@ def plotContours(self,
class Patch(object):
- def __init__(self, patchArray, sign):
+ def __init__(self,patchArray,sign):
- if isinstance(patchArray, sparse.coo_matrix):
- self.sparseArray = patchArray.astype(np.uint8)
+ if isinstance(patchArray,sparse.coo_matrix):self.sparseArray=patchArray.astype(np.uint8)
else:
arr = patchArray.astype(np.int8)
arr[arr > 0] = 1
arr[arr == 0] = 0
self.sparseArray = sparse.coo_matrix(arr)
- if sign == 1 or sign == 0 or sign == -1:
- self.sign = int(sign)
- else:
- raise ValueError, 'Sign should be -1, 0 or 1!'
+ if sign==1 or sign==0 or sign==-1: self.sign = int(sign)
+ else: raise ValueError('Sign should be -1, 0 or 1!')
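# Construction sketch: Patch accepts either a dense binary array or a scipy.sparse.coo_matrix,
# plus a visual-field sign of -1, 0 or 1 (toy mask for illustration only):
#
# import numpy as np
# toy_mask = np.zeros((8, 8), dtype=np.int8)
# toy_mask[2:5, 3:6] = 1
# toy_patch = Patch(toy_mask, 1)
# print(toy_patch.getCenter(), toy_patch.getArea())  # -> [3 4] 9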
@property
def array(self):
@@ -3369,7 +3309,7 @@ def getCenter(self):
[rowIndex, columnIndex] '''
pixels = np.argwhere(self.array)
- center = np.mean(pixels.astype(np.float32), axis=0)
+ center = np.mean(pixels.astype(np.float32), axis = 0)
return np.round(center).astype(np.int)
def getArea(self):
@@ -3382,7 +3322,7 @@ def getMask(self):
''' generating plotting mask for the patch '''
- mask = np.array(self.array, dtype=np.float32)
+ mask = np.array(self.array, dtype = np.float32)
mask[mask == 0] = np.nan
return mask
@@ -3390,29 +3330,29 @@ def getSignedMask(self):
''' generating plotting mask with visual sign for the patch '''
- signedMask = np.array(self.array * self.sign, dtype=np.float32)
+ signedMask = np.array(self.array * self.sign, dtype = np.float32)
signedMask[signedMask == 0] = np.nan
return signedMask
def getDict(self):
- return {'sparseArray': self.sparseArray, 'sign': self.sign}
+ return {'sparseArray':self.sparseArray,'sign':self.sign}
- def getTrace(self, mov):
+ def getTrace(self,mov):
''' return trace of this patch in a certain movie '''
return ia.get_trace(mov, self.array)
- def isTouching(self, patch2, distance=1):
+ def isTouching(self, patch2, distance = 1):
''' decide if this patch is adjacent to another patch within certain distance '''
if distance < 1:
- raise LookupError, 'distance should be integer no less than 1.'
+ raise LookupError('distance should be integer no less than 1.')
bigPatch = ni.binary_dilation(self.array,
- iterations=distance).astype(np.int)
+ iterations = distance).astype(np.int)
if np.amax(bigPatch + patch2.array) > 1:
return True
@@ -3452,8 +3392,7 @@ def getVisualSpace(self, altMap, aziMap, altRange=(-40., 60.), aziRange=(-20., 1
if patchArray[i, j]:
corAlt = altMap[i, j]
corAzi = aziMap[i, j]
- if (corAlt >= altRange[0]) & (corAlt <= altRange[1]) & (corAzi >= aziRange[0]) & (
- corAzi <= aziRange[1]):
+ if (corAlt >= altRange[0]) & (corAlt <= altRange[1]) & (corAzi >= aziRange[0]) & (corAzi <= aziRange[1]):
indAlt = (corAlt - altRange[0]) // pixelSize
indAzi = (corAzi - aziRange[0]) // pixelSize
visualSpace[int(indAlt), int(indAzi)] = 1
@@ -3508,25 +3447,25 @@ def eccentricityMap(self, altMap, aziMap, altCenter, aziCenter):
aziCenter2 = aziCenter * np.pi / 180
eccMap = np.zeros(self.array.shape)
- # eccMap[:] = np.nan
- # for i in xrange(self.array.shape[0]):
- # for j in xrange(self.array.shape[1]):
- # if self.array[i,j]:
- # alt = altMap2[i,j]
- # azi = aziMap2[i,j]
- # eccMap[i,j] = np.arctan(np.sqrt(np.tan(alt-altCenter2)**2 + ((np.tan(azi-aziCenter2)**2)/(np.cos(alt-altCenter2)**2))))
+# eccMap[:] = np.nan
+# for i in xrange(self.array.shape[0]):
+# for j in xrange(self.array.shape[1]):
+# if self.array[i,j]:
+# alt = altMap2[i,j]
+# azi = aziMap2[i,j]
+# eccMap[i,j] = np.arctan(np.sqrt(np.tan(alt-altCenter2)**2 + ((np.tan(azi-aziCenter2)**2)/(np.cos(alt-altCenter2)**2))))
eccMap = np.arctan(
- np.sqrt(
- np.square(np.tan(altMap2 - altCenter2))
- +
- np.square(np.tan(aziMap2 - aziCenter2)) / np.square(np.cos(altMap2 - altCenter2))
- )
- )
+ np.sqrt(
+ np.square(np.tan(altMap2-altCenter2))
+ +
+ np.square(np.tan(aziMap2-aziCenter2))/np.square(np.cos(altMap2-altCenter2))
+ )
+ )
eccMap = eccMap * 180 / np.pi
- eccMap[self.array == 0] = np.nan
+ eccMap[self.array==0]=np.nan
return eccMap
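# The vectorized expression above computes, per pixel, the angular distance from the patch
# center: ecc = arctan(sqrt(tan(dAlt)**2 + tan(dAzi)**2 / cos(dAlt)**2)), converted back to
# degrees. A standalone numpy sanity check (no trial data required):
#
# import numpy as np
# d_alt, d_azi = np.deg2rad(10.), np.deg2rad(0.)
# ecc = np.rad2deg(np.arctan(np.sqrt(np.tan(d_alt) ** 2 + np.tan(d_azi) ** 2 / np.cos(d_alt) ** 2)))
# assert abs(ecc - 10.) < 1e-6  # a pure-altitude offset of 10 deg gives 10 deg of eccentricity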
- def split2(self, eccMap, patchName='patch00', cutStep=1, borderWidth=2, isplot=False):
+ def split2(self, eccMap, patchName = 'patch00', cutStep = 1, borderWidth = 2, isplot = False):
''' split this patch into two or more patches, according to the eccentricity map (in degree).
return a dictionary of patches after split
@@ -3535,56 +3474,60 @@
'''
minMarker = localMin(eccMap, cutStep)
- connectivity = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
+ connectivity=np.array([[1,1,1],[1,1,1],[1,1,1]])
- newLabel = sm.watershed(eccMap, minMarker, connectivity=connectivity, mask=self.array)
+ newLabel = sm.watershed(eccMap, minMarker, connectivity=connectivity, mask = self.array)
border = ni.binary_dilation(self.array).astype(np.int8) - self.array
- for i in xrange(1, np.amax(newLabel) + 1):
- currArray = np.zeros(self.array.shape, dtype=np.int8)
+ for i in range(1,np.amax(newLabel)+1):
+ currArray = np.zeros(self.array.shape, dtype = np.int8)
currArray[newLabel == i] = 1
currBorder = ni.binary_dilation(currArray).astype(np.int8) - currArray
- border = border + currBorder
+ border = border+currBorder
border[border > 1] = 1
border = sm.skeletonize(border)
+
if borderWidth > 1:
- border = ni.binary_dilation(border, iterations=borderWidth - 1).astype(np.int8)
+ border = ni.binary_dilation(border, iterations = borderWidth - 1).astype(np.int8)
newPatchMap = ni.binary_dilation(self.array).astype(np.int8) * (-1 * (border - 1))
+
labeledNewPatchMap, patchNum = ni.label(newPatchMap)
- # if patchNum != np.amax(newLabel):
- # print 'number of patches: ', patchNum, '; number of local minimum:', np.amax(newLabel)
- # raise ValueError, "Number of patches after splitting does not equal to number of local minimum!"
+
+# if patchNum != np.amax(newLabel):
+# print 'number of patches: ', patchNum, '; number of local minimum:', np.amax(newLabel)
+# raise ValueError, "Number of patches after splitting does not equal to number of local minimum!"
newPatchDict = {}
- for j in xrange(1, patchNum + 1):
+ for j in range(1, patchNum + 1):
currPatchName = patchName + '.' + str(j)
- currArray = np.zeros(self.array.shape, dtype=np.int8)
+ currArray = np.zeros(self.array.shape, dtype = np.int8)
currArray[labeledNewPatchMap == j] = 1
currArray = currArray * self.array
if np.sum(currArray[:]) > 0:
- newPatchDict.update({currPatchName: Patch(currArray, self.sign)})
+ newPatchDict.update({currPatchName : Patch(currArray, self.sign)})
if isplot:
plt.figure()
plt.subplot(121)
- plt.imshow(self.array, cmap='jet', interpolation='nearest')
+ plt.imshow(self.array, interpolation = 'nearest')
plt.title(patchName + ': before split')
plt.subplot(122)
- plt.imshow(labeledNewPatchMap, cmap='jet', interpolation='nearest')
+ plt.imshow(labeledNewPatchMap, interpolation = 'nearest')
plt.title(patchName + ': after split')
+
return newPatchDict
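# Note on the two variants: split2 above runs the scikit-image watershed (sm.watershed) seeded
# by local minima of the eccentricity map, while split below drives cv2.watershed with the same
# markers; both return a {patchName + '.j': Patch} dictionary. A call sketch (eccMap is a
# hypothetical eccentricity array covering this patch):
#
# sub_patches = patch.split2(eccMap, patchName='patch01', cutStep=1, borderWidth=2, isplot=False)
# print(sorted(sub_patches.keys()))  # e.g. ['patch01.1', 'patch01.2']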
- def split(self, eccMap, patchName='patch00', cutStep=1, borderWidth=2, isplot=False):
+ def split(self, eccMap, patchName = 'patch00', cutStep = 1, borderWidth = 2, isplot = False):
''' split this patch into two or more patches, according to the eccentricity map (in degree).
return a dictionary of patches after split
@@ -3594,7 +3537,7 @@ def split(self, eccMap, patchName='patch00', cutStep=1, borderWidth=2, isplot=Fa
'''
minMarker = localMin(eccMap, cutStep)
plt.figure()
- plt.imshow(minMarker, cmap='jet', vmin=0, interpolation='nearest')
+ plt.imshow(minMarker, vmin = 0, interpolation='nearest')
plt.colorbar()
plt.title('markers 1')
plt.show()
@@ -3602,34 +3545,36 @@ def split(self, eccMap, patchName='patch00', cutStep=1, borderWidth=2, isplot=Fa
minMarker = minMarker.astype(np.int32)
selfArray = self.array.astype(np.int32)
minMarker = minMarker + 1
- minMarker[minMarker == 1] = 0
- minMarker = minMarker + (-1 * (selfArray - 1))
- # minMarker: marker type for opencv watershed,
- # sure background = 1
- # unknow = 0
- # sure forgrand = 2,3,4... etc
+ minMarker[minMarker==1] = 0
+ minMarker = minMarker + (-1 * (selfArray-1))
+ #minMarker: marker type for opencv watershed,
+ #sure background = 1
+ #unknown = 0
+ #sure foreground = 2,3,4... etc
plt.figure()
- plt.imshow(minMarker, cmap='jet', vmin=0, interpolation='nearest')
+ plt.imshow(minMarker, vmin = 0, interpolation='nearest')
plt.colorbar()
plt.title('markers 2')
plt.show()
eccMapNor = (np.round(ia.array_nor(eccMap) * 255)).astype(np.uint8)
- eccMapRGB = cv2.cvtColor(eccMapNor, cv2.COLOR_GRAY2RGB)
- # eccMapRGB: image type for opencv watershed, RGB, [uint8, uint8, uint8]
+ eccMapRGB = cv2.cvtColor(eccMapNor,cv2.COLOR_GRAY2RGB)
+ #eccMapRGB: image type for opencv watershed, RGB, [uint8, uint8, uint8]
newLabel = cv2.watershed(eccMapRGB, minMarker)
plt.figure()
- plt.imshow(newLabel, cmap='jet', vmin=0, interpolation='nearest')
+ plt.imshow(newLabel, vmin = 0, interpolation='nearest')
plt.colorbar()
plt.title('markers 3')
plt.show()
newBorder = np.zeros(newLabel.shape).astype(np.int)
- newBorder[newLabel == -1] = 1
+ newBorder[newLabel==-1]=1
+
+
border = ni.binary_dilation(self.array).astype(np.int) - self.array
@@ -3639,63 +3584,67 @@
border = sm.skeletonize(border)
+
if borderWidth > 1:
- border = ni.binary_dilation(border, iterations=borderWidth - 1).astype(np.int8)
+ border = ni.binary_dilation(border, iterations = borderWidth - 1).astype(np.int8)
newPatchMap = ni.binary_dilation(self.array).astype(np.int8) * (-1 * (border - 1))
+
labeledNewPatchMap, patchNum = ni.label(newPatchMap)
- # if patchNum != np.amax(newLabel):
- # print 'number of patches: ', patchNum, '; number of local minimum:', np.amax(newLabel)
- # raise ValueError, "Number of patches after splitting does not equal to number of local minimum!"
+
+# if patchNum != np.amax(newLabel):
+# print 'number of patches: ', patchNum, '; number of local minimum:', np.amax(newLabel)
+# raise ValueError, "Number of patches after splitting does not equal to number of local minimum!"
newPatchDict = {}
- for j in xrange(1, patchNum + 1):
+ for j in range(1, patchNum + 1):
currPatchName = patchName + '.'
+ str(j)
- currArray = np.zeros(self.array.shape, dtype=np.int8)
+ currArray = np.zeros(self.array.shape, dtype = np.int8)
currArray[labeledNewPatchMap == j] = 1
currArray = currArray * self.array
if np.sum(currArray[:]) > 0:
- newPatchDict.update({currPatchName: Patch(currArray, self.sign)})
+ newPatchDict.update({currPatchName : Patch(currArray, self.sign)})
if isplot:
plt.figure()
plt.subplot(121)
- plt.imshow(self.array, cmap='jet', interpolation='nearest')
+ plt.imshow(self.array, interpolation = 'nearest')
plt.title(patchName + ': before split')
plt.subplot(122)
- plt.imshow(labeledNewPatchMap, cmap='jet', interpolation='nearest')
+ plt.imshow(labeledNewPatchMap, interpolation = 'nearest')
plt.title(patchName + ': after split')
+
return newPatchDict
- def getBorder(self, borderWidth=2):
+ def getBorder(self, borderWidth = 2):
''' return border of this patch with border width defined by "borderWidth" '''
- patchMap = np.array(self.array, dtype=np.float32)
+ patchMap = np.array(self.array, dtype = np.float32)
- smallPatch = ni.binary_erosion(patchMap, iterations=borderWidth).astype(np.float32)
+ smallPatch = ni.binary_erosion(patchMap, iterations = borderWidth).astype(np.float32)
border = patchMap - smallPatch
- border[border == 0] = np.nan
+ border[border==0] = np.nan
return border
- def getCorticalPixelForVisualSpaceCenter(self, eccMap):
+ def getCorticalPixelForVisualSpaceCenter(self,eccMap):
''' return the coordinates of the pixel representing the center of the visual space of the patch '''
- eccMap2 = np.array(eccMap).astype(np.float)
+ eccMap2=np.array(eccMap).astype(np.float)
- eccMap2[self.array == 0] = np.nan
+ eccMap2[self.array==0]=np.nan
cor = np.array(np.where(eccMap2 == np.nanmin(eccMap2))).transpose()
@@ -3703,185 +3652,192 @@
if __name__ == "__main__":
+ plt.ioff()
- # testTrial = ft.loadFile(r'\\aibsdata2\nc-ophys\Jun\exampleData\testTrial.pkl')
-
- # params = {'borderWidth': 1,
- # 'closeIter': 3,
- # 'dilationIter': 10,
- # 'eccMapFilterSigma': 5.0,
- # 'mergeOverlapThr': 0.1,
- # 'openIter': 3,
- # 'phaseMapFilterSigma': 0.8,
- # 'signMapFilterSigma': 9.0,
- # 'signMapThr': 0.3,
- # 'smallPatchThr': 200,
- # 'splitLocalMinCutStep': 10.,
- # 'splitOverlapThr': 1.1,
- # 'visualSpaceCloseIter': 15,
- # 'visualSpacePixelSize': 0.5}
- #
- # trial = RetinotopicMappingTrial(mouseID = testTrial['mouseID'], # str, mouseID
- # dateRecorded = testTrial['dateRecorded'], # int, date recorded, yearmonthday
- # trialNum = testTrial['trialNum'], # str, number of the trail on that day
- # mouseType = testTrial['mouseType'], # str, mouse Genotype
- # visualStimType = testTrial['visualStimType'], # str, stimulation type
- # visualStimBackground = testTrial['visualStimBackground'], # str, background of visual stimulation
- # imageExposureTime = testTrial['imageExposureTime'], # exposure time of the recorded image
- # altPosMap = testTrial['altPosMap'], # altitute position map
- # aziPosMap = testTrial['aziPosMap'], # azimuth position map
- # altPowerMap = testTrial['altPowerMap'], # altitude power map
- # aziPowerMap = testTrial['aziPowerMap'], # azimuth power map
- # vasculatureMap = testTrial['vasculatureMap'], # vasculature map
- # params = params, # testTrial['params'], # parameters for imaging analysis
- # isAnesthetized = testTrial['isAnesthetized'])
- #
- # trial.processTrial(isPlot=True)
- # trial._getSignMap()
- #
- # traces = trial.getTraces(moviePath = r'\\aibsdata2\nc-ophys\Jun\exampleData\testMov.tif', isPlot = True)
# trial.generateStandardOutput(traces=traces) - # - # trialDict = trial.generateTrialDict() - # print trialDict.keys() - # - # trial.plotTrial() - # - # altPosMapfNor, aziPosMapfNor, altPowerMapfNor, aziPowerMapfNor, signMapfNor = trial.generateNormalizedMaps(isPlot = True) - # - # - # trial.plotNormalizedPatchCenter() - # - # trial.plotFinalPatches() - # - # - # vasMapNor, finalPatchesNor = trial.normalize(isPlot = True) - - # plt.show() - - # ------------------------------------------------------------------------------------------------ - # testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\20140623_M140174_Trial3.pkl") - # - # testTrial.plotFinalPatchBorders() - # plt.show() - # - # corArea = testTrial.getCorticalArea() - # print '\nCortical area for each visual area (mm^2):' - # for key, item in corArea.iteritems(): - # print key, ':', item - # - # baselineF = testTrial.getBaselineFluorscence() - # print '\nNormalized baseline fluroscence for each visual area:' - # for key, item in baselineF.iteritems(): - # print key, ':', item - # - # power = testTrial.getMeanPowerAmplitude() - # print '\nNormalized power amplitude for each visual area:' - # for key, item in power.iteritems(): - # print key, ':', item - # - # magnification = testTrial.getMagnification() - # print '\nMagnification for each visual area (mm^2/deg^2):' - # for key, item in magnification.iteritems(): - # print key, ':', item - - # altPosOrigin, aziPosOrigin = testTrial.getVisualFieldOrigin() - # print 'Altitude origin:', altPosOrigin - # print 'Azimuth origin:', aziPosOrigin - - # ------------------------------------------------------------------------------------------------- - - # testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\20150130_M160809_Trial1_2_3_4.pkl") - # visualArea = 'LM' - # patch = testTrial.finalPatchesMarked[visualArea] - # - # testTrial._getSignMap() - # - # f=plt.figure() - # ax=f.add_subplot(111) - # ax.imshow(testTrial.altPosMapf,vmin=-30,vmax=50,cmap='hsv',interpolation='nearest') - # pt.plot_mask(patch.getMask(),plotAxis=ax) - # - # VSlist = patch.getVisualSpace(testTrial.altPosMapf,testTrial.aziPosMapf) - # - # VSSpace, VSCoverage, VSAltCenter, VSAziCenter = VSlist - # print 'Altitute center for area '+visualArea+': '+str(VSAltCenter) - # print 'Azimuth center for area '+visualArea+': '+str(VSAziCenter) - # plt.figure() - # plt.imshow(VSSpace,interpolation='nearest') - # plt.title(visualArea) - # plt.show() - # ------------------------------------------------------------------------------------------------ - - # testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\20150116_M156569_Trial1_2_3_4.pkl") - # visualArea = 'V1' - # patch = testTrial.finalPatchesMarked[visualArea] - # - # testTrial._getSignMap() - # - # f=plt.figure() - # ax=f.add_subplot(111) - # ax.imshow(testTrial.altPosMapf,vmin=-30,vmax=50,cmap='hsv',interpolation='nearest') - # pt.plot_mask(patch.getMask(),plotAxis=ax) - # - # plt.show() - - # ---------------------------------------------------------------------------------------------- - - # testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\populationTrial_Ai9330min.pkl") - # testTrial.plotMagnificationMap(isFilter=False) - # plt.show() - - # ---------------------------------------------------------------------------------------------- - # testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\populationTrial_All.pkl") - # totalMask = testTrial._generateTotalMask() - # plt.imshow(totalMask) - # 
plt.title('total mask of all visual areas') - # plt.show() - - # ---------------------------------------------------------------------------------------------- - # testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\populationTrial_All.pkl") - # - # locationList = [[0.,40.], - # [0.,50.], - # [0.,60.]] - # - # colorList = pt.random_color(len(locationList)) - # - # f=plt.figure(figsize=(12,12)) - # ax=f.add_subplot(111) - # testTrial.plotFinalPatchBorders(plotAxis=ax,plotName=False,isTitle=True) - # - # for i, location in enumerate(locationList): - # testTrial.plotRetinotopicLocation(plotAxis=ax,location=location,color=colorList[i]) - # - # labels = ['alt:'+str(x[0])+'; azi:'+str(x[1]) for x in locationList] - # [ax.plot(None,None,ls='',c=c,label=l) for c,l in zip(colorList,labels)] - # leg = ax.legend(labels,frameon=False) - # for color,text in zip(colorList,leg.get_texts()): - # text.set_color(color) - # - # plt.show() - - # ---------------------------------------------------------------------------------------------- - - # testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\populationTial_Ai93&Ai9630min.pkl") - # testTrial.plotVisualCoverage() - # plt.show() - # - # ---------------------------------------------------------------------------------------------- +# testTrial = ft.loadFile(r'\\aibsdata2\nc-ophys\Jun\exampleData\testTrial.pkl') + +# params = {'borderWidth': 1, +# 'closeIter': 3, +# 'dilationIter': 10, +# 'eccMapFilterSigma': 5.0, +# 'mergeOverlapThr': 0.1, +# 'openIter': 3, +# 'phaseMapFilterSigma': 0.8, +# 'signMapFilterSigma': 9.0, +# 'signMapThr': 0.3, +# 'smallPatchThr': 200, +# 'splitLocalMinCutStep': 10., +# 'splitOverlapThr': 1.1, +# 'visualSpaceCloseIter': 15, +# 'visualSpacePixelSize': 0.5} +# +# trial = RetinotopicMappingTrial(mouseID = testTrial['mouseID'], # str, mouseID +# dateRecorded = testTrial['dateRecorded'], # int, date recorded, yearmonthday +# trialNum = testTrial['trialNum'], # str, number of the trail on that day +# mouseType = testTrial['mouseType'], # str, mouse Genotype +# visualStimType = testTrial['visualStimType'], # str, stimulation type +# visualStimBackground = testTrial['visualStimBackground'], # str, background of visual stimulation +# imageExposureTime = testTrial['imageExposureTime'], # exposure time of the recorded image +# altPosMap = testTrial['altPosMap'], # altitute position map +# aziPosMap = testTrial['aziPosMap'], # azimuth position map +# altPowerMap = testTrial['altPowerMap'], # altitude power map +# aziPowerMap = testTrial['aziPowerMap'], # azimuth power map +# vasculatureMap = testTrial['vasculatureMap'], # vasculature map +# params = params, # testTrial['params'], # parameters for imaging analysis +# isAnesthetized = testTrial['isAnesthetized']) +# +# trial.processTrial(isPlot=True) +# trial._getSignMap() +# +# traces = trial.getTraces(moviePath = r'\\aibsdata2\nc-ophys\Jun\exampleData\testMov.tif', isPlot = True) +# trial.generateStandardOutput(traces=traces) +# +# trialDict = trial.generateTrialDict() +# print trialDict.keys() +# +# trial.plotTrial() +# +# altPosMapfNor, aziPosMapfNor, altPowerMapfNor, aziPowerMapfNor, signMapfNor = trial.generateNormalizedMaps(isPlot = True) +# +# +# trial.plotNormalizedPatchCenter() +# +# trial.plotFinalPatches() +# +# +# vasMapNor, finalPatchesNor = trial.normalize(isPlot = True) + +# plt.show() + + +#------------------------------------------------------------------------------------------------ +# testTrial, traces = 
loadTrial(r"E:\data2\2015-02-03-population-maps\20140623_M140174_Trial3.pkl") +# +# testTrial.plotFinalPatchBorders() +# plt.show() +# +# corArea = testTrial.getCorticalArea() +# print '\nCortical area for each visual area (mm^2):' +# for key, item in corArea.iteritems(): +# print key, ':', item +# +# baselineF = testTrial.getBaselineFluorscence() +# print '\nNormalized baseline fluroscence for each visual area:' +# for key, item in baselineF.iteritems(): +# print key, ':', item +# +# power = testTrial.getMeanPowerAmplitude() +# print '\nNormalized power amplitude for each visual area:' +# for key, item in power.iteritems(): +# print key, ':', item +# +# magnification = testTrial.getMagnification() +# print '\nMagnification for each visual area (mm^2/deg^2):' +# for key, item in magnification.iteritems(): +# print key, ':', item + +# altPosOrigin, aziPosOrigin = testTrial.getVisualFieldOrigin() +# print 'Altitude origin:', altPosOrigin +# print 'Azimuth origin:', aziPosOrigin + + +#------------------------------------------------------------------------------------------------- + +# testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\20150130_M160809_Trial1_2_3_4.pkl") +# visualArea = 'LM' +# patch = testTrial.finalPatchesMarked[visualArea] +# +# testTrial._getSignMap() +# +# f=plt.figure() +# ax=f.add_subplot(111) +# ax.imshow(testTrial.altPosMapf,vmin=-30,vmax=50,cmap='hsv',interpolation='nearest') +# pt.plot_mask(patch.getMask(),plotAxis=ax) +# +# VSlist = patch.getVisualSpace(testTrial.altPosMapf,testTrial.aziPosMapf) +# +# VSSpace, VSCoverage, VSAltCenter, VSAziCenter = VSlist +# print 'Altitute center for area '+visualArea+': '+str(VSAltCenter) +# print 'Azimuth center for area '+visualArea+': '+str(VSAziCenter) +# plt.figure() +# plt.imshow(VSSpace,interpolation='nearest') +# plt.title(visualArea) +# plt.show() +#------------------------------------------------------------------------------------------------ + +# testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\20150116_M156569_Trial1_2_3_4.pkl") +# visualArea = 'V1' +# patch = testTrial.finalPatchesMarked[visualArea] +# +# testTrial._getSignMap() +# +# f=plt.figure() +# ax=f.add_subplot(111) +# ax.imshow(testTrial.altPosMapf,vmin=-30,vmax=50,cmap='hsv',interpolation='nearest') +# pt.plot_mask(patch.getMask(),plotAxis=ax) +# +# plt.show() + +#---------------------------------------------------------------------------------------------- + +# testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\populationTrial_Ai9330min.pkl") +# testTrial.plotMagnificationMap(isFilter=False) +# plt.show() + + +#---------------------------------------------------------------------------------------------- +# testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\populationTrial_All.pkl") +# totalMask = testTrial._generateTotalMask() +# plt.imshow(totalMask) +# plt.title('total mask of all visual areas') +# plt.show() + + +#---------------------------------------------------------------------------------------------- +# testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\populationTrial_All.pkl") +# +# locationList = [[0.,40.], +# [0.,50.], +# [0.,60.]] +# +# colorList = pt.random_color(len(locationList)) +# +# f=plt.figure(figsize=(12,12)) +# ax=f.add_subplot(111) +# testTrial.plotFinalPatchBorders(plotAxis=ax,plotName=False,isTitle=True) +# +# for i, location in enumerate(locationList): +# testTrial.plotRetinotopicLocation(plotAxis=ax,location=location,color=colorList[i]) 
+#
+# labels = ['alt:'+str(x[0])+'; azi:'+str(x[1]) for x in locationList]
+# [ax.plot(None,None,ls='',c=c,label=l) for c,l in zip(colorList,labels)]
+# leg = ax.legend(labels,frameon=False)
+# for color,text in zip(colorList,leg.get_texts()):
+# text.set_color(color)
+#
+# plt.show()
+
+
+#----------------------------------------------------------------------------------------------
+
+
+# testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\populationTial_Ai93&Ai9630min.pkl")
+# testTrial.plotVisualCoverage()
+# plt.show()
+#
+#----------------------------------------------------------------------------------------------
# testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\populationTial_Ai93&Ai9630min.pkl")
- testTrial, traces = loadTrial(
- r"E:\data\2015-11-13-150821-M177931-RetinotopicMapping\20150821_MM177931_Trial1_3_4.pkl")
+ testTrial, traces = loadTrial(r"E:\data\2015-11-13-150821-M177931-RetinotopicMapping\20150821_MM177931_Trial1_3_4.pkl")
# testTrial.processTrial(isPlot=True)
testTrial.plotFinalPatchBorders2(borderWidth=1)
plt.show()
-# ----------------------------------------------------------------------------------------------
+#----------------------------------------------------------------------------------------------
# testTrial, traces = loadTrial(r"E:\data2\2015-02-03-population-maps\populationTial_Ai93&Ai9630min.pkl")
# eccMap = testTrial.eccentricityMapf
# V1 = testTrial.finalPatchesMarked['V1']
# V1center = V1.getCorticalPixelForVisualSpaceCenter(eccMap)
# print V1center
+
diff --git a/corticalmapping/SingleCellAnalysis.py b/corticalmapping/SingleCellAnalysis.py
index 9c4575b..e0c5c36 100644
--- a/corticalmapping/SingleCellAnalysis.py
+++ b/corticalmapping/SingleCellAnalysis.py
@@ -1,17 +1,17 @@
-import warnings
+from corticalmapping.core.ImageAnalysis import ROI, WeightedROI
+
+__author__ = 'junz'
+
import numpy as np
import matplotlib.pyplot as plt
-import core.PlottingTools as pt
-import core.ImageAnalysis as ia
+from .core import PlottingTools as pt
+from .core import ImageAnalysis as ia
+from .core import FileTools as ft
import scipy.ndimage as ni
import scipy.interpolate as ip
-import scipy.stats as stats
import math
import h5py
-from pandas import DataFrame
-from corticalmapping.core.ImageAnalysis import ROI, WeightedROI
-warnings.simplefilter('always', RuntimeWarning)
def get_sparse_noise_onset_index(sparseNoiseDisplayLog):
"""
@@ -22,30 +22,30 @@ def get_sparse_noise_onset_index(sparseNoiseDisplayLog):
onsetIndWithLocationSign: indices of frames for each white square, list with element structure [np.array([alt, azi]),sign,[list of indices]]
"""
+
frames = sparseNoiseDisplayLog['presentation']['displayFrames']
- frames = [tuple([np.array([x[1][1], x[1][0]]), x[2], x[3], i]) for i, x in enumerate(frames)]
- dtype = [('location', np.ndarray), ('sign', int), ('isOnset', int), ('index', int)]
- frames = np.array(frames, dtype=dtype)
+ frames = [tuple([np.array([x[1][1],x[1][0]]),x[2],x[3],i]) for i, x in enumerate(frames)]
+ dtype = [('location',np.ndarray),('sign',int),('isOnset',int),('index',int)]
+ frames = np.array(frames, dtype = dtype)
allOnsetInd = []
for i in range(len(frames)):
- if frames[i]['isOnset'] == 1 and (i == 0 or frames[i - 1]['isOnset'] == -1):
+ if frames[i]['isOnset'] == 1 and (i == 0 or frames[i-1]['isOnset'] == -1):
allOnsetInd.append(i)
onsetFrames = frames[allOnsetInd]
- allSquares = list(set([tuple([x[0][0], x[0][1], x[1]]) for x in onsetFrames]))
+ allSquares = list(set([tuple([x[0][0],x[0][1],x[1]]) for x in onsetFrames]))
onsetIndWithLocationSign = []
for square in allSquares:
indices = []
for onsetFrame in onsetFrames:
- if onsetFrame['location'][0] == square[0] and onsetFrame['location'][1] == square[1] and onsetFrame[
- 'sign'] == square[2]:
+ if onsetFrame['location'][0]==square[0] and onsetFrame['location'][1]==square[1] and onsetFrame['sign']==square[2]:
indices.append(onsetFrame['index'])
- onsetIndWithLocationSign.append([np.array([square[0], square[1]]), square[2], indices])
+ onsetIndWithLocationSign.append([np.array([square[0],square[1]]),square[2],indices])
return allOnsetInd, onsetIndWithLocationSign
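# A call sketch for the onset indexing above (layout inferred from the function's own indexing:
# each display-frame record carries its (azi, alt) location at position 1, sign at position 2
# and an onset flag at position 3; 'sparse_noise_log' is hypothetical):
#
# all_onsets, onsets_by_square = get_sparse_noise_onset_index(sparse_noise_log)
# for location, sign, indices in onsets_by_square:
#     print(location, sign, len(indices))  # [alt, azi], +1/-1, onset frame indices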
@@ -55,16 +55,12 @@ def get_peak_weighted_roi(arr, thr):
return: a WeightROI object representing the mask which contains the peak of arr and cut by the thr (thr)
"""
nanLabel = np.isnan(arr)
- arr2 = arr.copy()
- arr2[nanLabel] = np.nanmin(arr)
- labeled, _ = ni.label(arr2 >= thr)
- peakCoor = np.array(np.where(arr2 == np.amax(arr2))).transpose()[0]
+ arr2=arr.copy();arr2[nanLabel]=np.nanmin(arr)
+ labeled,_=ni.label(arr2>=thr)
+ peakCoor = np.array(np.where(arr2==np.amax(arr2))).transpose()[0]
peakMask = ia.get_marked_masks(labeled, peakCoor)
- if peakMask is None:
- # print('Threshold too high! No ROI found. Returning None'.)
- return None
- else:
- return WeightedROI(arr2 * peakMask)
+ if peakMask is None: print('Threshold too high! No ROI found. Returning None'); return None
+ else: return WeightedROI(arr2 * peakMask)
def plot_2d_receptive_field(mapArray, altPos, aziPos, plot_axis=None, **kwargs):
@@ -79,11 +75,8 @@ def plot_2d_receptive_field(mapArray, altPos, aziPos, plot_axis=None, **kwargs):
:return: plot_axis
"""
- if plot_axis == None:
- f = plt.figure(figsize=(10, 10))
- plot_axis = f.add_subplot(111)
-
- fig = plot_axis.imshow(mapArray, **kwargs)
+ if plot_axis is None: f=plt.figure(figsize=(10,10)); plot_axis=f.add_subplot(111)
+ fig = plot_axis.imshow(mapArray,**kwargs)
plot_axis.set_yticks(np.arange(len(altPos)))
plot_axis.set_xticks(np.arange(len(aziPos)))
plot_axis.set_yticklabels(altPos.astype(np.int))
@@ -96,13 +89,12 @@ def merge_weighted_rois(roi1, roi2):
merge two WeightedROI objects, most useful for merge ON and OFF subfields
"""
if (roi1.pixelSizeX != roi2.pixelSizeX) or (roi1.pixelSizeY != roi2.pixelSizeY):
- raise ValueError, 'The pixel sizes of the two WeightedROI objects should match!'
+ raise ValueError('The pixel sizes of the two WeightedROI objects should match!')
if roi1.pixelSizeUnit != roi2.pixelSizeUnit:
- raise ValueError, 'The pixel size units of the two WeightedROI objects should match!'
+ raise ValueError('The pixel size units of the two WeightedROI objects should match!')
- mask1 = roi1.get_weighted_mask()
- mask2 = roi2.get_weighted_mask()
+ mask1 = roi1.get_weighted_mask(); mask2 = roi2.get_weighted_mask()
return WeightedROI(mask1 + mask2, pixelSize=[roi1.pixelSizeY, roi1.pixelSizeX], pixelSizeUnit=roi1.pixelSizeUnit)
@@ -112,221 +104,16 @@ def merge_binary_rois(roi1, roi2):
merge two ROI objects, most useful for merge ON and OFF subfields
"""
if (roi1.pixelSizeX != roi2.pixelSizeX) or (roi1.pixelSizeY != roi2.pixelSizeY):
- raise ValueError, 'The pixel sizes of the two WeightedROI objects should match!'
+ raise ValueError('The pixel sizes of the two WeightedROI objects should match!')
if roi1.pixelSizeUnit != roi2.pixelSizeUnit:
- raise ValueError, 'The pixel size units of the two WeightedROI objects should match!'
+ raise ValueError('The pixel size units of the two WeightedROI objects should match!') - mask1 = roi1.get_binary_mask() - mask2 = roi2.get_binary_mask() - mask3 = np.logical_or(mask1, mask2).astype(np.int8) + mask1 = roi1.get_binary_mask(); mask2 = roi2.get_binary_mask(); mask3 = np.logical_or(mask1, mask2).astype(np.int8) return ROI(mask3, pixelSize=[roi1.pixelSizeY, roi1.pixelSizeX], pixelSizeUnit=roi1.pixelSizeUnit) -def get_dff(traces, t_axis, response_window, baseline_window): - """ - - :param traces: 3d array, roi x trial x time points - :param t_axis: local timestamps of sta responses - :param response_window: - :param baseline_window: - :return dffs_trial: 3d array, roi x trial x 1, list of dffs for each roi, each_trial - :return dffs_mean: 1d array, mean dff of each roi, collapsed across trials before dff calculation - """ - - baseline_ind = np.logical_and(t_axis > baseline_window[0], t_axis <= baseline_window[1]) - response_ind = np.logical_and(t_axis > response_window[0], t_axis <= response_window[1]) - - baselines = np.mean(traces[:, :, baseline_ind], axis=2, keepdims=True) - responses = np.mean(traces[:, :, response_ind], axis=2, keepdims=True) - - dffs_trial = (responses - baselines) / baselines - - traces_mean = np.mean(traces, axis=1) # roi x time points - baselines_mean = np.mean(traces_mean[:, baseline_ind], axis=1) - responses_mean = np.mean(traces_mean[:, response_ind], axis=1) - dffs_mean = (responses_mean - baselines_mean) / baselines_mean - - return dffs_trial, dffs_mean.squeeze() - - -def get_df(traces, t_axis, response_window, baseline_window): - """ - - :param traces: 3d array, roi x trial x time points - :param t_axis: local timestamps of sta responses - :param response_window: - :param baseline_window: - :return dfs_trial: 3d array, roi x trial x 1, list of dffs for each roi, each_trial - :return dfs_mean: 1d array, mean df of each roi - """ - - baseline_ind = np.logical_and(t_axis > baseline_window[0], t_axis <= baseline_window[1]) - response_ind = np.logical_and(t_axis > response_window[0], t_axis <= response_window[1]) - - baselines = np.mean(traces[:, :, baseline_ind], axis=2, keepdims=True) - responses = np.mean(traces[:, :, response_ind], axis=2, keepdims=True) - - dfs_trial = responses - baselines - - dfs_mean = np.mean(dfs_trial, axis=1).squeeze() - - return dfs_trial, dfs_mean - - -def get_df_dff_trace(trace, t_axis, baseline_window): - - baseline_ind = np.logical_and(t_axis > baseline_window[0], t_axis <= baseline_window[1]) - - baseline = np.mean(trace[baseline_ind]) - - trace_df = trace - baseline - trace_dff = trace_df / baseline - - return trace_df, trace_dff - - -def get_skewness(trace, ts, filter_length=5.): - """ - calculate skewness of a calcium trace, returns the skewness of input trace and the skewness of the trace after - removing slow trend. Because slow drifting trend creates artificial and confounding skewness other than calcium - signal. - - :param trace: 1d array - :param ts: 1d array, timestamps of the input trace in seconds - :param filter_length: float, second, the length to filter input trace to get slow trend - :return skew_o: skewness of original input trace - :return skew_d: skewness of detrended trace - """ - - fs = 1. 
/ np.mean(np.diff(ts)) - sigma = float(filter_length) * fs - skew_o = stats.skew(trace) - - trend = ni.gaussian_filter1d(trace, sigma=sigma) - trace_d = trace - trend - skew_d = stats.skew(trace_d) - - return skew_o, skew_d - - -def get_dgc_condition_name(alt, azi, sf, tf, dire, con, rad): - return 'alt{:06.1f}_azi{:06.1f}_sf{:04.2f}_tf{:04.1f}_dire{:03d}_con{:04.2f}_rad{:03d}'.format(alt, - azi, - sf, - tf, - dire, - con, - rad) - - -def get_dgc_condition_params(condi_name): - alt = float(condi_name[3:9]) - azi = float(condi_name[13:19]) - sf = float(condi_name[22:26]) - tf = float(condi_name[29:33]) - dire = int(condi_name[38:41]) - con = float(condi_name[45:49]) - rad = int(condi_name[53:56]) - return alt, azi, sf, tf, dire, con, rad - - -def get_strf_from_nwb(h5_grp, roi_ind, trace_type='sta_f_center_subtracted', location_unit='degree'): - - sta_ts = h5_grp.attrs['sta_timestamps'] - - probe_ns = h5_grp.keys() - probe_ns.sort() - - locations = [] - signs = [] - traces = [] - trigger_ts = [] - - for probe_i, probe_n in enumerate(probe_ns): - - locations.append([float(probe_n[3:9]), float(probe_n[13:19])]) - signs.append(int(probe_n[24:26])) - - traces.append(h5_grp['{}/{}'.format(probe_n, trace_type)][roi_ind, :, :]) - trigger_ts.append(h5_grp['{}/global_trigger_timestamps'.format(probe_n)].value) - - return SpatialTemporalReceptiveField(locations=locations, signs=signs, traces=traces, time=sta_ts, - trigger_ts=trigger_ts, name='roi_{:04d}'.format(roi_ind), - locationUnit=location_unit, trace_data_type=trace_type) - - -def get_dgc_response_matrix_from_nwb(h5_grp, roi_ind, trace_type='sta_f_center_subtracted'): - sta_ts = h5_grp.attrs['sta_timestamps'] - - dgcrm = DataFrame([], columns=['alt', 'azi', 'sf', 'tf', 'dire', 'con', 'rad', 'onset_ts', 'matrix']) - - condi_ns = h5_grp.keys() - condi_ns.sort() - - for condi_i, condi_n in enumerate(condi_ns): - - condi_grp = h5_grp[condi_n] - - alt, azi, sf, tf, dire, con, rad = get_dgc_condition_params(condi_name=condi_n) - - if 'global_trigger_timestamps' in condi_grp.attrs: - onset_ts = condi_grp.attrs['global_trigger_timestamps'] - else: - onset_ts = [] - - matrix = condi_grp[trace_type][roi_ind, :, :] - - dgcrm.loc[condi_i, 'alt'] = alt - dgcrm.loc[condi_i, 'azi'] = azi - dgcrm.loc[condi_i, 'sf'] = sf - dgcrm.loc[condi_i, 'tf'] = tf - dgcrm.loc[condi_i, 'dire'] = dire - dgcrm.loc[condi_i, 'con'] = con - dgcrm.loc[condi_i, 'rad'] = rad - dgcrm.loc[condi_i, 'onset_ts'] = onset_ts - dgcrm.loc[condi_i, 'matrix'] = matrix - - return DriftingGratingResponseMatrix(sta_ts=sta_ts, trace_type=trace_type, data=dgcrm) - - -def get_local_similarity_index(mask1, mask2): - """ - calculate local similarity index between two receptive field maps - - LSI = sum(mask1 x mask2) / sqrt( sum(mask1 x mask1) * sum(mask2 x mask2)) - - DOI: https://doi.org/10.1523/JNEUROSCI.0863-13.2013 - - :param mask1: 2d array - :param mask2: 2d array - :return: - """ - - if not len(mask1.shape) == len(mask2.shape) == 2: - raise ValueError('mask1 and mask2 should both be 2d array with same shape.') - - square1 = np.sum((mask1 * mask1).flat) - square2 = np.sum((mask2 * mask2).flat) - - if square1 == 0 or square2 == 0.: - return np.nan - else: - value1 = np.sum((mask1 * mask2).flat) - value2 = np.sqrt(square1 * square2) - return value1 / value2 - - -def dire2ori(dire): - """ - convert grating drifting direction to grating orientation, unit: degrees - direction: right is 0 degree, increase counterclockwise - orientation: horizontal 0 degree, increase counterclockwise - """ - return 
(dire + 90) % 180 - - class SpatialReceptiveField(WeightedROI): """ Object for spatial receptive field, a subclass of WeightedROI object @@ -347,17 +134,17 @@ def __init__(self, mask, altPos, aziPos, sign=None, temporalWindow=None, pixelSi the correct way to process RF: gaussian filter first, interpolation second, and thr third """ - super(SpatialReceptiveField, self).__init__(mask, pixelSize=None, pixelSizeUnit=pixelSizeUnit) + super(SpatialReceptiveField,self).__init__(mask, pixelSize = None, pixelSizeUnit = pixelSizeUnit) self.altPos = altPos self.aziPos = aziPos self.dataType = dataType - if (sign is None or sign == 'ON' or sign == 'OFF' or sign == 'ON_OFF'): - self.sign = sign - elif sign == 1: - self.sign = 'ON' - elif sign == -1: - self.sign = 'OFF' + if (sign is None or sign=='ON' or sign=='OFF' or sign=='ON_OFF'): + self.sign=sign + elif sign==1: + self.sign='ON' + elif sign==-1: + self.sign='OFF' else: raise ValueError('sign should be 1, -1, "ON", "OFF", "ON_OFF" or None!') self.temporalWindow = temporalWindow @@ -372,9 +159,6 @@ def __init__(self, mask, altPos, aziPos, sign=None, temporalWindow=None, pixelSi else: raise ValueError('interpolate_rate should be larger than 1!') - def __str__(self): - return 'corticalmapping.SingleCellAnalysis.SpatialReceptiveField object' - def get_name(self): name = [] @@ -392,12 +176,12 @@ def get_name(self): name.append('thr:None') if self.filter_sigma is not None: - name.append('sigma:' + str(self.filter_sigma)) + name.append('sigma:'+str(self.filter_sigma)) else: name.append('sigma:None') if self.interpolate_rate is not None: - name.append('interp:' + str(self.interpolate_rate)) + name.append('interp:'+str(self.interpolate_rate)) else: name.append('interp:None') @@ -421,10 +205,10 @@ def plot_rf(self, plot_axis=None, is_colorbar=False, cmap='Reds', interpolation= else: interpolate_rate = self.interpolate_rate - plot_axis.set_yticks(range(len(self.altPos))[::interpolate_rate]) - plot_axis.set_xticks(range(len(self.aziPos))[::interpolate_rate]) - plot_axis.set_yticklabels(['{:.1f}'.format(p) for p in self.altPos[::interpolate_rate]]) - plot_axis.set_xticklabels(['{:.1f}'.format(p) for p in self.aziPos[::interpolate_rate]]) + plot_axis.set_yticks(list(range(len(self.altPos)))[::interpolate_rate]) + plot_axis.set_xticks(list(range(len(self.aziPos)))[::interpolate_rate]) + plot_axis.set_yticklabels(self.altPos[::interpolate_rate]) + plot_axis.set_xticklabels(self.aziPos[::interpolate_rate]) if is_colorbar: plot_axis.get_figure().colorbar(curr_plot) @@ -449,9 +233,9 @@ def plot_contour(self, plot_axis=None, peak_amplitude=None, level_num=10, **kwar elif self.sign == 'OFF': colors = 'b' else: - colors = 'k' + colors ='k' - contour_levels = list(np.arange(level_num) * (float(peak_amplitude) / (level_num))) + contour_levels = list(np.arange(level_num) * (float(peak_amplitude) / (level_num))) if self.thr is not None: contour_levels = [l for l in contour_levels if l >= self.thr] @@ -474,13 +258,13 @@ def plot_contour(self, plot_axis=None, peak_amplitude=None, level_num=10, **kwar plot_axis.set_aspect('equal') if self.interpolate_rate is not None: - plot_axis.set_yticks(range(len(self.altPos))[::self.interpolate_rate]) - plot_axis.set_xticks(range(len(self.aziPos))[::self.interpolate_rate]) + plot_axis.set_yticks(list(range(len(self.altPos)))[::self.interpolate_rate]) + plot_axis.set_xticks(list(range(len(self.aziPos)))[::self.interpolate_rate]) plot_axis.set_yticklabels(self.altPos[::self.interpolate_rate]) 
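# get_local_similarity_index and dire2ori, removed above, are small pure
# functions; a self-contained sketch of the same formulas (numpy only),
# with a hypothetical usage check at the end:
import numpy as np

def local_similarity_index(mask1, mask2):
    # LSI = sum(m1*m2) / sqrt(sum(m1*m1) * sum(m2*m2)); NaN if either map is empty
    square1 = np.sum(mask1 * mask1)
    square2 = np.sum(mask2 * mask2)
    if square1 == 0. or square2 == 0.:
        return np.nan
    return np.sum(mask1 * mask2) / np.sqrt(square1 * square2)

def dire2ori(dire):
    # drifting direction (deg, 0 = rightward, increasing counterclockwise) to
    # grating orientation (deg, 0 = horizontal, increasing counterclockwise)
    return (dire + 90) % 180

m = np.array([[0., 1.], [1., 0.]])
assert local_similarity_index(m, m) == 1.0  # identical maps give an LSI of 1
assert dire2ori(0) == 90 and dire2ori(270) == 0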
plot_axis.set_xticklabels(self.aziPos[::self.interpolate_rate]) else: - plot_axis.set_yticks(range(len(self.altPos))) - plot_axis.set_xticks(range(len(self.aziPos))) + plot_axis.set_yticks(list(range(len(self.altPos)))) + plot_axis.set_xticks(list(range(len(self.aziPos)))) plot_axis.set_yticklabels(self.altPos) plot_axis.set_xticklabels(self.aziPos) @@ -489,14 +273,14 @@ def plot_contour(self, plot_axis=None, peak_amplitude=None, level_num=10, **kwar def threshold(self, thr): """ - threshold the current receptive field, return a new SpatialReceptiveField object after thresholding + thr the current receptive field, return a new SpatialReceptiveField object after thresholding """ - if (self.thr is not None) and (thr < self.thr): - raise ValueError, 'Can not cut a thresholded receptive field with a lower thresold!' + if (self.thr is not None) and (thr < self.thr): raise ValueError('Can not cut a thresholded receptive field with a lower thresold!') - if len(traces) > 1: - stdTrace = np.std(np.array(traces, dtype=np.float32), axis=0) - semTrace = stdTrace / np.sqrt(float(len(traces))) - axis.fill_between(self.time, meanTrace - semTrace, meanTrace + semTrace, facecolor=color, - linewidth=0, alpha=0.5) - axis.plot(self.time, meanTrace, '-', color=color, lw=1) + meanTrace = np.mean(np.array(traces, dtype=np.float32),axis=0) + stdTrace = np.std(np.array(traces, dtype=np.float32),axis=0) + semTrace = stdTrace/np.sqrt(float(len(traces))) + if self.data[index]['sign'] == 1: color = '#ff0000' + if self.data[index]['sign'] == -1: color = '#0000ff' + # print self.time.shape + # print (meanTrace-semTrace).shape + # print (meanTrace+semTrace).shape + axis.fill_between(self.time,meanTrace-semTrace,meanTrace+semTrace,facecolor=color,linewidth=0,alpha=0.5) + axis.plot(self.time,meanTrace,'-',color=color,lw=1) return axisLists[0][0].figure def _get_axis_layout(self, f=None, figSize=(10, 10), yRange=(0, 20), altRange=None, aziRange=None, **kwargs): - locations = np.array(self.get_probes()) + locations = np.array(self.get_locations()) - altPositions = np.sort(np.unique(locations[:, 0]))[::-1] - if altRange is not None: - altPositions = np.array([x for x in altPositions if (x >= altRange[0] and x <= altRange[1])]) + altPositions = np.sort(np.unique(locations[:,0]))[::-1] + if altRange is not None: altPositions = np.array([x for x in altPositions if (x>=altRange[0] and x<=altRange[1])]) - aziPositions = np.sort(np.unique(locations[:, 1])) - if aziRange is not None: - aziPositions = np.array([x for x in aziPositions if (x >= aziRange[0] and x <= aziRange[1])]) + aziPositions = np.sort(np.unique(locations[:,1])) + if aziRange is not None: aziPositions = np.array([x for x in aziPositions if (x>=aziRange[0] and x<=aziRange[1])]) - indexLists = [[[] for aziPosition in aziPositions] for altPosition in altPositions] + indexLists = [ [[] for aziPosition in aziPositions] for altPosition in altPositions] - if f is None: - f = plt.figure(figsize=figSize) - - f.suptitle('cell:{}; xrange:[{:6.3f}, {:6.3f}]; yrange: [{:.3f}, {:.3f}]'.
- format(self.name, self.time[0], self.time[-1], yRange[0], yRange[1])) + if f is None: f=plt.figure(figsize=figSize) + f.suptitle('cell:'+str(self.name)+'; xrange:['+str(self.time[0])[0:6]+','+str(self.time[-1])[0:6]+']; yrange:'+str(yRange)) axisLists = pt.tile_axis(f, len(altPositions), len(aziPositions), **kwargs) for i, altPosition in enumerate(altPositions): for j, aziPosition in enumerate(aziPositions): - axisLists[i][j].text(0, yRange[1], str(int(altPosition)) + ';' + str(int(aziPosition)), - ha='left', va='top', fontsize=10) - axisLists[i][j].set_xlim([self.time[0], self.time[-1]]) + axisLists[i][j].text(0,yRange[1],str(int(altPosition))+';'+str(int(aziPosition)),ha='left',va='top',fontsize=10) + axisLists[i][j].set_xlim([self.time[0],self.time[-1]]) axisLists[i][j].set_ylim(yRange) for k, location in enumerate(locations): if location[0] == altPosition and location[1] == aziPosition: indexLists[i][j].append(k) + return indexLists, axisLists def get_amplitude_map(self, timeWindow=(0, 0.5)): @@ -924,21 +530,16 @@ def get_amplitude_map(self, timeWindow=(0, 0.5)): coordinate of each pixel is defined by np.meshgrid(allAziPos, allAltPos) """ - windowIndex = np.logical_and(self.time >= timeWindow[0], self.time <= timeWindow[1]) + windowIndex = np.logical_and(self.time>=timeWindow[0], self.time<=timeWindow[1]) - indON, indOFF, allAltPos, allAziPos = self._sort_index() + indON,indOFF,allAltPos,allAziPos = self._sort_index() - ampON = np.zeros(indON.shape) - ampON[:] = np.nan - ampOFF = ampON.copy() + ampON = np.zeros(indON.shape); ampON[:]=np.nan; ampOFF = ampON.copy() for i in np.ndindex(indON.shape): - traceIndON = indON[i] - traceIndOFF = indOFF[i] - if traceIndON is not None: - ampON[i] = np.mean(np.mean(self.data.iloc[traceIndON]['traces'], axis=0)[windowIndex]) - if traceIndOFF is not None: - ampOFF[i] = np.mean(np.mean(self.data.iloc[traceIndOFF]['traces'], axis=0)[windowIndex]) + traceIndON = indON[i]; traceIndOFF = indOFF[i] + if traceIndON is not None: ampON[i] = np.mean(np.mean(self.data[traceIndON]['traces'],axis=0)[windowIndex]) + if traceIndOFF is not None: ampOFF[i] = np.mean(np.mean(self.data[traceIndOFF]['traces'],axis=0)[windowIndex]) return ampON, ampOFF, allAltPos, allAziPos @@ -952,12 +553,8 @@ def get_amplitude_receptive_field(self, timeWindow=(0, 0.5)): ampON, ampOFF, allAltPos, allAziPos = self.get_amplitude_map(timeWindow) - ampRFON = SpatialReceptiveField(mask=ampON, altPos=allAltPos, aziPos=allAziPos, sign=1, - temporalWindow=timeWindow, pixelSizeUnit=self.locationUnit, - dataType='amplitude') - ampRFOFF = SpatialReceptiveField(mask=ampOFF, altPos=allAltPos, aziPos=allAziPos, sign=-1, - temporalWindow=timeWindow, pixelSizeUnit=self.locationUnit, - dataType='amplitude') + ampRFON = SpatialReceptiveField(ampON,allAltPos,allAziPos,sign=1,temporalWindow=timeWindow,pixelSizeUnit=self.locationUnit,dataType='amplitude') + ampRFOFF = SpatialReceptiveField(ampOFF,allAltPos,allAziPos,sign=-1,temporalWindow=timeWindow,pixelSizeUnit=self.locationUnit,dataType='amplitude') return ampRFON, ampRFOFF @@ -975,20 +572,20 @@ def get_delta_amplitude_map(self, timeWindow=(0, 0.5)): indON, indOFF, allAltPos, allAziPos = self._sort_index() - ampON = np.zeros(indON.shape) - ampON[:] = np.nan + ampON = np.zeros(indON.shape); + ampON[:] = np.nan; ampOFF = ampON.copy() for i in np.ndindex(indON.shape): - traceIndON = indON[i] + traceIndON = indON[i]; traceIndOFF = indOFF[i] if traceIndON is not None: - curr_trace_ON = np.mean(self.data.iloc[traceIndON]['traces'], axis=0) + curr_trace_ON = 
np.mean(self.data[traceIndON]['traces'], axis=0) curr_baseline_ON = np.mean(curr_trace_ON[baseline_index]) curr_delta_trace_ON = curr_trace_ON - curr_baseline_ON ampON[i] = np.mean(curr_delta_trace_ON[windowIndex]) if traceIndOFF is not None: - curr_trace_OFF = np.mean(self.data.iloc[traceIndOFF]['traces'], axis=0) + curr_trace_OFF = np.mean(self.data[traceIndOFF]['traces'], axis=0) curr_baseline_OFF = np.mean(curr_trace_OFF[baseline_index]) curr_delta_trace_OFF = curr_trace_OFF - curr_baseline_OFF ampOFF[i] = np.mean(curr_delta_trace_OFF[windowIndex]) @@ -1005,12 +602,11 @@ def get_delta_amplitude_receptive_field(self, timeWindow=(0, 0.5)): ampON, ampOFF, allAltPos, allAziPos = self.get_delta_amplitude_map(timeWindow) - ampRFON = SpatialReceptiveField(mask=ampON, altPos=allAltPos, aziPos=allAziPos, sign=1, - temporalWindow=timeWindow, pixelSizeUnit=self.locationUnit, - dataType='delta_amplitude') - ampRFOFF = SpatialReceptiveField(mask=ampOFF, altPos=allAltPos, aziPos=allAziPos, sign=-1, - temporalWindow=timeWindow, pixelSizeUnit=self.locationUnit, - dataType='delta_amplitude') + ampRFON = SpatialReceptiveField(ampON, allAltPos, allAziPos, sign=1, temporalWindow=timeWindow, + pixelSizeUnit=self.locationUnit, dataType='delta_amplitude') + ampRFOFF = SpatialReceptiveField(ampOFF, allAltPos, allAziPos, sign=-1,temporalWindow=timeWindow, + pixelSizeUnit=self.locationUnit, dataType='delta_amplitude') + return ampRFON, ampRFOFF def get_zscore_map(self, timeWindow=(0, 0.5)): @@ -1036,12 +632,10 @@ def get_zscore_receptive_field(self, timeWindow=(0, 0.5)): ampON, ampOFF, allAltPos, allAziPos = self.get_amplitude_map(timeWindow) - zscoreRFON = SpatialReceptiveField(mask=ia.zscore(ampON), altPos=allAltPos, aziPos=allAziPos, sign='ON', - temporalWindow=timeWindow, pixelSizeUnit=self.locationUnit, - dataType='zscore') - zscoreRFOFF = SpatialReceptiveField(mask=ia.zscore(ampOFF), altPos=allAltPos, aziPos=allAziPos, sign='OFF', - temporalWindow=timeWindow, pixelSizeUnit=self.locationUnit, - dataType='zscore') + zscoreRFON = SpatialReceptiveField(ia.zscore(ampON),allAltPos,allAziPos,sign='ON',temporalWindow=timeWindow, + pixelSizeUnit=self.locationUnit,dataType='zscore') + zscoreRFOFF = SpatialReceptiveField(ia.zscore(ampOFF),allAltPos,allAziPos,sign='OFF',temporalWindow=timeWindow, + pixelSizeUnit=self.locationUnit,dataType='zscore') return zscoreRFON, zscoreRFOFF @@ -1065,15 +659,15 @@ def get_zscore_rois(self, timeWindow=(0, 0.5), zscoreThr=2): if zscoreROION is not None and zscoreROIOFF is not None: zscoreROIALL = WeightedROI(zscoreROION.get_weighted_mask() + zscoreROIOFF.get_weighted_mask()) elif zscoreROION is None and zscoreROIOFF is not None: - print 'No zscore receptive field found for ON channel. Threshold too high.' + print('No zscore receptive field found for ON channel. Threshold too high.') zscoreROIALL = zscoreROIOFF elif zscoreROION is not None and zscoreROIOFF is None: - print 'No zscore receptive field found for OFF channel. Threshold too high.' + print('No zscore receptive field found for OFF channel. 
Threshold too high.') zscoreROIALL = zscoreROION else: zscoreROIALL = None - return zscoreROION, zscoreROIOFF, zscoreROIALL, allAltPos, allAziPos + return zscoreROION,zscoreROIOFF,zscoreROIALL,allAltPos,allAziPos def get_zscore_thresholded_receptive_fields(self, timeWindow=(0, 0.3), thr_ratio=0.3, filter_sigma=None, interpolate_rate=None, absolute_thr=None): @@ -1093,11 +687,11 @@ def get_zscore_thresholded_receptive_fields(self, timeWindow=(0, 0.3), thr_ratio zscoreON, zscoreOFF, allAltPos, allAziPos = self.get_zscore_map(timeWindow) - zscoreRFON = SpatialReceptiveField(zscoreON, allAltPos, allAziPos, sign='ON', temporalWindow=timeWindow, + zscoreRFON = SpatialReceptiveField(zscoreON, allAltPos, allAziPos, sign='ON',temporalWindow=timeWindow, pixelSizeUnit=self.locationUnit, dataType='zscore') zscoreRFOFF = SpatialReceptiveField(zscoreOFF, allAltPos, allAziPos, sign='OFF', temporalWindow=timeWindow, - pixelSizeUnit=self.locationUnit, dataType='zscore') + pixelSizeUnit=self.locationUnit, dataType='zscore') if filter_sigma is not None: zscoreRFON = zscoreRFON.gaussian_filter(filter_sigma) @@ -1109,7 +703,7 @@ def get_zscore_thresholded_receptive_fields(self, timeWindow=(0, 0.3), thr_ratio max_value = max([np.amax(zscoreRFON.get_weighted_mask()), np.amax(zscoreRFOFF.get_weighted_mask())]) - thr = max_value * thr_ratio + thr = max_value * thr_ratio if absolute_thr is not None: thr = max([thr, absolute_thr]) @@ -1117,7 +711,7 @@ def get_zscore_thresholded_receptive_fields(self, timeWindow=(0, 0.3), thr_ratio zscoreRFON = zscoreRFON.threshold(thr) zscoreRFOFF = zscoreRFOFF.threshold(thr) - zscoreRFALL = SpatialReceptiveField(zscoreRFON.get_weighted_mask() + zscoreRFOFF.get_weighted_mask(), + zscoreRFALL = SpatialReceptiveField(zscoreRFON.get_weighted_mask()+zscoreRFOFF.get_weighted_mask(), zscoreRFON.altPos, zscoreRFON.aziPos, sign='ON_OFF', temporalWindow=timeWindow, pixelSizeUnit=self.locationUnit, dataType='zscore', thr=thr, filter_sigma=filter_sigma, @@ -1134,7 +728,7 @@ def get_zscore_roi_centers(self, timeWindow=(0, 0.5), zscoreThr=2): zscore ROIs was generated by the method get_zscore_rois() """ - zscoreROION, zscoreROIOFF, zscoreROIALL, allAltPos, allAziPos = self.get_zscore_rois(timeWindow, zscoreThr) + zscoreROION,zscoreROIOFF,zscoreROIALL,allAltPos,allAziPos = self.get_zscore_rois(timeWindow, zscoreThr) if zscoreROION is not None: centerON = zscoreROION.get_weighted_center_in_coordinate(allAltPos, allAziPos) else: @@ -1160,1469 +754,133 @@ def _sort_index(self): allAltPos = np.array(sorted(list(set(list(self.data['altitude'])))))[::-1] allAziPos = np.array(sorted(list(set(list(self.data['azimuth']))))) - indON = [[None for azi in allAziPos] for alt in allAltPos] - indOFF = [[None for azi in allAziPos] for alt in allAltPos] + indON = [[None for azi in allAziPos] for alt in allAltPos]; indOFF = [[None for azi in allAziPos] for alt in allAltPos] - for i, traceItem in self.data.iterrows(): - alt = traceItem['altitude'] - azi = traceItem['azimuth'] - sign = traceItem['sign'] + for i, traceItem in enumerate(self.data): + alt = traceItem['altitude'];azi = traceItem['azimuth'];sign = traceItem['sign'] for j, altPos in enumerate(allAltPos): for k, aziPos in enumerate(allAziPos): - if alt == altPos and azi == aziPos: - - if sign == 1: - if indON[j][k] is not None: - raise LookupError, 'Duplication of trace items found at location: ' + str( - [alt, azi]) + '; sign: 1!' 
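# get_zscore_thresholded_receptive_fields (above) derives a single cut-off
# shared by the ON and OFF maps: thr_ratio times the larger of the two peaks,
# optionally floored by absolute_thr. The selection rule in isolation, as a
# sketch with hypothetical inputs rather than SpatialReceptiveField objects:
import numpy as np

def shared_threshold(zscore_on, zscore_off, thr_ratio=0.3, absolute_thr=None):
    max_value = max(np.amax(zscore_on), np.amax(zscore_off))
    thr = max_value * thr_ratio
    if absolute_thr is not None:
        thr = max(thr, absolute_thr)  # never cut below the absolute floor
    return thr

# e.g. peaks of 4.0 (ON) and 2.5 (OFF) with thr_ratio=0.3 give thr = 1.2;
# an absolute_thr of 0.8 leaves that unchanged, since max(1.2, 0.8) = 1.2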
- else: - indON[j][k] = i + if alt==altPos and azi==aziPos: + if sign==1: + if indON[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign: 1!') + else: indON[j][k]=i - if sign == -1: - if indOFF[j][k] is not None: - raise LookupError, 'Duplication of trace items found at location: ' + str( - [alt, azi]) + '; sign:-1!' - else: - indOFF[j][k] = i + if sign==-1: + if indOFF[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign:-1!') + else: indOFF[j][k]=i - indON = np.array([np.array(x) for x in indON]); - indOFF = np.array([np.array(x) for x in indOFF]) + indON = np.array([np.array(x) for x in indON]); indOFF = np.array([np.array(x) for x in indOFF]) - return indON, indOFF, allAltPos, allAziPos + return indON,indOFF,allAltPos,allAziPos - def shrink(self, altRange=None, aziRange=None, is_reset_index=True): + def shrink(self,altRange=None,aziRange=None): """ shrink the current spatial temporal receptive field into the defined altitude and/or azimuth range """ - if altRange is None and aziRange is None: - raise LookupError, 'At least one of altRange and aziRange should be defined!' + if altRange is None and aziRange is None: raise LookupError('At least one of altRange and aziRange should be defined!') - if altRange is not None: - indAlt = np.logical_and(self.data['altitude'] >= altRange[0], - self.data['altitude'] <= altRange[1]) - else: - indAlt = np.ones(len(self.data), dtype=np.bool) - - if aziRange is not None: - indAzi = np.logical_and(self.data['azimuth'] >= aziRange[0], - self.data['azimuth'] <= aziRange[1]) - else: - indAzi = np.ones(len(self.data), dtype=np.bool) - - ind = np.logical_and(indAlt, indAzi) + if altRange is not None: indAlt = np.logical_and(self.data['altitude']>=altRange[0],self.data['altitude']<=altRange[1]) + else: indAlt = np.ones(len(self.data),dtype=np.bool) + if aziRange is not None: indAzi = np.logical_and(self.data['azimuth']>=aziRange[0],self.data['azimuth']<=aziRange[1]) + else: indAzi = np.ones(len(self.data),dtype=np.bool) + ind = np.logical_and(indAlt,indAzi) + self.data = self.data[ind] - if np.sum(ind) == 0: - raise ValueError('No probes were sampled within the given altitude and azimuth range.') - - if is_reset_index: - self.data = self.data[ind].reset_index(drop=True) - else: - self.data = self.data[ind] - - def get_local_dff_strf(self, is_collaps_before_normalize=True, add_to_trace=0.): + @staticmethod + def from_h5_group(h5Group): """ - - :param is_collaps_before_normalize: if True, for each location, the traces across multiple trials will be - averaged before calculating df/f - :return: + load SpatialTemporalReceptiveField object from a hdf5 data group """ - bl_inds = self.time <= 0 - # print(bl_inds) - - dff_traces = [] - for probe_i, probe in self.data.iterrows(): - curr_traces = np.array(probe['traces']) + add_to_trace - - if is_collaps_before_normalize: - curr_traces = np.mean(curr_traces, axis=0, keepdims=True) - - curr_bl = np.mean(curr_traces[:, bl_inds], axis=1, keepdims=True) - curr_dff = (curr_traces - curr_bl) / curr_bl - - dff_traces.append(list(curr_dff)) - - locations = zip(list(self.data['altitude']), list(self.data['azimuth'])) - - if is_collaps_before_normalize: - - strf_dff = SpatialTemporalReceptiveField(locations=locations, - signs=list(self.data['sign']), - traces=dff_traces, - trigger_ts=None, - time=self.time, - name=self.name, - locationUnit=self.locationUnit, - trace_data_type=self.trace_data_type + '_local_dff') - - 
else: - - strf_dff = SpatialTemporalReceptiveField(locations=locations, - signs=list(self.data['sign']), - traces=dff_traces, - trigger_ts=list(self.data['trigger_ts']), - time=self.time, - name=self.name, - locationUnit=self.locationUnit, - trace_data_type=self.trace_data_type + '_local_dff') - return strf_dff - - def get_data_range(self): - - v_min = None - v_max = None - - for probe_i, probe in self.data.iterrows(): - - curr_trace = np.array(probe['traces']) - - if curr_trace.shape[0] > 1: - curr_std_trace = np.std(np.array(curr_trace, dtype=np.float32), axis=0, keepdims=True) - curr_sem_trace = curr_std_trace / np.sqrt(float(len(curr_trace))) - curr_trace_high = curr_trace + curr_sem_trace - curr_trace_low = curr_trace - curr_sem_trace - else: - curr_trace_low = curr_trace - curr_trace_high = curr_trace - - if v_min is None: - v_min = np.amin(curr_trace_low) - else: - v_min = min([v_min, np.amin(curr_trace_low)]) - - if v_max is None: - v_max = np.max(curr_trace_high) - else: - v_max = max([v_max, np.amax(curr_trace_high)]) - - return v_min, v_max - - def temporal_subset(self): - # todo: finish this. But need to do TimeIntervals containing TimeIntervals first in TimingAnalysis - pass - - -class DriftingGratingResponseMatrix(DataFrame): - """ - class for response matrix to drifting grating circle - contains event triggered traces for all traces of one roi - - subclassed from pandas.DataFrame with more attribute: - sta_ts: 1d array, local time stamps for event triggered traces - trace_type: str, type of traces - - columns: - alt - altitute of circle center - azi - azimuth of circle center - sf - spatial frequency, cpd - tf - temporal frequency, Hz - dire - drifting direction, deg, 0 is to right, increase counter-clockwise - con - contrast, [0, 1] - rad - radius, deg - onset_ts - 1d array, global onset time stamps for each trial - matrix - 2d array, trial x time point - """ - - def __init__(self, sta_ts, trace_type='', *args, **kwargs): - - super(DriftingGratingResponseMatrix, self).__init__(*args, **kwargs) - - self.sta_ts = sta_ts - self.trace_type = trace_type + time = h5Group.attrs['time'] + # try: + # name = h5Group.parent.name[1:] + '.' 
+ h5Group.parent.attrs['name'] + # except KeyError: + # name = None + try: + name = h5Group.attrs['name'] + except KeyError: + name = None + locationUnit = h5Group.attrs['retinotopic_location_unit'] + trace_data_type = h5Group.attrs['trace_data_type'] + locations = [] + signs = [] + traces = [] + for key, traceItem in h5Group.items(): + locations.append(np.array([traceItem.attrs['altitude'], traceItem.attrs['azimuth']])) + signs.append((traceItem.attrs['sign'])) + traces.append(traceItem.value) - self.check_integrity() + return SpatialTemporalReceptiveField(locations, signs, traces, time, name, locationUnit, trace_data_type) - def get_condition_name(self, row_index): - condition_name = get_dgc_condition_name(alt=self.loc[row_index, 'alt'], - azi=self.loc[row_index, 'azi'], - sf=self.loc[row_index, 'sf'], - tf=self.loc[row_index, 'tf'], - dire=self.loc[row_index, 'dire'], - con=self.loc[row_index, 'con'], - rad=self.loc[row_index, 'rad']) - return condition_name +if __name__=='__main__': - def check_integrity(self): + plt.ioff() - if len(self.sta_ts.shape) != 1: - raise ValueError('self.sta_ts should be 1d array.') + #===================================================================== + # f = h5py.File(r"E:\data2\2015-07-02-150610-M160809-2P_analysis\cells_test.hdf5") + # STRF = load_STRF_FromH5(f['cell0003']['spatial_temporal_receptive_field']) + # ampRFON, ampRFOFF = STRF.get_amplitude_receptive_field() + # + # print ampRFON.sign + # print ampRFOFF.get_weighted_mask()[7,9] + # + # plt.imshow(ampRFON.get_weighted_mask(),interpolation='nearest') + # plt.show() + #===================================================================== - if self.duplicated(subset=['alt', 'azi', 'sf', 'tf', 'dire', 'con', 'rad']).any(): - raise ValueError('there is duplicated conditions.') + #===================================================================== + # f = h5py.File(r"E:\data2\2015-07-02-150610-M160809-2P_analysis\cells_test.hdf5") + # STRF = load_STRF_FromH5(f['cell0003']['spatial_temporal_receptive_field']) + # zscoreRFON, zscoreRFOFF = STRF.get_zscore_receptive_field() + # + # print zscoreRFON.sign + # print zscoreRFOFF.get_weighted_mask()[7,9] + # + # plt.imshow(zscoreRFON.get_weighted_mask(),interpolation='nearest') + # plt.show() + #===================================================================== - sta_ts_len = self.sta_ts.shape[0] + #===================================================================== + # f = h5py.File(r"E:\data2\2015-07-02-150610-M160809-2P_analysis\cells_test.hdf5") + # STRF = load_STRF_FromH5(f['cell0003']['spatial_temporal_receptive_field']) + # zscoreRFON, zscoreRFOFF = STRF.get_amplitude_receptive_field() + # + # zscoreRFON.interpolate(10) + # + # plt.imshow(zscoreRFON.get_weighted_mask(),interpolation='nearest') + # plt.show() + #===================================================================== - for row_i, row in self.iterrows(): + #===================================================================== + # f = h5py.File(r"E:\data2\2015-07-02-150610-M160809-2P_analysis\cells_test.hdf5") + # STRF = load_STRF_FromH5(f['cell0003']['spatial_temporal_receptive_field']) + # STRF.shrink([-10,10],None) + # print np.unique(np.array(STRF.get_locations())[:,0]) + # STRF.shrink(None,[0,20]) + # print np.unique(np.array(STRF.get_locations())[:,1]) + #===================================================================== - if len(row['onset_ts']) == 0: - pass - # print('condition: {}. 
No onset timestamps available.'.format(self.get_condition_name(row_i))) - else: - if (len(row['onset_ts'].shape) != 1): - raise ValueError( - 'condition: {}, onset_ts should be 1-d array.'.format(self.get_condition_name(row_i))) + # ===================================================================== + dfile = h5py.File(r"G:\2016-08-15-160815-M238599-wf2p-Retinotopy\sparse_noise_2p\cells_refined.hdf5", 'r') + strf = SpatialTemporalReceptiveField.from_h5_group(dfile['cell0519']['spatial_temporal_receptive_field']) - if row['matrix'].shape[0] != row['onset_ts'].shape[0]: - print('condition: {}, mismatched trial number ({}) and onset number ({}).' - .format(self.get_condition_name(row_i), row['matrix'].shape[0], - row['onset_ts'].shape[0])) + rf_on, rf_off, rf_all = strf.get_zscore_thresholded_receptive_fields(timeWindow=(0., 0.3), thr_ratio=0.4, + filter_sigma=1., interpolate_rate=10, + absolute_thr=0.8) - if len(row['matrix'].shape) != 2: - raise ValueError('condition: {}, onset_ts should be 2-d array.'.format(self.get_condition_name(row_i))) + peak_amplitude = max([np.amax(rf_on.get_weighted_mask()), np.amax(rf_off.get_weighted_mask())]) - if row['matrix'].shape[1] != sta_ts_len: - raise ValueError('condition: {}, mismatched trace length ({}) and sta ts length ({}).' - .format(self.get_condition_name(row_i), row['matrix'].shape[1], sta_ts_len)) + f = plt.figure(figsize=(6, 8)) + ax = f.add_subplot(111) + rf_on.plot_contour(ax, peak_amplitude=peak_amplitude, level_num=10, linewidths=1.5) + rf_off.plot_contour(ax, peak_amplitude=peak_amplitude, level_num=10, linewidths=1.5) + plt.show() - def get_df_response_matrix(self, baseline_win=(-0.5, 0.)): - """ - return df response matrix - :param baseline_win: - :return: - """ + # ===================================================================== - baseline_ind = np.logical_and(self.sta_ts > baseline_win[0], self.sta_ts <= baseline_win[1]) - - dgcrm_df = self.copy() - - for row_i, row in self.iterrows(): - curr_matrix = row['matrix'].astype(np.float64) - curr_baseline = np.mean(curr_matrix[:, baseline_ind], axis=1, keepdims=True) - dgcrm_df.loc[row_i, 'matrix'] = curr_matrix - curr_baseline - - return DriftingGratingResponseMatrix(sta_ts=self.sta_ts, trace_type='{}_df'.format(self.trace_type), - data=dgcrm_df) - - def get_zscore_response_matrix(self, baseline_win=(-0.5, 0.)): - """ - - return zscore response matrix, zscore is calculated as (trace - baseline_mean) / baseline_std - - :param baseline_win: - :return: - """ - - baseline_ind = np.logical_and(self.sta_ts > baseline_win[0], self.sta_ts <= baseline_win[1]) - - dgcrm_zscore = self.copy() - - for row_i, row in self.iterrows(): - curr_matrix = row['matrix'].astype(np.float64) - curr_baseline_mean = np.mean(curr_matrix[:, baseline_ind], axis=1, keepdims=True) - curr_baseline_std = np.std(curr_matrix[:, baseline_ind].flat) - dgcrm_zscore.loc[row_i, 'matrix'] = (curr_matrix - curr_baseline_mean) / curr_baseline_std - - return DriftingGratingResponseMatrix(sta_ts=self.sta_ts, trace_type='{}_zscore'.format(self.trace_type), - data=dgcrm_zscore) - - def get_dff_response_matrix(self, baseline_win=(-0.5, 0.), bias=0., warning_level=0.9): - """ - - return df over f response matrix - - :param baseline_win: - :param bias: float, a number added to all matrices before calculating df over f - :param warning_level: float, if the absolute value of the baseline of a given condition and a given trial is - smaller than this value, print a waring - :return: - """ - - baseline_ind = np.logical_and(self.sta_ts > 
baseline_win[0], self.sta_ts <= baseline_win[1]) - - dgcrm_dff = self.copy() - - for row_i, row in self.iterrows(): - curr_matrix = row['matrix'].astype(np.float64) - curr_matrix = curr_matrix + bias - dff_matrix = np.empty(curr_matrix.shape, dtype=np.float32) - for trial_i in range(curr_matrix.shape[0]): - curr_trial = curr_matrix[trial_i, :] - curr_baseline = np.mean(curr_trial[baseline_ind]) - - # print(curr_baseline) - if curr_baseline <= warning_level: - msg = '\ncondition:{}, trial:{}, baseline too low: {}'.format(self.get_condition_name(row_i), - trial_i, - curr_baseline) - warnings.warn(msg, RuntimeWarning) - - curr_trial_dff = (curr_trial - curr_baseline) / curr_baseline - dff_matrix[trial_i, :] = curr_trial_dff - - dgcrm_dff.loc[row_i, 'matrix'] = dff_matrix - - return DriftingGratingResponseMatrix(sta_ts=self.sta_ts, trace_type='{}_dff'.format(self.trace_type), - data=dgcrm_dff) - - def get_condition_trial_responses(self, condi_i, response_win=(0., 1.)): - """ - for a given condition specified by df index: condi_i, return responses for each trial - :param condi_i: int - :param response_win: list of two floats, time window to calculate responses - :return: 1d array, response of each trial for the specified condition - """ - - response_ind = np.logical_and(self.sta_ts > response_win[0], self.sta_ts <= response_win[1]) - - traces = self.loc[condi_i, 'matrix'] - - responses = np.mean(traces[:, response_ind], axis=1) - - return responses.squeeze() - - def collapse_trials(self): - - """ - calculate mean response for each condition across all trials - - :return: DriftingGratingResponseMatrix object - """ - - dgcrm_collapsed = self.copy() - - for row_i, row in self.iterrows(): - curr_matrix = row['matrix'] - dgcrm_collapsed.loc[row_i, 'matrix'] = np.mean(curr_matrix, axis=0, keepdims=True) - dgcrm_collapsed.loc[row_i, 'onset_ts'] = [] - - return DriftingGratingResponseMatrix(sta_ts=self.sta_ts, trace_type='{}_collapsed'.format(self.trace_type), - data=dgcrm_collapsed) - - def get_response_table(self, response_win=(0., 1.)): - - response_ind = np.logical_and(self.sta_ts > response_win[0], self.sta_ts <= response_win[1]) - - dgcrt = self.loc[:, ['alt', 'azi', 'sf', 'tf', 'dire', 'con', 'rad']] - dgcrt['resp_mean'] = np.nan - dgcrt['resp_max'] = np.nan - dgcrt['resp_min'] = np.nan - dgcrt['resp_std'] = np.nan - dgcrt['resp_stdev'] = np.nan - - for row_i, row in self.iterrows(): - - responses = np.mean(row['matrix'][:, response_ind], axis=1) - - dgcrt.loc[row_i, 'resp_mean'] = np.mean(responses) - dgcrt.loc[row_i, 'resp_max'] = np.max(responses) - dgcrt.loc[row_i, 'resp_min'] = np.min(responses) - - if len(responses) > 1: - dgcrt.loc[row_i, 'resp_std'] = np.std(responses) - dgcrt.loc[row_i, 'resp_stdev'] = np.std(responses) / np.sqrt(len(responses)) - - return DriftingGratingResponseTable(trace_type=self.trace_type, data=dgcrt) - - def get_df_response_table(self, baseline_win=(-0.5, 0.), response_win=(0., 1.)): - """ - this is suppose to give the most robust measurement of df response table. - - for each condition: - 1. mean_baseline is calculated by averaging across all trials and all data points in the baseline_win - 2. mean_response is calculated by averaging across all trials and all data points in the response_win - 3. df for every condition is defined by (mean_response - mean_baseline) and response table is generated - - # separate operation - 4. for each trial of each condition, df is calculated by (mean_response - mean_baseline) - 5. 
one-way anova is performed from these trial responses - 6. peak positive condition and peak negative condition is selected from previously generated response table - 7. ttest is performed for these two conditions against blank trial responses - - - :param baseline_win: - :param response_win: - :return df_response_table: - :return p_anova: - :return p_ttest_pos: - :return p_ttest_neg: - """ - - baseline_ind = np.logical_and(self.sta_ts > baseline_win[0], self.sta_ts <= baseline_win[1]) - response_ind = np.logical_and(self.sta_ts > response_win[0], self.sta_ts <= response_win[1]) - - dgcrt = self.loc[:, ['alt', 'azi', 'sf', 'tf', 'dire', 'con', 'rad']] - dgcrt['resp_mean'] = np.nan - dgcrt['resp_max'] = np.nan - dgcrt['resp_min'] = np.nan - dgcrt['resp_std'] = np.nan - dgcrt['resp_stdev'] = np.nan - - trial_responses = [] - - for row_i, row in self.iterrows(): - curr_matrix = row['matrix'] - baseline_mean = np.mean(curr_matrix[:, baseline_ind].astype(np.float64).flat) - response_mean = np.mean(curr_matrix[:, response_ind].astype(np.float64).flat) - dgcrt.loc[row_i, 'resp_mean'] = response_mean - baseline_mean - - baseline_trial = np.mean(curr_matrix[:, baseline_ind].astype(np.float64), axis=1) - response_trial = np.mean(curr_matrix[:, response_ind].astype(np.float64), axis=1) - curr_trial_responses = response_trial - baseline_trial - trial_responses.append(curr_trial_responses) - - dgcrt.loc[row_i, 'resp_max'] = np.max(curr_trial_responses) - dgcrt.loc[row_i, 'resp_min'] = np.min(curr_trial_responses) - dgcrt.loc[row_i, 'resp_std'] = np.std(curr_trial_responses) - dgcrt.loc[row_i, 'resp_stdev'] = np.std(curr_trial_responses) / np.sqrt(len(curr_trial_responses)) - - _, p_anova = stats.f_oneway(*trial_responses) - - df_response_table = DriftingGratingResponseTable(data=dgcrt, trace_type='{}_df'.format(self.trace_type)) - responses_blank = trial_responses[df_response_table.blank_condi_ind] - responses_peak_pos = trial_responses[df_response_table.peak_condi_ind_pos] - responses_peak_neg = trial_responses[df_response_table.peak_condi_ind_neg] - - n_min_pos = np.min([len(responses_blank), len(responses_peak_pos)]) - _, p_ttest_pos = stats.ttest_rel(responses_blank[0:n_min_pos], responses_peak_pos[0:n_min_pos]) - n_min_neg = np.min([len(responses_blank), len(responses_peak_neg)]) - _, p_ttest_neg = stats.ttest_rel(responses_blank[0:n_min_pos], responses_peak_neg[0:n_min_neg]) - - return df_response_table, p_anova, p_ttest_pos, p_ttest_neg - - def get_dff_response_table(self, baseline_win=(-0.5, 0.), response_win=(0., 1.), bias=0, warning_level=0.1): - """ - this is suppose to give the most robust measurement of df/f response table. - - for each condition: - 1. mean_baseline is calculated by averaging across all trials and all data points in the baseline_win - 2. mean_response is calculated by averaging across all trials and all data points in the response_win - 3. df/f for each condition is defined by - (mean_response - mean_baseline) / mean_baseline and response table is generated - - # separate operation - 4. for each trial of each condition, df is calculated by (mean_response - mean_baseline) / mean_baseline - 5. one-way anova is performed from these trial responses - 6. peak positive condition and peak negative condition is selected from previously generated response table - 7. 
ttest is performed for these two conditions against blank trial responses - - - :param baseline_win: - :param response_win: - :param bias: float, a constant added to all matrices - :param warning_level: float, warning level of low baseline - :return dff_response_table: - :return p_anova: - :return p_ttest_pos: - :return p_ttest_neg: - """ - - baseline_ind = np.logical_and(self.sta_ts > baseline_win[0], self.sta_ts <= baseline_win[1]) - response_ind = np.logical_and(self.sta_ts > response_win[0], self.sta_ts <= response_win[1]) - - dgcrt = self.loc[:, ['alt', 'azi', 'sf', 'tf', 'dire', 'con', 'rad']] - dgcrt['resp_mean'] = np.nan - dgcrt['resp_max'] = np.nan - dgcrt['resp_min'] = np.nan - dgcrt['resp_std'] = np.nan - dgcrt['resp_stdev'] = np.nan - - trial_responses = [] - - for row_i, row in self.iterrows(): - curr_matrix = row['matrix'] + bias - baseline_mean = np.mean(curr_matrix[:, baseline_ind].astype(np.float64).flat) - response_mean = np.mean(curr_matrix[:, response_ind].astype(np.float64).flat) - dgcrt.loc[row_i, 'resp_mean'] = (response_mean - baseline_mean) / baseline_mean - - if baseline_mean <= warning_level: - msg = '\ncondition:{}, mean baseline too low: {}'.format(self.get_condition_name(row_i), baseline_mean) - warnings.warn(msg, RuntimeWarning) - - - baseline_trial = np.mean(curr_matrix[:, baseline_ind].astype(np.float64), axis=1) - response_trial = np.mean(curr_matrix[:, response_ind].astype(np.float64), axis=1) - curr_trial_responses = (response_trial - baseline_trial) / baseline_trial - - if np.min(baseline_trial) <= warning_level: - msg = '\ncondition:{}, trial baseline too low: {}'.format(self.get_condition_name(row_i), - np.min(baseline_trial)) - warnings.warn(msg, RuntimeWarning) - - - trial_responses.append(curr_trial_responses) - dgcrt.loc[row_i, 'resp_max'] = np.max(curr_trial_responses) - dgcrt.loc[row_i, 'resp_min'] = np.min(curr_trial_responses) - dgcrt.loc[row_i, 'resp_std'] = np.std(curr_trial_responses) - dgcrt.loc[row_i, 'resp_stdev'] = np.std(curr_trial_responses) / np.sqrt(len(curr_trial_responses)) - - _, p_anova = stats.f_oneway(*trial_responses) - - dff_response_table = DriftingGratingResponseTable(data=dgcrt, trace_type='{}_df'.format(self.trace_type)) - responses_blank = trial_responses[dff_response_table.blank_condi_ind] - responses_peak_pos = trial_responses[dff_response_table.peak_condi_ind_pos] - responses_peak_neg = trial_responses[dff_response_table.peak_condi_ind_neg] - - n_min_pos = np.min([len(responses_blank), len(responses_peak_pos)]) - _, p_ttest_pos = stats.ttest_rel(responses_blank[0:n_min_pos], responses_peak_pos[0:n_min_pos]) - n_min_neg = np.min([len(responses_blank), len(responses_peak_neg)]) - _, p_ttest_neg = stats.ttest_rel(responses_blank[0:n_min_pos], responses_peak_neg[0:n_min_neg]) - - return dff_response_table, p_anova, p_ttest_pos, p_ttest_neg - - def get_zscore_response_table(self, baseline_win=(-0.5, 0.), response_win=(0., 1.)): - """ - this is suppose to give the most robust measurement of zscore response table. - - for each condition: - 1. mean_baseline is calculated by averaging across all trials and all data points in the baseline_win - 2. mean_response is calculated by averaging across all trials and all data points in the response_win - 3. mean_standard_deviation is calculated as following: - i. the baseline of each trial is normalized with zero mean - ii. normalized baselines are concatenated to a 1d array - iii. mean_standard_deviation is calculated from the concatenated baseline - 4. 
zscore for each condition is defined by - (mean_response - mean_baseline) / mean_standard_deviation and response table is generated - - # separate operation - 4. for each trial of each condition, zscore is calculated by (mean_response - mean_baseline) / mean_standard_deviation - 5. one-way anova is performed from these trial responses - 6. peak positive condition and peak negative condition is selected from previously generated response table - 7. ttest is performed for these two conditions against blank trial responses - - - :param baseline_win: - :param response_win: - :return zscore_response_table: - :return p_anova: - :return p_ttest_pos: - :return p_ttest_neg: - """ - - baseline_ind = np.logical_and(self.sta_ts > baseline_win[0], self.sta_ts <= baseline_win[1]) - response_ind = np.logical_and(self.sta_ts > response_win[0], self.sta_ts <= response_win[1]) - - dgcrt = self.loc[:, ['alt', 'azi', 'sf', 'tf', 'dire', 'con', 'rad']] - dgcrt['resp_mean'] = np.nan - dgcrt['resp_max'] = np.nan - dgcrt['resp_min'] = np.nan - dgcrt['resp_std'] = np.nan - dgcrt['resp_stdev'] = np.nan - - trial_responses = [] - - for row_i, row in self.iterrows(): - curr_matrix = row['matrix'] - - baseline = curr_matrix[:, baseline_ind].astype(np.float64) - baseline_trial = np.mean(baseline, axis=1, keepdims=True) - baseline_norm = baseline - baseline_trial - std_mean = np.std(baseline_norm.flat) - baseline_mean = np.mean(baseline_trial.flat) - response_mean = np.mean(curr_matrix[:, response_ind].astype(np.float64).flat) - dgcrt.loc[row_i, 'resp_mean'] = (response_mean - baseline_mean) / std_mean - - baseline_trial = np.mean(curr_matrix[:, baseline_ind].astype(np.float64), axis=1) - # std_trial = np.std(curr_matrix[:, baseline_ind].astype(np.float64), axis=1) - response_trial = np.mean(curr_matrix[:, response_ind].astype(np.float64), axis=1) - curr_trial_responses = (response_trial - baseline_trial) / std_mean - - trial_responses.append(curr_trial_responses) - dgcrt.loc[row_i, 'resp_max'] = np.max(curr_trial_responses) - dgcrt.loc[row_i, 'resp_min'] = np.min(curr_trial_responses) - dgcrt.loc[row_i, 'resp_std'] = np.std(curr_trial_responses) - dgcrt.loc[row_i, 'resp_stdev'] = np.std(curr_trial_responses) / np.sqrt(len(curr_trial_responses)) - - _, p_anova = stats.f_oneway(*trial_responses) - - zscore_response_table = DriftingGratingResponseTable(data=dgcrt, trace_type='{}_df'.format(self.trace_type)) - responses_blank = trial_responses[zscore_response_table.blank_condi_ind] - responses_peak_pos = trial_responses[zscore_response_table.peak_condi_ind_pos] - responses_peak_neg = trial_responses[zscore_response_table.peak_condi_ind_neg] - - n_min_pos = np.min([len(responses_blank), len(responses_peak_pos)]) - _, p_ttest_pos = stats.ttest_rel(responses_blank[0:n_min_pos], responses_peak_pos[0:n_min_pos]) - n_min_neg = np.min([len(responses_blank), len(responses_peak_neg)]) - _, p_ttest_neg = stats.ttest_rel(responses_blank[0:n_min_pos], responses_peak_neg[0:n_min_neg]) - - return zscore_response_table, p_anova, p_ttest_pos, p_ttest_neg - - def plot_traces(self, condi_ind, axis=None, blank_ind=None, block_dur=None, response_window=None, - baseline_window=None, trace_color='#ff0000', block_face_color='#aaaaaa', - response_window_color='#ff00ff', baseline_window_color='#888888', blank_trace_color='#888888', - lw_single=0.5, lw_mean=2.): - - if axis is None: - f = plt.figure() - axis = f.add_subplot(111) - - if block_dur is not None: - axis.axvspan(0., block_dur, color=block_face_color) - - if baseline_window is not None: - 
axis.axvline(x=baseline_window[0], linestyle='--', color=baseline_window_color, lw=1.5) - axis.axvline(x=baseline_window[1], linestyle='--', color=baseline_window_color, lw=1.5) - - if response_window is not None: - axis.axvline(x=response_window[0], linestyle='--', color=response_window_color, lw=1.5) - axis.axvline(x=response_window[1], linestyle='--', color=response_window_color, lw=1.5) - - ymin = None - ymax = None - - if blank_ind is not None: - traces_blank = self.loc[blank_ind, 'matrix'] - for t in traces_blank: - axis.plot(self.sta_ts, t, color=blank_trace_color, lw=lw_single) - - axis.plot(self.sta_ts, np.mean(traces_blank, axis=0), color=blank_trace_color, lw=lw_mean) - - ymin = np.amin(traces_blank) - ymax = np.amax(traces_blank) - - - traces = self.loc[condi_ind, 'matrix'] - for t in traces: - axis.plot(self.sta_ts, t, color=trace_color, lw=lw_single) - - if ymin is None: - ymin = np.amin(traces) - else: - ymin = min([ymin, np.amin(traces)]) - - if ymax is None: - ymax = np.amax(traces) - else: - ymax = max([ymax, np.amax(traces)]) - - axis.plot(self.sta_ts, np.mean(traces, axis=0), color=trace_color, lw=lw_mean) - axis.set_xlim([self.sta_ts[0], self.sta_ts[-1]]) - - return ymin, ymax - - -class DriftingGratingResponseTable(DataFrame): - """ - class for response table to drifting grating circle - contains responses to all conditions of one roi - - subclassed from pandas.DataFrame with more attribute: - trace_type: str, type of traces - - columns: - alt - float, altitute of circle center - azi - float, azimuth of circle center - sf - float, spatial frequency, cpd - tf - float, temporal frequency, Hz - dire - int, drifting direction, deg, 0 is to right, increase counter-clockwise - con - float, contrast, [0, 1] - rad - float, radius, deg - resp_mean - float, mean response to the condition - resp_max - float, max response to the condition - resp_min - float, min response to the condition - resp_std - float, standard deviation across trials - resp_stdev - float, standard error of mean across trials - """ - - def __init__(self, trace_type='', *args, **kwargs): - - super(DriftingGratingResponseTable, self).__init__(*args, **kwargs) - - self.trace_type = trace_type - - def get_peak_condi_params_pos(self): - ind = self.peak_condi_ind_pos - return self.loc[ind, ['sf', 'tf', 'dire', 'con', 'rad']] - - @property - def sfs(self): - return self['sf'].unique() - - @property - def tfs(self): - return self['tf'].unique() - - @property - def dires(self): - return self['dire'].unique() - - @property - def cons(self): - return self['con'].unique() - - @property - def rads(self): - return self['rad'].unique() - - @property - def blank_condi_ind(self): - """ - if more than one blank conditions found, raise error - :return: int, blank condition index. 
None if no blank condition found - """ - inds = [] - - for row_i, row in self.iterrows(): - if row['sf'] == 0.: - inds.append(row_i) - if row['tf'] == 0.: - inds.append(row_i) - if row['con'] == 0.: - inds.append(row_i) - if row['rad'] == 0.: - inds.append(row_i) - - inds = list(set(inds)) - - if len(inds) == 0: # no blank condition - return None - elif len(inds) == 1: # 1 blank condition - return inds[0] - else: - raise LookupError('more than one blank conditions found ({}).'.format(len(inds))) - - @property - def peak_condi_ind_pos(self): - """return the index of the condition with biggest postitive response (exclude the blank condition)""" - if self.blank_condi_ind is None: - return self['resp_mean'].argmax() - else: - return self.drop(self.blank_condi_ind)['resp_mean'].idxmax() - - @property - def peak_condi_ind_neg(self): - """return the index of the condition with biggest negative response (exclude the blank condition)""" - if self.blank_condi_ind is None: - return self['resp_mean'].argmin() - else: - return self.drop(self.blank_condi_ind)['resp_mean'].idxmin() - - @property - def peak_response_pos(self): - return self.loc[self.peak_condi_ind_pos, 'resp_mean'] - - @property - def peak_response_neg(self): - return self.loc[self.peak_condi_ind_neg, 'resp_mean'] - - @property - def peak_response_abs(self): - return np.max([abs(self.peak_response_pos), - abs(self.peak_response_neg)]) - - def get_sf_tf_matrix(self, response_dir='pos'): - """ - rerurn 2d array of sf/tf responses, rows: sf; cols: tf, other conditions are at peak in positive or negative - direction - :param response_dir: 'pos' or 'neg', response type to select peak condition - :return responses: 2d array of 'resp_mean' - :return sf_lst: 1d array, sf conditions - :return tf_lst: 1d array, tf conditions - """ - - if response_dir == 'pos': - ind_p = self.peak_condi_ind_pos - elif response_dir == 'neg': - ind_p = self.peak_condi_ind_neg - else: - raise LookupError('Do not understand response_dir ({}). Should be "pos" or "neg"'.format(response_dir)) - - alt_p = self.loc[ind_p, 'alt'] - azi_p = self.loc[ind_p, 'azi'] - dire_p = self.loc[ind_p, 'dire'] - con_p = self.loc[ind_p, 'con'] - rad_p = self.loc[ind_p, 'rad'] - - df_sub = self.loc[(self['alt'] == alt_p) & (self['azi'] == azi_p) & (self['dire'] == dire_p) & - (self['con'] == con_p) & (self['rad'] == rad_p)] - - sfs = list(df_sub['sf'].unique()) - sfs.sort() - tfs = list(df_sub['tf'].unique()) - tfs.sort() - - resps = np.zeros((len(sfs), len(tfs))) - resps[:] = np.nan - - for sf_i, sf in enumerate(sfs): - for tf_i, tf in enumerate(tfs): - - curr_condi = df_sub[(df_sub['sf'] == sf) & (df_sub['tf'] == tf)] - # print(curr_condi['resp_mean']) - - if not curr_condi.empty: - resps[sf_i, tf_i] = curr_condi['resp_mean'] - - return resps, sfs, tfs - - def get_dire_tuning(self, response_dir='pos', is_collapse_sf=True, is_collapse_tf=False): - """ - dataframe of direction responses, other conditions are at peak in positive or negative direction, if not - specified by is_collapse - :param is_collapse_sf: bool, - :param is_collapse_tf: bool, - :param response_dir: 'pos' or 'neg', response type to select peak condition - :return dire_tuning: dataframe with two columns: 'dire','resp_mean', 'resp_max', 'resp_min', 'resp_std', - 'resp_stdev' - """ - - if response_dir == 'pos': - ind_p = self.peak_condi_ind_pos - elif response_dir == 'neg': - ind_p = self.peak_condi_ind_neg - else: - raise LookupError('Do not understand response_dir ({}). 
Should be "pos" or "neg"'.format(response_dir)) - - alt_p = self.loc[ind_p, 'alt'] - azi_p = self.loc[ind_p, 'azi'] - sf_p = self.loc[ind_p, 'sf'] - tf_p = self.loc[ind_p, 'tf'] - con_p = self.loc[ind_p, 'con'] - rad_p = self.loc[ind_p, 'rad'] - - # print('sf_p: {}'.format(sf_p)) - # print('tf_p: {}'.format(tf_p)) - - df_sub = self.loc[(self['alt'] == alt_p) & (self['azi'] == azi_p) & (self['con'] == con_p) & - (self['rad'] == rad_p)] - - df_sub = df_sub[['sf', 'tf', 'dire', 'resp_mean', 'resp_max', 'resp_min', 'resp_std', 'resp_stdev']] - # print(df_sub) - - if is_collapse_sf: - df_sub = df_sub.groupby(['tf', 'dire']).mean().reset_index() - else: - df_sub = df_sub.loc[df_sub['sf'] == sf_p].drop('sf', axis=1) - - if is_collapse_tf: - df_sub = df_sub.groupby(['dire']).mean().reset_index() - else: - df_sub = df_sub.loc[df_sub['tf'] == tf_p].drop('tf', axis=1) - - # print(df_sub) - - return df_sub[['dire', 'resp_mean', 'resp_max', 'resp_min', 'resp_std', 'resp_stdev']].copy() - - def get_sf_tuning(self, response_dir='pos', is_collapse_tf=False, is_collapse_dire=False): - """ - dataframe of sf responses, other conditions are at peak in positive or negative direction, if not - specified by is_collapse - :param is_collapse_tf: bool, - :param is_collapse_dire: bool, - :param response_dir: 'pos' or 'neg', response type to select peak condition - :return sf_tuning: dataframe with two columns: 'sf','resp_mean', 'resp_max', 'resp_min', 'resp_std', - 'resp_stdev' - """ - - if response_dir == 'pos': - ind_p = self.peak_condi_ind_pos - elif response_dir == 'neg': - ind_p = self.peak_condi_ind_neg - else: - raise LookupError('Do not understand response_dir ({}). Should be "pos" or "neg"'.format(response_dir)) - - alt_p = self.loc[ind_p, 'alt'] - azi_p = self.loc[ind_p, 'azi'] - dire_p = self.loc[ind_p, 'dire'] - tf_p = self.loc[ind_p, 'tf'] - con_p = self.loc[ind_p, 'con'] - rad_p = self.loc[ind_p, 'rad'] - - df_sub = self.loc[(self['alt'] == alt_p) & (self['azi'] == azi_p) & (self['con'] == con_p) & - (self['rad'] == rad_p)] - - df_sub = df_sub[['sf', 'tf', 'dire', 'resp_mean', 'resp_max', 'resp_min', 'resp_std', 'resp_stdev']] - # print(df_sub) - - if is_collapse_tf: - df_sub = df_sub.groupby(['sf', 'dire']).mean().reset_index() - else: - df_sub = df_sub.loc[df_sub['tf'] == tf_p].drop('tf', axis=1) - - if is_collapse_dire: - df_sub = df_sub.groupby(['sf']).mean().reset_index() - else: - df_sub = df_sub.loc[df_sub['dire'] == dire_p].drop('dire', axis=1) - - # print(df_sub) - return df_sub[['sf', 'resp_mean', 'resp_max', 'resp_min', 'resp_std', 'resp_stdev']].copy() - - def get_tf_tuning(self, response_dir='pos', is_collapse_sf=False, is_collapse_dire=False): - """ - dataframe of tf responses, other conditions are at peak in positive or negative direction, if not - specified by is_collapse - :param is_collapse_sf: bool, - :param is_collapse_dire: bool, - :param response_dir: 'pos' or 'neg', response type to select peak condition - :return tf_tuning: dataframe with two columns: 'tf','resp_mean', 'resp_max', 'resp_min', 'resp_std', - 'resp_stdev' - """ - - if response_dir == 'pos': - ind_p = self.peak_condi_ind_pos - elif response_dir == 'neg': - ind_p = self.peak_condi_ind_neg - else: - raise LookupError('Do not understand response_dir ({}). 
Should be "pos" or "neg"'.format(response_dir)) - - alt_p = self.loc[ind_p, 'alt'] - azi_p = self.loc[ind_p, 'azi'] - dire_p = self.loc[ind_p, 'dire'] - sf_p = self.loc[ind_p, 'sf'] - con_p = self.loc[ind_p, 'con'] - rad_p = self.loc[ind_p, 'rad'] - - df_sub = self.loc[(self['alt'] == alt_p) & (self['azi'] == azi_p) & (self['con'] == con_p) & - (self['rad'] == rad_p)] - - df_sub = df_sub[['sf', 'tf', 'dire', 'resp_mean', 'resp_max', 'resp_min', 'resp_std', 'resp_stdev']] - # print(df_sub) - - if is_collapse_sf: - df_sub = df_sub.groupby(['tf', 'dire']).mean().reset_index() - else: - df_sub = df_sub.loc[df_sub['sf'] == sf_p].drop('sf', axis=1) - - if is_collapse_dire: - df_sub = df_sub.groupby(['tf']).mean().reset_index() - else: - df_sub = df_sub.loc[df_sub['dire'] == dire_p].drop('dire', axis=1) - - # print(df_sub) - return df_sub[['tf', 'resp_mean', 'resp_max', 'resp_min', 'resp_std', 'resp_stdev']].copy() - - @staticmethod - def get_dire_tuning_properties(dire_tuning, response_dir='pos', elevation_bias=0.): - """ - - input dire_tuning has two columns: directions in degrees and mean responses to each direction. - the responses are treated in 3 different ways - - 1. raw, no treatment - 2. elevated, if minimum is below elevation_bias, the whole curve will be elevated to have minimum - response equal to elevation_bias. - 3. rectified, the response smaller than 0 will be set as zero. - - because gOSI and gDSI calculation is independent of curve base line. so gOSI_raw and gDSI_raw represent - gOSI and gDSI in both raw and elevated conditions. - - :param dire_tuning: - :param response_dir: str, 'pos' or 'neg - :param elevation_bias: float, minimum response after elevation. - :return OSI_raw: - :return DSI_raw: - :return gOSI_raw: - :return gDSI_raw: - :return OSI_ele: - :return DSI_ele: - :return gOSI_ele: - :return gDSI_ele: - :return OSI_rec: - :return DSI_rec: - :return gOSI_rec: - :return gDSI_rec: - :return peak_dire_raw: optimal direction in tested conditions - :return vs_dire_raw: vector sum of raw direction responses - :return vs_dire_ele: vector sum of elevated direction response - :return vs_dire_rec: vactor sum of rectified direction responses - """ - - dire_tuning_2 = dire_tuning.copy() - - if response_dir == 'pos': - pass - elif response_dir == 'neg': - dire_tuning_2['resp_mean'] = -dire_tuning_2['resp_mean'] - else: - raise LookupError('Do not understand response_dir ({}). 
Should be "pos" or "neg"'.format(response_dir)) - - if np.max(dire_tuning_2['resp_mean']) <= 0.: # no positive response - return tuple([np.nan] * 16) - - else: - - dire_tuning_2['dire'] = dire_tuning_2['dire'] % 360 - arcs = np.array(list(dire_tuning_2['dire'] * np.pi / 180)) - - if np.min(dire_tuning_2['resp_mean']) < elevation_bias: - dire_tuning_2['resp_mean_ele'] = dire_tuning_2['resp_mean'] - np.min(dire_tuning_2['resp_mean']) + \ - elevation_bias - else: - dire_tuning_2['resp_mean_ele'] = dire_tuning_2['resp_mean'] - - dire_tuning_2['resp_mean_rec'] = dire_tuning_2['resp_mean'] - dire_tuning_2.loc[dire_tuning_2['resp_mean'] < 0, 'resp_mean_rec'] = 0 - - - # get orientation indices - peak_dire_raw_ind = dire_tuning_2['resp_mean'].argmax() - peak_dire_raw = dire_tuning_2.loc[peak_dire_raw_ind, 'dire'] - oppo_dire_ind = (dire_tuning_2['dire'] == ((peak_dire_raw + 180) % 360)).argmax() - othr_dire_ind_1 = (dire_tuning_2['dire'] == ((peak_dire_raw + 90) % 360)).argmax() - othr_dire_ind_2 = (dire_tuning_2['dire'] == ((peak_dire_raw - 90) % 360)).argmax() - - # get raw os tuning properties - peak_resp_raw = dire_tuning_2.loc[peak_dire_raw_ind, 'resp_mean'] - oppo_resp_raw = dire_tuning_2.loc[oppo_dire_ind, 'resp_mean'] - othr_resp_raw_1 = dire_tuning_2.loc[othr_dire_ind_1, 'resp_mean'] - othr_resp_raw_2 = dire_tuning_2.loc[othr_dire_ind_2, 'resp_mean'] - - # print('aaa, {}, {}, {}, {}'.format(peak_resp_raw, oppo_resp_raw, othr_resp_raw_1, othr_resp_raw_2)) - - othr_resp_raw = (othr_resp_raw_1 + othr_resp_raw_2) / 2. - - # print('othr_resp_raw, {}'.format(othr_resp_raw)) - # print('bbb, {}'.format(peak_resp_raw - othr_resp_raw)) - # print('ccc, {}'.format(peak_resp_raw + othr_resp_raw)) - - OSI_raw = (peak_resp_raw - othr_resp_raw) / (peak_resp_raw + othr_resp_raw) - DSI_raw = (peak_resp_raw - oppo_resp_raw) / (peak_resp_raw + oppo_resp_raw) - - resp_raw = np.array(list(dire_tuning_2['resp_mean'])) - vs_raw = np.sum(resp_raw * np.exp(1j * arcs)) / np.sum(resp_raw) - vs_dire_raw = (np.angle(vs_raw) * 180 / np.pi) % 360 - gDSI_raw = np.abs(vs_raw) - - vs2_raw = np.sum(resp_raw * np.exp(1j * 2 * arcs)) / np.sum(resp_raw) - gOSI_raw = np.abs(vs2_raw) - - # get elevated os properties - peak_resp_ele = dire_tuning_2.loc[peak_dire_raw_ind, 'resp_mean_ele'] - oppo_resp_ele = dire_tuning_2.loc[oppo_dire_ind, 'resp_mean_ele'] - othr_resp_ele_1 = dire_tuning_2.loc[othr_dire_ind_1, 'resp_mean_ele'] - othr_resp_ele_2 = dire_tuning_2.loc[othr_dire_ind_2, 'resp_mean_ele'] - - othr_resp_ele = (othr_resp_ele_1 + othr_resp_ele_2) / 2. - OSI_ele = (peak_resp_ele - othr_resp_ele) / (peak_resp_ele + othr_resp_ele) - DSI_ele = (peak_resp_ele - oppo_resp_ele) / (peak_resp_ele + oppo_resp_ele) - - resp_ele = np.array(list(dire_tuning_2['resp_mean_ele'])) - vs_ele = np.sum(resp_ele * np.exp(1j * arcs)) / np.sum(resp_ele) - vs_dire_ele = (np.angle(vs_ele) * 180 / np.pi) % 360 - gDSI_ele = np.abs(vs_ele) - - vs2_ele = np.sum(resp_ele * np.exp(1j * 2 * arcs)) / np.sum(resp_ele) - gOSI_ele = np.abs(vs2_ele) - - # get rectified os tuning properties - peak_resp_rec = dire_tuning_2.loc[peak_dire_raw_ind, 'resp_mean_rec'] - oppo_resp_rec = dire_tuning_2.loc[oppo_dire_ind, 'resp_mean_rec'] - othr_resp_rec_1 = dire_tuning_2.loc[othr_dire_ind_1, 'resp_mean_rec'] - othr_resp_rec_2 = dire_tuning_2.loc[othr_dire_ind_2, 'resp_mean_rec'] - - othr_resp_rec = (othr_resp_rec_1 + othr_resp_rec_2) / 2. 
-            OSI_rec = (peak_resp_rec - othr_resp_rec) / (peak_resp_rec + othr_resp_rec)
-            DSI_rec = (peak_resp_rec - oppo_resp_rec) / (peak_resp_rec + oppo_resp_rec)
-
-            resp_rec = np.array(list(dire_tuning_2['resp_mean_rec']))
-            vs_rec = np.sum(resp_rec * np.exp(1j * arcs)) / np.sum(resp_rec)
-            vs_dire_rec = (np.angle(vs_rec) * 180 / np.pi) % 360
-            gDSI_rec = np.abs(vs_rec)
-
-            vs2_rec = np.sum(resp_rec * np.exp(1j * 2 * arcs)) / np.sum(resp_rec)
-            gOSI_rec = np.abs(vs2_rec)
-
-            return OSI_raw, DSI_raw, gOSI_raw, gDSI_raw, OSI_ele, DSI_ele, gOSI_ele, gDSI_ele, OSI_rec, DSI_rec, \
-                   gOSI_rec, gDSI_rec, peak_dire_raw, vs_dire_raw, vs_dire_ele, vs_dire_rec
-
-    @staticmethod
-    def get_tf_tuning_properties(tf_tuning, response_dir='pos', elevation_bias=0.):
-        """
-
-        :param tf_tuning:
-        :param response_dir: str, 'pos' or 'neg'
-        :param elevation_bias: float, minimum response after elevation.
-        :return peak_tf_raw: tf condition (presented) with maximum response
-        :return weighted_tf_raw: average of tf conditions weighted by response
-        :return weighted_tf_log_raw: average of tf conditions weighted by response (on log scale)
-        :return weighted_tf_ele:
-        :return weighted_tf_log_ele:
-        :return weighted_tf_rec:
-        :return weighted_tf_log_rec:
-        """
-
-        tf_tuning_2 = tf_tuning.copy()
-
-        if response_dir == 'pos':
-            pass
-        elif response_dir == 'neg':
-            tf_tuning_2['resp_mean'] = -tf_tuning_2['resp_mean']
-        else:
-            raise LookupError('Do not understand response_dir ({}). Should be "pos" or "neg"'.format(response_dir))
-
-        if np.max(tf_tuning_2['resp_mean']) <= 0.:
-            return tuple([np.nan] * 7)
-        else:
-
-            if np.min(tf_tuning_2['resp_mean']) < elevation_bias:
-                tf_tuning_2['resp_mean_ele'] = tf_tuning_2['resp_mean'] - np.min(tf_tuning_2['resp_mean']) \
-                                               + elevation_bias
-            else:
-                tf_tuning_2['resp_mean_ele'] = tf_tuning_2['resp_mean']
-
-            tf_tuning_2['resp_mean_rec'] = tf_tuning_2['resp_mean']
-            tf_tuning_2.loc[tf_tuning_2['resp_mean'] < 0, 'resp_mean_rec'] = 0.
-
-            peak_tf_raw_ind = tf_tuning_2['resp_mean'].argmax()
-            peak_tf_raw = tf_tuning_2.loc[peak_tf_raw_ind, 'tf']
-
-            tfs = tf_tuning_2['tf'].astype(np.float)
-            tfs_log = np.log(tfs) / np.log(2)
-            # print('aaa, {}'.format(tfs_log))
-
-            # get raw weighted tuning
-            resp_raw = tf_tuning_2['resp_mean'].astype(np.float)
-            weighted_tf_raw = np.sum(tfs * resp_raw) / np.sum(resp_raw)
-            weighted_tf_log_raw = np.sum(tfs_log * resp_raw) / np.sum(resp_raw)
-            # print('bbb, {}'.format(weighted_tf_log_raw))
-            # print('ccc, {}'.format(resp_raw))
-            weighted_tf_log_raw = 2 ** weighted_tf_log_raw
-
-            # get elevated weighted tuning
-            resp_ele = tf_tuning_2['resp_mean_ele'].astype(np.float)
-            weighted_tf_ele = np.sum(tfs * resp_ele) / np.sum(resp_ele)
-            weighted_tf_log_ele = np.sum(tfs_log * resp_ele) / np.sum(resp_ele)
-            weighted_tf_log_ele = 2 ** weighted_tf_log_ele
-
-            # get rectified weighted tuning
-            resp_rec = tf_tuning_2['resp_mean_rec'].astype(np.float)
-            weighted_tf_rec = np.sum(tfs * resp_rec) / np.sum(resp_rec)
-            weighted_tf_log_rec = np.sum(tfs_log * resp_rec) / np.sum(resp_rec)
-            weighted_tf_log_rec = 2 ** weighted_tf_log_rec
-
-            return peak_tf_raw, weighted_tf_raw, weighted_tf_log_raw, weighted_tf_ele, weighted_tf_log_ele, \
-                   weighted_tf_rec, weighted_tf_log_rec
-
-    @staticmethod
-    def get_sf_tuning_properties(sf_tuning, response_dir='pos', elevation_bias=0.):
-        """
-
-        :param sf_tuning:
-        :param response_dir: str, 'pos' or 'neg'
-        :param elevation_bias: float, minimum response after elevation.
-        :return peak_sf_raw: sf condition (presented) with maximum response
-        :return weighted_sf_raw: average of sf conditions weighted by response
-        :return weighted_sf_log_raw: average of sf conditions weighted by response (on log scale)
-        :return weighted_sf_ele:
-        :return weighted_sf_log_ele:
-        :return weighted_sf_rec:
-        :return weighted_sf_log_rec:
-        """
-
-        sf_tuning_2 = sf_tuning.copy()
-
-        if response_dir == 'pos':
-            pass
-        elif response_dir == 'neg':
-            sf_tuning_2['resp_mean'] = -sf_tuning_2['resp_mean']
-        else:
-            raise LookupError('Do not understand response_dir ({}). Should be "pos" or "neg"'.format(response_dir))
-
-        if np.max(sf_tuning_2['resp_mean']) <= 0.:
-            return tuple([np.nan] * 7)
-        else:
-
-            if np.min(sf_tuning_2['resp_mean']) < elevation_bias:
-                sf_tuning_2['resp_mean_ele'] = sf_tuning_2['resp_mean'] - np.min(sf_tuning_2['resp_mean']) \
-                                               + elevation_bias
-            else:
-                sf_tuning_2['resp_mean_ele'] = sf_tuning_2['resp_mean']
-
-            sf_tuning_2['resp_mean_rec'] = sf_tuning_2['resp_mean']
-            sf_tuning_2.loc[sf_tuning_2['resp_mean'] < 0, 'resp_mean_rec'] = 0.
-
-            peak_sf_raw_ind = sf_tuning_2['resp_mean'].argmax()
-            peak_sf_raw = sf_tuning_2.loc[peak_sf_raw_ind, 'sf']
-
-            sfs = sf_tuning_2['sf'].astype(np.float)
-            sfs_log = np.log(sfs) / np.log(2)
-
-            # get raw weighted tuning
-            resp_raw = sf_tuning_2['resp_mean'].astype(np.float)
-            weighted_sf_raw = np.sum(sfs * resp_raw) / np.sum(resp_raw)
-            weighted_sf_log_raw = np.sum(sfs_log * resp_raw) / np.sum(resp_raw)
-            weighted_sf_log_raw = 2 ** weighted_sf_log_raw
-
-            # get elevated weighted tuning
-            resp_ele = sf_tuning_2['resp_mean_ele'].astype(np.float)
-            weighted_sf_ele = np.sum(sfs * resp_ele) / np.sum(resp_ele)
-            weighted_sf_log_ele = np.sum(sfs_log * resp_ele) / np.sum(resp_ele)
-            weighted_sf_log_ele = 2 ** weighted_sf_log_ele
-
-            # get rectified weighted tuning
-            resp_rec = sf_tuning_2['resp_mean_rec'].astype(np.float)
-            weighted_sf_rec = np.sum(sfs * resp_rec) / np.sum(resp_rec)
-            weighted_sf_log_rec = np.sum(sfs_log * resp_rec) / np.sum(resp_rec)
-            weighted_sf_log_rec = 2 ** weighted_sf_log_rec
-
-            return peak_sf_raw, weighted_sf_raw, weighted_sf_log_raw, weighted_sf_ele, weighted_sf_log_ele, \
-                   weighted_sf_rec, weighted_sf_log_rec
-
-    # @staticmethod
-    # def get_sf_tuning_properties_old(sf_tuning, response_dir='pos', is_rectify=True):
-    #     """
-    #     :param sf_tuning:
-    #     :param response_dir: str, 'pos' or 'neg'
-    #     :param is_rectify: bool, if True, responses below zero will be set as zero
-    #     :return peak_sf_raw: sf condition (presented) with maximum response
-    #     :return peak_sf_linear: average of sf conditions weighted by response amplitude
-    #     :return peak_sf_log: average of sf conditions weighted by response amplitude (on log scale)
-    #     """
-    #
-    #     if response_dir == 'pos':
-    #         pass
-    #     elif response_dir == 'neg':
-    #         sf_tuning['resp_mean'] = -sf_tuning['resp_mean']
-    #     else:
-    #         raise LookupError('Do not understand response_dir ({}). Should be "pos" or "neg"'.format(response_dir))
-    #
-    #     if is_rectify:
-    #         sf_tuning.loc[sf_tuning['resp_mean'] < 0., 'resp_mean'] = 0.
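(Editor's aside, not part of the patch.) get_sf_tuning_properties and get_tf_tuning_properties above both reduce a tuning curve to a response-weighted average of the tested conditions, computed on a linear scale and on a log2 scale (averaged in log space, then transformed back). A minimal sketch with hypothetical numbers:

```python
import numpy as np

sfs = np.array([0.01, 0.02, 0.04, 0.08, 0.16])  # tested spatial frequencies (cyc/deg)
resp = np.array([0.2, 0.9, 1.4, 0.6, 0.1])      # hypothetical mean responses

# linear-scale weighted average of the tested conditions
weighted_sf = np.sum(sfs * resp) / np.sum(resp)

# log-scale version: weight log2(sf) by response, then transform back
sfs_log = np.log(sfs) / np.log(2)
weighted_sf_log = 2 ** (np.sum(sfs_log * resp) / np.sum(resp))
```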
- # - # if np.max(sf_tuning['resp_mean']) <= 0.: - # return tuple([np.nan] * 3) - # else: - # peak_sf_raw_ind = sf_tuning['resp_mean'].argmax() - # peak_sf_raw = sf_tuning.loc[peak_sf_raw_ind, 'sf'] - # - # - # sfs = sf_tuning['sf'].astype(np.float) - # sfs_log = np.log(sfs / 0.01) / np.log(2) - # resp = sf_tuning['resp_mean'].astype(np.float) - # - # peak_sf_linear = np.sum(sfs * resp) / np.sum(resp) - # - # peak_sf_log = np.sum(sfs_log * resp) / np.sum(resp) - # peak_sf_log = 2 ** peak_sf_log * 0.01 - # - # return peak_sf_raw, peak_sf_linear, peak_sf_log - - def plot_sf_tf_matrix(self, response_dir='pos', axis=None, cmap='RdBu_r', vmax=4, vmin=-4): - - if axis is None: - f = plt.figure() - axis = f.add_subplot(111) - - if response_dir == 'pos': - sftf, sfs, tfs = self.get_sf_tf_matrix(response_dir='pos') - elif response_dir == 'neg': - sftf, sfs, tfs = self.get_sf_tf_matrix(response_dir='neg') - else: - raise ValueError('Do not understand "response_dir" ({}). Should be "pos" or "neg".'.format(response_dir)) - - axis.imshow(sftf, cmap=cmap, vmax=vmax, vmin=vmin, interpolation='nearest') - axis.set_yticks(range(len(sfs))) - axis.set_yticklabels(sfs) - axis.set_xticks(range(len(tfs))) - axis.set_xticklabels(tfs) - axis.tick_params(length=0) - - def plot_dire_tuning(self, axis=None, response_dir='pos', is_collapse_sf=True, is_collapse_tf=False, - trace_color='#ff0000', postprocess='raw', is_plot_errbar=False, - is_normalize=False, **kwargs): - """ - - :param axis: - :param response_dir: - :param is_collapse_sf: - :param is_collapse_tf: - :param trace_color: - :param is_normalize: - :param postprocess: str, 'raw', 'elevate' or 'rectify' - 'raw': plot raw response - 'elevate': if there is response below zero, how curve will be elevated so the minimum - is zero - 'rectify': if there is response below zero, those responses will be set as zero. - :return: - """ - - if axis is None: - f = plt.figure() - axis = f.add_axes([0, 0, 1, 1], projection='polar') - - dire_tuning = self.get_dire_tuning(response_dir=response_dir, is_collapse_sf=is_collapse_sf, - is_collapse_tf=is_collapse_tf) - - dire_tuning = dire_tuning.sort_values(by='dire') - dire_tuning = dire_tuning.append(dire_tuning.iloc[0, :]) - dire_tuning['dire'] = dire_tuning['dire'] * np.pi / 180. - - if response_dir == 'neg': - dire_tuning['resp_mean'] = -dire_tuning['resp_mean'] - - # bias = -np.min(dire_tuning['resp_mean']) - # - # # print('bias: {}'.format(bias)) - # - # resp = dire_tuning['resp_mean'] + bias - - resp = dire_tuning['resp_mean'] - bias = 0. - - if postprocess == 'raw': - pass - elif postprocess == 'elevate': - if np.min(resp) < 0.: - resp = resp - np.min(resp) - bias = -np.min(resp) - elif postprocess == 'rectify': - resp[resp < 0] = 0 - else: - raise LookupError('do not understand "postprocess": ({}). should be "raw", ' - '"elevate" or "rectify".'.format(postprocess)) - - if is_normalize: - if is_plot_errbar: - raise ValueError('Cannot plot normalized tuning curve with error bar.') - else: - resp = resp / np.max(resp) - - if is_plot_errbar: - y1 = np.array(resp - dire_tuning['resp_stdev']) - y1[y1 < 0.] = 0. - y2 = np.array(resp + dire_tuning['resp_stdev']) - y2[y2 < 0.] = 0. 
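(Editor's aside, not part of the patch.) The 'raw'/'elevate'/'rectify' postprocess options documented in plot_dire_tuning above amount to two simple array treatments of a tuning curve that dips below zero, sketched here with hypothetical values:

```python
import numpy as np

resp = np.array([-0.4, 0.2, 1.1, 0.5, -0.1])  # hypothetical curve with negative responses

# 'elevate': shift the whole curve so its minimum is zero (only if it dips below zero)
elevated = resp - resp.min() if resp.min() < 0. else resp

# 'rectify': clip negative responses to zero, leaving the rest untouched
rectified = np.where(resp < 0., 0., resp)
```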
- axis.fill_between(x=np.array(dire_tuning['dire']), y1=y1, y2=y2, - edgecolor='none', facecolor='#cccccc') - - axis.plot(dire_tuning['dire'], resp, '-', color=trace_color, **kwargs) - - axis.set_xticklabels([]) - ylim = axis.get_ylim() - ymax = np.ceil(ylim[1] * 100) / 100 - axis.set_ylim([0, ymax]) - axis.set_yticks([ymax]) - - return ymax - - -if __name__ == '__main__': - plt.ioff() - # ===================================================================== - # f = h5py.File(r"F:\data2\chandelier_cell_project\M441626\2019-04-03-deepscope\190403_M441626_110.nwb", 'r') - # f = h5py.File(r"G:\190410_M439943_110.nwb", 'r') - # f = h5py.File(r"G:\repacked\190410_M439943_110_repacked.nwb", 'r') - f = h5py.File(r"G:\repacked\180323_M360495_110_repacked.nwb", 'r') - dgcrm = get_dgc_response_matrix_from_nwb(f['analysis/response_table_001_DriftingGratingCircleRetinotopicMapping/plane0'], - roi_ind=0, - trace_type='sta_f_center_raw') - - dgcrt_zscore, _, _, _ = dgcrm.get_df_response_table(response_win=[0., 1.], baseline_win=[-0.5, 0.]) - - dgcrt_zscore.plot_dire_tuning(axis=None, response_dir='pos', is_collapse_sf=True, is_collapse_tf=False, - postprocess='elevate') - plt.show() - - dire_tuning = dgcrt_zscore.get_dire_tuning(response_dir='pos', is_collapse_sf=True, is_collapse_tf=False) - print(dire_tuning) - _ = DriftingGratingResponseTable.get_dire_tuning_properties(dire_tuning=dire_tuning, - response_dir='pos', - elevation_bias=0.) - OSI_raw, DSI_raw, gOSI_raw, gDSI_raw, OSI_ele, DSI_ele ,gOSI_ele, gDSI_ele, OSI_rec, DSI_rec, \ - gOSI_rec, gDSI_rec, peak_dire_raw, vs_dire_raw, vs_dire_ele, vs_dire_rec = _ - - print('\nOSI_raw: {}'.format(OSI_raw)) - print('DSI_raw: {}'.format(DSI_raw)) - print('gOSI_raw: {}'.format(gOSI_raw)) - print('gDSI_raw: {}'.format(gDSI_raw)) - print('\nOSI_ele: {}'.format(OSI_ele)) - print('DSI_ele: {}'.format(DSI_ele)) - print('gOSI_ele: {}'.format(gOSI_ele)) - print('gDSI_ele: {}'.format(gDSI_ele)) - print('\nOSI_rec: {}'.format(OSI_rec)) - print('DSI_rec: {}'.format(DSI_rec)) - print('gOSI_rec: {}'.format(gOSI_rec)) - print('gDSI_rec: {}'.format(gDSI_rec)) - print('\npeak_dire_raw: {}'.format(peak_dire_raw)) - print('\nvs_dire_raw: {}'.format(vs_dire_raw)) - print('\nvs_dire_ele: {}'.format(vs_dire_ele)) - print('\nvs_dire_rec: {}'.format(vs_dire_rec)) - - - sf_tuning = dgcrt_zscore.get_sf_tuning(response_dir='pos', is_collapse_tf=False, is_collapse_dire=False) - print - print(sf_tuning) - _ = DriftingGratingResponseTable.get_sf_tuning_properties(sf_tuning=sf_tuning, response_dir='pos', - elevation_bias=0.) - peak_sf_raw, weighted_sf_raw, weighted_sf_log_raw, weighted_sf_ele, weighted_sf_log_ele, \ - weighted_sf_rec, weighted_sf_log_rec = _ - print('\npeak_sf_raw: {}'.format(peak_sf_raw)) - print('weighted_sf_raw: {}'.format(weighted_sf_raw)) - print('weighted_sf_log_raw: {}'.format(weighted_sf_log_raw)) - print('weighted_sf_ele: {}'.format(weighted_sf_ele)) - print('weighted_sf_log_ele: {}'.format(weighted_sf_log_ele)) - print('weighted_sf_rec: {}'.format(weighted_sf_rec)) - print('weighted_sf_log_rec: {}'.format(weighted_sf_log_rec)) - - tf_tuning = dgcrt_zscore.get_tf_tuning(response_dir='pos', is_collapse_sf=False, is_collapse_dire=False) - print - print(tf_tuning) - _ = DriftingGratingResponseTable.get_tf_tuning_properties(tf_tuning=tf_tuning, response_dir='pos', - elevation_bias=0.) 
- peak_tf_raw, weighted_tf_raw, weighted_tf_log_raw, weighted_tf_ele, weighted_tf_log_ele, \ - weighted_tf_rec, weighted_tf_log_rec = _ - print('\npeak_tf_raw: {}'.format(peak_tf_raw)) - print('weighted_tf_raw: {}'.format(weighted_tf_raw)) - print('weighted_tf_log_raw: {}'.format(weighted_tf_log_raw)) - print('weighted_tf_ele: {}'.format(weighted_tf_ele)) - print('weighted_tf_log_ele: {}'.format(weighted_tf_log_ele)) - print('weighted_tf_rec: {}'.format(weighted_tf_rec)) - print('weighted_tf_log_rec: {}'.format(weighted_tf_log_rec)) - - # ===================================================================== - - # ===================================================================== - # f = h5py.File(r"E:\data2\2015-07-02-150610-M160809-2P_analysis\cells_test.hdf5") - # STRF = load_STRF_FromH5(f['cell0003']['spatial_temporal_receptive_field']) - # ampRFON, ampRFOFF = STRF.get_amplitude_receptive_field() - # - # print ampRFON.sign - # print ampRFOFF.get_weighted_mask()[7,9] - # - # plt.imshow(ampRFON.get_weighted_mask(),interpolation='nearest') - # plt.show() - # ===================================================================== - - # ===================================================================== - # f = h5py.File(r"E:\data2\2015-07-02-150610-M160809-2P_analysis\cells_test.hdf5") - # STRF = load_STRF_FromH5(f['cell0003']['spatial_temporal_receptive_field']) - # zscoreRFON, zscoreRFOFF = STRF.get_zscore_receptive_field() - # - # print zscoreRFON.sign - # print zscoreRFOFF.get_weighted_mask()[7,9] - # - # plt.imshow(zscoreRFON.get_weighted_mask(),interpolation='nearest') - # plt.show() - # ===================================================================== - - # ===================================================================== - # f = h5py.File(r"E:\data2\2015-07-02-150610-M160809-2P_analysis\cells_test.hdf5") - # STRF = load_STRF_FromH5(f['cell0003']['spatial_temporal_receptive_field']) - # zscoreRFON, zscoreRFOFF = STRF.get_amplitude_receptive_field() - # - # zscoreRFON.interpolate(10) - # - # plt.imshow(zscoreRFON.get_weighted_mask(),interpolation='nearest') - # plt.show() - # ===================================================================== - - # ===================================================================== - # f = h5py.File(r"E:\data2\2015-07-02-150610-M160809-2P_analysis\cells_test.hdf5") - # STRF = load_STRF_FromH5(f['cell0003']['spatial_temporal_receptive_field']) - # STRF.shrink([-10,10],None) - # print np.unique(np.array(STRF.get_locations())[:,0]) - # STRF.shrink(None,[0,20]) - # print np.unique(np.array(STRF.get_locations())[:,1]) - # ===================================================================== - - # ===================================================================== - # dfile = h5py.File(r"G:\2016-08-15-160815-M238599-wf2p-Retinotopy\sparse_noise_2p\cells_refined.hdf5", 'r') - # strf = SpatialTemporalReceptiveField.from_h5_group(dfile['cell0519']['spatial_temporal_receptive_field']) - # - # rf_on, rf_off, rf_all = strf.get_zscore_thresholded_receptive_fields(timeWindow=(0., 0.3), thr_ratio=0.4, - # filter_sigma=1., interpolate_rate=10, - # absolute_thr=0.8) - # - # peak_amplitude = max([np.amax(rf_on.get_weighted_mask()), np.amax(rf_off.get_weighted_mask())]) - # - # f = plt.figure(figsize=(6, 8)) - # ax = f.add_subplot(111) - # rf_on.plot_contour(ax, peak_amplitude=peak_amplitude, level_num=10, linewidths=1.5) - # rf_off.plot_contour(ax, peak_amplitude=peak_amplitude, level_num=10, linewidths=1.5) - # plt.show() - - # 
===================================================================== - print '\nfor debug...' + print('for debug...') \ No newline at end of file diff --git a/corticalmapping/VasculatureMapMatching.py b/corticalmapping/VasculatureMapMatching.py index 2bb2cb0..e911650 100644 --- a/corticalmapping/VasculatureMapMatching.py +++ b/corticalmapping/VasculatureMapMatching.py @@ -28,8 +28,8 @@ import tifffile as tf import corticalmapping.core.PlottingTools as pt -try: import cv2; from core.ImageAnalysis import rigid_transform_cv2 as rigid_transform -except ImportError as e: print e; from core.ImageAnalysis import rigid_transform as rigid_transform +try: import cv2; from .core.ImageAnalysis import rigid_transform_cv2 as rigid_transform +except ImportError as e: print(e); from .core.ImageAnalysis import rigid_transform as rigid_transform class AppForm(QMainWindow): @@ -81,7 +81,7 @@ def __init__(self, parent=None): def save_alignment_json(self): - path = unicode(QFileDialog.getSaveFileName(self, + path = str(QFileDialog.getSaveFileName(self, 'Save file', self.currSaveFolder, '*.json')) @@ -177,7 +177,7 @@ def get_RPath(self): try: if len(fnames) == 0: # no file is chosen - print "no file is chosen! Setting reference map as None..." + print("no file is chosen! Setting reference map as None...") self.textbrowser_RPath.clear() self.ReferenceVasMap = None @@ -204,7 +204,7 @@ def get_RPath(self): self.ReferenceVasMap = pt.merge_normalized_images([currMap[0]]) self.textbrowser_RPath.setText(filePath) else: - print 'Can not read reference map '+filePath + print('Can not read reference map '+filePath) self.textbrowser_RPath.clear() self.ReferenceVasMap = None @@ -226,21 +226,21 @@ def get_RPath(self): elif 'JCam' in fileName: currMap, _ = ft.importRawJCam(filePath) else: - print 'Can not read '+filePath + print('Can not read '+filePath) mapList.append(currMap[0].astype(np.float32)) if len(mapList) == 0: - print "no file can be read! Setting reference map as None..." + print("no file can be read! Setting reference map as None...") self.textbrowser_RPath.clear() self.ReferenceVasMap = None else: self.ReferenceVasMap = pt.merge_normalized_images(mapList).astype(np.float32) self.textbrowser_RPath.setText(displayText) - except Exception, e: - print e, '\n\n' - print 'Can not load reference Map! Setting it as None...' + except Exception as e: + print(e, '\n\n') + print('Can not load reference Map! Setting it as None...') self.textbrowser_RPath.clear() self.ReferenceVasMap = None @@ -268,7 +268,7 @@ def get_MPath(self): try: if len(fnames) == 0: # no file is chosen - print "no file is chosen! Setting matching map as None..." + print("no file is chosen! Setting matching map as None...") self.textbrowser_MPath.clear() self.MatchingVasMap = None self.MatchingVasMapRaw = None @@ -303,7 +303,7 @@ def get_MPath(self): self.MatchingVasMapRaw = currMap[0] self.textbrowser_MPath.setText(filePath) else: - print 'Can not read matching map '+filePath + print('Can not read matching map '+filePath) self.textbrowser_MPath.clear() self.MatchingVasMap = None self.MatchingVasMapAfterChange = None @@ -325,12 +325,12 @@ def get_MPath(self): elif 'JCam' in fileName: currMap, _ = ft.importRawJCam(filePath) else: - print 'Can not read '+filePath + print('Can not read '+filePath) mapList.append(currMap[0].astype(np.float32)) if len(mapList) == 0: - print "no file can be read! Setting matching map as None..." + print("no file can be read! 
Setting matching map as None...") self.textbrowser_MPath.clear() self.MatchingVasMap = None self.MatchingVasMapRaw = None @@ -341,9 +341,9 @@ def get_MPath(self): self.textbrowser_MPath.setText(displayText) self.MatchingVasMapAfterChange = None - except Exception, e: - print e, '\n\n' - print 'Can not load matching Map! Setting it as None...' + except Exception as e: + print(e, '\n\n') + print('Can not load matching Map! Setting it as None...') self.textbrowser_MPath.clear() self.MatchingVasMap = None self.MatchingVasMapRaw = None diff --git a/corticalmapping/VisualStim.py b/corticalmapping/VisualStim.py index 2b96398..c4f0584 100644 --- a/corticalmapping/VisualStim.py +++ b/corticalmapping/VisualStim.py @@ -16,19 +16,19 @@ from random import shuffle import socket -import tifffile as tf -import core.FileTools as ft -import core.ImageAnalysis as ia +#import tifffile as tf +from . import core.FileTools as ft +from . import core.ImageAnalysis as ia -from zro import RemoteObject, Proxy +#from zro import RemoteObject, Proxy try: import toolbox.IO.nidaq as iodaq except ImportError as e: - print e - print 'import iodaq from aibs package...' + print(e) + print('import iodaq from aibs package...') try: import aibs.iodaq as iodaq - except ImportError as er: print er + except ImportError as er: print(er) @@ -62,7 +62,7 @@ def analyze_frames(ts, refreshRate, checkPoint=(0.02, 0.033, 0.05, 0.1)): frameNumber = len(frameDuration[frameDuration>checkNumber]) frame_stats += 'Number of frames longer than %d ms: %d; %.2f%% \n' % (round(checkNumber*1000), frameNumber, round(frameNumber*10000/(len(ts)-1))/100) - print frame_stats + print(frame_stats) return frameDuration, frame_stats @@ -84,7 +84,7 @@ def noise_movie(frameFilter, widthFilter, heightFilter, isplot = False): filterXY = filterX * filterY - for i in xrange(rawMovFFT.shape[0]): + for i in range(rawMovFFT.shape[0]): rawMovFFT[i] = frameFilter[i]* (rawMovFFT[i] * filterXY) @@ -99,7 +99,7 @@ def noise_movie(frameFilter, widthFilter, heightFilter, isplot = False): noise_movie = ((filteredMov - np.amin(filteredMov)) / rangeFilteredMov) * 2 - 1 if isplot: - tf.imshow(noise_movie, vmin=-1, vmax=1, cmap='gray') + print('no tiffs 4 u')# tf.imshow(noise_movie, vmin=-1, vmax=1, cmap='gray') return noise_movie @@ -118,7 +118,7 @@ def generate_filter(length, # length of filter filterArray = np.ones(length) - for i in xrange(len(freqs)): + for i in range(len(freqs)): if ((freqs[i] > 0) and (freqs[i] < Flow) or (freqs[i] > Fhigh)) or \ ((freqs[i] < 0) and (freqs[i] > -Flow) or (freqs[i] < -Fhigh)): filterArray[i] = 0 @@ -129,7 +129,7 @@ def generate_filter(length, # length of filter filterArray = (filterArray - np.amin(filterArray)) / (np.amax(filterArray) - np.amin(filterArray)) elif mode == 'box': filterArray[0] = 0 - else: raise NameError, 'Variable "mode" should be either "1/f" or "box"!' + else: raise NameError('Variable "mode" should be either "1/f" or "box"!') if Flow == 0: filterArray[0] = 1 @@ -143,10 +143,10 @@ def lookup_image(img, lookupI, lookupJ): """ if not img.shape == lookupI.shape: - raise LookupError, 'The image and lookupI should have same size!!' + raise LookupError('The image and lookupI should have same size!!') if not lookupI.shape == lookupJ.shape: - raise LookupError, 'The lookupI and lookupJ should have same size!!' 
+ raise LookupError('The lookupI and lookupJ should have same size!!') img2 = np.zeros(img.shape) @@ -184,7 +184,7 @@ def get_warped_square(degCorX,degCorY,center,width,height,ori,foregroundColor=1, frame = np.ones(degCorX.shape,dtype=np.float32)*backgroundColor - if ori < 0. or ori > 180.: raise ValueError, 'ori should be between 0 and 180.' + if ori < 0. or ori > 180.: raise ValueError('ori should be between 0 and 180.') k1 = np.tan(ori*np.pi/180.) k2 = np.tan((ori+90.)*np.pi/180.) @@ -209,9 +209,9 @@ def circle_mask(map_x, map_y, center, radius): :return: binary mask for the circle, value range [0., 1.] """ - if map_x.shape != map_y.shape: raise ValueError, 'map_x and map_y should have same shape!' + if map_x.shape != map_y.shape: raise ValueError('map_x and map_y should have same shape!') - if len(map_x.shape) != 2: raise ValueError, 'map_x and map_y should be 2-d!!' + if len(map_x.shape) != 2: raise ValueError('map_x and map_y should be 2-d!!') circle_mask = np.zeros(map_x.shape, dtype = np.uint8) for (i, j), value in np.ndenumerate(circle_mask): @@ -235,9 +235,9 @@ def get_grating(map_x, map_y, ori=0., spatial_freq=0.1, center=(0.,60.), phase=0 :return: a frame as floating point 2-d array with grating, value range [0., 1.] """ - if map_x.shape != map_y.shape: raise ValueError, 'map_x and map_y should have same shape!' + if map_x.shape != map_y.shape: raise ValueError('map_x and map_y should have same shape!') - if len(map_x.shape) != 2: raise ValueError, 'map_x and map_y should be 2-d!!' + if len(map_x.shape) != 2: raise ValueError('map_x and map_y should be 2-d!!') map_x_h = np.array(map_x, dtype = np.float32) map_y_h = np.array(map_y, dtype = np.float32) @@ -276,7 +276,7 @@ def __init__(self, refreshRate = 60.): if resolution[0] % downSampleRate != 0 or resolution[1] % downSampleRate != 0: - raise ArithmeticError, 'Resolution pixel numbers are not divisible by down sampling rate' + raise ArithmeticError('Resolution pixel numbers are not divisible by down sampling rate') self.resolution = resolution self.dis = dis @@ -304,7 +304,7 @@ def __init__(self, resolution[0]=self.resolution[0]/downSampleRate resolution[1]=self.resolution[1]/downSampleRate - mapcorX, mapcorY = np.meshgrid(range(resolution[1]), range(resolution[0])) + mapcorX, mapcorY = np.meshgrid(list(range(resolution[1])), list(range(resolution[0]))) if self.visualField == "left": mapX = np.linspace(self.C2Acm, -1.0 * self.C2Pcm, resolution[1]) @@ -331,7 +331,7 @@ def set_downsample_rate(self, downSampleRate): if self.resolution[0] % downSampleRate != 0 or self.resolution[1] % downSampleRate != 0: - raise ArithmeticError, 'resolutionolution pixel numbers are not divisible by down sampling rate' + raise ArithmeticError('resolutionolution pixel numbers are not divisible by down sampling rate') self.downSampleRate=downSampleRate @@ -339,7 +339,7 @@ def set_downsample_rate(self, downSampleRate): resolution[0]=self.resolution[0]/downSampleRate resolution[1]=self.resolution[1]/downSampleRate - mapcorX, mapcorY = np.meshgrid(range(resolution[1]), range(resolution[0])) + mapcorX, mapcorY = np.meshgrid(list(range(resolution[1])), list(range(resolution[0]))) if self.visualField == "left": mapX = np.linspace(self.C2Acm, -1.0 * self.C2Pcm, resolution[1]) @@ -362,7 +362,7 @@ def remap(self): resolution[0]=self.resolution[0]/self.downSampleRate resolution[1]=self.resolution[1]/self.downSampleRate - mapcorX, mapcorY = np.meshgrid(range(resolution[1]), range(resolution[0])) + mapcorX, mapcorY = np.meshgrid(list(range(resolution[1])), 
list(range(resolution[0]))) newmapX = np.zeros(resolution,dtype=np.float16) newmapY = np.zeros(resolution,dtype=np.float16) @@ -384,7 +384,7 @@ def plot_map(self): resolution[0]=self.resolution[0]/self.downSampleRate resolution[1]=self.resolution[1]/self.downSampleRate - mapcorX, mapcorY = np.meshgrid(range(resolution[1]), range(resolution[0])) + mapcorX, mapcorY = np.meshgrid(list(range(resolution[1])), list(range(resolution[0]))) f1 = plt.figure(figsize=(12,5)) f1.suptitle('Remap monitor', fontsize=14, fontweight='bold') @@ -392,7 +392,7 @@ def plot_map(self): OMX = plt.subplot(221) OMX.set_title('Linear Map X (cm)') currfig = plt.imshow(self.linCorX) - levels1 = range(int(np.floor(self.linCorX.min() / 10) * 10), int((np.ceil(self.linCorX.max() / 10)+1) * 10), 10) + levels1 = list(range(int(np.floor(self.linCorX.min() / 10) * 10), int((np.ceil(self.linCorX.max() / 10)+1) * 10), 10)) im1 =plt.contour(mapcorX, mapcorY, self.linCorX, levels1, colors = 'k', linewidth = 2) # plt.clabel(im1, levels1, fontsize = 10, inline = 1, fmt='%2.1f') f1.colorbar(currfig,ticks=levels1) @@ -401,7 +401,7 @@ def plot_map(self): OMY = plt.subplot(222) OMY.set_title('Linear Map Y (cm)') currfig = plt.imshow(self.linCorY) - levels2 = range(int(np.floor(self.linCorY.min() / 10) * 10), int((np.ceil(self.linCorY.max() / 10)+1) * 10), 10) + levels2 = list(range(int(np.floor(self.linCorY.min() / 10) * 10), int((np.ceil(self.linCorY.max() / 10)+1) * 10), 10)) im2 =plt.contour(mapcorX, mapcorY, self.linCorY, levels2, colors = 'k', linewidth = 2) # plt.clabel(im2, levels2, fontsize = 10, inline = 1, fmt='%2.2f') f1.colorbar(currfig,ticks=levels2) @@ -410,7 +410,7 @@ def plot_map(self): NMX = plt.subplot(223) NMX.set_title('Spherical Map X (deg)') currfig = plt.imshow(self.degCorX) - levels3 = range(int(np.floor(self.degCorX.min() / 10) * 10), int((np.ceil(self.degCorX.max() / 10)+1) * 10), 10) + levels3 = list(range(int(np.floor(self.degCorX.min() / 10) * 10), int((np.ceil(self.degCorX.max() / 10)+1) * 10), 10)) im3 =plt.contour(mapcorX, mapcorY, self.degCorX, levels3, colors = 'k', linewidth = 2) # plt.clabel(im3, levels3, fontsize = 10, inline = 1, fmt='%2.1f') f1.colorbar(currfig,ticks=levels3) @@ -419,7 +419,7 @@ def plot_map(self): NMY = plt.subplot(224) NMY.set_title('Spherical Map Y (deg)') currfig = plt.imshow(self.degCorY) - levels4 = range(int(np.floor(self.degCorY.min() / 10) * 10), int((np.ceil(self.degCorY.max() / 10)+1) * 10), 10) + levels4 = list(range(int(np.floor(self.degCorY.min() / 10) * 10), int((np.ceil(self.degCorY.max() / 10)+1) * 10), 10)) im4 =plt.contour(mapcorX, mapcorY, self.degCorY, levels4, colors = 'k', linewidth = 2) # plt.clabel(im4, levels4, fontsize = 10, inline = 1, fmt='%2.1f') f1.colorbar(currfig,ticks=levels4) @@ -450,13 +450,13 @@ def generate_Lookup_table(self): lookupI = np.zeros(degCorX.shape).astype(np.int32) lookupJ = np.zeros(degCorX.shape).astype(np.int32) - for j in xrange(lookupI.shape[1]): + for j in range(lookupI.shape[1]): currDegX = degCorX[0,j] diffDegX = degNoWarpCorX[0,:] - currDegX IndJ = np.argmin(np.abs(diffDegX)) lookupJ[:,j] = IndJ - for i in xrange(lookupI.shape[0]): + for i in range(lookupI.shape[0]): currDegY = degCorY[i,j] diffDegY = degNoWarpCorY[:,IndJ] - currDegY indI = np.argmin(np.abs(diffDegY)) @@ -524,7 +524,7 @@ def get_center(self): centerH = screen_height - self.height_pixel / 2 else: - raise LookupError, '"position" attributor should be "northeast", "southeast", "northwest" and "southwest"' + raise LookupError('"position" attributor should be 
"northeast", "southeast", "northwest" and "southwest"') return int(centerW), int(centerH) @@ -538,7 +538,7 @@ def get_frames(self): refreshRate = self.monitor.refreshRate if refreshRate % self.freq != 0: - raise ArithmeticError, "self update frequency of should be divisible by monitor's refresh rate." + raise ArithmeticError("self update frequency of should be divisible by monitor's refresh rate.") return refreshRate/self.freq @@ -575,17 +575,17 @@ def generate_frames(self): """ place holder of function "generate_frames" for each specific stimulus """ - print 'Nothing executed! This is place holder of function "generate_frames" for each specific stimulus.' - print 'This function should return a list of tuples, each tuple represents a single frame of the stimulus and contains all the information to recreate the frame.' + print('Nothing executed! This is place holder of function "generate_frames" for each specific stimulus.') + print('This function should return a list of tuples, each tuple represents a single frame of the stimulus and contains all the information to recreate the frame.') def generate_movie(self): """ place holder of function "generate_movie" for each specific stimulus """ - print 'Nothing executed! This is place holder of function "generate_movie" for each specific stimulus.' - print 'This function should return two things:' - print 'First: a 3-d array (with format of uint8) of the stimulus to be displayed.' - print 'Second: a dictionary contain the information of this particular stimulus' + print('Nothing executed! This is place holder of function "generate_movie" for each specific stimulus.') + print('This function should return two things:') + print('First: a 3-d array (with format of uint8) of the stimulus to be displayed.') + print('Second: a dictionary contain the information of this particular stimulus') def clear(self): self.frames = None @@ -653,7 +653,7 @@ def generate_movie(self): dtype=np.float16) if not (self.coordinate == 'degree' or self.coordinate == 'linear'): - raise LookupError, 'the "coordinate" attributate show be either "degree" or "linear"' + raise LookupError('the "coordinate" attributate show be either "degree" or "linear"') for i in range(len(self.frames)): currFrame = self.frames[i] @@ -668,7 +668,7 @@ def generate_movie(self): fullSequence[i] = currFCsequence if i in range(0, len(self.frames), len(self.frames) / 10): - print ['Generating numpy sequence: ' + str(int(100 * (i + 1) / len(self.frames))) + '%'] + print(['Generating numpy sequence: ' + str(int(100 * (i + 1) / len(self.frames))) + '%']) mondict = dict(self.monitor.__dict__) indicatordict = dict(self.indicator.__dict__) @@ -740,7 +740,7 @@ def generate_squares(self): mapY = self.monitor.linCorY else: - raise LookupError, 'the "coordinate" attributate show be either "degree" or "linear"' + raise LookupError('the "coordinate" attributate show be either "degree" or "linear"') minX = mapX.min() maxX = mapX.max() @@ -799,7 +799,7 @@ def generate_sweeps(self): mapX = self.monitor.linCorX mapY = self.monitor.linCorY else: - raise LookupError, 'the "coordinate" attributate show be either "degree" or "linear"' + raise LookupError('the "coordinate" attributate show be either "degree" or "linear"') minX = mapX.min() maxX = mapX.max() @@ -818,7 +818,7 @@ def generate_sweeps(self): stepX = np.arange(minX - sweepWidth, maxX + stepWidth, stepWidth)[::-1] # stepX = np.arange(maxX, minX - sweepWidth - stepWidth, -1 * stepWidth) else: - raise LookupError, 'attribute "direction" should be "B2U", "U2B", 
"L2R" or "R2L".' + raise LookupError('attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".') sweepTable = [] @@ -954,7 +954,7 @@ def generate_movie(self): fullSequence[i] = currNMsequence if i in range(0, len(self.frames),len(self.frames)/10): - print ['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%'] + print(['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%']) mondict=dict(self.monitor.__dict__) @@ -980,7 +980,7 @@ def set_direction(self,direction): self.direction = direction self.clear() else: - raise LookupError, 'attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".' + raise LookupError('attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".') def set_sweep_sigma(self,sweepSigma): self.sweepSigma = sweepSigma @@ -1096,7 +1096,7 @@ def generate_sweeps(self): mapY = self.monitor.linCorY else: - raise LookupError, 'the "coordinate" attributate show be either "degree" or "linear"' + raise LookupError('the "coordinate" attributate show be either "degree" or "linear"') minX = mapX.min() maxX = mapX.max() @@ -1115,7 +1115,7 @@ def generate_sweeps(self): stepX = np.arange(minX - edgeWidth - sweepWidth / 2, maxX + edgeWidth + stepWidth + sweepWidth / 2, stepWidth)[::-1] # stepX = np.arange(maxX + edgeWidth + sweepWidth / 2, minX - edgeWidth - stepWidth - sweepWidth / 2, -1 * stepWidth) else: - raise LookupError, 'attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".' + raise LookupError('attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".') sweepTable = [] @@ -1264,7 +1264,7 @@ def generate_movie(self): fullSequence[i] = currNMsequence if i in range(0, len(self.frames),len(self.frames)/10): - print ['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%'] + print(['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%']) mondict=dict(self.monitor.__dict__) @@ -1288,7 +1288,7 @@ def set_direction(self,direction): self.direction = direction self.clear() else: - raise LookupError, 'attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".' + raise LookupError('attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".') def set_sweep_sigma(self,sweepSigma): self.sweepSigma = sweepSigma @@ -1358,7 +1358,7 @@ def generate_squares(self): mapY = self.monitor.linCorY else: - raise LookupError, 'the "coordinate" attributate show be either "degree" or "linear"' + raise LookupError('the "coordinate" attributate show be either "degree" or "linear"') minX = mapX.min() maxX = mapX.max() @@ -1418,7 +1418,7 @@ def generate_sweeps(self): mapY = self.monitor.linCorY else: - raise LookupError, 'the "coordinate" attributate show be either "degree" or "linear"' + raise LookupError('the "coordinate" attributate show be either "degree" or "linear"') all_x = mapX.flatten(); all_y = mapY.flatten() rotation_matrix = np.array([[np.cos(self.rotation_angle), np.sin(self.rotation_angle)], @@ -1442,7 +1442,7 @@ def generate_sweeps(self): stepX = np.arange(min_x_r - sweepWidth, max_x_r + stepWidth, stepWidth)[::-1] # stepX = np.arange(maxX, minX - sweepWidth - stepWidth, -1 * stepWidth) else: - raise LookupError, 'attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".' 
+ raise LookupError('attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".') sweepTable = [] @@ -1578,7 +1578,7 @@ def generate_movie(self): fullSequence[i] = currNMsequence if i in range(0, len(self.frames),len(self.frames)/10): - print ['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%'] + print(['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%']) mondict=dict(self.monitor.__dict__) @@ -1604,7 +1604,7 @@ def set_direction(self,direction): self.direction = direction self.clear() else: - raise LookupError, 'attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".' + raise LookupError('attribute "direction" should be "B2U", "U2B", "L2R" or "R2L".') def set_sweep_sigma(self,sweepSigma): self.sweepSigma = sweepSigma @@ -1695,7 +1695,7 @@ def generate_frames(self): #initilize indicator color frames[:,3] = -1 - for i in xrange(frames.shape[0]): + for i in range(frames.shape[0]): # current iteration number frames[i,2] = i // iterationFrameNum @@ -1759,7 +1759,7 @@ def generate_movie(self): fullSequence[i] = currFNsequence if i in range(0, len(self.frames),len(self.frames)/10): - print ['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%'] + print(['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%']) mondict=dict(self.monitor.__dict__) indicatordict=dict(self.indicator.__dict__) @@ -1893,7 +1893,7 @@ def generate_frames(self): #initilize indicator color frames[:,3] = -1 - for i in xrange(frames.shape[0]): + for i in range(frames.shape[0]): # current iteration number frames[i,2] = i // iterationFrameNum @@ -1973,7 +1973,7 @@ def generate_movie(self): fullSequence[i] = currGNsequence if i in range(0, len(self.frames),len(self.frames)/10): - print ['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%'] + print(['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%']) mondict=dict(self.monitor.__dict__) indicatordict=dict(self.indicator.__dict__) @@ -2073,7 +2073,7 @@ def generate_frames(self): #initilize indicator color frames[:,3] = -1 - for i in xrange(frames.shape[0]): + for i in range(frames.shape[0]): # current iteration number frames[i, 2] = i // iterationFrameNum @@ -2126,7 +2126,7 @@ def generate_movie(self): mapX = self.monitor.linCorX mapY = self.monitor.linCorY else: - raise LookupError, 'the "coordinate" attributate show be either "degree" or "linear"' + raise LookupError('the "coordinate" attributate show be either "degree" or "linear"') circleMask = circle_mask(mapX,mapY,self.center,self.radius).astype(np.float16) @@ -2143,7 +2143,7 @@ def generate_movie(self): fullSequence[i] = currFCsequence if i in range(0, len(self.frames),len(self.frames)/10): - print ['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%'] + print(['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%']) mondict=dict(self.monitor.__dict__) indicatordict=dict(self.indicator.__dict__) @@ -2281,7 +2281,7 @@ def _generate_grid_points_sequence(self): allGridPoints = [[x,1] for x in gridPoints] + [[x,-1] for x in gridPoints] shuffle(allGridPoints) # remove coincident hit of same location by continuous frames - print 'removing coincident hit of same location with continuous frames:' + print('removing coincident hit of same location with continuous frames:') while True: iteration = 0 coincidentHitNum = 0 @@ -2290,7 +2290,7 @@ def _generate_grid_points_sequence(self): allGridPoints[i+1], allGridPoints[i+2] = allGridPoints[i+2], 
allGridPoints[i+1] coincidentHitNum += 1 iteration += 1 - print 'iteration:',iteration,' continous hits number:',coincidentHitNum + print('iteration:',iteration,' continous hits number:',coincidentHitNum) if coincidentHitNum == 0: break @@ -2390,7 +2390,7 @@ def generate_movie(self): fullSequence[i, indicatorHmin:indicatorHmax, indicatorWmin:indicatorWmax]=currFrame[3] if i in range(0, len(self.frames),len(self.frames)/10): - print ['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%'] + print(['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%']) #generate log dictionary mondict=dict(self.monitor.__dict__) @@ -2446,11 +2446,11 @@ def __init__(self, for tf in tf_list: period = 1. / tf if (0.05 * period) < (blockDur % period) < (0.95 * period): - print period - print blockDur % period - print 0.95 * period + print(period) + print(blockDur % period) + print(0.95 * period) error_msg = 'Duration of each block times tf '+ str(tf) + ' should be close to a whole number!' - raise ValueError, error_msg + raise ValueError(error_msg) def _generate_all_conditions(self): """ @@ -2590,7 +2590,7 @@ def generate_movie(self): if self.coordinate=='degree':corX=self.monitor.degCorX;corY=self.monitor.degCorY elif self.coordinate=='linear':corX=self.monitor.linCorX;corY=self.monitor.linCorY else: - raise LookupError, "self.coordinate should be either 'linear' or 'degree'." + raise LookupError("self.coordinate should be either 'linear' or 'degree'.") indicatorWmin=self.indicator.centerWpixel - (self.indicator.width_pixel / 2) indicatorWmax=self.indicator.centerWpixel + (self.indicator.width_pixel / 2) @@ -2624,7 +2624,7 @@ def generate_movie(self): if i in range(0, len(self.frames),len(self.frames)/10): - print ['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%'] + print(['Generating numpy sequence: '+str(int(100 * (i+1) / len(self.frames)))+'%']) @@ -2918,7 +2918,7 @@ def __init__(self, self._remote_obj = RemoteObject(rep_port=self.displayControlPort) self._remote_obj.close = self.flag_to_close() except Exception as e: - print e + print(e) # set up remote zro object for sync program if self.isRemoteSync: @@ -2927,7 +2927,7 @@ def __init__(self, if displayIteration % 1 == 0: self.displayIteration = displayIteration else: - raise ArithmeticError, "displayIteration should be a whole number." + raise ArithmeticError("displayIteration should be a whole number.") self.displayOrder = displayOrder self.logdir = logdir @@ -2954,7 +2954,7 @@ def set_any_array(self, anyArray, logDict = None): to display any numpy 3-d array. """ if len(anyArray.shape) != 3: - raise LookupError, "Input numpy array should have dimension of 3!" + raise LookupError("Input numpy array should have dimension of 3!") Vmax = np.amax(anyArray).astype(np.float32) Vmin = np.amin(anyArray).astype(np.float32) @@ -2966,7 +2966,7 @@ def set_any_array(self, anyArray, logDict = None): if type(logDict) is dict: self.sequenceLog = logDict else: - raise ValueError, '"logDict" should be a dictionary!' + raise ValueError('"logDict" should be a dictionary!') else: self.sequenceLog = {} self.clear() @@ -2991,12 +2991,12 @@ def trigger_display(self): try: refreshRate = self.sequenceLog['monitor']['refreshRate'] except KeyError: - print "No monitor refresh rate information, assuming 60Hz.\n" + print("No monitor refresh rate information, assuming 60Hz.\n") refreshRate = 60. 
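(Editor's note.) One thing this port leaves untouched: the progress-report condition `if i in range(0, len(self.frames), len(self.frames)/10):` that recurs in the generate_movie methods above. Under Python 3, `len(self.frames)/10` is true division and returns a float, so `range()` raises TypeError. A sketch of one possible fix (the `max(1, ...)` guard against short sequences is an addition, not in the original):

```python
step = max(1, len(self.frames) // 10)  # floor division keeps the step an integer
if i in range(0, len(self.frames), step):
    print('Generating numpy sequence: {}%'.format(int(100 * (i + 1) / len(self.frames))))
```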
#prepare display frames log if self.sequence is None: - raise LookupError, "Please set the sequence to be displayed!!\n" + raise LookupError("Please set the sequence to be displayed!!\n") try: sequenceFrames = self.sequenceLog['stimulation']['frames'] if self.displayOrder == -1: sequenceFrames = sequenceFrames[::-1] @@ -3005,17 +3005,17 @@ def trigger_display(self): for i in range(self.displayIteration): self.displayFrames += sequenceFrames except Exception as e: - print e - print "No frame information in sequenceLog dictionary. \nSetting displayFrames to 'None'.\n" + print(e) + print("No frame information in sequenceLog dictionary. \nSetting displayFrames to 'None'.\n") self.displayFrames = None # calculate expected display time displayTime = float(self.sequence.shape[0]) * self.displayIteration / refreshRate - print '\n Expected display time: ', displayTime, ' seconds\n' + print('\n Expected display time: ', displayTime, ' seconds\n') # generate file name self._get_file_name() - print 'File name:', self.fileName + '\n' + print('File name:', self.fileName + '\n') # ---------------------------- early preparation for display---------------------------------------------------- @@ -3056,12 +3056,12 @@ def trigger_display(self): try: self._get_file_name() - print 'File name:', self.fileName + '\n' + print('File name:', self.fileName + '\n') self.remoteSync.set_output_path(os.path.join("c:/sync/output", self.fileName + '-sync.h5'), timestamp=False) self.remoteSync.start() except Exception as err: - print "remote sync object is not started correctly. \n" + str(err) + "\n\n" + print("remote sync object is not started correctly. \n" + str(err) + "\n\n") # handle display trigger if self.isTriggered: @@ -3095,22 +3095,22 @@ def trigger_display(self): try: self.remoteSync.stop() except Exception as err: - print "remote sync object is not stopped correctly. \n" + str(err) + print("remote sync object is not stopped correctly. \n" + str(err)) # backup remote sync file try: backupFileFolder = self._get_backup_folder() - print '\nRemote sync backup file folder: ' + backupFileFolder + '\n' + print('\nRemote sync backup file folder: ' + backupFileFolder + '\n') if backupFileFolder is not None: if not (os.path.isdir(backupFileFolder)): os.makedirs(backupFileFolder) backupFilePath = os.path.join(backupFileFolder,self.fileName+'-sync.h5') time.sleep(self.remoteSyncSaveWaitTime ) # wait remote sync to finish saving self.remoteSync.copy_last_dataset(backupFilePath) - print "remote sync dataset saved successfully." + print("remote sync dataset saved successfully.") else: - print "did not find backup path, no remote sync dataset has been saved." + print("did not find backup path, no remote sync dataset has been saved.") except Exception as e: - print "remote sync dataset is not saved successfully!\n", e + print("remote sync dataset is not saved successfully!\n", e) # save display log self.save_log() @@ -3121,38 +3121,38 @@ def trigger_display(self): refreshRate=self.sequenceLog['monitor'][ 'refreshRate']) except KeyError: - print "No monitor refresh rate information, assuming 60Hz." + print("No monitor refresh rate information, assuming 60Hz.") self.frameDuration, self.frame_stats = analyze_frames(ts=self.timeStamp, refreshRate=60.) self.clear() return None try: self.remoteSync.stop() except Exception as err: - print "remote sync object is not stopped correctly. \n" + str(err) + print("remote sync object is not stopped correctly. 
\n" + str(err)) self.save_log() #analyze frames try: self.frameDuration, self.frame_stats = analyze_frames(ts = self.timeStamp, refreshRate = self.sequenceLog['monitor']['refreshRate']) except KeyError: - print "No monitor refresh rate information, assuming 60Hz." + print("No monitor refresh rate information, assuming 60Hz.") self.frameDuration, self.frame_stats = analyze_frames(ts = self.timeStamp, refreshRate = 60.) # backup remote dataset if self.isRemoteSync: try: backupFileFolder = self._get_backup_folder() - print '\nRemote sync backup file folder: ' + backupFileFolder + '\n' + print('\nRemote sync backup file folder: ' + backupFileFolder + '\n') if backupFileFolder is not None: if not (os.path.isdir(backupFileFolder)): os.makedirs(backupFileFolder) backupFilePath = os.path.join(backupFileFolder,self.fileName+'-sync.h5') time.sleep(self.remoteSyncSaveWaitTime ) # wait remote sync to finish saving self.remoteSync.copy_last_dataset(backupFilePath) - print "remote sync dataset saved successfully." + print("remote sync dataset saved successfully.") else: - print "did not find backup path, no remote sync dataset has been saved." + print("did not find backup path, no remote sync dataset has been saved.") except Exception as e: - print "remote sync dataset is not saved successfully!\n", e + print("remote sync dataset is not saved successfully!\n", e) #clear display data self.clear() @@ -3172,7 +3172,7 @@ def _wait_for_trigger(self, event): triggerTask = iodaq.DigitalInput(self.triggerNIDev, self.triggerNIPort, self.triggerNILine) triggerTask.StartTask() - print "Waiting for trigger: " + event + ' on ' + triggerTask.devstr + print("Waiting for trigger: " + event + ' on ' + triggerTask.devstr) if event == 'LowLevel': lastTTL = triggerTask.read() @@ -3180,33 +3180,33 @@ def _wait_for_trigger(self, event): lastTTL = triggerTask.read()[0] self._update_display_status() else: - if self.keepDisplay: triggerTask.StopTask(); print 'Trigger detected. Start displaying...\n\n'; return True - else: triggerTask.StopTask(); print 'Manual stop signal detected during waiting period. Stop the program.'; return False + if self.keepDisplay: triggerTask.StopTask(); print('Trigger detected. Start displaying...\n\n'); return True + else: triggerTask.StopTask(); print('Manual stop signal detected during waiting period. Stop the program.'); return False elif event == 'HighLevel': lastTTL = triggerTask.read()[0] while lastTTL != 1 and self.keepDisplay: lastTTL = triggerTask.read()[0] self._update_display_status() else: - if self.keepDisplay: triggerTask.StopTask(); print 'Trigger detected. Start displaying...\n\n'; return True - else: triggerTask.StopTask(); print 'Manual stop signal detected during waiting period. Stop the program.'; return False + if self.keepDisplay: triggerTask.StopTask(); print('Trigger detected. Start displaying...\n\n'); return True + else: triggerTask.StopTask(); print('Manual stop signal detected during waiting period. Stop the program.'); return False elif event == 'NegativeEdge': lastTTL = triggerTask.read()[0] while self.keepDisplay: currentTTL = triggerTask.read()[0] if (lastTTL == 1) and (currentTTL == 0):break else:lastTTL = int(currentTTL);self._update_display_status() - else: triggerTask.StopTask(); print 'Manual stop signal detected during waiting period. Stop the program.';return False - triggerTask.StopTask(); print 'Trigger detected. Start displaying...\n\n'; return True + else: triggerTask.StopTask(); print('Manual stop signal detected during waiting period. 
Stop the program.');return False + triggerTask.StopTask(); print('Trigger detected. Start displaying...\n\n'); return True elif event == 'PositiveEdge': lastTTL = triggerTask.read()[0] while self.keepDisplay: currentTTL = triggerTask.read()[0] if (lastTTL == 0) and (currentTTL == 1):break else:lastTTL = int(currentTTL);self._update_display_status() - else: triggerTask.StopTask(); print 'Manual stop signal detected during waiting period. Stop the program.'; return False - triggerTask.StopTask(); print 'Trigger detected. Start displaying...\n\n'; return True - else:raise NameError, 'trigger should be one of "NegativeEdge", "PositiveEdge", "HighLevel", or "LowLevel"!' + else: triggerTask.StopTask(); print('Manual stop signal detected during waiting period. Stop the program.'); return False + triggerTask.StopTask(); print('Trigger detected. Start displaying...\n\n'); return True + else:raise NameError('trigger should be one of "NegativeEdge", "PositiveEdge", "HighLevel", or "LowLevel"!') def _get_file_name(self): @@ -3246,7 +3246,7 @@ def _get_file_number(self): fileNumber = int(numStr, 2) # print array, fileNumber except Exception as e: - print e + print(e) fileNumber = None return fileNumber @@ -3302,7 +3302,7 @@ def _display(self, window, stim): if self.displayFrames is not None: self.displayFrames = self.displayFrames[:i] - if self.keepDisplay == True: print '\nDisplay successfully completed.' + if self.keepDisplay == True: print('\nDisplay successfully completed.') def flag_to_close(self): @@ -3311,19 +3311,19 @@ def flag_to_close(self): def _update_display_status(self): - if self.keepDisplay is None: raise LookupError, 'self.keepDisplay should start as True for updating display status' + if self.keepDisplay is None: raise LookupError('self.keepDisplay should start as True for updating display status') #check keyboard input 'q' or 'escape' keyList = event.getKeys(['q','escape']) if len(keyList) > 0: self.keepDisplay = False - print "Keyboard stop signal detected. Stop displaying. \n" + print("Keyboard stop signal detected. Stop displaying. \n") try: msg, addr = self.displayControlSock.recvfrom(128) if msg[0:4].upper() == 'STOP': self.keepDisplay = False - print "Remote stop signal detected. Stop displaying. \n" + print("Remote stop signal detected. Stop displaying. \n") except: pass if self.isRemoteSync: @@ -3339,7 +3339,7 @@ def set_display_order(self, displayOrder): def set_display_iteration(self, displayIteration): if displayIteration % 1 == 0:self.displayIteration = displayIteration - else:raise ArithmeticError, "displayIteration should be a whole number." + else:raise ArithmeticError("displayIteration should be a whole number.") self.clear() @@ -3347,7 +3347,7 @@ def save_log(self): if self.displayLength is None: self.clear() - raise LookupError, "Please display sequence first!" + raise LookupError("Please display sequence first!") if self.fileName is None: self._get_file_name() @@ -3377,7 +3377,7 @@ def save_log(self): #generate full log dictionary path = os.path.join(directory, filename) ft.saveFile(path,logFile) - print ".pkl file generated successfully." 
+ print(".pkl file generated successfully.") backupFileFolder = self._get_backup_folder() @@ -3385,15 +3385,15 @@ if not (os.path.isdir(backupFileFolder)): os.makedirs(backupFileFolder) backupFilePath = os.path.join(backupFileFolder,filename) ft.saveFile(backupFilePath,logFile) - print ".pkl backup file generate successfully" + print(".pkl backup file generated successfully") else: - print "did not find backup path, no backup has been saved." + print("did not find backup path, no backup has been saved.") def _get_backup_folder(self): if self.fileName is None: - raise LookupError, 'self.fileName not found.' + raise LookupError('self.fileName not found.') else: if self.backupdir is not None: @@ -3628,4 +3628,4 @@ def clear(self): # print phases # ============================================================================================================================== - print 'for debug...' \ No newline at end of file + print('for debug...') \ No newline at end of file diff --git a/corticalmapping/core/FileTools.py b/corticalmapping/core/FileTools.py index d431025..5315e40 100644 --- a/corticalmapping/core/FileTools.py +++ b/corticalmapping/core/FileTools.py @@ -5,40 +5,24 @@ import os import shutil import struct +from . import ImageAnalysis as ia +from . import tifffile as tf import h5py -import warnings -import numbers -try: - import ImageAnalysis as ia -except (AttributeError, ImportError): - from . import ImageAnalysis as ia - -try: - import tifffile as tf -except ImportError: - import skimage.external.tifffile as tf try: import cv2 -except ImportError as e: print('cannot import OpenCV. {}'.format(e)) - -try: import sync.dataset as sync_dset -except ImportError as e: print('cannot import sync.dataset. {}'.format(e)) - - -def is_integer(var): - return isinstance(var, numbers.Integral) +except ImportError as e: print('cannot import OpenCV. 
' + str(e)) def saveFile(path,data): f = open(path,'wb') - pickle.dump(data, f) + pickle.dump(data, f, 2) f.close() def loadFile(path): f = open(path,'rb') - data = pickle.load(f) + data = pickle.load(f, encoding='bytes') f.close() return data @@ -271,13 +255,13 @@ def importRawNewJPhys(path, channelNum = len(channels) channelLength = len(JPhysFile) / channelNum -# print('length of JPhys:', len(JPhysFile)) -# print('length of JPhys channel number:', channelNum) +# print 'length of JPhys:', len(JPhysFile) +# print 'length of JPhys channel number:', channelNum if len(JPhysFile) % channelNum != 0: raise ArithmeticError('Length of the file should be divisible by channel number!') - JPhysFile = JPhysFile.reshape([channelLength, channelNum]) + JPhysFile = JPhysFile.reshape([int(channelLength), int(channelNum)]) headerMatrix = JPhysFile[0:headerLength,:] bodyMatrix = JPhysFile[headerLength:,:] @@ -346,7 +330,7 @@ def importRawJPhys2(path, # first time of visual stimulation visualStart = None - for i in xrange(80,len(photodiode)): + for i in range(80,len(photodiode)): if ((photodiode[i] - photodiodeThr) * (photodiode[i-1] - photodiodeThr)) < 0 and \ ((photodiode[i] - photodiodeThr) * (photodiode[i-75] - photodiodeThr)) < 0: #first frame of big change visualStart = i*(1./sf) @@ -413,7 +397,7 @@ def importRawNewJPhys2(path, # first time of visual stimulation visualStart = None - for i in xrange(80,len(photodiode)): + for i in range(80,len(photodiode)): if ((photodiode[i] - photodiodeThr) * (photodiode[i-1] - photodiodeThr)) < 0 and \ ((photodiode[i] - photodiodeThr) * (photodiode[i-75] - photodiodeThr)) < 0: #first frame of big change visualStart = i*(1./sf) @@ -492,8 +476,8 @@ def generateAVI(saveFolder, out.release() cv2.destroyAllWindows() - - + + def importRawJCamF(path, saveFolder = None, dtype = np.dtype(' 0: - warnings.warn('You choose to extract digital channels by index. But there are ' - 'digital channels with string labels: {}. All the string labels ' - 'will be lost.'.format(str(digital_cns))) - digital_cns = range(ds.meta_data['ni_daq']['event_bits']) - digital_cns = [str(cn) for cn in digital_cns] - - # print(digital_cns) - - for digital_i, digital_cn in enumerate(digital_cns): - if digital_cn: - digital_channels[digital_cn] = {'rise': ds.get_rising_edges(line=digital_i, units='seconds'), - 'fall': ds.get_falling_edges(line=digital_i, units='seconds')} - - # read analog channels - data_f = h5py.File(f_path, 'r') - if 'analog_meta' not in data_f.keys(): - data_f.close() - print ('no analog data found in file: {}.'.format(f_path)) - return {'digital_channels': digital_channels} - else: - - analog_channels = {} - - if analog_downsample_rate is None: - analog_downsample_rate = 1 - analog_fs = ds.analog_meta_data['analog_sample_rate'] / analog_downsample_rate - if analog_labels is not None: - analog_cns = analog_labels - for analog_cn in analog_cns: - analog_channels[str(analog_cn)] = ds.get_analog_channel(channel=analog_cn, - downsample=analog_downsample_rate) - elif by_label: - analog_cns = [al for al in ds.analog_meta_data['analog_labels'] if al] - - for analog_cn in analog_cns: - analog_channels[str(analog_cn)] = ds.get_analog_channel(channel=analog_cn, - downsample=analog_downsample_rate) - else: - analog_cns = [al for al in ds.analog_meta_data['analog_labels'] if al] - if len(analog_cns) > 0: - warnings.warn('You choose to extract analog channels by index. But there are ' - 'analog channels with string labels: {}. 
All the string labels ' - 'will be lost.'.format(str(digital_cns))) - analog_cns = ds.analog_meta_data['analog_channels'] - - for analog_ind, analog_cn in enumerate(analog_cns): - analog_channels[str(analog_cn)] = ds.get_analog_channel(channel=analog_ind, - downsample=analog_downsample_rate) - - return {'digital_channels': digital_channels, - 'analog_channels': analog_channels, - 'analog_sample_rate': analog_fs} - if __name__=='__main__': - # ---------------------------------------------------------------------------- - sync_path = r"D:\data2\rabies_tracing_project\method_development" \ - r"\2017-10-05-read-sync\171003_M345521_FlashingCircle_106_171003165755.h5" - # sync_path = r"\\allen\programs\braintv\workgroups\nc-ophys\ImageData\Soumya\trees\m255" \ - # r"\log_m255\sync_pkl\m255_presynaptic_pop_vol1_stimDG_bessel170918013215.h5" - sync_dict = read_sync(f_path=sync_path, by_label=False, digital_labels=['vsync_2p'], - analog_labels=['photodiode'], analog_downsample_rate=None) - # sync_dict = read_sync(f_path=sync_path, by_label=False, digital_labels=None, - # analog_labels=None, analog_downsample_rate=None) - # print(sync_dict) - # ---------------------------------------------------------------------------- - #---------------------------------------------------------------------------- + # mov = np.random.rand(250,512,512,4) # generateAVI(r'C:\JunZhuang\labwork\data\python_temp_folder','tempMov',mov) + #---------------------------------------------------------------------------- - # print(int2str(5)) - # print(int2str(5,2)) - # print(int2str(155,6)) + # print int2str(5) + # print int2str(5,2) + # print int2str(155,6) #---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- @@ -808,15 +691,15 @@ def read_sync(f_path, analog_downsample_rate=None, by_label=True, digital_labels # ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- - # ff = h5py.File(r"E:\data\python_temp_folder\test4.hdf5") - # test_dict = {'a':1, 'b':2, 'c': {'A': 4, 'B': 5}} - # write_dictionary_to_h5group_recursively(target=ff, source=test_dict, is_overwrite=True) - # ff.close() - # - # ff = h5py.File(r"E:\data\python_temp_folder\test4.hdf5") - # test_dict2 = {'a': {'C': 6, 'D': 7}, 'c': {'A': 4, 'B': 6}, 'd':10, 'e':{'E':11, 'F':'xx'}} - # write_dictionary_to_h5group_recursively(target=ff, source=test_dict2, is_overwrite=False) - # ff.close() + ff = h5py.File(r"E:\data\python_temp_folder\test4.hdf5") + test_dict = {'a':1, 'b':2, 'c': {'A': 4, 'B': 5}} + write_dictionary_to_h5group_recursively(target=ff, source=test_dict, is_overwrite=True) + ff.close() + + ff = h5py.File(r"E:\data\python_temp_folder\test4.hdf5") + test_dict2 = {'a': {'C': 6, 'D': 7}, 'c': {'A': 4, 'B': 6}, 'd':10, 'e':{'E':11, 'F':'xx'}} + write_dictionary_to_h5group_recursively(target=ff, source=test_dict2, is_overwrite=False) + ff.close() # ---------------------------------------------------------------------------- diff --git a/corticalmapping/core/ImageAnalysis.py b/corticalmapping/core/ImageAnalysis.py index 4423cbb..a6e13ce 100644 --- a/corticalmapping/core/ImageAnalysis.py +++ b/corticalmapping/core/ImageAnalysis.py @@ -8,28 +8,13 @@ import scipy.ndimage as ni import scipy.stats as stats import skimage.morphology as sm -import skimage.measure as measure +from . import FileTools as ft +from . 
import PlottingTools as pt import time - -try: - import FileTools as ft -except (AttributeError, ImportError): - from . import FileTools as ft - -try: - import PlottingTools as pt -except (AttributeError, ImportError): - from . import PlottingTools as pt - -try: - import cv2 -except ImportError as e: - print(e) - -try: - from toolbox.misc import BinarySlicer -except ImportError as e: - print(e) +try: import cv2 +except ImportError as e: print(e) +try: from toolbox.misc import BinarySlicer +except ImportError as e: print(e) def resample(t1,y1,interval,kind='linear', isPlot = False): @@ -85,16 +70,8 @@ def array_nor(A): normalize a np.array to the scale [0, 1] ''' - if np.isnan(A).any(): - B = A.astype(np.float) - maxv = np.nanmax(B.flat) - minv = np.nanmin(B.flat) - else: - B=A.astype(np.float) - maxv = np.max(B.flat) - minv = np.min(B.flat) - - return (B - minv) / (maxv - minv) + B=A.astype(np.float) + return (B-np.amin(B))/(np.amax(B)-np.amin(B)) def array_nor_median(A): @@ -141,10 +118,10 @@ def distance(p0, p1): #old code====================================================================== # if (len(p0.shape) > 1) or (len(p1.shape) > 1): - # raise(LookupError('Both input arrays should be 1d array!!')) + # raise LookupError, 'Both input arrays should be 1d array!!' # # if p0.shape != p1.shape: - # raise LookupError('The two input arrays should have same dimensions.') + # raise LookupError, 'The two input arrays should have same dimensions.' # # distance = math.sqrt(np.sum(((p0.astype(np.float)-p1.astype(np.float))**2))) #=============================================================================== @@ -384,21 +361,21 @@ def rigid_transform(img, zoom=None, rotation=None, offset=None, outputShape=None newImg = img.astype(np.float32) - if zoom is not None: + if zoom: if len(img.shape) == 2: newZoom = (zoom,zoom) elif len(img.shape) == 3: newZoom = (1,zoom,zoom) newImg = ni.zoom(newImg,zoom=newZoom,mode=mode,cval=cval) - if rotation is not None: + if rotation: newImg = expand_image(newImg) if len(img.shape) == 2: newImg = ni.rotate(newImg,angle=rotation,reshape=False,mode=mode,cval=cval) elif len(img.shape) == 3: newImg = ni.rotate(newImg,angle=rotation,axes=(1,2),reshape=False,mode=mode,cval=cval) - if offset is not None: + if offset: if len(img.shape) == 2: newImg = ni.shift(newImg,(offset[1],offset[0]),mode=mode,cval=cval) if len(img.shape) == 3: @@ -452,26 +429,20 @@ def rigid_transform_cv2_3d(img, zoom=None, rotation=None, offset=None, outputSha if len(img.shape) != 3: raise LookupError('Input image is not a 3d array!') - # if outputShape is None: - # - # if zoom is not None: - # try: - # newHeight = int(img.shape[1] * zoom[0]) - # newWidth = int(img.shape[2] * zoom[1]) - # except TypeError: - # newHeight = int(img.shape[1] * zoom) - # newWidth = int(img.shape[2] * zoom) - # else: - # newHeight = img.shape[1] - # newWidth = img.shape[2] - # else: - # newHeight = outputShape[0] - # newWidth = outputShape[1] - - frame_1 = rigid_transform_cv2_2d(img[0, :, :], zoom=zoom, rotation=rotation, offset=offset, outputShape=outputShape) - newHeight = frame_1.shape[0] - newWidth = frame_1.shape[1] - + if not outputShape: + if zoom is not None: + try: + newHeight = int(img.shape[1]*zoom[0]) + newWidth = int(img.shape[2]*zoom[1]) + except TypeError: + newHeight = int(img.shape[1] * zoom) + newWidth = int(img.shape[2] * zoom) + else: + newHeight = img.shape[1] + newWidth = img.shape[2] + else: + newHeight = outputShape[0] + newWidth = outputShape[1] newImg = 
np.empty((img.shape[0],newHeight,newWidth),dtype=img.dtype) for i in range(img.shape[0]): @@ -587,7 +558,7 @@ def temporal_filter_movie(mov, # array of movie filterArray = np.ones(frameNum) - for i in xrange(frameNum): + for i in range(frameNum): if ((freqs[i] > 0) and (freqs[i] < Flow) or (freqs[i] > Fhigh)) or \ ((freqs[i] < 0) and (freqs[i] > -Flow) or (freqs[i] < -Fhigh)): filterArray[i] = 0 @@ -605,8 +576,8 @@ def temporal_filter_movie(mov, # array of movie movFFT = np.fft.fft(mov, axis = 0) - for i in xrange(mov.shape[1]): - for j in xrange(mov.shape[2]): + for i in range(mov.shape[1]): + for j in range(mov.shape[2]): movFFT[:,i,j] = movFFT[:,i,j] * filterArray movF = np.real(np.fft.ifft(movFFT, axis = 0)) @@ -719,7 +690,7 @@ def get_trace_binaryslicer(bl_obj, mask, mask_mode = 'binary'): raise ValueError('Binary mask should only contain zeros and ones!!') else: mask_ind = np.where(mask != 0) - # print(mask_ind) + # print mask_ind min_row = min(mask_ind[0]); max_row = max(mask_ind[0]) + 1 min_col = min(mask_ind[1]); max_col = max(mask_ind[1]) + 1 finalMask = np.array(mask.astype(np.float))[min_row:max_row, min_col:max_col] @@ -749,7 +720,7 @@ def get_trace_binaryslicer(bl_obj, mask, mask_mode = 'binary'): finalMask = finalMask[min_row:max_row, min_col:max_col] mov = bl_obj[:,min_row:max_row, min_col:max_col] - # print(mov) + # print mov return get_trace(mov, finalMask, maskMode='weighted') @@ -834,7 +805,7 @@ def get_trace_binaryslicer3(bl_obj, masks, mask_mode = 'binary', loading_frame_n print('Translating in chunks: '+str(chunkNum-1)+' x '+str(loading_frame_num)+' frame(s)'+' + '+str(frameNum % loading_frame_num)+' frame(s)') traces = {} - for key in masks.iterkeys(): traces.update({'trace_'+key:[]}) + for key in masks.keys(): traces.update({'trace_'+key:[]}) for i in range(chunkNum): indStart = i*loading_frame_num @@ -842,13 +813,13 @@ def get_trace_binaryslicer3(bl_obj, masks, mask_mode = 'binary', loading_frame_n if indEnd > frameNum: indEnd = frameNum print('Extracting signal from frame '+str(indStart)+' to frame '+str(indEnd)+'.\t'+str(i*100./chunkNum)+'%') currMov = bl_obj[indStart:indEnd,:,:] - for key, mask in masks.iteritems(): + for key, mask in masks.items(): if len(mask.shape) != 2: raise ValueError('Mask "' + key + '" should be 2d!') if bl_obj.shape[1] != mask.shape[0] or bl_obj.shape[2] != mask.shape[1]: raise ValueError('the size of each frame of the BinarySlicer object should be the same as the size of mask "' + key + '"!') traces['trace_'+key].append(get_trace(currMov, mask, maskMode=mask_mode)) - for key in traces.iterkeys(): + for key in traces.keys(): traces[key] = np.concatenate(traces[key]) return traces @@ -916,7 +887,7 @@ def discretize(array, binSize): newArray = np.zeros(flatArray.shape) newArray[:] = np.nan - for i in xrange(len(indArray)): + for i in range(len(indArray)): if np.isnan(flatArray[i]) == False: newArray[i] = bins[indArray[i]] @@ -1054,7 +1025,7 @@ def get_area_edges(img, else: return edgesThick.astype(np.bool) -def z_downsample(img, downSampleRate, is_verbose=True): +def z_downsample(img, downSampleRate): ''' downsample input image in z direction ''' @@ -1066,15 +1037,13 @@ def z_downsample(img, downSampleRate, is_verbose=True): newFrameNum = img.shape[0] //downSampleRate newImg = np.empty((newFrameNum,img.shape[1],img.shape[2]),dtype=img.dtype) - if is_verbose: - print('Start downsampling...') + print('Start downsampling...') for i in range(newFrameNum): # print (float(i)*100/newFrameNum),'%' currChunk = 
img[i*downSampleRate:(i+1)*downSampleRate,:,:].astype(np.float) currFrame = np.mean(currChunk,axis=0) newImg[i,:,:]=currFrame.astype(img.dtype) - if is_verbose: - print('End of downsampling.') + print('End of downsampling.') return newImg @@ -1128,7 +1097,7 @@ def get_marked_masks(labeled, markCoor): ''' masks = get_masks(labeled) - for key, value in masks.iteritems(): + for key, value in masks.items(): if hit_or_miss(markCoor, value): return value return None @@ -1138,9 +1107,9 @@ def sort_masks(masks, keyPrefix=None, labelLength=3): sort a dictionary of binary masks, big to small ''' - maskNum = len(masks.keys()) + maskNum = len(list(masks.keys())) order = [] - for key, mask in masks.iteritems(): + for key, mask in masks.items(): order.append([key,np.sum(mask.flatten())]) order = sorted(order, key=lambda a:a[1], reverse=True) @@ -1158,7 +1127,7 @@ def sort_masks(masks, keyPrefix=None, labelLength=3): # down sample a 3-d array in 0 direction # ''' # -# if len(A.shape) != 3: raise ValueError('input array should be 3-d.') +# if len(A.shape) != 3: raise ValueError, 'input array should be 3-d.' # rate = int(rate) # dataType = A.dtype # newZDepth = (A.shape[0] - (A.shape[0]%rate))/rate @@ -1186,9 +1155,9 @@ def get_average_movie(mov, frameTS, onsetTimes, chunkDur, isReturnN=False): chunkFrameDur = int(np.ceil(chunkDur / meanFrameDur)) - # print('chunkDur:', chunkDur) - # print('meanFrameDur:', meanFrameDur) - # print('chunkFrameDur:', chunkFrameDur) + # print 'chunkDur:', chunkDur + # print 'meanFrameDur:', meanFrameDur + # print 'chunkFrameDur:', chunkFrameDur sumMov = None n = 0. @@ -1201,19 +1170,19 @@ def get_average_movie(mov, frameTS, onsetTimes, chunkDur, isReturnN=False): if onset >= frameTS[0] and onset + chunkDur <= frameTS[-1]: if i // (onset_num // 10) > curr_onset: - # print(t0 - time.time(), ' second :', (i // (onset_num // 10)) * 10, '%') - print('{:09.2f} second: {:2d} %'.format(time.time() - t0, (i // (onset_num // 10)) * 10)) + # print t0 - time.time(), ' second :', (i // (onset_num // 10)) * 10, '%' + print(('{:09.2f} second: {:2d} %'.format(time.time() - t0, (i // (onset_num // 10)) * 10))) curr_onset = i // (onset_num // 10) - onsetFrameInd = np.argmin(np.abs(frameTS-onset)) - # print('Chunk:',int(n),'; Starting frame index:',onsetFrameInd,'; Ending frame index', onsetFrameInd+chunkFrameDur) + onsetFrameInd = int(np.argmin(np.abs(frameTS-onset))) + # print 'Chunk:',int(n),'; Starting frame index:',onsetFrameInd,'; Ending frame index', onsetFrameInd+chunkFrameDur if onsetFrameInd+chunkFrameDur <= mov.shape[0]: if sumMov is None: sumMov = np.zeros((chunkFrameDur,mov.shape[1],mov.shape[2]), dtype=np.float64) sumMov += mov[onsetFrameInd:onsetFrameInd+chunkFrameDur,:,:].astype(np.float64) n += 1. 
else: - print('Ending frame index ('+str(int(onsetFrameInd+chunkFrameDur))+') is larger than frames in movie (' + + print('Ending frame index ('+str(int(onsetFrameInd+chunkFrameDur))+') is larger than frames in movie ('+\ str(int(mov.shape[0]))+'.\nExclude this trigger.') continue @@ -1236,9 +1205,9 @@ def get_average_movie2(mov, frameTS, onsetTimes, chunkDur, verbose=True): chunkFrameDur = int(np.ceil(chunkDur / meanFrameDur)) - # print('chunkDur:', chunkDur) - # print('meanFrameDur:', meanFrameDur) - # print('chunkFrameDur:', chunkFrameDur) + # print 'chunkDur:', chunkDur + # print 'meanFrameDur:', meanFrameDur + # print 'chunkFrameDur:', chunkFrameDur sumMov = None real_count = 0 @@ -1250,17 +1219,17 @@ def get_average_movie2(mov, frameTS, onsetTimes, chunkDur, verbose=True): for i, onset in enumerate(onsetTimes): if verbose and (i // (onset_num // 10) > curr_onset): - print('{:09.2f} second: {:2d} %'.format(time.time() - t0, (i // (onset_num // 10)) * 10)) + print(('{:09.2f} second: {:2d} %'.format(time.time() - t0, (i // (onset_num // 10)) * 10))) curr_onset = i // (onset_num // 10) if onset < frameTS[0]: # the onset is before imaging start time. Exclude this onset. continue - # print('onset number:', count, 'is before imaging start time. Exclude this onset.') + # print 'onset number:', count, 'is before imaging start time. Exclude this onset.' else: onsetFrameInd = np.argmin(np.abs(frameTS-onset)) # if verbose: - # print('Chunk:',int(count),'; Starting frame index:',onsetFrameInd,'; Ending frame index', onsetFrameInd+chunkFrameDur) + # print 'Chunk:',int(count),'; Starting frame index:',onsetFrameInd,'; Ending frame index', onsetFrameInd+chunkFrameDur if onsetFrameInd+chunkFrameDur <= mov.shape[0]: if sumMov is None: sumMov = np.zeros((chunkFrameDur,mov.shape[1],mov.shape[2])) @@ -1268,7 +1237,7 @@ def get_average_movie2(mov, frameTS, onsetTimes, chunkDur, verbose=True): real_count += 1. else: # the chunk exceeds the end of imaging. continue - # print('the chunk of onset number', count, 'exceeds the end of imaging. Exclude this onset.') + # print 'the chunk of onset number', count, 'exceeds the end of imaging. Exclude this onset.' 
if sumMov is None: @@ -1317,211 +1286,15 @@ def regression_detrend_1d(sig, trend): return sig_detrend, slope, r_value -def merge_weighted_rois(roi1, roi2): - """ - merge two WeightedROI objects, most useful for merge ON and OFF subfields - """ - if (roi1.pixelSizeX != roi2.pixelSizeX) or (roi1.pixelSizeY != roi2.pixelSizeY): - raise ValueError('The pixel sizes of the two WeightedROI objects should match!') - - if roi1.pixelSizeUnit != roi2.pixelSizeUnit: - raise ValueError('The pixel size units of the two WeightedROI objects should match!') - - mask1 = roi1.get_weighted_mask() - mask2 = roi2.get_weighted_mask() - - return WeightedROI(mask1 + mask2, pixelSize=[roi1.pixelSizeY, roi1.pixelSizeX], pixelSizeUnit=roi1.pixelSizeUnit) - - -def merge_binary_rois(roi1, roi2): - """ - merge two ROI objects, most useful for merge ON and OFF subfields - """ - if (roi1.pixelSizeX != roi2.pixelSizeX) or (roi1.pixelSizeY != roi2.pixelSizeY): - raise ValueError('The pixel sizes of the two WeightedROI objects should match!') - - if roi1.pixelSizeUnit != roi2.pixelSizeUnit: - raise ValueError('The pixel size units of the two WeightedROI objects should match!') - - mask1 = roi1.get_binary_mask() - mask2 = roi2.get_binary_mask() - mask3 = np.logical_or(mask1, mask2).astype(np.int8) - - return ROI(mask3, pixelSize=[roi1.pixelSizeY, roi1.pixelSizeX], pixelSizeUnit=roi1.pixelSizeUnit) - - -def get_peak_weighted_roi(arr, thr): - """ - return: a WeightROI object representing the mask which contains the peak of arr and cut by the thr (thr) - """ - nanLabel = np.isnan(arr) - arr2 = arr.copy() - arr2[nanLabel] = np.nanmin(arr) - labeled, _ = ni.label(arr2 >= thr) - peakCoor = np.array(np.where(arr2 == np.amax(arr2))).transpose()[0] - peakMask = get_marked_masks(labeled, peakCoor) - if peakMask is None: - 'Threshold too high! No ROI found. Returning None'; return None - else: - return WeightedROI(arr2 * peakMask) - - -def pairwise_distance(coords): - """ - giving coordinates of a set of points, return the pairwise distances of all pairs - :param coords: 2d array, shape is (n, 2). first column: x coordinates, second column: y coordinates. - :return: - """ - - if len(coords.shape) != 2: - raise ValueError("input coordinates should be 2d array.") - - if coords.shape[1] != 2: - raise ValueError("input coordinates should have 2 columns.") - - if coords.shape[0] < 2: - return np.array([]) - else: - point_num = coords.shape[0] - pairs = np.zeros((point_num * (point_num - 1), 4)) - - pair_ind = 0 - for i in range(0, point_num - 1): - for j in range(i + 1, point_num): - pairs[pair_ind, :] = [coords[i, 0], coords[i, 1], coords[j, 0], coords[j, 1]] - pair_ind = pair_ind + 1 - - dis = np.sqrt(np.square(pairs[:, 0] - pairs[:, 2]) + np.square(pairs[:, 1], pairs[:, 3])) - return dis - - -def pairwise_magnification(coords1, coords2): - """ - giving two sets of coordinates of a set of points, say receptive field center location and cortical - location of a set of rois in field of view - - return the pairwise magnification distance of coords1 over distance of coords2 of all pairs - - :param coords1: 2d array, shape is (n, 2). first column: x coordinates, second column: y coordinates. - :param coords2: 2d array, shape is (n, 2). first column: x coordinates, second column: y coordinates. - coords1 one and coords2 should have same shape. 
- :return: - """ - - if len(coords1.shape) != 2: - raise ValueError("input coordinates should be 2d array.") - - if coords1.shape[1] != 2: - raise ValueError("input coordinates should have 2 columns.") - - if coords1.shape != coords2.shape: - raise ValueError("two input coordinates should have same shape.") - - if coords1.shape[0] < 2: - return np.array([]) - else: - point_num = coords1.shape[0] - pairs = np.zeros((point_num * (point_num - 1) / 2, 8)) - - pair_ind = 0 - for i in range(0, point_num - 1): - for j in range(i + 1, point_num): - - pairs[pair_ind, :] = [coords1[i, 0], coords1[i, 1], coords1[j, 0], coords1[j, 1], - coords2[i, 0], coords2[i, 1], coords2[j, 0], coords2[j, 1]] - pair_ind = pair_ind + 1 - - mag = np.sqrt(np.square(pairs[:, 0] - pairs[:, 2]) + np.square(pairs[:, 1], pairs[:, 3])) / \ - np.sqrt(np.square(pairs[:, 4] - pairs[:, 6]) + np.square(pairs[:, 5], pairs[:, 7])) - - return mag - - -def get_circularity(mask, is_skimage=True): - """ - return circularity of the shape marked by the input mask. If the mask label more than one - continuous regions, only analyze the first one retuned by scipy.ndimage.label. - - This does not consider holes. - - there are two ways to estimate perimeter: - 1. is_skimage=True uses the shape through the center of border pixels. if the labeled region is - large and wide in all orientation, the measurement is very precise. But if the labeled region - is small or narrow, it underestmates the perimeter thus overestimates the circularity. Sometimes - it can be larger than 1. - - 2. is_skimage=False uses the outer boundary line of border pixels. This will treat all shapes as - rectangle, thus systematically underestimates the circularity. upper bound will be the - circularity of square: 0.7853981633974483. - - :param mask: 2d binary array, if not binary, all pixel <= zero will be considered as 0. - all pixels > 0 will be considered as 1. - :param is_skimage: bool. if Ture, use skimage.measure.perimeter to estimate perimeter - :return: circularity, defined by 4 * pi * area / (perimeter) ^ 2 - """ - - if len(mask.shape) != 2: - raise ValueError('input mask should be a 2d array.') - - msk = np.zeros(mask.shape, dtype=np.uint8) - msk[mask>0] = 1 - - labeled, roi_num = ni.label(msk) - - if roi_num > 1: # found more than one labeled regions - # raise(ValueError('input mask should have only one continuous region labeled. {} found.'.format(roi_num))) - print('input mask has {} (> 1) continuous regions labeled. only analyze the first one'.format(roi_num)) - - msk[:] = 0 - msk[labeled == 1] = 1 - elif roi_num == 0: # found no labeled region - print('Did not find labeled region. Returning None') - return None - else: # found one labeled region - pass - - if is_skimage: - perimeter = measure.perimeter(msk) - else: - rows, cols = np.where(msk == 1) - perimeter = 2. * (max(rows) - min(rows) + 1.) + 2. * (max(cols) - min(cols) + 1.) - area = np.sum(msk.flat) - - return 4 * np.pi * area / (perimeter ** 2) - - -def fit_ellipse(mask): - """ - using opencv to fit a mask into ellipse - - :param mask: 2d array, dtype np.uint8 - :return: - """ - - mask2 = np.array(mask) - - if len(mask.shape) != 2: - raise ValueError('input "mask" should be a 2d array.') - - if not mask.dtype == np.uint8: - raise ValueError("input mask should have dtype as np.uint8") - - _, cons, _ = cv2.findContours(image=mask2, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE) - - if len(cons) == 0: - print('ImageAnalysis.fit_ellipse: No contour detected. 
Returning None.') - return None - elif cons[0].shape[0] < 5: - print('ImageAnalysis.fit_ellipse: No contour detected. Returning None.') - return None - else: - if len(cons) > 1: - print('ImageAnalysis.fit_ellipse: More than one contours detected. Taking the first one.') - con = cons[0] - - box = cv2.fitEllipse(con) - ell = Ellipse.from_cv2_box(box) - return ell +# def get_surround_pixels(shape, (i, j), connectivity=8): +# """ +# given a 2-d shape and a pixel location [i, j], return the locations of its surround pixels. +# +# :param shape: tuple or list of integers, should have length of 2 +# :param i: +# :param j: +# :return: +# """ class ROI(object): @@ -1608,18 +1381,16 @@ def get_nan_mask(self): mask[self.pixels] = 1 return mask - def get_pixel_area(self, verbose=False): + def get_pixel_area(self): ''' return the area coverage of the ROI ''' if (self.pixelSizeX is not None) and (self.pixelSizeX is not None): - if verbose: - print('returning area with unit:' + self.pixelSizeUnit + '^2') + print('returning area with unit:' + self.pixelSizeUnit + '^2') return float(len(self.pixels[0]))*self.pixelSizeX*self.pixelSizeY else: - print('Did not find information about pixel size. ' - 'Returning area as pixel counts without unit.') + print('returning area as pixel counts without unit.') return len(self.pixels[0]) def get_binary_area(self): @@ -1640,7 +1411,7 @@ def get_binary_trace(self, mov): ''' binaryMask = self.get_binary_mask().astype(np.float32) trace = np.multiply(mov, np.array([binaryMask])).sum(axis=1).sum(axis=1) - # print(trace) + # print trace return trace / self.get_binary_area() def get_binary_trace_pixelwise(self, mov): @@ -1653,7 +1424,7 @@ def get_binary_trace_pixelwise(self, mov): for pixel in pixels: # trace += mov[:, pixel[0], pixel[1]] # somehow this is less precise !! 
do not use trace = trace + mov[:, int(pixel[0]), int(pixel[1])].flatten().astype(np.float32) - # print(trace) + # print trace return trace / self.get_binary_area() def plot_binary_mask(self, plotAxis=None, color='#ff0000', alpha=1): @@ -1681,7 +1452,7 @@ def to_h5_group(self, h5Group): dataDict = dict(self.__dict__) _ = dataDict.pop('dimension');_ = dataDict.pop('pixelSizeX');_ = dataDict.pop('pixelSizeY');_ = dataDict.pop('pixelSizeUnit') - for key, value in dataDict.iteritems(): + for key, value in dataDict.items(): if value is None: h5Group.create_dataset(key,data='None') else: h5Group.create_dataset(key,data=value) @@ -1711,24 +1482,15 @@ def from_h5_group(h5Group): if pixelSizeUnit is 'None': pixelSizeUnit = None pixels = h5Group['pixels'].value - if 'weights' in h5Group.keys(): + if 'weights' in list(h5Group.keys()): weights = h5Group['weights'].value mask = np.zeros(dimension, dtype=np.float32); mask[tuple(pixels)] = weights - roi = WeightedROI(mask, pixelSize=pixelSize, pixelSizeUnit=pixelSizeUnit) + return WeightedROI(mask, pixelSize=pixelSize, pixelSizeUnit=pixelSizeUnit) else: mask = np.zeros(dimension, dtype=np.uint8); mask[tuple(pixels)] = 1 - roi = ROI(mask, pixelSize=pixelSize, pixelSizeUnit=pixelSizeUnit) - - for key in h5Group.keys(): - if key not in ['pixels', 'weights']: - if h5Group[key].value == 'None': - setattr(roi, key, None) - else: - setattr(roi, key, h5Group[key].value) - - return roi + return ROI(mask, pixelSize=pixelSize, pixelSizeUnit=pixelSizeUnit) class WeightedROI(ROI): @@ -1738,7 +1500,8 @@ def __init__(self, mask, pixelSize = None, pixelSizeUnit = None): self.weights = mask[self.pixels] def __str__(self): - return 'corticalmapping.core.ImageAnalysis.WeightedROI object' + return 'corticalmapping.core.ImageAnalysis.WeightedROI object, subclass of ' \ + 'corticalmapping.core.ImageAnalysis.ROI' def get_peak(self): return np.max(self.weights) @@ -1746,9 +1509,6 @@ def get_peak(self): def get_weight_sum(self): return sum(self.weights) - def get_mean_weight(self): - return np.mean(self.weights) - def get_weighted_mask(self): mask = np.zeros(self.dimension,dtype=np.float32) mask[self.pixels] = self.weights @@ -1772,7 +1532,7 @@ def get_weighted_center_in_coordinate(self, yCor, xCor): weightMask = self.get_weighted_mask() if np.sum(weightMask.flatten()) == 0: - return [np.nan, np.nan] + return None else: xMap, yMap = np.meshgrid(xCor, yCor) xCenter = np.sum((xMap*weightMask).flatten())/np.sum(weightMask.flatten()) @@ -1803,7 +1563,7 @@ def get_weighted_trace(self, mov, is_area_weighted=False): ''' weightedMask = self.get_weighted_mask() trace = np.multiply(mov, np.array([weightedMask])).sum(axis=-1).sum(axis=-1) - # print(trace) + # print trace if is_area_weighted: return trace / self.get_binary_area() elif not is_area_weighted: @@ -1823,45 +1583,14 @@ def get_weighted_trace_pixelwise(self, mov, is_area_weighted=False): for i, pixel in enumerate(pixels): # trace += mov[:, pixel[0], pixel[1]] # somehow this is less precise !! 
do not use trace = trace + self.weights[i] * (mov[:, pixel[0], pixel[1]]).astype(np.float32) - # print(trace) - if not is_area_weighted: + # print trace + if is_area_weighted: return trace / self.get_binary_area() - elif is_area_weighted: + elif not is_area_weighted: return trace / self.get_weight_sum() else: raise ValueError('is_area_weighted should be a boolean variable.') - def ellipse_fitting(self, thr=None, is_plot=False): - """ - using opencv to fit a ellipse - - :param thr: float, threshold to threshold the mask - :param is_plot: bool - :return ell: corticalmapping.ImageAnalysis.Ellipse object - """ - - if thr is None: - mask_thr = self.get_binary_mask() * 255 - else: - mask = self.get_weighted_mask() - mask_thr = np.zeros(mask.shape, dtype=np.uint8) - mask_thr[mask >= thr] = 255 - - ell = fit_ellipse(mask_thr) - - if is_plot: - f = plt.figure() - ax = f.add_subplot(111) - img = np.array([mask_thr, mask_thr, mask_thr]).transpose((1, 2, 0)).copy() - if ell is not None: - img = ell.draw(img=img, thickness=2) - ax.set_title('angle={} deg'.format(ell.angle)) - img = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB) - ax.imshow(img, interpolation='nearest') - plt.show() - - return ell - @staticmethod def from_h5_group(h5Group): ''' @@ -1877,128 +1606,7 @@ def from_h5_group(h5Group): weights = h5Group['weights'].value mask = np.zeros(dimension, dtype=np.float32) mask[tuple(pixels)] = weights - - roi = WeightedROI(mask, pixelSize=pixelSize, pixelSizeUnit=pixelSizeUnit) - - for key in h5Group.keys(): - if key not in ['pixels', 'weights']: - if h5Group[key].value == 'None': - setattr(roi, key, None) - else: - setattr(roi, key, h5Group[key].value) - - return roi - - -class Ellipse(object): - """ - ellipse object - - :attribute center: tuple of two positive floats, (center height, center width) - :attribute axes: tuple of two positive floats, (radius of the long axis, radius of short axis) - :attribute angle: float, degree, counterclockwise rotation of long axis, from right direction - """ - - def __init__(self, center, axes, angle): - """ - ellipse object - - :param center: tuple of two positive floats, (center height, center width) - :param axes: tuple of two positive floats, (radius of the long axis, radius of short axis) - :param angle: float, degree, counterclockwise rotation of long axis, from right direction - """ - self.center = center - - if axes[0] <= 0. or axes[1] <= 0.: - raise ValueError('length of axes should be larger than 0.') - - if axes[0] >= axes[1]: - self.axes = axes - self.angle = angle % 180. - else: - self.axes = (axes[1], axes[0]) - self.angle = (angle + 90.) % 180. 
- - def get_aspect_ratio(self): - return(self.axes[0] / self.axes[1]) - - def get_cv2_ellips(self): - """ - :return: the ellipse in opencv3 format for drawing - """ - # return ((int(round(self.center[1])), int(round(self.center[0]))), - # (int(round(self.axes[0])), int(round(self.axes[1]))), - # -self.angle, 0, 360) - return ((int(self.center[1]), int(self.center[0])), - (int(self.axes[0]), int(self.axes[1])), - -self.angle, 0, 360) - - def get_area(self): - return np.pi * self.axes[0] * self.axes[1] - - def get_binary_mask(self, shape): - """ - :param shape: tuple of 2 positive integers (height, width) - :return: binary mask of the ellipse with given shape - """ - mask = np.zeros(shape=shape, dtype=np.uint8) - ell_cv2 = self.get_cv2_ellips() - mask = cv2.ellipse(mask, center=ell_cv2[0], axes=ell_cv2[1], angle=ell_cv2[2], startAngle=0, endAngle=360, - color=1, thickness=-1) - return mask.astype(np.uint8) - - def get_intensity(self, img): - """ - :param img: 2d gray scale image - :return: mean intensity of ellipse - """ - - if len(img.shape) != 2: - raise ValueError('input image should be 2d array.') - - mask = self.get_binary_mask(img.shape) - return np.mean(img[mask]) - - def draw(self, img, color=(0, 255, 0), thickness=3): - """ - :param img: 3d array, (height x width x channel), opencv frame - :param color: - :param thickness: - :return: - """ - - ell_cv2 = self.get_cv2_ellips() - img_marked = cv2.ellipse(img=img, center=ell_cv2[0], axes=ell_cv2[1], angle=ell_cv2[2], startAngle=ell_cv2[3], - endAngle=ell_cv2[4], color=color, thickness=thickness) - - # img_marked = cv2.ellipse(img, box=self.to_cv2_box(), color=color, thickness=thickness) - - return img_marked - - def copy(self): - return Ellipse(center=self.center, - axes=self.axes, - angle=self.angle) - - def info(self): - s = 'center: ({:6.2f}, {:6.2f})\n'.format(self.center[0], self.center[1]) - s += 'axes: ({:6.2f}, {:6.2f})\n'.format(self.axes[0], self.axes[1]) - s += 'angle: {:8.2f} deg\n'.format(self.angle) - s += 'area: {:9.2f}\n'.format(self.get_area()) - return s - - @staticmethod - def from_cv2_box(box): - """ - get Ellipse object from cv2 rotated rectangle object (from cv2.fitEllipse() function) - """ - center = (box[0][1], box[0][0]) - axes = (box[1][0] / 2., box[1][1] / 2.) 
- angle = -box[2] - return Ellipse(center=center, axes=axes, angle=angle) - - def to_cv2_box(self): - return ((self.center[1], self.center[0]), (self.axes[0] * 2., self.axes[1] * 2), -self.angle) + return WeightedROI(mask, pixelSize=pixelSize, pixelSizeUnit=pixelSizeUnit) if __name__ == '__main__': @@ -2048,7 +1656,7 @@ def to_cv2_box(self): #============================================================ #============================================================ - a=np.array(range(15)+range(10)[::-1]).reshape((5,5)) + a=np.array(list(range(15))+list(range(10))[::-1]).reshape((5,5)) print(a) labeled,_ = ni.label(a>7) peakCoor = np.array(np.where(a==np.amax(a))).transpose()[0] @@ -2060,7 +1668,7 @@ def to_cv2_box(self): #============================================================ # mov = np.arange(64).reshape((4,4,4)) - # print(mov) + # print mov # # mask1 = np.zeros((4,4)); mask1[2,2]=1; mask1[1,1]=1 # trace1 = get_trace(mov,mask1,maskMode='binary') @@ -2089,7 +1697,7 @@ def to_cv2_box(self): # # masks = {'mask1':mask1, 'mask2':mask2} # traces = get_trace_binaryslicer3(bl_obj,masks,mask_mode='binary',loading_frame_num=2) - # print(traces) + # print traces # assert(traces['trace_mask1'][2] == 39.5) # assert(traces['trace_mask2'][3] == 60.5) #============================================================ @@ -2099,13 +1707,13 @@ def to_cv2_box(self): # roi1 = np.zeros((10, 10)) # roi1[4:8, 3:7] = 1 # roi1 = ROI(roi1) - # print(roi1.get_pixel_array()) - # print(roi1.get_pixel_list()) - # print(roi1.get_pixel_tuple()) + # print roi1.get_pixel_array() + # print roi1.get_pixel_list() + # print roi1.get_pixel_tuple() # roi2 = np.zeros((10, 10)) # roi2[5:9, 5:8] = 1 # roi2 = ROI(roi2) - # print(roi1.binary_overlap(roi2)) + # print roi1.binary_overlap(roi2) # ============================================================ # ============================================================ @@ -2113,9 +1721,9 @@ def to_cv2_box(self): # trend = np.zeros((100,)) # trend[5] = 1 # detrended, slope, r = regression_detrend_1d(sig, trend) - # print(detrended) - # print(slope) - # print(r) + # print detrended + # print slope + # print r # ============================================================ # ============================================================ diff --git a/corticalmapping/core/PlottingTools.py b/corticalmapping/core/PlottingTools.py index f5d918d..d6c2fb5 100644 --- a/corticalmapping/core/PlottingTools.py +++ b/corticalmapping/core/PlottingTools.py @@ -8,21 +8,14 @@ import numpy as np import matplotlib.pyplot as plt from matplotlib import cm -import matplotlib import matplotlib.gridspec as gridspec import colorsys import matplotlib.colors as col import scipy.ndimage as ni -try: - import tifffile as tf -except ImportError: - import skimage.external.tifffile as tf - -try: - import ImageAnalysis as ia -except (AttributeError, ImportError): - from . import ImageAnalysis as ia +from . import tifffile as tf +from . import ImageAnalysis as ia +from . 
import TimingAnalysis as ta try: import cv2 @@ -41,7 +34,7 @@ def get_color_str(R, G, B): """ get hex color string from R,G,B value (integer with uint8 format) """ - if not (isinstance(R, (int, long)) and isinstance(G, (int, long)) and isinstance(G, (int, long))): + if not (isinstance(R, int) and isinstance(G, int) and isinstance(B, int)): raise TypeError('Input R, G and B should be integer!') if not ((0 <= R <= 255) and (0 <= G <= 255) and ( @@ -143,9 +136,7 @@ def bar_graph(left, elif errorDir == 'negative': yerr = [[error], [0]] else: - raise (ValueError, '"errorDir" should be one of the following: "both", "positive" of "negative".') - - + raise ValueError('"errorDir" should be one of the following: "both", "positive" or "negative".') plotAxis.errorbar(left + width / 2, height, @@ -161,8 +152,7 @@ color=faceColor, edgecolor=edgeColor, lw=lw, - label=label, - align='edge') + label=label) return plotAxis @@ -522,15 +512,13 @@ def save_figure_without_borders(f, """ remove borders of a figure """ - f.gca().get_xaxis().set_visible(False) - f.gca().get_yaxis().set_visible(False) + # f.gca().get_xaxis().set_visible(False) + # f.gca().get_yaxis().set_visible(False) f.gca().set_axis_off() f.gca().set_title('') if removeSuperTitle: f.suptitle('') - f.tight_layout(pad=0., h_pad=0., w_pad=0., rect=(0, 0, 1, 1)) - # f.savefig(savePath, frameon=False, **kwargs) - f.savefig(savePath, pad_inches=0, bbox_inches='tight', frameon=False, **kwargs) + f.savefig(savePath, pad_inches=0, bbox_inches='tight', **kwargs) def merge_normalized_images(imgList, isFilter=True, sigma=50, mergeMethod='mean', dtype=np.float32): @@ -662,7 +650,7 @@ def plot_spike_waveforms(unit_ts, channels, channel_ts, fig=None, t_range=(-0.00 :return: fig """ - # print('in plotting tools.') + # print 'in plotting tools.' if fig is None: fig = plt.figure(figsize=(8, 6)) @@ -671,14 +659,14 @@ t_step = np.mean(np.diff(channel_ts)) ind_range = [int(t_range[0] / t_step), int(t_range[1] / t_step)] - # print('ind_range:', ind_range) + # print 'ind_range:', ind_range if t_range[0] < 0: base_point_num = -int(t_range[0] / t_step) else: base_point_num = ind_range[1] - ind_range[0] - # print('getting spike indices ...') + # print 'getting spike indices ...' unit_inds = np.round((unit_ts - channel_ts[0]) / t_step).astype(np.int64) unit_inds = np.array([ind for ind in unit_inds if (ind + ind_range[0]) >= 0 and (ind + ind_range[1]) < len(channel_ts)]) @@ -686,11 +674,11 @@ # axis direction: (channel, spike, time) traces = np.zeros((ch_num, len(unit_inds), ind_range[1] - ind_range[0]), dtype=np.float32) - # print('traces shape:', traces.shape) + # print 'traces shape:', traces.shape - # print('filling traces ...') + # print 'filling traces ...' 
for i, ch in enumerate(channels): - # print('current channel:', i) + # print 'current channel:', i for j, unit_ind in enumerate(unit_inds): curr_trace = ch[unit_ind + ind_range[0]: unit_ind + ind_range[1]] traces[i, j, :] = curr_trace - np.mean(curr_trace[0:base_point_num]) @@ -699,8 +687,8 @@ def plot_spike_waveforms(unit_ts, channels, channel_ts, fig=None, t_range=(-0.00 traces_max = np.amax(traces) mean_traces = np.mean(traces, axis=1) - # print(traces_min) - # print(traces_max) + # print traces_min + # print traces_max t_axis = t_range[0] + np.arange(traces.shape[2], dtype=np.float32) * t_step for k in range(traces.shape[0]): @@ -827,71 +815,10 @@ def plot_multiple_traces(traces, x=None, plot_axis=None, mean_kw=None, is_plot_s return plot_axis -def plot_dire_distribution(dires, weights=None, is_arc=False, bins=12, plot_ax=None, plot_type='bar', **kwargs): - """ - plot the distribution of a list of directions in a nice way. - - :param dires: array of float. directions to be plotted. - :param weights: array with same size as dires, weights of data - :param is_arc: bool. If True, dires are in [0, 2*pi] scale, if False, dires are in [0, 360] scale - :param bins: int, how many bins are there - :param plot_ax: matplotlib.axes._subplots.PolarAxesSubplot object - :param plot_type: str, 'bar' or 'line' - :param kwargs: if plot_type == 'bar', key word argument to the plot_ax.bar() function; - if plot_type == 'line', kew word argument to the plot_ax.plot() function; - :return: - """ - - if plot_ax is None: - f = plt.figure(figsize=(5,5)) - plot_ax = f.add_subplot(111, projection='polar') - - if not isinstance(plot_ax, matplotlib.projections.polar.PolarAxes): - raise TypeError('input "plot_ax" should be a "matplotlib.projections.polar.PolarAxes" or ' - 'a "matplotlib.axes._subplots.PolarAxesSubplot" object') - - plot_dires = np.array(dires, dtype=np.float64) - - if is_arc is False: - plot_dires = plot_dires * np.pi / 180. 
- - plot_dires = plot_dires % (2 * np.pi) - - bin_width = np.pi * 2 / bins - - for dire_i, dire in enumerate(plot_dires): - if dire > ((np.pi * 2) - (bin_width / 2)): - plot_dires[dire_i] = dire - (np.pi * 2) - - # print(plot_dires) - counts, bin_lst = np.histogram(plot_dires, weights=weights, bins=bins, range=[-bin_width / 2., (np.pi * 2) - (bin_width / 2)]) - bin_lst = bin_lst[0:-1] + (bin_width / 2) - - if plot_type == 'bar': - plot_ax.bar(bin_lst, counts, width=bin_width, align='center', **kwargs) - elif plot_type == 'line': - counts = list(counts) - counts.append(counts[0]) - bin_lst = list(bin_lst) - bin_lst.append(bin_lst[0]) - plot_ax.plot(bin_lst, counts, **kwargs) - else: - raise LookupError('Do not understand parameter "plot_type", should be "bar" or "line".') - - plot_ax.set_xticklabels([]) - - return plot_ax, counts[:-1], bin_lst[:-1] - if __name__ == '__main__': plt.ioff() - # ---------------------------------------------------- - dires = [0,0,0,90,90,90,90,90,90,180,180] - plot_dire_distribution(dires=dires, is_arc=False) - plt.show() - # ---------------------------------------------------- - # ---------------------------------------------------- # bg = np.random.rand(100,100) # maskBin=np.zeros((100,100),dtype=np.uint8) @@ -1015,8 +942,8 @@ def plot_dire_distribution(dires, weights=None, is_arc=False, bins=12, plot_ax= # ---------------------------------------------------- # ---------------------------------------------------- - # grid_axis2(nrows=4, ncols=3, share_level=1) - # plt.show() + grid_axis2(nrows=4, ncols=3, share_level=1) + plt.show() # ---------------------------------------------------- print('for debug') diff --git a/corticalmapping/core/TimingAnalysis.py b/corticalmapping/core/TimingAnalysis.py index 2530691..51dfa41 100644 --- a/corticalmapping/core/TimingAnalysis.py +++ b/corticalmapping/core/TimingAnalysis.py @@ -89,8 +89,8 @@ def discrete_cross_correlation(ts1, ts2, t_range=(-1., 1.), bins=100, isPlot=Fal """ bin_width = (float(t_range[1]) - float(t_range[0])) / bins - t = np.arange(bins).astype(np.float64) * bin_width + t_range[0] - intervals = zip(t, t + bin_width) + t = np.arange(bins) * bin_width + t_range[0] + intervals = list(zip(t, t + bin_width)) values = np.zeros(bins, dtype=np.int64) ts1s = np.sort(ts1) # sort first timestamps array ts2s = np.sort(ts2) # sort second timestamps array @@ -101,7 +101,7 @@ n = len(ts1s) if n == 0: - print 'no overlapping time range (defined as ' + str(t_range) + ' between two input timestamp arrays' + print('no overlapping time range (defined as ' + str(t_range) + ') between two input timestamp arrays') # return None else: ts2_start_ind = 0 @@ -129,7 +129,7 @@ ax = f.add_subplot(111) ax.bar([a[0] for a in intervals], values, bin_width * 0.9) - return t, values.astype(np.float64) + return t, values.astype(np.float32) def find_nearest(trace, value, direction=0): @@ -258,7 +258,7 @@ def sliding_power_spectrum(trace, fs, sliding_window_length=5., sliding_step_len freq_axis: frequency for each row (from low to high) ''' - if len(trace.shape) != 1: raise ValueError, 'Input trace should be 1d array!' 
+ if len(trace.shape) != 1: raise ValueError('Input trace should be 1d array!') total_length = len(trace) / float(fs) @@ -268,14 +268,14 @@ def sliding_power_spectrum(trace, fs, sliding_window_length=5., sliding_step_len freq_axis = np.arange(freq_bins, dtype=np.float32) * freq_bin_width + freq_range[0] if sliding_step_length is None: sliding_step_length = sliding_window_length - if sliding_step_length > sliding_window_length: print "Step length larger than window length, not using all data points!" + if sliding_step_length > sliding_window_length: print("Step length larger than window length, not using all data points!") times = np.arange(0., total_length, sliding_step_length) times = times[(times + sliding_window_length) < total_length] - if len(times) == 0: raise ValueError, 'No time point found.' + if len(times) == 0: raise ValueError('No time point found.') else: points_in_window = int(sliding_window_length * fs) - if points_in_window <= 0: raise ValueError, 'Sliding window length too short!' + if points_in_window <= 0: raise ValueError('Sliding window length too short!') else: spectrum = np.zeros((len(freq_axis), len(times))) for idx, start_time in enumerate(times): @@ -291,8 +291,8 @@ def sliding_power_spectrum(trace, fs, sliding_window_length=5., sliding_step_len fig = ax.imshow(spectrum, interpolation='nearest', **kwargs) ax.set_xlabel('times (sec)') ax.set_ylabel('frequency (Hz)') - ax.set_xticks(range(len(times))[::(len(times)//10)]) - ax.set_yticks(range(len(freq_axis))[::(len(freq_axis)//10)]) + ax.set_xticks(list(range(len(times)))[::(len(times)//10)]) + ax.set_yticks(list(range(len(freq_axis)))[::(len(freq_axis)//10)]) ax.set_xticklabels(times[::(len(times)//10)]) ax.set_yticklabels(freq_axis[::(len(freq_axis)//10)]) ax.invert_yaxis() @@ -640,7 +640,7 @@ def event_triggered_average_irregular(ts_event, continuous, ts_continuous, t_ran eta = np.zeros(t.shape, dtype=np.float32) eta[:] = np.nan - print '\nStart calculating event triggered average ...' + print('\nStart calculating event triggered average ...') percentage = None for ind_eve, eve in enumerate(ts_event): @@ -648,7 +648,7 @@ def event_triggered_average_irregular(ts_event, continuous, ts_continuous, t_ran # for display curr_percentage = int((float(ind_eve) * 100. / float(len(ts_event))) // 10) * 10 if curr_percentage != percentage: - print 'progress: ' + str(curr_percentage) + '%' + print('progress: ' + str(curr_percentage) + '%') # print eve, ':', ts_continuous[-1] percentage = curr_percentage @@ -784,73 +784,6 @@ def event_triggered_event_trains(event_ts, triggers, t_range=(-1., 2.)): return etts, t_range -def threshold_to_intervals(trace, thr, comparison='>='): - """ - threshold a 1d trace, return intervals of indices that are above the threshold. - - :param trace: 1d array - :param thr: float - :param comparison: str, '>', '>=', '<' or '<=' - :return: list of tuples, each tuple contains two non-negative integers representing - the start index and the end index of thresholded intervals. the first int should - be smaller than the second int. 
- """ - - if len(trace.shape) != 1: - raise ValueError("the input 'trace' should be a 1d array.") - - flag = False - - start = [] - end = [] - - for pi, pv in enumerate(trace): - - if comparison == '>=': - if pv >= thr and (not flag): - start.append(pi) - flag = True - - if pv < thr and flag: - end.append(pi) - flag = False - - elif comparison == '>': - if pv > thr and (not flag): - start.append(pi) - flag = True - - if pv <= thr and flag: - end.append(pi) - flag = False - - elif comparison == '<=': - if pv <= thr and (not flag): - start.append(pi) - flag = True - - if pv > thr and flag: - end.append(pi) - flag = False - - elif comparison == '<': - if pv < thr and (not flag): - start.append(pi) - flag = True - - if pv >= thr and flag: - end.append(pi) - flag = False - - else: - raise LookupError('Do not understand input "comparison", should be ">=", ">", "<=", "<".') - - if len(start) - len(end) == 1: - end.append(len(trace)) - - return zip(start, end) - - def haramp(trace, periods, ceil_f=4): """ get amplitudes of first couple harmonic components from a time series corresponding to a sinusoidal stimulus. @@ -886,133 +819,6 @@ def haramp(trace, periods, ceil_f=4): return harmonic -class TimeIntervals(object): - """ - class to describe time intervals, designed to represent epochs - - self.data save the (start, end) timestamps of each epochs. Shape (n, 2). - Each row: a single interval - column 0: start timestamps - column 1: end timestamps - - the intervals are incremental in time and should not have overlap within them. - """ - - def __init__(self, intervals): - self._intervals = self.check_integraty(intervals) - - def get_intervals(self): - return self._intervals - - @staticmethod - def check_integraty(intervals): - - intervals_cp = np.array([np.array(d, dtype=np.float64) for d in intervals]) - intervals_cp = intervals_cp.astype(np.float64) - - if len(intervals_cp.shape) != 2: - raise ValueError('intervals should be 2d.') - - if intervals_cp.shape[1] != 2: - raise ValueError('intervals.shape[1] should be 2. (start, end) of the interval') - - # for interval_i, interval in enumerate(intervals_cp): - # if interval[1] <= interval[0]: - # raise ValueError('the {}th interval: end time ({}) earlier than start time ({})'. 
- # format(interval_i, interval[1], interval[0])) - - intervals_cp = intervals_cp[intervals_cp[:, 0].argsort()] - - ts_list = np.concatenate(intervals_cp, axis=0) - if not check_monotonicity(arr=ts_list, direction='increasing'): - raise ValueError('The intervals should be incremental in time and should not have overlap within them.') - - return intervals_cp - - def overlap(self, time_intervals): - """ - return a new TimeIntervals object that represents the overlap between self and the input Timeintervals - - :param time_intervals: corticalmapping.core.TimingAnalysis.TimeIntervals object - """ - - starts0 = [[s, 1] for s in self._intervals[:, 0]] - ends0 = [[e, -1] for e in self._intervals[:, 1]] - - starts1 = [[s, 1] for s in time_intervals.get_intervals()[:, 0]] - ends1 = [[e, -1] for e in time_intervals.get_intervals()[:, 1]] - - events_lst = starts0 + ends0 + starts1 + ends1 - # print(events_lst) - - ts_arr = np.array([e[0] for e in events_lst]) - events_lst = [events_lst[i] for i in np.argsort(ts_arr)] - # print(events_lst) - - mask = np.cumsum([e[1] for e in events_lst]) - - new_starts = [] - new_ends = [] - - flag = 0 # 1: within overlap, 0: outside overlap - for ts_i, msk in enumerate(mask): - - if flag == 0 and msk == 2: - new_starts.append(events_lst[ts_i][0]) - flag = 1 - elif flag == 1 and msk < 2: - new_ends.append(events_lst[ts_i][0]) - flag = 0 - elif flag == 1 and msk == 2: - raise ValueError('do not understand the timestamps: flag={}, msk={}.'.format(flag, msk)) - else: - pass - - if len(new_starts) != len(new_ends): - raise ValueError('the length of new_starts ({}) does not equal the length of new_ends ({}).'. - format(len(new_starts), len(new_ends))) - - if new_starts: - new_intervals = np.array([np.array(new_starts), np.array(new_ends)]).transpose() - return TimeIntervals(intervals=new_intervals) - else: - return None - - def is_contain(self, time_interval): - """ - :param time_interval: list or tuple of two floats, representing one single time interval - :return: bool, if the input interval is completely contained by self - """ - - if len(time_interval) != 2: - raise ValueError('input "time_interval" should have two and only two elements.') - - if time_interval[0] >= time_interval[1]: - raise ValueError('the start of input "time_interval" should be earlier than the end.') - - for interval in self._intervals: - - if interval[0] > time_interval[0]: # current interval starts after input time_interval - return False - else: # current interval starts before input time_interval - if interval[1] < time_interval[0]: # current interval ends before input time_interval - pass - elif interval[1] < time_interval[1]: # current interval ends within input time_interval - return False - else: - return True # current interval contains input time_interval - - # all intervals in self end before input time_interval - # or self._intervals is empty - return False - - def to_h5_group(self, grp): - pass - - @staticmethod - def from_h5_group(grp): - pass - if __name__=='__main__': @@ -1117,4 +923,4 @@ def from_h5_group(grp): # butter_highpass_filter(is_plot=True) # ============================================================================================================ - print 'for debugging...' 
\ No newline at end of file
+    print('for debugging...')
\ No newline at end of file
diff --git a/corticalmapping/core/tifffile.py b/corticalmapping/core/tifffile.py
new file mode 100644
index 0000000..8391991
--- /dev/null
+++ b/corticalmapping/core/tifffile.py
@@ -0,0 +1,3668 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# tifffile.py
+
+# Copyright (c) 2008-2014, Christoph Gohlke
+# Copyright (c) 2008-2014, The Regents of the University of California
+# Produced at the Laboratory for Fluorescence Dynamics
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the distribution.
+# * Neither the name of the copyright holders nor the names of any
+#   contributors may be used to endorse or promote products derived
+#   from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""Read and write image data from and to TIFF files.
+
+Image and meta-data can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
+ImageJ, MicroManager, FluoView, SEQ and GEL files.
+Only a subset of the TIFF specification is supported, mainly uncompressed
+and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
+grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
+Specifically, reading JPEG/CCITT compressed image data or EXIF/IPTC/GPS/XMP
+meta-data is not implemented. Only primary info records are read for STK,
+FluoView, MicroManager, and NIH image formats.
+
+TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
+BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SEQ, GEL,
+and OME-TIFF, are custom extensions defined by MetaMorph, Carl Zeiss
+MicroImaging, Olympus, Media Cybernetics, Molecular Dynamics, and the Open
+Microscopy Environment consortium respectively.
+
+For command line usage run ``python tifffile.py --help``
+
+:Author:
+  `Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
+
+:Organization:
+  Laboratory for Fluorescence Dynamics, University of California, Irvine
+
+:Version: 2014.02.05
+
+Requirements
+------------
+* `CPython 2.7 or 3.3 <http://www.python.org>`_
+* `Numpy 1.7 <http://www.numpy.org>`_
+* `Matplotlib 1.3 <http://www.matplotlib.org>`_ (optional for plotting)
+* `Tifffile.c 2013.01.18 <http://www.lfd.uci.edu/~gohlke/>`_
+  (recommended for faster decoding of PackBits and LZW encoded strings)
+
+Notes
+-----
+The API is not stable yet and might change between revisions.
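+
+A minimal sketch of the context-manager interface (assuming the 'temp.tif'
+file written in the Examples section below already exists):
+
+>>> with TiffFile('temp.tif') as tif:
+...     data = tif.asarray()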
+
+Tested on little-endian platforms only.
+
+Other Python packages and modules for reading bio-scientific TIFF files:
+* `Imread <http://luispedro.org/software/imread>`_
+* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
+* `SimpleITK <http://www.simpleitk.org>`_
+* `PyLSM <https://launchpad.net/pylsm>`_
+* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_
+* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
+* `Cellcognition.io <http://cellcognition.org/>`_
+* `CellProfiler.bioformats <http://www.cellprofiler.org/>`_
+
+Acknowledgements
+----------------
+* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
+* Wim Lewis for a bug fix and some read_cz_lsm functions.
+* Hadrien Mary for help on reading MicroManager files.
+
+References
+----------
+(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
+    http://partners.adobe.com/public/developer/tiff/
+(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
+(3) MetaMorph Stack (STK) Image File Format.
+    http://support.meta.moleculardevices.com/docs/t10243.pdf
+(4) File Format Description - LSM 5xx Release 2.0.
+    http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
+(5) BioFormats. http://www.loci.wisc.edu/ome/formats.html
+(6) The OME-TIFF format.
+    http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
+(7) TiffDecoder.java
+    http://rsbweb.nih.gov/ij/developer/source/ij/io/TiffDecoder.java.html
+(8) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
+    http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
+(9) Micro-Manager File Formats.
+    http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
+
+Examples
+--------
+>>> data = numpy.random.rand(301, 219)
+>>> imsave('temp.tif', data)
+>>> image = imread('temp.tif')
+>>> assert numpy.all(image == data)
+
+>>> tif = TiffFile('test.tif')
+>>> images = tif.asarray()
+>>> image0 = tif[0].asarray()
+>>> for page in tif:
+...     for tag in page.tags.values():
+...         t = tag.name, tag.value
+...     image = page.asarray()
+...     if page.is_rgb: pass
+...     if page.is_palette:
+...         t = page.color_map
+...     if page.is_stk:
+...         t = page.mm_uic_tags.number_planes
+...     if page.is_lsm:
+...         t = page.cz_lsm_info
+>>> tif.close()
+
+"""
+
+
+import sys
+import os
+import re
+import glob
+import math
+import zlib
+import time
+import json
+import struct
+import warnings
+import datetime
+import collections
+from fractions import Fraction
+from xml.etree import cElementTree as ElementTree
+
+import numpy
+
+__version__ = '2014.02.05'
+__docformat__ = 'restructuredtext en'
+__all__ = ['imsave', 'imread', 'imshow', 'TiffFile', 'TiffSequence']
+
+
+def imsave(filename, data, photometric=None, planarconfig=None,
+           resolution=None, description=None, software='tifffile.py',
+           byteorder=None, bigtiff=False, compress=0, extratags=()):
+    """Write image data to TIFF file.
+
+    Image data are written in one stripe per plane.
+    Dimensions larger than 2 or 3 (depending on photometric mode and
+    planar configuration) are flattened and saved as separate pages.
+    The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
+    the data type.
+
+    Parameters
+    ----------
+    filename : str
+        Name of file to write.
+    data : array_like
+        Input image. The last dimensions are assumed to be image height,
+        width, and samples.
+    photometric : {'minisblack', 'miniswhite', 'rgb'}
+        The color space of the image data.
+        By default this setting is inferred from the data shape.
+    planarconfig : {'contig', 'planar'}
+        Specifies if samples are stored contiguous or in separate planes.
+        By default this setting is inferred from the data shape.
+        'contig': last dimension contains samples.
+        'planar': third last dimension contains samples.
+ resolution : (float, float) or ((int, int), (int, int)) + X and Y resolution in dots per inch as float or rational numbers. + description : str + The subject of the image. Saved with the first page only. + software : str + Name of the software used to create the image. + Saved with the first page only. + byteorder : {'<', '>'} + The endianness of the data in the file. + By default this is the system's native byte order. + bigtiff : bool + If True, the BigTIFF format is used. + By default the standard TIFF format is used for data less than 2000 MB. + compress : int + Values from 0 to 9 controlling the level of zlib compression. + If 0, data are written uncompressed (default). + extratags: sequence of tuples + Additional tags as [(code, dtype, count, value, writeonce)]. + code : int + The TIFF tag Id. + dtype : str + Data type of items in `value` in Python struct format. + One of B, s, H, I, 2I, b, h, i, f, d, Q, or q. + count : int + Number of data values. Not used for string values. + value : sequence + `Count` values compatible with `dtype`. + writeonce : bool + If True, the tag is written to the first page only. + + Examples + -------- + >>> data = numpy.ones((2, 5, 3, 301, 219), 'float32') * 0.5 + >>> imsave('temp.tif', data, compress=6) + + >>> data = numpy.ones((5, 301, 219, 3), 'uint8') + 127 + >>> value = u'{"shape": %s}' % str(list(data.shape)) + >>> imsave('temp.tif', data, extratags=[(270, 's', 0, value, True)]) + + """ + assert(photometric in (None, 'minisblack', 'miniswhite', 'rgb')) + assert(planarconfig in (None, 'contig', 'planar')) + assert(byteorder in (None, '<', '>')) + assert(0 <= compress <= 9) + + if byteorder is None: + byteorder = '<' if sys.byteorder == 'little' else '>' + + data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C') + data_shape = shape = data.shape + data = numpy.atleast_2d(data) + + if not bigtiff and data.size * data.dtype.itemsize < 2000*2**20: + bigtiff = False + offset_size = 4 + tag_size = 12 + numtag_format = 'H' + offset_format = 'I' + val_format = '4s' + else: + bigtiff = True + offset_size = 8 + tag_size = 20 + numtag_format = 'Q' + offset_format = 'Q' + val_format = '8s' + + # unify shape of data + samplesperpixel = 1 + extrasamples = 0 + if photometric is None: + if data.ndim > 2 and (shape[-3] in (3, 4) or shape[-1] in (3, 4)): + photometric = 'rgb' + else: + photometric = 'minisblack' + if photometric == 'rgb': + if len(shape) < 3: + raise ValueError("not a RGB(A) image") + if planarconfig is None: + planarconfig = 'planar' if shape[-3] in (3, 4) else 'contig' + if planarconfig == 'contig': + if shape[-1] not in (3, 4): + raise ValueError("not a contiguous RGB(A) image") + data = data.reshape((-1, 1) + shape[-3:]) + samplesperpixel = shape[-1] + else: + if shape[-3] not in (3, 4): + raise ValueError("not a planar RGB(A) image") + data = data.reshape((-1, ) + shape[-3:] + (1, )) + samplesperpixel = shape[-3] + if samplesperpixel == 4: + extrasamples = 1 + elif planarconfig and len(shape) > 2: + if planarconfig == 'contig': + data = data.reshape((-1, 1) + shape[-3:]) + samplesperpixel = shape[-1] + else: + data = data.reshape((-1, ) + shape[-3:] + (1, )) + samplesperpixel = shape[-3] + extrasamples = samplesperpixel - 1 + else: + planarconfig = None + # remove trailing 1s + while len(shape) > 2 and shape[-1] == 1: + shape = shape[:-1] + data = data.reshape((-1, 1) + shape[-2:] + (1, )) + + shape = data.shape # (pages, planes, height, width, contig samples) + + bytestr = bytes if sys.version[0] == '2' else ( + lambda x: 
bytes(x, 'utf-8') if isinstance(x, str) else x) + tifftypes = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6, + 'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17} + tifftags = { + 'new_subfile_type': 254, 'subfile_type': 255, + 'image_width': 256, 'image_length': 257, 'bits_per_sample': 258, + 'compression': 259, 'photometric': 262, 'fill_order': 266, + 'document_name': 269, 'image_description': 270, 'strip_offsets': 273, + 'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278, + 'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283, + 'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296, + 'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320, + 'extra_samples': 338, 'sample_format': 339} + tags = [] # list of (code, ifdentry, ifdvalue, writeonce) + + def pack(fmt, *val): + return struct.pack(byteorder+fmt, *val) + + def addtag(code, dtype, count, value, writeonce=False): + # compute ifdentry and ifdvalue bytes from code, dtype, count, value + # append (code, ifdentry, ifdvalue, writeonce) to tags list + code = tifftags[code] if code in tifftags else int(code) + if dtype not in tifftypes: + raise ValueError("unknown dtype %s" % dtype) + tifftype = tifftypes[dtype] + rawcount = count + if dtype == 's': + value = bytestr(value) + b'\0' + count = rawcount = len(value) + value = (value, ) + if len(dtype) > 1: + count *= int(dtype[:-1]) + dtype = dtype[-1] + ifdentry = [pack('HH', code, tifftype), + pack(offset_format, rawcount)] + ifdvalue = None + if count == 1: + if isinstance(value, (tuple, list)): + value = value[0] + ifdentry.append(pack(val_format, pack(dtype, value))) + elif struct.calcsize(dtype) * count <= offset_size: + ifdentry.append(pack(val_format, pack(str(count)+dtype, *value))) + else: + ifdentry.append(pack(offset_format, 0)) + ifdvalue = pack(str(count)+dtype, *value) + tags.append((code, b''.join(ifdentry), ifdvalue, writeonce)) + + def rational(arg, max_denominator=1000000): + # return nominator and denominator from float or two integers + try: + f = Fraction.from_float(arg) + except TypeError: + f = Fraction(arg[0], arg[1]) + f = f.limit_denominator(max_denominator) + return f.numerator, f.denominator + + if software: + addtag('software', 's', 0, software, writeonce=True) + if description: + addtag('image_description', 's', 0, description, writeonce=True) + elif shape != data_shape: + addtag('image_description', 's', 0, + "shape=(%s)" % (",".join('%i' % i for i in data_shape)), + writeonce=True) + addtag('datetime', 's', 0, + datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"), + writeonce=True) + addtag('compression', 'H', 1, 32946 if compress else 1) + addtag('orientation', 'H', 1, 1) + addtag('image_width', 'I', 1, shape[-2]) + addtag('image_length', 'I', 1, shape[-3]) + addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2) + addtag('sample_format', 'H', 1, + {'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind]) + addtag('photometric', 'H', 1, + {'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric]) + addtag('samples_per_pixel', 'H', 1, samplesperpixel) + if planarconfig: + addtag('planar_configuration', 'H', 1, 1 if planarconfig=='contig' + else 2) + addtag('bits_per_sample', 'H', samplesperpixel, + (data.dtype.itemsize * 8, ) * samplesperpixel) + else: + addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8) + if extrasamples: + if photometric == 'rgb': + addtag('extra_samples', 'H', 1, 1) # alpha channel + else: + addtag('extra_samples', 'H', extrasamples, (0, ) * extrasamples) + if 
resolution: + addtag('x_resolution', '2I', 1, rational(resolution[0])) + addtag('y_resolution', '2I', 1, rational(resolution[1])) + addtag('resolution_unit', 'H', 1, 2) + addtag('rows_per_strip', 'I', 1, shape[-3]) + + # use one strip per plane + strip_byte_counts = (data[0, 0].size * data.dtype.itemsize, ) * shape[1] + addtag('strip_byte_counts', offset_format, shape[1], strip_byte_counts) + addtag('strip_offsets', offset_format, shape[1], (0, ) * shape[1]) + + # add extra tags from users + for t in extratags: + addtag(*t) + + # the entries in an IFD must be sorted in ascending order by tag code + tags = sorted(tags, key=lambda x: x[0]) + + with open(filename, 'wb') as fh: + seek = fh.seek + tell = fh.tell + + def write(arg, *args): + fh.write(pack(arg, *args) if args else arg) + + write({'<': b'II', '>': b'MM'}[byteorder]) + if bigtiff: + write('HHH', 43, 8, 0) + else: + write('H', 42) + ifd_offset = tell() + write(offset_format, 0) # first IFD + + for pageindex in range(shape[0]): + # update pointer at ifd_offset + pos = tell() + seek(ifd_offset) + write(offset_format, pos) + seek(pos) + + # write ifdentries + write(numtag_format, len(tags)) + tag_offset = tell() + write(b''.join(t[1] for t in tags)) + ifd_offset = tell() + write(offset_format, 0) # offset to next IFD + + # write tag values and patch offsets in ifdentries, if necessary + for tagindex, tag in enumerate(tags): + if tag[2]: + pos = tell() + seek(tag_offset + tagindex*tag_size + offset_size + 4) + write(offset_format, pos) + seek(pos) + if tag[0] == 273: + strip_offsets_offset = pos + elif tag[0] == 279: + strip_byte_counts_offset = pos + write(tag[2]) + + # write image data + data_offset = tell() + if compress: + strip_byte_counts = [] + for plane in data[pageindex]: + plane = zlib.compress(plane, compress) + strip_byte_counts.append(len(plane)) + fh.write(plane) + else: + # if this fails try update Python/numpy + data[pageindex].tofile(fh) + fh.flush() + + # update strip_offsets and strip_byte_counts if necessary + pos = tell() + for tagindex, tag in enumerate(tags): + if tag[0] == 273: # strip_offsets + if tag[2]: + seek(strip_offsets_offset) + strip_offset = data_offset + for size in strip_byte_counts: + write(offset_format, strip_offset) + strip_offset += size + else: + seek(tag_offset + tagindex*tag_size + offset_size + 4) + write(offset_format, data_offset) + elif tag[0] == 279: # strip_byte_counts + if compress: + if tag[2]: + seek(strip_byte_counts_offset) + for size in strip_byte_counts: + write(offset_format, size) + else: + seek(tag_offset + tagindex*tag_size + + offset_size + 4) + write(offset_format, strip_byte_counts[0]) + break + seek(pos) + fh.flush() + # remove tags that should be written only once + if pageindex == 0: + tags = [t for t in tags if not t[-1]] + + +def imread(files, *args, **kwargs): + """Return image data from TIFF file(s) as numpy array. + + The first image series is returned if no arguments are provided. + + Parameters + ---------- + files : str or list + File name, glob pattern, or list of file names. + key : int, slice, or sequence of page indices + Defines which pages to return as array. + series : int + Defines which series of pages in file to return as array. + multifile : bool + If True (default), OME-TIFF data may include pages from multiple files. + pattern : str + Regular expression pattern that matches axes names and indices in + file names. 
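+
+    A minimal sketch of the keyword forms (assuming the 'test.tif' file used
+    in the examples below); both calls forward to TiffFile.asarray:
+
+    >>> im = imread('test.tif', key=0)     # first page only
+    >>> im = imread('test.tif', series=0)  # first page series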
+ + Examples + -------- + >>> im = imread('test.tif', 0) + >>> im.shape + (256, 256, 4) + >>> ims = imread(['test.tif', 'test.tif']) + >>> ims.shape + (2, 256, 256, 4) + + """ + kwargs_file = {} + if 'multifile' in kwargs: + kwargs_file['multifile'] = kwargs['multifile'] + del kwargs['multifile'] + else: + kwargs_file['multifile'] = True + kwargs_seq = {} + if 'pattern' in kwargs: + kwargs_seq['pattern'] = kwargs['pattern'] + del kwargs['pattern'] + + if isinstance(files, str) and any(i in files for i in '?*'): + files = glob.glob(files) + if not files: + raise ValueError('no files found') + if len(files) == 1: + files = files[0] + + if isinstance(files, str): + with TiffFile(files, **kwargs_file) as tif: + return tif.asarray(*args, **kwargs) + else: + with TiffSequence(files, **kwargs_seq) as imseq: + return imseq.asarray(*args, **kwargs) + + +class lazyattr(object): + """Lazy object attribute whose value is computed on first access.""" + __slots__ = ('func', ) + + def __init__(self, func): + self.func = func + + def __get__(self, instance, owner): + if instance is None: + return self + value = self.func(instance) + if value is NotImplemented: + return getattr(super(owner, instance), self.func.__name__) + setattr(instance, self.func.__name__, value) + return value + + +class TiffFile(object): + """Read image and meta-data from TIFF, STK, LSM, and FluoView files. + + TiffFile instances must be closed using the close method, which is + automatically called when using the 'with' statement. + + Attributes + ---------- + pages : list + All TIFF pages in file. + series : list of Records(shape, dtype, axes, TiffPages) + TIFF pages with compatible shapes and types. + micromanager_metadata: dict + Extra MicroManager non-TIFF metadata in the file, if exists. + + All attributes are read-only. + + Examples + -------- + >>> tif = TiffFile('test.tif') + ... try: + ... images = tif.asarray() + ... except Exception as e: + ... print(e) + ... finally: + ... tif.close() + + """ + def __init__(self, arg, name=None, multifile=False): + """Initialize instance from file. + + Parameters + ---------- + arg : str or open file + Name of file or open file object. + The file objects are closed in TiffFile.close(). + name : str + Human readable label of open file. + multifile : bool + If True, series may include pages from multiple files. 
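+
+        A minimal sketch (assuming a hypothetical multi-file OME-TIFF whose
+        master file is named 'master.ome.tif'):
+
+        >>> tif = TiffFile('master.ome.tif', multifile=True)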
+ + """ + if isinstance(arg, str): + filename = os.path.abspath(arg) + self._fh = open(filename, 'rb') + else: + filename = str(name) + self._fh = arg + + self._fh.seek(0, 2) + self._fsize = self._fh.tell() + self._fh.seek(0) + self.fname = os.path.basename(filename) + self.fpath = os.path.dirname(filename) + self._tiffs = {self.fname: self} # cache of TiffFiles + self.offset_size = None + self.pages = [] + self._multifile = bool(multifile) + try: + self._fromfile() + except Exception: + self._fh.close() + raise + + def close(self): + """Close open file handle(s).""" + for tif in list(self._tiffs.values()): + if tif._fh: + tif._fh.close() + tif._fh = None + self._tiffs = {} + + def _fromfile(self): + """Read TIFF header and all page records from file.""" + self._fh.seek(0) + try: + self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)] + except KeyError: + raise ValueError("not a valid TIFF file") + version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0] + if version == 43: # BigTiff + self.offset_size, zero = struct.unpack(self.byteorder+'HH', + self._fh.read(4)) + if zero or self.offset_size != 8: + raise ValueError("not a valid BigTIFF file") + elif version == 42: + self.offset_size = 4 + else: + raise ValueError("not a TIFF file") + self.pages = [] + while True: + try: + page = TiffPage(self) + self.pages.append(page) + except StopIteration: + break + if not self.pages: + raise ValueError("empty TIFF file") + + if self.is_micromanager: + # MicroManager files contain metadata not stored in TIFF tags. + self.micromanager_metadata = read_micromanager_metadata(self._fh) + + @lazyattr + def series(self): + """Return series of TiffPage with compatible shape and properties.""" + series = [] + if self.is_ome: + series = self._omeseries() + elif self.is_fluoview: + dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T', + b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R', + b'EVENT': 'V', b'EXPOSURE': 'L'} + mmhd = list(reversed(self.pages[0].mm_header.dimensions)) + series = [Record( + axes=''.join(dims.get(i[0].strip().upper(), 'Q') + for i in mmhd if i[1] > 1), + shape=tuple(int(i[1]) for i in mmhd if i[1] > 1), + pages=self.pages, dtype=numpy.dtype(self.pages[0].dtype))] + elif self.is_lsm: + lsmi = self.pages[0].cz_lsm_info + axes = CZ_SCAN_TYPES[lsmi.scan_type] + if self.pages[0].is_rgb: + axes = axes.replace('C', '').replace('XY', 'XYC') + axes = axes[::-1] + shape = [getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes] + pages = [p for p in self.pages if not p.is_reduced] + series = [Record(axes=axes, shape=shape, pages=pages, + dtype=numpy.dtype(pages[0].dtype))] + if len(pages) != len(self.pages): # reduced RGB pages + pages = [p for p in self.pages if p.is_reduced] + cp = 1 + i = 0 + while cp < len(pages) and i < len(shape)-2: + cp *= shape[i] + i += 1 + shape = shape[:i] + list(pages[0].shape) + axes = axes[:i] + 'CYX' + series.append(Record(axes=axes, shape=shape, pages=pages, + dtype=numpy.dtype(pages[0].dtype))) + elif self.is_imagej: + shape = [] + axes = [] + ij = self.pages[0].imagej_tags + if 'frames' in ij: + shape.append(ij['frames']) + axes.append('T') + if 'slices' in ij: + shape.append(ij['slices']) + axes.append('Z') + if 'channels' in ij and not self.is_rgb: + shape.append(ij['channels']) + axes.append('C') + remain = len(self.pages) // (numpy.prod(shape) if shape else 1) + if remain > 1: + shape.append(remain) + axes.append('I') + shape.extend(self.pages[0].shape) + axes.extend(self.pages[0].axes) + axes = ''.join(axes) + series = [Record(pages=self.pages, shape=shape, 
axes=axes, + dtype=numpy.dtype(self.pages[0].dtype))] + elif self.is_nih: + series = [Record(pages=self.pages, + shape=(len(self.pages),) + self.pages[0].shape, + axes='I' + self.pages[0].axes, + dtype=numpy.dtype(self.pages[0].dtype))] + elif self.pages[0].is_shaped: + shape = self.pages[0].tags['image_description'].value[7:-1] + shape = tuple(int(i) for i in shape.split(b',')) + series = [Record(pages=self.pages, shape=shape, + axes='Q' * len(shape), + dtype=numpy.dtype(self.pages[0].dtype))] + + if not series: + shapes = [] + pages = {} + for page in self.pages: + if not page.shape: + continue + shape = page.shape + (page.axes, + page.compression in TIFF_DECOMPESSORS) + if not shape in pages: + shapes.append(shape) + pages[shape] = [page] + else: + pages[shape].append(page) + series = [Record(pages=pages[s], + axes=(('I' + s[-2]) + if len(pages[s]) > 1 else s[-2]), + dtype=numpy.dtype(pages[s][0].dtype), + shape=((len(pages[s]), ) + s[:-2] + if len(pages[s]) > 1 else s[:-2])) + for s in shapes] + return series + + def asarray(self, key=None, series=None, memmap=False): + """Return image data of multiple TIFF pages as numpy array. + + By default the first image series is returned. + + Parameters + ---------- + key : int, slice, or sequence of page indices + Defines which pages to return as array. + series : int + Defines which series of pages to return as array. + memmap : bool + If True, use numpy.memmap to read arrays from file if possible. + + """ + if key is None and series is None: + series = 0 + if series is not None: + pages = self.series[series].pages + else: + pages = self.pages + + if key is None: + pass + elif isinstance(key, int): + pages = [pages[key]] + elif isinstance(key, slice): + pages = pages[key] + elif isinstance(key, collections.Iterable): + pages = [pages[k] for k in key] + else: + raise TypeError("key must be an int, slice, or sequence") + + if len(pages) == 1: + return pages[0].asarray(memmap=memmap) + elif self.is_nih: + result = numpy.vstack( + p.asarray(colormapped=False, squeeze=False, memmap=memmap) + for p in pages) + if pages[0].is_palette: + result = numpy.take(pages[0].color_map, result, axis=1) + result = numpy.swapaxes(result, 0, 1) + else: + if self.is_ome and any(p is None for p in pages): + firstpage = next(p for p in pages if p) + nopage = numpy.zeros_like(firstpage.asarray(memmap=memmap)) + result = numpy.vstack((p.asarray(memmap=memmap) if p else nopage) + for p in pages) + if key is None: + try: + result.shape = self.series[series].shape + except ValueError: + warnings.warn("failed to reshape %s to %s" % ( + result.shape, self.series[series].shape)) + result.shape = (-1,) + pages[0].shape + else: + result.shape = (-1,) + pages[0].shape + return result + + def _omeseries(self): + """Return image series in OME-TIFF file(s).""" + root = ElementTree.XML(self.pages[0].tags['image_description'].value) + uuid = root.attrib.get('UUID', None) + self._tiffs = {uuid: self} + modulo = {} + result = [] + for element in root: + if element.tag.endswith('BinaryOnly'): + warnings.warn("not an OME-TIFF master file") + break + if element.tag.endswith('StructuredAnnotations'): + for annot in element: + if not annot.attrib.get('Namespace', + '').endswith('modulo'): + continue + for value in annot: + for modul in value: + for along in modul: + if not along.tag[:-1].endswith('Along'): + continue + axis = along.tag[-1] + newaxis = along.attrib.get('Type', 'other') + newaxis = AXES_LABELS[newaxis] + if 'Start' in along.attrib: + labels = list(range( + 
int(along.attrib['Start']), + int(along.attrib['End']) + 1, + int(along.attrib.get('Step', 1)))) + else: + labels = [label.text for label in along + if label.tag.endswith('Label')] + modulo[axis] = (newaxis, labels) + if not element.tag.endswith('Image'): + continue + for pixels in element: + if not pixels.tag.endswith('Pixels'): + continue + atr = pixels.attrib + axes = "".join(reversed(atr['DimensionOrder'])) + shape = list(int(atr['Size'+ax]) for ax in axes) + size = numpy.prod(shape[:-2]) + ifds = [None] * size + for data in pixels: + if not data.tag.endswith('TiffData'): + continue + atr = data.attrib + ifd = int(atr.get('IFD', 0)) + num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0)) + num = int(atr.get('PlaneCount', num)) + idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]] + idx = numpy.ravel_multi_index(idx, shape[:-2]) + for uuid in data: + if uuid.tag.endswith('UUID'): + if uuid.text not in self._tiffs: + if not self._multifile: + # abort reading multi file OME series + return [] + fn = uuid.attrib['FileName'] + try: + tf = TiffFile(os.path.join(self.fpath, fn)) + except (IOError, ValueError): + warnings.warn("failed to read %s" % fn) + break + self._tiffs[uuid.text] = tf + pages = self._tiffs[uuid.text].pages + try: + for i in range(num if num else len(pages)): + ifds[idx + i] = pages[ifd + i] + except IndexError: + warnings.warn("ome-xml: index out of range") + break + else: + pages = self.pages + try: + for i in range(num if num else len(pages)): + ifds[idx + i] = pages[ifd + i] + except IndexError: + warnings.warn("ome-xml: index out of range") + result.append(Record(axes=axes, shape=shape, pages=ifds, + dtype=numpy.dtype(ifds[0].dtype))) + + for record in result: + for axis, (newaxis, labels) in list(modulo.items()): + i = record.axes.index(axis) + size = len(labels) + if record.shape[i] == size: + record.axes = record.axes.replace(axis, newaxis, 1) + else: + record.shape[i] //= size + record.shape.insert(i+1, size) + record.axes = record.axes.replace(axis, axis+newaxis, 1) + + return result + + def __len__(self): + """Return number of image pages in file.""" + return len(self.pages) + + def __getitem__(self, key): + """Return specified page.""" + return self.pages[key] + + def __iter__(self): + """Return iterator over pages.""" + return iter(self.pages) + + def __str__(self): + """Return string containing information about file.""" + result = [ + self.fname.capitalize(), + format_size(self._fsize), + {'<': 'little endian', '>': 'big endian'}[self.byteorder]] + if self.is_bigtiff: + result.append("bigtiff") + if len(self.pages) > 1: + result.append("%i pages" % len(self.pages)) + if len(self.series) > 1: + result.append("%i series" % len(self.series)) + if len(self._tiffs) > 1: + result.append("%i files" % (len(self._tiffs))) + return ", ".join(result) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + @lazyattr + def fstat(self): + try: + return os.fstat(self._fh.fileno()) + except Exception: # io.UnsupportedOperation + return None + + @lazyattr + def is_bigtiff(self): + return self.offset_size != 4 + + @lazyattr + def is_rgb(self): + return all(p.is_rgb for p in self.pages) + + @lazyattr + def is_palette(self): + return all(p.is_palette for p in self.pages) + + @lazyattr + def is_mdgel(self): + return any(p.is_mdgel for p in self.pages) + + @lazyattr + def is_mediacy(self): + return any(p.is_mediacy for p in self.pages) + + @lazyattr + def is_stk(self): + return all(p.is_stk for p in self.pages) + + 
@lazyattr + def is_lsm(self): + return self.pages[0].is_lsm + + @lazyattr + def is_imagej(self): + return self.pages[0].is_imagej + + @lazyattr + def is_micromanager(self): + return self.pages[0].is_micromanager + + @lazyattr + def is_nih(self): + return self.pages[0].is_nih + + @lazyattr + def is_fluoview(self): + return self.pages[0].is_fluoview + + @lazyattr + def is_ome(self): + return self.pages[0].is_ome + + +class TiffPage(object): + """A TIFF image file directory (IFD). + + Attributes + ---------- + index : int + Index of page in file. + dtype : str {TIFF_SAMPLE_DTYPES} + Data type of image, colormapped if applicable. + shape : tuple + Dimensions of the image array in TIFF page, + colormapped and with one alpha channel if applicable. + axes : str + Axes label codes: + 'X' width, 'Y' height, 'S' sample, 'P' plane, 'I' image series, + 'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda, + 'T' time, 'R' region|tile, 'A' angle, 'F' phase, 'H' lifetime, + 'L' exposure, 'V' event, 'Q' unknown, '_' missing + tags : TiffTags + Dictionary of tags in page. + Tag values are also directly accessible as attributes. + color_map : numpy array + Color look up table, if exists. + mm_uic_tags: Record(dict) + Consolidated MetaMorph mm_uic# tags, if exists. + cz_lsm_scan_info: Record(dict) + LSM scan info attributes, if exists. + imagej_tags: Record(dict) + Consolidated ImageJ description and metadata tags, if exists. + + All attributes are read-only. + + """ + def __init__(self, parent): + """Initialize instance from file.""" + self.parent = parent + self.index = len(parent.pages) + self.shape = self._shape = () + self.dtype = self._dtype = None + self.axes = "" + self.tags = TiffTags() + + self._fromfile() + self._process_tags() + + def _fromfile(self): + """Read TIFF IFD structure and its tags from file. + + File cursor must be at storage position of IFD offset and is left at + offset to next IFD. + + Raises StopIteration if offset (first bytes read) is 0. + + """ + fh = self.parent._fh + byteorder = self.parent.byteorder + offset_size = self.parent.offset_size + + fmt = {4: 'I', 8: 'Q'}[offset_size] + offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0] + if not offset: + raise StopIteration() + + # read standard tags + tags = self.tags + fh.seek(offset) + fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size] + try: + numtags = struct.unpack(byteorder + fmt, fh.read(size))[0] + except Exception: + warnings.warn("corrupted page list") + raise StopIteration() + + tagcode = 0 + for _ in range(numtags): + try: + tag = TiffTag(self.parent) + except TiffTag.Error as e: + warnings.warn(str(e)) + finally: + if tagcode > tag.code: + warnings.warn("tags are not ordered by code") + tagcode = tag.code + if not tag.name in tags: + tags[tag.name] = tag + else: + # some files contain multiple IFD with same code + # e.g. MicroManager files contain two image_description + for ext in ('_1', '_2', '_3'): + name = tag.name + ext + if not name in tags: + tags[name] = tag + break + + # read LSM info subrecords + if self.is_lsm: + pos = fh.tell() + for name, reader in list(CZ_LSM_INFO_READERS.items()): + try: + offset = self.cz_lsm_info['offset_'+name] + except KeyError: + continue + if not offset: + continue + fh.seek(offset) + try: + setattr(self, 'cz_lsm_'+name, reader(fh, byteorder)) + except ValueError: + pass + fh.seek(pos) + + def _process_tags(self): + """Validate standard tags and initialize attributes. + + Raise ValueError if tag values are not supported. 
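+
+        For example, a 'photometric' tag value of 2 is validated against the
+        TIFF_TAGS table defined later in this module and exposed as the
+        attribute ``self.photometric == 'rgb'``.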
+ + """ + tags = self.tags + for code, (name, default, dtype, count, validate) in list(TIFF_TAGS.items()): + if not (name in tags or default is None): + tags[name] = TiffTag(code, dtype=dtype, count=count, + value=default, name=name) + if name in tags and validate: + try: + if tags[name].count == 1: + setattr(self, name, validate[tags[name].value]) + else: + setattr(self, name, tuple( + validate[value] for value in tags[name].value)) + except KeyError: + raise ValueError("%s.value (%s) not supported" % + (name, tags[name].value)) + + tag = tags['bits_per_sample'] + if tag.count == 1: + self.bits_per_sample = tag.value + else: + value = tag.value[:self.samples_per_pixel] + if any((v-value[0] for v in value)): + self.bits_per_sample = value + else: + self.bits_per_sample = value[0] + + tag = tags['sample_format'] + if tag.count == 1: + self.sample_format = TIFF_SAMPLE_FORMATS[tag.value] + else: + value = tag.value[:self.samples_per_pixel] + if any((v-value[0] for v in value)): + self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value] + else: + self.sample_format = TIFF_SAMPLE_FORMATS[value[0]] + + if not 'photometric' in tags: + self.photometric = None + + if 'image_length' in tags: + self.strips_per_image = int(math.floor( + float(self.image_length + self.rows_per_strip - 1) / + self.rows_per_strip)) + else: + self.strips_per_image = 0 + + key = (self.sample_format, self.bits_per_sample) + self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None) + + if self.is_imagej: + # consolidate imagej meta data + if 'image_description_1' in self.tags: # MicroManager + adict = imagej_description(tags['image_description_1'].value) + else: + adict = imagej_description(tags['image_description'].value) + if 'imagej_metadata' in tags: + try: + adict.update(imagej_metadata( + tags['imagej_metadata'].value, + tags['imagej_byte_counts'].value, + self.parent.byteorder)) + except Exception as e: + warnings.warn(str(e)) + self.imagej_tags = Record(adict) + + if not 'image_length' in self.tags or not 'image_width' in self.tags: + # some GEL file pages are missing image data + self.image_length = 0 + self.image_width = 0 + self.strip_offsets = 0 + self._shape = () + self.shape = () + self.axes = '' + + if self.is_palette: + self.dtype = self.tags['color_map'].dtype[1] + self.color_map = numpy.array(self.color_map, self.dtype) + dmax = self.color_map.max() + if dmax < 256: + self.dtype = numpy.uint8 + self.color_map = self.color_map.astype(self.dtype) + #else: + # self.dtype = numpy.uint8 + # self.color_map >>= 8 + # self.color_map = self.color_map.astype(self.dtype) + self.color_map.shape = (3, -1) + + if self.is_stk: + # consolidate mm_uci tags + planes = tags['mm_uic2'].count + self.mm_uic_tags = Record(tags['mm_uic2'].value) + for key in ('mm_uic3', 'mm_uic4', 'mm_uic1'): + if key in tags: + self.mm_uic_tags.update(tags[key].value) + if self.planar_configuration == 'contig': + self._shape = (planes, 1, self.image_length, self.image_width, + self.samples_per_pixel) + self.shape = tuple(self._shape[i] for i in (0, 2, 3, 4)) + self.axes = 'PYXS' + else: + self._shape = (planes, self.samples_per_pixel, + self.image_length, self.image_width, 1) + self.shape = self._shape[:4] + self.axes = 'PSYX' + if self.is_palette and (self.color_map.shape[1] + >= 2**self.bits_per_sample): + self.shape = (3, planes, self.image_length, self.image_width) + self.axes = 'CPYX' + else: + warnings.warn("palette cannot be applied") + self.is_palette = False + elif self.is_palette: + samples = 1 + if 'extra_samples' in self.tags: 
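+                # a palette image normally stores one sample per pixel;
+                # declared extra samples widen the shape computed below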
+ samples += len(self.extra_samples) + if self.planar_configuration == 'contig': + self._shape = ( + 1, 1, self.image_length, self.image_width, samples) + else: + self._shape = ( + 1, samples, self.image_length, self.image_width, 1) + if self.color_map.shape[1] >= 2**self.bits_per_sample: + self.shape = (3, self.image_length, self.image_width) + self.axes = 'CYX' + else: + warnings.warn("palette cannot be applied") + self.is_palette = False + self.shape = (self.image_length, self.image_width) + self.axes = 'YX' + elif self.is_rgb or self.samples_per_pixel > 1: + if self.planar_configuration == 'contig': + self._shape = (1, 1, self.image_length, self.image_width, + self.samples_per_pixel) + self.shape = (self.image_length, self.image_width, + self.samples_per_pixel) + self.axes = 'YXS' + else: + self._shape = (1, self.samples_per_pixel, self.image_length, + self.image_width, 1) + self.shape = self._shape[1:-1] + self.axes = 'SYX' + if self.is_rgb and 'extra_samples' in self.tags: + extra_samples = self.extra_samples + if self.tags['extra_samples'].count == 1: + extra_samples = (extra_samples, ) + for exs in extra_samples: + if exs in ('unassalpha', 'assocalpha', 'unspecified'): + if self.planar_configuration == 'contig': + self.shape = self.shape[:2] + (4,) + else: + self.shape = (4,) + self.shape[1:] + break + else: + self._shape = (1, 1, self.image_length, self.image_width, 1) + self.shape = self._shape[2:4] + self.axes = 'YX' + + if not self.compression and not 'strip_byte_counts' in tags: + self.strip_byte_counts = numpy.prod(self.shape) * ( + self.bits_per_sample // 8) + + def asarray(self, squeeze=True, colormapped=True, rgbonly=True, + memmap=False): + """Read image data from file and return as numpy array. + + Raise ValueError if format is unsupported. + If any argument is False, the shape of the returned array might be + different from the page shape. + + Parameters + ---------- + squeeze : bool + If True, all length-1 dimensions (except X and Y) are + squeezed out from result. + colormapped : bool + If True, color mapping is applied for palette-indexed images. + rgbonly : bool + If True, return RGB(A) image without additional extra samples. + memmap : bool + If True, use numpy.memmap to read array if possible. 
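+
+        Examples
+        --------
+        A minimal sketch, assuming the 'test.tif' file from the TiffFile
+        docstring:
+
+        >>> page = TiffFile('test.tif')[0]
+        >>> image = page.asarray(memmap=True)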
+ + """ + fh = self.parent._fh + if not fh: + raise IOError("TIFF file is not open") + if self.dtype is None: + raise ValueError("data type not supported: %s%i" % ( + self.sample_format, self.bits_per_sample)) + if self.compression not in TIFF_DECOMPESSORS: + raise ValueError("cannot decompress %s" % self.compression) + if ('ycbcr_subsampling' in self.tags + and self.tags['ycbcr_subsampling'].value not in (1, (1, 1))): + raise ValueError("YCbCr subsampling not supported") + tag = self.tags['sample_format'] + if tag.count != 1 and any((i-tag.value[0] for i in tag.value)): + raise ValueError("sample formats don't match %s" % str(tag.value)) + + dtype = self._dtype + shape = self._shape + + if not shape: + return None + + image_width = self.image_width + image_length = self.image_length + typecode = self.parent.byteorder + dtype + bits_per_sample = self.bits_per_sample + byteorder_is_native = ({'big': '>', 'little': '<'}[sys.byteorder] == + self.parent.byteorder) + + if self.is_tiled: + if 'tile_offsets' in self.tags: + byte_counts = self.tile_byte_counts + offsets = self.tile_offsets + else: + byte_counts = self.strip_byte_counts + offsets = self.strip_offsets + tile_width = self.tile_width + tile_length = self.tile_length + tw = (image_width + tile_width - 1) // tile_width + tl = (image_length + tile_length - 1) // tile_length + shape = shape[:-3] + (tl*tile_length, tw*tile_width, shape[-1]) + tile_shape = (tile_length, tile_width, shape[-1]) + runlen = tile_width + else: + byte_counts = self.strip_byte_counts + offsets = self.strip_offsets + runlen = image_width + + try: + offsets[0] + except TypeError: + offsets = (offsets, ) + byte_counts = (byte_counts, ) + if any(o < 2 for o in offsets): + raise ValueError("corrupted page") + + if (not self.is_tiled and (self.is_stk or (not self.compression + and bits_per_sample in (8, 16, 32, 64) + and all(offsets[i] == offsets[i+1] - byte_counts[i] + for i in range(len(offsets)-1))))): + # contiguous data + if (memmap and not (self.is_tiled or self.predictor or + ('extra_samples' in self.tags) or + (colormapped and self.is_palette) or + (not byteorder_is_native))): + result = numpy.memmap(fh, typecode, 'r', offsets[0], shape) + else: + fh.seek(offsets[0]) + result = numpy_fromfile(fh, typecode, numpy.prod(shape)) + result = result.astype('=' + dtype) + else: + if self.planar_configuration == 'contig': + runlen *= self.samples_per_pixel + if bits_per_sample in (8, 16, 32, 64, 128): + if (bits_per_sample * runlen) % 8: + raise ValueError("data and sample size mismatch") + + def unpack(x): + return numpy.fromstring(x, typecode) + elif isinstance(bits_per_sample, tuple): + def unpack(x): + return unpackrgb(x, typecode, bits_per_sample) + else: + def unpack(x): + return unpackints(x, typecode, bits_per_sample, runlen) + decompress = TIFF_DECOMPESSORS[self.compression] + if self.is_tiled: + result = numpy.empty(shape, dtype) + tw, tl, pl = 0, 0, 0 + for offset, bytecount in zip(offsets, byte_counts): + fh.seek(offset) + tile = unpack(decompress(fh.read(bytecount))) + tile.shape = tile_shape + if self.predictor == 'horizontal': + numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile) + result[0, pl, tl:tl+tile_length, + tw:tw+tile_width, :] = tile + del tile + tw += tile_width + if tw >= shape[-2]: + tw, tl = 0, tl + tile_length + if tl >= shape[-3]: + tl, pl = 0, pl + 1 + result = result[..., :image_length, :image_width, :] + else: + strip_size = (self.rows_per_strip * self.image_width * + self.samples_per_pixel) + result = numpy.empty(shape, 
dtype).reshape(-1) + index = 0 + for offset, bytecount in zip(offsets, byte_counts): + fh.seek(offset) + strip = fh.read(bytecount) + strip = unpack(decompress(strip)) + size = min(result.size, strip.size, strip_size, + result.size - index) + result[index:index+size] = strip[:size] + del strip + index += size + + result.shape = self._shape + + if self.predictor == 'horizontal' and not self.is_tiled: + # work around bug in LSM510 software + if not (self.parent.is_lsm and not self.compression): + numpy.cumsum(result, axis=-2, dtype=dtype, out=result) + + if colormapped and self.is_palette: + if self.color_map.shape[1] >= 2**bits_per_sample: + # FluoView and LSM might fail here + result = numpy.take(self.color_map, + result[:, 0, :, :, 0], axis=1) + elif rgbonly and self.is_rgb and 'extra_samples' in self.tags: + # return only RGB and first alpha channel if exists + extra_samples = self.extra_samples + if self.tags['extra_samples'].count == 1: + extra_samples = (extra_samples, ) + for i, exs in enumerate(extra_samples): + if exs in ('unassalpha', 'assocalpha', 'unspecified'): + if self.planar_configuration == 'contig': + result = result[..., [0, 1, 2, 3+i]] + else: + result = result[:, [0, 1, 2, 3+i]] + break + else: + if self.planar_configuration == 'contig': + result = result[..., :3] + else: + result = result[:, :3] + + if squeeze: + try: + result.shape = self.shape + except ValueError: + warnings.warn("failed to reshape from %s to %s" % ( + str(result.shape), str(self.shape))) + + return result + + def __str__(self): + """Return string containing information about page.""" + s = ', '.join(s for s in ( + ' x '.join(str(i) for i in self.shape), + str(numpy.dtype(self.dtype)), + '%s bit' % str(self.bits_per_sample), + self.photometric if 'photometric' in self.tags else '', + self.compression if self.compression else 'raw', + '|'.join(t[3:] for t in ( + 'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej', + 'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy', + 'is_reduced', 'is_tiled') if getattr(self, t))) if s) + return "Page %i: %s" % (self.index, s) + + def __getattr__(self, name): + """Return tag value.""" + if name in self.tags: + value = self.tags[name].value + setattr(self, name, value) + return value + raise AttributeError(name) + + @lazyattr + def is_rgb(self): + """True if page contains a RGB image.""" + return ('photometric' in self.tags and + self.tags['photometric'].value == 2) + + @lazyattr + def is_palette(self): + """True if page contains a palette-colored image.""" + return ('photometric' in self.tags and + self.tags['photometric'].value == 3) + + @lazyattr + def is_tiled(self): + """True if page contains tiled image.""" + return 'tile_width' in self.tags + + @lazyattr + def is_reduced(self): + """True if page is a reduced image of another image.""" + return bool(self.tags['new_subfile_type'].value & 1) + + @lazyattr + def is_mdgel(self): + """True if page contains md_file_tag tag.""" + return 'md_file_tag' in self.tags + + @lazyattr + def is_mediacy(self): + """True if page contains Media Cybernetics Id tag.""" + return ('mc_id' in self.tags and + self.tags['mc_id'].value.startswith(b'MC TIFF')) + + @lazyattr + def is_stk(self): + """True if page contains MM_UIC2 tag.""" + return 'mm_uic2' in self.tags + + @lazyattr + def is_lsm(self): + """True if page contains LSM CZ_LSM_INFO tag.""" + return 'cz_lsm_info' in self.tags + + @lazyattr + def is_fluoview(self): + """True if page contains FluoView MM_STAMP tag.""" + return 'mm_stamp' in self.tags + + @lazyattr + def 
is_nih(self):
+        """True if page contains NIH image header."""
+        return 'nih_image_header' in self.tags
+
+    @lazyattr
+    def is_ome(self):
+        """True if page contains OME-XML in image_description tag."""
+        return ('image_description' in self.tags and self.tags[
+            'image_description'].value.startswith(b'<?xml version='))
+
+    @lazyattr
+    def is_shaped(self):
+        """True if page contains shape in image_description tag."""
+        return ('image_description' in self.tags and self.tags[
+            'image_description'].value.startswith(b'shape=('))
+
+    @lazyattr
+    def is_imagej(self):
+        """True if page contains ImageJ description."""
+        return (
+            ('image_description' in self.tags and
+             self.tags['image_description'].value.startswith(b'ImageJ=')) or
+            ('image_description_1' in self.tags and  # Micromanager
+             self.tags['image_description_1'].value.startswith(b'ImageJ=')))
+
+    @lazyattr
+    def is_micromanager(self):
+        """True if page contains Micro-Manager metadata."""
+        return 'micromanager_metadata' in self.tags
+
+
+class TiffTag(object):
+    """A TIFF tag structure.
+
+    Attributes
+    ----------
+    name : string
+        Attribute name of tag.
+    code : int
+        Decimal code of tag.
+    dtype : str
+        Datatype of tag data. One of TIFF_DATA_TYPES.
+    count : int
+        Number of values.
+    value : various types
+        Tag data as Python object.
+    value_offset : int
+        Location of value in file, if any.
+
+    All attributes are read-only.
+
+    """
+    __slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
+                 '_offset', '_value')
+
+    class Error(Exception):
+        pass
+
+    def __init__(self, arg, **kwargs):
+        """Initialize instance from file or arguments."""
+        self._offset = None
+        if hasattr(arg, '_fh'):
+            self._fromfile(arg, **kwargs)
+        else:
+            self._fromdata(arg, **kwargs)
+
+    def _fromdata(self, code, dtype, count, value, name=None):
+        """Initialize instance from arguments."""
+        self.code = int(code)
+        self.name = name if name else str(code)
+        self.dtype = TIFF_DATA_TYPES[dtype]
+        self.count = int(count)
+        self.value = value
+
+    def _fromfile(self, parent):
+        """Read tag structure from open file. Advance file cursor."""
+        fh = parent._fh
+        byteorder = parent.byteorder
+        self._offset = fh.tell()
+        self.value_offset = self._offset + parent.offset_size + 4
+
+        fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
+        data = fh.read(size)
+        code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
+        count, value = struct.unpack(byteorder + fmt[2:], data[4:])
+        self._value = value
+
+        if code in TIFF_TAGS:
+            name = TIFF_TAGS[code][0]
+        elif code in CUSTOM_TAGS:
+            name = CUSTOM_TAGS[code][0]
+        else:
+            name = str(code)
+
+        try:
+            dtype = TIFF_DATA_TYPES[dtype]
+        except KeyError:
+            raise TiffTag.Error("unknown tag data type %i" % dtype)
+
+        fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
+        size = struct.calcsize(fmt)
+        if size > parent.offset_size or code in CUSTOM_TAGS:
+            pos = fh.tell()
+            tof = {4: 'I', 8: 'Q'}[parent.offset_size]
+            self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
+            if offset < 0 or offset > parent._fsize:
+                raise TiffTag.Error("corrupt file - invalid tag value offset")
+            elif offset < 4:
+                raise TiffTag.Error("corrupt value offset for tag %i" % code)
+            fh.seek(offset)
+            if code in CUSTOM_TAGS:
+                readfunc = CUSTOM_TAGS[code][1]
+                value = readfunc(fh, byteorder, dtype, count)
+                fh.seek(0, 2)  # bug in numpy/Python 3.x ?
+                if isinstance(value, dict):  # numpy.core.records.record
+                    value = Record(value)
+            elif code in TIFF_TAGS or dtype[-1] == 's':
+                value = struct.unpack(fmt, fh.read(size))
+            else:
+                value = read_numpy(fh, byteorder, dtype, count)
+                fh.seek(0, 2)  # bug in numpy/Python 3.x ?
+            fh.seek(pos)
+        else:
+            value = struct.unpack(fmt, value[:size])
+
+        if not code in CUSTOM_TAGS:
+            if len(value) == 1:
+                value = value[0]
+
+        if dtype.endswith('s') and isinstance(value, bytes):
+            value = stripnull(value)
+
+        self.code = code
+        self.name = name
+        self.dtype = dtype
+        self.count = count
+        self.value = value
+
+    def __str__(self):
+        """Return string containing information about tag."""
+        return ' '.join(str(getattr(self, s)) for s in self.__slots__)
+
+
+class TiffSequence(object):
+    """Sequence of image files.
+
+    Properties
+    ----------
+    files : list
+        List of file names.
+    shape : tuple
+        Shape of image sequence.
+    axes : str
+        Labels of axes in shape.
+
+    Examples
+    --------
+    >>> ims = TiffSequence("test.oif.files/*.tif")
+    >>> ims = ims.asarray()
+    >>> ims.shape
+    (2, 100, 256, 256)
+
+    """
+    _axes_pattern = """
+        # matches Olympus OIF and Leica TIFF series
+        _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
+        _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+        _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+        _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+        _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+        _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+        _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+        """
+
+    class _ParseError(Exception):
+        pass
+
+    def __init__(self, files, imread=TiffFile, pattern='axes'):
+        """Initialize instance from multiple files.
+
+        Parameters
+        ----------
+        files : str, or sequence of str
+            Glob pattern or sequence of file names.
+        imread : function or class
+            Image read function or class with asarray function returning numpy
+            array from single file.
+        pattern : str
+            Regular expression pattern that matches axes names and sequence
+            indices in file names.
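+
+        A minimal sketch (assuming hypothetical files named like
+        'img_t001_z001.tif', which the default 'axes' pattern matches):
+
+        >>> seq = TiffSequence('img_t*_z*.tif', pattern='axes')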
+ + """ + if isinstance(files, str): + files = natural_sorted(glob.glob(files)) + files = list(files) + if not files: + raise ValueError("no files found") + #if not os.path.isfile(files[0]): + # raise ValueError("file not found") + self.files = files + + if hasattr(imread, 'asarray'): + _imread = imread + + def imread(fname, *args, **kwargs): + with _imread(fname) as im: + return im.asarray(*args, **kwargs) + + self.imread = imread + + self.pattern = self._axes_pattern if pattern == 'axes' else pattern + try: + self._parse() + if not self.axes: + self.axes = 'I' + except self._ParseError: + self.axes = 'I' + self.shape = (len(files),) + self._start_index = (0,) + self._indices = ((i,) for i in range(len(files))) + + def __str__(self): + """Return string with information about image sequence.""" + return "\n".join([ + self.files[0], + '* files: %i' % len(self.files), + '* axes: %s' % self.axes, + '* shape: %s' % str(self.shape)]) + + def __len__(self): + return len(self.files) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def close(self): + pass + + def asarray(self, *args, **kwargs): + """Read image data from all files and return as single numpy array. + + Raise IndexError if image shapes don't match. + + """ + im = self.imread(self.files[0]) + result_shape = self.shape + im.shape + result = numpy.zeros(result_shape, dtype=im.dtype) + result = result.reshape(-1, *im.shape) + for index, fname in zip(self._indices, self.files): + index = [i-j for i, j in zip(index, self._start_index)] + index = numpy.ravel_multi_index(index, self.shape) + im = self.imread(fname, *args, **kwargs) + result[index] = im + result.shape = result_shape + return result + + def _parse(self): + """Get axes and shape from file names.""" + if not self.pattern: + raise self._ParseError("invalid pattern") + pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE) + matches = pattern.findall(self.files[0]) + if not matches: + raise self._ParseError("pattern doesn't match file names") + matches = matches[-1] + if len(matches) % 2: + raise self._ParseError("pattern doesn't match axis name and index") + axes = ''.join(m for m in matches[::2] if m) + if not axes: + raise self._ParseError("pattern doesn't match file names") + + indices = [] + for fname in self.files: + matches = pattern.findall(fname)[-1] + if axes != ''.join(m for m in matches[::2] if m): + raise ValueError("axes don't match within the image sequence") + indices.append([int(m) for m in matches[1::2] if m]) + shape = tuple(numpy.max(indices, axis=0)) + start_index = tuple(numpy.min(indices, axis=0)) + shape = tuple(i-j+1 for i, j in zip(shape, start_index)) + if numpy.prod(shape) != len(self.files): + warnings.warn("files are missing. Missing data are zeroed") + + self.axes = axes.upper() + self.shape = shape + self._indices = indices + self._start_index = start_index + + +class Record(dict): + """Dictionary with attribute access. + + Can also be initialized with numpy.core.records.record. 
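+
+    A minimal usage sketch:
+
+    >>> r = Record({'width': 219, 'height': 301})
+    >>> r.width
+    219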
+ + """ + __slots__ = () + + def __init__(self, arg=None, **kwargs): + if kwargs: + arg = kwargs + elif arg is None: + arg = {} + try: + dict.__init__(self, arg) + except (TypeError, ValueError): + for i, name in enumerate(arg.dtype.names): + v = arg[i] + self[name] = v if v.dtype.char != 'S' else stripnull(v) + + def __getattr__(self, name): + return self[name] + + def __setattr__(self, name, value): + self.__setitem__(name, value) + + def __str__(self): + """Pretty print Record.""" + s = [] + lists = [] + for k in sorted(self): + if k.startswith('_'): # does not work with byte + continue + v = self[k] + if isinstance(v, (list, tuple)) and len(v): + if isinstance(v[0], Record): + lists.append((k, v)) + continue + elif isinstance(v[0], TiffPage): + v = [i.index for i in v if i] + s.append( + ("* %s: %s" % (k, str(v))).split("\n", 1)[0] + [:PRINT_LINE_LEN].rstrip()) + for k, v in lists: + l = [] + for i, w in enumerate(v): + l.append("* %s[%i]\n %s" % (k, i, + str(w).replace("\n", "\n "))) + s.append('\n'.join(l)) + return '\n'.join(s) + + +class TiffTags(Record): + """Dictionary of TiffTags with attribute access.""" + def __str__(self): + """Return string with information about all tags.""" + s = [] + for tag in sorted(list(self.values()), key=lambda x: x.code): + typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1]) + line = "* %i %s (%s) %s" % (tag.code, tag.name, typecode, + str(tag.value).split('\n', 1)[0]) + s.append(line[:PRINT_LINE_LEN].lstrip()) + return '\n'.join(s) + + +def read_bytes(fh, byteorder, dtype, count): + """Read tag data from file and return as byte string.""" + return numpy_fromfile(fh, byteorder+dtype[-1], count).tostring() + + +def read_numpy(fh, byteorder, dtype, count): + """Read tag data from file and return as numpy array.""" + return numpy_fromfile(fh, byteorder+dtype[-1], count) + + +def read_json(fh, byteorder, dtype, count): + """Read tag data from file and return as object.""" + return json.loads(str(stripnull(fh.read(count)), 'utf-8')) + + +def read_mm_header(fh, byteorder, dtype, count): + """Read MM_HEADER tag from file and return as numpy.rec.array.""" + return numpy.rec.fromfile(fh, MM_HEADER, 1, byteorder=byteorder)[0] + + +def read_mm_stamp(fh, byteorder, dtype, count): + """Read MM_STAMP tag from file and return as numpy.array.""" + return numpy_fromfile(fh, byteorder+'8f8', 1)[0] + + +def read_mm_uic1(fh, byteorder, dtype, count): + """Read MM_UIC1 tag from file and return as dictionary.""" + t = fh.read(8*count) + t = struct.unpack('%s%iI' % (byteorder, 2*count), t) + return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2]) + if k in MM_TAG_IDS) + + +def read_mm_uic2(fh, byteorder, dtype, count): + """Read MM_UIC2 tag from file and return as dictionary.""" + result = {'number_planes': count} + values = numpy_fromfile(fh, byteorder+'I', 6*count) + result['z_distance'] = values[0::6] // values[1::6] + #result['date_created'] = tuple(values[2::6]) + #result['time_created'] = tuple(values[3::6]) + #result['date_modified'] = tuple(values[4::6]) + #result['time_modified'] = tuple(values[5::6]) + return result + + +def read_mm_uic3(fh, byteorder, dtype, count): + """Read MM_UIC3 tag from file and return as dictionary.""" + t = numpy_fromfile(fh, byteorder+'I', 2*count) + return {'wavelengths': t[0::2] // t[1::2]} + + +def read_mm_uic4(fh, byteorder, dtype, count): + """Read MM_UIC4 tag from file and return as dictionary.""" + t = struct.unpack(byteorder + 'hI'*count, fh.read(6*count)) + return dict((MM_TAG_IDS[k], v) for k, v in 
zip(t[::2], t[1::2]) + if k in MM_TAG_IDS) + + +def read_cz_lsm_info(fh, byteorder, dtype, count): + """Read CS_LSM_INFO tag from file and return as numpy.rec.array.""" + result = numpy.rec.fromfile(fh, CZ_LSM_INFO, 1, + byteorder=byteorder)[0] + {50350412: '1.3', 67127628: '2.0'}[result.magic_number] # validation + return result + + +def read_cz_lsm_time_stamps(fh, byteorder): + """Read LSM time stamps from file and return as list.""" + size, count = struct.unpack(byteorder+'II', fh.read(8)) + if size != (8 + 8 * count): + raise ValueError("lsm_time_stamps block is too short") + return struct.unpack(('%s%dd' % (byteorder, count)), + fh.read(8*count)) + + +def read_cz_lsm_event_list(fh, byteorder): + """Read LSM events from file and return as list of (time, type, text).""" + count = struct.unpack(byteorder+'II', fh.read(8))[1] + events = [] + while count > 0: + esize, etime, etype = struct.unpack(byteorder+'IdI', fh.read(16)) + etext = stripnull(fh.read(esize - 16)) + events.append((etime, etype, etext)) + count -= 1 + return events + + +def read_cz_lsm_scan_info(fh, byteorder): + """Read LSM scan information from file and return as Record.""" + block = Record() + blocks = [block] + unpack = struct.unpack + if 0x10000000 != struct.unpack(byteorder+"I", fh.read(4))[0]: + raise ValueError("not a lsm_scan_info structure") + fh.read(8) + while True: + entry, dtype, size = unpack(byteorder+"III", fh.read(12)) + if dtype == 2: + value = stripnull(fh.read(size)) + elif dtype == 4: + value = unpack(byteorder+"i", fh.read(4))[0] + elif dtype == 5: + value = unpack(byteorder+"d", fh.read(8))[0] + else: + value = 0 + if entry in CZ_LSM_SCAN_INFO_ARRAYS: + blocks.append(block) + name = CZ_LSM_SCAN_INFO_ARRAYS[entry] + newobj = [] + setattr(block, name, newobj) + block = newobj + elif entry in CZ_LSM_SCAN_INFO_STRUCTS: + blocks.append(block) + newobj = Record() + block.append(newobj) + block = newobj + elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES: + name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry] + setattr(block, name, value) + elif entry == 0xffffffff: + block = blocks.pop() + else: + setattr(block, "unknown_%x" % entry, value) + if not blocks: + break + return block + + +def read_nih_image_header(fh, byteorder, dtype, count): + """Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array.""" + a = numpy.rec.fromfile(fh, NIH_IMAGE_HEADER, 1, byteorder=byteorder)[0] + a = a.newbyteorder(byteorder) + a.xunit = a.xunit[:a._xunit_len] + a.um = a.um[:a._um_len] + return a + + +def imagej_metadata(data, bytecounts, byteorder): + """Return dict from ImageJ meta data tag value.""" + + _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252') + + def read_string(data, byteorder): + return _str(stripnull(data[0 if byteorder == '<' else 1::2])) + + def read_double(data, byteorder): + return struct.unpack(byteorder+('d' * (len(data) // 8)), data) + + def read_bytes(data, byteorder): + #return struct.unpack('b' * len(data), data) + return numpy.fromstring(data, 'uint8') + + metadata_types = { # big endian + b'info': ('info', read_string), + b'labl': ('labels', read_string), + b'rang': ('ranges', read_double), + b'luts': ('luts', read_bytes), + b'roi ': ('roi', read_bytes), + b'over': ('overlays', read_bytes)} + metadata_types.update( # little endian + dict((k[::-1], v) for k, v in list(metadata_types.items()))) + + if not bytecounts: + raise ValueError("no ImageJ meta data") + + if not data[:4] in (b'IJIJ', b'JIJI'): + raise ValueError("invalid ImageJ meta data") + + header_size = bytecounts[0] + if 
header_size < 12 or header_size > 804: + raise ValueError("invalid ImageJ meta data header size") + + ntypes = (header_size - 4) // 8 + header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8]) + pos = 4 + ntypes * 8 + counter = 0 + result = {} + for mtype, count in zip(header[::2], header[1::2]): + values = [] + name, func = metadata_types.get(mtype, (_str(mtype), read_bytes)) + for _ in range(count): + counter += 1 + pos1 = pos + bytecounts[counter] + values.append(func(data[pos:pos1], byteorder)) + pos = pos1 + result[name.strip()] = values[0] if count == 1 else values + return result + + +def imagej_description(description): + """Return dict from ImageJ image_description tag.""" + def _bool(val): + return {b'true': True, b'false': False}[val.lower()] + + _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252') + result = {} + for line in description.splitlines(): + try: + key, val = line.split(b'=') + except Exception: + continue + key = key.strip() + val = val.strip() + for dtype in (int, float, _bool, _str): + try: + val = dtype(val) + break + except Exception: + pass + result[_str(key)] = val + return result + + +def read_micromanager_metadata(fh): + """Read MicroManager non-TIFF settings from open file and return as dict. + + The settings can be used to read image data without parsing the TIFF file. + + Raise ValueError if file does not contain valid MicroManager metadata. + + """ + fh.seek(0) + try: + byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)] + except IndexError: + raise ValueError("not a MicroManager TIFF file") + + results = {} + fh.seek(8) + (index_header, index_offset, display_header, display_offset, + comments_header, comments_offset, summary_header, summary_length + ) = struct.unpack(byteorder + "IIIIIIII", fh.read(32)) + + if summary_header != 2355492: + raise ValueError("invalid MicroManager summary_header") + results['summary'] = read_json(fh, byteorder, None, summary_length) + + if index_header != 54773648: + raise ValueError("invalid MicroManager index_header") + fh.seek(index_offset) + header, count = struct.unpack(byteorder + "II", fh.read(8)) + if header != 3453623: + raise ValueError("invalid MicroManager index_header") + data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count)) + results['index_map'] = { + 'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5], + 'position': data[3::5], 'offset': data[4::5]} + + if display_header != 483765892: + raise ValueError("invalid MicroManager display_header") + fh.seek(display_offset) + header, count = struct.unpack(byteorder + "II", fh.read(8)) + if header != 347834724: + raise ValueError("invalid MicroManager display_header") + results['display_settings'] = read_json(fh, byteorder, None, count) + + if comments_header != 99384722: + raise ValueError("invalid MicroManager comments_header") + fh.seek(comments_offset) + header, count = struct.unpack(byteorder + "II", fh.read(8)) + if header != 84720485: + raise ValueError("invalid MicroManager comments_header") + results['comments'] = read_json(fh, byteorder, None, count) + + return results + + +def _replace_by(module_function, package=None, warn=True): + """Try replace decorated function by module.function.""" + try: + from importlib import import_module + except ImportError: + warnings.warn('Could not import module importlib') + return lambda func: func + + def decorate(func, module_function=module_function, warn=warn): + try: + module, function = module_function.split('.') + if not package: + module = import_module(module) + 
else: + module = import_module('.' + module, package=package) + func, oldfunc = getattr(module, function), func + globals()['__old_' + func.__name__] = oldfunc + except Exception: + if warn: + warnings.warn("failed to import %s" % module_function) + return func + + return decorate + + +@_replace_by('_tifffile.decodepackbits') +def decodepackbits(encoded): + """Decompress PackBits encoded byte string. + + PackBits is a simple byte-oriented run-length compression scheme. + + """ + func = ord if sys.version[0] == '2' else lambda x: x + result = [] + result_extend = result.extend + i = 0 + try: + while True: + n = func(encoded[i]) + 1 + i += 1 + if n < 129: + result_extend(encoded[i:i+n]) + i += n + elif n > 129: + result_extend(encoded[i:i+1] * (258-n)) + i += 1 + except IndexError: + pass + return b''.join(result) if sys.version[0] == '2' else bytes(result) + + +@_replace_by('_tifffile.decodelzw') +def decodelzw(encoded): + """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string). + + The strip must begin with a CLEAR code and end with an EOI code. + + This is an implementation of the LZW decoding algorithm described in (1). + It is not compatible with old style LZW compressed files like quad-lzw.tif. + + """ + len_encoded = len(encoded) + bitcount_max = len_encoded * 8 + unpack = struct.unpack + + if sys.version[0] == '2': + newtable = [chr(i) for i in range(256)] + else: + newtable = [bytes([i]) for i in range(256)] + newtable.extend((0, 0)) + + def next_code(): + """Return integer of `bitw` bits at `bitcount` position in encoded.""" + start = bitcount // 8 + s = encoded[start:start+4] + try: + code = unpack('>I', s)[0] + except Exception: + code = unpack('>I', s + b'\x00'*(4-len(s)))[0] + code <<= bitcount % 8 + code &= mask + return code >> shr + + switchbitch = { # code: bit-width, shr-bits, bit-mask + 255: (9, 23, int(9*'1'+'0'*23, 2)), + 511: (10, 22, int(10*'1'+'0'*22, 2)), + 1023: (11, 21, int(11*'1'+'0'*21, 2)), + 2047: (12, 20, int(12*'1'+'0'*20, 2)), } + bitw, shr, mask = switchbitch[255] + bitcount = 0 + + if len_encoded < 4: + raise ValueError("strip must be at least 4 characters long") + + if next_code() != 256: + raise ValueError("strip must begin with CLEAR code") + + code = 0 + oldcode = 0 + result = [] + result_append = result.append + while True: + code = next_code() # ~5% faster when inlining this function + bitcount += bitw + if code == 257 or bitcount >= bitcount_max: # EOI + break + if code == 256: # CLEAR + table = newtable[:] + table_append = table.append + lentable = 258 + bitw, shr, mask = switchbitch[255] + code = next_code() + bitcount += bitw + if code == 257: # EOI + break + result_append(table[code]) + else: + if code < lentable: + decoded = table[code] + newcode = table[oldcode] + decoded[:1] + else: + newcode = table[oldcode] + newcode += newcode[:1] + decoded = newcode + result_append(decoded) + table_append(newcode) + lentable += 1 + oldcode = code + if lentable in switchbitch: + bitw, shr, mask = switchbitch[lentable] + + if code != 257: + warnings.warn( + "decodelzw encountered unexpected end of stream (code %i)" % code) + + return b''.join(result) + + +@_replace_by('_tifffile.unpackints') +def unpackints(data, dtype, itemsize, runlen=0): + """Decompress byte string to array of integers of any bit size <= 32. + + Parameters + ---------- + data : byte str + Data to decompress. + dtype : numpy.dtype or str + A numpy boolean or integer type. + itemsize : int + Number of bits per integer. 
+    runlen : int
+        Number of consecutive integers, after which to start at next byte.
+
+    """
+    if itemsize == 1:  # bitarray
+        data = numpy.fromstring(data, '|B')
+        data = numpy.unpackbits(data)
+        if runlen % 8:
+            data = data.reshape(-1, runlen + (8 - runlen % 8))
+            data = data[:, :runlen].reshape(-1)
+        return data.astype(dtype)
+
+    dtype = numpy.dtype(dtype)
+    if itemsize in (8, 16, 32, 64):
+        return numpy.fromstring(data, dtype)
+    if itemsize < 1 or itemsize > 32:
+        raise ValueError("itemsize out of range: %i" % itemsize)
+    if dtype.kind not in "biu":
+        raise ValueError("invalid dtype")
+
+    itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
+    if itembytes != dtype.itemsize:
+        raise ValueError("dtype.itemsize too small")
+    if runlen == 0:
+        runlen = len(data) // itembytes
+    skipbits = runlen*itemsize % 8
+    if skipbits:
+        skipbits = 8 - skipbits
+    shrbits = itembytes*8 - itemsize
+    bitmask = int(itemsize*'1'+'0'*shrbits, 2)
+    dtypestr = '>' + dtype.char  # dtype always big endian?
+
+    unpack = struct.unpack
+    l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
+    result = numpy.empty((l, ), dtype)
+    bitcount = 0
+    for i in range(len(result)):
+        start = bitcount // 8
+        s = data[start:start+itembytes]
+        try:
+            code = unpack(dtypestr, s)[0]
+        except Exception:
+            code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
+        code <<= bitcount % 8
+        code &= bitmask
+        result[i] = code >> shrbits
+        bitcount += itemsize
+        if (i+1) % runlen == 0:
+            bitcount += skipbits
+    return result
+
+
+def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
+    """Return array from byte string containing packed samples.
+
+    Use to unpack RGB565 or RGB555 to RGB888 format.
+
+    Parameters
+    ----------
+    data : byte str
+        The data to be decoded. Samples in each pixel are stored
+        consecutively.
+    dtype : numpy.dtype
+        The sample data type. The byteorder applies also to the data stream.
+    bitspersample : tuple
+        Number of bits for each sample in a pixel.
+    rescale : bool
+        Upscale samples to the number of bits in dtype.
+
+    Returns
+    -------
+    result : ndarray
+        Flattened array of unpacked samples of native dtype.
+
+    Examples
+    --------
+    >>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
+    >>> print(unpackrgb(data, '<B', (5, 6, 5), False))
+    [ 1  1  1 31 63 31]
+    >>> print(unpackrgb(data, '<B', (5, 6, 5)))
+    [  8   4   8 255 255 255]
+    >>> print(unpackrgb(data, '<B', (5, 5, 5)))
+    [ 16   8   8 255 255 255]
+
+    """
+    dtype = numpy.dtype(dtype)
+    bits = int(numpy.sum(bitspersample))
+    if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
+        raise ValueError("sample size not supported %s" % str(bitspersample))
+    dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
+    data = numpy.fromstring(data, dtype.byteorder+dt)
+    result = numpy.empty((data.size, len(bitspersample)), dtype.char)
+    for i, bps in enumerate(bitspersample):
+        t = data >> int(numpy.sum(bitspersample[i+1:]))
+        t &= int('0b'+'1'*bps, 2)
+        if rescale:
+            o = ((dtype.itemsize * 8) // bps + 1) * bps
+            if o > data.dtype.itemsize * 8:
+                t = t.astype('I')
+            t *= (2**o - 1) // (2**bps - 1)
+            t //= 2**(o - (dtype.itemsize * 8))
+        result[:, i] = t
+    return result.reshape(-1)
+
+
+def reorient(image, orientation):
+    """Return reoriented view of image array.
+
+    Parameters
+    ----------
+    image : numpy array
+        Non-squeezed output of asarray() functions.
+        Axes -3 and -2 must be image length and width respectively.
+    orientation : int or str
+        One of TIFF_ORIENTATIONS keys or values.
+
+    """
+    o = TIFF_ORIENTATIONS.get(orientation, orientation)
+    if o == 'top_left':
+        return image
+    elif o == 'top_right':
+        return image[..., ::-1, :]
+    elif o == 'bottom_left':
+        return image[..., ::-1, :, :]
+    elif o == 'bottom_right':
+        return image[..., ::-1, ::-1, :]
+    elif o == 'left_top':
+        return numpy.swapaxes(image, -3, -2)
+    elif o == 'right_top':
+        return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
+    elif o == 'left_bottom':
+        return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
+    elif o == 'right_bottom':
+        return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
+
+
+def numpy_fromfile(arg, dtype=float, count=-1, sep=''):
+    """Return array from data in binary file.
+
+    Work around numpy issue #2230, "numpy.fromfile does not accept StringIO
+    object" https://github.com/numpy/numpy/issues/2230.
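+
+    Examples
+    --------
+    A minimal usage sketch (``io.BytesIO`` stands in for an open binary
+    file handle; the values are the ASCII codes of the four bytes):
+
+    >>> from io import BytesIO
+    >>> numpy_fromfile(BytesIO(b'spam'), 'u1', 4).tolist()
+    [115, 112, 97, 109]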
+ + """ + try: + return numpy.fromfile(arg, dtype, count, sep) + except IOError: + if count < 0: + size = 2**30 + else: + size = count * numpy.dtype(dtype).itemsize + data = arg.read(int(size)) + return numpy.fromstring(data, dtype, count, sep) + + +def stripnull(string): + """Return string truncated at first null character.""" + i = string.find(b'\x00') + return string if (i < 0) else string[:i] + + +def format_size(size): + """Return file size as string from byte size.""" + for unit in ('B', 'KB', 'MB', 'GB', 'TB'): + if size < 2048: + return "%.f %s" % (size, unit) + size /= 1024.0 + + +def natural_sorted(iterable): + """Return human sorted list of strings. + + >>> natural_sorted(['f1', 'f2', 'f10']) + ['f1', 'f2', 'f10'] + + """ + def sortkey(x): + return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)] + numbers = re.compile('(\d+)') + return sorted(iterable, key=sortkey) + + +def datetime_from_timestamp(n, epoch=datetime.datetime.fromordinal(693594)): + """Return datetime object from timestamp in Excel serial format. + + Examples + -------- + >>> datetime_from_timestamp(40237.029999999795) + datetime.datetime(2010, 2, 28, 0, 43, 11, 999982) + + """ + return epoch + datetime.timedelta(n) + + +def test_tifffile(directory='testimages', verbose=True): + """Read all images in directory. Print error message on failure. + + Examples + -------- + >>> test_tifffile(verbose=False) + + """ + successful = 0 + failed = 0 + start = time.time() + for f in glob.glob(os.path.join(directory, '*.*')): + if verbose: + print("\n%s>\n" % f.lower(), end='') + t0 = time.time() + try: + tif = TiffFile(f, multifile=True) + except Exception as e: + if not verbose: + print(f, end=' ') + print("ERROR:", e) + failed += 1 + continue + try: + img = tif.asarray() + except ValueError: + try: + img = tif[0].asarray() + except Exception as e: + if not verbose: + print(f, end=' ') + print("ERROR:", e) + failed += 1 + continue + finally: + tif.close() + successful += 1 + if verbose: + print("%s, %s %s, %s, %.0f ms" % ( + str(tif), str(img.shape), img.dtype, tif[0].compression, + (time.time()-t0) * 1e3)) + if verbose: + print("\nSuccessfully read %i of %i files in %.3f s\n" % ( + successful, successful+failed, time.time()-start)) + + +class TIFF_SUBFILE_TYPES(object): + def __getitem__(self, key): + result = [] + if key & 1: + result.append('reduced_image') + if key & 2: + result.append('page') + if key & 4: + result.append('mask') + return tuple(result) + + +TIFF_PHOTOMETRICS = { + 0: 'miniswhite', + 1: 'minisblack', + 2: 'rgb', + 3: 'palette', + 4: 'mask', + 5: 'separated', + 6: 'cielab', + 7: 'icclab', + 8: 'itulab', + 32844: 'logl', + 32845: 'logluv', +} + +TIFF_COMPESSIONS = { + 1: None, + 2: 'ccittrle', + 3: 'ccittfax3', + 4: 'ccittfax4', + 5: 'lzw', + 6: 'ojpeg', + 7: 'jpeg', + 8: 'adobe_deflate', + 9: 't85', + 10: 't43', + 32766: 'next', + 32771: 'ccittrlew', + 32773: 'packbits', + 32809: 'thunderscan', + 32895: 'it8ctpad', + 32896: 'it8lw', + 32897: 'it8mp', + 32898: 'it8bl', + 32908: 'pixarfilm', + 32909: 'pixarlog', + 32946: 'deflate', + 32947: 'dcs', + 34661: 'jbig', + 34676: 'sgilog', + 34677: 'sgilog24', + 34712: 'jp2000', + 34713: 'nef', +} + +TIFF_DECOMPESSORS = { + None: lambda x: x, + 'adobe_deflate': zlib.decompress, + 'deflate': zlib.decompress, + 'packbits': decodepackbits, + 'lzw': decodelzw, +} + +TIFF_DATA_TYPES = { + 1: '1B', # BYTE 8-bit unsigned integer. + 2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code; + # the last byte must be NULL (binary zero). 
+ 3: '1H', # SHORT 16-bit (2-byte) unsigned integer + 4: '1I', # LONG 32-bit (4-byte) unsigned integer. + 5: '2I', # RATIONAL Two LONGs: the first represents the numerator of + # a fraction; the second, the denominator. + 6: '1b', # SBYTE An 8-bit signed (twos-complement) integer. + 7: '1B', # UNDEFINED An 8-bit byte that may contain anything, + # depending on the definition of the field. + 8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer. + 9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer. + 10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator + # of a fraction, the second the denominator. + 11: '1f', # FLOAT Single precision (4-byte) IEEE format. + 12: '1d', # DOUBLE Double precision (8-byte) IEEE format. + 13: '1I', # IFD unsigned 4 byte IFD offset. + #14: '', # UNICODE + #15: '', # COMPLEX + 16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff) + 17: '1q', # SLONG8 signed 8 byte integer (BigTiff) + 18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff) +} + +TIFF_SAMPLE_FORMATS = { + 1: 'uint', + 2: 'int', + 3: 'float', + #4: 'void', + #5: 'complex_int', + 6: 'complex', +} + +TIFF_SAMPLE_DTYPES = { + ('uint', 1): '?', # bitmap + ('uint', 2): 'B', + ('uint', 3): 'B', + ('uint', 4): 'B', + ('uint', 5): 'B', + ('uint', 6): 'B', + ('uint', 7): 'B', + ('uint', 8): 'B', + ('uint', 9): 'H', + ('uint', 10): 'H', + ('uint', 11): 'H', + ('uint', 12): 'H', + ('uint', 13): 'H', + ('uint', 14): 'H', + ('uint', 15): 'H', + ('uint', 16): 'H', + ('uint', 17): 'I', + ('uint', 18): 'I', + ('uint', 19): 'I', + ('uint', 20): 'I', + ('uint', 21): 'I', + ('uint', 22): 'I', + ('uint', 23): 'I', + ('uint', 24): 'I', + ('uint', 25): 'I', + ('uint', 26): 'I', + ('uint', 27): 'I', + ('uint', 28): 'I', + ('uint', 29): 'I', + ('uint', 30): 'I', + ('uint', 31): 'I', + ('uint', 32): 'I', + ('uint', 64): 'Q', + ('int', 8): 'b', + ('int', 16): 'h', + ('int', 32): 'i', + ('int', 64): 'q', + ('float', 16): 'e', + ('float', 32): 'f', + ('float', 64): 'd', + ('complex', 64): 'F', + ('complex', 128): 'D', + ('uint', (5, 6, 5)): 'B', +} + +TIFF_ORIENTATIONS = { + 1: 'top_left', + 2: 'top_right', + 3: 'bottom_right', + 4: 'bottom_left', + 5: 'left_top', + 6: 'right_top', + 7: 'right_bottom', + 8: 'left_bottom', +} + +AXES_LABELS = { + 'X': 'width', + 'Y': 'height', + 'Z': 'depth', + 'S': 'sample', # rgb(a) + 'P': 'plane', # page + 'T': 'time', + 'C': 'channel', # color, emission wavelength + 'A': 'angle', + 'F': 'phase', + 'R': 'tile', # region, point + 'H': 'lifetime', # histogram + 'E': 'lambda', # excitation wavelength + 'L': 'exposure', # lux + 'V': 'event', + 'Q': 'other', +} + +AXES_LABELS.update(dict((v, k) for k, v in list(AXES_LABELS.items()))) + +# NIH Image PicHeader v1.63 +NIH_IMAGE_HEADER = [ + ('fileid', 'a8'), + ('nlines', 'i2'), + ('pixelsperline', 'i2'), + ('version', 'i2'), + ('oldlutmode', 'i2'), + ('oldncolors', 'i2'), + ('colors', 'u1', (3, 32)), + ('oldcolorstart', 'i2'), + ('colorwidth', 'i2'), + ('extracolors', 'u2', (6, 3)), + ('nextracolors', 'i2'), + ('foregroundindex', 'i2'), + ('backgroundindex', 'i2'), + ('xscale', 'f8'), + ('_x0', 'i2'), + ('_x1', 'i2'), + ('units_t', 'i2'), + ('p1', [('x', 'i2'), ('y', 'i2')]), + ('p2', [('x', 'i2'), ('y', 'i2')]), + ('curvefit_t', 'i2'), + ('ncoefficients', 'i2'), + ('coeff', 'f8', 6), + ('_um_len', 'u1'), + ('um', 'a15'), + ('_x2', 'u1'), + ('binarypic', 'b1'), + ('slicestart', 'i2'), + ('sliceend', 'i2'), + ('scalemagnification', 'f4'), + ('nslices', 'i2'), + ('slicespacing', 'f4'), + ('currentslice', 
'i2'), + ('frameinterval', 'f4'), + ('pixelaspectratio', 'f4'), + ('colorstart', 'i2'), + ('colorend', 'i2'), + ('ncolors', 'i2'), + ('fill1', '3u2'), + ('fill2', '3u2'), + ('colortable_t', 'u1'), + ('lutmode_t', 'u1'), + ('invertedtable', 'b1'), + ('zeroclip', 'b1'), + ('_xunit_len', 'u1'), + ('xunit', 'a11'), + ('stacktype_t', 'i2'), +] + +#NIH_COLORTABLE_TYPE = ( +# 'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow', +# 'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum') +#NIH_LUTMODE_TYPE = ( +# 'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale', +# 'ColorLut', 'CustomGrayscale') +#NIH_CURVEFIT_TYPE = ( +# 'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit', +# 'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated', +# 'UncalibratedOD') +#NIH_UNITS_TYPE = ( +# 'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters', +# 'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits') +#NIH_STACKTYPE_TYPE = ( +# 'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack') + +# MetaMorph STK tags +MM_TAG_IDS = { + 0: 'auto_scale', + 1: 'min_scale', + 2: 'max_scale', + 3: 'spatial_calibration', + #4: 'x_calibration', + #5: 'y_calibration', + #6: 'calibration_units', + #7: 'name', + 8: 'thresh_state', + 9: 'thresh_state_red', + 11: 'thresh_state_green', + 12: 'thresh_state_blue', + 13: 'thresh_state_lo', + 14: 'thresh_state_hi', + 15: 'zoom', + #16: 'create_time', + #17: 'last_saved_time', + 18: 'current_buffer', + 19: 'gray_fit', + 20: 'gray_point_count', + #21: 'gray_x', + #22: 'gray_y', + #23: 'gray_min', + #24: 'gray_max', + #25: 'gray_unit_name', + 26: 'standard_lut', + 27: 'wavelength', + #28: 'stage_position', + #29: 'camera_chip_offset', + #30: 'overlay_mask', + #31: 'overlay_compress', + #32: 'overlay', + #33: 'special_overlay_mask', + #34: 'special_overlay_compress', + #35: 'special_overlay', + 36: 'image_property', + #37: 'stage_label', + #38: 'autoscale_lo_info', + #39: 'autoscale_hi_info', + #40: 'absolute_z', + #41: 'absolute_z_valid', + #42: 'gamma', + #43: 'gamma_red', + #44: 'gamma_green', + #45: 'gamma_blue', + #46: 'camera_bin', + 47: 'new_lut', + #48: 'image_property_ex', + 49: 'plane_property', + #50: 'user_lut_table', + 51: 'red_autoscale_info', + #52: 'red_autoscale_lo_info', + #53: 'red_autoscale_hi_info', + 54: 'red_minscale_info', + 55: 'red_maxscale_info', + 56: 'green_autoscale_info', + #57: 'green_autoscale_lo_info', + #58: 'green_autoscale_hi_info', + 59: 'green_minscale_info', + 60: 'green_maxscale_info', + 61: 'blue_autoscale_info', + #62: 'blue_autoscale_lo_info', + #63: 'blue_autoscale_hi_info', + 64: 'blue_min_scale_info', + 65: 'blue_max_scale_info', + #66: 'overlay_plane_color' +} + +# Olympus FluoView +MM_DIMENSION = [ + ('name', 'a16'), + ('size', 'i4'), + ('origin', 'f8'), + ('resolution', 'f8'), + ('unit', 'a64'), +] + +MM_HEADER = [ + ('header_flag', 'i2'), + ('image_type', 'u1'), + ('image_name', 'a257'), + ('offset_data', 'u4'), + ('palette_size', 'i4'), + ('offset_palette0', 'u4'), + ('offset_palette1', 'u4'), + ('comment_size', 'i4'), + ('offset_comment', 'u4'), + ('dimensions', MM_DIMENSION, 10), + ('offset_position', 'u4'), + ('map_type', 'i2'), + ('map_min', 'f8'), + ('map_max', 'f8'), + ('min_value', 'f8'), + ('max_value', 'f8'), + ('offset_map', 'u4'), + ('gamma', 'f8'), + ('offset', 'f8'), + ('gray_channel', MM_DIMENSION), + ('offset_thumbnail', 'u4'), + ('voice_field', 'i4'), + ('offset_voice_field', 'u4'), +] + +# Carl Zeiss LSM +CZ_LSM_INFO = [ + ('magic_number', 'i4'), + ('structure_size', 
'i4'), + ('dimension_x', 'i4'), + ('dimension_y', 'i4'), + ('dimension_z', 'i4'), + ('dimension_channels', 'i4'), + ('dimension_time', 'i4'), + ('dimension_data_type', 'i4'), + ('thumbnail_x', 'i4'), + ('thumbnail_y', 'i4'), + ('voxel_size_x', 'f8'), + ('voxel_size_y', 'f8'), + ('voxel_size_z', 'f8'), + ('origin_x', 'f8'), + ('origin_y', 'f8'), + ('origin_z', 'f8'), + ('scan_type', 'u2'), + ('spectral_scan', 'u2'), + ('data_type', 'u4'), + ('offset_vector_overlay', 'u4'), + ('offset_input_lut', 'u4'), + ('offset_output_lut', 'u4'), + ('offset_channel_colors', 'u4'), + ('time_interval', 'f8'), + ('offset_channel_data_types', 'u4'), + ('offset_scan_information', 'u4'), + ('offset_ks_data', 'u4'), + ('offset_time_stamps', 'u4'), + ('offset_event_list', 'u4'), + ('offset_roi', 'u4'), + ('offset_bleach_roi', 'u4'), + ('offset_next_recording', 'u4'), + ('display_aspect_x', 'f8'), + ('display_aspect_y', 'f8'), + ('display_aspect_z', 'f8'), + ('display_aspect_time', 'f8'), + ('offset_mean_of_roi_overlay', 'u4'), + ('offset_topo_isoline_overlay', 'u4'), + ('offset_topo_profile_overlay', 'u4'), + ('offset_linescan_overlay', 'u4'), + ('offset_toolbar_flags', 'u4'), +] + +# Import functions for LSM_INFO sub-records +CZ_LSM_INFO_READERS = { + 'scan_information': read_cz_lsm_scan_info, + 'time_stamps': read_cz_lsm_time_stamps, + 'event_list': read_cz_lsm_event_list, +} + +# Map cz_lsm_info.scan_type to dimension order +CZ_SCAN_TYPES = { + 0: 'XYZCT', # x-y-z scan + 1: 'XYZCT', # z scan (x-z plane) + 2: 'XYZCT', # line scan + 3: 'XYTCZ', # time series x-y + 4: 'XYZTC', # time series x-z + 5: 'XYTCZ', # time series 'Mean of ROIs' + 6: 'XYZTC', # time series x-y-z + 7: 'XYCTZ', # spline scan + 8: 'XYCZT', # spline scan x-z + 9: 'XYTCZ', # time series spline plane x-z + 10: 'XYZCT', # point mode +} + +# Map dimension codes to cz_lsm_info attribute +CZ_DIMENSIONS = { + 'X': 'dimension_x', + 'Y': 'dimension_y', + 'Z': 'dimension_z', + 'C': 'dimension_channels', + 'T': 'dimension_time', +} + +# Descriptions of cz_lsm_info.data_type +CZ_DATA_TYPES = { + 0: 'varying data types', + 2: '12 bit unsigned integer', + 5: '32 bit float', +} + +CZ_LSM_SCAN_INFO_ARRAYS = { + 0x20000000: "tracks", + 0x30000000: "lasers", + 0x60000000: "detectionchannels", + 0x80000000: "illuminationchannels", + 0xa0000000: "beamsplitters", + 0xc0000000: "datachannels", + 0x13000000: "markers", + 0x11000000: "timers", +} + +CZ_LSM_SCAN_INFO_STRUCTS = { + 0x40000000: "tracks", + 0x50000000: "lasers", + 0x70000000: "detectionchannels", + 0x90000000: "illuminationchannels", + 0xb0000000: "beamsplitters", + 0xd0000000: "datachannels", + 0x14000000: "markers", + 0x12000000: "timers", +} + +CZ_LSM_SCAN_INFO_ATTRIBUTES = { + 0x10000001: "name", + 0x10000002: "description", + 0x10000003: "notes", + 0x10000004: "objective", + 0x10000005: "processing_summary", + 0x10000006: "special_scan_mode", + 0x10000007: "oledb_recording_scan_type", + 0x10000008: "oledb_recording_scan_mode", + 0x10000009: "number_of_stacks", + 0x1000000a: "lines_per_plane", + 0x1000000b: "samples_per_line", + 0x1000000c: "planes_per_volume", + 0x1000000d: "images_width", + 0x1000000e: "images_height", + 0x1000000f: "images_number_planes", + 0x10000010: "images_number_stacks", + 0x10000011: "images_number_channels", + 0x10000012: "linscan_xy_size", + 0x10000013: "scan_direction", + 0x10000014: "time_series", + 0x10000015: "original_scan_data", + 0x10000016: "zoom_x", + 0x10000017: "zoom_y", + 0x10000018: "zoom_z", + 0x10000019: "sample_0x", + 0x1000001a: "sample_0y", + 
0x1000001b: "sample_0z", + 0x1000001c: "sample_spacing", + 0x1000001d: "line_spacing", + 0x1000001e: "plane_spacing", + 0x1000001f: "plane_width", + 0x10000020: "plane_height", + 0x10000021: "volume_depth", + 0x10000023: "nutation", + 0x10000034: "rotation", + 0x10000035: "precession", + 0x10000036: "sample_0time", + 0x10000037: "start_scan_trigger_in", + 0x10000038: "start_scan_trigger_out", + 0x10000039: "start_scan_event", + 0x10000040: "start_scan_time", + 0x10000041: "stop_scan_trigger_in", + 0x10000042: "stop_scan_trigger_out", + 0x10000043: "stop_scan_event", + 0x10000044: "stop_scan_time", + 0x10000045: "use_rois", + 0x10000046: "use_reduced_memory_rois", + 0x10000047: "user", + 0x10000048: "use_bccorrection", + 0x10000049: "position_bccorrection1", + 0x10000050: "position_bccorrection2", + 0x10000051: "interpolation_y", + 0x10000052: "camera_binning", + 0x10000053: "camera_supersampling", + 0x10000054: "camera_frame_width", + 0x10000055: "camera_frame_height", + 0x10000056: "camera_offset_x", + 0x10000057: "camera_offset_y", + # lasers + 0x50000001: "name", + 0x50000002: "acquire", + 0x50000003: "power", + # tracks + 0x40000001: "multiplex_type", + 0x40000002: "multiplex_order", + 0x40000003: "sampling_mode", + 0x40000004: "sampling_method", + 0x40000005: "sampling_number", + 0x40000006: "acquire", + 0x40000007: "sample_observation_time", + 0x4000000b: "time_between_stacks", + 0x4000000c: "name", + 0x4000000d: "collimator1_name", + 0x4000000e: "collimator1_position", + 0x4000000f: "collimator2_name", + 0x40000010: "collimator2_position", + 0x40000011: "is_bleach_track", + 0x40000012: "is_bleach_after_scan_number", + 0x40000013: "bleach_scan_number", + 0x40000014: "trigger_in", + 0x40000015: "trigger_out", + 0x40000016: "is_ratio_track", + 0x40000017: "bleach_count", + 0x40000018: "spi_center_wavelength", + 0x40000019: "pixel_time", + 0x40000021: "condensor_frontlens", + 0x40000023: "field_stop_value", + 0x40000024: "id_condensor_aperture", + 0x40000025: "condensor_aperture", + 0x40000026: "id_condensor_revolver", + 0x40000027: "condensor_filter", + 0x40000028: "id_transmission_filter1", + 0x40000029: "id_transmission1", + 0x40000030: "id_transmission_filter2", + 0x40000031: "id_transmission2", + 0x40000032: "repeat_bleach", + 0x40000033: "enable_spot_bleach_pos", + 0x40000034: "spot_bleach_posx", + 0x40000035: "spot_bleach_posy", + 0x40000036: "spot_bleach_posz", + 0x40000037: "id_tubelens", + 0x40000038: "id_tubelens_position", + 0x40000039: "transmitted_light", + 0x4000003a: "reflected_light", + 0x4000003b: "simultan_grab_and_bleach", + 0x4000003c: "bleach_pixel_time", + # detection_channels + 0x70000001: "integration_mode", + 0x70000002: "special_mode", + 0x70000003: "detector_gain_first", + 0x70000004: "detector_gain_last", + 0x70000005: "amplifier_gain_first", + 0x70000006: "amplifier_gain_last", + 0x70000007: "amplifier_offs_first", + 0x70000008: "amplifier_offs_last", + 0x70000009: "pinhole_diameter", + 0x7000000a: "counting_trigger", + 0x7000000b: "acquire", + 0x7000000c: "point_detector_name", + 0x7000000d: "amplifier_name", + 0x7000000e: "pinhole_name", + 0x7000000f: "filter_set_name", + 0x70000010: "filter_name", + 0x70000013: "integrator_name", + 0x70000014: "detection_channel_name", + 0x70000015: "detection_detector_gain_bc1", + 0x70000016: "detection_detector_gain_bc2", + 0x70000017: "detection_amplifier_gain_bc1", + 0x70000018: "detection_amplifier_gain_bc2", + 0x70000019: "detection_amplifier_offset_bc1", + 0x70000020: "detection_amplifier_offset_bc2", + 
0x70000021: "detection_spectral_scan_channels", + 0x70000022: "detection_spi_wavelength_start", + 0x70000023: "detection_spi_wavelength_stop", + 0x70000026: "detection_dye_name", + 0x70000027: "detection_dye_folder", + # illumination_channels + 0x90000001: "name", + 0x90000002: "power", + 0x90000003: "wavelength", + 0x90000004: "aquire", + 0x90000005: "detchannel_name", + 0x90000006: "power_bc1", + 0x90000007: "power_bc2", + # beam_splitters + 0xb0000001: "filter_set", + 0xb0000002: "filter", + 0xb0000003: "name", + # data_channels + 0xd0000001: "name", + 0xd0000003: "acquire", + 0xd0000004: "color", + 0xd0000005: "sample_type", + 0xd0000006: "bits_per_sample", + 0xd0000007: "ratio_type", + 0xd0000008: "ratio_track1", + 0xd0000009: "ratio_track2", + 0xd000000a: "ratio_channel1", + 0xd000000b: "ratio_channel2", + 0xd000000c: "ratio_const1", + 0xd000000d: "ratio_const2", + 0xd000000e: "ratio_const3", + 0xd000000f: "ratio_const4", + 0xd0000010: "ratio_const5", + 0xd0000011: "ratio_const6", + 0xd0000012: "ratio_first_images1", + 0xd0000013: "ratio_first_images2", + 0xd0000014: "dye_name", + 0xd0000015: "dye_folder", + 0xd0000016: "spectrum", + 0xd0000017: "acquire", + # markers + 0x14000001: "name", + 0x14000002: "description", + 0x14000003: "trigger_in", + 0x14000004: "trigger_out", + # timers + 0x12000001: "name", + 0x12000002: "description", + 0x12000003: "interval", + 0x12000004: "trigger_in", + 0x12000005: "trigger_out", + 0x12000006: "activation_time", + 0x12000007: "activation_number", +} + +# Map TIFF tag code to attribute name, default value, type, count, validator +TIFF_TAGS = { + 254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()), + 255: ('subfile_type', None, 3, 1, + {0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}), + 256: ('image_width', None, 4, 1, None), + 257: ('image_length', None, 4, 1, None), + 258: ('bits_per_sample', 1, 3, 1, None), + 259: ('compression', 1, 3, 1, TIFF_COMPESSIONS), + 262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS), + 266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}), + 269: ('document_name', None, 2, None, None), + 270: ('image_description', None, 2, None, None), + 271: ('make', None, 2, None, None), + 272: ('model', None, 2, None, None), + 273: ('strip_offsets', None, 4, None, None), + 274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS), + 277: ('samples_per_pixel', 1, 3, 1, None), + 278: ('rows_per_strip', 2**32-1, 4, 1, None), + 279: ('strip_byte_counts', None, 4, None, None), + 280: ('min_sample_value', None, 3, None, None), + 281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample + 282: ('x_resolution', None, 5, 1, None), + 283: ('y_resolution', None, 5, 1, None), + 284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}), + 285: ('page_name', None, 2, None, None), + 286: ('x_position', None, 5, 1, None), + 287: ('y_position', None, 5, 1, None), + 296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}), + 297: ('page_number', None, 3, 2, None), + 305: ('software', None, 2, None, None), + 306: ('datetime', None, 2, None, None), + 315: ('artist', None, 2, None, None), + 316: ('host_computer', None, 2, None, None), + 317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}), + 320: ('color_map', None, 3, None, None), + 322: ('tile_width', None, 4, 1, None), + 323: ('tile_length', None, 4, 1, None), + 324: ('tile_offsets', None, 4, None, None), + 325: ('tile_byte_counts', None, 4, None, None), + 338: ('extra_samples', None, 3, None, + {0: 'unspecified', 1: 'assocalpha', 2: 
'unassalpha'}), + 339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS), + 347: ('jpeg_tables', None, None, None, None), + 530: ('ycbcr_subsampling', 1, 3, 2, None), + 531: ('ycbcr_positioning', 1, 3, 1, None), + 32997: ('image_depth', None, 4, 1, None), + 32998: ('tile_depth', None, 4, 1, None), + 33432: ('copyright', None, 1, None, None), + 33445: ('md_file_tag', None, 4, 1, None), + 33446: ('md_scale_pixel', None, 5, 1, None), + 33447: ('md_color_table', None, 3, None, None), + 33448: ('md_lab_name', None, 2, None, None), + 33449: ('md_sample_info', None, 2, None, None), + 33450: ('md_prep_date', None, 2, None, None), + 33451: ('md_prep_time', None, 2, None, None), + 33452: ('md_file_units', None, 2, None, None), + 33550: ('model_pixel_scale', None, 12, 3, None), + 33922: ('model_tie_point', None, 12, None, None), + 37510: ('user_comment', None, None, None, None), + 34665: ('exif_ifd', None, None, 1, None), + 34735: ('geo_key_directory', None, 3, None, None), + 34736: ('geo_double_params', None, 12, None, None), + 34737: ('geo_ascii_params', None, 2, None, None), + 34853: ('gps_ifd', None, None, 1, None), + 42112: ('gdal_metadata', None, 2, None, None), + 42113: ('gdal_nodata', None, 2, None, None), + 50838: ('imagej_byte_counts', None, None, None, None), + 50289: ('mc_xy_position', None, 12, 2, None), + 50290: ('mc_z_position', None, 12, 1, None), + 50291: ('mc_xy_calibration', None, 12, 3, None), + 50292: ('mc_lens_lem_na_n', None, 12, 3, None), + 50293: ('mc_channel_name', None, 1, None, None), + 50294: ('mc_ex_wavelength', None, 12, 1, None), + 50295: ('mc_time_stamp', None, 12, 1, None), + 65200: ('flex_xml', None, 2, None, None), + # code: (attribute name, default value, type, count, validator) +} + +# Map custom TIFF tag codes to attribute names and import functions +CUSTOM_TAGS = { + 700: ('xmp', read_bytes), + 34377: ('photoshop', read_numpy), + 33723: ('iptc', read_bytes), + 34675: ('icc_profile', read_numpy), + 33628: ('mm_uic1', read_mm_uic1), + 33629: ('mm_uic2', read_mm_uic2), + 33630: ('mm_uic3', read_mm_uic3), + 33631: ('mm_uic4', read_mm_uic4), + 34361: ('mm_header', read_mm_header), + 34362: ('mm_stamp', read_mm_stamp), + 34386: ('mm_user_block', read_bytes), + 34412: ('cz_lsm_info', read_cz_lsm_info), + 43314: ('nih_image_header', read_nih_image_header), + # 40001: ('mc_ipwinscal', read_bytes), + 40100: ('mc_id_old', read_bytes), + 50288: ('mc_id', read_bytes), + 50296: ('mc_frame_properties', read_bytes), + 50839: ('imagej_metadata', read_bytes), + 51123: ('micromanager_metadata', read_json), +} + +# Max line length of printed output +PRINT_LINE_LEN = 79 + + +def imshow(data, title=None, vmin=0, vmax=None, cmap=None, + bitspersample=None, photometric='rgb', interpolation='nearest', + dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs): + """Plot n-dimensional images using matplotlib.pyplot. + + Return figure, subplot and plot axis. + Requires pyplot already imported ``from matplotlib import pyplot``. + + Parameters + ---------- + bitspersample : int or None + Number of bits per channel in integer RGB images. + photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'} + The color space of the image data. + title : str + Window and subplot title. + figure : matplotlib.figure.Figure (optional). + Matplotlib to use for plotting. + subplot : int + A matplotlib.pyplot.subplot axis. + maxdim : int + maximum image size in any dimension. + kwargs : optional + Arguments for matplotlib.pyplot.imshow. 
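+
+    Examples
+    --------
+    A minimal usage sketch, not run here (assumes matplotlib with an
+    interactive backend is available):
+
+    >>> from matplotlib import pyplot           # doctest: +SKIP
+    >>> img = numpy.random.rand(64, 64)         # doctest: +SKIP
+    >>> figure, subplot, image = imshow(img)    # doctest: +SKIP
+    >>> pyplot.show()                           # doctest: +SKIP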
+ + """ + #if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'): + # raise ValueError("Can't handle %s photometrics" % photometric) + # TODO: handle photometric == 'separated' (CMYK) + isrgb = photometric in ('rgb', 'palette') + data = numpy.atleast_2d(data.squeeze()) + data = data[(slice(0, maxdim), ) * len(data.shape)] + + dims = data.ndim + if dims < 2: + raise ValueError("not an image") + elif dims == 2: + dims = 0 + isrgb = False + else: + if isrgb and data.shape[-3] in (3, 4): + data = numpy.swapaxes(data, -3, -2) + data = numpy.swapaxes(data, -2, -1) + elif not isrgb and data.shape[-1] in (3, 4): + data = numpy.swapaxes(data, -3, -1) + data = numpy.swapaxes(data, -2, -1) + isrgb = isrgb and data.shape[-1] in (3, 4) + dims -= 3 if isrgb else 2 + + if photometric == 'palette' and isrgb: + datamax = data.max() + if datamax > 255: + data >>= 8 # possible precision loss + data = data.astype('B') + elif data.dtype.kind in 'ui': + if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: + try: + bitspersample = int(math.ceil(math.log(data.max(), 2))) + except Exception: + bitspersample = data.dtype.itemsize * 8 + elif not isinstance(bitspersample, int): + # bitspersample can be tuple, e.g. (5, 6, 5) + bitspersample = data.dtype.itemsize * 8 + datamax = 2**bitspersample + if isrgb: + if bitspersample < 8: + data <<= 8 - bitspersample + elif bitspersample > 8: + data >>= bitspersample - 8 # precision loss + data = data.astype('B') + elif data.dtype.kind == 'f': + datamax = data.max() + if isrgb and datamax > 1.0: + if data.dtype.char == 'd': + data = data.astype('f') + data /= datamax + elif data.dtype.kind == 'b': + datamax = 1 + elif data.dtype.kind == 'c': + raise NotImplementedError("complex type") # TODO: handle complex types + + if not isrgb: + if vmax is None: + vmax = datamax + if vmin is None: + if data.dtype.kind == 'i': + dtmin = numpy.iinfo(data.dtype).min + vmin = numpy.min(data) + if vmin == dtmin: + vmin = numpy.min(data > dtmin) + if data.dtype.kind == 'f': + dtmin = numpy.finfo(data.dtype).min + vmin = numpy.min(data) + if vmin == dtmin: + vmin = numpy.min(data > dtmin) + else: + vmin = 0 + + pyplot = sys.modules['matplotlib.pyplot'] + + if figure is None: + pyplot.rc('font', family='sans-serif', weight='normal', size=8) + figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True, + facecolor='1.0', edgecolor='w') + try: + figure.canvas.manager.window.title(title) + except Exception: + pass + pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9, + left=0.1, right=0.95, hspace=0.05, wspace=0.0) + subplot = pyplot.subplot(subplot) + + if title: + try: + title = str(title, 'Windows-1252') + except TypeError: + pass + pyplot.title(title, size=11) + + if cmap is None: + if data.dtype.kind in 'ub' and vmin == 0: + cmap = 'gray' + else: + cmap = 'coolwarm' + if photometric == 'miniswhite': + cmap += '_r' + + image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax, + cmap=cmap, interpolation=interpolation, **kwargs) + + if not isrgb: + pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05 + + def format_coord(x, y): + # callback function to format coordinate display in toolbar + x = int(x + 0.5) + y = int(y + 0.5) + try: + if dims: + return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x], + current, x, y) + else: + return "%s @ [%4i, %4i]" % (data[y, x], x, y) + except IndexError: + return "" + + pyplot.gca().format_coord = format_coord + + if dims: + current = list((0, ) * dims) + cur_ax_dat = [0, data[tuple(current)].squeeze()] + 
sliders = [pyplot.Slider( + pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]), + 'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5', + valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)] + for slider in sliders: + slider.drawon = False + + def set_image(current, sliders=sliders, data=data): + # change image and redraw canvas + cur_ax_dat[1] = data[tuple(current)].squeeze() + image.set_data(cur_ax_dat[1]) + for ctrl, index in zip(sliders, current): + ctrl.eventson = False + ctrl.set_val(index) + ctrl.eventson = True + figure.canvas.draw() + + def on_changed(index, axis, data=data, current=current): + # callback function for slider change event + index = int(round(index)) + cur_ax_dat[0] = axis + if index == current[axis]: + return + if index >= data.shape[axis]: + index = 0 + elif index < 0: + index = data.shape[axis] - 1 + current[axis] = index + set_image(current) + + def on_keypressed(event, data=data, current=current): + # callback function for key press event + key = event.key + axis = cur_ax_dat[0] + if str(key) in '0123456789': + on_changed(key, axis) + elif key == 'right': + on_changed(current[axis] + 1, axis) + elif key == 'left': + on_changed(current[axis] - 1, axis) + elif key == 'up': + cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1 + elif key == 'down': + cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1 + elif key == 'end': + on_changed(data.shape[axis] - 1, axis) + elif key == 'home': + on_changed(0, axis) + + figure.canvas.mpl_connect('key_press_event', on_keypressed) + for axis, ctrl in enumerate(sliders): + ctrl.on_changed(lambda k, a=axis: on_changed(k, a)) + + return figure, subplot, image + + +def _app_show(): + """Block the GUI. For use as skimage plugin.""" + pyplot = sys.modules['matplotlib.pyplot'] + pyplot.show() + + +def main(argv=None): + """Command line usage main function.""" + if float(sys.version[0:3]) < 2.6: + print("This script requires Python version 2.6 or better.") + print("This is Python version %s" % sys.version) + return 0 + if argv is None: + argv = sys.argv + + import optparse + + search_doc = lambda r, d: re.search(r, __doc__).group(1) if __doc__ else d + parser = optparse.OptionParser( + usage="usage: %prog [options] path", + description=search_doc("\n\n([^|]*?)\n\n", ''), + version="%%prog %s" % search_doc(":Version: (.*)", "Unknown")) + opt = parser.add_option + opt('-p', '--page', dest='page', type='int', default=-1, + help="display single page") + opt('-s', '--series', dest='series', type='int', default=-1, + help="display series of pages of same shape") + opt('--nomultifile', dest='nomultifile', action='store_true', + default=False, help="don't read OME series from multiple files") + opt('--noplot', dest='noplot', action='store_true', default=False, + help="don't display images") + opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear', + help="image interpolation method") + opt('--dpi', dest='dpi', type='int', default=96, + help="set plot resolution") + opt('--debug', dest='debug', action='store_true', default=False, + help="raise exception on failures") + opt('--test', dest='test', action='store_true', default=False, + help="try read all images in path") + opt('--doctest', dest='doctest', action='store_true', default=False, + help="runs the internal tests") + opt('-v', '--verbose', dest='verbose', action='store_true', default=True) + opt('-q', '--quiet', dest='verbose', action='store_false') + + settings, path = parser.parse_args() + path = ' '.join(path) + + if 
settings.doctest:
+        import doctest
+        doctest.testmod()
+        return 0
+    if not path:
+        parser.error("No file specified")
+    if settings.test:
+        test_tifffile(path, settings.verbose)
+        return 0
+
+    if any(i in path for i in '?*'):
+        path = glob.glob(path)
+        if not path:
+            print('no files match the pattern')
+            return 0
+        # TODO: handle image sequences
+        #if len(path) == 1:
+        path = path[0]
+
+    print("Reading file structure...", end=' ')
+    start = time.time()
+    try:
+        tif = TiffFile(path, multifile=not settings.nomultifile)
+    except Exception as e:
+        if settings.debug:
+            raise
+        else:
+            print("\n", e)
+            sys.exit(0)
+    print("%.3f ms" % ((time.time()-start) * 1e3))
+
+    if tif.is_ome:
+        settings.norgb = True
+
+    images = [(None, tif[0 if settings.page < 0 else settings.page])]
+    if not settings.noplot:
+        print("Reading image data... ", end=' ')
+
+        def notnone(x):
+            return next(i for i in x if i is not None)
+        start = time.time()
+        try:
+            if settings.page >= 0:
+                images = [(tif.asarray(key=settings.page),
+                           tif[settings.page])]
+            elif settings.series >= 0:
+                images = [(tif.asarray(series=settings.series),
+                           notnone(tif.series[settings.series].pages))]
+            else:
+                images = []
+                for i, s in enumerate(tif.series):
+                    try:
+                        images.append(
+                            (tif.asarray(series=i), notnone(s.pages)))
+                    except ValueError as e:
+                        images.append((None, notnone(s.pages)))
+                        if settings.debug:
+                            raise
+                        else:
+                            print("\n* series %i failed: %s... " % (i, e),
+                                  end='')
+            print("%.3f ms" % ((time.time()-start) * 1e3))
+        except Exception as e:
+            if settings.debug:
+                raise
+            else:
+                print(e)
+
+    tif.close()
+
+    print("\nTIFF file:", tif)
+    print()
+    for i, s in enumerate(tif.series):
+        print("Series %i" % i)
+        print(s)
+        print()
+    for i, page in images:
+        print(page)
+        print(page.tags)
+        if page.is_palette:
+            print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
+        for attr in ('cz_lsm_info', 'cz_lsm_scan_information', 'mm_uic_tags',
+                     'mm_header', 'imagej_tags', 'micromanager_metadata',
+                     'nih_image_header'):
+            if hasattr(page, attr):
+                print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
+        print()
+        if page.is_micromanager:
+            print('MICROMANAGER_FILE_METADATA')
+            print(Record(tif.micromanager_metadata))
+
+    if images and not settings.noplot:
+        try:
+            import matplotlib
+            matplotlib.use('TkAgg')
+            from matplotlib import pyplot
+        except ImportError as e:
+            warnings.warn("failed to import matplotlib.\n%s" % e)
+        else:
+            for img, page in images:
+                if img is None:
+                    continue
+                vmin, vmax = None, None
+                if 'gdal_nodata' in page.tags:
+                    vmin = numpy.min(img[img > float(page.gdal_nodata)])
+                if page.is_stk:
+                    try:
+                        vmin = page.mm_uic_tags['min_scale']
+                        vmax = page.mm_uic_tags['max_scale']
+                    except KeyError:
+                        pass
+                    else:
+                        if vmax <= vmin:
+                            vmin, vmax = None, None
+                title = "%s\n %s" % (str(tif), str(page))
+                imshow(img, title=title, vmin=vmin, vmax=vmax,
+                       bitspersample=page.bits_per_sample,
+                       photometric=page.photometric,
+                       interpolation=settings.interpol,
+                       dpi=settings.dpi)
+            pyplot.show()
+
+
+TIFFfile = TiffFile  # backwards compatibility
+
+if sys.version_info[0] > 2:
+    # Python 2 style aliases for string-type checks
+    basestring = str, bytes
+    unicode = str
+
+if __name__ == "__main__":
+    sys.exit(main())
\ No newline at end of file
diff --git a/corticalmapping/ephys/KilosortWrapper.py b/corticalmapping/ephys/KilosortWrapper.py
index 50de1d5..a192d8a 100644
--- a/corticalmapping/ephys/KilosortWrapper.py
+++ b/corticalmapping/ephys/KilosortWrapper.py
@@ -39,7 +39,7 @@ def get_clusters(csv_output):
         try:
             cluster_id = int(cluster[0])
         except ValueError:
-            print cluster[0],
'can not be converted into integer.' + print(cluster[0], 'can not be converted into integer.') continue cluster_type = cluster[1] @@ -70,7 +70,7 @@ def get_spike_times_indices(clusters, spike_clusters_path, spike_times_path): spike_ind = {} - for cluster, cluster_id in clusters.iteritems(): + for cluster, cluster_id in clusters.items(): if cluster == 'unit_mua': mua_spike_ind = [] for id in cluster_id: @@ -120,23 +120,23 @@ def get_spike_timestamps(spike_ind, h5_path): :return: update the h5_file to contain timestamps in seconds of each defined unit in spike_ind for each file """ h5_file = h5py.File(h5_path, 'r+') - folder_list = [f for f in h5_file.keys() if f[0:6] == 'folder'] + folder_list = [f for f in list(h5_file.keys()) if f[0:6] == 'folder'] fs = h5_file['fs_hz'].value - units = spike_ind.keys() + units = list(spike_ind.keys()) units.sort() h5_file.create_dataset('units', data=units) - print folder_list - print fs + print(folder_list) + print(fs) for folder in folder_list: curr_group = h5_file[folder] curr_start_ind = curr_group.attrs['start_index'] curr_end_ind = curr_group.attrs['end_index'] - print curr_start_ind - print curr_end_ind + print(curr_start_ind) + print(curr_end_ind) - for unit, spikes in spike_ind.iteritems(): + for unit, spikes in spike_ind.items(): curr_spike_ind = [spk for spk in spikes if spk >= curr_start_ind and spk < curr_end_ind] curr_spike_ind = np.array(curr_spike_ind, dtype=np.float32) - curr_start_ind @@ -153,16 +153,16 @@ def get_spike_timestamps(spike_ind, h5_path): h5_path = r"G:\160610-M240652\processed_1\160610-M240652.hdf5" cluster_group = read_csv(csv_path) - print 'cluster_group:' - print cluster_group + print('cluster_group:') + print(cluster_group) clusters = get_clusters(cluster_group) - print '\nclusters:' - print clusters + print('\nclusters:') + print(clusters) spike_ind = get_spike_times_indices(clusters, spike_cluster_path, spike_times_path) - print '\nspike_ind:' - print spike_ind.keys() - for key, value in spike_ind.iteritems(): - print key, ':', len(value) - print key, ':', value[0:10] + print('\nspike_ind:') + print(list(spike_ind.keys())) + for key, value in spike_ind.items(): + print(key, ':', len(value)) + print(key, ':', value[0:10]) get_spike_timestamps(spike_ind, h5_path) diff --git a/corticalmapping/ephys/OpenEphysWrapper.py b/corticalmapping/ephys/OpenEphysWrapper.py index 042c5da..80b298f 100644 --- a/corticalmapping/ephys/OpenEphysWrapper.py +++ b/corticalmapping/ephys/OpenEphysWrapper.py @@ -36,7 +36,7 @@ def find_next_valid_block(input_array, bytes_per_block, start_index): first_valid_block_start = i - bytes_per_block break else: - print 'no valid block found after index:', start_index + print('no valid block found after index:', start_index) return first_valid_block_start @@ -81,7 +81,7 @@ def load_continuous_old(file_path, dtype=np.float32): assert dtype in (np.float32, np.int16), \ 'Invalid data type specified for loadContinous, valid types are np.float32 and np.int16' - print "\nLoading continuous data from " + file_path + print("\nLoading continuous data from " + file_path) bytes_per_block = CONTINUOUS_TIMESTAMP_DTYPE.itemsize + CONTINUOUS_SAMPLE_PER_RECORD_DTYPE.itemsize + \ CONTINUOUS_RECORDING_NUMBER_DTYPE.itemsize + CONTINUOUS_MARKER_BYTES + \ @@ -91,12 +91,12 @@ def load_continuous_old(file_path, dtype=np.float32): f = open(file_path, 'rb') file_length = os.fstat(f.fileno()).st_size - print 'total length of the file: ', file_length, 'bytes.' 
+ print('total length of the file: ', file_length, 'bytes.') - print 'bytes per record block: ', bytes_per_block + print('bytes per record block: ', bytes_per_block) block_num = (file_length - oe.NUM_HEADER_BYTES) // bytes_per_block - print 'total number of valid blocks: ', block_num + print('total number of valid blocks: ', block_num) header = oe.readHeader(f) @@ -115,8 +115,8 @@ def load_continuous_old(file_path, dtype=np.float32): N = np.fromfile(f, CONTINUOUS_SAMPLE_PER_RECORD_DTYPE, 1)[0] if N != oe.SAMPLES_PER_RECORD: - print('samples per record specified in block ' + str(i) + ' (' + str(N) + - ') does not equal to expected value (' + str(oe.SAMPLES_PER_RECORD) + ')!') + print(('samples per record specified in block ' + str(i) + ' (' + str(N) + + ') does not equal to expected value (' + str(oe.SAMPLES_PER_RECORD) + ')!')) samples = samples[0 : i * oe.SAMPLES_PER_RECORD] is_break = True break @@ -136,8 +136,8 @@ def load_continuous_old(file_path, dtype=np.float32): record_mark = np.fromfile(f, dtype=np.dtype(' 0) & (er <= end_time)] @@ -469,10 +469,10 @@ def pack_folders(folder_list, output_folder, output_filename, continous_channels curr_ts_group = curr_group.create_group('timestamps') curr_trace_dict, curr_sample_num, fs = pack_folder(folder, prefix, digital_channels=digital_channels) - all_channels = curr_trace_dict.keys() - print '\nall channels in folder ', folder, ':' - print all_channels - print + all_channels = list(curr_trace_dict.keys()) + print('\nall channels in folder ', folder, ':') + print(all_channels) + print() if sampling_rate is None: sampling_rate = fs @@ -502,14 +502,14 @@ def pack_folders(folder_list, output_folder, output_filename, continous_channels data_all.append(curr_data_array.flatten(order='F')) # add continuous channels - for ch, trace in curr_trace_dict.iteritems(): + for ch, trace in curr_trace_dict.items(): if '_CH' not in ch and ch != 'events': curr_dset = curr_con_group.create_dataset(ch[len(prefix) + 1:], data=trace) curr_dset.attrs['unit'] = 'volt' # add digital events events = curr_trace_dict['events'] - for dch, dch_dict in events.iteritems(): + for dch, dch_dict in events.items(): curr_dch_group = curr_dig_group.create_group(dch) curr_dch_group.create_dataset('rise', data=dch_dict['rise']) curr_dch_group.create_dataset('fall', data=dch_dict['fall']) diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/0000_delete_stas_and_repack.py b/corticalmapping/scripts/post_recording/00_old/analysis_database/0000_delete_stas_and_repack.py deleted file mode 100644 index cb8e540..0000000 --- a/corticalmapping/scripts/post_recording/00_old/analysis_database/0000_delete_stas_and_repack.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -import sys -import h5py -import time -from multiprocessing import Pool -import corticalmapping.NwbTools as nt -import corticalmapping.DatabaseTools as dt - -process_num = 5 -save_folder = 'repacked' - -curr_folder = os.path.dirname(os.path.abspath(__file__)) -os.chdir(curr_folder) - -def run_single_file_for_multi_process(params): - - file_path, save_dir, file_ind, total_file_num, t0 = params - - nwb_f = h5py.File(file_path) - if 'STRFs' in nwb_f['analysis']: - del nwb_f['analysis/STRFs'] - - if 'strf_001_LocallySparseNoiseRetinotopicMapping' in nwb_f['analysis']: - del nwb_f['analysis/strf_001_LocallySparseNoiseRetinotopicMapping'] - - if 'response_table_003_DriftingGratingCircleRetinotopicMapping' in nwb_f['analysis']: - del nwb_f['analysis/response_table_003_DriftingGratingCircleRetinotopicMapping'] - - if 
'response_table_001_DriftingGratingCircleRetinotopicMapping' in nwb_f['analysis']: - del nwb_f['analysis/response_table_001_DriftingGratingCircleRetinotopicMapping'] - - nwb_f.close() - - print('{:6.1f} min; {} / {}; {}: repacking ...'.format((time.time() - t0) / 60., - file_ind+1, - total_file_num, - file_path)) - # save_path = os.path.join(save_dir, os.path.splitext(os.path.split(file_path)[1])[0] + '_repacked.nwb') - save_path = os.path.join(save_dir, os.path.splitext(os.path.split(file_path)[1])[0] + '_repacked.nwb') - sys_str = "h5repack {} {}".format(file_path, save_path) - - # print(sys_str) - - os.system(sys_str) - - print('{:6.1f} min; {} / {}; {}: Done.'.format((time.time() - t0) / 60., - file_ind + 1, - total_file_num, - file_path)) - - -def run(): - - t0 = time.time() - - nwb_fns = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'] - nwb_fns.sort() - print('nwb files:') - print('\n'.join(nwb_fns)) - - param_lst = [(os.path.join(curr_folder, nwb_fn), - os.path.join(curr_folder, save_folder), - file_ind, - len(nwb_fns), - t0) for file_ind, nwb_fn in enumerate(nwb_fns)] - - p = Pool(process_num) - p.map(run_single_file_for_multi_process, param_lst) - -if __name__ == "__main__": - run() diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/0010_regenerate_stas.py b/corticalmapping/scripts/post_recording/00_old/analysis_database/0010_regenerate_stas.py deleted file mode 100644 index 3e2f64e..0000000 --- a/corticalmapping/scripts/post_recording/00_old/analysis_database/0010_regenerate_stas.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -import time -from multiprocessing import Pool -import corticalmapping.NwbTools as nt -import corticalmapping.DatabaseTools as dt - -process_num = 5 -nwb_folder = 'repacked' -strf_t_win = [-0.5, 2.] 
-dgc_t_win = [-1., 2.5] - -curr_folder = os.path.dirname(os.path.abspath(__file__)) -os.chdir(curr_folder) - -def run_single_file_for_multi_process(params): - - file_path, file_ind, total_file_num, t0 = params - - print('{:6.1f} min; {} / {}; {}: regenerating strf ...'.format((time.time() - t0) / 60., - file_ind + 1, - total_file_num, - file_path)) - - nwb_f = nt.RecordedFile(file_path) - lsn_name = '001_LocallySparseNoiseRetinotopicMapping' - if dt.get_strf_grp_key(nwb_f.file_pointer) is None: - if lsn_name in nwb_f.file_pointer['analysis/photodiode_onsets']: - nwb_f.get_spatial_temporal_receptive_field_retinotopic_mapping(stim_name=lsn_name, - time_window=strf_t_win, - verbose=False) - - dgc_name = '003_DriftingGratingCircleRetinotopicMapping' - if dt.get_dgcrm_grp_key(nwb_f.file_pointer) is None: - if dgc_name in nwb_f.file_pointer['analysis/photodiode_onsets']: - nwb_f.get_drifting_grating_response_table_retinotopic_mapping(stim_name=dgc_name, - time_window=dgc_t_win) - - dgc_name = '001_DriftingGratingCircleRetinotopicMapping' - if dt.get_dgcrm_grp_key(nwb_f.file_pointer) is None: - if dgc_name in nwb_f.file_pointer['analysis/photodiode_onsets']: - nwb_f.get_drifting_grating_response_table_retinotopic_mapping(stim_name=dgc_name, - time_window=dgc_t_win) - - nwb_f.close() - - -def run(): - - t0 = time.time() - - nwb_fns = [f for f in os.listdir(nwb_folder) if f[-4:] == '.nwb'] - nwb_fns.sort() - print('nwb files:') - print('\n'.join(nwb_fns)) - - param_lst = [(os.path.join(curr_folder, nwb_folder, nwb_fn), - file_ind, - len(nwb_fns), - t0) for file_ind, nwb_fn in enumerate(nwb_fns)] - - p = Pool(process_num) - p.map(run_single_file_for_multi_process, param_lst) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/0030_test_sigle_strf.py b/corticalmapping/scripts/post_recording/00_old/analysis_database/0030_test_sigle_strf.py deleted file mode 100644 index d455f9a..0000000 --- a/corticalmapping/scripts/post_recording/00_old/analysis_database/0030_test_sigle_strf.py +++ /dev/null @@ -1,50 +0,0 @@ -import os -import h5py -import numpy as np -import corticalmapping.DatabaseTools as dt -import corticalmapping.SingleCellAnalysis as sca - -fn_original = '180813_M386444_110.nwb' -fn_repacked = '180813_M386444_110_repacked.nwb' - -roi_ind = 0 -params = dt.ANALYSIS_PARAMS -params['gaussian_filter_sigma_rf'] = 1. -params['interpolate_rate_rf'] = 10. -params['rf_z_threshold'] = 1.3 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -roi_n = 'roi_{:04d}'.format(roi_ind) - -def print_peak_z(strf): - strf_dff = strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=1) - - # positive spatial receptive fields - srf_on, srf_off = strf_dff.get_zscore_receptive_field(timeWindow=[0., 0.5]) - - srf = srf_on.gaussian_filter(sigma=1.) - srf = srf.interpolate(ratio=10.) 
- - print(np.max(srf.weights)) - - -f_o = h5py.File(fn_original, 'r') -strf_o = sca.SpatialTemporalReceptiveField.from_h5_group(f_o['analysis/STRFs/plane0/strf_roi_{:04d}'.format(roi_ind)]) -print_peak_z(strf_o) -# print(strf_o.data['traces'][0][5]) -# print(strf_o.time) -f_o.close() - -f_r = h5py.File(os.path.join('repacked', fn_repacked), 'r') -strf_r = dt.get_strf(f_r, plane_n='plane0', roi_ind=0, trace_type='sta_f_center_subtracted') -print_peak_z(strf_r) -# print(strf_r.data['traces'][0][5]) -# print(strf_r.time) - -roi_properties, _, _, _, _, _, _, _, _, _, _, _, _, _ = \ - dt.get_everything_from_roi(nwb_f=f_r, plane_n='plane0', roi_n=roi_n, params=params) -print(roi_properties['rf_pos_on_peak_z']) - -f_r.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/0040_get_roi_metadata.py b/corticalmapping/scripts/post_recording/00_old/analysis_database/0040_get_roi_metadata.py deleted file mode 100644 index 52b66b7..0000000 --- a/corticalmapping/scripts/post_recording/00_old/analysis_database/0040_get_roi_metadata.py +++ /dev/null @@ -1,414 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import time -import pandas as pd -import numpy as np -import h5py -import datetime -import corticalmapping.DatabaseTools as dt -from multiprocessing import Pool - -date_range = [180309, 190411] -database_folder = 'nwbs' -save_folder = "temp_xlsx" -process_num = 8 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('pandas version: {}\n'.format(pd.__version__)) - -params = dt.ANALYSIS_PARAMS -params['trace_type'] = 'f_center_subtracted' -params['rf_z_threshold'] = 1.3 -params['is_collapse_dire'] = False -params['is_collapse_sf'] = False -params['is_collapse_tf'] = False - -columns = [ - 'date', - 'mouse_id', - 'plane_n', - 'roi_n', - 'depth', # microns under pia, float - - # roi mask - 'roi_area', # square micron - 'roi_center_row', # center of roi mask in field of view, row - 'roi_center_col', # center of roi mask in field of view, column - - # trace skewness - 'skew_raw', # skewness of unfiltered trace (neuropil subtracted), float - 'skew_fil', # skewness of highpassed trace, float - - # receptive fields - 'rf_pos_on_peak_z', - 'rf_pos_on_area', - 'rf_pos_on_center_alt', - 'rf_pos_on_center_azi', - - 'rf_pos_off_peak_z', - 'rf_pos_off_area', - 'rf_pos_off_center_alt', - 'rf_pos_off_center_azi', - - 'rf_pos_onoff_peak_z', - 'rf_pos_onoff_area', - 'rf_pos_onoff_center_alt', - 'rf_pos_onoff_center_azi', - - 'rf_pos_lsi', - - 'rf_neg_on_peak_z', - 'rf_neg_on_area', - 'rf_neg_on_center_alt', - 'rf_neg_on_center_azi', - - 'rf_neg_off_peak_z', - 'rf_neg_off_area', - 'rf_neg_off_center_alt', - 'rf_neg_off_center_azi', - - 'rf_neg_onoff_peak_z', - 'rf_neg_onoff_area', - 'rf_neg_onoff_center_alt', - 'rf_neg_onoff_center_azi', - - 'rf_neg_lsi', - - # drifting grating peak response - 'dgc_pos_peak_df', - 'dgc_neg_peak_df', - 'dgc_pos_p_ttest_df', - 'dgc_neg_p_ttest_df', - 'dgc_p_anova_df', - - 'dgc_pos_peak_dff', - 'dgc_neg_peak_dff', - 'dgc_pos_p_ttest_dff', - 'dgc_neg_p_ttest_dff', - 'dgc_p_anova_dff', - - 'dgc_pos_peak_z', - 'dgc_neg_peak_z', - 'dgc_pos_p_ttest_z', - 'dgc_neg_p_ttest_z', - 'dgc_p_anova_z', - - # direction / orientation tuning, pos, df - 'dgc_pos_osi_raw_df', - 'dgc_pos_dsi_raw_df', - 'dgc_pos_gosi_raw_df', - 'dgc_pos_gdsi_raw_df', - 'dgc_pos_osi_ele_df', - 'dgc_pos_dsi_ele_df', - 'dgc_pos_gosi_ele_df', - 'dgc_pos_gdsi_ele_df', - 'dgc_pos_osi_rec_df', 
- 'dgc_pos_dsi_rec_df', - 'dgc_pos_gosi_rec_df', - 'dgc_pos_gdsi_rec_df', - 'dgc_pos_peak_dire_raw_df', - 'dgc_pos_vs_dire_raw_df', - 'dgc_pos_vs_dire_ele_df', - 'dgc_pos_vs_dire_rec_df', - - # direction / orientation tuning, neg, df - 'dgc_neg_osi_raw_df', - 'dgc_neg_dsi_raw_df', - 'dgc_neg_gosi_raw_df', - 'dgc_neg_gdsi_raw_df', - 'dgc_neg_osi_ele_df', - 'dgc_neg_dsi_ele_df', - 'dgc_neg_gosi_ele_df', - 'dgc_neg_gdsi_ele_df', - 'dgc_neg_osi_rec_df', - 'dgc_neg_dsi_rec_df', - 'dgc_neg_gosi_rec_df', - 'dgc_neg_gdsi_rec_df', - 'dgc_neg_peak_dire_raw_df', - 'dgc_neg_vs_dire_raw_df', - 'dgc_neg_vs_dire_ele_df', - 'dgc_neg_vs_dire_rec_df', - - # direction / orientation tuning, pos, dff - 'dgc_pos_osi_raw_dff', - 'dgc_pos_dsi_raw_dff', - 'dgc_pos_gosi_raw_dff', - 'dgc_pos_gdsi_raw_dff', - 'dgc_pos_osi_ele_dff', - 'dgc_pos_dsi_ele_dff', - 'dgc_pos_gosi_ele_dff', - 'dgc_pos_gdsi_ele_dff', - 'dgc_pos_osi_rec_dff', - 'dgc_pos_dsi_rec_dff', - 'dgc_pos_gosi_rec_dff', - 'dgc_pos_gdsi_rec_dff', - 'dgc_pos_peak_dire_raw_dff', - 'dgc_pos_vs_dire_raw_dff', - 'dgc_pos_vs_dire_ele_dff', - 'dgc_pos_vs_dire_rec_dff', - - # direction / orientation tuning, neg, dff - 'dgc_neg_osi_raw_dff', - 'dgc_neg_dsi_raw_dff', - 'dgc_neg_gosi_raw_dff', - 'dgc_neg_gdsi_raw_dff', - 'dgc_neg_osi_ele_dff', - 'dgc_neg_dsi_ele_dff', - 'dgc_neg_gosi_ele_dff', - 'dgc_neg_gdsi_ele_dff', - 'dgc_neg_osi_rec_dff', - 'dgc_neg_dsi_rec_dff', - 'dgc_neg_gosi_rec_dff', - 'dgc_neg_gdsi_rec_dff', - 'dgc_neg_peak_dire_raw_dff', - 'dgc_neg_vs_dire_raw_dff', - 'dgc_neg_vs_dire_ele_dff', - 'dgc_neg_vs_dire_rec_dff', - - # direction / orientation tuning, pos, zscore - 'dgc_pos_osi_raw_z', - 'dgc_pos_dsi_raw_z', - 'dgc_pos_gosi_raw_z', - 'dgc_pos_gdsi_raw_z', - 'dgc_pos_osi_ele_z', - 'dgc_pos_dsi_ele_z', - 'dgc_pos_gosi_ele_z', - 'dgc_pos_gdsi_ele_z', - 'dgc_pos_osi_rec_z', - 'dgc_pos_dsi_rec_z', - 'dgc_pos_gosi_rec_z', - 'dgc_pos_gdsi_rec_z', - 'dgc_pos_peak_dire_raw_z', - 'dgc_pos_vs_dire_raw_z', - 'dgc_pos_vs_dire_ele_z', - 'dgc_pos_vs_dire_rec_z', - - # direction / orientation tuning, neg, zscore - 'dgc_neg_osi_raw_z', - 'dgc_neg_dsi_raw_z', - 'dgc_neg_gosi_raw_z', - 'dgc_neg_gdsi_raw_z', - 'dgc_neg_osi_ele_z', - 'dgc_neg_dsi_ele_z', - 'dgc_neg_gosi_ele_z', - 'dgc_neg_gdsi_ele_z', - 'dgc_neg_osi_rec_z', - 'dgc_neg_dsi_rec_z', - 'dgc_neg_gosi_rec_z', - 'dgc_neg_gdsi_rec_z', - 'dgc_neg_peak_dire_raw_z', - 'dgc_neg_vs_dire_raw_z', - 'dgc_neg_vs_dire_ele_z', - 'dgc_neg_vs_dire_rec_z', - - # sf tuning, pos, df - 'dgc_pos_peak_sf_raw_df', - 'dgc_pos_weighted_sf_raw_df', - 'dgc_pos_weighted_sf_log_raw_df', - 'dgc_pos_weighted_sf_ele_df', - 'dgc_pos_weighted_sf_log_ele_df', - 'dgc_pos_weighted_sf_rec_df', - 'dgc_pos_weighted_sf_log_rec_df', - - # sf tuning, neg, df - 'dgc_neg_peak_sf_raw_df', - 'dgc_neg_weighted_sf_raw_df', - 'dgc_neg_weighted_sf_log_raw_df', - 'dgc_neg_weighted_sf_ele_df', - 'dgc_neg_weighted_sf_log_ele_df', - 'dgc_neg_weighted_sf_rec_df', - 'dgc_neg_weighted_sf_log_rec_df', - - # sf tuning, pos, dff - 'dgc_pos_peak_sf_raw_dff', - 'dgc_pos_weighted_sf_raw_dff', - 'dgc_pos_weighted_sf_log_raw_dff', - 'dgc_pos_weighted_sf_ele_dff', - 'dgc_pos_weighted_sf_log_ele_dff', - 'dgc_pos_weighted_sf_rec_dff', - 'dgc_pos_weighted_sf_log_rec_dff', - - # sf tuning, neg, dff - 'dgc_neg_peak_sf_raw_dff', - 'dgc_neg_weighted_sf_raw_dff', - 'dgc_neg_weighted_sf_log_raw_dff', - 'dgc_neg_weighted_sf_ele_dff', - 'dgc_neg_weighted_sf_log_ele_dff', - 'dgc_neg_weighted_sf_rec_dff', - 'dgc_neg_weighted_sf_log_rec_dff', - - # sf tuning, pos, zscore - 
'dgc_pos_peak_sf_raw_z', - 'dgc_pos_weighted_sf_raw_z', - 'dgc_pos_weighted_sf_log_raw_z', - 'dgc_pos_weighted_sf_ele_z', - 'dgc_pos_weighted_sf_log_ele_z', - 'dgc_pos_weighted_sf_rec_z', - 'dgc_pos_weighted_sf_log_rec_z', - - # sf tuning, neg, zscore - 'dgc_neg_peak_sf_raw_z', - 'dgc_neg_weighted_sf_raw_z', - 'dgc_neg_weighted_sf_log_raw_z', - 'dgc_neg_weighted_sf_ele_z', - 'dgc_neg_weighted_sf_log_ele_z', - 'dgc_neg_weighted_sf_rec_z', - 'dgc_neg_weighted_sf_log_rec_z', - - # tf tuning, pos, df - 'dgc_pos_peak_tf_raw_df', - 'dgc_pos_weighted_tf_raw_df', - 'dgc_pos_weighted_tf_log_raw_df', - 'dgc_pos_weighted_tf_ele_df', - 'dgc_pos_weighted_tf_log_ele_df', - 'dgc_pos_weighted_tf_rec_df', - 'dgc_pos_weighted_tf_log_rec_df', - - # tf tuning, neg, df - 'dgc_neg_peak_tf_raw_df', - 'dgc_neg_weighted_tf_raw_df', - 'dgc_neg_weighted_tf_log_raw_df', - 'dgc_neg_weighted_tf_ele_df', - 'dgc_neg_weighted_tf_log_ele_df', - 'dgc_neg_weighted_tf_rec_df', - 'dgc_neg_weighted_tf_log_rec_df', - - # tf tuning, pos, dff - 'dgc_pos_peak_tf_raw_dff', - 'dgc_pos_weighted_tf_raw_dff', - 'dgc_pos_weighted_tf_log_raw_dff', - 'dgc_pos_weighted_tf_ele_dff', - 'dgc_pos_weighted_tf_log_ele_dff', - 'dgc_pos_weighted_tf_rec_dff', - 'dgc_pos_weighted_tf_log_rec_dff', - - # tf tuning, neg, dff - 'dgc_neg_peak_tf_raw_dff', - 'dgc_neg_weighted_tf_raw_dff', - 'dgc_neg_weighted_tf_log_raw_dff', - 'dgc_neg_weighted_tf_ele_dff', - 'dgc_neg_weighted_tf_log_ele_dff', - 'dgc_neg_weighted_tf_rec_dff', - 'dgc_neg_weighted_tf_log_rec_dff', - - # tf tuning, pos, zscore - 'dgc_pos_peak_tf_raw_z', - 'dgc_pos_weighted_tf_raw_z', - 'dgc_pos_weighted_tf_log_raw_z', - 'dgc_pos_weighted_tf_ele_z', - 'dgc_pos_weighted_tf_log_ele_z', - 'dgc_pos_weighted_tf_rec_z', - 'dgc_pos_weighted_tf_log_rec_z', - - # tf tuning, neg, zscore - 'dgc_neg_peak_tf_raw_z', - 'dgc_neg_weighted_tf_raw_z', - 'dgc_neg_weighted_tf_log_raw_z', - 'dgc_neg_weighted_tf_ele_z', - 'dgc_neg_weighted_tf_log_ele_z', - 'dgc_neg_weighted_tf_rec_z', - 'dgc_neg_weighted_tf_log_rec_z', -] - -def process_one_nwb_for_multi_thread(inputs): - - nwb_path, params, columns, save_folder, t0 = inputs - nwb_fn = os.path.splitext(os.path.split(nwb_path)[1])[0] - save_path = os.path.join(save_folder, nwb_fn + '.xlsx') - - if os.path.isfile(save_path): - print('\tt: {:5.0f} minutes, {} , excel table already exists, skip.'.format((time.time() - t0) / 60., nwb_fn)) - return None - else: - nwb_f = h5py.File(nwb_path, 'r') - - plane_ns = [k for k in nwb_f['processing'].keys() if k[0:16] == 'rois_and_traces_'] - plane_ns = [k[16:] for k in plane_ns] - plane_ns.sort() - # print('total plane number: {}'.format(len(plane_ns))) - - total_roi_num = 10000 - big_array = np.zeros((total_roi_num, len(columns)), dtype=np.float64) - big_array[:] = np.nan - df = pd.DataFrame(data=big_array, columns=columns) - - curr_row_ind = 0 - for plane_n in plane_ns: - print('\tt: {:5.0f} minutes, {} / {}, processing ...'.format((time.time() - t0) / 60., nwb_fn, plane_n)) - roi_ns = nwb_f['processing/rois_and_traces_{}/ImageSegmentation/imaging_plane/roi_list'.format(plane_n)].value - roi_ns = [r.encode('utf-8') for r in roi_ns if r[0:4] == 'roi_'] - roi_ns.sort() - - for roi_i, roi_n in enumerate(roi_ns): - # print('\t\t\troi: {} / {}'.format(roi_i+1, len(roi_ns))) - roi_properties, _, _, _, _, _, _, _, _, _, _, _, _, _ = \ - dt.get_everything_from_roi(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n, params=params) - for rp_name, rp_value in roi_properties.items(): - df.loc[curr_row_ind, rp_name] = rp_value - - curr_row_ind += 1 
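The loop above fills a generously preallocated all-NaN table and trims it afterwards, which avoids growing a DataFrame row by row. A minimal, self-contained sketch of the same pattern, with made-up column names and values:

import numpy as np
import pandas as pd

columns = ['roi_area', 'skew_raw', 'skew_fil']  # hypothetical columns
big_array = np.full((100, len(columns)), np.nan)  # preallocate, trim later
df = pd.DataFrame(data=big_array, columns=columns)

curr_row_ind = 0
for props in ({'roi_area': 120.5, 'skew_raw': 0.8},
              {'roi_area': 98.2, 'skew_fil': 1.1}):
    for rp_name, rp_value in props.items():
        df.loc[curr_row_ind, rp_name] = rp_value
    curr_row_ind += 1

df = df[0:curr_row_ind]  # drop the unused preallocated rows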
- - df = df[0:curr_row_ind] - - save_path = os.path.join(save_folder, nwb_fn + '.xlsx') - if os.path.isfile(save_path): - os.remove(save_path) - - with pd.ExcelWriter(save_path, mode='w') as writer: - df.to_excel(writer, sheet_name='sheet1') - - -def run(): - - t0 = time.time() - - nwb_fns = [] - for fn in os.listdir(database_folder): - if fn[-4:] == '.nwb' and date_range[0] <= int(fn[0:6]) <= date_range[1]: - nwb_fns.append(fn) - nwb_fns.sort() - print('\nnwb file list:') - print('\n'.join(nwb_fns)) - - inputs_lst = [(os.path.join(curr_folder, database_folder, nwb_fn), - params, - columns, - os.path.join(curr_folder, save_folder), - t0) for nwb_fn in nwb_fns] - - print('\nprocessing individual nwb files ...') - p = Pool(process_num) - p.map(process_one_nwb_for_multi_thread, inputs_lst) - # process_one_nwb_for_multi_thread(inputs_lst[0]) - - print('\nConcatenating individual dataframes ...') - xlsx_fns = [f for f in os.listdir(os.path.join(curr_folder,save_folder)) if f[-5:] == '.xlsx'] - xlsx_fns.sort() - - dfs = [] - for xlsx_fn in xlsx_fns: - curr_df = pd.read_excel(os.path.join(curr_folder, save_folder, xlsx_fn), sheetname='sheet1') - # print(curr_df) - dfs.append(curr_df) - - big_df = pd.concat(dfs, ignore_index=True) - - print('\nsaving ...') - date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - save_path = os.path.join(curr_folder, 'big_roi_table_{}.xlsx'.format(date_str)) - - if os.path.isfile(save_path): - with pd.ExcelWriter(save_path, mode='a') as writer: - big_df.to_excel(writer, sheet_name=params['trace_type']) - else: - with pd.ExcelWriter(save_path, mode='w') as writer: - big_df.to_excel(writer, sheet_name=params['trace_type']) - - print('\ndone!') - - -if __name__ == "__main__": - run() diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/0041_get_plane_dfs.py b/corticalmapping/scripts/post_recording/00_old/analysis_database/0041_get_plane_dfs.py deleted file mode 100644 index 9c4746c..0000000 --- a/corticalmapping/scripts/post_recording/00_old/analysis_database/0041_get_plane_dfs.py +++ /dev/null @@ -1,427 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import time -import pandas as pd -import numpy as np -import h5py -import datetime -import corticalmapping.DatabaseTools as dt -from multiprocessing import Pool -from shutil import copyfile - -date_range = [180201, 190610] -database_folder = 'nwbs' -save_folder_n = "dataframes" -process_num = 8 -is_overwrite = False - -params = dt.ANALYSIS_PARAMS -params['trace_type'] = 'f_center_subtracted' -params['is_collapse_dire'] = False -params['is_collapse_sf'] = True -params['is_collapse_tf'] = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('pandas version: {}\n'.format(pd.__version__)) - -columns = [ - 'date', - 'mouse_id', - 'plane_n', - 'roi_n', - 'depth', # microns under pia, float - - # roi mask - 'roi_area', # square micron - 'roi_center_row', # center of roi mask in field of view, row - 'roi_center_col', # center of roi mask in field of view, column - - # trace skewness - 'skew_raw', # skewness of unfiltered trace (neuropil subtracted), float - 'skew_fil', # skewness of highpassed trace, float - - # receptive fields - 'rf_pos_on_peak_z', - 'rf_pos_on_area', - 'rf_pos_on_center_alt', - 'rf_pos_on_center_azi', - - 'rf_pos_off_peak_z', - 'rf_pos_off_area', - 'rf_pos_off_center_alt', - 'rf_pos_off_center_azi', - - 'rf_pos_onoff_peak_z', - 'rf_pos_onoff_area', - 'rf_pos_onoff_center_alt', - 
'rf_pos_onoff_center_azi', - - 'rf_pos_lsi', - - 'rf_neg_on_peak_z', - 'rf_neg_on_area', - 'rf_neg_on_center_alt', - 'rf_neg_on_center_azi', - - 'rf_neg_off_peak_z', - 'rf_neg_off_area', - 'rf_neg_off_center_alt', - 'rf_neg_off_center_azi', - - 'rf_neg_onoff_peak_z', - 'rf_neg_onoff_area', - 'rf_neg_onoff_center_alt', - 'rf_neg_onoff_center_azi', - - 'rf_neg_lsi', - - # drifting grating peak response - 'dgc_pos_peak_df', - 'dgc_neg_peak_df', - 'dgc_pos_p_ttest_df', - 'dgc_neg_p_ttest_df', - 'dgc_p_anova_df', - - 'dgc_pos_peak_dff', - 'dgc_neg_peak_dff', - 'dgc_pos_p_ttest_dff', - 'dgc_neg_p_ttest_dff', - 'dgc_p_anova_dff', - - 'dgc_pos_peak_z', - 'dgc_neg_peak_z', - 'dgc_pos_p_ttest_z', - 'dgc_neg_p_ttest_z', - 'dgc_p_anova_z', - - # direction / orientation tuning, pos, df - 'dgc_pos_osi_raw_df', - 'dgc_pos_dsi_raw_df', - 'dgc_pos_gosi_raw_df', - 'dgc_pos_gdsi_raw_df', - 'dgc_pos_osi_ele_df', - 'dgc_pos_dsi_ele_df', - 'dgc_pos_gosi_ele_df', - 'dgc_pos_gdsi_ele_df', - 'dgc_pos_osi_rec_df', - 'dgc_pos_dsi_rec_df', - 'dgc_pos_gosi_rec_df', - 'dgc_pos_gdsi_rec_df', - 'dgc_pos_peak_dire_raw_df', - 'dgc_pos_vs_dire_raw_df', - 'dgc_pos_vs_dire_ele_df', - 'dgc_pos_vs_dire_rec_df', - - # direction / orientation tuning, neg, df - 'dgc_neg_osi_raw_df', - 'dgc_neg_dsi_raw_df', - 'dgc_neg_gosi_raw_df', - 'dgc_neg_gdsi_raw_df', - 'dgc_neg_osi_ele_df', - 'dgc_neg_dsi_ele_df', - 'dgc_neg_gosi_ele_df', - 'dgc_neg_gdsi_ele_df', - 'dgc_neg_osi_rec_df', - 'dgc_neg_dsi_rec_df', - 'dgc_neg_gosi_rec_df', - 'dgc_neg_gdsi_rec_df', - 'dgc_neg_peak_dire_raw_df', - 'dgc_neg_vs_dire_raw_df', - 'dgc_neg_vs_dire_ele_df', - 'dgc_neg_vs_dire_rec_df', - - # direction / orientation tuning, pos, dff - 'dgc_pos_osi_raw_dff', - 'dgc_pos_dsi_raw_dff', - 'dgc_pos_gosi_raw_dff', - 'dgc_pos_gdsi_raw_dff', - 'dgc_pos_osi_ele_dff', - 'dgc_pos_dsi_ele_dff', - 'dgc_pos_gosi_ele_dff', - 'dgc_pos_gdsi_ele_dff', - 'dgc_pos_osi_rec_dff', - 'dgc_pos_dsi_rec_dff', - 'dgc_pos_gosi_rec_dff', - 'dgc_pos_gdsi_rec_dff', - 'dgc_pos_peak_dire_raw_dff', - 'dgc_pos_vs_dire_raw_dff', - 'dgc_pos_vs_dire_ele_dff', - 'dgc_pos_vs_dire_rec_dff', - - # direction / orientation tuning, neg, dff - 'dgc_neg_osi_raw_dff', - 'dgc_neg_dsi_raw_dff', - 'dgc_neg_gosi_raw_dff', - 'dgc_neg_gdsi_raw_dff', - 'dgc_neg_osi_ele_dff', - 'dgc_neg_dsi_ele_dff', - 'dgc_neg_gosi_ele_dff', - 'dgc_neg_gdsi_ele_dff', - 'dgc_neg_osi_rec_dff', - 'dgc_neg_dsi_rec_dff', - 'dgc_neg_gosi_rec_dff', - 'dgc_neg_gdsi_rec_dff', - 'dgc_neg_peak_dire_raw_dff', - 'dgc_neg_vs_dire_raw_dff', - 'dgc_neg_vs_dire_ele_dff', - 'dgc_neg_vs_dire_rec_dff', - - # direction / orientation tuning, pos, zscore - 'dgc_pos_osi_raw_z', - 'dgc_pos_dsi_raw_z', - 'dgc_pos_gosi_raw_z', - 'dgc_pos_gdsi_raw_z', - 'dgc_pos_osi_ele_z', - 'dgc_pos_dsi_ele_z', - 'dgc_pos_gosi_ele_z', - 'dgc_pos_gdsi_ele_z', - 'dgc_pos_osi_rec_z', - 'dgc_pos_dsi_rec_z', - 'dgc_pos_gosi_rec_z', - 'dgc_pos_gdsi_rec_z', - 'dgc_pos_peak_dire_raw_z', - 'dgc_pos_vs_dire_raw_z', - 'dgc_pos_vs_dire_ele_z', - 'dgc_pos_vs_dire_rec_z', - - # direction / orientation tuning, neg, zscore - 'dgc_neg_osi_raw_z', - 'dgc_neg_dsi_raw_z', - 'dgc_neg_gosi_raw_z', - 'dgc_neg_gdsi_raw_z', - 'dgc_neg_osi_ele_z', - 'dgc_neg_dsi_ele_z', - 'dgc_neg_gosi_ele_z', - 'dgc_neg_gdsi_ele_z', - 'dgc_neg_osi_rec_z', - 'dgc_neg_dsi_rec_z', - 'dgc_neg_gosi_rec_z', - 'dgc_neg_gdsi_rec_z', - 'dgc_neg_peak_dire_raw_z', - 'dgc_neg_vs_dire_raw_z', - 'dgc_neg_vs_dire_ele_z', - 'dgc_neg_vs_dire_rec_z', - - # sf tuning, pos, df - 'dgc_pos_peak_sf_raw_df', - 
'dgc_pos_weighted_sf_raw_df', - 'dgc_pos_weighted_sf_log_raw_df', - 'dgc_pos_weighted_sf_ele_df', - 'dgc_pos_weighted_sf_log_ele_df', - 'dgc_pos_weighted_sf_rec_df', - 'dgc_pos_weighted_sf_log_rec_df', - - # sf tuning, neg, df - 'dgc_neg_peak_sf_raw_df', - 'dgc_neg_weighted_sf_raw_df', - 'dgc_neg_weighted_sf_log_raw_df', - 'dgc_neg_weighted_sf_ele_df', - 'dgc_neg_weighted_sf_log_ele_df', - 'dgc_neg_weighted_sf_rec_df', - 'dgc_neg_weighted_sf_log_rec_df', - - # sf tuning, pos, dff - 'dgc_pos_peak_sf_raw_dff', - 'dgc_pos_weighted_sf_raw_dff', - 'dgc_pos_weighted_sf_log_raw_dff', - 'dgc_pos_weighted_sf_ele_dff', - 'dgc_pos_weighted_sf_log_ele_dff', - 'dgc_pos_weighted_sf_rec_dff', - 'dgc_pos_weighted_sf_log_rec_dff', - - # sf tuning, neg, dff - 'dgc_neg_peak_sf_raw_dff', - 'dgc_neg_weighted_sf_raw_dff', - 'dgc_neg_weighted_sf_log_raw_dff', - 'dgc_neg_weighted_sf_ele_dff', - 'dgc_neg_weighted_sf_log_ele_dff', - 'dgc_neg_weighted_sf_rec_dff', - 'dgc_neg_weighted_sf_log_rec_dff', - - # sf tuning, pos, zscore - 'dgc_pos_peak_sf_raw_z', - 'dgc_pos_weighted_sf_raw_z', - 'dgc_pos_weighted_sf_log_raw_z', - 'dgc_pos_weighted_sf_ele_z', - 'dgc_pos_weighted_sf_log_ele_z', - 'dgc_pos_weighted_sf_rec_z', - 'dgc_pos_weighted_sf_log_rec_z', - - # sf tuning, neg, zscore - 'dgc_neg_peak_sf_raw_z', - 'dgc_neg_weighted_sf_raw_z', - 'dgc_neg_weighted_sf_log_raw_z', - 'dgc_neg_weighted_sf_ele_z', - 'dgc_neg_weighted_sf_log_ele_z', - 'dgc_neg_weighted_sf_rec_z', - 'dgc_neg_weighted_sf_log_rec_z', - - # tf tuning, pos, df - 'dgc_pos_peak_tf_raw_df', - 'dgc_pos_weighted_tf_raw_df', - 'dgc_pos_weighted_tf_log_raw_df', - 'dgc_pos_weighted_tf_ele_df', - 'dgc_pos_weighted_tf_log_ele_df', - 'dgc_pos_weighted_tf_rec_df', - 'dgc_pos_weighted_tf_log_rec_df', - - # tf tuning, neg, df - 'dgc_neg_peak_tf_raw_df', - 'dgc_neg_weighted_tf_raw_df', - 'dgc_neg_weighted_tf_log_raw_df', - 'dgc_neg_weighted_tf_ele_df', - 'dgc_neg_weighted_tf_log_ele_df', - 'dgc_neg_weighted_tf_rec_df', - 'dgc_neg_weighted_tf_log_rec_df', - - # tf tuning, pos, dff - 'dgc_pos_peak_tf_raw_dff', - 'dgc_pos_weighted_tf_raw_dff', - 'dgc_pos_weighted_tf_log_raw_dff', - 'dgc_pos_weighted_tf_ele_dff', - 'dgc_pos_weighted_tf_log_ele_dff', - 'dgc_pos_weighted_tf_rec_dff', - 'dgc_pos_weighted_tf_log_rec_dff', - - # tf tuning, neg, dff - 'dgc_neg_peak_tf_raw_dff', - 'dgc_neg_weighted_tf_raw_dff', - 'dgc_neg_weighted_tf_log_raw_dff', - 'dgc_neg_weighted_tf_ele_dff', - 'dgc_neg_weighted_tf_log_ele_dff', - 'dgc_neg_weighted_tf_rec_dff', - 'dgc_neg_weighted_tf_log_rec_dff', - - # tf tuning, pos, zscore - 'dgc_pos_peak_tf_raw_z', - 'dgc_pos_weighted_tf_raw_z', - 'dgc_pos_weighted_tf_log_raw_z', - 'dgc_pos_weighted_tf_ele_z', - 'dgc_pos_weighted_tf_log_ele_z', - 'dgc_pos_weighted_tf_rec_z', - 'dgc_pos_weighted_tf_log_rec_z', - - # tf tuning, neg, zscore - 'dgc_neg_peak_tf_raw_z', - 'dgc_neg_weighted_tf_raw_z', - 'dgc_neg_weighted_tf_log_raw_z', - 'dgc_neg_weighted_tf_ele_z', - 'dgc_neg_weighted_tf_log_ele_z', - 'dgc_neg_weighted_tf_rec_z', - 'dgc_neg_weighted_tf_log_rec_z', -] - -def process_one_nwb_for_multi_thread(inputs): - - nwb_path, params, columns, save_folder, t0, nwb_i, nwb_f_num, is_overwrite = inputs - nwb_fn = os.path.splitext(os.path.split(nwb_path)[1])[0] - - nwb_f = h5py.File(nwb_path, 'r') - - plane_ns = [k for k in nwb_f['processing'].keys() if k[0:16] == 'rois_and_traces_'] - plane_ns = [k[16:] for k in plane_ns] - plane_ns.sort() - # print('total plane number: {}'.format(len(plane_ns))) - - for plane_n in plane_ns: - print('\tt: {:5.0f} minutes, 
processing {}, {} / {}, {} ...'.format((time.time() - t0) / 60., - nwb_fn, - nwb_i + 1, - nwb_f_num, - plane_n)) - - save_fn = '_'.join(nwb_fn.split('_')[0:2]) + '_' + plane_n + '.xlsx' - save_path = os.path.join(save_folder, save_fn) - if os.path.isfile(save_path): - - if is_overwrite: # overwrite existing xlsx files - print('\t{}, file already exists. Overwrite.'.format(os.path.split(save_path)[1])) - os.remove(save_path) - - - else: # do not overwrite existing xlsx files - print('\t{}, file already exists. Skip.'.format(os.path.split(save_path)[1])) - return - - roi_ns = nwb_f['processing/rois_and_traces_{}/ImageSegmentation/imaging_plane/roi_list'.format(plane_n)].value - roi_ns = [r.encode('utf-8') for r in roi_ns if r[0:4] == 'roi_'] - roi_ns.sort() - - df = pd.DataFrame(np.nan, index=range(len(roi_ns)), columns=columns) - - for roi_i, roi_n in enumerate(roi_ns): - # print('\t\t\troi: {} / {}'.format(roi_i+1, len(roi_ns))) - roi_properties, _, _, _, _, _, _, _, _, _, _, _, _, _ = \ - dt.get_everything_from_roi(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n, params=params) - for rp_name, rp_value in roi_properties.items(): - df.loc[roi_i, rp_name] = rp_value - - with pd.ExcelWriter(save_path, mode='w') as writer: - df.to_excel(writer, sheet_name='sheet1') - - -def run(): - - t0 = time.time() - - nwb_fns = [] - for fn in os.listdir(database_folder): - if fn[-4:] == '.nwb' and date_range[0] <= int(fn[0:6]) <= date_range[1]: - nwb_fns.append(fn) - nwb_fns.sort() - print('\nnwb file list:') - print('\n'.join(nwb_fns)) - - date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - save_folder = os.path.join(curr_folder, '{}_{}'.format(save_folder_n, date_str)) - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - copyfile(os.path.realpath(__file__), os.path.join(save_folder, 'script_log.py')) - - inputs_lst = [(os.path.join(curr_folder, database_folder, nwb_fn), - params, - columns, - save_folder, - t0, - nwb_i, - len(nwb_fns), - is_overwrite) for nwb_i, nwb_fn in enumerate(nwb_fns)] - - print('\nprocessing individual nwb files ...') - p = Pool(process_num) - p.map(process_one_nwb_for_multi_thread, inputs_lst) - # process_one_nwb_for_multi_thread(inputs_lst[0]) - - # print('\nConcatenating individual dataframes ...') - # xlsx_fns = [f for f in os.listdir(os.path.join(curr_folder,save_folder)) if f[-5:] == '.xlsx'] - # xlsx_fns.sort() - # - # dfs = [] - # for xlsx_fn in xlsx_fns: - # curr_df = pd.read_excel(os.path.join(curr_folder, save_folder, xlsx_fn), sheetname='sheet1') - # # print(curr_df) - # dfs.append(curr_df) - # - # big_df = pd.concat(dfs, ignore_index=True) - # - # print('\nsaving ...') - # date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - # save_path = os.path.join(curr_folder, 'big_roi_table_{}.xlsx'.format(date_str)) - # - # if os.path.isfile(save_path): - # with pd.ExcelWriter(save_path, mode='a') as writer: - # big_df.to_excel(writer, sheet_name=params['trace_type']) - # else: - # with pd.ExcelWriter(save_path, mode='w') as writer: - # big_df.to_excel(writer, sheet_name=params['trace_type']) - - print('\ndone!') - - -if __name__ == "__main__": - run() diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/0042_get_plane_meta.py b/corticalmapping/scripts/post_recording/00_old/analysis_database/0042_get_plane_meta.py deleted file mode 100644 index e8cb038..0000000 --- a/corticalmapping/scripts/post_recording/00_old/analysis_database/0042_get_plane_meta.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import datetime -import pandas 
as pd - -df_folder = 'dataframes_190523104427' -save_fn = 'plane_table' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -fns = [fn for fn in os.listdir(df_folder) if fn[-5:] == '.xlsx'] -print('\n'.join(fns)) - -df = pd.DataFrame(index=range(len(fns)), columns=['date', 'mouse_id', 'plane_n', 'volume_n', - 'depth', 'has_lsn', 'has_dgc']) - -for fn_i, fn in enumerate(fns): - - date = fn.split('_')[0] - mouse_id = fn.split('_')[1] - plane_n = fn.split('_')[-1][0:-5] - - df.loc[fn_i] = [date, mouse_id, plane_n, '', 0, True, True] - -df.sort_values(by=['mouse_id', 'date', 'plane_n'], inplace=True) -df.reset_index(inplace=True, drop=True) - -print(df) - -date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') -with pd.ExcelWriter('{}_{}.xlsx'.format(save_fn, date_str), mode='w') as writer: - df.to_excel(writer, sheet_name='sheet1') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/0050_generate_page_report.py b/corticalmapping/scripts/post_recording/00_old/analysis_database/0050_generate_page_report.py deleted file mode 100644 index ed9423e..0000000 --- a/corticalmapping/scripts/post_recording/00_old/analysis_database/0050_generate_page_report.py +++ /dev/null @@ -1,50 +0,0 @@ -import os -import corticalmapping.DatabaseTools as dt -import pandas as pd -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages -import h5py -import datetime - -area_lim = 100. -nwb_folder = 'nwbs' - -summary_fn = 'big_roi_table_190404145421.xlsx' -sheet_n = 'f_center_raw' - -params = dt.ANALYSIS_PARAMS -params['trace_type'] = sheet_n -params['response_window_dgc'] = [0., 1.5] - -plot_params = dt.PLOTTING_PARAMS -plot_params['response_type_for_plot'] = 'zscore' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -db_df = pd.read_excel(summary_fn, sheet_name=sheet_n) -print(db_df.head()) - -pdf_fn = 'page_report_{}.pdf'.format(datetime.datetime.now().strftime('%y%m%d%H%M%S')) -pdff = PdfPages(pdf_fn) - -for row_i, row in db_df.iterrows(): - # if row['roi_area'] >= area_lim and row['skew_fil'] >= 0.5: - if row['roi_area'] >= area_lim: - - print('{}_{}; {}; {}'.format(row['date'], row['mouse_id'], row['plane_n'], row['roi_n'])) - - nwb_fn = '{}_{}_110.nwb'.format(row['date'], row['mouse_id']) - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - - f = dt.roi_page_report(nwb_f=nwb_f, - plane_n=row['plane_n'], - roi_n=row['roi_n'], - params=params, - plot_params=plot_params) - nwb_f.close() - pdff.savefig(f) - f.clear() - plt.close() - -pdff.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/0060_single_page_report.py b/corticalmapping/scripts/post_recording/00_old/analysis_database/0060_single_page_report.py deleted file mode 100644 index 07db7e8..0000000 --- a/corticalmapping/scripts/post_recording/00_old/analysis_database/0060_single_page_report.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import h5py -import corticalmapping.DatabaseTools as dt -import matplotlib.pyplot as plt - -nwb_folder = 'nwbs' -nwb_fn = "190326_M441626_110.nwb" - -plane_n = 'plane2' -roi_n = 'roi_0001' - -analysis_params = dt.ANALYSIS_PARAMS -plot_params = dt.PLOTTING_PARAMS - -analysis_params['trace_type'] = 'f_center_raw' -analysis_params['response_window_dgc'] = [0.5, 1.5] -analysis_params['baseline_window_dgc'] = [-0.5, 0.5] -plot_params['sftf_vmax'] = 6 -plot_params['sftf_vmin'] = -6 - -curr_folder = 
os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') -f = dt.roi_page_report(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n, params=analysis_params, plot_params=plot_params) -nwb_f.close() - -plt.show() - -f.savefig('{}_{}_{}.pdf'.format(os.path.splitext(nwb_fn)[0], plane_n, roi_n)) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/0070_get_rf_maps.py b/corticalmapping/scripts/post_recording/00_old/analysis_database/0070_get_rf_maps.py deleted file mode 100644 index 0c992cd..0000000 --- a/corticalmapping/scripts/post_recording/00_old/analysis_database/0070_get_rf_maps.py +++ /dev/null @@ -1,194 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import numpy as np -import h5py -# import time -import pandas as pd -import corticalmapping.DatabaseTools as dt -import corticalmapping.core.ImageAnalysis as ia - -table_name = 'big_roi_table_190423135026.xlsx' -sheet_name = 'f_center_subtracted' -nwb_folder = 'nwbs' -save_folder = "temp_xlsx" - -response_dir = 'pos' -skew_thr = 0.6 -trace_bias = 1. -rf_peak_z_thr = 1.3 -gaussian_filter_sigma = 1. # float, in pixels, filtering sigma for z-score receptive fields -interpolate_rate = 10. # float, interpolate rate of filtered z-score maps -response_window = [0., 0.5] - -rf_map_fn = 'rf_maps.hdf5' -notes = ''' - zscore receptive field maps of all significant rois. Spatial temporal receptive fields - are first converted to df/f. Then 2-d zscore maps are generated. Then the zscore maps are - 2d filtered for smoothing and interpolated to higher resolution. After preprocessing, if the - peak value of zscore is larger than the threshold, the receptive field will be considered - as significant. 
- ''' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -rf_map_f = h5py.File(os.path.join(curr_folder, rf_map_fn)) -if 'notes' not in rf_map_f.keys(): - rf_map_f['notes'] = notes - -table_path = os.path.join(curr_folder, 'intermediate_results', table_name) -df = pd.read_excel(table_path, sheetname=sheet_name) -subdf = df[np.logical_not(df['rf_pos_on_peak_z'].isnull())] -subdf = subdf[subdf['skew_fil'] >= skew_thr] - -df_nwb = subdf[['date', 'mouse_id']].drop_duplicates() -nwb_fns_total = ['{}_{}_110_repacked'.format(r['date'], r['mouse_id']) for r_i, r in df_nwb.iterrows()] -print('all nwb files:') -print('\n'.join(nwb_fns_total)) - -# nwb_fns_saved = [gn[0:14] + '_110_repacked' for gn in rf_map_f.keys() if len(gn) >= 14] -# print('\nsaved nwb files:') -# print('\n'.join(nwb_fns_saved)) -# -# nwb_fns_todo = [] -# for nwb_fn in nwb_fns_total: -# if nwb_fn not in nwb_fns_saved: -# nwb_fns_todo.append(nwb_fn) -# print('\ntodo nwb files:') -# print('\n'.join(nwb_fns_todo)) - -for nwb_fn in nwb_fns_total: - print('\nprocessing {} ...'.format(nwb_fn)) - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn + '.nwb')) - localdf = subdf[subdf['date'] == int(nwb_fn[0:6])] - localdf = localdf[localdf['mouse_id'] == nwb_fn[7:14]] - - # ====================================ON============================================================= - localdfon = localdf[localdf['rf_{}_on_peak_z'.format(response_dir)] >= rf_peak_z_thr].reset_index() - print('\tnumber of rois with significant ON RFs: {}'.format(len(localdfon))) - - for roi_i, roi_row in localdfon.iterrows(): - - # get the saving group - group_n = '{}_{}_{}_ON'.format(roi_row['date'], roi_row['mouse_id'], roi_row['plane_n']) - if group_n in rf_map_f.keys(): - local_grp = rf_map_f[group_n] - else: - local_grp = rf_map_f.create_group(name=group_n) - local_grp.attrs['trace_type'] = sheet_name - local_grp.attrs['response_dir'] = response_dir - local_grp.attrs['skew_thr'] = 0.6 - local_grp.attrs['trace_bias'] = trace_bias - local_grp.attrs['rf_peak_z_thr'] = rf_peak_z_thr - local_grp.attrs['gaussian_filter_sigma'] = gaussian_filter_sigma - local_grp.attrs['interpolation_rate'] = interpolate_rate - local_grp.attrs['response_window'] = response_window - - print('\t\tprocessing roi {}/{} ...'.format(roi_i + 1, len(localdfon))) - - if roi_row['roi_n'] not in local_grp.keys(): - - # get constant to add to trace - trace, _ = dt.get_single_trace(nwb_f=nwb_f, - plane_n=roi_row['plane_n'], - roi_n=roi_row['roi_n'], - trace_type=sheet_name) - if np.min(trace) < trace_bias: - add_to_trace = -np.min(trace) + trace_bias - else: - add_to_trace = 0. 
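The if/else above implements a simple rule: shift the whole trace up just enough that its minimum sits at trace_bias, so that the downstream dF/F conversion never divides by a zero or negative baseline. The same rule restated as a standalone helper (the function name is ours, not part of DatabaseTools):

import numpy as np

def get_additive_bias(trace, trace_bias=1.):
    """Constant to add so that the shifted trace minimum equals trace_bias
    (or zero if the trace already stays above trace_bias)."""
    trace_min = np.min(trace)
    if trace_min < trace_bias:
        return -trace_min + trace_bias
    return 0.

print(get_additive_bias(np.array([-0.4, 0.2, 1.5])))  # 1.4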
- - # get strf - curr_strf = dt.get_strf(nwb_f=nwb_f, - plane_n=roi_row['plane_n'], - roi_ind=int(roi_row['roi_n'][-4:]), - trace_type='sta_'+sheet_name) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, - add_to_trace=add_to_trace) - curr_srf, _ = curr_strf_dff.get_zscore_receptive_field(timeWindow=response_window) - if response_dir == 'pos': - polarity = 'positive' - elif response_dir == 'neg': - polarity = 'negative' - else: - raise ValueError('polarity ({}) should be either "pos" or "neg".'.format(response_dir)) - _, _, _, rf_mask = dt.get_rf_properties(srf=curr_srf, - polarity=polarity, - sigma=gaussian_filter_sigma, - interpolate_rate=interpolate_rate, - z_thr=rf_peak_z_thr) - roi_grp = local_grp.create_group(name=roi_row['roi_n']) - ia.WeightedROI(mask=rf_mask).to_h5_group(roi_grp) - if 'alts_deg' not in local_grp.attrs: - local_grp.attrs['alts_deg'] = curr_srf.altPos - if 'azis_deg' not in local_grp.attrs: - local_grp.attrs['azis_deg'] = curr_srf.aziPos # store azimuth positions under their own key - else: - print('\t\t\tAlready exists. Skip.') - - - # ====================================OFF======================================================== - localdfoff = localdf[localdf['rf_{}_off_peak_z'.format(response_dir)] >= rf_peak_z_thr].reset_index() - print('\n\tnumber of rois with significant OFF RFs: {}'.format(len(localdfoff))) - - for roi_i, roi_row in localdfoff.iterrows(): - - # get the saving group - group_n = '{}_{}_{}_OFF'.format(roi_row['date'], roi_row['mouse_id'], roi_row['plane_n']) - if group_n in rf_map_f.keys(): - local_grp = rf_map_f[group_n] - else: - local_grp = rf_map_f.create_group(name=group_n) - local_grp.attrs['trace_type'] = sheet_name - local_grp.attrs['response_dir'] = response_dir - local_grp.attrs['skew_thr'] = 0.6 - local_grp.attrs['trace_bias'] = trace_bias - local_grp.attrs['rf_peak_z_thr'] = rf_peak_z_thr - local_grp.attrs['gaussian_filter_sigma'] = gaussian_filter_sigma - local_grp.attrs['interpolation_rate'] = interpolate_rate - local_grp.attrs['response_window'] = response_window - - print('\t\tprocessing roi {}/{} ...'.format(roi_i + 1, len(localdfoff))) - - if roi_row['roi_n'] not in local_grp.keys(): - - # get constant to add to trace - trace, _ = dt.get_single_trace(nwb_f=nwb_f, - plane_n=roi_row['plane_n'], - roi_n=roi_row['roi_n'], - trace_type=sheet_name) - if np.min(trace) < trace_bias: - add_to_trace = -np.min(trace) + trace_bias - else: - add_to_trace = 0. - - # get strf - curr_strf = dt.get_strf(nwb_f=nwb_f, - plane_n=roi_row['plane_n'], - roi_ind=int(roi_row['roi_n'][-4:]), - trace_type='sta_' + sheet_name) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, - add_to_trace=add_to_trace) - _, curr_srf = curr_strf_dff.get_zscore_receptive_field(timeWindow=response_window) - if response_dir == 'pos': - polarity = 'positive' - elif response_dir == 'neg': - polarity = 'negative' - else: - raise ValueError('polarity ({}) should be either "pos" or "neg".'.format(response_dir)) - _, _, _, rf_mask = dt.get_rf_properties(srf=curr_srf, - polarity=polarity, - sigma=gaussian_filter_sigma, - interpolate_rate=interpolate_rate, - z_thr=rf_peak_z_thr) - roi_grp = local_grp.create_group(name=roi_row['roi_n']) - ia.WeightedROI(mask=rf_mask).to_h5_group(roi_grp) - if 'alts_deg' not in local_grp.attrs: - local_grp.attrs['alts_deg'] = curr_srf.altPos - if 'azis_deg' not in local_grp.attrs: - local_grp.attrs['azis_deg'] = curr_srf.aziPos # store azimuth positions under their own key - else: - print('\t\t\tAlready exists. 
Skip.') - - \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/0080_plot_plane_rf_center.py b/corticalmapping/scripts/post_recording/00_old/analysis_database/0080_plot_plane_rf_center.py deleted file mode 100644 index d849ca4..0000000 --- a/corticalmapping/scripts/post_recording/00_old/analysis_database/0080_plot_plane_rf_center.py +++ /dev/null @@ -1,262 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import numpy as np -import matplotlib.pyplot as plt -import pandas as pd -from matplotlib.backends.backend_pdf import PdfPages -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.DatabaseTools as dt - -table_name = 'big_roi_table_test.xlsx' -sheet_name = 'f_center_subtracted' - -response_dir = 'pos' -skew_thr = 0.6 -rf_peak_z_thr = 1.6 - -save_fn = 'plane_rf_centers.pdf' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -table_path = os.path.join(curr_folder, table_name) -df = pd.read_excel(table_path, sheetname=sheet_name) -subdf = df[df['skew_fil'] >= skew_thr] - -planes = subdf[['date', 'mouse_id', 'plane_n', 'depth']].drop_duplicates().reset_index() -print(planes) - -pdff = PdfPages(os.path.join('intermediate_figures', save_fn)) - -for plane_i, plane_row in planes.iterrows(): - - print('plotting {}_{}_{}, {} / {}'.format( - plane_row['date'], - plane_row['mouse_id'], - plane_row['plane_n'], - plane_i + 1, - len(planes))) - - planedf = subdf[(subdf['date'] == plane_row['date']) & \ - (subdf['mouse_id'] == plane_row['mouse_id']) & \ - (subdf['plane_n'] == plane_row['plane_n']) & \ - (subdf['depth'] == plane_row['depth'])] - - df_or = planedf[planedf['rf_{}_onoff_peak_z'.format(response_dir)] >= rf_peak_z_thr] - df_and = planedf[(planedf['rf_{}_on_peak_z'.format(response_dir)] >= rf_peak_z_thr) & \ - (planedf['rf_{}_off_peak_z'.format(response_dir)] >= rf_peak_z_thr)] - df_on = planedf[planedf['rf_{}_on_peak_z'.format(response_dir)] >= rf_peak_z_thr].drop(df_and.index) - df_off = planedf[planedf['rf_{}_off_peak_z'.format(response_dir)] >= rf_peak_z_thr].drop(df_and.index) - - df_or = df_or.reset_index() - df_and = df_and.reset_index() - df_on = df_on.reset_index() - df_off = df_off.reset_index() - - if len(df_or) == 0: - print('no significant receptive fields, 
skip.') - else: - print('\tnumber of rois with significant rf: {}'.format(len(df_or))) - print('\tnumber of rois with S1 ON: {}'.format(len(df_on))) - print('\tnumber of rois with S1 OFF: {}'.format(len(df_off))) - print('\tnumber of rois with S2 ON/OFF: {}'.format(len(df_and))) - - f = plt.figure(figsize=(11, 8.5)) - - f.suptitle('{}_{}_{}; {} um'.format(plane_row['date'], - plane_row['mouse_id'], - plane_row['plane_n'], - plane_row['depth'])) - - #=============================RF center============================================= - # ON/OFF - alt_min = int(np.min(df_or['rf_{}_onoff_center_alt'.format(response_dir)]) - 5) - alt_max = int(np.max(df_or['rf_{}_onoff_center_alt'.format(response_dir)]) + 5) - azi_min = int(np.min(df_or['rf_{}_onoff_center_azi'.format(response_dir)]) - 5) - azi_max = int(np.max(df_or['rf_{}_onoff_center_azi'.format(response_dir)]) + 5) - ax_or_scatter = f.add_subplot(4, 5, 1) - ax_or_scatter.plot(df_or['rf_{}_onoff_center_azi'.format(response_dir)], - df_or['rf_{}_onoff_center_alt'.format(response_dir)], - '.', color='#888888') - ax_or_scatter.set_xlim([azi_min, azi_max]) - ax_or_scatter.set_ylim([alt_min, alt_max]) - ax_or_scatter.set_title('RF center') - - # ON - ax_on_scatter = f.add_subplot(4, 5, 6) - ax_on_scatter.plot(df_off['rf_{}_off_center_azi'.format(response_dir)], - df_off['rf_{}_off_center_alt'.format(response_dir)], - '.', color='#aaaaaa') - ax_on_scatter.plot(df_on['rf_{}_on_center_azi'.format(response_dir)], - df_on['rf_{}_on_center_alt'.format(response_dir)], - '.', color='#ff0000') - ax_on_scatter.set_xlim([azi_min, azi_max]) - ax_on_scatter.set_ylim([alt_min, alt_max]) - - # OFF - ax_off_scatter = f.add_subplot(4, 5, 11) - ax_off_scatter.plot(df_on['rf_{}_on_center_azi'.format(response_dir)], - df_on['rf_{}_on_center_alt'.format(response_dir)], - '.', color='#aaaaaa') - ax_off_scatter.plot(df_off['rf_{}_off_center_azi'.format(response_dir)], - df_off['rf_{}_off_center_alt'.format(response_dir)], - '.', color='#0000ff') - ax_off_scatter.set_xlim([azi_min, azi_max]) - ax_off_scatter.set_ylim([alt_min, alt_max]) - - # ON-OFF - ax_and_scatter = f.add_subplot(4, 5, 16) - ax_and_scatter.plot(df_and['rf_{}_on_center_azi'.format(response_dir)], - df_and['rf_{}_on_center_alt'.format(response_dir)], - '.', color='#ff0000') - ax_and_scatter.plot(df_and['rf_{}_off_center_azi'.format(response_dir)], - df_and['rf_{}_off_center_alt'.format(response_dir)], - '.', color='#0000ff') - ax_and_scatter.set_xlim([azi_min, azi_max]) - ax_and_scatter.set_ylim([alt_min, alt_max]) - - # =============================pairwise distance============================================= - dis_or = ia.pairwise_distance(df_or[['rf_{}_onoff_center_azi'.format(response_dir), - 'rf_{}_onoff_center_alt'.format(response_dir)]].values) - ax_or_pd = f.add_subplot(4, 5, 2) - if len(dis_or) > 0: - ax_or_pd.hist(dis_or, range=[0, 80], bins=20, facecolor='#aaaaaa', edgecolor='none') - ax_or_pd.get_yaxis().set_ticks([]) - ax_or_pd.set_title('pw RF dis') # pairwise receptive field center distance - - dis_on = ia.pairwise_distance(df_on[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values) - ax_on_pd = f.add_subplot(4, 5, 7) - if len(dis_on) > 0: - ax_on_pd.hist(dis_on, range=[0, 80], bins=20, facecolor='#ff0000', edgecolor='none') - ax_on_pd.get_yaxis().set_ticks([]) - - dis_off = ia.pairwise_distance(df_off[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values) - ax_off_pd = f.add_subplot(4, 5, 12) - 
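# Layout reminder for this 4 x 5 figure: rows correspond to RF groups
# (ON/OFF union, ON only, OFF only, ON-and-OFF), and columns to RF-center
# scatter, pairwise RF-center distance, pairwise magnification (mm/deg),
# altitude map, and azimuth map; the ON-and-OFF row fills only the first
# three columns.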
if len(dis_off) > 0: - ax_off_pd.hist(dis_off, range=[0, 80], bins=20, facecolor='#0000ff', edgecolor='none') - ax_off_pd.get_yaxis().set_ticks([]) - - dis_and_on = ia.pairwise_distance(df_and[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values) - dis_and_off = ia.pairwise_distance(df_and[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values) - ax_and_pd = f.add_subplot(4, 5, 17) - if len(dis_and_on) > 0: - ax_and_pd.hist(dis_and_on, range=[0, 80], bins=20, facecolor='#ff0000', edgecolor='none', alpha=0.5) - ax_and_pd.hist(dis_and_off, range=[0, 80], bins=20, facecolor='#0000ff', edgecolor='none', alpha=0.5) - ax_and_pd.get_yaxis().set_ticks([]) - - # =============================pairwise magnification============================================= - mag_or = ia.pairwise_magnification(df_or[['rf_{}_onoff_center_azi'.format(response_dir), - 'rf_{}_onoff_center_alt'.format(response_dir)]].values, - df_or[['roi_center_col', 'roi_center_row']].values) - ax_or_pm = f.add_subplot(4, 5, 3) - if len(mag_or) > 0: - mag_or = 0.00035 / mag_or # 0.35 um per pixel - ax_or_pm.hist(mag_or, range=[0, 0.2], bins=20, facecolor='#aaaaaa', edgecolor='none') - ax_or_pm.get_yaxis().set_ticks([]) - ax_or_pm.set_title('mm/deg') # pairwise magnification - # - mag_on = ia.pairwise_magnification(df_on[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values, - df_on[['roi_center_col', 'roi_center_row']].values) - ax_on_pm = f.add_subplot(4, 5, 8) - if len(mag_on) > 0: - mag_on = 0.00035 / mag_on # 0.35 um per pixel - ax_on_pm.hist(mag_on, range=[0, 0.2], bins=20, facecolor='#ff0000', edgecolor='none') - ax_on_pm.get_yaxis().set_ticks([]) - - mag_off = ia.pairwise_magnification(df_off[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values, - df_off[['roi_center_col', 'roi_center_row']].values) - ax_off_pm = f.add_subplot(4, 5, 13) - if len(mag_off) > 0: - mag_off = 0.00035 / mag_off # 0.35 um per pixel - ax_off_pm.hist(mag_off, range=[0, 0.2], bins=20, facecolor='#0000ff', edgecolor='none') - ax_off_pm.get_yaxis().set_ticks([]) - - mag_and_on = ia.pairwise_magnification(df_and[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values, - df_and[['roi_center_col', 'roi_center_row']].values) - - mag_and_off = ia.pairwise_magnification(df_and[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values, - df_and[['roi_center_col', 'roi_center_row']].values) - - ax_and_pm = f.add_subplot(4, 5, 18) - if len(mag_and_on) > 0: - mag_and_on = 0.00035 / mag_and_on # 0.35 um per pixel - mag_and_off = 0.00035 / mag_and_off # 0.35 um per pixel - ax_and_pm.hist(mag_and_on, range=[0, 0.2], bins=20, facecolor='#ff0000', edgecolor='none', alpha=0.5,) - ax_and_pm.hist(mag_and_off, range=[0, 0.2], bins=20, facecolor='#0000ff', edgecolor='none', alpha=0.5,) - ax_and_pm.get_yaxis().set_ticks([]) - - # =============================azi alt spatial distribution============================================= - ax_alt_or = f.add_subplot(4, 5, 4) - ax_alt_or.set_title('altitude') - ax_azi_or = f.add_subplot(4, 5, 5) - ax_azi_or.set_title('azimuth') - if len(df_or) > 0: - dt.plot_roi_retinotopy(coords_rf=df_or[['rf_{}_onoff_center_alt'.format(response_dir), - 'rf_{}_onoff_center_azi'.format(response_dir)]].values, - coords_roi=df_or[['roi_center_row', 'roi_center_col']].values, - 
ax_alt=ax_alt_or, - ax_azi=ax_azi_or, - cmap='viridis', - canvas_shape=(512, 512), - edgecolors='#000000', - linewidth=0.5) - else: - ax_alt_or.set_xticks([]) - ax_alt_or.set_yticks([]) - ax_azi_or.set_xticks([]) - ax_azi_or.set_yticks([]) - - ax_alt_on = f.add_subplot(4, 5, 9) - ax_azi_on = f.add_subplot(4, 5, 10) - if len(df_on) > 0: - dt.plot_roi_retinotopy(coords_rf=df_on[['rf_{}_on_center_alt'.format(response_dir), - 'rf_{}_on_center_azi'.format(response_dir)]].values, - coords_roi=df_on[['roi_center_row', 'roi_center_col']].values, - ax_alt=ax_alt_on, - ax_azi=ax_azi_on, - cmap='viridis', - canvas_shape=(512, 512), - edgecolors='#000000', - linewidth=0.5) - else: - ax_alt_on.set_xticks([]) - ax_alt_on.set_yticks([]) - ax_azi_on.set_xticks([]) - ax_azi_on.set_yticks([]) - - ax_alt_off = f.add_subplot(4, 5, 14) - ax_azi_off = f.add_subplot(4, 5, 15) - if len(df_off) > 0: - dt.plot_roi_retinotopy(coords_rf=df_off[['rf_{}_off_center_alt'.format(response_dir), - 'rf_{}_off_center_azi'.format(response_dir)]].values, - coords_roi=df_off[['roi_center_row', 'roi_center_col']].values, - ax_alt=ax_alt_off, - ax_azi=ax_azi_off, - cmap='viridis', - canvas_shape=(512, 512), - edgecolors='#000000', - linewidth=0.5) - else: - ax_alt_off.set_xticks([]) - ax_alt_off.set_yticks([]) - ax_azi_off.set_xticks([]) - ax_azi_off.set_yticks([]) - - # plt.tight_layout() - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - -pdff.close() - -print('for debug ...') diff --git a/corticalmapping/scripts/post_recording/00_old/analysis_database/h5repack.exe b/corticalmapping/scripts/post_recording/00_old/analysis_database/h5repack.exe deleted file mode 100644 index 3ee5da8..0000000 Binary files a/corticalmapping/scripts/post_recording/00_old/analysis_database/h5repack.exe and /dev/null differ diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/000_reorganize_data.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/000_reorganize_data.py deleted file mode 100644 index eabb2e6..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/000_reorganize_data.py +++ /dev/null @@ -1,95 +0,0 @@ -import os -import numpy as np -import tifffile as tf - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180502-M376019-2p\02" - -identifier = '02_' -channels = ['green', 'red'] -plane_num = 5 -temporal_downsample_rate = 2 -frame_each_file = 2000 -low_thr = -500 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -fns = np.array([f for f in os.listdir(data_folder) if f[-4:] == '.tif' and identifier in f]) -f_nums = [int(os.path.splitext(fn)[0].split('_')[1]) for fn in fns] -fns = fns[np.argsort(f_nums)] -print('total file number: {}'.format(len(fns))) - -# print('\n'.join(fns)) - -save_folders = [] -for i in range(plane_num): - curr_save_folder = os.path.join(data_folder, identifier, 'plane{}'.format(i)) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - save_folders.append(curr_save_folder) - -# frame_per_plane = len(fns) // plane_num -for plane_ind in range(plane_num): - print('\nprocessing plane: {}'.format(plane_ind)) - curr_fns = fns[plane_ind::plane_num] - - total_frames_down = len(curr_fns) // temporal_downsample_rate - curr_fns = curr_fns[: total_frames_down * temporal_downsample_rate].reshape((total_frames_down, temporal_downsample_rate)) - - print(curr_fns.shape) - - print('current file 
ind: 000') - curr_file_ind = 0 - curr_frame_ind = 0 - curr_mov = {} - for ch_n in channels: - curr_mov.update({ch_n : []}) - - for fgs in curr_fns: - - frame_grp = [] - - for fn in fgs: - cf = tf.imread(os.path.join(data_folder, fn)) - # remove extreme negative pixels - cf[cf < low_thr] = low_thr - frame_grp.append(cf) - - curr_frame = {} - - for ch_i, ch_n in enumerate(channels): - ch_frame_grp = np.array([f[ch_i::len(channels)][0] for f in frame_grp]) - # print ch_frame_grp.shape - ch_frame = np.mean(ch_frame_grp, axis=0).astype(np.int16) - ch_frame = ch_frame.transpose()[::-1, ::-1] - curr_frame.update({ch_n: ch_frame}) - - if curr_frame_ind < frame_each_file: - - for ch_n in channels: - curr_mov[ch_n].append(curr_frame[ch_n]) - - curr_frame_ind = curr_frame_ind + 1 - - else: - for ch_n in channels: - curr_mov_ch = np.array(curr_mov[ch_n], dtype=np.int16) - save_name = 'plane{}_{:03d}.tif'.format(plane_ind, curr_file_ind) - save_folder_ch = os.path.join(save_folders[plane_ind], ch_n) - if not os.path.isdir(save_folder_ch): - os.makedirs(save_folder_ch) - tf.imsave(os.path.join(save_folder_ch, save_name), curr_mov_ch) - curr_mov[ch_n] = [curr_frame[ch_n]] - print('current file ind: {:03d}; channel: {}'.format(curr_file_ind, ch_n)) - curr_file_ind += 1 - curr_frame_ind = 1 - - for ch_n in channels: - curr_mov_ch = np.array(curr_mov[ch_n], dtype=np.int16) - save_name = 'plane{}_{:03d}.tif'.format(plane_ind, curr_file_ind) - save_folder_ch = os.path.join(save_folders[plane_ind], ch_n) - if not os.path.isdir(save_folder_ch): - os.makedirs(save_folder_ch) - tf.imsave(os.path.join(save_folder_ch, save_name), curr_mov_ch) - print('current file ind: {:03d}; channel: {}'.format(curr_file_ind, ch_n)) - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/001_get_vasculature_map.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/001_get_vasculature_map.py deleted file mode 100644 index d7507db..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/001_get_vasculature_map.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import skimage.io as io -import matplotlib.pyplot as plt -import corticalmapping.core.ImageAnalysis as ia - -vasmap_wf_path = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180502-M376019-deepscope\Widefield.tif" - -vasmap_2p_zoom1_path = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180502-M376019-deepscope\01\01_00001.tif" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -vasmap_wf = io.imread(vasmap_wf_path, as_grey=True) -vasmap_wf = vasmap_wf.transpose()[::-1, ::-1] - -vasmap_2p_zoom1 = tf.imread(vasmap_2p_zoom1_path).astype(np.float32) -vasmap_2p_zoom1 = np.mean(vasmap_2p_zoom1, axis=0) -vasmap_2p_zoom1 = vasmap_2p_zoom1.transpose()[::-1, ::-1] - -f = plt.figure(figsize=(12, 5)) -ax_wf = f.add_subplot(121) -ax_wf.imshow(ia.array_nor(vasmap_wf), vmin=0., vmax=1., cmap='gray', interpolation='nearest') -ax_wf.set_title('vasmap wide field') -ax_wf.set_axis_off() -ax_2p = f.add_subplot(122) -ax_2p.imshow(ia.array_nor(vasmap_2p_zoom1), vmin=0., vmax=0.15, cmap='gray', interpolation='nearest') -ax_2p.set_title('vasmap 2p zoom1') -ax_2p.set_axis_off() - -plt.show() - -tf.imsave('vasmap_wf.tif', vasmap_wf) -tf.imsave('vasmap_2p_zoom1.tif', vasmap_2p_zoom1) \ No newline at end of file diff --git 
a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/010_motion_correction.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/010_motion_correction.py deleted file mode 100644 index 1eac374..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/010_motion_correction.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import stia.motion_correction as mc - -def correct(data_folder): - # data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - # r"\180104-M361012-2p\FOV1_injection_site_00001" - ref_ch_n = 'red' - apply_ch_ns = ['green', 'red'] - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - ref_data_folder = os.path.join(data_folder, ref_ch_n) - - mc.motion_correction(input_folder=ref_data_folder, - input_path_identifier='.tif', - process_num=3, - output_folder=os.path.join(ref_data_folder, 'corrected'), - anchor_frame_ind_chunk=100, - anchor_frame_ind_projection=0, - iteration_chunk=10, - iteration_projection=10, - max_offset_chunk=(50., 50.), - max_offset_projection=(50., 50.), - align_func=mc.phase_correlation, - preprocessing_type=0, - fill_value=0.) - - offsets_path = os.path.join(ref_data_folder, 'corrected', 'correction_offsets.hdf5') - ref_fns = [f for f in os.listdir(ref_data_folder) if f[-4:] == '.tif'] - ref_fns.sort() - ref_paths = [os.path.join(ref_data_folder, f) for f in ref_fns] - print('\nreference paths:') - print('\n'.join(ref_paths)) - - for apply_ch_i, apply_ch_n in enumerate(apply_ch_ns): - apply_data_folder = os.path.join(data_folder, apply_ch_n) - apply_fns = [f for f in os.listdir(apply_data_folder) if f[-4:] == '.tif'] - apply_fns.sort() - apply_paths = [os.path.join(apply_data_folder, f) for f in apply_fns] - print('\napply paths:') - print('\n'.join(apply_paths)) - - mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=zip(ref_paths, apply_paths), - output_folder=os.path.join(apply_data_folder, 'corrected'), - process_num=3, - fill_value=0., - avi_downsample_rate=20, - is_equalizing_histogram=False) - -def run(): - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180502-M376019-deepscope\02\02_" - - plane_folders = [f for f in os.listdir(data_folder) if f[0:5] == 'plane' and - os.path.isdir(os.path.join(data_folder, f))] - plane_folders.sort() - print('folders to be corrected:') - print('\n'.join(plane_folders)) - - for plane_folder in plane_folders: - correct(os.path.join(data_folder, plane_folder)) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/020_downsample_from_server.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/020_downsample_from_server.py deleted file mode 100644 index 8457d28..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/020_downsample_from_server.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -import numpy as np -import tifffile as tf - -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180309-M360495-deepscope\03\03_" -xy_downsample_rate = 2 -t_downsample_rate = 10 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) 
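ia.z_downsample is used below to reduce the frame rate; presumably it averages every downSampleRate consecutive frames. A NumPy equivalent under that assumption (the helper name is ours):

import numpy as np

def downsample_t(mov, rate):
    """Average every `rate` consecutive frames; the remainder is dropped."""
    n_keep = (mov.shape[0] // rate) * rate
    return mov[:n_keep].reshape(-1, rate, *mov.shape[1:]).mean(axis=1)

mov = np.random.rand(25, 8, 8).astype(np.float32)
print(downsample_t(mov, 10).shape)  # (2, 8, 8)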
-os.chdir(curr_folder) - -plane_ns = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f)) and f[:5] == 'plane'] -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('\nprocessing plane: {}'.format(plane_n)) - plane_folder = os.path.join(data_folder, plane_n, 'green', 'corrected') - - f_ns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - f_ns.sort() - print('\n'.join(f_ns)) - - mov_d = [] - - for f_n in f_ns: - print('processing {} ...'.format(f_n)) - curr_mov = tf.imread(os.path.join(plane_folder, f_n)) - curr_mov_d = ia.rigid_transform_cv2(img=curr_mov, zoom=(1. / xy_downsample_rate)) - curr_mov_d = ia.z_downsample(curr_mov_d, downSampleRate=t_downsample_rate) - mov_d.append(curr_mov_d) - - mov_d = np.concatenate(mov_d, axis=0) - save_n = os.path.split(data_folder)[1] + '_' + plane_n + '_downsampled.tif' - tf.imsave(os.path.join(plane_n, save_n), mov_d) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/030_get_movie_data.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/030_get_movie_data.py deleted file mode 100644 index 00bc199..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/030_get_movie_data.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -import h5py -import numpy as np -import skimage.external.tifffile as tf - -file_prefix = '180309_M360495_03' -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180309-M360495-deepscope\03\03_" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -plane_fns = [f for f in os.listdir(data_folder) if f[:5] == 'plane'] -plane_fns.sort() -print('\n'.join(plane_fns)) - -data_f = h5py.File(file_prefix + '_2p_movies.hdf5') - -for plane_fn in plane_fns: - print('\nprocessing {} ...'.format(plane_fn)) - plane_folder = os.path.join(data_folder, plane_fn, 'green', 'corrected') - mov_fns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - mov_fns.sort() - print('\n'.join(mov_fns)) - - frame_num_tot = 0 - x = None - y = None - z = 0 - for mov_fn in mov_fns: - print('reading {} ...'.format(mov_fn)) - curr_z, curr_y, curr_x = tf.imread(os.path.join(plane_folder, mov_fn)).shape - - if y is None: - y = curr_y - else: - if y != curr_y: - raise ValueError('y dimension ({}) of file "{}" does not agree with previous file(s) ({}).' - .format(curr_y, mov_fn, y)) - - if x is None: - x = curr_x - else: - if x != curr_x: - raise ValueError('x dimension ({}) of file "{}" does not agree with previous file(s) ({}).' - .format(curr_x, mov_fn, x)) - - z = z + curr_z - - dset = data_f.create_dataset(plane_fn, (z, y, x), dtype=np.int16, compression='lzf') - - start_frame = 0 - end_frame = 0 - for mov_fn in mov_fns: - print('reading {} ...'.format(mov_fn)) - curr_mov = tf.imread(os.path.join(plane_folder, mov_fn)) - end_frame = start_frame + curr_mov.shape[0] - dset[start_frame : end_frame] = curr_mov - start_frame = end_frame - - dset.attrs['conversion'] = 1. - dset.attrs['resolution'] = 1. 
- dset.attrs['unit'] = 'arbitrary_unit' - -data_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/045_get_mmap_files_for_caiman_from_tiff.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/045_get_mmap_files_for_caiman_from_tiff.py deleted file mode 100644 index 39e6792..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/045_get_mmap_files_for_caiman_from_tiff.py +++ /dev/null @@ -1,54 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import h5py - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180309-M360495-deepscope\03\03_" -base_name = '180309_M360495_03' -t_downsample_rate = 2 - -plane_ns = [p for p in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, p))] -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('\nprocessing {} ...'.format(plane_n)) - - plane_folder = os.path.join(data_folder, plane_n, 'green', 'corrected') - os.chdir(plane_folder) - - f_ns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - f_ns.sort() - print('\n'.join(f_ns)) - - mov_join = [] - for f_n in f_ns: - - curr_mov = tf.imread(os.path.join(plane_folder, f_n)) - - if curr_mov.shape[0] % t_downsample_rate != 0: - print('the frame number of {} ({}) is not divisible by t_downsample_rate ({}).' - .format(f_n, curr_mov.shape[0], t_downsample_rate)) - - curr_mov_d = ia.z_downsample(curr_mov, downSampleRate=t_downsample_rate) - mov_join.append(curr_mov_d) - - mov_join = np.concatenate(mov_join, axis=0) - add_to_mov = 10 - np.amin(mov_join) - - save_name = '{}_d1_{}_d2_{}_d3_1_order_C_frames_{}_.mmap'\ - .format(base_name, mov_join.shape[2], mov_join.shape[1], mov_join.shape[0]) - - mov_join = mov_join.reshape((mov_join.shape[0], mov_join.shape[1] * mov_join.shape[2]), order='F').transpose() - mov_join_mmap = np.memmap(os.path.join(plane_folder, save_name), shape=mov_join.shape, order='C', - dtype=np.float32, mode='w+') - mov_join_mmap[:] = mov_join + add_to_mov - mov_join_mmap.flush() - del mov_join_mmap - - save_file = h5py.File(os.path.join(plane_folder, 'caiman_segmentation_results.hdf5')) - save_file['bias_added_to_movie'] = add_to_mov - save_file.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/050_show_mmap_movie.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/050_show_mmap_movie.py deleted file mode 100644 index 8752b16..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/050_show_mmap_movie.py +++ /dev/null @@ -1,39 +0,0 @@ -import sys; print('Python %s on %s' % (sys.version, sys.platform)) -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import os -import numpy as np -import caiman as cm - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180309-M360495-deepscope\03\03_" -plane_n = 'plane0' - -plane_folder = os.path.join(data_folder, plane_n, 'green', 'corrected') -os.chdir(plane_folder) - -fn = [f for f in os.listdir(plane_folder) if f[-5:] == '.mmap'] -if len(fn) > 1: - print('\n'.join(fn)) - raise LookupError('more than one file found.') -elif len(fn) == 0: - raise LookupError('no file found.')
-else: - fn = fn[0] - -cm.load(fn).play(fr=30,magnification=1,gain=2.) - -# fn_parts = fn.split('_') -# d1 = int(fn_parts[fn_parts.index('d1') + 1]) # column, x -# d2 = int(fn_parts[fn_parts.index('d2') + 1]) # row, y -# d3 = int(fn_parts[fn_parts.index('d3') + 1]) # channel -# d4 = int(fn_parts[fn_parts.index('frames') + 1]) # frame, T -# order = fn_parts[fn_parts.index('order') + 1] -# -# print('playing {} ...'.format(fn)) -# -# mov = np.memmap(filename=fn, shape=(d1, d2, d4), order=order, dtype=np.float32, mode='r') -# mov = mov.transpose((2, 1, 0)) -# -# cm.movie(mov).play(fr=30,magnification=1,gain=2.) - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/060_caiman_segmentation.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/060_caiman_segmentation.py deleted file mode 100644 index 13ac843..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/060_caiman_segmentation.py +++ /dev/null @@ -1,116 +0,0 @@ -import sys; print('Python %s on %s' % (sys.version, sys.platform)) -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import os -import numpy as np -import caiman as cm -import matplotlib.pyplot as plt -from caiman.source_extraction.cnmf import cnmf as cnmf -import h5py -from shutil import copyfile - -def run(): - - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180309-M360495-deepscope\03\03_" - play_movie = False - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - - plane_ns = [f for f in os.listdir(data_folder) if os.path.isdir(f) and f[:5] == 'plane'] - plane_ns.sort() - print('planes:') - print('\n'.join(plane_ns)) - - # %% start cluster - c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=3, single_thread=False) - - for plane_n in plane_ns: - - print('\nsegmenting plane: {}'.format(plane_n)) - - plane_folder = os.path.join(data_folder, plane_n, 'green', 'corrected') - os.chdir(plane_folder) - - fn = [f for f in os.listdir(plane_folder) if f[-5:] == '.mmap'] - if len(fn) > 1: - print('\n'.join(fn)) - raise LookupError('more than one file found.') - elif len(fn) == 0: - raise LookupError('no file found.') - else: - fn = fn[0] - - fn_parts = fn.split('_') - d1 = int(fn_parts[fn_parts.index('d1') + 1]) # column, x - d2 = int(fn_parts[fn_parts.index('d2') + 1]) # row, y - d3 = int(fn_parts[fn_parts.index('d3') + 1]) # channel - d4 = int(fn_parts[fn_parts.index('frames') + 1]) # frame, T - order = fn_parts[fn_parts.index('order') + 1] - - print('playing {} ...'.format(fn)) - - mov = np.memmap(filename=fn, shape=(d1, d2, d4), order=order, dtype=np.float32, mode='r') - mov = mov.transpose((2, 1, 0)) - - print('shape of joined movie: {}.'.format(mov.shape)) - - #%% play movie, press q to quit - if play_movie: - cm.movie(mov).play(fr=50,magnification=1,gain=2.) - - #%% movie cannot be negative! - mov_min = float(np.amin(mov)) - print('minimum pixel value: {}.'.format(mov_min)) - if mov_min < 0: - raise Exception('Movie too negative, add_to_movie should be larger') - - #%% correlation image. 
From here infer neuron size and density - Cn = cm.movie(mov).local_correlations(swap_dim=False) - plt.imshow(Cn, cmap='gray') - plt.show() - - K = 100 # number of neurons expected per patch - gSig = [5, 5] # expected half size of neurons - merge_thresh = 0.9 # merging threshold, max correlation allowed - p = 2 # order of the autoregressive system - cnm = cnmf.CNMF(n_processes, - k=10, # number of neurons expected per patch - gSig=[5, 5] , # expected half size of neurons - merge_thresh=0.9, # merging threshold, max correlation allowed - p=2, # order of the autoregressive system - dview=dview, - Ain=None, - method_deconvolution='oasis', - rolling_sum = False, - method_init='sparse_nmf', - alpha_snmf=10e1, - ssub=1, - tsub=1, - p_ssub=1, - p_tsub=1, - rf=256, # half-size of the patches in pixels - border_pix=20, - do_merge=False) - cnm = cnm.fit(mov) - A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn - #%% - crd = cm.utils.visualization.plot_contours(cnm.A, Cn) - plt.show() - # input("Press enter to continue ...") - - roi_num = cnm.A.shape[1] - save_fn = h5py.File('caiman_segmentation_results.hdf5') - bias = save_fn['bias_added_to_movie'].value - save_fn['masks'] = np.array(cnm.A.todense()).T.reshape((roi_num, 512, 512), order='F') - save_fn['traces'] = cnm.C - bias - save_fn.close() - - copyfile(os.path.join(plane_folder, 'caiman_segmentation_results.hdf5'), - os.path.join(curr_folder, plane_n, 'caiman_segmentation_results.hdf5')) - - plt.close('all') - - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/070_generate_nwb.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/070_generate_nwb.py deleted file mode 100644 index 3f762ab..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/070_generate_nwb.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import corticalmapping.NwbTools as nt - -date_recorded = '180502' -mouse_id = '376019' -sess_num = '02' - -experimenter = 'Jun' -genotype = 'Vipr2-IRES2-Cre-neo' -sex = 'male' -age = '120' -indicator = 'GCaMP6s' -imaging_rate = 37. 
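A side note on the memmap files consumed by the CaImAn steps above (045, 050, 060): the movie geometry is round-tripped through the file name itself, and the readers recover it by splitting on underscores. A minimal sketch of that convention, with a hypothetical name:

fn = '180309_M360495_03_d1_512_d2_512_d3_1_order_C_frames_2000_.mmap'  # hypothetical example
parts = fn.split('_')
d1 = int(parts[parts.index('d1') + 1])           # columns (x)
d2 = int(parts[parts.index('d2') + 1])           # rows (y)
d3 = int(parts[parts.index('d3') + 1])           # channels
n_frames = int(parts[parts.index('frames') + 1])
order = parts[parts.index('order') + 1]          # memory layout, 'C' here
print(d1, d2, d3, n_frames, order)               # 512 512 1 2000 C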
-imaging_depth = '50/100/150/200/250 microns' -imaging_location = 'visual cortex' -imaging_device = 'DeepScope' -imaging_excitation_lambda = '940 nanometers' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -notebook_path = os.path.join(curr_folder, 'notebook.txt') -with open(notebook_path, 'r') as ff: - notes = ff.read() - -general = nt.DEFAULT_GENERAL -general['experimenter'] = experimenter -general['subject']['subject_id'] = mouse_id -general['subject']['genotype'] = genotype -general['subject']['sex'] = sex -general['subject']['age'] = age -general['optophysiology'].update({'imaging_plane_1': {}}) -general['optophysiology']['imaging_plane_1'].update({'indicator': indicator}) -general['optophysiology']['imaging_plane_1'].update({'imaging_rate': imaging_rate}) -general['optophysiology']['imaging_plane_1'].update({'imaging_depth': imaging_depth}) -general['optophysiology']['imaging_plane_1'].update({'location': imaging_location}) -general['optophysiology']['imaging_plane_1'].update({'device': imaging_device}) -general['optophysiology']['imaging_plane_1'].update({'excitation_lambda': imaging_excitation_lambda}) -general['notes'] = notes - -file_name = date_recorded + '_M' + mouse_id + '_' + sess_num + '.nwb' - -rf = nt.RecordedFile(os.path.join(curr_folder, file_name), identifier=file_name[:-4], description='') -rf.add_general(general=general) -rf.close() - - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/071_add_vasmaps.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/071_add_vasmaps.py deleted file mode 100644 index 1a12531..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/071_add_vasmaps.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import corticalmapping.NwbTools as nt -import matplotlib.pyplot as plt -import tifffile as tf - - -vasmap_name_wf = 'vasmap_wf.tif' -# vasmap_name_2p_zoom1_g = 'vasmap_2p_zoom1_green.tif' -vasmap_name_2p_zoom1_r = 'vasmap_2p_zoom1.tif' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -vasmap_wf = tf.imread(vasmap_name_wf) -# vasmap_2p_zoom1_g = tf.imread(vasmap_name_2p_zoom1_g) -vasmap_2p_zoom1_r = tf.imread(vasmap_name_2p_zoom1_r) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] - -nwb_f = nt.RecordedFile(nwb_fn) -nwb_f.add_acquisition_image('surface_vas_map_wf', vasmap_wf, - description='wide field surface vasculature map through cranial window') -# nwb_f.add_acquisition_image('surface_vas_map_2p_zoom1_green', vasmap_2p_zoom1_g, -# description='2-photon surface vasculature map through cranial window, zoom 1, green') -nwb_f.add_acquisition_image('surface_vas_map_2p_zoom1_red', vasmap_2p_zoom1_r, - description='2-photon surface vasculature map through cranial window, zoom 1, red') -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/080_add_sync_data.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/080_add_sync_data.py deleted file mode 100644 index 0f4f6fa..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/080_add_sync_data.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import corticalmapping.NwbTools as nt - -record_date = '180502' -mouse_id = '376019' -session_id = '02' - -curr_folder = 
os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = record_date + '_M' + mouse_id + '_' + session_id + '.nwb' - -sync_fn = [f for f in os.listdir(curr_folder) if f[-3:] == '.h5' and record_date in f and 'M' + mouse_id in f] -if len(sync_fn) == 0: - raise LookupError('Did not find sync .h5 file.') -elif len(sync_fn) > 1: - raise LookupError('More than one sync .h5 file found.') -else: - sync_fn = sync_fn[0] - -nwb_f = nt.RecordedFile(nwb_fn) -nwb_f.add_sync_data(sync_fn) -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/090_add_image_data.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/090_add_image_data.py deleted file mode 100644 index d403cea..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/090_add_image_data.py +++ /dev/null @@ -1,52 +0,0 @@ -import os -import h5py -import corticalmapping.NwbTools as nt - -dset_ns = ['plane0', 'plane1', 'plane2', 'plane3', 'plane4'] -imaging_depths = [250, 200, 150, 100, 50] -temporal_downsample_rate = 2 -pixel_size = 0.0000002 # meter, 0.2 micron, deepscope 12 kHz scanner, zoom 4 - -description = '2-photon imaging data' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) -ts_2p_tot = nwb_f.file_pointer['/acquisition/timeseries/digital_2p_vsync_rise/timestamps'].value -print('total 2p timestamps count: {}'.format(len(ts_2p_tot))) - -mov_fn = os.path.splitext(nwb_fn)[0] + '_2p_movies.hdf5' -mov_f = h5py.File(mov_fn, 'r') - -for mov_i, mov_dn in enumerate(dset_ns): - - if mov_dn is not None: - - curr_dset = mov_f[mov_dn] - if mov_dn is not None: - mov_ts = ts_2p_tot[mov_i::len(dset_ns)] - print('\n{}: total 2p timestamps count: {}'.format(mov_dn, len(mov_ts))) - - mov_ts_d = mov_ts[::temporal_downsample_rate] - print('{}: downsampled 2p timestamps count: {}'.format(mov_dn, len(mov_ts_d))) - print('{}: downsampled 2p movie frame num: {}'.format(mov_dn, curr_dset.shape[0])) - - if len(mov_ts_d) == curr_dset.shape[0]: - pass - elif len(mov_ts_d) == curr_dset.shape[0] + 1: - mov_ts_d = mov_ts_d[0: -1] - else: - raise ValueError('the timestamp count of {} movie ({}) does not equal (or exceed by exactly one) ' - 'the frame count of the movie ({})'.format(mov_dn, len(mov_ts_d), curr_dset.shape[0])) - - curr_description = '{}.
Imaging depth: {} micron.'.format(description, imaging_depths[mov_i]) - nwb_f.add_acquired_image_series_as_remote_link('2p_movie_' + mov_dn, image_file_path=mov_fn, - dataset_path=mov_dn, timestamps=mov_ts_d, - description=curr_description, comments='', - data_format='zyx', pixel_size=[pixel_size, pixel_size], - pixel_size_unit='meter') - -mov_f.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/110_add_motion_correction_module.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/110_add_motion_correction_module.py deleted file mode 100644 index 94e79bd..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/110_add_motion_correction_module.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import h5py -import corticalmapping.NwbTools as nt - -movie_2p_fn = '180309_M360495_03_2p_movies.hdf5' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -input_parameters = [] - -for i in range(5): - - plane_n = 'plane{}'.format(i) - - offsets_path = os.path.join(plane_n, 'correction_offsets.hdf5') - offsets_f = h5py.File(offsets_path) - offsets_keys = offsets_f.keys() - if 'path_list' in offsets_keys: - offsets_keys.remove('path_list') - - offsets_keys.sort() - offsets = [] - for offsets_key in offsets_keys: - offsets.append(offsets_f[offsets_key].value) - offsets = np.concatenate(offsets, axis=0) - offsets = np.array(zip(offsets[:, 1], offsets[:, 0])) - offsets_f.close() - - mean_projection = tf.imread(os.path.join(plane_n, 'corrected_mean_projection.tif')) - max_projection = tf.imread(os.path.join(plane_n, 'corrected_max_projection.tif')) - - input_dict = {'field_name': plane_n, - 'original_timeseries_path': '/acquisition/timeseries/2p_movie_plane' + str(i), - 'corrected_file_path': movie_2p_fn, - 'corrected_dataset_path': plane_n, - 'xy_translation_offsets': offsets, - 'mean_projection': mean_projection, - 'max_projection': max_projection, - 'description': '', - 'comments': '', - 'source': ''} - - input_parameters.append(input_dict) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -nwb_f.add_muliple_dataset_to_motion_correction_module(input_parameters=input_parameters, - module_name='motion_correction') -nwb_f.close() - - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/120_add_rois_and_traces_caiman_segmentation.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/120_add_rois_and_traces_caiman_segmentation.py deleted file mode 100644 index 68a3f3c..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/120_add_rois_and_traces_caiman_segmentation.py +++ /dev/null @@ -1,162 +0,0 @@ -import os -import h5py -import numpy as np -import matplotlib.pyplot as plt -import tifffile as tf -import corticalmapping.NwbTools as nt -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -plane_ns = ['plane0', 'plane1', 'plane2', 'plane3', 'plane4'] -plane_depths = [250, 200, 150, 100, 50] - -def add_rois_and_traces(data_folder, nwb_f, plane_n, imaging_depth, - mov_path='/processing/motion_correction/MotionCorrection'): - - mov_grp = 
nwb_f.file_pointer[mov_path + '/' + plane_n + '/corrected'] - - data_f = h5py.File(os.path.join(data_folder, 'rois_and_traces.hdf5'), 'r') - mask_arr_c = data_f['masks_center'].value - mask_arr_s = data_f['masks_surround'].value - traces_center_raw = data_f['traces_center_raw'].value - # traces_center_demixed = data_f['traces_center_demixed'].value - traces_center_subtracted = data_f['traces_center_subtracted'].value - # traces_center_dff = data_f['traces_center_dff'].value - traces_surround_raw = data_f['traces_surround_raw'].value - neuropil_r = data_f['neuropil_r'].value - neuropil_err = data_f['neuropil_err'].value - data_f.close() - - - if traces_center_raw.shape[1] != mov_grp['num_samples'].value: - raise ValueError('number of trace time points ({}) does not match frame number of ' - 'corresponding movie ({}).'.format(traces_center_raw.shape[1], mov_grp['num_samples'].value)) - - rf_img = tf.imread(os.path.join(data_folder, 'corrected_mean_projection.tif')) - - print 'adding segmentation results ...' - rt_mo = nwb_f.create_module('rois_and_traces_' + plane_n) - rt_mo.set_value('imaging_depth_micron', imaging_depth) - is_if = rt_mo.create_interface('ImageSegmentation') - is_if.create_imaging_plane('imaging_plane', description='') - is_if.add_reference_image('imaging_plane', 'mean_projection', rf_img) - - for i in range(mask_arr_c.shape[0]): - curr_cen = mask_arr_c[i] - curr_cen_n = 'roi_' + ft.int2str(i, 4) - curr_cen_roi = ia.WeightedROI(curr_cen) - curr_cen_pixels_yx = curr_cen_roi.get_pixel_array() - curr_cen_pixels_xy = np.array([curr_cen_pixels_yx[:, 1], curr_cen_pixels_yx[:, 0]]).transpose() - is_if.add_roi_mask_pixels(image_plane='imaging_plane', roi_name=curr_cen_n, desc='', - pixel_list=curr_cen_pixels_xy, weights=curr_cen_roi.weights, width=512, height=512) - - curr_sur = mask_arr_s[i] - curr_sur_n = 'surround_' + ft.int2str(i, 4) - curr_sur_roi = ia.ROI(curr_sur) - curr_sur_pixels_yx = curr_sur_roi.get_pixel_array() - curr_sur_pixels_xy = np.array([curr_sur_pixels_yx[:, 1], curr_sur_pixels_yx[:, 0]]).transpose() - is_if.add_roi_mask_pixels(image_plane='imaging_plane', roi_name=curr_sur_n, desc='', - pixel_list=curr_sur_pixels_xy, weights=None, width=512, height=512) - is_if.finalize() - - - - trace_f_if = rt_mo.create_interface('Fluorescence') - seg_if_path = '/processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane' - # print seg_if_path - ts_path = mov_path + '/' + plane_n + '/corrected' - - print 'adding center fluorescence raw' - trace_raw_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_raw') - trace_raw_ts.set_data(traces_center_raw, unit='au', conversion=np.nan, resolution=np.nan) - trace_raw_ts.set_value('data_format', 'roi (row) x time (column)') - trace_raw_ts.set_value('data_range', '[-8192, 8191]') - trace_raw_ts.set_description('fluorescence traces extracted from the center region of each roi') - trace_raw_ts.set_time_as_link(ts_path) - trace_raw_ts.set_value_as_link('segmentation_interface', seg_if_path) - roi_names = ['roi_' + ft.int2str(ind, 4) for ind in range(traces_center_raw.shape[0])] - trace_raw_ts.set_value('roi_names', roi_names) - trace_raw_ts.set_value('num_samples', traces_center_raw.shape[1]) - trace_f_if.add_timeseries(trace_raw_ts) - trace_raw_ts.finalize() - - print 'adding neuropil fluorescence raw' - trace_sur_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_surround_raw') - trace_sur_ts.set_data(traces_surround_raw, unit='au', conversion=np.nan, resolution=np.nan) - trace_sur_ts.set_value('data_format', 'roi (row) x time (column)') - trace_sur_ts.set_value('data_range', '[-8192, 8191]') - trace_sur_ts.set_description('neuropil traces extracted from the surround region of each roi') - trace_sur_ts.set_time_as_link(ts_path) - trace_sur_ts.set_value_as_link('segmentation_interface', seg_if_path) - sur_names = ['surround_' + ft.int2str(ind, 4) for ind in range(traces_center_raw.shape[0])] - trace_sur_ts.set_value('roi_names', sur_names) - trace_sur_ts.set_value('num_samples', traces_surround_raw.shape[1]) - trace_f_if.add_timeseries(trace_sur_ts) - trace_sur_ts.finalize() - - roi_center_n_path = '/processing/rois_and_traces_' + plane_n + '/Fluorescence/f_center_raw/roi_names' - # print 'adding center fluorescence demixed' - # trace_demix_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_demixed') - # trace_demix_ts.set_data(traces_center_demixed, unit='au', conversion=np.nan, resolution=np.nan) - # trace_demix_ts.set_value('data_format', 'roi (row) x time (column)') - # trace_demix_ts.set_description('center traces after overlapping demixing for each roi') - # trace_demix_ts.set_time_as_link(mov_path + '/' + plane_n + '/corrected') - # trace_demix_ts.set_value_as_link('segmentation_interface', seg_if_path) - # trace_demix_ts.set_value('roi_names', roi_names) - # trace_demix_ts.set_value('num_samples', traces_center_demixed.shape[1]) - # trace_f_if.add_timeseries(trace_demix_ts) - # trace_demix_ts.finalize() - - print 'adding center fluorescence after neuropil subtraction' - trace_sub_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_subtracted') - trace_sub_ts.set_data(traces_center_subtracted, unit='au', conversion=np.nan, resolution=np.nan) - trace_sub_ts.set_value('data_format', 'roi (row) x time (column)') - trace_sub_ts.set_description('center traces after overlap demixing and neuropil subtraction for each roi') - trace_sub_ts.set_time_as_link(mov_path + '/' + plane_n + '/corrected') - trace_sub_ts.set_value_as_link('segmentation_interface', seg_if_path) - trace_sub_ts.set_value_as_link('roi_names', roi_center_n_path) - trace_sub_ts.set_value('num_samples', traces_center_subtracted.shape[1]) - trace_sub_ts.set_value('r', neuropil_r, dtype='float32') - trace_sub_ts.set_value('rmse', neuropil_err, dtype='float32') - trace_sub_ts.set_comments('value "r": neuropil contribution ratio for each roi. ' - 'value "rmse": RMS error of neuropil subtraction for each roi') - trace_f_if.add_timeseries(trace_sub_ts) - trace_sub_ts.finalize() - - trace_f_if.finalize() - - # print 'adding global dF/F traces for each roi' - # trace_dff_if = rt_mo.create_interface('DfOverF') - # - # trace_dff_ts = nwb_f.create_timeseries('RoiResponseSeries', 'dff_center') - # trace_dff_ts.set_data(traces_center_dff, unit='au', conversion=np.nan, resolution=np.nan) - # trace_dff_ts.set_value('data_format', 'roi (row) x time (column)') - # trace_dff_ts.set_description('global df/f traces for each roi center, input fluorescence is the trace after demixing' - # ' and neuropil subtraction.
global df/f is calculated by ' - # 'allensdk.brain_observatory.dff.compute_dff() function.') - # trace_dff_ts.set_time_as_link(ts_path) - # trace_dff_ts.set_value_as_link('segmentation_interface', seg_if_path) - # trace_dff_ts.set_value('roi_names', roi_names) - # trace_dff_ts.set_value('num_samples', traces_center_dff.shape[1]) - # trace_dff_if.add_timeseries(trace_dff_ts) - # trace_dff_ts.finalize() - # trace_dff_if.finalize() - - rt_mo.finalize() - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -for plane_i, plane_n in enumerate(plane_ns): - - print('\n\n' + plane_n) - - data_folder = os.path.join(curr_folder, plane_n) - add_rois_and_traces(data_folder, nwb_f, plane_n, imaging_depth=plane_depths[plane_i]) - -nwb_f.close() - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/130_add_visual_stimuli_retinotopic_mapping.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/130_add_visual_stimuli_retinotopic_mapping.py deleted file mode 100644 index 0414239..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/130_add_visual_stimuli_retinotopic_mapping.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import retinotopic_mapping.DisplayLogAnalysis as dla -import corticalmapping.NwbTools as nt - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -stim_pkl_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.pkl'][0] -stim_log = dla.DisplayLogAnalyzer(stim_pkl_fn) - -nwb_f.add_visual_display_log_retinotopic_mapping(stim_log=stim_log) -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/135_get_photodiode_onset_timestamps.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/135_get_photodiode_onset_timestamps.py deleted file mode 100644 index 498d9c4..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/135_get_photodiode_onset_timestamps.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.HighLevel as hl - -# photodiode -digitizeThr = 0.02 -filterSize = 0.01 -segmentThr = 0.02 -smallestInterval = 0.03 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] - -nwb_f = nt.RecordedFile(nwb_fn) -pd, pd_t = nwb_f.get_analog_data(ch_n='analog_photodiode') -fs = 1. 
/ np.mean(np.diff(pd_t)) -# print fs - -pd_onsets = hl.segmentPhotodiodeSignal(pd, digitizeThr=digitizeThr, filterSize=filterSize, - segmentThr=segmentThr, Fs=fs, smallestInterval=smallestInterval) - -raw_input('press enter to continue ...') - -pdo_ts = nwb_f.create_timeseries('TimeSeries', 'digital_photodiode_rise', modality='other') -pdo_ts.set_time(pd_onsets) -pdo_ts.set_data([], unit='', conversion=np.nan, resolution=np.nan) -pdo_ts.set_value('digitize_threshold', digitizeThr) -pdo_ts.set_value('filter_size', filterSize) -pdo_ts.set_value('segment_threshold', segmentThr) -pdo_ts.set_value('smallest_interval', smallestInterval) -pdo_ts.set_description('Real Timestamps (master acquisition clock) of photodiode onset. ' - 'Extracted from analog photodiode signal by the function:' - 'corticalmapping.HighLevel.segmentPhotodiodeSignal() using parameters saved in the' - 'current timeseries.') -pdo_ts.set_path('/analysis') -pdo_ts.finalize() - -nwb_f.close() - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/140_analyze_analog_photodiode_onsets.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/140_analyze_analog_photodiode_onsets.py deleted file mode 100644 index bd58c7e..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/140_analyze_analog_photodiode_onsets.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import retinotopic_mapping.DisplayLogAnalysis as dla -import corticalmapping.core.TimingAnalysis as ta - - -pd_ts_pd_path = 'analysis/digital_photodiode_rise' -vsync_frame_path = 'acquisition/timeseries/digital_stim_vsync_rise' -pd_thr = -0.5 # this is color threshold, not analog photodiode threshold -ccg_t_range = (0., 0.1) -ccg_bins = 100 -is_plot = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -stim_pkl_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.pkl'][0] -stim_log = dla.DisplayLogAnalyzer(stim_pkl_fn) - -# get display lag -display_delay = nwb_f.get_display_delay_retinotopic_mapping(stim_log=stim_log, indicator_color_thr=pd_thr, - ccg_t_range=ccg_t_range, ccg_bins=ccg_bins, - is_plot=is_plot, pd_onset_ts_path=pd_ts_pd_path, - vsync_frame_ts_path=vsync_frame_path) - -# analyze photodiode onset -stim_dict = stim_log.get_stim_dict() -pd_onsets_seq = stim_log.analyze_photodiode_onsets_sequential(stim_dict=stim_dict, pd_thr=pd_thr) -pd_onsets_com = stim_log.analyze_photodiode_onsets_combined(pd_onsets_seq=pd_onsets_seq, - is_dgc_blocked=True) -nwb_f.add_photodiode_onsets_combined_retinotopic_mapping(pd_onsets_com=pd_onsets_com, - display_delay=display_delay, - vsync_frame_path=vsync_frame_path) -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/140_analyze_digital_photodiode_onsets.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/140_analyze_digital_photodiode_onsets.py deleted file mode 100644 index 0f5da9f..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/140_analyze_digital_photodiode_onsets.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import 
corticalmapping.NwbTools as nt -import retinotopic_mapping.DisplayLogAnalysis as dla -import corticalmapping.core.TimingAnalysis as ta - - -pd_ts_pd_path = 'acquisition/timeseries/digital_photodiode_rise' -vsync_frame_path = 'acquisition/timeseries/digital_stim_vsync_rise' -pd_thr = 0.5 # this is color threshold, not analog photodiode threshold -ccg_t_range = (0., 0.1) -ccg_bins = 100 -is_plot = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -stim_pkl_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.pkl'][0] -stim_log = dla.DisplayLogAnalyzer(stim_pkl_fn) - -# get display lag -display_delay = nwb_f.get_display_delay_retinotopic_mapping(stim_log=stim_log, indicator_color_thr=pd_thr, - ccg_t_range=ccg_t_range, ccg_bins=ccg_bins, - is_plot=is_plot, pd_onset_ts_path=pd_ts_pd_path, - vsync_frame_ts_path=vsync_frame_path) - -# analyze photodiode onset -stim_dict = stim_log.get_stim_dict() -pd_onsets_seq = stim_log.analyze_photodiode_onsets_sequential(stim_dict=stim_dict, pd_thr=pd_thr) -pd_onsets_com = stim_log.analyze_photodiode_onsets_combined(pd_onsets_seq=pd_onsets_seq, - is_dgc_blocked=True) -nwb_f.add_photodiode_onsets_combined_retinotopic_mapping(pd_onsets_com=pd_onsets_com, - display_delay=display_delay, - vsync_frame_path=vsync_frame_path) -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/150_get_STRFs.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/150_get_STRFs.py deleted file mode 100644 index 8642a33..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/150_get_STRFs.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -stim_name = '001_LocallySparseNoiseRetinotopicMapping' -trace_source = 'f_center_subtracted' -start_time = -1. -end_time = 2. 
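The STRF extraction below slices each ROI trace around every probe onset using the plane's mean frame interval. The frame arithmetic can be sketched in isolation (names mirror the script; ta.find_nearest is approximated here with argmin):

import numpy as np

def chunk_frames(trace_ts, probe_t, start_time, end_time):
    frame_dur = np.mean(np.diff(trace_ts))              # mean frame duration of the plane
    n_frames = int(np.ceil((end_time - start_time) / frame_dur))
    offset = int(np.floor(start_time / frame_dur))      # negative => frames before onset
    start = int(np.argmin(np.abs(trace_ts - probe_t))) + offset
    return start, start + n_frames                      # caller discards out-of-range chunks

trace_ts = np.arange(0., 10., 0.1)                      # synthetic timestamps
print(chunk_frames(trace_ts, probe_t=5.0, start_time=-1., end_time=2.))  # (40, 70)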
- -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -probe_grp = nwb_f.file_pointer['analysis/photodiode_onsets/' + stim_name] -probe_ns = probe_grp.keys() -probe_ns.sort() - -probe_locations = [[float(pn[3: 9]), float(pn[13: 19])] for pn in probe_ns] -probe_signs = [float(pn[-2:]) for pn in probe_ns] -# print(probe_locations) - -plane_ns = nwb_f.file_pointer['processing'].keys() -plane_ns = [pn.split('_')[-1] for pn in plane_ns if 'rois_and_traces_plane' in pn] -plane_ns.sort() -print('\n'.join(plane_ns)) - -strf_grp = nwb_f.file_pointer['analysis'].create_group('STRFs') - -for plane_n in plane_ns: - print('\ngetting STRFs for {} ...'.format(plane_n)) - - roi_ns = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + - '/ImageSegmentation/imaging_plane/roi_list'].value - roi_ns = [rn for rn in roi_ns if rn[0: 4] == 'roi_'] - roi_ns.sort() - roi_num = len(roi_ns) - - plane_strf_grp = strf_grp.create_group(plane_n) - plane_traces = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + '/Fluorescence/' + - trace_source + '/data'].value - plane_trace_ts = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + '/Fluorescence/' + - trace_source + '/timestamps'].value - - plane_mean_frame_dur = np.mean(np.diff(plane_trace_ts)) - plane_chunk_frame_dur = int(np.ceil((end_time - start_time) / plane_mean_frame_dur)) - plane_chunk_frame_start = int(np.floor(start_time / plane_mean_frame_dur)) - plane_t = (np.arange(plane_chunk_frame_dur) + plane_chunk_frame_start) * plane_mean_frame_dur - print '{}: STRF time axis: \n{}'.format(plane_n, plane_t) - - plane_roi_traces = [] - trigger_ts = [] - - for probe_ind, probe_n in enumerate(probe_ns): - - probe_ts = probe_grp[probe_n]['pd_onset_ts_sec'].value - probe_traces = [] - probe_trigger_ts = [] - for curr_probe_ts in probe_ts: - curr_frame_start = ta.find_nearest(plane_trace_ts, curr_probe_ts) + plane_chunk_frame_start - curr_frame_end = curr_frame_start + plane_chunk_frame_dur - if curr_frame_start >= 0 and curr_frame_end <= len(plane_trace_ts): - probe_traces.append(plane_traces[:, curr_frame_start: curr_frame_end]) - probe_trigger_ts.append(curr_probe_ts) - - plane_roi_traces.append(np.array(probe_traces)) - trigger_ts.append(probe_trigger_ts) - print('probe: {} / {}; shape: {}'.format(probe_ind + 1, len(probe_ns), np.array(probe_traces).shape)) - - # plane_roi_traces = np.array(plane_roi_traces) - - print('saving ...') - for roi_ind in range(roi_num): - - print "roi: {} / {}".format(roi_ind + 1, roi_num) - curr_unit_traces = [pt[:, roi_ind, :] for pt in plane_roi_traces] - curr_unit_traces = [list(t) for t in curr_unit_traces] - curr_strf = sca.SpatialTemporalReceptiveField2(locations=probe_locations, - signs=probe_signs, - traces=curr_unit_traces, - trigger_ts=trigger_ts, - time=plane_t, - name='roi_{:04d}'.format(roi_ind), - trace_data_type=trace_source) - - curr_strf_grp = plane_strf_grp.create_group('strf_roi_{:04d}'.format(roi_ind)) - curr_strf.to_h5_group(curr_strf_grp) - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/160_get_drifting_grating_response_tables.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/160_get_drifting_grating_response_tables.py deleted file mode 100644 index 65ba54c..0000000 --- 
a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/160_get_drifting_grating_response_tables.py +++ /dev/null @@ -1,18 +0,0 @@ -import os -import h5py -import numpy as np -import corticalmapping.NwbTools as nt - -plane_ns = ['plane0', 'plane1', 'plane2', 'plane3', 'plane4'] -stim_name = '001_DriftingGratingCircleRetinotopicMapping' -t_win = [-1, 2.5] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -nwb_f.get_drifting_grating_response_table_retinotopic_mapping(stim_name=stim_name, time_window=t_win) - -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/170_plot_STRFs.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/170_plot_STRFs.py deleted file mode 100644 index 3de24ad..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/170_plot_STRFs.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import h5py -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -save_folder = 'figures' -is_local_dff = True -is_add_to_traces = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File("{}/caiman_segmentation_results.hdf5".format(plane_n))['bias_added_to_movie'].value - else: - add_to_trace = 0. 
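The bias_added_to_movie bookkeeping matters here: 045 shifted the movie so CaImAn never saw negative pixels, and 060 stored traces with that bias subtracted back out, so a ratio measure such as dF/F is only meaningful once the bias is restored, which is what add_to_trace does below. A toy illustration with synthetic numbers (not the SpatialTemporalReceptiveField method itself):

import numpy as np

stored_trace = np.array([-2., 0., 8.])  # bias-subtracted trace as saved by the segmentation step
bias = 12.                              # hypothetical 'bias_added_to_movie' value
f = stored_trace + bias                 # back on the scale the traces were extracted at
baseline = f[:2].mean()                 # pretend the first two samples are baseline
print((f - baseline) / baseline)        # simple dF/F sketch: [-0.0909  0.0909  0.8182]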
- - plane_grp = strf_grp[plane_n] - pdff = PdfPages(os.path.join(save_folder, 'STRFs_' + plane_n + '.pdf')) - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - - v_min, v_max = curr_strf_dff.get_data_range() - f = curr_strf_dff.plot_traces(yRange=(v_min, v_max * 1.1), figSize=(16, 10), - columnSpacing=0.002, rowSpacing=0.002) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/180_plot_zscore_RFs.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/180_plot_zscore_RFs.py deleted file mode 100644 index 73f92f3..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/180_plot_zscore_RFs.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import h5py -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -save_folder = 'figures' -is_local_dff = True -zscore_range = [0., 4.] -t_window = [0., 1.] -is_add_to_traces = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File("{}/caiman_segmentation_results.hdf5".format(plane_n))['bias_added_to_movie'].value - else: - add_to_trace = 0. 
- - plane_grp = strf_grp[plane_n] - pdff = PdfPages(os.path.join(save_folder, 'zscore_RFs_' + plane_n + '.pdf')) - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - v_min, v_max = curr_strf_dff.get_data_range() - - rf_on, rf_off = curr_strf_dff.get_zscore_receptive_field(timeWindow=t_window) - f = plt.figure(figsize=(15, 4)) - ax_on = f.add_subplot(121) - rf_on.plot_rf(plot_axis=ax_on, is_colorbar=True, cmap='Reds', vmin=zscore_range[0], vmax=zscore_range[1]) - ax_off = f.add_subplot(122) - rf_off.plot_rf(plot_axis=ax_off, is_colorbar=True, cmap='Blues', vmin=zscore_range[0], vmax=zscore_range[1]) - plt.close() - - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/190_plot_RF_contours.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/190_plot_RF_contours.py deleted file mode 100644 index 1e7b31c..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/190_plot_RF_contours.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import numpy as np -import h5py -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -roi_t_window = [0., 1.] -zscore_range = [0., 4.] -save_folder = 'figures' -is_add_to_traces = True - -# plot control -thr_ratio = 0.4 -filter_sigma = 1. -interpolate_rate = 5 -absolute_thr = 1.6 -level_num = 1 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'] -print('\n'.join(nwb_fn)) - -if len(nwb_fn) != 1: - raise LookupError - -nwb_fn = nwb_fn[0] -rff = h5py.File(nwb_fn, 'r') - -strf_grp = rff['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -X = None -Y = None - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File("{}/caiman_segmentation_results.hdf5".format(plane_n))['bias_added_to_movie'].value - else: - add_to_trace = 0. 
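The contour figures below draw each receptive field at a single level, thr_ratio of that field's own peak, so every ROI contributes one ON and one OFF outline. A self-contained sketch of the thresholded-contour idea with a synthetic mask:

import numpy as np
import matplotlib.pyplot as plt

thr_ratio = 0.4
yy, xx = np.mgrid[0:50, 0:50]
mask = np.exp(-((xx - 30.) ** 2 + (yy - 20.) ** 2) / 40.)  # stand-in for a weighted RF mask
X, Y = np.meshgrid(np.arange(mask.shape[1]), np.arange(mask.shape[0]))
plt.contour(X, Y, mask, levels=[mask.max() * thr_ratio], colors='r')
plt.gca().set_aspect('equal')
plt.show()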
- - plane_grp = strf_grp[plane_n] - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - f_all = plt.figure(figsize=(10, 10)) - ax_all = f_all.add_subplot(111) - - pdff = PdfPages(os.path.join(save_folder, 'RF_contours_' + plane_n + '.pdf')) - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - rf_on, rf_off, _ = curr_strf_dff.get_zscore_thresholded_receptive_fields(timeWindow=roi_t_window, - thr_ratio=thr_ratio, - filter_sigma=filter_sigma, - interpolate_rate=interpolate_rate, - absolute_thr=absolute_thr) - - if X is None and Y is None: - X, Y = np.meshgrid(np.arange(len(rf_on.aziPos)), - np.arange(len(rf_on.altPos))) - - levels_on = [np.max(rf_on.get_weighted_mask().flat) * thr_ratio] - levels_off = [np.max(rf_off.get_weighted_mask().flat) * thr_ratio] - ax_all.contour(X, Y, rf_on.get_weighted_mask(), levels=levels_on, colors='r', lw=5) - ax_all.contour(X, Y, rf_off.get_weighted_mask(), levels=levels_off, colors='b', lw=5) - - f_single = plt.figure(figsize=(10, 10)) - ax_single = f_single.add_subplot(111) - ax_single.contour(X, Y, rf_on.get_weighted_mask(), levels=levels_on, colors='r', lw=5) - ax_single.contour(X, Y, rf_off.get_weighted_mask(), levels=levels_off, colors='b', lw=5) - ax_single.set_xticks(range(len(rf_on.aziPos))[::10]) - ax_single.set_xticklabels(['{:05.1f}'.format(l) for l in rf_on.aziPos[::10]]) - ax_single.set_yticks(range(len(rf_on.altPos))[::10]) - ax_single.set_yticklabels(['{:05.1f}'.format(l) for l in rf_on.altPos[::-1][::10]]) - ax_single.set_aspect('equal') - ax_single.set_title('{}: {}. ON thr:{}; OFF thr:{}.'.format(plane_n, roi_n, rf_on.thr, rf_off.thr)) - pdff.savefig(f_single) - f_single.clear() - plt.close(f_single) - - pdff.close() - - ax_all.set_xticks(range(len(rf_on.aziPos))[::10]) - ax_all.set_xticklabels(['{:05.1f}'.format(l) for l in rf_on.aziPos[::10]]) - ax_all.set_yticks(range(len(rf_on.altPos))[::10]) - ax_all.set_yticklabels(['{:05.1f}'.format(l) for l in rf_on.altPos[::-1][::10]]) - ax_all.set_aspect('equal') - ax_all.set_title('{}, abs_zscore_thr:{}'.format(plane_n, absolute_thr)) - - f_all.savefig(os.path.join(save_folder, 'RF_contours_' + plane_n + '_all.pdf'), dpi=300) - -rff.close() - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/200_plot_dgc_response_all.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/200_plot_dgc_response_all.py deleted file mode 100644 index b1b2f1f..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/200_plot_dgc_response_all.py +++ /dev/null @@ -1,160 +0,0 @@ -import os -import h5py -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.core.PlottingTools as pt -from matplotlib.backends.backend_pdf import PdfPages -import matplotlib.gridspec as gridspec - -trace_type = 'f_center_subtracted' -response_table_path = 'analysis/response_table_001_DriftingGratingCircleRetinotopicMapping' - -baseline_span = [-0.5, 0.] 
-response_span = [0., 1.5] -is_add_to_trace = True - -face_cmap = 'RdBu_r' - -def get_dff(traces, t_axis, response_span, baseline_span): - """ - - :param traces: dimension, trial x timepoint - :param t_axis: - :return: - """ - - baseline_ind = np.logical_and(t_axis > baseline_span[0], t_axis <= baseline_span[1]) - response_ind = np.logical_and(t_axis > response_span[0], t_axis <= response_span[1]) - baseline = np.mean(traces[:, baseline_ind], axis=1, keepdims=True) - dff_traces = (traces - baseline) / baseline - - trace_mean = np.mean(traces, axis=0) - baseline_mean = np.mean(trace_mean[baseline_ind]) - dff_trace_mean = (trace_mean - baseline_mean) / baseline_mean - dff_mean = np.mean(dff_trace_mean[response_ind]) - - return dff_traces, dff_trace_mean, dff_mean - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, 'figures') -if not os.path.isdir(save_folder): - os.mkdir(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -print(nwb_fn) -nwb_f = h5py.File(nwb_fn, 'r') - -plane_ns = nwb_f[response_table_path].keys() -plane_ns.sort() - -for plane_n in plane_ns: - - print('\nprocessing {} ...'.format(plane_n)) - - if is_add_to_trace: - add_to_trace = h5py.File(os.path.join(plane_n, 'caiman_segmentation_results.hdf5'), - 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0 - - res_grp = nwb_f['{}/{}'.format(response_table_path, plane_n)] - t_axis = res_grp.attrs['sta_timestamps'] - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - grating_ns = res_grp.keys() - - # remove blank sweep - grating_ns = [gn for gn in grating_ns if gn[-37:] != '_sf0.00_tf00.0_dire000_con0.00_rad000'] - - dire_lst = np.array(list(set([str(gn[38:41]) for gn in grating_ns]))) - tf_lst = np.array(list(set([str(gn[29:33]) for gn in grating_ns]))) - sf_lst = np.array(list(set([str(gn[22:26]) for gn in grating_ns]))) - - - pdff = PdfPages(os.path.join(save_folder, 'STA_DriftingGrating_' + plane_n + '_all.pdf')) - - for roi_i, roi_n in enumerate(roi_lst): - print(roi_n) - - f = plt.figure(figsize=(8.5, 11)) - gs_out = gridspec.GridSpec(len(tf_lst), 1) - gs_in_dict = {} - for gs_ind, gs_o in enumerate(gs_out): - curr_gs_in = gridspec.GridSpecFromSubplotSpec(len(sf_lst), len(dire_lst), subplot_spec=gs_o, - wspace=0.0, hspace=0.0) - gs_in_dict[gs_ind] = curr_gs_in - - v_max = 0 - v_min = 0 - dff_mean_max=0 - dff_mean_min=0 - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - dff_traces, dff_trace_mean, dff_mean = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, - baseline_span=baseline_span) - v_max = max([np.amax(dff_traces), v_max]) - v_min = min([np.amin(dff_traces), v_min]) - dff_mean_max = max([dff_mean, dff_mean_max]) - dff_mean_min = min([dff_mean, dff_mean_min]) - - dff_mean_max = max([abs(dff_mean_max), abs(dff_mean_min)]) - dff_mean_min = - dff_mean_max - - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - dff_traces, dff_trace_mean, dff_mean = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, - baseline_span=baseline_span) - - curr_tf = grating_n[29:33] - tf_i = np.where(tf_lst == curr_tf)[0][0] - curr_sf = grating_n[22:26] - sf_i = np.where(sf_lst 
== curr_sf)[0][0] - curr_dire = grating_n[38:41] - dire_i = np.where(dire_lst == curr_dire)[0][0] - ax = plt.Subplot(f, gs_in_dict[tf_i][sf_i * len(dire_lst) + dire_i]) - f_color = pt.value_2_rgb(value=(dff_mean - dff_mean_min) / (dff_mean_max - dff_mean_min), - cmap=face_cmap) - - # f_color = pt.value_2_rgb(value=dff_mean / dff_mean_max, cmap=face_cmap) - - # print f_color - ax.set_axis_bgcolor(f_color) - ax.set_xticks([]) - ax.set_yticks([]) - for sp in ax.spines.values(): - sp.set_visible(False) - ax.axhline(y=0, ls='--', color='#888888', lw=1) - ax.axvspan(response_span[0], response_span[1], alpha=0.5, color='#888888', ec='none') - for t in dff_traces: - ax.plot(t_axis, t, '-', color='#888888', lw=0.5) - ax.plot(t_axis, dff_trace_mean, '-r', lw=1) - f.add_subplot(ax) - - all_axes = f.get_axes() - for ax in all_axes: - ax.set_ylim([v_min, v_max]) - ax.set_xlim([t_axis[0], t_axis[-1]]) - - f.suptitle('roi:{:04d}; trace type:{}; baseline:{}; response:{}; \ntrace range:{}; color range:{}' - .format(roi_i, trace_type, baseline_span, response_span, [v_min, v_max], - [dff_mean_min, dff_mean_max]), fontsize=8) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/210_plot_dgc_response_mean.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/210_plot_dgc_response_mean.py deleted file mode 100644 index 604a4db..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/210_plot_dgc_response_mean.py +++ /dev/null @@ -1,157 +0,0 @@ -import os -import h5py -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.core.PlottingTools as pt -from matplotlib.backends.backend_pdf import PdfPages -import matplotlib.gridspec as gridspec - -trace_type = 'f_center_subtracted' -response_table_path = 'analysis/response_table_001_DriftingGratingCircleRetinotopicMapping' - -baseline_span = [-0.5, 0.] 
-response_span = [0., 1.5] -is_add_to_trace = True - -face_cmap = 'RdBu_r' - -def get_dff(traces, t_axis, response_span, baseline_span): - """ - - :param traces: dimension, trial x timepoint - :param t_axis: - :return: - """ - - trace_mean = np.mean(traces, axis=0) - trace_std = np.std(traces, axis=0) - trace_sem = trace_std / np.sqrt(traces.shape[0]) - - baseline_ind = np.logical_and(t_axis > baseline_span[0], t_axis <= baseline_span[1]) - response_ind = np.logical_and(t_axis > response_span[0], t_axis <= response_span[1]) - baseline = np.mean(trace_mean[baseline_ind]) - dff_trace_mean = (trace_mean - baseline) / baseline - dff_trace_std = trace_std / baseline - dff_trace_sem = trace_sem / baseline - dff_mean = np.mean(dff_trace_mean[response_ind]) - - return dff_trace_mean, dff_trace_std, dff_trace_sem, dff_mean - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, 'figures') -if not os.path.isdir(save_folder): - os.mkdir(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -print(nwb_fn) -nwb_f = h5py.File(nwb_fn, 'r') - -plane_ns = nwb_f[response_table_path].keys() -plane_ns.sort() - -for plane_n in plane_ns: - - print('\nprocessing {} ...'.format(plane_n)) - - if is_add_to_trace: - add_to_trace = h5py.File(os.path.join(plane_n, 'caiman_segmentation_results.hdf5'), - 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0 - - res_grp = nwb_f['{}/{}'.format(response_table_path, plane_n)] - t_axis = res_grp.attrs['sta_timestamps'] - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - grating_ns = res_grp.keys() - # remove blank sweep - grating_ns = [gn for gn in grating_ns if gn[-37:] != '_sf0.00_tf00.0_dire000_con0.00_rad000'] - - dire_lst = np.array(list(set([str(gn[38:41]) for gn in grating_ns]))) - dire_lst.sort() - tf_lst = np.array(list(set([str(gn[29:33]) for gn in grating_ns]))) - tf_lst.sort() - sf_lst = np.array(list(set([str(gn[22:26]) for gn in grating_ns]))) - sf_lst.sort() - - pdff = PdfPages(os.path.join(save_folder, 'STA_DriftingGrating_' + plane_n + '_mean.pdf')) - - for roi_i, roi_n in enumerate(roi_lst): - print(roi_n) - - f = plt.figure(figsize=(8.5, 11)) - gs_out = gridspec.GridSpec(len(tf_lst), 1) - gs_in_dict = {} - for gs_ind, gs_o in enumerate(gs_out): - curr_gs_in = gridspec.GridSpecFromSubplotSpec(len(sf_lst), len(dire_lst), subplot_spec=gs_o, - wspace=0.05, hspace=0.05) - gs_in_dict[gs_ind] = curr_gs_in - - v_max = 0 - v_min = 0 - dff_mean_max=0 - dff_mean_min=0 - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - _ = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, baseline_span=baseline_span) - dff_trace_mean, dff_trace_std, dff_trace_sem, dff_mean = _ - v_max = max([np.amax(dff_trace_mean + dff_trace_sem), v_max]) - v_min = min([np.amin(dff_trace_mean - dff_trace_sem), v_min]) - dff_mean_max = max([dff_mean, dff_mean_max]) - dff_mean_min = min([dff_mean, dff_mean_min]) - dff_mean_max = max([abs(dff_mean_max), abs(dff_mean_min)]) - dff_mean_min = - dff_mean_max - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - _ = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, baseline_span=baseline_span) - 
dff_trace_mean, dff_trace_std, dff_trace_sem, dff_mean = _ - curr_tf = grating_n[29:33] - tf_i = np.where(tf_lst == curr_tf)[0][0] - curr_sf = grating_n[22:26] - sf_i = np.where(sf_lst == curr_sf)[0][0] - curr_dire = grating_n[38:41] - dire_i = np.where(dire_lst == curr_dire)[0][0] - ax = plt.Subplot(f, gs_in_dict[tf_i][sf_i * len(dire_lst) + dire_i]) - f_color = pt.value_2_rgb(value=(dff_mean - dff_mean_min) / (dff_mean_max - dff_mean_min), - cmap=face_cmap) - - # f_color = pt.value_2_rgb(value=dff_mean / dff_mean_max, cmap=face_cmap) - - # print f_color - ax.set_axis_bgcolor(f_color) - ax.set_xticks([]) - ax.set_yticks([]) - for sp in ax.spines.values(): - sp.set_visible(False) - ax.axhline(y=0, ls='--', color='#888888', lw=1) - ax.axvspan(response_span[0], response_span[1], alpha=0.5, color='#888888', ec='none') - ax.fill_between(t_axis, dff_trace_mean - dff_trace_sem, dff_trace_mean + dff_trace_sem, edgecolor='none', - facecolor='#880000', alpha=0.5) - ax.plot(t_axis, dff_trace_mean, '-r', lw=1) - f.add_subplot(ax) - - all_axes = f.get_axes() - for ax in all_axes: - ax.set_ylim([v_min, v_max]) - ax.set_xlim([t_axis[0], t_axis[-1]]) - - f.suptitle('roi:{:04d}; trace type:{}; baseline:{}; response:{}; \ntrace range:{}; color range:{}' - .format(roi_i, trace_type, baseline_span, response_span, [v_min, v_max], - [dff_mean_min, dff_mean_max]), fontsize=8) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/220_plot_dgc_tuning_curves.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/220_plot_dgc_tuning_curves.py deleted file mode 100644 index a5d8a6f..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/220_plot_dgc_tuning_curves.py +++ /dev/null @@ -1,196 +0,0 @@ -import os -import h5py -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages - -trace_type = 'f_center_subtracted' -response_table_path = 'analysis/response_table_001_DriftingGratingCircleRetinotopicMapping' - -baseline_span = [-0.5, 0.] 
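The get_dff helper above (and get_response in the next script) reduces a trial x timepoint block to a baseline-normalized response. A standalone restatement with synthetic input, for reference:

import numpy as np

traces = np.random.rand(20, 60) + 10.           # hypothetical trial x timepoint block
t_axis = np.linspace(-1., 2., 60)
baseline_span, response_span = [-0.5, 0.], [0., 1.5]

trace_mean = traces.mean(axis=0)
baseline_ind = np.logical_and(t_axis > baseline_span[0], t_axis <= baseline_span[1])
response_ind = np.logical_and(t_axis > response_span[0], t_axis <= response_span[1])
baseline = trace_mean[baseline_ind].mean()      # mean pre-stimulus fluorescence
dff_trace_mean = (trace_mean - baseline) / baseline
dff_mean = dff_trace_mean[response_ind].mean()  # scalar used for the subplot face color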
-response_span = [0., 1.5] -is_add_to_trace = True - -def get_response(traces, t_axis, response_span, baseline_span): - """ - - :param traces: dimension, trial x timepoint - :param t_axis: - :return: - """ - - baseline_ind = np.logical_and(t_axis > baseline_span[0], t_axis <= baseline_span[1]) - response_ind = np.logical_and(t_axis > response_span[0], t_axis <= response_span[1]) - - trace_mean = np.mean(traces, axis=0) - baseline_mean = np.mean(trace_mean[baseline_ind]) - dff_trace_mean = (trace_mean - baseline_mean) / baseline_mean - dff_mean = np.mean(dff_trace_mean[response_ind]) - - baselines = np.mean(traces[:, baseline_ind], axis=1, keepdims=True) - dff_traces = (traces - baselines) / baselines - dffs = np.mean(dff_traces[:, response_ind], axis=1) - dff_std = np.std(dffs) - dff_sem = dff_std / np.sqrt(traces.shape[0]) - - return dff_mean, dff_std, dff_sem - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, 'figures') -if not os.path.isdir(save_folder): - os.mkdir(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -print(nwb_fn) -nwb_f = h5py.File(nwb_fn, 'r') - -plane_ns = nwb_f[response_table_path].keys() -plane_ns.sort() - -for plane_n in plane_ns: - - print('\nprocessing {} ...'.format(plane_n)) - - if is_add_to_trace: - add_to_trace = h5py.File(os.path.join(plane_n, 'caiman_segmentation_results.hdf5'), - 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0 - - res_grp = nwb_f['{}/{}'.format(response_table_path, plane_n)] - t_axis = res_grp.attrs['sta_timestamps'] - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - grating_ns = res_grp.keys() - - # remove blank sweep - grating_ns = [gn for gn in grating_ns if gn[-37:] != '_sf0.00_tf00.0_dire000_con0.00_rad000'] - - dire_lst = np.array(list(set([str(gn[38:41]) for gn in grating_ns]))) - dire_lst.sort() - tf_lst = np.array(list(set([str(gn[29:33]) for gn in grating_ns]))) - tf_lst.sort() - sf_lst = np.array(list(set([str(gn[22:26]) for gn in grating_ns]))) - sf_lst.sort() - - pdff = PdfPages(os.path.join(save_folder, 'tuning_curve_DriftingGrating_' + plane_n + '_mean.pdf')) - - for roi_i, roi_n in enumerate(roi_lst): - print(roi_n) - - # get response table - res_tab = pd.DataFrame(columns=['con', 'tf', 'sf', 'dire', 'dff_mean', 'dff_std', 'dff_sem']) - row_ind = 0 - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - _ = get_response(traces=curr_sta, t_axis=t_axis, response_span=response_span, baseline_span=baseline_span) - dff_mean, dff_std, dff_sem = _ - - con = float(grating_n.split('_')[5][3:]) - tf = float(grating_n.split('_')[3][2:]) - sf = float(grating_n.split('_')[2][2:]) - dire = int(grating_n.split('_')[4][4:]) - - res_tab.loc[row_ind] = [con, tf, sf, dire, dff_mean, dff_std, dff_sem] - row_ind += 1 - - # find the preferred condition - top_condition = res_tab[res_tab.dff_mean == max(res_tab.dff_mean)] - - # make figure - f = plt.figure(figsize=(8.5, 11)) - - # get tf plot - tf_conditions = res_tab[(res_tab.sf == float(top_condition.sf)) & \ - (res_tab.dire == int(top_condition.dire))] - tf_conditions = tf_conditions.sort_values(by='tf') - - tf_log = np.log(tf_conditions.tf) - - ax_tf = f.add_subplot(311) - ax_tf.fill_between(x=tf_log, y1=tf_conditions.dff_mean + tf_conditions.dff_sem, 
- y2=tf_conditions.dff_mean - tf_conditions.dff_sem, edgecolor='none', - facecolor='#888888', alpha=0.5) - ax_tf.axhline(y=0, ls='--', color='k', lw=1) - ax_tf.plot(tf_log, tf_conditions.dff_mean, 'r-', lw=2) - ax_tf.set_title('temporal frequency tuning', rotation='vertical', x=-0.4, y=0.5, va='center', ha='center', - size=10) - ax_tf.set_xticks(tf_log) - ax_tf.set_xticklabels(list(tf_conditions.tf)) - ax_tf.set_xlim(np.log([0.9, 16])) - ax_tf_xrange = ax_tf.get_xlim()[1] - ax_tf.get_xlim()[0] - ax_tf_yrange = ax_tf.get_ylim()[1] - ax_tf.get_ylim()[0] - ax_tf.set_aspect(aspect=(ax_tf_xrange / ax_tf_yrange)) - ax_tf.set_ylabel('mean df/f', size=10) - ax_tf.set_xlabel('temporal frequency (Hz)', size=10) - ax_tf.tick_params(axis='both', which='major', labelsize=10) - - # get sf plot - sf_conditions = res_tab[(res_tab.tf == float(top_condition.tf)) & \ - (res_tab.dire == int(top_condition.dire))] - sf_conditions = sf_conditions.sort_values(by='sf') - - sf_log = np.log(sf_conditions.sf) - - ax_sf = f.add_subplot(312) - ax_sf.fill_between(x=sf_log, y1=sf_conditions.dff_mean + sf_conditions.dff_sem, - y2=sf_conditions.dff_mean - sf_conditions.dff_sem, edgecolor='none', - facecolor='#888888', alpha=0.5) - ax_sf.axhline(y=0, ls='--', color='k', lw=1) - ax_sf.plot(sf_log, sf_conditions.dff_mean, '-r', lw=2) - ax_sf.set_title('spatial frequency tuning', rotation='vertical', x=-0.4, y=0.5, va='center', ha='center', - size=10) - ax_sf.set_xticks(sf_log) - ax_sf.set_xticklabels(['{:04.2f}'.format(s) for s in list(sf_conditions.sf)]) - ax_sf.set_xlim(np.log([0.008, 0.4])) - ax_sf_xrange = ax_sf.get_xlim()[1] - ax_sf.get_xlim()[0] - ax_sf_yrange = ax_sf.get_ylim()[1] - ax_sf.get_ylim()[0] - ax_sf.set_aspect(aspect=(ax_sf_xrange / ax_sf_yrange)) - ax_sf.set_ylabel('mean df/f', size=10) - ax_sf.set_xlabel('spatial frequency (cpd)', size=10) - ax_sf.tick_params(axis='both', which='major', labelsize=10) - - # get dire plot - dire_conditions = res_tab[(res_tab.tf == float(top_condition.tf)) & \ - (res_tab.sf == float(top_condition.sf))] - dire_conditions = dire_conditions.sort_values(by='dire') - dire_arc = list(dire_conditions.dire * np.pi / 180.) - dire_arc.append(dire_arc[0]) - dire_dff = np.array(dire_conditions.dff_mean) - dire_dff[dire_dff < 0.] = 0. - dire_dff = list(dire_dff) - dire_dff.append(dire_dff[0]) - dire_dff_sem = list(dire_conditions.dff_sem) - dire_dff_sem.append(dire_dff_sem[0]) - dire_dff_low = np.array(dire_dff) - np.array(dire_dff_sem) - dire_dff_low[dire_dff_low < 0.] = 0. - dire_dff_high = np.array(dire_dff) + np.array(dire_dff_sem) - - r_ticks = [0, round(max(dire_dff) * 10000.) / 10000.]
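The direction panel above closes the polar curve by re-appending the first sample and clips negative dF/F to zero, since a polar radius cannot be negative. The same idea in isolation, with hypothetical tuning values:

import numpy as np
import matplotlib.pyplot as plt

dires_deg = np.arange(0, 360, 45)                            # 8 directions
dff = np.array([0.1, 0.4, 0.8, 0.3, -0.05, 0.0, 0.05, 0.2])  # hypothetical mean dF/F

theta = list(np.deg2rad(dires_deg))
theta.append(theta[0])              # close the loop: 360 deg == 0 deg
r = list(np.clip(dff, 0., None))    # radius must be non-negative
r.append(r[0])

ax = plt.subplot(111, projection='polar')
ax.plot(theta, r, '-r', lw=2)
plt.show()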
- - ax_dire = f.add_subplot(313, projection='polar') - ax_dire.fill_between(x=dire_arc, y1=dire_dff_low, y2=dire_dff_high, edgecolor='none', facecolor='#888888', - alpha=0.5) - ax_dire.plot(dire_arc, dire_dff, '-r', lw=2) - ax_dire.set_title('direction tuning', rotation='vertical', x=-0.4, y=0.5, va='center', ha='center', size=10) - ax_dire.set_rticks(r_ticks) - ax_dire.tick_params(axis='both', which='major', labelsize=10) - - f.suptitle('roi:{:04d}; trace type:{}; baseline:{}; response:{}' - .format(roi_i, trace_type, baseline_span, response_span), fontsize=10) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/040_get_cells_file.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/040_get_cells_file.py deleted file mode 100644 index ff29a37..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/040_get_cells_file.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import numpy as np -import h5py -import tifffile as tf -import allensdk_internal.brain_observatory.mask_set as mask_set -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import scipy.ndimage as ni -import matplotlib.pyplot as plt - - -isSave = True -is_filter = True - -filter_sigma = 0.5 # parameters only used when filtering the rois -# dilation_iterations = 0 # parameters only used when filtering the rois -cut_thr = 3. # parameters only used when filtering the rois - -bg_fn = "corrected_mean_projection.tif" -save_folder = 'figures' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -data_f = h5py.File('caiman_segmentation_results.hdf5') -masks = data_f['masks'].value -data_f.close() - -bg = tf.imread(bg_fn) - -final_roi_dict = {} - -for i, mask in enumerate(masks): - - if is_filter: - mask_nor = (mask - np.mean(mask.flatten())) / np.abs(np.std(mask.flatten())) - mask_nor_f = ni.filters.gaussian_filter(mask_nor, filter_sigma) - mask_bin = np.zeros(mask_nor_f.shape, dtype=np.uint8) - mask_bin[mask_nor_f > cut_thr] = 1 - - else: - mask_bin = np.zeros(mask.shape, dtype=np.uint8) - mask_bin[mask > 0] = 1 - - mask_labeled, mask_num = ni.label(mask_bin) - curr_mask_dict = ia.get_masks(labeled=mask_labeled, keyPrefix='caiman_mask_{:03d}'.format(i), labelLength=5) - for roi_key, roi_mask in curr_mask_dict.items(): - final_roi_dict.update({roi_key: ia.WeightedROI(roi_mask * mask)}) - -print 'Total number of ROIs:', len(final_roi_dict) - -f = plt.figure(figsize=(15, 8)) -ax1 = f.add_subplot(121) -ax1.imshow(ia.array_nor(bg), vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') -colors1 = pt.random_color(masks.shape[0]) -for i, mask in enumerate(masks): - pt.plot_mask_borders(mask, plotAxis=ax1, color=colors1[i]) -ax1.set_title('original ROIs') -ax1.set_axis_off() -ax2 = f.add_subplot(122) -ax2.imshow(ia.array_nor(bg), vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') -colors2 = pt.random_color(len(final_roi_dict)) -i = 0 -for roi in final_roi_dict.values(): - pt.plot_mask_borders(roi.get_binary_mask(), plotAxis=ax2, color=colors2[i]) - i = i + 1 -ax2.set_title('filtered ROIs') -ax2.set_axis_off() -plt.show() - -if isSave: - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - f.savefig(os.path.join(save_folder,
'caiman_segmentation_filtering.pdf'), dpi=300) - - cell_file = h5py.File('cells.hdf5', 'w') - - i = 0 - for key, value in sorted(final_roi_dict.iteritems()): - curr_grp = cell_file.create_group('cell{:04d}'.format(i)) - curr_grp.attrs['name'] = key - value.to_h5_group(curr_grp) - i += 1 - - cell_file.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/050_refine_cells.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/050_refine_cells.py deleted file mode 100644 index f0b8c31..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/050_refine_cells.py +++ /dev/null @@ -1,173 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Jun 30 17:44:42 2015 - -@author: junz -""" -import os -import h5py -import numpy as np -import operator -import matplotlib.pyplot as plt -import scipy.ndimage as ni -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.FileTools as ft -import corticalmapping.core.PlottingTools as pt -import corticalmapping.SingleCellAnalysis as sca - -plt.ioff() - -# pixels, masks with center location within this pixel region at the image border will be discarded -center_margin = [20, 20] - -# area range, range of number of pixels of a valid roi -area_range = [20, 500] - -# for the two masks that are overlapping, if the ratio between overlap and the area of the smaller mask is larger than -# this value, the smaller mask will be discarded. -overlap_thr = 0.5 - -save_folder = 'figures' - -data_file_name = 'cells.hdf5' -save_file_name = 'cells_refined.hdf5' -background_file_name = "corrected_mean_projection.tif" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -# read cells -dfile = h5py.File(data_file_name) -cells = {} -for cellname in dfile.iterkeys(): - cells.update({cellname:ia.WeightedROI.from_h5_group(dfile[cellname])}) - -print 'total number of cells:', len(cells) - -# get the names of cells which are on the edge -edge_cells = [] -for cellname, cellmask in cells.iteritems(): - dimension = cellmask.dimension - center = cellmask.get_center() - if center[0] < center_margin[0] or \ - center[0] > dimension[0] - center_margin[0] or \ - center[1] < center_margin[1] or \ - center[1] > dimension[1] - center_margin[1]: - - # cellmask.plot_binary_mask_border(color='#ff0000', borderWidth=1) - # plt.title(cellname) - # plt.show() - - edge_cells.append(cellname) - -print '\ncells to be removed because they are on the edges:' -print '\n'.join(edge_cells) - -# remove edge cells -for edge_cell in edge_cells: - _ = cells.pop(edge_cell) - -# get dictionary of cell areas -cell_areas = {} -for cellname, cellmask in cells.iteritems(): - cell_areas.update({cellname: cellmask.get_binary_area()}) - - -# remove cellnames that have area outside of the area_range -invalid_cell_ns = [] -for cellname, cellarea in cell_areas.items(): - if cellarea < area_range[0] or cellarea > area_range[1]: - invalid_cell_ns.append(cellname) -print "cells to be removed because they do not meet area criterion:" -print "\n".join(invalid_cell_ns) -for invalid_cell_n in invalid_cell_ns: - cell_areas.pop(invalid_cell_n) - - -# sort cells with their binary area -cell_areas_sorted = sorted(cell_areas.items(), key=operator.itemgetter(1)) 
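The refinement script above applies two cheap filters before resolving overlaps (which the next block does greedily): drop ROIs whose center sits within center_margin of the image border, and drop ROIs whose pixel count falls outside area_range. A condensed sketch of the two filters, assuming ROI objects that expose get_center() -> (row, col) and get_binary_area(), as in corticalmapping.core.ImageAnalysis:

def filter_rois(cells, dimension, center_margin=(20, 20), area_range=(20, 500)):
    # cells: dict name -> ROI; dimension: (height, width) of the imaging frame
    kept = {}
    for name, roi in cells.items():
        row, col = roi.get_center()
        on_edge = (row < center_margin[0] or row > dimension[0] - center_margin[0] or
                   col < center_margin[1] or col > dimension[1] - center_margin[1])
        area_ok = area_range[0] <= roi.get_binary_area() <= area_range[1]
        if not on_edge and area_ok:
            kept[name] = roi
    return kept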
-cell_areas_sorted.reverse() -cell_names_sorted = [c[0] for c in cell_areas_sorted] -# print '\n'.join([str(c) for c in cell_areas_sorted]) - -# get the name of cells that need to be removed because of overlapping -retain_cells = [] -remove_cells = [] -for cell1_name in cell_names_sorted: - cell1_mask = cells[cell1_name] - is_remove = 0 - cell1_area = cell1_mask.get_binary_area() - for cell2_name in retain_cells: - cell2_mask = cells[cell2_name] - cell2_area = cell2_mask.get_binary_area() - curr_overlap = cell1_mask.binary_overlap(cell2_mask) - - if float(curr_overlap) / cell1_area > overlap_thr: - remove_cells.append(cell1_name) - is_remove = 1 - print cell1_name, ':', cell1_mask.get_binary_area(), ': removed' - - # f = plt.figure(figsize=(10,10)) - # ax = f.add_subplot(111) - # cell1_mask.plot_binary_mask_border(plotAxis=ax, color='#ff0000', borderWidth=1) - # cell2_mask.plot_binary_mask_border(plotAxis=ax, color='#0000ff', borderWidth=1) - # ax.set_title('red:'+cell1_name+'; blue:'+cell2_name) - # plt.show() - break - - if is_remove == 0: - retain_cells.append(cell1_name) - print cell1_name, ':', cell1_mask.get_binary_area(), ': retained' - -print '\ncells to be removed because of overlapping:' -print '\n'.join(remove_cells) - -print '\ntotal number of retained cells:', len(retain_cells) - -# plotting -colors = pt.random_color(len(cells.keys())) -bgImg = tf.imread(background_file_name) - -f = plt.figure(figsize=(10, 10)) -ax = f.add_subplot(111) -ax.imshow(ia.array_nor(bgImg), cmap='gray', vmin=0, vmax=0.5, interpolation='nearest') - -f2 = plt.figure(figsize=(10, 10)) -ax2 = f2.add_subplot(111) -ax2.imshow(np.zeros(bgImg.shape, dtype=np.uint8), vmin=0, vmax=1, cmap='gray', interpolation='nearest') - -i = 0 -for retain_cell in retain_cells: - cells[retain_cell].plot_binary_mask_border(plotAxis=ax, color=colors[i], borderWidth=1) - cells[retain_cell].plot_binary_mask_border(plotAxis=ax2, color=colors[i], borderWidth=1) - i += 1 -plt.show() - -# save figures -pt.save_figure_without_borders(f, os.path.join(save_folder, '2P_refined_ROIs_with_background.png'), dpi=300) -pt.save_figure_without_borders(f2, os.path.join(save_folder, '2P_refined_ROIs_without_background.png'), dpi=300) - -# save h5 file -save_file = h5py.File(save_file_name, 'w') -i = 0 -for retain_cell in retain_cells: - print retain_cell, ':', cells[retain_cell].get_binary_area() - - currGroup = save_file.create_group('cell' + ft.int2str(i, 4)) - currGroup.attrs['name'] = retain_cell - roiGroup = currGroup.create_group('roi') - cells[retain_cell].to_h5_group(roiGroup) - i += 1 - -for attr, value in dfile.attrs.iteritems(): - save_file.attrs[attr] = value - -save_file.close() -dfile.close() - - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/060_get_weighted_rois_and_surrounds.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/060_get_weighted_rois_and_surrounds.py deleted file mode 100644 index b31e405..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/060_get_weighted_rois_and_surrounds.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Jun 30 17:44:42 2015 - -@author: junz -""" - -import os -import numpy as np -import h5py -import tifffile as tf -import allensdk_internal.brain_observatory.mask_set as mask_set -import corticalmapping.core.ImageAnalysis as ia -import
corticalmapping.core.PlottingTools as pt -import scipy.ndimage as ni -import matplotlib.pyplot as plt - -plt.ioff() - -data_file_name = 'cells_refined.hdf5' -background_file_name = "corrected_mean_projection.tif" -save_folder = 'figures' - -overlap_threshold = 0.9 -surround_limit = [1, 8] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -print 'reading cells file ...' -data_f = h5py.File(data_file_name, 'r') - -cell_ns = data_f.keys() -cell_ns.sort() - -binary_mask_array = [] -weight_mask_array = [] - -for cell_n in cell_ns: - curr_roi = ia.ROI.from_h5_group(data_f[cell_n]['roi']) - binary_mask_array.append(curr_roi.get_binary_mask()) - weight_mask_array.append(curr_roi.get_weighted_mask()) - -data_f.close() -binary_mask_array = np.array(binary_mask_array) -weight_mask_array = np.array(weight_mask_array) -print 'starting mask_array shape:', weight_mask_array.shape - -print 'getting total mask ...' -total_mask = np.zeros((binary_mask_array.shape[1], binary_mask_array.shape[2]), dtype=np.uint8) -for curr_mask in binary_mask_array: - total_mask = np.logical_or(total_mask, curr_mask) -total_mask = np.logical_not(total_mask) - -plt.imshow(total_mask, interpolation='nearest') -plt.title('total_mask') -plt.show() - -print 'getting surround masks ...' -binary_surround_array = [] -for binary_center in binary_mask_array: - curr_surround = np.logical_xor(ni.binary_dilation(binary_center, iterations=surround_limit[1]), - ni.binary_dilation(binary_center, iterations=surround_limit[0])) - curr_surround = np.logical_and(curr_surround, total_mask).astype(np.uint8) - binary_surround_array.append(curr_surround) - # plt.imshow(curr_surround) - # plt.show() -binary_surround_array = np.array(binary_surround_array) - -print "saving rois ..." -center_areas = [] -surround_areas = [] -for mask_ind in range(binary_mask_array.shape[0]): - center_areas.append(np.sum(binary_mask_array[mask_ind].flat)) - surround_areas.append(np.sum(binary_surround_array[mask_ind].flat)) -roi_f = h5py.File('rois_and_traces.hdf5') -roi_f['masks_center'] = weight_mask_array -roi_f['masks_surround'] = binary_surround_array - -roi_f.close() -print 'minimum surround area:', min(surround_areas), 'pixels.' - -f = plt.figure(figsize=(10, 10)) -ax_center = f.add_subplot(211) -ax_center.hist(center_areas, bins=30) -ax_center.set_title('roi center area distribution') -ax_surround = f.add_subplot(212) -ax_surround.hist(surround_areas, bins=30) -ax_surround.set_title('roi surround area distribution') -plt.show() - -print 'plotting ...'
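The surround masks above are an annulus between two binary dilations of each center mask, with pixels belonging to any cell excluded. A standalone NumPy/SciPy sketch with a toy mask:

import numpy as np
import scipy.ndimage as ni

center = np.zeros((64, 64), dtype=bool)  # hypothetical single-cell binary mask
center[30:34, 30:34] = True
not_any_cell = np.ones_like(center)      # stand-in for the inverted total mask above
surround_limit = [1, 8]                  # inner / outer dilation iterations

annulus = np.logical_xor(ni.binary_dilation(center, iterations=surround_limit[1]),
                         ni.binary_dilation(center, iterations=surround_limit[0]))
surround = np.logical_and(annulus, not_any_cell).astype(np.uint8)
print(surround.sum(), 'surround pixels')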
-colors = pt.random_color(weight_mask_array.shape[0]) -bg = ia.array_nor(tf.imread('corrected_mean_projection.tif')) - -f_c_bg = plt.figure(figsize=(10, 10)) -ax_c_bg = f_c_bg.add_subplot(111) -ax_c_bg.imshow(bg, cmap='gray', vmin=0, vmax=0.5, interpolation='nearest') -f_c_nbg = plt.figure(figsize=(10, 10)) -ax_c_nbg = f_c_nbg.add_subplot(111) -ax_c_nbg.imshow(np.zeros(bg.shape,dtype=np.uint8),vmin=0,vmax=1,cmap='gray',interpolation='nearest') -f_s_nbg = plt.figure(figsize=(10, 10)) -ax_s_nbg = f_s_nbg.add_subplot(111) -ax_s_nbg.imshow(np.zeros(bg.shape,dtype=np.uint8),vmin=0,vmax=1,cmap='gray',interpolation='nearest') - -i = 0 -for mask_ind in range(binary_mask_array.shape[0]): - pt.plot_mask_borders(binary_mask_array[mask_ind], plotAxis=ax_c_bg, color=colors[i], borderWidth=1) - pt.plot_mask_borders(binary_mask_array[mask_ind], plotAxis=ax_c_nbg, color=colors[i], borderWidth=1) - pt.plot_mask_borders(binary_surround_array[mask_ind], plotAxis=ax_s_nbg, color=colors[i], borderWidth=1) - i += 1 - -plt.show() - -print 'saving figures ...' -pt.save_figure_without_borders(f_c_bg, os.path.join(save_folder, '2P_ROIs_with_background.png'), dpi=300) -pt.save_figure_without_borders(f_c_nbg, os.path.join(save_folder, '2P_ROIs_without_background.png'), dpi=300) -pt.save_figure_without_borders(f_s_nbg, os.path.join(save_folder, '2P_ROI_surrounds_background.png'), dpi=300) -f.savefig(os.path.join(save_folder, 'roi_area_distribution.pdf'), dpi=300) diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/070_get_raw_center_and_surround_traces.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/070_get_raw_center_and_surround_traces.py deleted file mode 100644 index 0e57582..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/070_get_raw_center_and_surround_traces.py +++ /dev/null @@ -1,129 +0,0 @@ -import os -import numpy as np -import h5py -import time -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import corticalmapping.core.FileTools as ft -import corticalmapping.NwbTools as nt -import matplotlib.pyplot as plt -from multiprocessing import Pool - -CHUNK_SIZE = 2000 -PROCESS_NUM = 5 - -def get_chunk_frames(frame_num, chunk_size): - chunk_num = frame_num // chunk_size - if frame_num % chunk_size > 0: - chunk_num = chunk_num + 1 - - print("total number of frames:", frame_num) - print("total number of chunks:", chunk_num) - - chunk_ind = [] - chunk_starts = [] - chunk_ends = [] - - for chunk_i in range(chunk_num): - chunk_ind.append(chunk_i) - chunk_starts.append(chunk_i * chunk_size) - - if chunk_i < chunk_num - 1: - chunk_ends.append((chunk_i + 1) * chunk_size) - else: - chunk_ends.append(frame_num) - - return zip(chunk_ind, chunk_starts, chunk_ends) - -def get_traces(params): - t0 = time.time() - - chunk_ind, chunk_start, chunk_end, nwb_path, data_path, curr_folder, center_array, surround_array = params - - nwb_f = h5py.File(nwb_path, 'r') - print('\nstart analyzing chunk: {}'.format(chunk_ind)) - curr_mov = nwb_f[data_path][chunk_start: chunk_end] - nwb_f.close() - - # print 'extracting traces' - curr_traces_center = np.empty((center_array.shape[0], curr_mov.shape[0]), dtype=np.float32) - curr_traces_surround = np.empty((center_array.shape[0], curr_mov.shape[0]), dtype=np.float32) - for i in range(center_array.shape[0]): - curr_center = 
ia.WeightedROI(center_array[i]) - curr_surround = ia.ROI(surround_array[i]) - curr_traces_center[i, :] = curr_center.get_weighted_trace_pixelwise(curr_mov) - - # scale surround trace to be similar to the center trace - mean_center_weight = curr_center.get_mean_weight() - curr_traces_surround[i, :] = curr_surround.get_binary_trace_pixelwise(curr_mov) * mean_center_weight - - # print 'saving chunk {} ...'.format(chunk_ind) - chunk_folder = os.path.join(curr_folder, 'chunks') - if not os.path.isdir(chunk_folder): - os.mkdir(chunk_folder) - chunk_f = h5py.File(os.path.join(chunk_folder, 'chunk_temp_' + ft.int2str(chunk_ind, 4) + '.hdf5')) - chunk_f['traces_center'] = curr_traces_center - chunk_f['traces_surround'] = curr_traces_surround - chunk_f.close() - - print('\n\t{:06d} seconds: chunk: {}; demixing finished.'.format(int(time.time() - t0), chunk_ind)) - - return None - -def run(): - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - plane_n = os.path.split(curr_folder)[1] - print(plane_n) - - print('getting masks ...') - rois_f = h5py.File('rois_and_traces.hdf5') - center_array = rois_f['masks_center'].value - surround_array = rois_f['masks_surround'].value - - print('\nanalyzing movie in chunks of size:', CHUNK_SIZE, 'frames.') - - nwb_folder = os.path.dirname(curr_folder) - nwb_fn = [f for f in os.listdir(nwb_folder) if f[-4:] == '.nwb'][0] - nwb_path = os.path.join(nwb_folder, nwb_fn) - print('\n' + nwb_path) - data_path = '/processing/motion_correction/MotionCorrection/' + plane_n + '/corrected/data' - - nwb_f = h5py.File(nwb_path, 'r') - total_frame = nwb_f[data_path].shape[0] - nwb_f.close() - - chunk_frames = get_chunk_frames(total_frame, CHUNK_SIZE) - chunk_params = [(cf[0], cf[1], cf[2], nwb_path, data_path, - curr_folder, center_array, surround_array) for cf in chunk_frames] - - p = Pool(PROCESS_NUM) - p.map(get_traces, chunk_params) - - chunk_folder = os.path.join(curr_folder, 'chunks') - chunk_fns = [f for f in os.listdir(chunk_folder) if f[0:11] == 'chunk_temp_'] - chunk_fns.sort() - print('\nreading chunk files ...') - print('\n'.join(chunk_fns)) - - traces_raw = [] - traces_surround = [] - - for chunk_fn in chunk_fns: - curr_chunk_f = h5py.File(os.path.join(chunk_folder, chunk_fn)) - traces_raw.append(curr_chunk_f['traces_center'].value) - traces_surround.append(curr_chunk_f['traces_surround'].value) - - print("saving ...") - traces_raw = np.concatenate(traces_raw, axis=1) - traces_surround = np.concatenate(traces_surround, axis=1) - rois_f['traces_center_raw'] = traces_raw - rois_f['traces_surround_raw'] = traces_surround - print('done.') - - -if __name__ == '__main__': - run() - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/090_get_neuropil_subtracted_traces.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/090_get_neuropil_subtracted_traces.py deleted file mode 100644 index 551768f..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/090_get_neuropil_subtracted_traces.py +++ /dev/null @@ -1,101 +0,0 @@ -import sys -import os -import h5py -import numpy as np -import corticalmapping.HighLevel as hl -import corticalmapping.core.FileTools as ft -import matplotlib.pyplot as plt - - -lam = 1. # 100.
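The trace-extraction script above splits the movie into fixed-size frame chunks, maps get_traces over a process pool, and writes one temporary HDF5 file per chunk before concatenating. The chunk bookkeeping on its own, for reference:

def get_chunk_frames(frame_num, chunk_size):
    # (index, start, end) triples covering frame_num frames; the last
    # chunk absorbs the remainder, mirroring the helper above.
    chunk_num = frame_num // chunk_size + (1 if frame_num % chunk_size else 0)
    return [(i, i * chunk_size, min((i + 1) * chunk_size, frame_num))
            for i in range(chunk_num)]

print(get_chunk_frames(5500, 2000))  # [(0, 0, 2000), (1, 2000, 4000), (2, 4000, 5500)]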
-plot_chunk_size = 5000 - - -def plot_traces_chunks(traces, labels, chunk_size, roi_ind): - """ - - :param traces: np.array, shape=[trace_type, t_num] - :param labels: - :param chunk_size: - :param figures_folder: - :param roi_ind: - :return: - """ - - t_num = traces.shape[1] - chunk_num = t_num // chunk_size - - chunks = [] - for chunk_ind in range(chunk_num): - chunks.append([chunk_ind * chunk_size, (chunk_ind + 1) * chunk_size]) - - if t_num % chunk_size != 0: - chunks.append([chunk_num * chunk_size, t_num]) - - v_max = np.amax(traces) - v_min = np.amin(traces) - - fig = plt.figure(figsize=(75, 20)) - fig.suptitle('neuropil subtraction for ROI: {}'.format(roi_ind)) - for chunk_ind, chunk in enumerate(chunks): - curr_ax = fig.add_subplot(len(chunks), 1, chunk_ind + 1) - for trace_ind in range(traces.shape[0]): - curr_ax.plot(traces[trace_ind, chunk[0]: chunk[1]], label=labels[trace_ind]) - - curr_ax.set_xlim([0, chunk_size]) - curr_ax.set_ylim([v_min, v_max * 1.2]) - curr_ax.legend() - - return fig - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -data_f = h5py.File('rois_and_traces.hdf5') -traces_raw = data_f['traces_center_raw'].value -traces_surround = data_f['traces_surround_raw'].value - -traces_subtracted = np.zeros(traces_raw.shape, np.float32) -ratio = np.zeros(traces_raw.shape[0], np.float32) -err = np.zeros(traces_raw.shape[0], np.float32) - -for i in range(traces_raw.shape[0]): - curr_trace_c = traces_raw[i] - curr_trace_s = traces_surround[i] - curr_r, curr_err, curr_trace_sub = hl.neural_pil_subtraction(curr_trace_c, curr_trace_s, lam=lam) - print "roi_%s \tr = %.4f; error = %.4f." % (ft.int2str(i, 5), curr_r, curr_err) - traces_subtracted[i] = curr_trace_sub - ratio[i] = curr_r - err[i] = curr_err - -print('\nplotting neuropil subtraction results ...') -figures_folder = 'figures/neuropil_subtraction_lam_{}'.format(lam) -if not os.path.isdir(figures_folder): - os.makedirs(figures_folder) -for roi_ind in range(traces_raw.shape[0]): - print('roi_{:04d}'.format(roi_ind)) - curr_traces = np.array([traces_raw[roi_ind], traces_surround[roi_ind], traces_subtracted[roi_ind]]) - curr_fig = plot_traces_chunks(traces=curr_traces, - labels=['center', 'surround', 'subtracted'], - chunk_size=plot_chunk_size, - roi_ind=roi_ind) - curr_fig.savefig(os.path.join(figures_folder, 'neuropil_subtraction_ROI_{:04d}.png'.format(roi_ind))) - curr_fig.clear() - plt.close(curr_fig) - -# wait for keyboard confirmation -msg = raw_input('Do you want to save? (y/n)\n') -while True: - if msg == 'y': - break - elif msg == 'n': - sys.exit('Stop process without saving.') - else: - msg = raw_input('Do you want to save?
(y/n)\n') - -data_f['traces_center_subtracted'] = traces_subtracted -data_f['neuropil_r'] = ratio -data_f['neuropil_err'] = err - -data_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/120_check_correlation.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/120_check_correlation.py deleted file mode 100644 index 65c3f07..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/120_check_correlation.py +++ /dev/null @@ -1,99 +0,0 @@ -import os -import h5py -import tifffile as tf -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.core.PlottingTools as pt -import corticalmapping.core.ImageAnalysis as ia - - -cor_thr = 0.8 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -def merge_pairs(pairs): - - total_set = set([]) - for pair in pairs: - total_set.update(set(pair)) - - all_nodes = list(total_set) - node_grps = [{n} for n in all_nodes] - - for pair in pairs: - - node0 = pair[0] - node1 = pair[1] - - for node_grp in node_grps: - if node0 in node_grp: - node_grp0 = node_grp - if node1 in node_grp: - node_grp1 = node_grp - - if node_grp0 != node_grp1: - node_grp0.update(node_grp1) - node_grps.remove(node_grp1) - - return node_grps - - -save_plot_dir = os.path.join(curr_folder, 'figures', 'dff_extraction') -if not os.path.isdir(save_plot_dir): - os.makedirs(save_plot_dir) - -bg = ia.array_nor(tf.imread('corrected_mean_projection.tif')) - -data_f = h5py.File('rois_and_traces.hdf5') -traces_subtracted = data_f['traces_center_subtracted'].value -masks = data_f['masks_center'].value - -f, axs = plt.subplots(1, 2, figsize=(16, 5)) - -cor_mat = np.corrcoef(traces_subtracted) -fig = axs[0].imshow(cor_mat, vmin=-1, vmax=1, cmap='jet', interpolation='nearest') -axs[0].set_title('correlation matrix') -f.colorbar(fig, ax=axs[0]) - -cors = cor_mat[np.tril_indices(cor_mat.shape[0], k=-1)] -cor_dist = axs[1].hist(cors, range=[-1., 1.], bins=40) -axs[1].set_title('correlation distribution') - -# cors = np.sort(cors) -# cor_thr = cors[int(cors.shape[0] * 0.99)] -# print('Cutoff threshold for correlation: {}'.format(cor_thr)) - -pos_cor_loc = np.where(cor_mat > cor_thr) - -roi_pairs = [] -for ind in range(len(pos_cor_loc[0])): - if pos_cor_loc[0][ind] < pos_cor_loc[1][ind]: - roi_pairs.append([pos_cor_loc[0][ind], pos_cor_loc[1][ind]]) -print(roi_pairs) - -roi_grps = merge_pairs(roi_pairs) -print roi_grps - -cor_grps = [] -for roi_grp in roi_grps: - grp_traces = traces_subtracted[list(roi_grp)] - grp_cors = np.corrcoef(grp_traces)[np.tril_indices(len(roi_grp), k=-1)] - cor_grps.append(np.mean(grp_cors)) - -cor_grps = np.array(cor_grps) -cor_scalars = [(c + 1) / 2 for c in cor_grps] -print cor_scalars -cor_colors = [pt.value_2_rgb(c, cmap='inferno') for c in cor_scalars] - -f_roi = plt.figure() -ax_roi = f_roi.add_subplot(111) -ax_roi.imshow(bg, vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') -for grp_ind, roi_grp in enumerate(roi_grps): - for roi_ind in roi_grp: - print roi_ind, cor_colors[grp_ind] - pt.plot_mask_borders(masks[roi_ind], plotAxis=ax_roi, color=cor_colors[grp_ind]) - -plt.show() - -data_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/old/100_get_dff_traces.py
b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/old/100_get_dff_traces.py deleted file mode 100644 index 3cd09ff..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_multi_channel_deepscope/within_plane_folder/old/100_get_dff_traces.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import h5py -import allensdk.brain_observatory.dff as dff -import numpy as np -import corticalmapping.HighLevel as hl -import corticalmapping.core.FileTools as ft - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_plot_dir = os.path.join(curr_folder, 'figures', 'dff_extraction') -if not os.path.isdir(save_plot_dir): - os.makedirs(save_plot_dir) - -data_f = h5py.File('rois_and_traces.hdf5') -traces_subtracted = data_f['traces_center_subtracted'].value - -traces_dff = dff.compute_dff(traces_subtracted, save_plot_dir=save_plot_dir, - mode_kernelsize=100, mean_kernelsize=100) -data_f['traces_center_dff'] = traces_dff -data_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/000_reorganize_data.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/000_reorganize_data.py deleted file mode 100644 index 9d83ccf..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/000_reorganize_data.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -import numpy as np -import tifffile as tf - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180328-M360495-deepscope\04" - -identifier = '04_' -plane_num = 5 -temporal_downsample_rate = 1 -frame_each_file = 2000 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -fns = np.array([f for f in os.listdir(data_folder) if f[-4:] == '.tif' and identifier in f]) -f_nums = [int(os.path.splitext(fn)[0].split('_')[1]) for fn in fns] -fns = fns[np.argsort(f_nums)] -print('total file number: {}'.format(len(fns))) - -# print('\n'.join(fns)) - -save_folders = [] -for i in range(plane_num): - curr_save_folder = os.path.join(data_folder, identifier, 'plane{}'.format(i)) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - save_folders.append(curr_save_folder) - -# frame_per_plane = len(fns) // plane_num -for plane_ind in range(plane_num): - print('\nprocessing plane: {}'.format(plane_ind)) - curr_fns = fns[plane_ind::plane_num] - - total_frames_down = len(curr_fns) // temporal_downsample_rate - curr_fns = curr_fns[: total_frames_down * temporal_downsample_rate].reshape((total_frames_down, temporal_downsample_rate)) - - # print curr_fns - - print('current file ind: 000') - curr_file_ind = 0 - curr_frame_ind = 0 - curr_mov = [] - - for fgs in curr_fns: - - curr_frame = np.mean([tf.imread(os.path.join(data_folder, fn)) for fn in fgs], axis=0).astype(np.int16) - curr_frame = curr_frame.transpose()[::-1, ::-1] - - if curr_frame_ind < frame_each_file: - curr_mov.append(curr_frame) - curr_frame_ind = curr_frame_ind + 1 - else: - curr_mov = np.array(curr_mov, dtype=np.int16) - save_name = 'plane{}_{:03d}.tif'.format(plane_ind, curr_file_ind) - tf.imsave(os.path.join(save_folders[plane_ind], save_name), curr_mov) - curr_file_ind += 1 - curr_frame_ind = 1 - curr_mov = [curr_frame] - print('current file ind: {:03d}'.format(curr_file_ind)) - - curr_mov = np.array(curr_mov, dtype=np.int16) - save_name = 
'plane{}_{:03d}.tif'.format(plane_ind, curr_file_ind) - tf.imsave(os.path.join(save_folders[plane_ind], save_name), curr_mov) - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/001_get_vasculature_map.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/001_get_vasculature_map.py deleted file mode 100644 index d714731..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/001_get_vasculature_map.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import skimage.io as io -import matplotlib.pyplot as plt -import corticalmapping.core.ImageAnalysis as ia - -vasmap_wf_path = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180328-M360495-deepscope\Widefield.tif" - -vasmap_2p_zoom1_path = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180328-M360495-deepscope\01\01_00001.tif" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -vasmap_wf = io.imread(vasmap_wf_path, as_grey=True) -vasmap_wf = vasmap_wf.transpose()[::-1, ::-1] - -vasmap_2p_zoom1 = tf.imread(vasmap_2p_zoom1_path).astype(np.float32) -vasmap_2p_zoom1 = np.mean(vasmap_2p_zoom1, axis=0) -vasmap_2p_zoom1 = vasmap_2p_zoom1.transpose()[::-1, ::-1] - -f = plt.figure(figsize=(12, 5)) -ax_wf = f.add_subplot(121) -ax_wf.imshow(ia.array_nor(vasmap_wf), vmin=0., vmax=1., cmap='gray', interpolation='nearest') -ax_wf.set_title('vasmap wide field') -ax_wf.set_axis_off() -ax_2p = f.add_subplot(122) -ax_2p.imshow(ia.array_nor(vasmap_2p_zoom1), vmin=0., vmax=0.15, cmap='gray', interpolation='nearest') -ax_2p.set_title('vasmap 2p zoom1') -ax_2p.set_axis_off() - -plt.show() - -tf.imsave('vasmap_wf.tif', vasmap_wf) -tf.imsave('vasmap_2p_zoom1.tif', vasmap_2p_zoom1) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/010_motion_correction.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/010_motion_correction.py deleted file mode 100644 index 906f6d7..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/010_motion_correction.py +++ /dev/null @@ -1,46 +0,0 @@ -import os -import stia.motion_correction as mc - -def run(): - - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180328-M360495-deepscope\04\04_" - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - plane_ns = [p for p in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, p))] - plane_ns.sort() - print('planes:') - print('\n'.join(plane_ns)) - - for plane_n in plane_ns: - print('\nprocessing plane: {}'.format(plane_n)) - plane_folder = os.path.join(data_folder, plane_n) - f_paths, _ = mc.motion_correction(input_folder=plane_folder, - input_path_identifier='.tif', - process_num=3, - output_folder=os.path.join(plane_folder, 'corrected'), - anchor_frame_ind_chunk=10, - anchor_frame_ind_projection=0, - iteration_chunk=10, - iteration_projection=10, - max_offset_chunk=(50., 50.), - max_offset_projection=(50., 50.), - align_func=mc.phase_correlation, - preprocessing_type=0, - fill_value=0.) 
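The motion correction above aligns frames with align_func=mc.phase_correlation. A plain-NumPy sketch of the idea behind phase correlation (the actual implementation in stia.motion_correction may differ, e.g. with subpixel refinement):

import numpy as np

def phase_correlation_shift(ref, img):
    # Estimate the integer (row, col) shift of img relative to ref from the
    # peak of the normalized cross-power spectrum.
    F = np.conj(np.fft.fft2(ref)) * np.fft.fft2(img)
    r = np.fft.ifft2(F / (np.abs(F) + 1e-12)).real
    shifts = np.array(np.unravel_index(np.argmax(r), r.shape))
    half = np.array(ref.shape) // 2
    shifts[shifts > half] -= np.array(ref.shape)[shifts > half]  # wrap to signed offsets
    return tuple(shifts)

ref = np.random.rand(64, 64)
img = np.roll(np.roll(ref, 3, axis=0), -5, axis=1)
print(phase_correlation_shift(ref, img))  # (3, -5)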
- - offsets_path = os.path.join(plane_folder, 'corrected', 'correction_offsets.hdf5') - - mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=zip(f_paths, f_paths), - output_folder=os.path.join(plane_folder, 'corrected'), - process_num=3, - fill_value=0., - avi_downsample_rate=20, - is_equalizing_histogram=True) - -if __name__ == "__main__": - run() - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/020_downsample_from_server.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/020_downsample_from_server.py deleted file mode 100644 index 9fa58d7..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/020_downsample_from_server.py +++ /dev/null @@ -1,41 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180328-M360495-deepscope\04\04_" -xy_downsample_rate = 2 -t_downsample_rate = 10 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -plane_ns = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f)) and f[:5] == 'plane'] -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('\nprocessing plane: {}'.format(plane_n)) - plane_folder = os.path.join(data_folder, plane_n, 'corrected') - - f_ns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - f_ns.sort() - print('\n'.join(f_ns)) - - mov_d = [] - - for f_n in f_ns: - print('processing {} ...'.format(f_n)) - curr_mov = tf.imread(os.path.join(plane_folder, f_n)) - curr_mov_d = ia.rigid_transform_cv2(img=curr_mov, zoom=(1.
/ xy_downsample_rate)) - curr_mov_d = ia.z_downsample(curr_mov_d, downSampleRate=t_downsample_rate) - mov_d.append(curr_mov_d) - - mov_d = np.concatenate(mov_d, axis=0) - save_n = os.path.split(data_folder)[1] + '_' + plane_n + '_downsampled.tif' - save_folder = os.path.join(curr_folder, plane_n) - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - tf.imsave(os.path.join(save_folder, save_n), mov_d) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/030_get_movie_data.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/030_get_movie_data.py deleted file mode 100644 index ba3ee27..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/030_get_movie_data.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -import h5py -import numpy as np -import skimage.external.tifffile as tf - -file_prefix = '180328_M360495_04' -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180328-M360495-deepscope\04\04_" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -plane_fns = [f for f in os.listdir(data_folder) if f[:5] == 'plane'] -plane_fns.sort() -print('\n'.join(plane_fns)) - -data_f = h5py.File(file_prefix + '_2p_movies.hdf5') - -for plane_fn in plane_fns: - print('\nprocessing {} ...'.format(plane_fn)) - plane_folder = os.path.join(data_folder, plane_fn, 'corrected') - mov_fns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - mov_fns.sort() - print('\n'.join(mov_fns)) - - frame_num_tot = 0 - x = None - y = None - z = 0 - for mov_fn in mov_fns: - print('reading {} ...'.format(mov_fn)) - curr_z, curr_y, curr_x = tf.imread(os.path.join(plane_folder, mov_fn)).shape - - if y is None: - y = curr_y - else: - if y != curr_y: - raise ValueError('y dimension ({}) of file "{}" does not agree with previous file(s) ({}).' - .format(curr_y, mov_fn, y)) - - if x is None: - x = curr_x - else: - if x != curr_x: - raise ValueError('x dimension ({}) of file "{}" does not agree with previous file(s) ({}).' - .format(curr_x, mov_fn, x)) - - z = z + curr_z - - dset = data_f.create_dataset(plane_fn, (z, y, x), dtype=np.int16, compression='lzf') - - start_frame = 0 - end_frame = 0 - for mov_fn in mov_fns: - print('reading {} ...'.format(mov_fn)) - curr_mov = tf.imread(os.path.join(plane_folder, mov_fn)) - end_frame = start_frame + curr_mov.shape[0] - dset[start_frame : end_frame] = curr_mov - start_frame = end_frame - - dset.attrs['conversion'] = 1. - dset.attrs['resolution'] = 1. 
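030_get_movie_data.py above assembles the per-plane TIFF stacks into one HDF5 dataset in two passes: first summing frame counts and checking x/y consistency, then streaming frames into a pre-allocated lzf-compressed dataset. The same pattern with stand-in arrays:

import h5py
import numpy as np

shapes = [(2000, 512, 512), (2000, 512, 512), (1500, 512, 512)]  # hypothetical stacks
z = sum(s[0] for s in shapes)
y, x = shapes[0][1:]
assert all(s[1:] == (y, x) for s in shapes)   # mirrors the ValueError checks above

with h5py.File('movies_sketch.hdf5', 'w') as f:
    dset = f.create_dataset('plane0', (z, y, x), dtype=np.int16, compression='lzf')
    start = 0
    for s in shapes:
        chunk = np.zeros(s, dtype=np.int16)   # stand-in for tf.imread(...)
        dset[start:start + s[0]] = chunk
        start += s[0]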
- - dset.attrs['unit'] = 'arbitrary_unit' - -data_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/045_get_mmap_files_for_caiman_from_tiff.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/045_get_mmap_files_for_caiman_from_tiff.py deleted file mode 100644 index 3d6f832..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/045_get_mmap_files_for_caiman_from_tiff.py +++ /dev/null @@ -1,54 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import h5py - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180328-M360495-deepscope\04\04_" -base_name = '180328_M360495_04' -t_downsample_rate = 5 - -plane_ns = [p for p in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, p))] -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('\nprocessing {} ...'.format(plane_n)) - - plane_folder = os.path.join(data_folder, plane_n, 'corrected') - os.chdir(plane_folder) - - f_ns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - f_ns.sort() - print('\n'.join(f_ns)) - - mov_join = [] - for f_n in f_ns: - - curr_mov = tf.imread(os.path.join(plane_folder, f_n)) - - if curr_mov.shape[0] % t_downsample_rate != 0: - print('the frame number of {} ({}) is not divisible by t_downsample_rate ({}).' - .format(f_n, curr_mov.shape[0], t_downsample_rate)) - - curr_mov_d = ia.z_downsample(curr_mov, downSampleRate=t_downsample_rate) - mov_join.append(curr_mov_d) - - mov_join = np.concatenate(mov_join, axis=0) - add_to_mov = 10 - np.amin(mov_join) - - save_name = '{}_d1_{}_d2_{}_d3_1_order_C_frames_{}_.mmap'\ - .format(base_name, mov_join.shape[2], mov_join.shape[1], mov_join.shape[0]) - - mov_join = mov_join.reshape((mov_join.shape[0], mov_join.shape[1] * mov_join.shape[2]), order='F').transpose() - mov_join_mmap = np.memmap(os.path.join(plane_folder, save_name), shape=mov_join.shape, order='C', - dtype=np.float32, mode='w+') - mov_join_mmap[:] = mov_join + add_to_mov - mov_join_mmap.flush() - del mov_join_mmap - - save_file = h5py.File(os.path.join(plane_folder, 'caiman_segmentation_results.hdf5')) - save_file['bias_added_to_movie'] = add_to_mov - save_file.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/050_show_mmap_movie.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/050_show_mmap_movie.py deleted file mode 100644 index 0cce508..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/050_show_mmap_movie.py +++ /dev/null @@ -1,39 +0,0 @@ -import sys; print('Python %s on %s' % (sys.version, sys.platform)) -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import os -import numpy as np -import caiman as cm - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180212-M360495-deepscope\2p_movies\04\04_" -plane_n = 'plane0' - -plane_folder = os.path.join(data_folder, plane_n, 'corrected') -os.chdir(plane_folder) - -fn = [f for f in os.listdir(plane_folder) if f[-5:] == '.mmap'] -if len(fn) > 1: - print('\n'.join(fn)) - raise LookupError('more than one file found.') -elif len(fn) == 0: - raise LookupError('no file found.')
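The mmap written by 045 above stores the movie as a (pixels, frames) C-order float32 array: each frame is flattened Fortran-style, the block is transposed, and the geometry is encoded in the file name. A layout sketch with a small hypothetical movie:

import numpy as np

mov = np.random.rand(100, 64, 64).astype(np.float32)   # hypothetical (T, H, W) movie
T, H, W = mov.shape

flat = mov.reshape((T, H * W), order='F').transpose()  # -> (pixels, frames)
name = 'sketch_d1_{}_d2_{}_d3_1_order_C_frames_{}_.mmap'.format(W, H, T)
mm = np.memmap(name, shape=flat.shape, order='C', dtype=np.float32, mode='w+')
mm[:] = flat + (10. - mov.min())   # keep values strictly positive, as above
mm.flush()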
-else: - fn = fn[0] - -cm.load(fn).play(fr=30,magnification=1,gain=2.) - -# fn_parts = fn.split('_') -# d1 = int(fn_parts[fn_parts.index('d1') + 1]) # column, x -# d2 = int(fn_parts[fn_parts.index('d2') + 1]) # row, y -# d3 = int(fn_parts[fn_parts.index('d3') + 1]) # channel -# d4 = int(fn_parts[fn_parts.index('frames') + 1]) # frame, T -# order = fn_parts[fn_parts.index('order') + 1] -# -# print('playing {} ...'.format(fn)) -# -# mov = np.memmap(filename=fn, shape=(d1, d2, d4), order=order, dtype=np.float32, mode='r') -# mov = mov.transpose((2, 1, 0)) -# -# cm.movie(mov).play(fr=30,magnification=1,gain=2.) - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/060_caiman_segmentation.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/060_caiman_segmentation.py deleted file mode 100644 index 3623654..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/060_caiman_segmentation.py +++ /dev/null @@ -1,116 +0,0 @@ -import sys; print('Python %s on %s' % (sys.version, sys.platform)) -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import os -import numpy as np -import caiman as cm -import matplotlib.pyplot as plt -from caiman.source_extraction.cnmf import cnmf as cnmf -import h5py -from shutil import copyfile - -def run(): - - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180328-M360495-deepscope\04\04_" - play_movie = False - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - - plane_ns = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f)) and f[:5] == 'plane'] - plane_ns.sort() - print('planes:') - print('\n'.join(plane_ns)) - - # %% start cluster - c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=3, single_thread=False) - - for plane_n in plane_ns: - - print('\nsegmenting plane: {}'.format(plane_n)) - - plane_folder = os.path.join(data_folder, plane_n, 'corrected') - os.chdir(plane_folder) - - fn = [f for f in os.listdir(plane_folder) if f[-5:] == '.mmap'] - if len(fn) > 1: - print('\n'.join(fn)) - raise LookupError('more than one file found.') - elif len(fn) == 0: - raise LookupError('no file found.') - else: - fn = fn[0] - - fn_parts = fn.split('_') - d1 = int(fn_parts[fn_parts.index('d1') + 1]) # column, x - d2 = int(fn_parts[fn_parts.index('d2') + 1]) # row, y - d3 = int(fn_parts[fn_parts.index('d3') + 1]) # channel - d4 = int(fn_parts[fn_parts.index('frames') + 1]) # frame, T - order = fn_parts[fn_parts.index('order') + 1] - - print('loading {} ...'.format(fn)) - - mov = np.memmap(filename=fn, shape=(d1, d2, d4), order=order, dtype=np.float32, mode='r') - mov = mov.transpose((2, 1, 0)) - - print('shape of joined movie: {}.'.format(mov.shape)) - - #%% play movie, press q to quit - if play_movie: - cm.movie(mov).play(fr=50,magnification=1,gain=2.) - - #%% movie cannot be negative! - mov_min = float(np.amin(mov)) - print('minimum pixel value: {}.'.format(mov_min)) - if mov_min < 0: - raise Exception('Movie too negative, add_to_movie should be larger') - - #%% correlation image.
From here infer neuron size and density - Cn = cm.movie(mov).local_correlations(swap_dim=False) - plt.imshow(Cn, cmap='gray') - plt.show() - - K = 100 # number of neurons expected per patch - gSig = [5, 5] # expected half size of neurons - merge_thresh = 0.9 # merging threshold, max correlation allowed - p = 2 # order of the autoregressive system - cnm = cnmf.CNMF(n_processes, - k=10, # number of neurons expected per patch - gSig=[5, 5] , # expected half size of neurons - merge_thresh=0.9, # merging threshold, max correlation allowed - p=2, # order of the autoregressive system - dview=dview, - Ain=None, - method_deconvolution='oasis', - rolling_sum = False, - method_init='sparse_nmf', - alpha_snmf=10e1, - ssub=1, - tsub=1, - p_ssub=1, - p_tsub=1, - rf=256, # half-size of the patches in pixels - border_pix=20, - do_merge=False) - cnm = cnm.fit(mov) - A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn - #%% - crd = cm.utils.visualization.plot_contours(cnm.A, Cn) - plt.show() - # input("Press enter to continue ...") - - roi_num = cnm.A.shape[1] - save_fn = h5py.File('caiman_segmentation_results.hdf5') - bias = save_fn['bias_added_to_movie'].value - save_fn['masks'] = np.array(cnm.A.todense()).T.reshape((roi_num, 512, 512), order='F') - save_fn['traces'] = cnm.C - bias - save_fn.close() - - copyfile(os.path.join(plane_folder, 'caiman_segmentation_results.hdf5'), - os.path.join(curr_folder, plane_n, 'caiman_segmentation_results.hdf5')) - - plt.close('all') - - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/070_generate_nwb.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/070_generate_nwb.py deleted file mode 100644 index 0703a70..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/070_generate_nwb.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import corticalmapping.NwbTools as nt - -date_recorded = '180328' -mouse_id = '360495' -sess_num = '04' - -experimenter = 'Jun' -genotype = 'Vipr2-IRES2-Cre-neo' -sex = 'male' -age = '173' -indicator = 'GCaMP6s' -imaging_rate = 37. 
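The segmentation and playback scripts above recover the movie geometry by tokenizing the mmap file name. In isolation (the name follows the pattern produced by 045; the frame count here is hypothetical):

fn = '180328_M360495_04_d1_512_d2_512_d3_1_order_C_frames_10000_.mmap'
parts = fn.split('_')

d1 = int(parts[parts.index('d1') + 1])          # columns, x
d2 = int(parts[parts.index('d2') + 1])          # rows, y
frames = int(parts[parts.index('frames') + 1])  # T
order = parts[parts.index('order') + 1]         # 'C'
print(d1, d2, frames, order)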
-imaging_depth = '250/200/150/100/50 microns' -imaging_location = 'visual cortex' -imaging_device = 'DeepScope' -imaging_excitation_lambda = '940 nanometers' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -notebook_path = os.path.join(curr_folder, 'notebook.txt') -with open(notebook_path, 'r') as ff: - notes = ff.read() - -general = nt.DEFAULT_GENERAL -general['experimenter'] = experimenter -general['subject']['subject_id'] = mouse_id -general['subject']['genotype'] = genotype -general['subject']['sex'] = sex -general['subject']['age'] = age -general['optophysiology'].update({'imaging_plane_1': {}}) -general['optophysiology']['imaging_plane_1'].update({'indicator': indicator}) -general['optophysiology']['imaging_plane_1'].update({'imaging_rate': imaging_rate}) -general['optophysiology']['imaging_plane_1'].update({'imaging_depth': imaging_depth}) -general['optophysiology']['imaging_plane_1'].update({'location': imaging_location}) -general['optophysiology']['imaging_plane_1'].update({'device': imaging_device}) -general['optophysiology']['imaging_plane_1'].update({'excitation_lambda': imaging_excitation_lambda}) -general['notes'] = notes - -file_name = date_recorded + '_M' + mouse_id + '_' + sess_num + '.nwb' - -rf = nt.RecordedFile(os.path.join(curr_folder, file_name), identifier=file_name[:-4], description='') -rf.add_general(general=general) -rf.close() - - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/071_add_vasmaps.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/071_add_vasmaps.py deleted file mode 100644 index d933a14..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/071_add_vasmaps.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import corticalmapping.NwbTools as nt -import matplotlib.pyplot as plt -import tifffile as tf - - -vasmap_name_wf = 'vasmap_wf.tif' -vasmap_name_2p_zoom1 = 'vasmap_2p_zoom1.tif' -# vasmap_name_2p_zoom4 = 'vasmap_2p_zoom4.tif' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -vasmap_wf = tf.imread(vasmap_name_wf) -vasmap_2p_zoom1 = tf.imread(vasmap_name_2p_zoom1) -# vasmap_2p_zoom4 = tf.imread(vasmap_name_2p_zoom4) - -# f = plt.figure(figsize=(15, 7)) -# ax1 = f.add_subplot(121) -# ax1.imshow(vasmap_wf, cmap='gray', interpolation='nearest') -# ax1.set_title('wide field surface vasculature') -# ax2 = f.add_subplot(122) -# ax2.imshow(vasmap_2p_zoom1, cmap='gray', interpolation='nearest') -# ax2.set_title('two photon surface vasculature') -# plt.show() - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] - -nwb_f = nt.RecordedFile(nwb_fn) -nwb_f.add_acquisition_image('surface_vas_map_wf', vasmap_wf, - description='wide field surface vasculature map through cranial window') -nwb_f.add_acquisition_image('surface_vas_map_2p_zoom1', vasmap_2p_zoom1, - description='2-photon surface vasculature map through cranial window, zoom 1') -# nwb_f.add_acquisition_image('surface_vas_map_2p_zoom2', vasmap_2p_zoom4, -# description='2-photon surface vasculature map through cranial window, zoom 4') -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/080_add_sync_data.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/080_add_sync_data.py deleted file mode 100644 index 642dfb4..0000000 --- 
a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/080_add_sync_data.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import corticalmapping.NwbTools as nt - -record_date = '180328' -mouse_id = '360495' -session_id = '04' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = record_date + '_M' + mouse_id + '_' + session_id + '.nwb' - -sync_fn = [f for f in os.listdir(curr_folder) if f[-3:] == '.h5' and record_date in f and 'M' + mouse_id in f] -if len(sync_fn) == 0: - raise LookupError('Did not find sync .h5 file.') -elif len(sync_fn) > 1: - raise LookupError('More than one sync .h5 file found.') -else: - sync_fn = sync_fn[0] - -nwb_f = nt.RecordedFile(nwb_fn) -nwb_f.add_sync_data(sync_fn) -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/090_add_image_data.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/090_add_image_data.py deleted file mode 100644 index 1b62215..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/090_add_image_data.py +++ /dev/null @@ -1,53 +0,0 @@ -import os -import h5py -import corticalmapping.NwbTools as nt - -dset_ns = ['plane0', 'plane1', 'plane2', 'plane3', 'plane4'] -imaging_depths = [250, 200, 150, 100, 50] -temporal_downsample_rate = 1 -pixel_size = 0.0000002 # meter, 0.2 micron, deepscope 12K Hz scanner, zoom 4 - -description = '2-photon imaging data' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) -ts_2p_tot = nwb_f.file_pointer['/acquisition/timeseries/digital_2p_vsync_rise/timestamps'].value -print('total 2p timestamps count: {}'.format(len(ts_2p_tot))) - -mov_fn = os.path.splitext(nwb_fn)[0] + '_2p_movies.hdf5' -mov_f = h5py.File(mov_fn, 'r') - -for mov_i, mov_dn in enumerate(dset_ns): - - if mov_dn is not None: - - curr_dset = mov_f[mov_dn] - if mov_dn is not None: - mov_ts = ts_2p_tot[mov_i::len(dset_ns)] - print('\n{}: total 2p timestamps count: {}'.format(mov_dn, len(mov_ts))) - - mov_ts_d = mov_ts[::temporal_downsample_rate] - print('{}: downsampled 2p timestamps count: {}'.format(mov_dn, len(mov_ts_d))) - print('{}: downsampled 2p movie frame num: {}'.format(mov_dn, curr_dset.shape[0])) - - # if len(mov_ts_d) == curr_dset.shape[0]: - # pass - # elif len(mov_ts_d) == curr_dset.shape[0] + 1: - # mov_ts_d = mov_ts_d[0: -1] - # else: - # raise ValueError('the timestamp count of {} movie ({}) does not equal (or is not greater by one) ' - # 'the frame count in the movie ({})'.format(mov_dn, len(mov_ts_d), curr_dset.shape[0])) - mov_ts_d = mov_ts_d[:curr_dset.shape[0]] - - curr_description = '{}. 
Imaging depth: {} micron.'.format(description, imaging_depths[mov_i]) - nwb_f.add_acquired_image_series_as_remote_link('2p_movie_' + mov_dn, image_file_path=mov_fn, - dataset_path=mov_dn, timestamps=mov_ts_d, - description=curr_description, comments='', - data_format='zyx', pixel_size=[pixel_size, pixel_size], - pixel_size_unit='meter') - -mov_f.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/110_add_motion_correction_module.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/110_add_motion_correction_module.py deleted file mode 100644 index a75dd4a..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/110_add_motion_correction_module.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import h5py -import corticalmapping.NwbTools as nt - -movie_2p_fn = '180328_M360495_04_2p_movies.hdf5' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -input_parameters = [] - -for i in range(5): - - plane_n = 'plane{}'.format(i) - - offsets_path = os.path.join(plane_n, 'correction_offsets.hdf5') - offsets_f = h5py.File(offsets_path) - offsets_keys = offsets_f.keys() - if 'path_list' in offsets_keys: - offsets_keys.remove('path_list') - - offsets_keys.sort() - offsets = [] - for offsets_key in offsets_keys: - offsets.append(offsets_f[offsets_key].value) - offsets = np.concatenate(offsets, axis=0) - offsets = np.array(zip(offsets[:, 1], offsets[:, 0])) - offsets_f.close() - - mean_projection = tf.imread(os.path.join(plane_n, 'corrected_mean_projection.tif')) - max_projection = tf.imread(os.path.join(plane_n, 'corrected_max_projection.tif')) - - input_dict = {'field_name': plane_n, - 'original_timeseries_path': '/acquisition/timeseries/2p_movie_plane' + str(i), - 'corrected_file_path': movie_2p_fn, - 'corrected_dataset_path': plane_n, - 'xy_translation_offsets': offsets, - 'mean_projection': mean_projection, - 'max_projection': max_projection, - 'description': '', - 'comments': '', - 'source': ''} - - input_parameters.append(input_dict) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -nwb_f.add_muliple_dataset_to_motion_correction_module(input_parameters=input_parameters, - module_name='motion_correction') -nwb_f.close() - - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/120_add_rois_and_traces_caiman_segmentation.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/120_add_rois_and_traces_caiman_segmentation.py deleted file mode 100644 index 68a3f3c..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/120_add_rois_and_traces_caiman_segmentation.py +++ /dev/null @@ -1,162 +0,0 @@ -import os -import h5py -import numpy as np -import matplotlib.pyplot as plt -import tifffile as tf -import corticalmapping.NwbTools as nt -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -plane_ns = ['plane0', 'plane1', 'plane2', 'plane3', 'plane4'] -plane_depths = [250, 200, 150, 100, 50] - -def add_rois_and_traces(data_folder, nwb_f, plane_n, imaging_depth, - mov_path='/processing/motion_correction/MotionCorrection'): - - mov_grp = 
nwb_f.file_pointer[mov_path + '/' + plane_n + '/corrected'] - - data_f = h5py.File(os.path.join(data_folder, 'rois_and_traces.hdf5'), 'r') - mask_arr_c = data_f['masks_center'].value - mask_arr_s = data_f['masks_surround'].value - traces_center_raw = data_f['traces_center_raw'].value - # traces_center_demixed = data_f['traces_center_demixed'].value - traces_center_subtracted = data_f['traces_center_subtracted'].value - # traces_center_dff = data_f['traces_center_dff'].value - traces_surround_raw = data_f['traces_surround_raw'].value - neuropil_r = data_f['neuropil_r'].value - neuropil_err = data_f['neuropil_err'].value - data_f.close() - - - if traces_center_raw.shape[1] != mov_grp['num_samples'].value: - raise ValueError('number of trace time points ({}) does not match frame number of ' - 'corresponding movie ({}).'.format(traces_center_raw.shape[1], mov_grp['num_samples'].value)) - - rf_img = tf.imread(os.path.join(data_folder, 'corrected_mean_projection.tif')) - - print 'adding segmentation results ...' - rt_mo = nwb_f.create_module('rois_and_traces_' + plane_n) - rt_mo.set_value('imaging_depth_micron', imaging_depth) - is_if = rt_mo.create_interface('ImageSegmentation') - is_if.create_imaging_plane('imaging_plane', description='') - is_if.add_reference_image('imaging_plane', 'mean_projection', rf_img) - - for i in range(mask_arr_c.shape[0]): - curr_cen = mask_arr_c[i] - curr_cen_n = 'roi_' + ft.int2str(i, 4) - curr_cen_roi = ia.WeightedROI(curr_cen) - curr_cen_pixels_yx = curr_cen_roi.get_pixel_array() - curr_cen_pixels_xy = np.array([curr_cen_pixels_yx[:, 1], curr_cen_pixels_yx[:, 0]]).transpose() - is_if.add_roi_mask_pixels(image_plane='imaging_plane', roi_name=curr_cen_n, desc='', - pixel_list=curr_cen_pixels_xy, weights=curr_cen_roi.weights, width=512, height=512) - - curr_sur = mask_arr_s[i] - curr_sur_n = 'surround_' + ft.int2str(i, 4) - curr_sur_roi = ia.ROI(curr_sur) - curr_sur_pixels_yx = curr_sur_roi.get_pixel_array() - curr_sur_pixels_xy = np.array([curr_sur_pixels_yx[:, 1], curr_sur_pixels_yx[:, 0]]).transpose() - is_if.add_roi_mask_pixels(image_plane='imaging_plane', roi_name=curr_sur_n, desc='', - pixel_list=curr_sur_pixels_xy, weights=None, width=512, height=512) - is_if.finalize() - - - - trace_f_if = rt_mo.create_interface('Fluorescence') - seg_if_path = '/processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane' - # print seg_if_path - ts_path = mov_path + '/' + plane_n + '/corrected' - - print 'adding center fluorescence raw' - trace_raw_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_raw') - trace_raw_ts.set_data(traces_center_raw, unit='au', conversion=np.nan, resolution=np.nan) - trace_raw_ts.set_value('data_format', 'roi (row) x time (column)') - trace_raw_ts.set_value('data_range', '[-8192, 8191]') - trace_raw_ts.set_description('fluorescence traces extracted from the center region of each roi') - trace_raw_ts.set_time_as_link(ts_path) - trace_raw_ts.set_value_as_link('segmentation_interface', seg_if_path) - roi_names = ['roi_' + ft.int2str(ind, 4) for ind in range(traces_center_raw.shape[0])] - trace_raw_ts.set_value('roi_names', roi_names) - trace_raw_ts.set_value('num_samples', traces_center_raw.shape[1]) - trace_f_if.add_timeseries(trace_raw_ts) - trace_raw_ts.finalize() - - print 'adding neuropil fluorescence raw' - trace_sur_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_surround_raw') - trace_sur_ts.set_data(traces_surround_raw, unit='au', conversion=np.nan, resolution=np.nan) - trace_sur_ts.set_value('data_format', 
'roi (row) x time (column)') - trace_sur_ts.set_value('data_range', '[-8192, 8191]') - trace_sur_ts.set_description('neuropil traces extracted from the surround region of each roi') - trace_sur_ts.set_time_as_link(ts_path) - trace_sur_ts.set_value_as_link('segmentation_interface', seg_if_path) - sur_names = ['surround_' + ft.int2str(ind, 4) for ind in range(traces_center_raw.shape[0])] - trace_sur_ts.set_value('roi_names', sur_names) - trace_sur_ts.set_value('num_samples', traces_surround_raw.shape[1]) - trace_f_if.add_timeseries(trace_sur_ts) - trace_sur_ts.finalize() - - roi_center_n_path = '/processing/rois_and_traces_' + plane_n + '/Fluorescence/f_center_raw/roi_names' - # print 'adding center fluorescence demixed' - # trace_demix_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_demixed') - # trace_demix_ts.set_data(traces_center_demixed, unit='au', conversion=np.nan, resolution=np.nan) - # trace_demix_ts.set_value('data_format', 'roi (row) x time (column)') - # trace_demix_ts.set_description('center traces after overlapping demixing for each roi') - # trace_demix_ts.set_time_as_link(mov_path + '/' + plane_n + '/corrected') - # trace_demix_ts.set_value_as_link('segmentation_interface', seg_if_path) - # trace_demix_ts.set_value('roi_names', roi_names) - # trace_demix_ts.set_value('num_samples', traces_center_demixed.shape[1]) - # trace_f_if.add_timeseries(trace_demix_ts) - # trace_demix_ts.finalize() - - print 'adding center fluorescence after neuropil subtraction' - trace_sub_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_subtracted') - trace_sub_ts.set_data(traces_center_subtracted, unit='au', conversion=np.nan, resolution=np.nan) - trace_sub_ts.set_value('data_format', 'roi (row) x time (column)') - trace_sub_ts.set_description('center traces after overlap demixing and neuropil subtraction for each roi') - trace_sub_ts.set_time_as_link(mov_path + '/' + plane_n + '/corrected') - trace_sub_ts.set_value_as_link('segmentation_interface', seg_if_path) - trace_sub_ts.set_value_as_link('roi_names', roi_center_n_path) - trace_sub_ts.set_value('num_samples', traces_center_subtracted.shape[1]) - trace_sub_ts.set_value('r', neuropil_r, dtype='float32') - trace_sub_ts.set_value('rmse', neuropil_err, dtype='float32') - trace_sub_ts.set_comments('value "r": neuropil contribution ratio for each roi. ' - 'value "rmse": RMS error of neuropil subtraction for each roi') - trace_f_if.add_timeseries(trace_sub_ts) - trace_sub_ts.finalize() - - trace_f_if.finalize() - - # print 'adding global dF/F traces for each roi' - # trace_dff_if = rt_mo.create_interface('DfOverF') - # - # trace_dff_ts = nwb_f.create_timeseries('RoiResponseSeries', 'dff_center') - # trace_dff_ts.set_data(traces_center_dff, unit='au', conversion=np.nan, resolution=np.nan) - # trace_dff_ts.set_value('data_format', 'roi (row) x time (column)') - # trace_dff_ts.set_description('global df/f traces for each roi center, input fluorescence is the trace after demixing' - # ' and neuropil subtraction. 
global df/f is calculated by ' - # 'allensdk.brain_observatory.dff.compute_dff() function.') - # trace_dff_ts.set_time_as_link(ts_path) - # trace_dff_ts.set_value_as_link('segmentation_interface', seg_if_path) - # trace_dff_ts.set_value('roi_names', roi_names) - # trace_dff_ts.set_value('num_samples', traces_center_dff.shape[1]) - # trace_dff_if.add_timeseries(trace_dff_ts) - # trace_dff_ts.finalize() - # trace_dff_if.finalize() - - rt_mo.finalize() - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -for plane_i, plane_n in enumerate(plane_ns): - - print('\n\n' + plane_n) - - data_folder = os.path.join(curr_folder, plane_n) - add_rois_and_traces(data_folder, nwb_f, plane_n, imaging_depth=plane_depths[plane_i]) - -nwb_f.close() - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/130_add_visual_stimuli_retinotopic_mapping.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/130_add_visual_stimuli_retinotopic_mapping.py deleted file mode 100644 index 0414239..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/130_add_visual_stimuli_retinotopic_mapping.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import retinotopic_mapping.DisplayLogAnalysis as dla -import corticalmapping.NwbTools as nt - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -stim_pkl_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.pkl'][0] -stim_log = dla.DisplayLogAnalyzer(stim_pkl_fn) - -nwb_f.add_visual_display_log_retinotopic_mapping(stim_log=stim_log) -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/135_get_photodiode_onset_timestamps.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/135_get_photodiode_onset_timestamps.py deleted file mode 100644 index e43be8b..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/135_get_photodiode_onset_timestamps.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.HighLevel as hl - -# photodiode -digitizeThr = 0.2 -filterSize = 0.01 -segmentThr = 0.02 -smallestInterval = 0.03 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] - -nwb_f = nt.RecordedFile(nwb_fn) -pd, pd_t = nwb_f.get_analog_data(ch_n='analog_photodiode') -fs = 1. 
/ np.mean(np.diff(pd_t)) -# print fs - -pd_onsets = hl.segmentPhotodiodeSignal(pd, digitizeThr=digitizeThr, filterSize=filterSize, - segmentThr=segmentThr, Fs=fs, smallestInterval=smallestInterval) - -raw_input('press enter to continue ...') - -pdo_ts = nwb_f.create_timeseries('TimeSeries', 'digital_photodiode_rise', modality='other') -pdo_ts.set_time(pd_onsets) -pdo_ts.set_data([], unit='', conversion=np.nan, resolution=np.nan) -pdo_ts.set_value('digitize_threshold', digitizeThr) -pdo_ts.set_value('filter_size', filterSize) -pdo_ts.set_value('segment_threshold', segmentThr) -pdo_ts.set_value('smallest_interval', smallestInterval) -pdo_ts.set_description('Real Timestamps (master acquisition clock) of photodiode onset. ' - 'Extracted from analog photodiode signal by the function:' - 'corticalmapping.HighLevel.segmentPhotodiodeSignal() using parameters saved in the' - 'current timeseries.') -pdo_ts.set_path('/analysis') -pdo_ts.finalize() - -nwb_f.close() - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/140_analyze_analog_photodiode_onsets.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/140_analyze_analog_photodiode_onsets.py deleted file mode 100644 index bd58c7e..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/140_analyze_analog_photodiode_onsets.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import retinotopic_mapping.DisplayLogAnalysis as dla -import corticalmapping.core.TimingAnalysis as ta - - -pd_ts_pd_path = 'analysis/digital_photodiode_rise' -vsync_frame_path = 'acquisition/timeseries/digital_stim_vsync_rise' -pd_thr = -0.5 # this is color threshold, not analog photodiode threshold -ccg_t_range = (0., 0.1) -ccg_bins = 100 -is_plot = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -stim_pkl_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.pkl'][0] -stim_log = dla.DisplayLogAnalyzer(stim_pkl_fn) - -# get display lag -display_delay = nwb_f.get_display_delay_retinotopic_mapping(stim_log=stim_log, indicator_color_thr=pd_thr, - ccg_t_range=ccg_t_range, ccg_bins=ccg_bins, - is_plot=is_plot, pd_onset_ts_path=pd_ts_pd_path, - vsync_frame_ts_path=vsync_frame_path) - -# analyze photodiode onset -stim_dict = stim_log.get_stim_dict() -pd_onsets_seq = stim_log.analyze_photodiode_onsets_sequential(stim_dict=stim_dict, pd_thr=pd_thr) -pd_onsets_com = stim_log.analyze_photodiode_onsets_combined(pd_onsets_seq=pd_onsets_seq, - is_dgc_blocked=True) -nwb_f.add_photodiode_onsets_combined_retinotopic_mapping(pd_onsets_com=pd_onsets_com, - display_delay=display_delay, - vsync_frame_path=vsync_frame_path) -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/140_analyze_digital_photodiode_onsets.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/140_analyze_digital_photodiode_onsets.py deleted file mode 100644 index 0f5da9f..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/140_analyze_digital_photodiode_onsets.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import 
corticalmapping.NwbTools as nt -import retinotopic_mapping.DisplayLogAnalysis as dla -import corticalmapping.core.TimingAnalysis as ta - - -pd_ts_pd_path = 'acquisition/timeseries/digital_photodiode_rise' -vsync_frame_path = 'acquisition/timeseries/digital_stim_vsync_rise' -pd_thr = 0.5 # this is color threshold, not analog photodiode threshold -ccg_t_range = (0., 0.1) -ccg_bins = 100 -is_plot = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -stim_pkl_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.pkl'][0] -stim_log = dla.DisplayLogAnalyzer(stim_pkl_fn) - -# get display lag -display_delay = nwb_f.get_display_delay_retinotopic_mapping(stim_log=stim_log, indicator_color_thr=pd_thr, - ccg_t_range=ccg_t_range, ccg_bins=ccg_bins, - is_plot=is_plot, pd_onset_ts_path=pd_ts_pd_path, - vsync_frame_ts_path=vsync_frame_path) - -# analyze photodiode onset -stim_dict = stim_log.get_stim_dict() -pd_onsets_seq = stim_log.analyze_photodiode_onsets_sequential(stim_dict=stim_dict, pd_thr=pd_thr) -pd_onsets_com = stim_log.analyze_photodiode_onsets_combined(pd_onsets_seq=pd_onsets_seq, - is_dgc_blocked=True) -nwb_f.add_photodiode_onsets_combined_retinotopic_mapping(pd_onsets_com=pd_onsets_com, - display_delay=display_delay, - vsync_frame_path=vsync_frame_path) -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/150_get_STRFs.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/150_get_STRFs.py deleted file mode 100644 index 8642a33..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/150_get_STRFs.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -stim_name = '001_LocallySparseNoiseRetinotopicMapping' -trace_source = 'f_center_subtracted' -start_time = -1. -end_time = 2. 
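# A worked example (assumed numbers) of how the [-1 s, 2 s] window above turns
# into frame indices later in this script: with 5 planes on a 37 Hz scope, the
# per-plane rate is ~7.4 Hz, i.e. a mean frame duration of ~0.135 s.
import numpy as np

mean_frame_dur = 0.135                                         # seconds, assumed
chunk_frame_dur = int(np.ceil((2. - (-1.)) / mean_frame_dur))  # 23 frames per chunk
chunk_frame_start = int(np.floor(-1. / mean_frame_dur))        # -8, i.e. 8 frames pre-onset
t_axis = (np.arange(chunk_frame_dur) + chunk_frame_start) * mean_frame_dur
print(t_axis[0], t_axis[-1])  # about -1.08 s to 1.89 s around each probe onset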
- -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -probe_grp = nwb_f.file_pointer['analysis/photodiode_onsets/' + stim_name] -probe_ns = probe_grp.keys() -probe_ns.sort() - -probe_locations = [[float(pn[3: 9]), float(pn[13: 19])] for pn in probe_ns] -probe_signs = [float(pn[-2:]) for pn in probe_ns] -# print(probe_locations) - -plane_ns = nwb_f.file_pointer['processing'].keys() -plane_ns = [pn.split('_')[-1] for pn in plane_ns if 'rois_and_traces_plane' in pn] -plane_ns.sort() -print('\n'.join(plane_ns)) - -strf_grp = nwb_f.file_pointer['analysis'].create_group('STRFs') - -for plane_n in plane_ns: - print('\ngetting STRFs for {} ...'.format(plane_n)) - - roi_ns = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + - '/ImageSegmentation/imaging_plane/roi_list'].value - roi_ns = [rn for rn in roi_ns if rn[0: 4] == 'roi_'] - roi_ns.sort() - roi_num = len(roi_ns) - - plane_strf_grp = strf_grp.create_group(plane_n) - plane_traces = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + '/Fluorescence/' + - trace_source + '/data'].value - plane_trace_ts = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + '/Fluorescence/' + - trace_source + '/timestamps'].value - - plane_mean_frame_dur = np.mean(np.diff(plane_trace_ts)) - plane_chunk_frame_dur = int(np.ceil((end_time - start_time) / plane_mean_frame_dur)) - plane_chunk_frame_start = int(np.floor(start_time / plane_mean_frame_dur)) - plane_t = (np.arange(plane_chunk_frame_dur) + plane_chunk_frame_start) * plane_mean_frame_dur - print '{}: STRF time axis: \n{}'.format(plane_n, plane_t) - - plane_roi_traces = [] - trigger_ts = [] - - for probe_ind, probe_n in enumerate(probe_ns): - - probe_ts = probe_grp[probe_n]['pd_onset_ts_sec'].value - probe_traces = [] - probe_trigger_ts = [] - for curr_probe_ts in probe_ts: - curr_frame_start = ta.find_nearest(plane_trace_ts, curr_probe_ts) + plane_chunk_frame_start - curr_frame_end = curr_frame_start + plane_chunk_frame_dur - if curr_frame_start >= 0 and curr_frame_end <= len(plane_trace_ts): - probe_traces.append(plane_traces[:, curr_frame_start: curr_frame_end]) - probe_trigger_ts.append(curr_probe_ts) - - plane_roi_traces.append(np.array(probe_traces)) - trigger_ts.append(probe_trigger_ts) - print('probe: {} / {}; shape: {}'.format(probe_ind + 1, len(probe_ns), np.array(probe_traces).shape)) - - # plane_roi_traces = np.array(plane_roi_traces) - - print('saving ...') - for roi_ind in range(roi_num): - - print "roi: {} / {}".format(roi_ind + 1, roi_num) - curr_unit_traces = [pt[:, roi_ind, :] for pt in plane_roi_traces] - curr_unit_traces = [list(t) for t in curr_unit_traces] - curr_strf = sca.SpatialTemporalReceptiveField2(locations=probe_locations, - signs=probe_signs, - traces=curr_unit_traces, - trigger_ts=trigger_ts, - time=plane_t, - name='roi_{:04d}'.format(roi_ind), - trace_data_type=trace_source) - - curr_strf_grp = plane_strf_grp.create_group('strf_roi_{:04d}'.format(roi_ind)) - curr_strf.to_h5_group(curr_strf_grp) - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/160_get_drifting_grating_response_tables.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/160_get_drifting_grating_response_tables.py deleted file mode 100644 index 65ba54c..0000000 --- 
a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/160_get_drifting_grating_response_tables.py +++ /dev/null @@ -1,18 +0,0 @@ -import os -import h5py -import numpy as np -import corticalmapping.NwbTools as nt - -plane_ns = ['plane0', 'plane1', 'plane2', 'plane3', 'plane4'] -stim_name = '001_DriftingGratingCircleRetinotopicMapping' -t_win = [-1, 2.5] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -nwb_f.get_drifting_grating_response_table_retinotopic_mapping(stim_name=stim_name, time_window=t_win) - -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/170_plot_STRFs.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/170_plot_STRFs.py deleted file mode 100644 index 3de24ad..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/170_plot_STRFs.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import h5py -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -save_folder = 'figures' -is_local_dff = True -is_add_to_traces = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File("{}/caiman_segmentation_results.hdf5".format(plane_n))['bias_added_to_movie'].value - else: - add_to_trace = 0. 
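# The bias bookkeeping in one sketch (assumed numbers): the memmap step added a
# constant to keep the movie positive and stored it as 'bias_added_to_movie',
# the segmentation step subtracted it from the saved traces, and the local dF/F
# here re-adds it so the baseline F is on the raw-movie scale before dividing.
import numpy as np

bias = 150.                                   # whatever 'bias_added_to_movie' holds
trace = np.array([20., 22., 21., 35., 24.])   # stored trace, bias already removed
raw_trace = trace + bias                      # back on the raw-movie scale
baseline = raw_trace[:3].mean()               # 171.
print((raw_trace - baseline) / baseline)      # peaks at (185 - 171) / 171 ~ 0.08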
- - plane_grp = strf_grp[plane_n] - pdff = PdfPages(os.path.join(save_folder, 'STRFs_' + plane_n + '.pdf')) - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - - v_min, v_max = curr_strf_dff.get_data_range() - f = curr_strf_dff.plot_traces(yRange=(v_min, v_max * 1.1), figSize=(16, 10), - columnSpacing=0.002, rowSpacing=0.002) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/180_plot_zscore_RFs.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/180_plot_zscore_RFs.py deleted file mode 100644 index 73f92f3..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/180_plot_zscore_RFs.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import h5py -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -save_folder = 'figures' -is_local_dff = True -zscore_range = [0., 4.] -t_window = [0., 1.] -is_add_to_traces = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File("{}/caiman_segmentation_results.hdf5".format(plane_n))['bias_added_to_movie'].value - else: - add_to_trace = 0. 
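# A rough sketch of the z-score map idea (the exact formula inside
# get_zscore_receptive_field() may differ): per probe location, scale the mean
# response within the time window by its trial-to-trial standard error.
import numpy as np

rng = np.random.RandomState(0)
resp = rng.randn(20, 8, 8) * 0.1                      # trials x altitude x azimuth
resp[:, 3, 4] += 0.5                                  # one responsive location
zmap = resp.mean(axis=0) / (resp.std(axis=0) / np.sqrt(resp.shape[0]))
print(np.unravel_index(np.argmax(zmap), zmap.shape))  # -> (3, 4)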
- - plane_grp = strf_grp[plane_n] - pdff = PdfPages(os.path.join(save_folder, 'zscore_RFs_' + plane_n + '.pdf')) - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - v_min, v_max = curr_strf_dff.get_data_range() - - rf_on, rf_off = curr_strf_dff.get_zscore_receptive_field(timeWindow=t_window) - f = plt.figure(figsize=(15, 4)) - ax_on = f.add_subplot(121) - rf_on.plot_rf(plot_axis=ax_on, is_colorbar=True, cmap='Reds', vmin=zscore_range[0], vmax=zscore_range[1]) - ax_off = f.add_subplot(122) - rf_off.plot_rf(plot_axis=ax_off, is_colorbar=True, cmap='Blues', vmin=zscore_range[0], vmax=zscore_range[1]) - plt.close() - - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/190_plot_RF_contours.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/190_plot_RF_contours.py deleted file mode 100644 index 1e7b31c..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/190_plot_RF_contours.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import numpy as np -import h5py -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -roi_t_window = [0., 1.] -zscore_range = [0., 4.] -save_folder = 'figures' -is_add_to_traces = True - -# plot control -thr_ratio = 0.4 -filter_sigma = 1. -interpolate_rate = 5 -absolute_thr = 1.6 -level_num = 1 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'] -print('\n'.join(nwb_fn)) - -if len(nwb_fn) != 1: - raise LookupError - -nwb_fn = nwb_fn[0] -rff = h5py.File(nwb_fn, 'r') - -strf_grp = rff['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -X = None -Y = None - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File("{}/caiman_segmentation_results.hdf5".format(plane_n))['bias_added_to_movie'].value - else: - add_to_trace = 0. 
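# How the contour levels further down are chosen, as a standalone sketch with a
# synthetic RF: each map is outlined at thr_ratio (40%) of its own peak, so
# every roi contributes one iso-line regardless of absolute response strength.
import numpy as np
import matplotlib.pyplot as plt

yy, xx = np.mgrid[0:40, 0:60]
rf = np.exp(-((yy - 20.) ** 2 + (xx - 30.) ** 2) / 50.)  # fake weighted RF mask
fig, ax = plt.subplots()
ax.contour(rf, levels=[0.4 * rf.max()], colors='r')      # one 40%-of-peak contour
ax.set_aspect('equal')
plt.show()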
- - plane_grp = strf_grp[plane_n] - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - f_all = plt.figure(figsize=(10, 10)) - ax_all = f_all.add_subplot(111) - - pdff = PdfPages(os.path.join(save_folder, 'RF_contours_' + plane_n + '.pdf')) - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - rf_on, rf_off, _ = curr_strf_dff.get_zscore_thresholded_receptive_fields(timeWindow=roi_t_window, - thr_ratio=thr_ratio, - filter_sigma=filter_sigma, - interpolate_rate=interpolate_rate, - absolute_thr=absolute_thr) - - if X is None and Y is None: - X, Y = np.meshgrid(np.arange(len(rf_on.aziPos)), - np.arange(len(rf_on.altPos))) - - levels_on = [np.max(rf_on.get_weighted_mask().flat) * thr_ratio] - levels_off = [np.max(rf_off.get_weighted_mask().flat) * thr_ratio] - ax_all.contour(X, Y, rf_on.get_weighted_mask(), levels=levels_on, colors='r', lw=5) - ax_all.contour(X, Y, rf_off.get_weighted_mask(), levels=levels_off, colors='b', lw=5) - - f_single = plt.figure(figsize=(10, 10)) - ax_single = f_single.add_subplot(111) - ax_single.contour(X, Y, rf_on.get_weighted_mask(), levels=levels_on, colors='r', lw=5) - ax_single.contour(X, Y, rf_off.get_weighted_mask(), levels=levels_off, colors='b', lw=5) - ax_single.set_xticks(range(len(rf_on.aziPos))[::10]) - ax_single.set_xticklabels(['{:05.1f}'.format(l) for l in rf_on.aziPos[::10]]) - ax_single.set_yticks(range(len(rf_on.altPos))[::10]) - ax_single.set_yticklabels(['{:05.1f}'.format(l) for l in rf_on.altPos[::-1][::10]]) - ax_single.set_aspect('equal') - ax_single.set_title('{}: {}. ON thr:{}; OFF thr:{}.'.format(plane_n, roi_n, rf_on.thr, rf_off.thr)) - pdff.savefig(f_single) - f_single.clear() - plt.close(f_single) - - pdff.close() - - ax_all.set_xticks(range(len(rf_on.aziPos))[::10]) - ax_all.set_xticklabels(['{:05.1f}'.format(l) for l in rf_on.aziPos[::10]]) - ax_all.set_yticks(range(len(rf_on.altPos))[::10]) - ax_all.set_yticklabels(['{:05.1f}'.format(l) for l in rf_on.altPos[::-1][::10]]) - ax_all.set_aspect('equal') - ax_all.set_title('{}, abs_zscore_thr:{}'.format(plane_n, absolute_thr)) - - f_all.savefig(os.path.join(save_folder, 'RF_contours_' + plane_n + '_all.pdf'), dpi=300) - -rff.close() - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/200_plot_dgc_response_all.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/200_plot_dgc_response_all.py deleted file mode 100644 index b1b2f1f..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/200_plot_dgc_response_all.py +++ /dev/null @@ -1,160 +0,0 @@ -import os -import h5py -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.core.PlottingTools as pt -from matplotlib.backends.backend_pdf import PdfPages -import matplotlib.gridspec as gridspec - -trace_type = 'f_center_subtracted' -response_table_path = 'analysis/response_table_001_DriftingGratingCircleRetinotopicMapping' - -baseline_span = [-0.5, 0.] 
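# A small worked example of how baseline_span (above) and response_span (set
# just below) slice the STA time axis inside the get_dff() helper that follows:
import numpy as np

t_axis = np.array([-0.75, -0.25, 0.25, 0.75, 1.25, 1.75])
baseline_ind = np.logical_and(t_axis > -0.5, t_axis <= 0.)  # picks [-0.25]
response_ind = np.logical_and(t_axis > 0., t_axis <= 1.5)   # picks [0.25, 0.75, 1.25]
print(t_axis[baseline_ind], t_axis[response_ind])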
-response_span = [0., 1.5] -is_add_to_trace = True - -face_cmap = 'RdBu_r' - -def get_dff(traces, t_axis, response_span, baseline_span): - """ - - :param traces: dimension, trial x timepoint - :param t_axis: - :return: - """ - - baseline_ind = np.logical_and(t_axis > baseline_span[0], t_axis <= baseline_span[1]) - response_ind = np.logical_and(t_axis > response_span[0], t_axis <= response_span[1]) - baseline = np.mean(traces[:, baseline_ind], axis=1, keepdims=True) - dff_traces = (traces - baseline) / baseline - - trace_mean = np.mean(traces, axis=0) - baseline_mean = np.mean(trace_mean[baseline_ind]) - dff_trace_mean = (trace_mean - baseline_mean) / baseline_mean - dff_mean = np.mean(dff_trace_mean[response_ind]) - - return dff_traces, dff_trace_mean, dff_mean - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, 'figures') -if not os.path.isdir(save_folder): - os.mkdir(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -print(nwb_fn) -nwb_f = h5py.File(nwb_fn, 'r') - -plane_ns = nwb_f[response_table_path].keys() -plane_ns.sort() - -for plane_n in plane_ns: - - print('\nprocessing {} ...'.format(plane_n)) - - if is_add_to_trace: - add_to_trace = h5py.File(os.path.join(plane_n, 'caiman_segmentation_results.hdf5'), - 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0 - - res_grp = nwb_f['{}/{}'.format(response_table_path, plane_n)] - t_axis = res_grp.attrs['sta_timestamps'] - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - grating_ns = res_grp.keys() - - # remove blank sweep - grating_ns = [gn for gn in grating_ns if gn[-37:] != '_sf0.00_tf00.0_dire000_con0.00_rad000'] - - dire_lst = np.array(list(set([str(gn[38:41]) for gn in grating_ns]))) - tf_lst = np.array(list(set([str(gn[29:33]) for gn in grating_ns]))) - sf_lst = np.array(list(set([str(gn[22:26]) for gn in grating_ns]))) - - - pdff = PdfPages(os.path.join(save_folder, 'STA_DriftingGrating_' + plane_n + '_all.pdf')) - - for roi_i, roi_n in enumerate(roi_lst): - print(roi_n) - - f = plt.figure(figsize=(8.5, 11)) - gs_out = gridspec.GridSpec(len(tf_lst), 1) - gs_in_dict = {} - for gs_ind, gs_o in enumerate(gs_out): - curr_gs_in = gridspec.GridSpecFromSubplotSpec(len(sf_lst), len(dire_lst), subplot_spec=gs_o, - wspace=0.0, hspace=0.0) - gs_in_dict[gs_ind] = curr_gs_in - - v_max = 0 - v_min = 0 - dff_mean_max=0 - dff_mean_min=0 - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - dff_traces, dff_trace_mean, dff_mean = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, - baseline_span=baseline_span) - v_max = max([np.amax(dff_traces), v_max]) - v_min = min([np.amin(dff_traces), v_min]) - dff_mean_max = max([dff_mean, dff_mean_max]) - dff_mean_min = min([dff_mean, dff_mean_min]) - - dff_mean_max = max([abs(dff_mean_max), abs(dff_mean_min)]) - dff_mean_min = - dff_mean_max - - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - dff_traces, dff_trace_mean, dff_mean = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, - baseline_span=baseline_span) - - curr_tf = grating_n[29:33] - tf_i = np.where(tf_lst == curr_tf)[0][0] - curr_sf = grating_n[22:26] - sf_i = np.where(sf_lst 
== curr_sf)[0][0] - curr_dire = grating_n[38:41] - dire_i = np.where(dire_lst == curr_dire)[0][0] - ax = plt.Subplot(f, gs_in_dict[tf_i][sf_i * len(dire_lst) + dire_i]) - f_color = pt.value_2_rgb(value=(dff_mean - dff_mean_min) / (dff_mean_max - dff_mean_min), - cmap=face_cmap) - - # f_color = pt.value_2_rgb(value=dff_mean / dff_mean_max, cmap=face_cmap) - - # print f_color - ax.set_axis_bgcolor(f_color) - ax.set_xticks([]) - ax.set_yticks([]) - for sp in ax.spines.values(): - sp.set_visible(False) - ax.axhline(y=0, ls='--', color='#888888', lw=1) - ax.axvspan(response_span[0], response_span[1], alpha=0.5, color='#888888', ec='none') - for t in dff_traces: - ax.plot(t_axis, t, '-', color='#888888', lw=0.5) - ax.plot(t_axis, dff_trace_mean, '-r', lw=1) - f.add_subplot(ax) - - all_axes = f.get_axes() - for ax in all_axes: - ax.set_ylim([v_min, v_max]) - ax.set_xlim([t_axis[0], t_axis[-1]]) - - f.suptitle('roi:{:04d}; trace type:{}; baseline:{}; response:{}; \ntrace range:{}; color range:{}' - .format(roi_i, trace_type, baseline_span, response_span, [v_min, v_max], - [dff_mean_min, dff_mean_max]), fontsize=8) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/210_plot_dgc_response_mean.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/210_plot_dgc_response_mean.py deleted file mode 100644 index 604a4db..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/210_plot_dgc_response_mean.py +++ /dev/null @@ -1,157 +0,0 @@ -import os -import h5py -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.core.PlottingTools as pt -from matplotlib.backends.backend_pdf import PdfPages -import matplotlib.gridspec as gridspec - -trace_type = 'f_center_subtracted' -response_table_path = 'analysis/response_table_001_DriftingGratingCircleRetinotopicMapping' - -baseline_span = [-0.5, 0.] 
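# The shaded band drawn later in this script is mean +/- SEM across trials; a
# minimal standalone version of that plot with synthetic traces:
import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(-0.5, 1.5, 40)
trials = np.sin(np.clip(t, 0., None) * np.pi) + np.random.RandomState(1).randn(10, 40) * 0.2
m = trials.mean(axis=0)
sem = trials.std(axis=0) / np.sqrt(trials.shape[0])
fig, ax = plt.subplots()
ax.fill_between(t, m - sem, m + sem, facecolor='#888888', edgecolor='none', alpha=0.5)
ax.plot(t, m, '-r')
plt.show()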
-response_span = [0., 1.5] -is_add_to_trace = True - -face_cmap = 'RdBu_r' - -def get_dff(traces, t_axis, response_span, baseline_span): - """ - - :param traces: dimension, trial x timepoint - :param t_axis: - :return: - """ - - trace_mean = np.mean(traces, axis=0) - trace_std = np.std(traces, axis=0) - trace_sem = trace_std / np.sqrt(traces.shape[0]) - - baseline_ind = np.logical_and(t_axis > baseline_span[0], t_axis <= baseline_span[1]) - response_ind = np.logical_and(t_axis > response_span[0], t_axis <= response_span[1]) - baseline = np.mean(trace_mean[baseline_ind]) - dff_trace_mean = (trace_mean - baseline) / baseline - dff_trace_std = trace_std / baseline - dff_trace_sem = trace_sem / baseline - dff_mean = np.mean(dff_trace_mean[response_ind]) - - return dff_trace_mean, dff_trace_std, dff_trace_sem, dff_mean - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, 'figures') -if not os.path.isdir(save_folder): - os.mkdir(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -print(nwb_fn) -nwb_f = h5py.File(nwb_fn, 'r') - -plane_ns = nwb_f[response_table_path].keys() -plane_ns.sort() - -for plane_n in plane_ns: - - print('\nprocessing {} ...'.format(plane_n)) - - if is_add_to_trace: - add_to_trace = h5py.File(os.path.join(plane_n, 'caiman_segmentation_results.hdf5'), - 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0 - - res_grp = nwb_f['{}/{}'.format(response_table_path, plane_n)] - t_axis = res_grp.attrs['sta_timestamps'] - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - grating_ns = res_grp.keys() - # remove blank sweep - grating_ns = [gn for gn in grating_ns if gn[-37:] != '_sf0.00_tf00.0_dire000_con0.00_rad000'] - - dire_lst = np.array(list(set([str(gn[38:41]) for gn in grating_ns]))) - dire_lst.sort() - tf_lst = np.array(list(set([str(gn[29:33]) for gn in grating_ns]))) - tf_lst.sort() - sf_lst = np.array(list(set([str(gn[22:26]) for gn in grating_ns]))) - sf_lst.sort() - - pdff = PdfPages(os.path.join(save_folder, 'STA_DriftingGrating_' + plane_n + '_mean.pdf')) - - for roi_i, roi_n in enumerate(roi_lst): - print(roi_n) - - f = plt.figure(figsize=(8.5, 11)) - gs_out = gridspec.GridSpec(len(tf_lst), 1) - gs_in_dict = {} - for gs_ind, gs_o in enumerate(gs_out): - curr_gs_in = gridspec.GridSpecFromSubplotSpec(len(sf_lst), len(dire_lst), subplot_spec=gs_o, - wspace=0.05, hspace=0.05) - gs_in_dict[gs_ind] = curr_gs_in - - v_max = 0 - v_min = 0 - dff_mean_max=0 - dff_mean_min=0 - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - _ = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, baseline_span=baseline_span) - dff_trace_mean, dff_trace_std, dff_trace_sem, dff_mean = _ - v_max = max([np.amax(dff_trace_mean + dff_trace_sem), v_max]) - v_min = min([np.amin(dff_trace_mean - dff_trace_sem), v_min]) - dff_mean_max = max([dff_mean, dff_mean_max]) - dff_mean_min = min([dff_mean, dff_mean_min]) - dff_mean_max = max([abs(dff_mean_max), abs(dff_mean_min)]) - dff_mean_min = - dff_mean_max - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - _ = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, baseline_span=baseline_span) - 
dff_trace_mean, dff_trace_std, dff_trace_sem, dff_mean = _ - curr_tf = grating_n[29:33] - tf_i = np.where(tf_lst == curr_tf)[0][0] - curr_sf = grating_n[22:26] - sf_i = np.where(sf_lst == curr_sf)[0][0] - curr_dire = grating_n[38:41] - dire_i = np.where(dire_lst == curr_dire)[0][0] - ax = plt.Subplot(f, gs_in_dict[tf_i][sf_i * len(dire_lst) + dire_i]) - f_color = pt.value_2_rgb(value=(dff_mean - dff_mean_min) / (dff_mean_max - dff_mean_min), - cmap=face_cmap) - - # f_color = pt.value_2_rgb(value=dff_mean / dff_mean_max, cmap=face_cmap) - - # print f_color - ax.set_axis_bgcolor(f_color) - ax.set_xticks([]) - ax.set_yticks([]) - for sp in ax.spines.values(): - sp.set_visible(False) - ax.axhline(y=0, ls='--', color='#888888', lw=1) - ax.axvspan(response_span[0], response_span[1], alpha=0.5, color='#888888', ec='none') - ax.fill_between(t_axis, dff_trace_mean - dff_trace_sem, dff_trace_mean + dff_trace_sem, edgecolor='none', - facecolor='#880000', alpha=0.5) - ax.plot(t_axis, dff_trace_mean, '-r', lw=1) - f.add_subplot(ax) - - all_axes = f.get_axes() - for ax in all_axes: - ax.set_ylim([v_min, v_max]) - ax.set_xlim([t_axis[0], t_axis[-1]]) - - f.suptitle('roi:{:04d}; trace type:{}; baseline:{}; response:{}; \ntrace range:{}; color range:{}' - .format(roi_i, trace_type, baseline_span, response_span, [v_min, v_max], - [dff_mean_min, dff_mean_max]), fontsize=8) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/220_plot_dgc_tuning_curves.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/220_plot_dgc_tuning_curves.py deleted file mode 100644 index a5d8a6f..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/220_plot_dgc_tuning_curves.py +++ /dev/null @@ -1,196 +0,0 @@ -import os -import h5py -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages - -trace_type = 'f_center_subtracted' -response_table_path = 'analysis/response_table_001_DriftingGratingCircleRetinotopicMapping' - -baseline_span = [-0.5, 0.] 
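# The condition parameters are recovered from the group names positionally; a
# hypothetical name showing the scheme the slices further down assume
# (fixed-width fields, matching the blank-sweep pattern that gets filtered out):
grating_n = 'grating_000000_sf0.04_tf01.0_dire090_con0.80_rad030'
sf = float(grating_n.split('_')[2][2:])    # 0.04 cycles per degree
tf = float(grating_n.split('_')[3][2:])    # 1.0 Hz
dire = int(grating_n.split('_')[4][4:])    # 90 degrees
con = float(grating_n.split('_')[5][3:])   # 0.80 contrast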
-response_span = [0., 1.5] -is_add_to_trace = True - -def get_response(traces, t_axis, response_span, baseline_span): - """ - - :param traces: dimension, trial x timepoint - :param t_axis: - :return: - """ - - baseline_ind = np.logical_and(t_axis > baseline_span[0], t_axis <= baseline_span[1]) - response_ind = np.logical_and(t_axis > response_span[0], t_axis <= response_span[1]) - - trace_mean = np.mean(traces, axis=0) - baseline_mean = np.mean(trace_mean[baseline_ind]) - dff_trace_mean = (trace_mean - baseline_mean) / baseline_mean - dff_mean = np.mean(dff_trace_mean[response_ind]) - - baselines = np.mean(traces[:, baseline_ind], axis=1, keepdims=True) - dff_traces = (traces - baselines) / baselines - dffs = np.mean(dff_traces[:, response_ind], axis=1) - dff_std = np.std(dffs) - dff_sem = dff_std / np.sqrt(traces.shape[0]) - - return dff_mean, dff_std, dff_sem - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, 'figures') -if not os.path.isdir(save_folder): - os.mkdir(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -print(nwb_fn) -nwb_f = h5py.File(nwb_fn, 'r') - -plane_ns = nwb_f[response_table_path].keys() -plane_ns.sort() - -for plane_n in plane_ns: - - print('\nprocessing {} ...'.format(plane_n)) - - if is_add_to_trace: - add_to_trace = h5py.File(os.path.join(plane_n, 'caiman_segmentation_results.hdf5'), - 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0 - - res_grp = nwb_f['{}/{}'.format(response_table_path, plane_n)] - t_axis = res_grp.attrs['sta_timestamps'] - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - grating_ns = res_grp.keys() - - # remove blank sweep - grating_ns = [gn for gn in grating_ns if gn[-37:] != '_sf0.00_tf00.0_dire000_con0.00_rad000'] - - dire_lst = np.array(list(set([str(gn[38:41]) for gn in grating_ns]))) - dire_lst.sort() - tf_lst = np.array(list(set([str(gn[29:33]) for gn in grating_ns]))) - tf_lst.sort() - sf_lst = np.array(list(set([str(gn[22:26]) for gn in grating_ns]))) - sf_lst.sort() - - pdff = PdfPages(os.path.join(save_folder, 'tuning_curve_DriftingGrating_' + plane_n + '_mean.pdf')) - - for roi_i, roi_n in enumerate(roi_lst): - print(roi_n) - - # get response table - res_tab = pd.DataFrame(columns=['con', 'tf', 'sf', 'dire', 'dff_mean', 'dff_std', 'dff_sem']) - row_ind = 0 - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - _ = get_response(traces=curr_sta, t_axis=t_axis, response_span=response_span, baseline_span=baseline_span) - dff_mean, dff_std, dff_sem = _ - - con = float(grating_n.split('_')[5][3:]) - tf = float(grating_n.split('_')[3][2:]) - sf = float(grating_n.split('_')[2][2:]) - dire = int(grating_n.split('_')[4][4:]) - - res_tab.loc[row_ind] = [con, tf, sf, dire, dff_mean, dff_std, dff_sem] - row_ind += 1 - - # find the preferred condition - top_condition = res_tab[res_tab.dff_mean == max(res_tab.dff_mean)] - - # make figure - f = plt.figure(figsize=(8.5, 11)) - - # get tf plot - tf_conditions = res_tab[(res_tab.sf == float(top_condition.sf)) & \ - (res_tab.dire == int(top_condition.dire))] - tf_conditions = tf_conditions.sort_values(by='tf') - - tf_log = np.log(tf_conditions.tf) - - ax_tf = f.add_subplot(311) - ax_tf.fill_between(x=tf_log, y1=tf_conditions.dff_mean + tf_conditions.dff_sem, 
- y2=tf_conditions.dff_mean - tf_conditions.dff_sem, edgecolor='none', - facecolor='#888888', alpha=0.5) - ax_tf.axhline(y=0, ls='--', color='k', lw=1) - ax_tf.plot(tf_log, tf_conditions.dff_mean, 'r-', lw=2) - ax_tf.set_title('temporal frequency tuning', rotation='vertical', x=-0.4, y=0.5, va='center', ha='center', - size=10) - ax_tf.set_xticks(tf_log) - ax_tf.set_xticklabels(list(tf_conditions.tf)) - ax_tf.set_xlim(np.log([0.9, 16])) - ax_tf_xrange = ax_tf.get_xlim()[1] - ax_tf.get_xlim()[0] - ax_tf_yrange = ax_tf.get_ylim()[1] - ax_tf.get_ylim()[0] - ax_tf.set_aspect(aspect=(ax_tf_xrange / ax_tf_yrange)) - ax_tf.set_ylabel('mean df/f', size=10) - ax_tf.set_xlabel('temporal frequency (Hz)', size=10) - ax_tf.tick_params(axis='both', which='major', labelsize=10) - - # get sf plot - sf_conditions = res_tab[(res_tab.tf == float(top_condition.tf)) & \ - (res_tab.dire == int(top_condition.dire))] - sf_conditions = sf_conditions.sort_values(by='sf') - - sf_log = np.log(sf_conditions.sf) - - ax_sf = f.add_subplot(312) - ax_sf.fill_between(x=sf_log, y1=sf_conditions.dff_mean + sf_conditions.dff_sem, - y2=sf_conditions.dff_mean - sf_conditions.dff_sem, edgecolor='none', - facecolor='#888888', alpha=0.5) - ax_sf.axhline(y=0, ls='--', color='k', lw=1) - ax_sf.plot(sf_log, sf_conditions.dff_mean, '-r', lw=2) - ax_sf.set_title('spatial frequency tuning', rotation='vertical', x=-0.4, y=0.5, va='center', ha='center', - size=10) - ax_sf.set_xticks(sf_log) - ax_sf.set_xticklabels(['{:04.2f}'.format(s) for s in list(sf_conditions.sf)]) - ax_sf.set_xlim(np.log([0.008, 0.4])) - ax_sf_xrange = ax_sf.get_xlim()[1] - ax_sf.get_xlim()[0] - ax_sf_yrange = ax_sf.get_ylim()[1] - ax_sf.get_ylim()[0] - ax_sf.set_aspect(aspect=(ax_sf_xrange / ax_sf_yrange)) - ax_sf.set_ylabel('mean df/f', size=10) - ax_sf.set_xlabel('spatial frequency (cpd)', size=10) - ax_sf.tick_params(axis='both', which='major', labelsize=10) - - # get dire plot - dire_conditions = res_tab[(res_tab.tf == float(top_condition.tf)) & \ - (res_tab.sf == float(top_condition.sf))] - dire_conditions = dire_conditions.sort_values(by='dire') - dire_arc = list(dire_conditions.dire * np.pi / 180.) - dire_arc.append(dire_arc[0]) - dire_dff = np.array(dire_conditions.dff_mean) - dire_dff[dire_dff < 0.] = 0. - dire_dff = list(dire_dff) - dire_dff.append(dire_dff[0]) - dire_dff_sem = list(dire_conditions.dff_sem) - dire_dff_sem.append(dire_dff_sem[0]) - dire_dff_low = np.array(dire_dff) - np.array(dire_dff_sem) - dire_dff_low[dire_dff_low < 0.] = 0. - dire_dff_high = np.array(dire_dff) + np.array(dire_dff_sem) - - r_ticks = [0, round(max(dire_dff) * 10000.) / 10000.] 
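# Why the first direction was appended again above: a polar line plot does not
# close itself, so the 0-degree sample is repeated at the end of both the angle
# and radius lists. The same trick in a standalone sketch:
import numpy as np
import matplotlib.pyplot as plt

dire = np.arange(0, 360, 45) * np.pi / 180.              # 8 directions in radians
resp = np.array([0.1, 0.3, 0.6, 0.3, 0.1, 0., 0., 0.05])
ax = plt.subplot(111, projection='polar')
ax.plot(np.append(dire, dire[0]), np.append(resp, resp[0]), '-r')
plt.show()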
- - ax_dire = f.add_subplot(313, projection='polar') - ax_dire.fill_between(x=dire_arc, y1=dire_dff_low, y2=dire_dff_high, edgecolor='none', facecolor='#888888', - alpha=0.5) - ax_dire.plot(dire_arc, dire_dff, '-r', lw=2) - ax_dire.set_title('orientation tuning', rotation='vertical', x=-0.4, y=0.5, va='center', ha='center', size=10) - ax_dire.set_rticks(r_ticks) - ax_dire.tick_params(axis='both', which='major', labelsize=10) - - f.suptitle('roi:{:04d}; trace type:{}; baseline:{}; response:{}' - .format(roi_i, trace_type, baseline_span, response_span), fontsize=10) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/old/040_get_mmap_files_for_caiman.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/old/040_get_mmap_files_for_caiman.py deleted file mode 100644 index 1acc083..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/old/040_get_mmap_files_for_caiman.py +++ /dev/null @@ -1,64 +0,0 @@ -import sys; print('Python %s on %s' % (sys.version, sys.platform)) -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import os -import numpy as np -import tifffile as tf -import caiman as cm -import h5py - -def run(): - - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180323-M360495-deepscope\02\02_" - base_name = '180323_M360495_02' - t_downsample_rate = 10. - - plane_ns = [p for p in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, p))] - plane_ns.sort() - print('planes:') - print('\n'.join(plane_ns)) - - ## start cluster - c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=3, single_thread=False) - - for plane_n in plane_ns: - print('\nprocessing {} ...'.format(plane_n)) - - plane_folder = os.path.join(data_folder, plane_n, 'corrected') - os.chdir(plane_folder) - - f_ns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - f_ns.sort() - print('\n'.join(f_ns)) - - min_tot = 0 - for fn in f_ns: - min_tot = min([min_tot, np.min(tf.imread(os.path.join(plane_folder, fn)))]) - print('minimum pixel value of entire movie: ' + str(min_tot)) - - add_to_movie = 10. - min_tot # the movie must be positive!!! - t_ds_factor = 1. 
/ t_downsample_rate # use .2 or .1 if file is large and you want a quick answer - f_paths = [os.path.join(plane_folder, f) for f in f_ns] - - name_new = cm.save_memmap_each(f_paths, - dview=dview, - base_name=base_name + '_' + plane_n + '_each', - resize_fact=(1., 1., t_ds_factor), - add_to_movie=add_to_movie) - name_new.sort() - - fname_new = cm.save_memmap_join(name_new, base_name=base_name + '_' + plane_n, dview=dview, - n_chunks=100) - print('\n{}'.format(fname_new)) - - save_file = h5py.File(os.path.join(plane_folder, 'caiman_segmentation_results.hdf5')) - save_file['bias_added_to_movie'] = add_to_movie - save_file.close() - - single_fns = [f for f in os.listdir(plane_folder) if '_each' in f] - for single_fn in single_fns: - os.remove(os.path.join(plane_folder, single_fn)) - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/040_get_cells_file.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/040_get_cells_file.py deleted file mode 100644 index ff29a37..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/040_get_cells_file.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import numpy as np -import h5py -import tifffile as tf -import allensdk_internal.brain_observatory.mask_set as mask_set -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import scipy.ndimage as ni -import matplotlib.pyplot as plt - - -isSave = True -is_filter = True - -filter_sigma = 0.5 # parameters only used if filter the rois -# dilation_iterations = 0 # parameters only used if filter the rois -cut_thr = 3. 
# parameters only used if filter the rois - -bg_fn = "corrected_mean_projection.tif" -save_folder = 'figures' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -data_f = h5py.File('caiman_segmentation_results.hdf5') -masks = data_f['masks'].value -data_f.close() - -bg = tf.imread(bg_fn) - -final_roi_dict = {} - -for i, mask in enumerate(masks): - - if is_filter: - mask_nor = (mask - np.mean(mask.flatten())) / np.abs(np.std(mask.flatten())) - mask_nor_f = ni.filters.gaussian_filter(mask_nor, filter_sigma) - mask_bin = np.zeros(mask_nor_f.shape, dtype=np.uint8) - mask_bin[mask_nor_f > cut_thr] = 1 - - else: - mask_bin = np.zeros(mask.shape, dtype=np.uint8) - mask_bin[mask > 0] = 1 - - mask_labeled, mask_num = ni.label(mask_bin) - curr_mask_dict = ia.get_masks(labeled=mask_labeled, keyPrefix='caiman_mask_{:03d}'.format(i), labelLength=5) - for roi_key, roi_mask in curr_mask_dict.items(): - final_roi_dict.update({roi_key: ia.WeightedROI(roi_mask * mask)}) - -print 'Total number of ROIs:',len(final_roi_dict) - -f = plt.figure(figsize=(15, 8)) -ax1 = f.add_subplot(121) -ax1.imshow(ia.array_nor(bg), vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') -colors1 = pt.random_color(masks.shape[0]) -for i, mask in enumerate(masks): - pt.plot_mask_borders(mask, plotAxis=ax1, color=colors1[i]) -ax1.set_title('original ROIs') -ax1.set_axis_off() -ax2 = f.add_subplot(122) -ax2.imshow(ia.array_nor(bg), vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') -colors2 = pt.random_color(len(final_roi_dict)) -i = 0 -for roi in final_roi_dict.values(): - pt.plot_mask_borders(roi.get_binary_mask(), plotAxis=ax2, color=colors2[i]) - i = i + 1 -ax2.set_title('filtered ROIs') -ax2.set_axis_off() -plt.show() - -if isSave: - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - f.savefig(os.path.join(save_folder, 'caiman_segmentation_filtering.pdf'), dpi=300) - - cell_file = h5py.File('cells.hdf5', 'w') - - i = 0 - for key, value in sorted(final_roi_dict.iteritems()): - curr_grp = cell_file.create_group('cell{:04d}'.format(i)) - curr_grp.attrs['name'] = key - value.to_h5_group(curr_grp) - i += 1 - - cell_file.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/050_refine_cells.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/050_refine_cells.py deleted file mode 100644 index f0b8c31..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/050_refine_cells.py +++ /dev/null @@ -1,173 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Jun 30 17:44:42 2015 - -@author: junz -""" -import os -import h5py -import numpy as np -import operator -import matplotlib.pyplot as plt -import scipy.ndimage as ni -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.FileTools as ft -import corticalmapping.core.PlottingTools as pt -import corticalmapping.SingleCellAnalysis as sca - -plt.ioff() - -# pixels, masks with center location within this pixel region at the image border will be discarded -center_margin = [20, 20] - -# area range, range of number of pixels of a valid roi -area_range = [20, 500] - -# for the two masks that are overlapping, if the ratio between overlap and the area of the smaller mask is larger than -# this value, the smaller mask will be discarded. 
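Aside (not part of the patch): the comment above describes a greedy rule, applied largest-mask-first in 050_refine_cells.py below. A condensed sketch of that pruning logic with plain numpy, assuming masks is a non-empty dict mapping names to 2D boolean arrays; the helper name prune_overlaps is hypothetical:

import numpy as np

def prune_overlaps(masks, overlap_thr=0.5):
    # visit masks from largest to smallest binary area
    order = sorted(masks, key=lambda n: masks[n].sum(), reverse=True)
    retained = []
    for name in order:
        area = float(masks[name].sum())
        # drop this (smaller) mask if it shares more than overlap_thr of its
        # own area with any mask that was already retained
        if all((masks[name] & masks[kept]).sum() / area <= overlap_thr
               for kept in retained):
            retained.append(name)
    return retained

Because candidates are visited in descending area, the ratio is always taken against the smaller of the two masks, which is exactly the criterion the comment states.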
-overlap_thr = 0.5 - -save_folder = 'figures' - -data_file_name = 'cells.hdf5' -save_file_name = 'cells_refined.hdf5' -background_file_name = "corrected_mean_projection.tif" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -# read cells -dfile = h5py.File(data_file_name) -cells = {} -for cellname in dfile.iterkeys(): - cells.update({cellname:ia.WeightedROI.from_h5_group(dfile[cellname])}) - -print 'total number of cells:', len(cells) - -# get the names of cells which are on the edge -edge_cells = [] -for cellname, cellmask in cells.iteritems(): - dimension = cellmask.dimension - center = cellmask.get_center() - if center[0] < center_margin[0] or \ - center[0] > dimension[0] - center_margin[0] or \ - center[1] < center_margin[1] or \ - center[1] > dimension[1] - center_margin[1]: - - # cellmask.plot_binary_mask_border(color='#ff0000', borderWidth=1) - # plt.title(cellname) - # plt.show() - - edge_cells.append(cellname) - -print '\ncells to be removed because they are on the edges:' -print '\n'.join(edge_cells) - -# remove edge cells -for edge_cell in edge_cells: - _ = cells.pop(edge_cell) - -# get dictionary of cell areas -cell_areas = {} -for cellname, cellmask in cells.iteritems(): - cell_areas.update({cellname: cellmask.get_binary_area()}) - - -# remove cellnames that have area outside of the area_range -invalid_cell_ns = [] -for cellname, cellarea in cell_areas.items(): - if cellarea < area_range[0] or cellarea > area_range[1]: - invalid_cell_ns.append(cellname) -print "cells to be removed because they do not meet area criterion:" -print "\n".join(invalid_cell_ns) -for invalid_cell_n in invalid_cell_ns: - cell_areas.pop(invalid_cell_n) - - -# sort cells with their binary area -cell_areas_sorted = sorted(cell_areas.items(), key=operator.itemgetter(1)) -cell_areas_sorted.reverse() -cell_names_sorted = [c[0] for c in cell_areas_sorted] -# print '\n'.join([str(c) for c in cell_areas_sorted]) - -# get the name of cells that needs to be removed because of overlapping -retain_cells = [] -remove_cells = [] -for cell1_name in cell_names_sorted: - cell1_mask = cells[cell1_name] - is_remove = 0 - cell1_area = cell1_mask.get_binary_area() - for cell2_name in retain_cells: - cell2_mask = cells[cell2_name] - cell2_area = cell2_mask.get_binary_area() - curr_overlap = cell1_mask.binary_overlap(cell2_mask) - - if float(curr_overlap) / cell1_area > overlap_thr: - remove_cells.append(cell1_name) - is_remove = 1 - print cell1_name, ':', cell1_mask.get_binary_area(), ': removed' - - # f = plt.figure(figsize=(10,10)) - # ax = f.add_subplot(111) - # cell1_mask.plot_binary_mask_border(plotAxis=ax, color='#ff0000', borderWidth=1) - # cell2_mask.plot_binary_mask_border(plotAxis=ax, color='#0000ff', borderWidth=1) - # ax.set_title('red:'+cell1_name+'; blue:'+cell2_name) - # plt.show() - break - - if is_remove == 0: - retain_cells.append(cell1_name) - print cell1_name, ':', cell1_mask.get_binary_area(), ': retained' - -print '\ncells to be removed because of overlapping:' -print '\n'.join(remove_cells) - -print '\ntotal number of retained cells:', len(retain_cells) - -# plotting -colors = pt.random_color(len(cells.keys())) -bgImg = tf.imread(background_file_name) - -f = plt.figure(figsize=(10, 10)) -ax = f.add_subplot(111) -ax.imshow(ia.array_nor(bgImg), cmap='gray', vmin=0, vmax=0.5, interpolation='nearest') - -f2 = plt.figure(figsize=(10, 10)) -ax2 = f2.add_subplot(111) -ax2.imshow(np.zeros(bgImg.shape,
dtype=np.uint8), vmin=0, vmax=1, cmap='gray', interpolation='nearest') - -i = 0 -for retain_cell in retain_cells: - cells[retain_cell].plot_binary_mask_border(plotAxis=ax, color=colors[i], borderWidth=1) - cells[retain_cell].plot_binary_mask_border(plotAxis=ax2, color=colors[i], borderWidth=1) - i += 1 -plt.show() - -# save figures -pt.save_figure_without_borders(f, os.path.join(save_folder, '2P_refined_ROIs_with_background.png'), dpi=300) -pt.save_figure_without_borders(f2, os.path.join(save_folder, '2P_refined_ROIs_without_background.png'), dpi=300) - -# save h5 file -save_file = h5py.File(save_file_name, 'w') -i = 0 -for retain_cell in retain_cells: - print retain_cell, ':', cells[retain_cell].get_binary_area() - - currGroup = save_file.create_group('cell' + ft.int2str(i, 4)) - currGroup.attrs['name'] = retain_cell - roiGroup = currGroup.create_group('roi') - cells[retain_cell].to_h5_group(roiGroup) - i += 1 - -for attr, value in dfile.attrs.iteritems(): - save_file.attrs[attr] = value - -save_file.close() -dfile.close() - - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/060_get_weighted_rois_and_surrounds.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/060_get_weighted_rois_and_surrounds.py deleted file mode 100644 index b31e405..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/060_get_weighted_rois_and_surrounds.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Jun 30 17:44:42 2015 - -@author: junz -""" - -import os -import numpy as np -import h5py -import tifffile as tf -import allensdk_internal.brain_observatory.mask_set as mask_set -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import scipy.ndimage as ni -import matplotlib.pyplot as plt - -plt.ioff() - -data_file_name = 'cells_refined.hdf5' -background_file_name = "corrected_mean_projection.tif" -save_folder = 'figures' - -overlap_threshold = 0.9 -surround_limit = [1, 8] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -print 'reading cells file ...' -data_f = h5py.File(data_file_name, 'r') - -cell_ns = data_f.keys() -cell_ns.sort() - -binary_mask_array = [] -weight_mask_array = [] - -for cell_n in cell_ns: - curr_roi = ia.ROI.from_h5_group(data_f[cell_n]['roi']) - binary_mask_array.append(curr_roi.get_binary_mask()) - weight_mask_array.append(curr_roi.get_weighted_mask()) - -data_f.close() -binary_mask_array = np.array(binary_mask_array) -weight_mask_array = np.array(weight_mask_array) -print 'starting mask_array shape:', weight_mask_array.shape - -print 'getting total mask ...' -total_mask = np.zeros((binary_mask_array.shape[1], binary_mask_array.shape[2]), dtype=np.uint8) -for curr_mask in binary_mask_array: - total_mask = np.logical_or(total_mask, curr_mask) -total_mask = np.logical_not(total_mask) - -plt.imshow(total_mask, interpolation='nearest') -plt.title('total_mask') -plt.show() - -print 'getting surround masks ...'
-binary_surround_array = [] -for binary_center in binary_mask_array: - curr_surround = np.logical_xor(ni.binary_dilation(binary_center, iterations=surround_limit[1]), - ni.binary_dilation(binary_center, iterations=surround_limit[0])) - curr_surround = np.logical_and(curr_surround, total_mask).astype(np.uint8) - binary_surround_array.append(curr_surround) - # plt.imshow(curr_surround) - # plt.show() -binary_surround_array = np.array(binary_surround_array) - -print "saving rois ..." -center_areas = [] -surround_areas = [] -for mask_ind in range(binary_mask_array.shape[0]): - center_areas.append(np.sum(binary_mask_array[mask_ind].flat)) - surround_areas.append(np.sum(binary_surround_array[mask_ind].flat)) -roi_f = h5py.File('rois_and_traces.hdf5') -roi_f['masks_center'] = weight_mask_array -roi_f['masks_surround'] = binary_surround_array - -roi_f.close() -print 'minimum surround area:', min(surround_areas), 'pixels.' - -f = plt.figure(figsize=(10, 10)) -ax_center = f.add_subplot(211) -ax_center.hist(center_areas, bins=30) -ax_center.set_title('roi center area distribution') -ax_surround = f.add_subplot(212) -ax_surround.hist(surround_areas, bins=30) -ax_surround.set_title('roi surround area distribution') -plt.show() - -print 'plotting ...' -colors = pt.random_color(weight_mask_array.shape[0]) -bg = ia.array_nor(tf.imread('corrected_mean_projection.tif')) - -f_c_bg = plt.figure(figsize=(10, 10)) -ax_c_bg = f_c_bg.add_subplot(111) -ax_c_bg.imshow(bg, cmap='gray', vmin=0, vmax=0.5, interpolation='nearest') -f_c_nbg = plt.figure(figsize=(10, 10)) -ax_c_nbg = f_c_nbg.add_subplot(111) -ax_c_nbg.imshow(np.zeros(bg.shape,dtype=np.uint8),vmin=0,vmax=1,cmap='gray',interpolation='nearest') -f_s_nbg = plt.figure(figsize=(10, 10)) -ax_s_nbg = f_s_nbg.add_subplot(111) -ax_s_nbg.imshow(np.zeros(bg.shape,dtype=np.uint8),vmin=0,vmax=1,cmap='gray',interpolation='nearest') - -i = 0 -for mask_ind in range(binary_mask_array.shape[0]): - pt.plot_mask_borders(binary_mask_array[mask_ind], plotAxis=ax_c_bg, color=colors[i], borderWidth=1) - pt.plot_mask_borders(binary_mask_array[mask_ind], plotAxis=ax_c_nbg, color=colors[i], borderWidth=1) - pt.plot_mask_borders(binary_surround_array[mask_ind], plotAxis=ax_s_nbg, color=colors[i], borderWidth=1) - i += 1 - -plt.show() - -print 'saving figures ...' 
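Aside (not part of the patch): the loop above builds, for each cell, a ring between surround_limit[0] and surround_limit[1] binary dilations of the center mask, then removes every pixel that belongs to any center ROI (total_mask in the script is the complement of their union). The same operation in isolation; the helper name surround_annulus is hypothetical:

import numpy as np
import scipy.ndimage as ni

def surround_annulus(center, union_of_centers, inner=1, outer=8):
    # ring = (outer dilation) XOR (inner dilation) of the center mask
    ring = np.logical_xor(ni.binary_dilation(center, iterations=outer),
                          ni.binary_dilation(center, iterations=inner))
    # exclude pixels claimed by any center ROI so neuropil estimates are not
    # contaminated by neighboring cells
    return np.logical_and(ring, np.logical_not(union_of_centers)).astype(np.uint8)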
-pt.save_figure_without_borders(f_c_bg, os.path.join(save_folder, '2P_ROIs_with_background.png'), dpi=300) -pt.save_figure_without_borders(f_c_nbg, os.path.join(save_folder, '2P_ROIs_without_background.png'), dpi=300) -pt.save_figure_without_borders(f_s_nbg, os.path.join(save_folder, '2P_ROI_surrounds_background.png'), dpi=300) -f.savefig(os.path.join(save_folder, 'roi_area_distribution.pdf'), dpi=300) diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/070_get_raw_center_and_surround_traces.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/070_get_raw_center_and_surround_traces.py deleted file mode 100644 index 0e57582..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/070_get_raw_center_and_surround_traces.py +++ /dev/null @@ -1,129 +0,0 @@ -import os -import numpy as np -import h5py -import time -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import corticalmapping.core.FileTools as ft -import corticalmapping.NwbTools as nt -import matplotlib.pyplot as plt -from multiprocessing import Pool - -CHUNK_SIZE = 2000 -PROCESS_NUM = 5 - -def get_chunk_frames(frame_num, chunk_size): - chunk_num = frame_num // chunk_size - if frame_num % chunk_size > 0: - chunk_num = chunk_num + 1 - - print("total number of frames:", frame_num) - print("total number of chunks:", chunk_num) - - chunk_ind = [] - chunk_starts = [] - chunk_ends = [] - - for chunk_i in range(chunk_num): - chunk_ind.append(chunk_i) - chunk_starts.append(chunk_i * chunk_size) - - if chunk_i < chunk_num - 1: - chunk_ends.append((chunk_i + 1) * chunk_size) - else: - chunk_ends.append(frame_num) - - return zip(chunk_ind, chunk_starts, chunk_ends) - -def get_traces(params): - t0 = time.time() - - chunk_ind, chunk_start, chunk_end, nwb_path, data_path, curr_folder, center_array, surround_array = params - - nwb_f = h5py.File(nwb_path, 'r') - print('\nstart analyzing chunk: {}'.format(chunk_ind)) - curr_mov = nwb_f[data_path][chunk_start: chunk_end] - nwb_f.close() - - # print 'extracting traces' - curr_traces_center = np.empty((center_array.shape[0], curr_mov.shape[0]), dtype=np.float32) - curr_traces_surround = np.empty((center_array.shape[0], curr_mov.shape[0]), dtype=np.float32) - for i in range(center_array.shape[0]): - curr_center = ia.WeightedROI(center_array[i]) - curr_surround = ia.ROI(surround_array[i]) - curr_traces_center[i, :] = curr_center.get_weighted_trace_pixelwise(curr_mov) - - # scale surround trace to be similar as center trace - mean_center_weight = curr_center.get_mean_weight() - curr_traces_surround[i, :] = curr_surround.get_binary_trace_pixelwise(curr_mov) * mean_center_weight - - # print 'saving chunk {} ...'.format(chunk_ind) - chunk_folder = os.path.join(curr_folder, 'chunks') - if not os.path.isdir(chunk_folder): - os.mkdir(chunk_folder) - chunk_f = h5py.File(os.path.join(chunk_folder, 'chunk_temp_' + ft.int2str(chunk_ind, 4) + '.hdf5')) - chunk_f['traces_center'] = curr_traces_center - chunk_f['traces_surround'] = curr_traces_surround - chunk_f.close() - - print('\n\t{:06d} seconds: chunk: {}; demixing finished.'.format(int(time.time() - t0), chunk_ind)) - - return None - -def run(): - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - plane_n = os.path.split(curr_folder)[1] - print(plane_n) - - print('getting masks ...') - 
rois_f = h5py.File('rois_and_traces.hdf5') - center_array = rois_f['masks_center'].value - surround_array = rois_f['masks_surround'].value - - print('\nanalyzing movie in chunks of size:', CHUNK_SIZE , 'frames.') - - nwb_folder = os.path.dirname(curr_folder) - nwb_fn = [f for f in os.listdir(nwb_folder) if f[-4:] == '.nwb'][0] - nwb_path = os.path.join(nwb_folder, nwb_fn) - print('\n' + nwb_path) - data_path = '/processing/motion_correction/MotionCorrection/' + plane_n + '/corrected/data' - - nwb_f = h5py.File(nwb_path, 'r') - total_frame = nwb_f[data_path].shape[0] - nwb_f.close() - - chunk_frames = get_chunk_frames(total_frame, CHUNK_SIZE) - chunk_params = [(cf[0], cf[1], cf[2], nwb_path, data_path, - curr_folder, center_array, surround_array) for cf in chunk_frames] - - p = Pool(PROCESS_NUM) - p.map(get_traces, chunk_params) - - chunk_folder = os.path.join(curr_folder, 'chunks') - chunk_fns = [f for f in os.listdir(chunk_folder) if f[0:11] == 'chunk_temp_'] - chunk_fns.sort() - print('\nreading chunks files ...') - print('\n'.join(chunk_fns)) - - traces_raw = [] - traces_surround = [] - - for chunk_fn in chunk_fns: - curr_chunk_f = h5py.File(os.path.join(chunk_folder, chunk_fn)) - traces_raw.append(curr_chunk_f['traces_center'].value) - traces_surround.append(curr_chunk_f['traces_surround'].value) - - print("saving ...") - traces_raw = np.concatenate(traces_raw, axis=1) - traces_surround = np.concatenate(traces_surround, axis=1) - rois_f['traces_center_raw'] = traces_raw - rois_f['traces_surround_raw'] = traces_surround - print('done.') - - -if __name__ == '__main__': - run() - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/090_get_neuropil_subtracted_traces.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/090_get_neuropil_subtracted_traces.py deleted file mode 100644 index 551768f..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/090_get_neuropil_subtracted_traces.py +++ /dev/null @@ -1,101 +0,0 @@ -import sys -import os -import h5py -import numpy as np -import corticalmapping.HighLevel as hl -import corticalmapping.core.FileTools as ft -import matplotlib.pyplot as plt - - -lam = 1. # 100. 
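Aside (not part of the patch): in get_traces of 070_get_raw_center_and_surround_traces.py above, each center trace is weight-averaged while the surround trace is a plain binary average rescaled by the center's mean weight, so the two land on a comparable scale. Assuming get_weighted_trace_pixelwise computes a per-frame weighted mean (an inference from its name, not a confirmed API), the array-level operation looks like this; the helper name weighted_trace is hypothetical:

import numpy as np

def weighted_trace(mov, weights):
    # mov: (frames, rows, cols) movie; weights: (rows, cols) ROI weight mask
    w = weights.astype(np.float64)
    # per-frame weighted mean over the ROI pixels
    return np.tensordot(mov, w, axes=([1, 2], [0, 1])) / w.sum()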
-plot_chunk_size = 5000 - - -def plot_traces_chunks(traces, labels, chunk_size, roi_ind): - """ - - :param traces: np.array, shape=[trace_type, t_num] - :param labels: - :param chunk_size: - :param figures_folder: - :param roi_ind: - :return: - """ - - t_num = traces.shape[1] - chunk_num = t_num // chunk_size - - chunks = [] - for chunk_ind in range(chunk_num): - chunks.append([chunk_ind * chunk_size, (chunk_ind + 1) * chunk_size]) - - if t_num % chunk_size != 0: - chunks.append([chunk_num * chunk_size, t_num]) - - v_max = np.amax(traces) - v_min = np.amin(traces) - - fig = plt.figure(figsize=(75, 20)) - fig.suptitle('neuropil subtraction for ROI: {}'.format(roi_ind)) - for chunk_ind, chunk in enumerate(chunks): - curr_ax = fig.add_subplot(len(chunks), 1, chunk_ind + 1) - for trace_ind in range(traces.shape[0]): - curr_ax.plot(traces[trace_ind, chunk[0]: chunk[1]], label=labels[trace_ind]) - - curr_ax.set_xlim([0, chunk_size]) - curr_ax.set_ylim([v_min, v_max * 1.2]) - curr_ax.legend() - - return fig - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -data_f = h5py.File('rois_and_traces.hdf5') -traces_raw = data_f['traces_center_raw'].value -traces_srround = data_f['traces_surround_raw'].value - -traces_subtracted = np.zeros(traces_raw.shape, np.float32) -ratio = np.zeros(traces_raw.shape[0], np.float32) -err = np.zeros(traces_raw.shape[0], np.float32) - -for i in range(traces_raw.shape[0]): - curr_trace_c = traces_raw[i] - curr_trace_s = traces_srround[i] - curr_r, curr_err, curr_trace_sub = hl.neural_pil_subtraction(curr_trace_c, curr_trace_s, lam=lam) - print "roi_%s \tr = %.4f; error = %.4f." % (ft.int2str(i, 5), curr_r, curr_err) - traces_subtracted[i] = curr_trace_sub - ratio[i] = curr_r - err[i] = curr_err - -print('\nplotting neuropil subtraction results ...') -figures_folder = 'figures/neuropil_subtraction_lam_{}'.format(lam) -if not os.path.isdir(figures_folder): - os.makedirs(figures_folder) -for roi_ind in range(traces_raw.shape[0]): - print('roi_{:04d}'.format(roi_ind)) - curr_traces = np.array([traces_raw[roi_ind], traces_srround[roi_ind], traces_subtracted[roi_ind]]) - curr_fig = plot_traces_chunks(traces=curr_traces, - labels=['center', 'surround', 'subtracted'], - chunk_size=plot_chunk_size, - roi_ind=roi_ind) - curr_fig.savefig(os.path.join(figures_folder, 'neuropil_subtraction_ROI_{:04d}.png'.format(roi_ind))) - curr_fig.clear() - plt.close(curr_fig) - -# wait for keyboard abortion -msg = raw_input('Do you want to save? (y/n)\n') -while True: - if msg == 'y': - break - elif msg == 'n': - sys.exit('Stop process without saving.') - else: - msg = raw_input('Do you want to save? 
(y/n)\n') - -data_f['traces_center_subtracted'] = traces_subtracted -data_f['neuropil_r'] = ratio -data_f['neuropil_err'] = err - -data_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/120_check_correlation.py b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/120_check_correlation.py deleted file mode 100644 index 65c3f07..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/120_check_correlation.py +++ /dev/null @@ -1,99 +0,0 @@ -import os -import h5py -import tifffile as tf -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.core.PlottingTools as pt -import corticalmapping.core.ImageAnalysis as ia - - -cor_thr = 0.8 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -def merger_pairs(pairs): - - total_set = set([]) - for pair in pairs: - total_set.update(set(pair)) - - all_nodes = list(total_set) - node_grps = [{n} for n in all_nodes] - - for pair in pairs: - - node0 = pair[0] - node1 = pair[1] - - for node_grp in node_grps: - if node0 in node_grp: - node_grp0 = node_grp - if node1 in node_grp: - node_grp1 = node_grp - - if node_grp0 != node_grp1: - node_grp0.update(node_grp1) - node_grps.remove(node_grp1) - - return node_grps - - -save_plot_dir = os.path.join(curr_folder, 'figures', 'dff_extraction') -if not os.path.isdir(save_plot_dir): - os.makedirs(save_plot_dir) - -bg = ia.array_nor(tf.imread('corrected_mean_projection.tif')) - -data_f = h5py.File('rois_and_traces.hdf5') -traces_subtracted = data_f['traces_center_subtracted'].value -masks = data_f['masks_center'].value - -f, axs = plt.subplots(1, 2, figsize=(16, 5)) - -cor_mat = np.corrcoef(traces_subtracted) -fig = axs[0].imshow(cor_mat, vmin=-1, vmax=1, cmap='jet', interpolation='nearest') -axs[0].set_title('correlation matrix') -f.colorbar(fig, ax=axs[0]) - -cors = cor_mat[np.tril_indices(cor_mat.shape[0], k=-1)] -cor_dist = axs[1].hist(cors, range=[-1., 1.], bins=40) -axs[1].set_title('correlation distribution') - -# cors = np.sort(cors) -# cor_thr = cors[int(cors.shape[0] * 0.99)] -# print('Cutoff threshold for correlation: {}'.format(cor_thr)) - -pos_cor_loc = np.where(cor_mat > cor_thr) - -roi_pairs = [] -for ind in range(len(pos_cor_loc[0])): - if pos_cor_loc[0][ind] < pos_cor_loc[1][ind]: - roi_pairs.append([pos_cor_loc[0][ind], pos_cor_loc[1][ind]]) -print(roi_pairs) - -roi_grps = merger_pairs(roi_pairs) -print roi_grps - -cor_grps = [] -for roi_grp in roi_grps: - grp_traces = traces_subtracted[list(roi_grp)] - grp_cors = np.corrcoef(grp_traces)[np.tril_indices(len(roi_grp), k=-1)] - cor_grps.append(np.mean(grp_cors)) - -cor_grps = np.array(cor_grps) -cor_scalars = [(c + 1) / 2 for c in cor_grps] -print cor_scalars -cor_colors = [pt.value_2_rgb(c, cmap='inferno') for c in cor_scalars] - -f_roi = plt.figure() -ax_roi = f_roi.add_subplot(111) -ax_roi.imshow(bg, vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') -for grp_ind, roi_grp in enumerate(roi_grps): - for roi_ind in roi_grp: - print roi_ind, cor_colors[grp_ind] - pt.plot_mask_borders(masks[roi_ind], plotAxis=ax_roi, color=cor_colors[grp_ind]) - -plt.show() - -data_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/old/100_get_dff_traces.py 
b/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/old/100_get_dff_traces.py deleted file mode 100644 index 3cd09ff..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/old/100_get_dff_traces.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import h5py -import allensdk.brain_observatory.dff as dff -import numpy as np -import corticalmapping.HighLevel as hl -import corticalmapping.core.FileTools as ft - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_plot_dir = os.path.join(curr_folder, 'figures', 'dff_extraction') -if not os.path.isdir(save_plot_dir): - os.makedirs(save_plot_dir) - -data_f = h5py.File('rois_and_traces.hdf5') -traces_subtracted = data_f['traces_center_subtracted'].value - -traces_dff = dff.compute_dff(traces_subtracted, save_plot_dir=save_plot_dir, - mode_kernelsize=100, mean_kernelsize=100) -data_f['traces_center_dff'] = traces_dff -data_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/000_reorganize_data.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/000_reorganize_data.py deleted file mode 100644 index 9d1ed92..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/000_reorganize_data.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import numpy as np -import tifffile as tf - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180215-M371139-2p" -file_identifier = 'Posterior_FOV_00001' -ch_ns = ['green', 'red'] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -f_ns = [f for f in os.listdir(data_folder) if file_identifier in f and f[-4:] == '.tif'] -f_ns.sort() -print('\n'.join(f_ns)) - -save_folders = [] -for ch_n in ch_ns: - curr_save_folder = os.path.join(data_folder, file_identifier, ch_n) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - save_folders.append(curr_save_folder) - -for f_n in f_ns: - print('processing: {} ...'.format(f_n)) - curr_mov = tf.imread(os.path.join(data_folder, f_n)) - for ch_num, ch_n in enumerate(ch_ns): - curr_mov_ch = curr_mov[ch_num::len(ch_ns)].transpose((0, 2, 1))[:, ::-1, :] - curr_save_name = os.path.splitext(f_n)[0] + '_' + ch_n + '.tif' - tf.imsave(os.path.join(save_folders[ch_num], curr_save_name), curr_mov_ch) diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/010_motion_correction.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/010_motion_correction.py deleted file mode 100644 index 7487ac3..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/010_motion_correction.py +++ /dev/null @@ -1,53 +0,0 @@ -import os -import stia.motion_correction as mc - -def run(): - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180104-M361012-2p\FOV1_injection_site_00001" - ref_ch_n = 'red' - apply_ch_ns = ['green', 'red'] - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - ref_data_folder = os.path.join(data_folder, ref_ch_n) - - mc.motion_correction(input_folder=ref_data_folder, - input_path_identifier='.tif', - process_num=3, - 
output_folder=os.path.join(ref_data_folder, 'corrected'), - anchor_frame_ind_chunk=10, - anchor_frame_ind_projection=0, - iteration_chunk=10, - iteration_projection=10, - max_offset_chunk=(20., 20.), - max_offset_projection=(20., 20.), - align_func=mc.phase_correlation, - preprocessing_type=0, - fill_value=0.) - - offsets_path = os.path.join(ref_data_folder, 'corrected', 'correction_offsets.hdf5') - ref_fns = [f for f in os.listdir(ref_data_folder) if f[-4:] == '.tif'] - ref_fns.sort() - ref_paths = [os.path.join(ref_data_folder, f) for f in ref_fns] - print('\nreference paths:') - print('\n'.join(ref_paths)) - - for apply_ch_i, apply_ch_n in enumerate(apply_ch_ns): - apply_data_folder = os.path.join(data_folder, apply_ch_n) - apply_fns = [f for f in os.listdir(apply_data_folder) if f[-4:] == '.tif'] - apply_fns.sort() - apply_paths = [os.path.join(apply_data_folder, f) for f in apply_fns] - print('\napply paths:') - print('\n'.join(apply_paths)) - - mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=zip(ref_paths, apply_paths), - output_folder=os.path.join(apply_data_folder, 'corrected'), - process_num=3, - fill_value=0., - avi_downsample_rate=20, - is_equalizing_histogram=False) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/020_downsample.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/020_downsample.py deleted file mode 100644 index 6b35f29..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/020_downsample.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia - -xy_downsample_rate = 2 -t_downsample_rate = 10 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -f_ns = [f for f in os.listdir(curr_folder) if f[-4:] == '.tif' and 'downsampled' not in f] -f_ns.sort() -print('\n'.join(f_ns)) - -for f_n in f_ns: - print('processing {} ...'.format(f_n)) - mov = tf.imread(f_n) - mov_d = ia.rigid_transform_cv2(img=mov, zoom=(1. 
/ xy_downsample_rate)) - mov_d = ia.z_downsample(mov_d, downSampleRate=t_downsample_rate) - save_n = os.path.splitext(f_n)[0] + '_downsampled.tif' - tf.imsave(save_n, mov_d) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/030_downsample_from_server.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/030_downsample_from_server.py deleted file mode 100644 index 08e0ec6..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_multi_channel_regular_2p/030_downsample_from_server.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180104-M361012-2p\FOV1_injection_site_00001" -chn = 'red' -xy_downsample_rate = 2 -t_downsample_rate = 30 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -f_ns = [f for f in os.listdir(os.path.join(data_folder, chn, 'corrected')) if f[-14:] == '_corrected.tif'] -f_ns.sort() -print('\n'.join(f_ns)) - -mov_d = [] - -for f_n in f_ns: - print('processing {} ...'.format(f_n)) - curr_mov = tf.imread(os.path.join(data_folder, chn, 'corrected', f_n)) - curr_mov_d = ia.rigid_transform_cv2(img=curr_mov, zoom=(1. / xy_downsample_rate)) - curr_mov_d = ia.z_downsample(curr_mov_d, downSampleRate=t_downsample_rate) - mov_d.append(curr_mov_d) - -mov_d = np.concatenate(mov_d, axis=0) -save_n = os.path.split(data_folder)[1] + '_' + chn + '_downsampled.tif' -tf.imsave(save_n, mov_d) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/010_get_wf_vas_maps.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/010_get_wf_vas_maps.py deleted file mode 100644 index 6d2f8d7..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/010_get_wf_vas_maps.py +++ /dev/null @@ -1,33 +0,0 @@ -import os -import numpy as np -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -import tifffile as tf - - -vas_map_paths= [r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" - r"\180404-M360495-2p\vasmap_wf\180404JCamF100", - r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" - r"\180404-M360495-2p\vasmap_wf\180404JCamF101", - r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" - r"\180404-M360495-2p\vasmap_wf\180404JCamF102", - ] - -saveFolder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(saveFolder) - -vas_maps = [] - -for vas_map_path in vas_map_paths: - - vas_map_focused, _, _ = ft.importRawJCamF(vas_map_path, column=1024, row=1024, headerLength = 116, - tailerLength=452) - vas_map_focused = vas_map_focused[2:] - vas_map_focused = vas_map_focused[:, ::-1, :] - vas_map_focused[vas_map_focused > 50000] = 400 - vas_map_focused = np.mean(vas_map_focused, axis=0) - vas_maps.append(ia.array_nor(vas_map_focused)) - -vas_map = ia.array_nor(np.mean(vas_maps, axis=0)) - -tf.imsave('vas_map_focused_wf.tif', vas_map.astype(np.float32)) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/020_rotate_2P_vas_maps.py 
b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/020_rotate_2P_vas_maps.py deleted file mode 100644 index 67f2c6f..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/020_rotate_2P_vas_maps.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import numpy as np -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -import tifffile as tf - - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180404-M360495-2p\vasmap_2p" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -data_folder = os.path.join(curr_folder, data_folder) - -file_list = [f for f in os.listdir(data_folder) if f[-4:] == '.tif'] -file_list.sort() -print '\n'.join(file_list) - -file_paths = [os.path.join(data_folder, f) for f in file_list] - -for file_path in file_paths: - fn, ext = os.path.splitext(os.path.split(file_path)[1]) - save_path = os.path.join(data_folder, fn + '_rotated.tif') - print save_path - - curr_mov = tf.imread(file_path) - curr_mov = curr_mov.transpose((0, 2, 1))[:, ::-1, :] - tf.imsave(save_path, curr_mov) - - -# correction = mc.align_multiple_files_iterate(file_paths, output_folder=data_folder, is_output_mov=True, iteration=10, -# max_offset=(10., 10.), align_func=mc.phase_correlation, fill_value=0., -# verbose=True, offset_file_name=offset_file_name, -# mean_projection_file_name=mean_projection_file_name) - - -# print correction \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/030_get_2p_vas_maps.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/030_get_2p_vas_maps.py deleted file mode 100644 index 40bd360..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/030_get_2p_vas_maps.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import matplotlib.pyplot as plt - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180404-M360495-2p\vasmap_2p" - -zoom1_paths = [os.path.join(data_folder, f) for f in os.listdir(data_folder) - if f[-12:] == '_rotated.tif' and '_zoom1_' in f] - -# zoom2_paths = [os.path.join(data_folder, f) for f in os.listdir(data_folder) -# if f[-12:] == '_rotated.tif' and '_zoom2_' in f] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -vas_map_zoom1 = [] -# vas_map_zoom2 = [] - -for zoom1_path in zoom1_paths: - curr_vasmap = np.mean(tf.imread(zoom1_path), axis=0) - vas_map_zoom1.append(curr_vasmap) - -# for zoom2_path in zoom2_paths: -# curr_vasmap = np.mean(tf.imread(zoom2_path), axis=0) -# vas_map_zoom2.append(curr_vasmap) - -vas_map_zoom1 = ia.array_nor(np.mean(vas_map_zoom1, axis=0)) -# vas_map_zoom2 = ia.array_nor(np.mean(vas_map_zoom2, axis=0)) - -tf.imsave('vas_map_focused_2p_zoom1.tif', vas_map_zoom1.astype(np.float32)) -# tf.imsave('vas_map_focused_2p_zoom2.tif', vas_map_zoom2.astype(np.float32)) diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/040_reorganize_2P_movies.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/040_reorganize_2P_movies.py deleted file mode 100644 index e3b74fe..0000000 --- 
a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/040_reorganize_2P_movies.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import numpy as np -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -import tifffile as tf - -data_folder = r'\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180404-M360495-2p\2p_movie' -frames_per_file = 500 -temporal_downsample_rate = 2 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -file_list = [f for f in os.listdir(data_folder) if f[-4:] == '.tif'] -file_list.sort() -print '\n'.join(file_list) - -file_paths = [os.path.join(data_folder, f) for f in file_list] - -file_id_save = 0 -total_mov = None -base_name = '_'.join(file_list[0].split('_')[:-1]) -save_folder = os.path.join(data_folder, 'reorged') -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -for file_path in file_paths: - print('\nprocessing {} ...'.format(os.path.split(file_path)[1])) - - curr_mov = tf.imread(file_path) - curr_mov = curr_mov.transpose((0, 2, 1))[:, ::-1, :] - - if temporal_downsample_rate != 1: - curr_mov = ia.z_downsample(curr_mov, downSampleRate=temporal_downsample_rate) - - if total_mov is None: - total_mov = curr_mov - else: - total_mov = np.concatenate((total_mov, curr_mov), axis=0) - - while (total_mov is not None) and (total_mov.shape[0] >= frames_per_file): - - num_file_to_save = total_mov.shape[0] // frames_per_file - - for save_file_id in range(num_file_to_save): - save_chunk = total_mov[save_file_id * frames_per_file : (save_file_id + 1) * frames_per_file] - save_path = os.path.join(save_folder, '{}_{:05d}_reorged.tif'.format(base_name, file_id_save)) - print('saving {} ...'.format(os.path.split(save_path)[1])) - tf.imsave(save_path, save_chunk) - file_id_save = file_id_save + 1 - - if total_mov.shape[0] % frames_per_file == 0: - total_mov = None - else: - frame_num_left = total_mov.shape[0] % frames_per_file - total_mov = total_mov[-frame_num_left:] - -if total_mov is not None: - save_path = os.path.join(save_folder, '{}_{:05d}_reorged.tif'.format(base_name, file_id_save)) - print('saving {} ...'.format(os.path.split(save_path)[1])) - tf.imsave(save_path, total_mov) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/050_motion_correction.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/050_motion_correction.py deleted file mode 100644 index 42e1658..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/050_motion_correction.py +++ /dev/null @@ -1,41 +0,0 @@ -import os -import stia.motion_correction as mc - -def run(): - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180404-M360495-2p\2p_movie\reorged" - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - mc.motion_correction(input_folder=data_folder, - input_path_identifier='.tif', - process_num=6, - output_folder=os.path.join(data_folder, 'corrected'), - anchor_frame_ind_chunk=10, - anchor_frame_ind_projection=0, - iteration_chunk=10, - iteration_projection=10, - max_offset_chunk=(40., 40.), - max_offset_projection=(40., 40.), - align_func=mc.phase_correlation, - preprocessing_type=0, - fill_value=0.) 
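Aside (not part of the patch): 040_reorganize_2P_movies.py above regroups an arbitrary sequence of movie files into fixed-size output files, carrying the remainder across file boundaries. A minimal sketch of that accumulator pattern with plain numpy; the generator name rechunk_frames is hypothetical:

import numpy as np

def rechunk_frames(movies, frames_per_file=500):
    # movies: iterable of (n, rows, cols) arrays; yields (frames_per_file, ...)
    # chunks, with one final short chunk if the frames do not divide evenly
    buf = None
    for mov in movies:
        buf = mov if buf is None else np.concatenate((buf, mov), axis=0)
        while buf.shape[0] >= frames_per_file:
            yield buf[:frames_per_file]
            buf = buf[frames_per_file:]
    if buf is not None and buf.shape[0] > 0:
        yield buf

The original script additionally transposes, flips and temporally downsamples each file before accumulating; those steps are orthogonal to the re-chunking itself.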
- - offsets_path = os.path.join(data_folder, 'corrected', 'correction_offsets.hdf5') - fns = [f for f in os.listdir(data_folder) if f[-4:] == '.tif'] - fns.sort() - f_paths = [os.path.join(data_folder, f) for f in fns] - print('\nfile paths:') - print('\n'.join(f_paths)) - - mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=zip(f_paths, f_paths), - output_folder=os.path.join(data_folder, 'corrected'), - process_num=6, - fill_value=0., - avi_downsample_rate=10, - is_equalizing_histogram=True) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/055_downsample_from_server.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/055_downsample_from_server.py deleted file mode 100644 index 0719386..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/055_downsample_from_server.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180404-M360495-2p\2p_movie\reorged" -xy_downsample_rate = 2 -t_downsample_rate = 10 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -corr_folder = os.path.join(data_folder, 'corrected') - -f_ns = [f for f in os.listdir(corr_folder) if f[-14:] == '_corrected.tif'] -f_ns.sort() -print('\n'.join(f_ns)) - -mov_d = [] - -for f_n in f_ns: - print('processing {} ...'.format(f_n)) - curr_mov = tf.imread(os.path.join(corr_folder, f_n)) - curr_mov_d = ia.rigid_transform_cv2(img=curr_mov, zoom=(1.
/ xy_downsample_rate)) - curr_mov_d = ia.z_downsample(curr_mov_d, downSampleRate=t_downsample_rate) - mov_d.append(curr_mov_d) - -mov_d = np.concatenate(mov_d, axis=0).astype(np.int16) -tf.imsave('2p_movie_downsampled.tif', mov_d) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/060_get_image_data.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/060_get_image_data.py deleted file mode 100644 index 20db02c..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/060_get_image_data.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -import h5py -import numpy as np -import tifffile as tf -from toolbox.misc import BinarySlicer -import corticalmapping.core.FileTools as ft - -save_fn = '180404_M360495_110_2p_movies.hdf5' -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180404-M360495-2p\2p_movie" \ - r"\reorged\corrected" -identifier = '_00110_' -frame_num_tot = 16000 -resolution = [1024, 1024] # rows, columns of each frame - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -data_shape = (frame_num_tot, resolution[0], resolution[1]) - -curr_flist = [f for f in os.listdir(data_folder) if identifier in f and f[-14:] == '_corrected.tif'] -curr_flist.sort() -print('\n'.join(curr_flist)) - -print ('\nWriting file: ' + save_fn) -save_f = h5py.File(save_fn) -save_dset = save_f.create_dataset('2p_movie', data_shape, dtype=np.int16, compression='lzf') - -start_frame = 0 -for curr_f in curr_flist: - print curr_f - curr_mov = tf.imread(os.path.join(data_folder, curr_f)) - end_frame = start_frame + curr_mov.shape[0] - save_dset[start_frame : end_frame, :, :] = curr_mov - start_frame = end_frame - -save_dset.attrs['conversion'] = 1. -save_dset.attrs['resolution'] = 1. -save_dset.attrs['unit'] = 'arbitrary_unit' - -save_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/070_get_mmap_files_for_caiman_from_tif.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/070_get_mmap_files_for_caiman_from_tif.py deleted file mode 100644 index d19d61d..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/070_get_mmap_files_for_caiman_from_tif.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import h5py - - -file_range = None # [0, 37] # None - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180404-M360495-2p\2p_movie\reorged\corrected" -base_name = '180404_M360495_110' -t_downsample_rate = 10 - -f_ns = [f for f in os.listdir(data_folder) if f[-14:] == '_corrected.tif'] -f_ns.sort() -if file_range is not None: - f_ns = f_ns[file_range[0] : file_range[1]] -print('\n'.join(f_ns)) - -mov_join = [] -for f_n in f_ns: - curr_mov = tf.imread(os.path.join(data_folder, f_n)) - - if curr_mov.shape[0] % t_downsample_rate !=0: - raise ValueError('the frame number of {} ({}) is not divisible by t_downsample_rate ({}).'
- .format(f_n, curr_mov.shape[0], t_downsample_rate)) - - curr_mov_d = ia.z_downsample(curr_mov, downSampleRate=t_downsample_rate) - mov_join.append(curr_mov_d) - -mov_join = np.concatenate(mov_join, axis=0) -add_to_mov = 10 - np.amin(mov_join) - -save_name = '{}_d1_{}_d2_{}_d3_1_order_C_frames_{}_.mmap'\ - .format(base_name, mov_join.shape[2], mov_join.shape[1], mov_join.shape[0]) - -mov_join = mov_join.reshape((mov_join.shape[0], mov_join.shape[1] * mov_join.shape[2]), order='F').transpose() - -mov_join_mmap = np.memmap(os.path.join(data_folder, save_name), shape=mov_join.shape, order='C', dtype=np.float32, - mode='w+') -mov_join_mmap[:] = mov_join + add_to_mov -mov_join_mmap.flush() -del mov_join_mmap - -save_file = h5py.File(os.path.join(data_folder, 'caiman_segmentation_results.hdf5')) -save_file['bias_added_to_movie'] = add_to_mov -save_file.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/090_show_mmap_movie.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/090_show_mmap_movie.py deleted file mode 100644 index 1b69a19..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/090_show_mmap_movie.py +++ /dev/null @@ -1,35 +0,0 @@ -import sys; print('Python %s on %s' % (sys.version, sys.platform)) -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import os -import numpy as np -import caiman as cm -import matplotlib.pyplot as plt - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180209-M360495-2p\2p_movie\rotated\corrected" -fn = '180209_M360495_110_d1_512_d2_512_d3_1_order_C_frames_400_.mmap' - -fn_parts = fn.split('_') -d1 = int(fn_parts[fn_parts.index('d1') + 1]) # column, x -d2 = int(fn_parts[fn_parts.index('d2') + 1]) # row, y -d3 = int(fn_parts[fn_parts.index('d3') + 1]) # channel -d4 = int(fn_parts[fn_parts.index('frames') + 1]) # frame, T -order = fn_parts[fn_parts.index('order') + 1] - -mov = np.memmap(filename=os.path.join(data_folder, fn), shape=(d1, d2, d4), order=order, dtype=np.float32, mode='r') -mov = mov.transpose((2, 1, 0)) - -print('movie shape: {}'.format(mov.shape)) - -f = plt.figure(figsize=(8, 5)) -ax = f.add_subplot(111) -fig = ax.imshow(np.mean(mov, axis=0), vmin=300, vmax=1500, cmap='inferno', interpolation='nearest') -f.colorbar(fig) -plt.show() - -input("Press enter to continue ...") - -print('playing {} ...'.format(fn)) -cm.movie(mov).play(fr=30,magnification=1,gain=2.) 
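Aside (not part of the patch): 070_get_mmap_files_for_caiman_from_tif.py above and 090_show_mmap_movie.py agree on one memory layout: each frame is flattened column-major into a (pixels, frames) matrix stored C-order, and the reader maps it back as (d1, d2, frames) with d1 = columns before a final transpose. A self-contained numpy check of that round trip, with toy sizes standing in for real dimensions:

import numpy as np

T, R, C = 4, 3, 2  # frames, rows (d2), columns (d1)
mov = np.arange(T * R * C, dtype=np.float32).reshape((T, R, C))

# write side, as in 070_get_mmap_files_for_caiman_from_tif.py
flat = mov.reshape((T, R * C), order='F').transpose().copy(order='C')

# read side, as in 090_show_mmap_movie.py: view the buffer as
# (d1, d2, frames), then transpose((2, 1, 0)) to recover (frames, rows, cols)
rec = flat.reshape((C, R, T)).transpose((2, 1, 0))
assert np.array_equal(rec, mov)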
- diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/110_caiman_segmentation.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/110_caiman_segmentation.py deleted file mode 100644 index bd6b4b4..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/110_caiman_segmentation.py +++ /dev/null @@ -1,103 +0,0 @@ -import sys; print('Python %s on %s' % (sys.version, sys.platform)) -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import os -import numpy as np -import caiman as cm -import matplotlib.pyplot as plt -from caiman.source_extraction.cnmf import cnmf as cnmf -import h5py -from shutil import copyfile - -def run(): - - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180404-M360495-2p\2p_movie\reorged\corrected" - play_movie = False - resolution = 1024 - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - - # %% start cluster - c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=6, single_thread=False) - - os.chdir(data_folder) - - fn = [f for f in os.listdir(data_folder) if f[-5:] == '.mmap'] - if len(fn) > 1: - print('\n'.join(fn)) - raise LookupError('more than one file found.') - elif len(fn) == 0: - raise LookupError('no file found.') - else: - fn = fn[0] - - fn_parts = fn.split('_') - d1 = int(fn_parts[fn_parts.index('d1') + 1]) # column, x - d2 = int(fn_parts[fn_parts.index('d2') + 1]) # row, y - d3 = int(fn_parts[fn_parts.index('d3') + 1]) # channel - d4 = int(fn_parts[fn_parts.index('frames') + 1]) # frame, T - order = fn_parts[fn_parts.index('order') + 1] - - print('playing {} ...'.format(fn)) - - mov = np.memmap(filename=fn, shape=(d1, d2, d4), order=order, dtype=np.float32, mode='r') - mov = mov.transpose((2, 1, 0)) - - # mov = cm.load(os.path.join(data_folder, fn)) - - print('shape of joined movie: {}.'.format(mov.shape)) - - #%% play movie, press q to quit - if play_movie: - cm.movie(mov).play(fr=50,magnification=1,gain=2.) - - #%% movie cannot be negative! - mov_min = float(np.amin(mov)) - print('minimum pixel value: {}.'.format(mov_min)) - if mov_min < 0: - raise Exception('Movie too negative, add_to_movie should be larger') - - #%% correlation image. 
From here infer neuron size and density - Cn = cm.movie(mov).local_correlations(swap_dim=False) - plt.imshow(Cn, cmap='gray') - plt.show() - - cnm = cnmf.CNMF(n_processes, - k=40, # number of neurons expected per patch - gSig=[5, 5] , # expected half size of neurons - merge_thresh=0.9, # merging threshold, max correlation allowed - p=2, # order of the autoregressive system - dview=dview, - Ain=None, - method_deconvolution='oasis', - rolling_sum = False, - method_init='sparse_nmf', - alpha_snmf=10e1, - ssub=1, - tsub=1, - p_ssub=1, - p_tsub=1, - rf=int(resolution / 2), # half-size of the patches in pixels - border_pix=20, - do_merge=False) - cnm = cnm.fit(mov) - A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn - #%% - crd = cm.utils.visualization.plot_contours(cnm.A, Cn) - plt.show() - input("Press enter to continue ...") - - roi_num = cnm.A.shape[1] - save_fn = h5py.File('caiman_segmentation_results.hdf5') - bias = save_fn['bias_added_to_movie'].value - save_fn['masks'] = np.array(cnm.A.todense()).T.reshape((roi_num, resolution, resolution), order='F') - save_fn['traces'] = cnm.C - bias - save_fn.close() - - copyfile(os.path.join(data_folder, 'caiman_segmentation_results.hdf5'), - os.path.join(curr_folder, 'caiman_segmentation_results.hdf5')) - - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/120_get_cells_file.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/120_get_cells_file.py deleted file mode 100644 index 97a3a27..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/120_get_cells_file.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import numpy as np -import h5py -import tifffile as tf -import allensdk_internal.brain_observatory.mask_set as mask_set -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import scipy.ndimage as ni -import matplotlib.pyplot as plt - - -isSave = True -is_filter = False - -filter_sigma = 0.5 # parameters only used if filter the rois -dilation_iterations = 0 # parameters only used if filter the rois -cut_thr = 3. 
# parameters only used if filter the rois - -bg_fn = "corrected_mean_projection.tif" -save_folder = 'figures' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -data_f = h5py.File('caiman_segmentation_results.hdf5') -masks = data_f['masks'].value -data_f.close() - -bg = tf.imread(bg_fn) - -final_roi_dict = {} - -for i, mask in enumerate(masks): - - if is_filter: - mask_nor = (mask - np.mean(mask.flatten())) / np.abs(np.std(mask.flatten())) - mask_nor_f = ni.filters.gaussian_filter(mask_nor, filter_sigma) - mask_bin = np.zeros(mask_nor_f.shape, dtype=np.uint8) - mask_bin[mask_nor_f > cut_thr] = 1 - - else: - mask_bin = np.zeros(mask.shape, dtype=np.uint8) - mask_bin[mask > 0] = 1 - - mask_labeled, mask_num = ni.label(mask_bin) - curr_mask_dict = ia.get_masks(labeled=mask_labeled, keyPrefix='caiman_mask_{:03d}'.format(i), labelLength=5) - for roi_key, roi_mask in curr_mask_dict.items(): - final_roi_dict.update({roi_key: ia.WeightedROI(roi_mask * mask)}) - -print 'Total number of ROIs:',len(final_roi_dict) - -f = plt.figure(figsize=(15, 8)) -ax1 = f.add_subplot(121) -ax1.imshow(ia.array_nor(bg), vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') -colors1 = pt.random_color(masks.shape[0]) -for i, mask in enumerate(masks): - pt.plot_mask_borders(mask, plotAxis=ax1, color=colors1[i]) -ax1.set_title('original ROIs') -ax1.set_axis_off() -ax2 = f.add_subplot(122) -ax2.imshow(ia.array_nor(bg), vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') -colors2 = pt.random_color(len(final_roi_dict)) -i = 0 -for roi in final_roi_dict.values(): - pt.plot_mask_borders(roi.get_binary_mask(), plotAxis=ax2, color=colors2[i]) - i = i + 1 -ax2.set_title('filtered ROIs') -ax2.set_axis_off() -plt.show() - -if isSave: - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - f.savefig(os.path.join(save_folder, 'caiman_segmentation_filtering.pdf'), dpi=300) - - cell_file = h5py.File('cells.hdf5', 'w') - - i = 0 - for key, value in sorted(final_roi_dict.iteritems()): - curr_grp = cell_file.create_group('cell{:04d}'.format(i)) - curr_grp.attrs['name'] = key - value.to_h5_group(curr_grp) - i += 1 - - cell_file.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/130_refine_cells.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/130_refine_cells.py deleted file mode 100644 index 88b88e5..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/130_refine_cells.py +++ /dev/null @@ -1,173 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Jun 30 17:44:42 2015 - -@author: junz -""" -import os -import h5py -import numpy as np -import operator -import matplotlib.pyplot as plt -import scipy.ndimage as ni -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.FileTools as ft -import corticalmapping.core.PlottingTools as pt -import corticalmapping.SingleCellAnalysis as sca - -plt.ioff() - -# pixels, masks with center location within this pixel region at the image border will be discarded -center_margin = [20, 20] - -# area range, range of number of pixels of a valid roi -area_range = [20, 10000] - -# for the two masks that are overlapping, if the ratio between overlap and the area of the smaller mask is larger than -# this value, the smaller mask will be discarded. 
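Aside (not part of the patch): when is_filter is enabled, 120_get_cells_file.py above splits each CaImAn weight mask into connected components by z-scoring, smoothing, thresholding and labeling. That pipeline in isolation, with sigma and thr standing in for filter_sigma and cut_thr; the helper name split_weight_mask is hypothetical:

import numpy as np
import scipy.ndimage as ni

def split_weight_mask(mask, sigma=0.5, thr=3.):
    # z-score so the cut threshold is in units of standard deviation
    mask_nor = (mask - mask.mean()) / np.abs(mask.std())
    # smooth, then keep pixels whose smoothed z-score clears the threshold
    mask_bin = (ni.gaussian_filter(mask_nor, sigma) > thr).astype(np.uint8)
    # one weighted ROI per connected component of the binary mask
    labeled, n = ni.label(mask_bin)
    return [(labeled == i + 1) * mask for i in range(n)]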
-overlap_thr = 0.5 - -save_folder = 'figures' - -data_file_name = 'cells.hdf5' -save_file_name = 'cells_refined.hdf5' -background_file_name = "corrected_mean_projection.tif" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -# read cells -dfile = h5py.File(data_file_name) -cells = {} -for cellname in dfile.iterkeys(): - cells.update({cellname:ia.WeightedROI.from_h5_group(dfile[cellname])}) - -print 'total number of cells:', len(cells) - -# get the names of cells which are on the edge -edge_cells = [] -for cellname, cellmask in cells.iteritems(): - dimension = cellmask.dimension - center = cellmask.get_center() - if center[0] < center_margin[0] or \ - center[0] > dimension[0] - center_margin[0] or \ - center[1] < center_margin[1] or \ - center[1] > dimension[1] - center_margin[1]: - - # cellmask.plot_binary_mask_border(color='#ff0000', borderWidth=1) - # plt.title(cellname) - # plt.show() - - edge_cells.append(cellname) - -print '\ncells to be removed because they are on the edges:' -print '\n'.join(edge_cells) - -# remove edge cells -for edge_cell in edge_cells: - _ = cells.pop(edge_cell) - -# get dictionary of cell areas -cell_areas = {} -for cellname, cellmask in cells.iteritems(): - cell_areas.update({cellname: cellmask.get_binary_area()}) - - -# remove cellnames that have area outside of the area_range -invalid_cell_ns = [] -for cellname, cellarea in cell_areas.items(): - if cellarea < area_range[0] or cellarea > area_range[1]: - invalid_cell_ns.append(cellname) -print "cells to be removed because they do not meet area criterion:" -print "\n".join(invalid_cell_ns) -for invalid_cell_n in invalid_cell_ns: - cell_areas.pop(invalid_cell_n) - - -# sort cells with their binary area -cell_areas_sorted = sorted(cell_areas.items(), key=operator.itemgetter(1)) -cell_areas_sorted.reverse() -cell_names_sorted = [c[0] for c in cell_areas_sorted] -# print '\n'.join([str(c) for c in cell_areas_sorted]) - -# get the name of cells that needs to be removed because of overlapping -retain_cells = [] -remove_cells = [] -for cell1_name in cell_names_sorted: - cell1_mask = cells[cell1_name] - is_remove = 0 - cell1_area = cell1_mask.get_binary_area() - for cell2_name in retain_cells: - cell2_mask = cells[cell2_name] - cell2_area = cell2_mask.get_binary_area() - curr_overlap = cell1_mask.binary_overlap(cell2_mask) - - if float(curr_overlap) / cell1_area > overlap_thr: - remove_cells.append(cell1_name) - is_remove = 1 - print cell1_name, ':', cell1_mask.get_binary_area(), ': removed' - - # f = plt.figure(figsize=(10,10)) - # ax = f.add_subplot(111) - # cell1_mask.plot_binary_mask_border(plotAxis=ax, color='#ff0000', borderWidth=1) - # cell2_mask.plot_binary_mask_border(plotAxis=ax, color='#0000ff', borderWidth=1) - # ax.set_title('red:'+cell1_name+'; blue:'+cell2_name) - # plt.show() - break - - if is_remove == 0: - retain_cells.append(cell1_name) - print cell1_name, ':', cell1_mask.get_binary_area(), ': retained' - -print '\ncells to be removed because of overlapping:' -print '\n'.join(remove_cells) - -print '\ntotal number of reatined cells:', len(retain_cells) - -# plotting -colors = pt.random_color(len(cells.keys())) -bgImg = tf.imread(background_file_name) - -f = plt.figure(figsize=(10, 10)) -ax = f.add_subplot(111) -ax.imshow(ia.array_nor(bgImg), cmap='gray', vmin=0, vmax=0.5, interpolation='nearest') - -f2 = plt.figure(figsize=(10, 10)) -ax2 = f2.add_subplot(111) -ax2.imshow(np.zeros(bgImg.shape, 
dtype=np.uint8), vmin=0, vmax=1, cmap='gray', interpolation='nearest') - -i = 0 -for retain_cell in retain_cells: - cells[retain_cell].plot_binary_mask_border(plotAxis=ax, color=colors[i], borderWidth=1) - cells[retain_cell].plot_binary_mask_border(plotAxis=ax2, color=colors[i], borderWidth=1) - i += 1 -plt.show() - -# save figures -pt.save_figure_without_borders(f, os.path.join(save_folder, '2P_refined_ROIs_with_background.png'), dpi=300) -pt.save_figure_without_borders(f2, os.path.join(save_folder, '2P_refined_ROIs_without_background.png'), dpi=300) - -# save h5 file -save_file = h5py.File(save_file_name, 'w') -i = 0 -for retain_cell in retain_cells: - print retain_cell, ':', cells[retain_cell].get_binary_area() - - currGroup = save_file.create_group('cell' + ft.int2str(i, 4)) - currGroup.attrs['name'] = retain_cell - roiGroup = currGroup.create_group('roi') - cells[retain_cell].to_h5_group(roiGroup) - i += 1 - -for attr, value in dfile.attrs.iteritems(): - save_file.attrs[attr] = value - -save_file.close() -dfile.close() - - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/140_get_weighted_rois_and_surrounds.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/140_get_weighted_rois_and_surrounds.py deleted file mode 100644 index b31e405..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/140_get_weighted_rois_and_surrounds.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Jun 30 17:44:42 2015 - -@author: junz -""" - -import os -import numpy as np -import h5py -import tifffile as tf -import allensdk_internal.brain_observatory.mask_set as mask_set -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import scipy.ndimage as ni -import matplotlib.pyplot as plt - -plt.ioff() - -data_file_name = 'cells_refined.hdf5' -background_file_name = "corrected_mean_projection.tif" -save_folder = 'figures' - -overlap_threshold = 0.9 -surround_limit = [1, 8] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -print 'reading cells file ...' -data_f = h5py.File(data_file_name, 'r') - -cell_ns = data_f.keys() -cell_ns.sort() - -binary_mask_array = [] -weight_mask_array = [] - -for cell_n in cell_ns: - curr_roi = ia.ROI.from_h5_group(data_f[cell_n]['roi']) - binary_mask_array.append(curr_roi.get_binary_mask()) - weight_mask_array.append(curr_roi.get_weighted_mask()) - -data_f.close() -binary_mask_array = np.array(binary_mask_array) -weight_mask_array = np.array(weight_mask_array) -print 'starting mask_array shape:', weight_mask_array.shape - -print 'getting total mask ...' -total_mask = np.zeros((binary_mask_array.shape[1], binary_mask_array.shape[2]), dtype=np.uint8) -for curr_mask in binary_mask_array: - total_mask = np.logical_or(total_mask, curr_mask) -total_mask = np.logical_not(total_mask) - -plt.imshow(total_mask, interpolation='nearest') -plt.title('total_mask') -plt.show() - -print 'getting and surround masks ...' 
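# The surround (neuropil) mask built below is an annulus: the region between a
# small and a large binary dilation of each center mask (surround_limit gives
# the two iteration counts), with pixels belonging to any ROI center excluded
# via total_mask. A rough equivalent of the loop body (illustrative sketch):
#
#     ring = np.logical_xor(ni.binary_dilation(center, iterations=8),
#                           ni.binary_dilation(center, iterations=1))
#     surround = np.logical_and(ring, total_mask)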
-binary_surround_array = [] -for binary_center in binary_mask_array: - curr_surround = np.logical_xor(ni.binary_dilation(binary_center, iterations=surround_limit[1]), - ni.binary_dilation(binary_center, iterations=surround_limit[0])) - curr_surround = np.logical_and(curr_surround, total_mask).astype(np.uint8) - binary_surround_array.append(curr_surround) - # plt.imshow(curr_surround) - # plt.show() -binary_surround_array = np.array(binary_surround_array) - -print "saving rois ..." -center_areas = [] -surround_areas = [] -for mask_ind in range(binary_mask_array.shape[0]): - center_areas.append(np.sum(binary_mask_array[mask_ind].flat)) - surround_areas.append(np.sum(binary_surround_array[mask_ind].flat)) -roi_f = h5py.File('rois_and_traces.hdf5') -roi_f['masks_center'] = weight_mask_array -roi_f['masks_surround'] = binary_surround_array - -roi_f.close() -print 'minimum surround area:', min(surround_areas), 'pixels.' - -f = plt.figure(figsize=(10, 10)) -ax_center = f.add_subplot(211) -ax_center.hist(center_areas, bins=30) -ax_center.set_title('roi center area distribution') -ax_surround = f.add_subplot(212) -ax_surround.hist(surround_areas, bins=30) -ax_surround.set_title('roi surround area distribution') -plt.show() - -print 'plotting ...' -colors = pt.random_color(weight_mask_array.shape[0]) -bg = ia.array_nor(tf.imread('corrected_mean_projection.tif')) - -f_c_bg = plt.figure(figsize=(10, 10)) -ax_c_bg = f_c_bg.add_subplot(111) -ax_c_bg.imshow(bg, cmap='gray', vmin=0, vmax=0.5, interpolation='nearest') -f_c_nbg = plt.figure(figsize=(10, 10)) -ax_c_nbg = f_c_nbg.add_subplot(111) -ax_c_nbg.imshow(np.zeros(bg.shape,dtype=np.uint8),vmin=0,vmax=1,cmap='gray',interpolation='nearest') -f_s_nbg = plt.figure(figsize=(10, 10)) -ax_s_nbg = f_s_nbg.add_subplot(111) -ax_s_nbg.imshow(np.zeros(bg.shape,dtype=np.uint8),vmin=0,vmax=1,cmap='gray',interpolation='nearest') - -i = 0 -for mask_ind in range(binary_mask_array.shape[0]): - pt.plot_mask_borders(binary_mask_array[mask_ind], plotAxis=ax_c_bg, color=colors[i], borderWidth=1) - pt.plot_mask_borders(binary_mask_array[mask_ind], plotAxis=ax_c_nbg, color=colors[i], borderWidth=1) - pt.plot_mask_borders(binary_surround_array[mask_ind], plotAxis=ax_s_nbg, color=colors[i], borderWidth=1) - i += 1 - -plt.show() - -print 'saving figures ...' 
-pt.save_figure_without_borders(f_c_bg, os.path.join(save_folder, '2P_ROIs_with_background.png'), dpi=300) -pt.save_figure_without_borders(f_c_nbg, os.path.join(save_folder, '2P_ROIs_without_background.png'), dpi=300) -pt.save_figure_without_borders(f_s_nbg, os.path.join(save_folder, '2P_ROI_surrounds_background.png'), dpi=300) -f.savefig(os.path.join(save_folder, 'roi_area_distribution.pdf'), dpi=300) diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/150_get_raw_center_and_surround_traces.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/150_get_raw_center_and_surround_traces.py deleted file mode 100644 index 0e57582..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/150_get_raw_center_and_surround_traces.py +++ /dev/null @@ -1,129 +0,0 @@ -import os -import numpy as np -import h5py -import time -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import corticalmapping.core.FileTools as ft -import corticalmapping.NwbTools as nt -import matplotlib.pyplot as plt -from multiprocessing import Pool - -CHUNK_SIZE = 2000 -PROCESS_NUM = 5 - -def get_chunk_frames(frame_num, chunk_size): - chunk_num = frame_num // chunk_size - if frame_num % chunk_size > 0: - chunk_num = chunk_num + 1 - - print("total number of frames:", frame_num) - print("total number of chunks:", chunk_num) - - chunk_ind = [] - chunk_starts = [] - chunk_ends = [] - - for chunk_i in range(chunk_num): - chunk_ind.append(chunk_i) - chunk_starts.append(chunk_i * chunk_size) - - if chunk_i < chunk_num - 1: - chunk_ends.append((chunk_i + 1) * chunk_size) - else: - chunk_ends.append(frame_num) - - return zip(chunk_ind, chunk_starts, chunk_ends) - -def get_traces(params): - t0 = time.time() - - chunk_ind, chunk_start, chunk_end, nwb_path, data_path, curr_folder, center_array, surround_array = params - - nwb_f = h5py.File(nwb_path, 'r') - print('\nstart analyzing chunk: {}'.format(chunk_ind)) - curr_mov = nwb_f[data_path][chunk_start: chunk_end] - nwb_f.close() - - # print 'extracting traces' - curr_traces_center = np.empty((center_array.shape[0], curr_mov.shape[0]), dtype=np.float32) - curr_traces_surround = np.empty((center_array.shape[0], curr_mov.shape[0]), dtype=np.float32) - for i in range(center_array.shape[0]): - curr_center = ia.WeightedROI(center_array[i]) - curr_surround = ia.ROI(surround_array[i]) - curr_traces_center[i, :] = curr_center.get_weighted_trace_pixelwise(curr_mov) - - # scale surround trace to be similar as center trace - mean_center_weight = curr_center.get_mean_weight() - curr_traces_surround[i, :] = curr_surround.get_binary_trace_pixelwise(curr_mov) * mean_center_weight - - # print 'saveing chunk {} ...'.format(chunk_ind) - chunk_folder = os.path.join(curr_folder, 'chunks') - if not os.path.isdir(chunk_folder): - os.mkdir(chunk_folder) - chunk_f = h5py.File(os.path.join(chunk_folder, 'chunk_temp_' + ft.int2str(chunk_ind, 4) + '.hdf5')) - chunk_f['traces_center'] = curr_traces_center - chunk_f['traces_surround'] = curr_traces_surround - chunk_f.close() - - print('\n\t{:06d} seconds: chunk: {}; demixing finished.'.format(int(time.time() - t0), chunk_ind)) - - return None - -def run(): - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - plane_n = os.path.split(curr_folder)[1] - print(plane_n) - - print('getting masks ...') - rois_f = h5py.File('rois_and_traces.hdf5') - 
center_array = rois_f['masks_center'].value - surround_array = rois_f['masks_surround'].value - - print('\nanalyzing movie in chunks of size:', CHUNK_SIZE , 'frames.') - - nwb_folder = os.path.dirname(curr_folder) - nwb_fn = [f for f in os.listdir(nwb_folder) if f[-4:] == '.nwb'][0] - nwb_path = os.path.join(nwb_folder, nwb_fn) - print('\n' + nwb_path) - data_path = '/processing/motion_correction/MotionCorrection/' + plane_n + '/corrected/data' - - nwb_f = h5py.File(nwb_path, 'r') - total_frame = nwb_f[data_path].shape[0] - nwb_f.close() - - chunk_frames = get_chunk_frames(total_frame, CHUNK_SIZE) - chunk_params = [(cf[0], cf[1], cf[2], nwb_path, data_path, - curr_folder, center_array, surround_array) for cf in chunk_frames] - - p = Pool(PROCESS_NUM) - p.map(get_traces, chunk_params) - - chunk_folder = os.path.join(curr_folder, 'chunks') - chunk_fns = [f for f in os.listdir(chunk_folder) if f[0:11] == 'chunk_temp_'] - chunk_fns.sort() - print('\nreading chunks files ...') - print('\n'.join(chunk_fns)) - - traces_raw = [] - traces_surround = [] - - for chunk_fn in chunk_fns: - curr_chunk_f = h5py.File(os.path.join(chunk_folder, chunk_fn)) - traces_raw.append(curr_chunk_f['traces_center'].value) - traces_surround.append(curr_chunk_f['traces_surround'].value) - - print("saving ...") - traces_raw = np.concatenate(traces_raw, axis=1) - traces_surround = np.concatenate(traces_surround, axis=1) - rois_f['traces_center_raw'] = traces_raw - rois_f['traces_surround_raw'] = traces_surround - print('done.') - - -if __name__ == '__main__': - run() - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/160_get_neuropil_subtracted_traces.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/160_get_neuropil_subtracted_traces.py deleted file mode 100644 index 71da943..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/160_get_neuropil_subtracted_traces.py +++ /dev/null @@ -1,101 +0,0 @@ -import sys -import os -import h5py -import numpy as np -import corticalmapping.HighLevel as hl -import corticalmapping.core.FileTools as ft -import matplotlib.pyplot as plt - - -lam = 100. 
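# lam above is the regularization weight handed to
# corticalmapping.HighLevel.neural_pil_subtraction() further down. Conceptually
# the routine fits a contamination ratio r and removes the scaled surround
# trace from the center trace; a minimal unregularized sketch of that idea
# (my simplification, not the library's exact algorithm):
#
#     r = np.dot(surround, center) / np.dot(surround, surround)
#     subtracted = center - r * surround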
-plot_chunk_size = 5000 - - -def plot_traces_chunks(traces, labels, chunk_size, roi_ind): - """ - - :param traces: np.array, shape=[trace_type, t_num] - :param labels: - :param chunk_size: - :param figures_folder: - :param roi_ind: - :return: - """ - - t_num = traces.shape[1] - chunk_num = t_num // chunk_size - - chunks = [] - for chunk_ind in range(chunk_num): - chunks.append([chunk_ind * chunk_size, (chunk_ind + 1) * chunk_size]) - - if t_num % chunk_size != 0: - chunks.append([chunk_num * chunk_size, t_num]) - - v_max = np.amax(traces) - v_min = np.amin(traces) - - fig = plt.figure(figsize=(75, 20)) - fig.suptitle('neuropil subtraction for ROI: {}'.format(roi_ind)) - for chunk_ind, chunk in enumerate(chunks): - curr_ax = fig.add_subplot(len(chunks), 1, chunk_ind + 1) - for trace_ind in range(traces.shape[0]): - curr_ax.plot(traces[trace_ind, chunk[0]: chunk[1]], label=labels[trace_ind]) - - curr_ax.set_xlim([0, chunk_size]) - curr_ax.set_ylim([v_min, v_max * 1.2]) - curr_ax.legend() - - return fig - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -data_f = h5py.File('rois_and_traces.hdf5') -traces_raw = data_f['traces_center_raw'].value -traces_srround = data_f['traces_surround_raw'].value - -traces_subtracted = np.zeros(traces_raw.shape, np.float32) -ratio = np.zeros(traces_raw.shape[0], np.float32) -err = np.zeros(traces_raw.shape[0], np.float32) - -for i in range(traces_raw.shape[0]): - curr_trace_c = traces_raw[i] - curr_trace_s = traces_srround[i] - curr_r, curr_err, curr_trace_sub = hl.neural_pil_subtraction(curr_trace_c, curr_trace_s, lam=lam) - print "roi_%s \tr = %.4f; error = %.4f." % (ft.int2str(i, 5), curr_r, curr_err) - traces_subtracted[i] = curr_trace_sub - ratio[i] = curr_r - err[i] = curr_err - -print('\nplotting neuropil subtraction results ...') -figures_folder = 'figures/neuropil_subtraction_lam_{}'.format(lam) -if not os.path.isdir(figures_folder): - os.makedirs(figures_folder) -for roi_ind in range(traces_raw.shape[0]): - print('roi_{:04d}'.format(roi_ind)) - curr_traces = np.array([traces_raw[roi_ind], traces_srround[roi_ind], traces_subtracted[roi_ind]]) - curr_fig = plot_traces_chunks(traces=curr_traces, - labels=['center', 'surround', 'subtracted'], - chunk_size=plot_chunk_size, - roi_ind=roi_ind) - curr_fig.savefig(os.path.join(figures_folder, 'neuropil_subtraction_ROI_{:04d}.png'.format(roi_ind))) - curr_fig.clear() - plt.close(curr_fig) - -# wait for keyboard abortion -msg = raw_input('Do you want to save? (y/n)\n') -while True: - if msg == 'y': - break - elif msg == 'n': - sys.exit('Stop process without saving.') - else: - msg = raw_input('Do you want to save? 
(y/n)\n')
-
-data_f['traces_center_subtracted'] = traces_subtracted
-data_f['neuropil_r'] = ratio
-data_f['neuropil_err'] = err
-
-data_f.close()
\ No newline at end of file
diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/180_check_correlation.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/180_check_correlation.py
deleted file mode 100644
index 65c3f07..0000000
--- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/180_check_correlation.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import os
-import h5py
-import tifffile as tf
-import numpy as np
-import matplotlib.pyplot as plt
-import corticalmapping.core.PlottingTools as pt
-import corticalmapping.core.ImageAnalysis as ia
-
-
-cor_thr = 0.8
-
-curr_folder = os.path.dirname(os.path.realpath(__file__))
-os.chdir(curr_folder)
-
-def merge_pairs(pairs):
-
-    total_set = set([])
-    for pair in pairs:
-        total_set.update(set(pair))
-
-    all_nodes = list(total_set)
-    node_grps = [{n} for n in all_nodes]
-
-    for pair in pairs:
-
-        node0 = pair[0]
-        node1 = pair[1]
-
-        for node_grp in node_grps:
-            if node0 in node_grp:
-                node_grp0 = node_grp
-            if node1 in node_grp:
-                node_grp1 = node_grp
-
-        if node_grp0 != node_grp1:
-            node_grp0.update(node_grp1)
-            node_grps.remove(node_grp1)
-
-    return node_grps
-
-
-save_plot_dir = os.path.join(curr_folder, 'figures', 'dff_extraction')
-if not os.path.isdir(save_plot_dir):
-    os.makedirs(save_plot_dir)
-
-bg = ia.array_nor(tf.imread('corrected_mean_projection.tif'))
-
-data_f = h5py.File('rois_and_traces.hdf5')
-traces_subtracted = data_f['traces_center_subtracted'].value
-masks = data_f['masks_center'].value
-
-f, axs = plt.subplots(1, 2, figsize=(16, 5))
-
-cor_mat = np.corrcoef(traces_subtracted)
-fig = axs[0].imshow(cor_mat, vmin=-1, vmax=1, cmap='jet', interpolation='nearest')
-axs[0].set_title('correlation matrix')
-f.colorbar(fig, ax=axs[0])
-
-cors = cor_mat[np.tril_indices(cor_mat.shape[0], k=-1)]
-cor_dist = axs[1].hist(cors, range=[-1., 1.], bins=40)
-axs[1].set_title('correlation distribution')
-
-# cors = np.sort(cors)
-# cor_thr = cors[int(cors.shape[0] * 0.99)]
-# print('Cutoff threshold for correlation: {}'.format(cor_thr))
-
-pos_cor_loc = np.where(cor_mat > cor_thr)
-
-roi_pairs = []
-for ind in range(len(pos_cor_loc[0])):
-    if pos_cor_loc[0][ind] < pos_cor_loc[1][ind]:
-        roi_pairs.append([pos_cor_loc[0][ind], pos_cor_loc[1][ind]])
-print(roi_pairs)
-
-roi_grps = merge_pairs(roi_pairs)
-print roi_grps
-
-cor_grps = []
-for roi_grp in roi_grps:
-    grp_traces = traces_subtracted[list(roi_grp)]
-    grp_cors = np.corrcoef(grp_traces)[np.tril_indices(len(roi_grp), k=-1)]
-    cor_grps.append(np.mean(grp_cors))
-
-cor_grps = np.array(cor_grps)
-cor_scalars = [(c + 1) / 2 for c in cor_grps]
-print cor_scalars
-cor_colors = [pt.value_2_rgb(c, cmap='inferno') for c in cor_scalars]
-
-f_roi = plt.figure()
-ax_roi = f_roi.add_subplot(111)
-ax_roi.imshow(bg, vmin=0, vmax=0.5, cmap='gray', interpolation='nearest')
-for grp_ind, roi_grp in enumerate(roi_grps):
-    for roi_ind in roi_grp:
-        print roi_ind, cor_colors[grp_ind]
-        pt.plot_mask_borders(masks[roi_ind], plotAxis=ax_roi, color=cor_colors[grp_ind])
-
-plt.show()
-
-data_f.close()
\ No newline at end of file
diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/200_generate_nwb.py
b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/200_generate_nwb.py deleted file mode 100644 index 06c0370..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/200_generate_nwb.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import corticalmapping.NwbTools as nt - -date_recorded = '180404' -mouse_id = '360495' -sess_num = '110' - -experimenter = 'Jun' -genotype = 'Vipr2-IRES2-Cre-neo' -sex = 'male' -age = '180' -indicator = 'GCaMP6s' -imaging_rate = 15.24 -imaging_depth = '150 microns' -imaging_location = 'visual cortex' -imaging_device = 'Sutter 2p Scope' -imaging_excitation_lambda = '920 nanometers' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -notebook_path = os.path.join(curr_folder, 'notebook.txt') -with open(notebook_path, 'r') as ff: - notes = ff.read() - -general = nt.DEFAULT_GENERAL -general['experimenter'] = experimenter -general['subject']['subject_id'] = mouse_id -general['subject']['genotype'] = genotype -general['subject']['sex'] = sex -general['subject']['age'] = age -general['optophysiology'].update({'imaging_plane_1': {}}) -general['optophysiology']['imaging_plane_1'].update({'indicator': indicator}) -general['optophysiology']['imaging_plane_1'].update({'imaging_rate': imaging_rate}) -general['optophysiology']['imaging_plane_1'].update({'imaging_depth': imaging_depth}) -general['optophysiology']['imaging_plane_1'].update({'location': imaging_location}) -general['optophysiology']['imaging_plane_1'].update({'device': imaging_device}) -general['optophysiology']['imaging_plane_1'].update({'excitation_lambda': imaging_excitation_lambda}) -general['notes'] = notes - -file_name = date_recorded + '_M' + mouse_id + '_' + sess_num + '.nwb' - -rf = nt.RecordedFile(os.path.join(curr_folder, file_name), identifier=file_name[:-4], description='') -rf.add_general(general=general) -rf.close() - - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/210_add_vasmap.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/210_add_vasmap.py deleted file mode 100644 index a35455f..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/210_add_vasmap.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import corticalmapping.NwbTools as nt -import corticalmapping.core.ImageAnalysis as ia -import matplotlib.pyplot as plt -import tifffile as tf - - -vasmap_name_wf = 'vas_map_focused_wf.tif' -vasmap_name_2p_zoom1 = 'vas_map_focused_2p_zoom1.tif' -# vasmap_name_2p_zoom2 = 'vas_map_focused_2p_zoom2.tif' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -vasmap_wf = tf.imread(vasmap_name_wf) -vasmap_2p_zoom1 = ia.array_nor(tf.imread(vasmap_name_2p_zoom1)) -# vasmap_2p_zoom2 = ia.array_nor(tf.imread(vasmap_name_2p_zoom2)) - -f = plt.figure(figsize=(15, 7)) -ax1 = f.add_subplot(121) -ax1.imshow(vasmap_wf, cmap='gray', interpolation='nearest') -ax1.set_title('wide field surface vasculature') -ax2 = f.add_subplot(122) -ax2.imshow(vasmap_2p_zoom1, vmin=0, vmax=0.1, cmap='gray', interpolation='nearest') -ax2.set_title('two photon surface vasculature') -plt.show() - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -nwb_f.add_acquisition_image('surface_vas_map_wf', vasmap_wf, - description='wide field surface vasculature map through cranial window') 
-nwb_f.add_acquisition_image('surface_vas_map_2p_zoom1', vasmap_2p_zoom1, - description='2-photon surface vasculature map through cranial window, zoom 1') -# nwb_f.add_acquisition_image('surface_vas_map_2p_zoom2', vasmap_2p_zoom2, -# description='2-photon surface vasculature map through cranial window, zoom 2') - -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/220_add_sync_data.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/220_add_sync_data.py deleted file mode 100644 index 1341950..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/220_add_sync_data.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import corticalmapping.NwbTools as nt - -record_date = '180404' -mouse_id = '360495' -session_id = '110' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = record_date + '_M' + mouse_id + '_' + session_id + '.nwb' - -sync_fn = [f for f in os.listdir(curr_folder) if f[-3:] == '.h5' and record_date in f and 'M' + mouse_id in f] -if len(sync_fn) == 0: - raise LookupError('Did not find sync .h5 file.') -elif len(sync_fn) > 1: - raise LookupError('More than one sync .h5 files found.') -else: - sync_fn = sync_fn[0] - -nwb_f = nt.RecordedFile(nwb_fn) -nwb_f.add_sync_data(sync_fn) -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/230_add_image_data.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/230_add_image_data.py deleted file mode 100644 index 3304bd7..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/230_add_image_data.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -import h5py -import corticalmapping.NwbTools as nt - -dset_n = '2p_movie' -temporal_downsample_rate = 2 -pixel_size = 0.00000035 # meter, 0.2 micron, deepscope 8K Hz scanner, zoom 2, 1024 x 1024 - -description = '2-photon imaging data' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) -mov_ts = nwb_f.file_pointer['/acquisition/timeseries/digital_vsync_2p_rise/timestamps'].value -print('\ntotal 2p timestamps count: {}'.format(len(mov_ts))) - -mov_ts_d = mov_ts[::temporal_downsample_rate] -print('downsampled 2p timestamps count: {}'.format(len(mov_ts_d))) - -mov_fn = os.path.splitext(nwb_fn)[0] + '_2p_movies.hdf5' -mov_f = h5py.File(mov_fn, 'r') -mov_dset = mov_f[dset_n] -print('downsampled 2p movie frame num: {}'.format(mov_dset.shape[0])) - -mov_ts_d = mov_ts_d[0: mov_dset.shape[0]] - -# if len(mov_ts) == mov_dset.shape[0]: -# pass -# elif len(mov_ts) == mov_dset.shape[0] + 1: -# mov_ts = mov_ts[0: -1] -# else: -# raise ValueError('the number of timestamps of {} movie ({}) does not equal (or is not greater by one) ' -# 'the number of frames in the movie ({})'.format(mov_dn, len(mov_ts), curr_dset.shape[0])) - -nwb_f.add_acquired_image_series_as_remote_link('2p_movie', image_file_path=mov_fn, dataset_path=dset_n, - timestamps=mov_ts_d, description=description, comments='', - data_format='zyx', pixel_size=[pixel_size, pixel_size], - pixel_size_unit='meter') - -mov_f.close() -nwb_f.close() \ No newline at end of file diff --git 
a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/240_add_motion_correction_module.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/240_add_motion_correction_module.py deleted file mode 100644 index 335058c..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/240_add_motion_correction_module.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import h5py -import corticalmapping.NwbTools as nt - -corrected_file_path = '180404_M360495_110_2p_movies.hdf5' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -input_parameters = [] - -offsets_path = 'correction_offsets.hdf5' -offsets_f = h5py.File(offsets_path) -offsets_keys = offsets_f.keys() -if 'path_list' in offsets_keys: - offsets_keys.remove('path_list') - -offsets_keys.sort() -offsets = [] -for offsets_key in offsets_keys: - offsets.append(offsets_f[offsets_key].value) -offsets = np.concatenate(offsets, axis=0) -offsets = np.array(zip(offsets[:, 1], offsets[:, 0])) -offsets_f.close() - -mean_projection = tf.imread('corrected_mean_projection.tif') -max_projection = tf.imread('corrected_max_projection.tif') - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -nwb_f.add_motion_correction_module(module_name='motion_correction', - original_timeseries_path='/acquisition/timeseries/2p_movie', - corrected_file_path=corrected_file_path, corrected_dataset_path='2p_movie', - xy_translation_offsets=offsets, mean_projection=mean_projection, - max_projection=max_projection) -nwb_f.close() - - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/250_get_photodiode_onset.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/250_get_photodiode_onset.py deleted file mode 100644 index fe4ec1d..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/250_get_photodiode_onset.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.HighLevel as hl - -# photodiode -digitizeThr = 0.8 -filterSize = 0.01 -segmentThr = 0.01 -smallestInterval = 0.05 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] - -nwb_f = nt.RecordedFile(nwb_fn) -pd, pd_t = nwb_f.get_analog_data(ch_n='analog_photodiode') -fs = 1. / np.mean(np.diff(pd_t)) -# print fs - -pd_onsets = hl.segmentPhotodiodeSignal(pd, digitizeThr=digitizeThr, filterSize=filterSize, - segmentThr=segmentThr, Fs=fs, smallestInterval=smallestInterval) - -raw_input('press enter to continue ...') - -pdo_ts = nwb_f.create_timeseries('TimeSeries', 'digital_photodiode_rise', modality='other') -pdo_ts.set_time(pd_onsets) -pdo_ts.set_data([], unit='', conversion=np.nan, resolution=np.nan) -pdo_ts.set_value('digitize_threshold', digitizeThr) -pdo_ts.set_value('filter_size', filterSize) -pdo_ts.set_value('segment_threshold', segmentThr) -pdo_ts.set_value('smallest_interval', smallestInterval) -pdo_ts.set_description('Real Timestamps (master acquisition clock) of photodiode onset. 
' - 'Extracted from analog photodiode signal by the function:' - 'corticalmapping.HighLevel.segmentPhotodiodeSignal() using parameters saved in the' - 'current timeseries.') -pdo_ts.set_path('/analysis') -pdo_ts.finalize() - -nwb_f.close() - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/260_add_visual_stimuli_retinotopic_mapping.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/260_add_visual_stimuli_retinotopic_mapping.py deleted file mode 100644 index 0414239..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/260_add_visual_stimuli_retinotopic_mapping.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import retinotopic_mapping.DisplayLogAnalysis as dla -import corticalmapping.NwbTools as nt - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -stim_pkl_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.pkl'][0] -stim_log = dla.DisplayLogAnalyzer(stim_pkl_fn) - -nwb_f.add_visual_display_log_retinotopic_mapping(stim_log=stim_log) -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/270_analyze_photodiode_onsets.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/270_analyze_photodiode_onsets.py deleted file mode 100644 index a064337..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/270_analyze_photodiode_onsets.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import retinotopic_mapping.DisplayLogAnalysis as dla -import corticalmapping.core.TimingAnalysis as ta - - -pd_ts_pd_path = 'analysis/digital_photodiode_rise' -pd_thr = 0.5 # this is color threshold, not analog photodiode threshold -ccg_t_range = (0., 0.1) -ccg_bins = 100 -is_plot = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -stim_pkl_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.pkl'][0] -stim_log = dla.DisplayLogAnalyzer(stim_pkl_fn) - -# get display lag -display_delay = nwb_f.get_display_delay_retinotopic_mapping(stim_log=stim_log, indicator_color_thr=pd_thr, - ccg_t_range=ccg_t_range, ccg_bins=ccg_bins, - is_plot=is_plot, pd_onset_ts_path=pd_ts_pd_path) - -# analyze photodiode onset -stim_dict = stim_log.get_stim_dict() -pd_onsets_seq = stim_log.analyze_photodiode_onsets_sequential(stim_dict=stim_dict, pd_thr=pd_thr) -pd_onsets_com = stim_log.analyze_photodiode_onsets_combined(pd_onsets_seq=pd_onsets_seq, - is_dgc_blocked=True) -nwb_f.add_photodiode_onsets_combined_retinotopic_mapping(pd_onsets_com=pd_onsets_com, - display_delay=display_delay, - vsync_frame_path='acquisition/timeseries' - '/digital_vsync_visual_rise') -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/280_add_rois_and_traces_caiman_segmentation.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/280_add_rois_and_traces_caiman_segmentation.py deleted file mode 100644 index f0d74aa..0000000 --- 
a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/280_add_rois_and_traces_caiman_segmentation.py
+++ /dev/null
@@ -1,154 +0,0 @@
-import os
-import h5py
-import numpy as np
-import matplotlib.pyplot as plt
-import tifffile as tf
-import corticalmapping.NwbTools as nt
-import corticalmapping.core.FileTools as ft
-import corticalmapping.core.ImageAnalysis as ia
-
-resolution = 1024  # 512
-
-curr_folder = os.path.dirname(os.path.realpath(__file__))
-os.chdir(curr_folder)
-
-def add_rois_and_traces(data_folder, nwb_f, mov_path):
-
-    mov_grp = nwb_f.file_pointer[mov_path + '/corrected']
-
-    data_f = h5py.File(os.path.join(data_folder, 'rois_and_traces.hdf5'), 'r')
-    mask_arr_c = data_f['masks_center'].value
-    mask_arr_s = data_f['masks_surround'].value
-    traces_center_raw = data_f['traces_center_raw'].value
-    # traces_center_demixed = data_f['traces_center_demixed'].value
-    traces_center_subtracted = data_f['traces_center_subtracted'].value
-    # traces_center_dff = data_f['traces_center_dff'].value
-    traces_surround_raw = data_f['traces_surround_raw'].value
-    neuropil_r = data_f['neuropil_r'].value
-    neuropil_err = data_f['neuropil_err'].value
-    data_f.close()
-
-    if traces_center_raw.shape[1] != mov_grp['num_samples'].value:
-        raise ValueError('number of trace time points ({}) does not match frame number of '
-                         'corresponding movie ({}).'.format(traces_center_raw.shape[1], mov_grp['num_samples'].value))
-
-    rf_img = tf.imread(os.path.join(data_folder, 'corrected_mean_projection.tif'))
-
-    print 'adding segmentation results ...'
-    rt_mo = nwb_f.create_module('rois_and_traces_plane0')
-    is_if = rt_mo.create_interface('ImageSegmentation')
-    is_if.create_imaging_plane('imaging_plane', description='')
-    is_if.add_reference_image('imaging_plane', 'mean_projection', rf_img)
-
-    for i in range(mask_arr_c.shape[0]):
-        curr_cen = mask_arr_c[i]
-        curr_cen_n = 'roi_' + ft.int2str(i, 4)
-        curr_cen_roi = ia.WeightedROI(curr_cen)
-        curr_cen_pixels_yx = curr_cen_roi.get_pixel_array()
-        curr_cen_pixels_xy = np.array([curr_cen_pixels_yx[:, 1], curr_cen_pixels_yx[:, 0]]).transpose()
-        is_if.add_roi_mask_pixels(image_plane='imaging_plane', roi_name=curr_cen_n, desc='',
-                                  pixel_list=curr_cen_pixels_xy, weights=curr_cen_roi.weights,
-                                  width=resolution, height=resolution)
-
-        curr_sur = mask_arr_s[i]
-        curr_sur_n = 'surround_' + ft.int2str(i, 4)
-        curr_sur_roi = ia.ROI(curr_sur)
-        curr_sur_pixels_yx = curr_sur_roi.get_pixel_array()
-        curr_sur_pixels_xy = np.array([curr_sur_pixels_yx[:, 1], curr_sur_pixels_yx[:, 0]]).transpose()
-        is_if.add_roi_mask_pixels(image_plane='imaging_plane', roi_name=curr_sur_n, desc='',
-                                  pixel_list=curr_sur_pixels_xy, weights=None,
-                                  width=resolution, height=resolution)
-    is_if.finalize()
-
-    trace_f_if = rt_mo.create_interface('Fluorescence')
-    seg_if_path = '/processing/rois_and_traces_plane0' + '/ImageSegmentation/imaging_plane'
-    # print seg_if_path
-    ts_path = mov_path + '/corrected'
-
-    print 'adding center fluorescence raw'
-    trace_raw_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_raw')
-    trace_raw_ts.set_data(traces_center_raw, unit='au', conversion=np.nan, resolution=np.nan)
-    trace_raw_ts.set_value('data_format', 'roi (row) x time (column)')
-    trace_raw_ts.set_value('data_range', '[-8192, 8191]')
-    trace_raw_ts.set_description('fluorescence traces extracted from the center region of each roi')
-    trace_raw_ts.set_time_as_link(ts_path)
-    trace_raw_ts.set_value_as_link('segmentation_interface', seg_if_path)
-    roi_names = 
['roi_' + ft.int2str(ind, 4) for ind in range(traces_center_raw.shape[0])] - trace_raw_ts.set_value('roi_names', roi_names) - trace_raw_ts.set_value('num_samples', traces_center_raw.shape[1]) - trace_f_if.add_timeseries(trace_raw_ts) - trace_raw_ts.finalize() - - print 'adding neuropil fluorescence raw' - trace_sur_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_surround_raw') - trace_sur_ts.set_data(traces_surround_raw, unit='au', conversion=np.nan, resolution=np.nan) - trace_sur_ts.set_value('data_format', 'roi (row) x time (column)') - trace_sur_ts.set_value('data_range', '[-8192, 8191]') - trace_sur_ts.set_description('neuropil traces extracted from the surroud region of each roi') - trace_sur_ts.set_time_as_link(ts_path) - trace_sur_ts.set_value_as_link('segmentation_interface', seg_if_path) - sur_names = ['surround_' + ft.int2str(ind, 4) for ind in range(traces_center_raw.shape[0])] - trace_sur_ts.set_value('roi_names', sur_names) - trace_sur_ts.set_value('num_samples', traces_surround_raw.shape[1]) - trace_f_if.add_timeseries(trace_sur_ts) - trace_sur_ts.finalize() - - roi_center_n_path = '/processing/rois_and_traces_plane0/Fluorescence/f_center_raw/roi_names' - # print 'adding center fluorescence demixed' - # trace_demix_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_demixed') - # trace_demix_ts.set_data(traces_center_demixed, unit='au', conversion=np.nan, resolution=np.nan) - # trace_demix_ts.set_value('data_format', 'roi (row) x time (column)') - # trace_demix_ts.set_description('center traces after overlapping demixing for each roi') - # trace_demix_ts.set_time_as_link(ts_path) - # trace_demix_ts.set_value_as_link('segmentation_interface', seg_if_path) - # trace_demix_ts.set_value('roi_names', roi_names) - # trace_demix_ts.set_value('num_samples', traces_center_demixed.shape[1]) - # trace_f_if.add_timeseries(trace_demix_ts) - # trace_demix_ts.finalize() - - print 'adding center fluorescence after neuropil subtraction' - trace_sub_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_subtracted') - trace_sub_ts.set_data(traces_center_subtracted, unit='au', conversion=np.nan, resolution=np.nan) - trace_sub_ts.set_value('data_format', 'roi (row) x time (column)') - trace_sub_ts.set_description('center traces after overlap demixing and neuropil subtraction for each roi') - trace_sub_ts.set_time_as_link(ts_path) - trace_sub_ts.set_value_as_link('segmentation_interface', seg_if_path) - trace_sub_ts.set_value_as_link('roi_names', roi_center_n_path) - trace_sub_ts.set_value('num_samples', traces_center_subtracted.shape[1]) - trace_sub_ts.set_value('r', neuropil_r, dtype='float32') - trace_sub_ts.set_value('rmse', neuropil_err, dtype='float32') - trace_sub_ts.set_comments('value "r": neuropil contribution ratio for each roi. ' - 'value "rmse": RMS error of neuropil subtraction for each roi') - trace_f_if.add_timeseries(trace_sub_ts) - trace_sub_ts.finalize() - - trace_f_if.finalize() - - # print 'adding global dF/F traces for each roi' - # trace_dff_if = rt_mo.create_interface('DfOverF') - # - # trace_dff_ts = nwb_f.create_timeseries('RoiResponseSeries', 'dff_center') - # trace_dff_ts.set_data(traces_center_dff, unit='au', conversion=np.nan, resolution=np.nan) - # trace_dff_ts.set_value('data_format', 'roi (row) x time (column)') - # trace_dff_ts.set_description('global df/f traces for each roi center, input fluorescence is the trace after demixing' - # ' and neuropil subtraction. 
global df/f is calculated by ' - # 'allensdk.brain_observatory.dff.compute_dff() function.') - # trace_dff_ts.set_time_as_link(ts_path) - # trace_dff_ts.set_value_as_link('segmentation_interface', seg_if_path) - # trace_dff_ts.set_value('roi_names', roi_names) - # trace_dff_ts.set_value('num_samples', traces_center_dff.shape[1]) - # trace_dff_if.add_timeseries(trace_dff_ts) - # trace_dff_ts.finalize() - # trace_dff_if.finalize() - - rt_mo.finalize() - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) -add_rois_and_traces(data_folder=curr_folder, nwb_f=nwb_f, - mov_path='/processing/motion_correction/MotionCorrection/MotionCorrection') - -nwb_f.close() - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/290_get_STRFs.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/290_get_STRFs.py deleted file mode 100644 index ea10f52..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/290_get_STRFs.py +++ /dev/null @@ -1,90 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -stim_name = '001_LocallySparseNoiseRetinotopicMapping' -trace_source = 'f_center_subtracted' -start_time = -0.5 -end_time = 1.5 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -probe_grp = nwb_f.file_pointer['analysis/photodiode_onsets/' + stim_name] -probe_ns = probe_grp.keys() -probe_ns.sort() - -probe_locations = [[float(pn[3: 9]), float(pn[13: 19])] for pn in probe_ns] -probe_signs = [float(pn[-2:]) for pn in probe_ns] -# print(probe_locations) - -plane_ns = nwb_f.file_pointer['processing'].keys() -plane_ns = [pn.split('_')[-1] for pn in plane_ns if 'rois_and_traces_plane' in pn] -plane_ns.sort() -print('\n'.join(plane_ns)) - -strf_grp = nwb_f.file_pointer['analysis'].create_group('STRFs') - -for plane_n in plane_ns: - print('\ngetting STRFs for {} ...'.format(plane_n)) - - roi_ns = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + - '/ImageSegmentation/imaging_plane/roi_list'].value - roi_ns = [rn for rn in roi_ns if rn[0: 4] == 'roi_'] - roi_ns.sort() - roi_num = len(roi_ns) - - plane_strf_grp = strf_grp.create_group(plane_n) - plane_traces = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + '/Fluorescence/' + - trace_source + '/data'].value - plane_trace_ts = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + '/Fluorescence/' + - trace_source + '/timestamps'].value - - plane_mean_frame_dur = np.mean(np.diff(plane_trace_ts)) - plane_chunk_frame_dur = int(np.ceil((end_time - start_time) / plane_mean_frame_dur)) - plane_chunk_frame_start = int(np.floor(start_time / plane_mean_frame_dur)) - plane_t = (np.arange(plane_chunk_frame_dur) + plane_chunk_frame_start) * plane_mean_frame_dur - print '{}: STRF time axis: \n{}'.format(plane_n, plane_t) - - plane_roi_traces = [] - - for probe_ind, probe_n in enumerate(probe_ns): - - probe_ts = probe_grp[probe_n]['pd_onset_ts_sec'].value - probe_traces = [] - for curr_probe_ts in probe_ts: - curr_frame_start = 
ta.find_nearest(plane_trace_ts, curr_probe_ts) + plane_chunk_frame_start - curr_frame_end = curr_frame_start + plane_chunk_frame_dur - if curr_frame_start >= 0 and curr_frame_end <= len(plane_trace_ts): - probe_traces.append(plane_traces[:, curr_frame_start: curr_frame_end]) - - plane_roi_traces.append(np.array(probe_traces)) - print('probe: {} / {}; shape: {}'.format(probe_ind + 1, len(probe_ns), np.array(probe_traces).shape)) - - # plane_roi_traces = np.array(plane_roi_traces) - - print('saving ...') - for roi_ind in range(roi_num): - - print "roi: {} / {}".format(roi_ind + 1, roi_num) - curr_unit_traces = [pt[:, roi_ind, :] for pt in plane_roi_traces] - curr_unit_traces = [list(t) for t in curr_unit_traces] - curr_strf = sca.SpatialTemporalReceptiveField(probe_locations, probe_signs, curr_unit_traces, plane_t, - name='roi_{:04d}'.format(roi_ind), - trace_data_type=trace_source) - - curr_strf_grp = plane_strf_grp.create_group('strf_roi_{:04d}'.format(roi_ind)) - curr_strf.to_h5_group(curr_strf_grp) - -nwb_f.close() - - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/310_plot_STRFs.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/310_plot_STRFs.py deleted file mode 100644 index dabe157..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/310_plot_STRFs.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import h5py -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -save_folder = 'figures' -is_local_dff = True -is_add_to_traces = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File("caiman_segmentation_results.hdf5", 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0. 
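# add_to_trace carries the constant bias that was added to the movie before
# CaImAn segmentation (stored as 'bias_added_to_movie' in the segmentation
# results); passing it to get_local_dff_strf() below lets the local dF/F
# normalization account for that offset instead of treating it as real
# fluorescence (my reading of the parameter names; the exact handling lives
# in corticalmapping.SingleCellAnalysis).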
- - plane_grp = strf_grp[plane_n] - pdff = PdfPages(os.path.join(save_folder, 'STRFs_' + plane_n + '.pdf')) - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - - v_min, v_max = curr_strf_dff.get_data_range() - f = curr_strf_dff.plot_traces(yRange=(v_min, v_max * 1.1), figSize=(16, 10), - columnSpacing=0.002, rowSpacing=0.002) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/320_plot_zscore_RFs.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/320_plot_zscore_RFs.py deleted file mode 100644 index 8bc3863..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/320_plot_zscore_RFs.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import h5py -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -save_folder = 'figures' -is_local_dff = True -zscore_range = [0., 4.] -t_window = [0., 1.] -is_add_to_traces = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File("caiman_segmentation_results.hdf5", 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0. 
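# The z-score receptive fields extracted below summarize, for each probe
# location, how strongly the dF/F response within t_window deviates from
# trial-to-trial variability. A rough per-probe sketch (an assumption about
# the metric, not the exact SingleCellAnalysis formula):
#
#     z = trial_mean(response_in_window) / trial_std(response_in_window)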
- - plane_grp = strf_grp[plane_n] - pdff = PdfPages(os.path.join(save_folder, 'zscore_RFs_' + plane_n + '.pdf')) - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - v_min, v_max = curr_strf_dff.get_data_range() - - rf_on, rf_off = curr_strf_dff.get_zscore_receptive_field(timeWindow=t_window) - f = plt.figure(figsize=(15, 4)) - ax_on = f.add_subplot(121) - rf_on.plot_rf(plot_axis=ax_on, is_colorbar=True, cmap='Reds', vmin=zscore_range[0], vmax=zscore_range[1]) - ax_off = f.add_subplot(122) - rf_off.plot_rf(plot_axis=ax_off, is_colorbar=True, cmap='Blues', vmin=zscore_range[0], vmax=zscore_range[1]) - plt.close() - - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/330_plot_RF_contours.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/330_plot_RF_contours.py deleted file mode 100644 index 9f55594..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/330_plot_RF_contours.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import numpy as np -import h5py -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -roi_t_window = [0., 1.] -zscore_range = [0., 4.] -save_folder = 'figures' -is_add_to_traces = True - -# plot control -thr_ratio = 0.4 -filter_sigma = 1. -interpolate_rate = 5 -absolute_thr = 1.6 -level_num = 1 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'] -print('\n'.join(nwb_fn)) - -if len(nwb_fn) != 1: - raise LookupError - -nwb_fn = nwb_fn[0] -rff = h5py.File(nwb_fn, 'r') - -strf_grp = rff['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -X = None -Y = None - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File("caiman_segmentation_results.hdf5", 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0. 
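# Contour levels in the plots below are set per ROI, relative to its own peak:
# level = thr_ratio * max(weighted_mask), i.e. every receptive field is
# outlined at 40% of its own peak z-score, while absolute_thr appears to add a
# fixed z-score floor inside get_zscore_thresholded_receptive_fields().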
- - plane_grp = strf_grp[plane_n] - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - f_all = plt.figure(figsize=(10, 10)) - ax_all = f_all.add_subplot(111) - - pdff = PdfPages(os.path.join(save_folder, 'RF_contours_' + plane_n + '.pdf')) - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - rf_on, rf_off, _ = curr_strf_dff.get_zscore_thresholded_receptive_fields(timeWindow=roi_t_window, - thr_ratio=thr_ratio, - filter_sigma=filter_sigma, - interpolate_rate=interpolate_rate, - absolute_thr=absolute_thr) - - if X is None and Y is None: - X, Y = np.meshgrid(np.arange(len(rf_on.aziPos)), - np.arange(len(rf_on.altPos))) - - levels_on = [np.max(rf_on.get_weighted_mask().flat) * thr_ratio] - levels_off = [np.max(rf_off.get_weighted_mask().flat) * thr_ratio] - ax_all.contour(X, Y, rf_on.get_weighted_mask(), levels=levels_on, colors='r', lw=5) - ax_all.contour(X, Y, rf_off.get_weighted_mask(), levels=levels_off, colors='b', lw=5) - - f_single = plt.figure(figsize=(10, 10)) - ax_single = f_single.add_subplot(111) - ax_single.contour(X, Y, rf_on.get_weighted_mask(), levels=levels_on, colors='r', lw=5) - ax_single.contour(X, Y, rf_off.get_weighted_mask(), levels=levels_off, colors='b', lw=5) - ax_single.set_xticks(range(len(rf_on.aziPos))[::10]) - ax_single.set_xticklabels(['{:05.1f}'.format(l) for l in rf_on.aziPos[::10]]) - ax_single.set_yticks(range(len(rf_on.altPos))[::10]) - ax_single.set_yticklabels(['{:05.1f}'.format(l) for l in rf_on.altPos[::-1][::10]]) - ax_single.set_aspect('equal') - ax_single.set_title('{}: {}. ON thr:{}; OFF thr:{}.'.format(plane_n, roi_n, rf_on.thr, rf_off.thr)) - pdff.savefig(f_single) - f_single.clear() - plt.close(f_single) - - pdff.close() - - ax_all.set_xticks(range(len(rf_on.aziPos))[::10]) - ax_all.set_xticklabels(['{:05.1f}'.format(l) for l in rf_on.aziPos[::10]]) - ax_all.set_yticks(range(len(rf_on.altPos))[::10]) - ax_all.set_yticklabels(['{:05.1f}'.format(l) for l in rf_on.altPos[::-1][::10]]) - ax_all.set_aspect('equal') - ax_all.set_title('{}, abs_zscore_thr:{}'.format(plane_n, absolute_thr)) - - f_all.savefig(os.path.join(save_folder, 'RF_contours_' + plane_n + '_all.pdf'), dpi=300) - -rff.close() - diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/old/120_get_cells_file.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/old/120_get_cells_file.py deleted file mode 100644 index 5f9731a..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/old/120_get_cells_file.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -import numpy as np -import h5py -import tifffile as tf -import allensdk_internal.brain_observatory.mask_set as mask_set -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import scipy.ndimage as ni -import matplotlib.pyplot as plt - -isSave = True -filter_sigma = 1. -cut_thr = 4. 
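# cut_thr is in units of standard deviations: below, each CaImAn mask is
# z-scored against its own pixel distribution, Gaussian-smoothed with
# filter_sigma, and binarized at cut_thr before connected components are
# labeled. Equivalent normalization sketch:
#
#     mask_nor = (mask - mask.mean()) / np.abs(mask.std())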
-dilation_iterations = 0 -bg_fn = "corrected_mean_projection.tif" -save_folder = 'figures' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -data_f = h5py.File('caiman_segmentation_results.hdf5') -masks = data_f['masks'].value -data_f.close() - -bg = tf.imread(bg_fn) - -final_roi_dict = {} - -for i, mask in enumerate(masks): - mask_nor = (mask - np.mean(mask.flatten())) / np.abs(np.std(mask.flatten())) - mask_nor_f = ni.filters.gaussian_filter(mask_nor, filter_sigma) - mask_bin = np.zeros(mask_nor_f.shape, dtype=np.uint8) - mask_bin[mask_nor_f > cut_thr] = 1 - mask_labeled, mask_num = ni.label(mask_bin) - curr_mask_dict = ia.get_masks(labeled=mask_labeled, keyPrefix='caiman_mask_{:03d}'.format(i), labelLength=5) - for roi_key, roi_mask in curr_mask_dict.items(): - final_roi_dict.update({roi_key: ia.WeightedROI(roi_mask * mask)}) - -print 'Total number of ROIs:',len(final_roi_dict) - -f = plt.figure(figsize=(15, 8)) -ax1 = f.add_subplot(121) -ax1.imshow(ia.array_nor(bg), vmin=0, vmax=0.1, cmap='gray', interpolation='nearest') -colors1 = pt.random_color(masks.shape[0]) -for i, mask in enumerate(masks): - pt.plot_mask_borders(mask, plotAxis=ax1, color=colors1[i]) -ax1.set_title('original ROIs') -ax1.set_axis_off() -ax2 = f.add_subplot(122) -ax2.imshow(ia.array_nor(bg), vmin=0, vmax=0.1, cmap='gray', interpolation='nearest') -colors2 = pt.random_color(len(final_roi_dict)) -i = 0 -for roi in final_roi_dict.values(): - pt.plot_mask_borders(roi.get_binary_mask(), plotAxis=ax2, color=colors2[i]) - i = i + 1 -ax2.set_title('filtered ROIs') -ax2.set_axis_off() -plt.show() - -if isSave: - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - f.savefig(os.path.join(save_folder, 'caiman_segmentation_filtering.pdf'), dpi=300) - - cell_file = h5py.File('cells.hdf5', 'w') - - i = 0 - for key, value in sorted(final_roi_dict.iteritems()): - curr_grp = cell_file.create_group('cell{:04d}'.format(i)) - curr_grp.attrs['name'] = key - value.to_h5_group(curr_grp) - i += 1 - - cell_file.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/old/170_get_dff_traces.py b/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/old/170_get_dff_traces.py deleted file mode 100644 index 3cd09ff..0000000 --- a/corticalmapping/scripts/post_recording/00_old/movie_single_plane_single_clannel_regular_2p/old/170_get_dff_traces.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import h5py -import allensdk.brain_observatory.dff as dff -import numpy as np -import corticalmapping.HighLevel as hl -import corticalmapping.core.FileTools as ft - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_plot_dir = os.path.join(curr_folder, 'figures', 'dff_extraction') -if not os.path.isdir(save_plot_dir): - os.makedirs(save_plot_dir) - -data_f = h5py.File('rois_and_traces.hdf5') -traces_subtracted = data_f['traces_center_subtracted'].value - -traces_dff = dff.compute_dff(traces_subtracted, save_plot_dir=save_plot_dir, - mode_kernelsize=100, mean_kernelsize=100) -data_f['traces_center_dff'] = traces_dff -data_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/000_reorganized_data.py b/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/000_reorganized_data.py deleted file mode 100644 index 3297f9c..0000000 --- 
a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/000_reorganized_data.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -import tifffile as tf -import numpy as np - -file_identifier = 'cell1_zoom4' -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180419-388189-2p" -frame_per_step = 500 - -save_folder = os.path.join(data_folder, file_identifier) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -fns = [f for f in os.listdir(data_folder) if file_identifier in f and f[-4:] == '.tif'] -fns.sort() -print('\n'.join(fns)) - -curr_step = 0 - -print('\n') -for fn in fns: - print(fn) - curr_mov = tf.imread(os.path.join(data_folder, fn)) - - # reorient movie - curr_mov = curr_mov.transpose((0, 2, 1))[:, ::-1, :] - - steps = curr_mov.shape[0] / frame_per_step - - for step in range(steps): - # print(curr_step) - curr_step_mov = curr_mov[step * frame_per_step : (step + 1) * frame_per_step] - curr_fn = '{}_step_{:03d}'.format(file_identifier, curr_step) - curr_save_folder = os.path.join(save_folder, curr_fn) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - tf.imsave(os.path.join(curr_save_folder, curr_fn + '.tif'), curr_step_mov) - curr_step += 1 \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/005_remove_correction_files.py b/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/005_remove_correction_files.py deleted file mode 100644 index 36e614c..0000000 --- a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/005_remove_correction_files.py +++ /dev/null @@ -1,31 +0,0 @@ -import os - -base_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180502-M386507-2p\FOV4_zoom10" - -step_fns = [f for f in os.listdir(base_folder) if f.split('_')[-2] == 'step'] -step_fns.sort() -print('\n'.join(step_fns)) - -for step_fn in step_fns: - - print('\n' + step_fn) - step_folder = os.path.join(base_folder, step_fn) - fns = os.listdir(step_folder) - - if 'corrected_max_projection.tif' in fns: - print('removing corrected_max_projection.tif') - os.remove(os.path.join(step_folder, 'corrected_max_projection.tif')) - - if 'corrected_mean_projection.tif' in fns: - print('removing corrected_mean_projection.tif') - os.remove(os.path.join(step_folder, 'corrected_mean_projection.tif')) - - if 'correction_offsets.hdf5' in fns: - print('removing correction_offsets.hdf5') - os.remove(os.path.join(step_folder, 'correction_offsets.hdf5')) - - fn_cor = [f for f in fns if f[-14:] == '_corrected.tif'] - if len(fn_cor) == 1: - print('removing ' + fn_cor[0]) - os.remove(os.path.join(step_folder, fn_cor[0])) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/010_motion_correction_zstack_caiman.py b/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/010_motion_correction_zstack_caiman.py deleted file mode 100644 index a20586b..0000000 --- a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/010_motion_correction_zstack_caiman.py +++ /dev/null @@ -1,128 +0,0 @@ -import sys -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import caiman as cm -import numpy as np -import os -from caiman.motion_correction import MotionCorrect, tile_and_correct, motion_correction_piecewise -import tifffile as tf -import h5py -import warnings - -base_folder = 
r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180427-M386507-2p\FOV1_zstack\FOV1_zstack" - -identifier = 'FOV1_zstack' - -n_processes = 3 - - -def correct_single_movie(data_folder, identifier, dview): - - #=======================================setup parameters============================================== - # number of iterations for rigid motion correction - niter_rig = 5 - - # maximum allowed rigid shift in pixels (view the movie to get a sense of motion) - max_shifts = (30, 30) - - # for parallelization split the movies in num_splits chuncks across time - # if none all the splits are processed and the movie is saved - splits_rig = 56 - - # intervals at which patches are laid out for motion correction - # num_splits_to_process_rig = None - - # create a new patch every x pixels for pw-rigid correction - strides = (48, 48) - - # overlap between pathes (size of patch strides+overlaps) - overlaps = (24, 24) - - # for parallelization split the movies in num_splits chuncks across time - splits_els = 56 - - # num_splits_to_process_els = [28, None] - - # upsample factor to avoid smearing when merging patches - upsample_factor_grid = 4 - - # maximum deviation allowed for patch with respect to rigid shifts - max_deviation_rigid = 3 - - # if True, apply shifts fast way (but smoothing results) by using opencv - shifts_opencv = True - - # if True, make the SAVED movie and template mostly nonnegative by removing min_mov from movie - nonneg_movie = False - # =======================================setup parameters============================================== - - - fname = [f for f in os.listdir(data_folder) if f[-4:] == '.tif' and identifier in f] - - if len(fname) == 0: - print('\ndid not find movie file in directory: {}.'.format(data_folder)) - print('Do nothing.') - return - elif len(fname) > 1: - fname.sort() - print('\n') - print('\n'.join(fname)) - warnings.warn('more than one movie file in directory: {}. skip ...'.format(data_folder)) - return - else: - fname = fname[0] - print('\ncorrecting {} in directory {}.'.format(fname, data_folder)) - - # m_orig = cm.load(os.path.join(data_folder, fname)) - # offset_mov = np.min(m_orig) # if the data has very negative values compute an offset value - - offset_mov = 0. 
- - # create a motion correction object# creat - mc = MotionCorrect(os.path.join(data_folder, fname), offset_mov, - dview=dview, max_shifts=max_shifts, niter_rig=niter_rig, - splits_rig=splits_rig, strides=strides, overlaps=overlaps, - splits_els=splits_els, upsample_factor_grid=upsample_factor_grid, - max_deviation_rigid=max_deviation_rigid, - shifts_opencv=shifts_opencv, nonneg_movie=nonneg_movie) - - mc.motion_correct_rigid(save_movie=True) - # load motion corrected movie - m_rig = cm.load(mc.fname_tot_rig) - m_rig = m_rig.astype(np.int16) - save_name = os.path.splitext(fname)[0] + '_corrected.tif' - tf.imsave(os.path.join(data_folder, save_name), m_rig) - tf.imsave(os.path.join(data_folder, 'corrected_mean_projection.tif'), - np.mean(m_rig, axis=0).astype(np.float32)) - tf.imsave(os.path.join(data_folder, 'corrected_max_projection.tif'), - np.max(m_rig, axis=0).astype(np.float32)) - - offset_f = h5py.File(os.path.join(data_folder, 'correction_offsets.hdf5')) - offsets = mc.shifts_rig - offsets = np.array([np.array(o) for o in offsets]).astype(np.float32) - offset_dset = offset_f.create_dataset(name='file_0000', data=offsets) - offset_dset.attrs['format'] = 'height, width' - offset_dset.attrs['path'] = os.path.join(data_folder, fname) - - os.remove(mc.fname_tot_rig[0]) - - -if __name__ == '__main__': - subfolder_ns = [f for f in os.listdir(base_folder) if identifier in f] - subfolder_ns.sort() - print('\n'.join(subfolder_ns)) - - # %% start the cluster (if a cluster already exists terminate it) - if 'dview' in locals(): - dview.terminate() - - c, dview, n_processes = cm.cluster.setup_cluster(backend='local', - n_processes=n_processes, - single_thread=False) - - for subfolder_n in subfolder_ns: - - correct_single_movie(data_folder=os.path.join(base_folder, subfolder_n), - identifier=identifier, - dview=dview) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/020_get_images.py b/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/020_get_images.py deleted file mode 100644 index 08fe803..0000000 --- a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/020_get_images.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import tifffile as tf -import numpy as np - -identifier = 'cell1_zoom4' -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180419-M388189-2p" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -# save_folder = os.path.join(curr_folder, identifier) -# if not os.path.isdir(save_folder): -# os.makedirs(save_folder) - -source_folder = os.path.join(data_folder, identifier) -folder_ns = [f for f in os.listdir(source_folder) if os.path.isdir(os.path.join(source_folder, f))] -folder_ns.sort() -print('\n'.join(folder_ns)) - -stack = [] -for folder_n in folder_ns: - curr_source_folder = os.path.join(source_folder, folder_n) - stack.append(tf.imread(os.path.join(curr_source_folder, 'corrected_mean_projection.tif'))) - -stack = np.array(stack) -tf.imsave(os.path.join(curr_folder, '{}_zstack.tif'.format(identifier)), stack) -# tf.imsave(os.path.join(save_folder, '{}_zstack.tif'.format(identifier)), stack) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/030_motion_correction_cross_steps.py b/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/030_motion_correction_cross_steps.py deleted file mode 100644 
index 56be4ee..0000000 --- a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/030_motion_correction_cross_steps.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import stia.motion_correction as mc - -input_folder = 'cell1' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -input_path_identifier = 'zstack' - -output_folder = os.path.join(input_folder, 'corrected') -if not os.path.isdir(output_folder): - os.makedirs(output_folder) - -process_num = 1 -anchor_frame_ind_chunk = 10 -anchor_frame_ind_projection = 0 -iteration_chunk = 10 -iteration_projection = 5 -max_offset_chunk = (30., 30.) -max_offset_projection = (30., 30.) -align_func = mc.phase_correlation -fill_value = 0. -avi_downsample_rate = 20 - -f_paths, _ = mc.motion_correction(input_folder=input_folder, - input_path_identifier=input_path_identifier, - process_num=process_num, - output_folder=output_folder, - anchor_frame_ind_chunk=anchor_frame_ind_chunk, - anchor_frame_ind_projection=anchor_frame_ind_projection, - iteration_chunk=iteration_chunk, - iteration_projection=iteration_projection, - max_offset_chunk=max_offset_chunk, - max_offset_projection=max_offset_projection, - align_func=align_func, - fill_value=fill_value) - -print('\n'.join(f_paths)) - -offsets_path = os.path.join(output_folder, 'correction_offsets.hdf5') -path_pairs = zip(f_paths, f_paths) -mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=path_pairs, - process_num=process_num, - fill_value=fill_value, - output_folder=output_folder, - avi_downsample_rate=avi_downsample_rate) diff --git a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/040_get_fine_zstack.py b/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/040_get_fine_zstack.py deleted file mode 100644 index 66e69e7..0000000 --- a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/040_get_fine_zstack.py +++ /dev/null @@ -1,53 +0,0 @@ -import os -import h5py -import numpy as np -import tifffile as tf -import stia.motion_correction as mc -import stia.utility.image_analysis as ia - -zstack_fn = 'zstack_zoom2_green.tif' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -zstack = tf.imread(zstack_fn) - -step_offsets = [[0., 0.]] # offsets between adjacent steps - -print('calculating step offsets ...') -for step_i in range(1, zstack.shape[0]): - curr_offset = mc.phase_correlation(zstack[step_i], zstack[step_i - 1]) - step_offsets.append(curr_offset) -step_offsets = np.array([np.array(so) for so in step_offsets], dtype=np.float32) -print('\nsetp offsets:') -print(step_offsets) - -print('\ncalculating final offsets ...') -final_offsets_y = np.cumsum(step_offsets[:, 0]) -final_offsets_x = np.cumsum(step_offsets[:, 1]) -final_offsets = np.array([final_offsets_x, final_offsets_y], dtype=np.float32).transpose() - -middle_frame_ind = zstack.shape[0] // 2 -middle_offsets = final_offsets[middle_frame_ind: middle_frame_ind + 1] -final_offsets = final_offsets - middle_offsets -print('\nfinal offsets:') -print(final_offsets) - -print('applying final offsets ...') - -zstack_f = [] # fine zstack - -for step_i in range(zstack.shape[0]): - - curr_offset = final_offsets[step_i] - - frame = zstack[step_i] - frame_f = ia.rigid_transform_cv2_2d(frame, offset=curr_offset, fill_value=0.).astype(np.float32) - zstack_f.append(frame_f) - -zstack_f = np.array(zstack_f, dtype=np.float32) - -tf.imsave(os.path.splitext(zstack_fn)[0] + 
'_aligned.tif', zstack_f) -tf.imsave(os.path.splitext(zstack_fn)[0] + '_max_projection.tif', np.max(zstack_f, axis=0)) - - diff --git a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/050_get_2p_vas_maps.py b/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/050_get_2p_vas_maps.py deleted file mode 100644 index 9a08d07..0000000 --- a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/050_get_2p_vas_maps.py +++ /dev/null @@ -1,19 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import matplotlib.pyplot as plt - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180322-M360495-2p" -file_name = "vasmap_zoom1_00001_00001.tif" -save_name = 'vasmap_2p_zoom1.tif' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -vasmap = tf.imread(os.path.join(data_folder, file_name)) -vasmap = np.mean(vasmap.transpose((0, 2, 1))[:, ::-1, :], axis=0) -vasmap = ia.array_nor(vasmap) - -tf.imsave(save_name, vasmap.astype(np.float32)) diff --git a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/060_get_wf_vas_maps.py b/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/060_get_wf_vas_maps.py deleted file mode 100644 index 4a49965..0000000 --- a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/060_get_wf_vas_maps.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -import numpy as np -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -import tifffile as tf - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180622-M391354-2p\vasmap_wf" - -vas_map_fns= ["180622JCamF100", - "180622JCamF101", - "180622JCamF102", - "180622JCamF103", - "180622JCamF104", - "180622JCamF105",] - -saveFolder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(saveFolder) - -vas_map_paths = [os.path.join(data_folder, f) for f in vas_map_fns] - -vas_maps = [] - -for vas_map_path in vas_map_paths: - - vas_map_focused, _, _ = ft.importRawJCamF(vas_map_path, column=1024, row=1024, headerLength = 116, - tailerLength=452) - vas_map_focused = vas_map_focused[2:] - vas_map_focused = vas_map_focused[:, ::-1, :] - vas_map_focused[vas_map_focused > 50000] = 400 - vas_map_focused = np.mean(vas_map_focused, axis=0) - vas_maps.append(ia.array_nor(vas_map_focused)) - -vas_map = ia.array_nor(np.mean(vas_maps, axis=0)) - -tf.imsave('vas_map_focused_wf_green.tif', vas_map.astype(np.float32)) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/old/010_motion_correction.py b/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/old/010_motion_correction.py deleted file mode 100644 index 2ad361c..0000000 --- a/corticalmapping/scripts/post_recording/00_old/zstack_single_channel_regular_2p/old/010_motion_correction.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import stia.motion_correction as mc - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180322-M360495-2p\zstack_zoom4" - -input_path_identifier = '.tif' -process_num = 1 -anchor_frame_ind_chunk = 100 -anchor_frame_ind_projection = 0 -iteration_chunk = 10 -iteration_projection = 5 -max_offset_chunk = (30., 30.) -max_offset_projection = (100., 100.) 
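# [editor's note] align_func below is stia's phase_correlation. For readers
# without the stia package, an illustrative NumPy-only sketch of the usual
# algorithm (normalized cross-power spectrum, peak location = shift); this is
# an assumption about the method, not the stia implementation, and the sign
# convention may differ:
#
#     import numpy as np
#
#     def phase_correlation_sketch(frame, reference):
#         f0, f1 = np.fft.fft2(reference), np.fft.fft2(frame)
#         cross = f0 * np.conj(f1)  # cross-power spectrum
#         corr = np.abs(np.fft.ifft2(cross / (np.abs(cross) + 1e-12)))
#         peak = np.unravel_index(np.argmax(corr), corr.shape)
#         # wrap peak indices into signed (row, col) offsets
#         return [p if p <= s // 2 else p - s for p, s in zip(peak, corr.shape)]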
-align_func = mc.phase_correlation -fill_value = 0. -avi_downsample_rate = 20 - -sub_folder_ns = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f))] -sub_folder_ns.sort() -print('\n'.join(sub_folder_ns)) - -for sub_folder_n in sub_folder_ns: - - sub_folder = os.path.join(data_folder, sub_folder_n) - - f_paths, _ = mc.motion_correction(input_folder=sub_folder, - input_path_identifier=input_path_identifier, - process_num=process_num, - output_folder=sub_folder, - anchor_frame_ind_chunk=anchor_frame_ind_chunk, - anchor_frame_ind_projection=anchor_frame_ind_projection, - iteration_chunk=iteration_chunk, - iteration_projection=iteration_projection, - max_offset_chunk=max_offset_chunk, - max_offset_projection=max_offset_projection, - align_func=align_func, - fill_value=fill_value) - - print('\n'.join(f_paths)) - - offsets_path = os.path.join(sub_folder, 'correction_offsets.hdf5') - path_pairs = zip(f_paths, f_paths) - mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=path_pairs, - process_num=process_num, - fill_value=fill_value, - output_folder=sub_folder, - avi_downsample_rate=avi_downsample_rate) diff --git a/corticalmapping/scripts/post_recording/00_old/FlashingCircle/batch_analyzeFlashingCircle.py b/corticalmapping/scripts/post_recording/FlashingCircle/batch_analyzeFlashingCircle.py similarity index 95% rename from corticalmapping/scripts/post_recording/00_old/FlashingCircle/batch_analyzeFlashingCircle.py rename to corticalmapping/scripts/post_recording/FlashingCircle/batch_analyzeFlashingCircle.py index aacf1d8..eb931b9 100644 --- a/corticalmapping/scripts/post_recording/00_old/FlashingCircle/batch_analyzeFlashingCircle.py +++ b/corticalmapping/scripts/post_recording/FlashingCircle/batch_analyzeFlashingCircle.py @@ -68,7 +68,7 @@ vasMap = hl.getVasMap(vasMapPaths,dtype=vasMapDtype,headerLength=vasMapHeaderLength,tailerLength=vasMapTailerLength, column=vasMapColumn,row=vasMapRow,frame=vasMapFrame,crop=vasMapCrop,mergeMethod=vasMapMergeMethod) else: - print 'No vasculature map find. Taking first frame of movie as vasculature map.' + print('No vasculature map found. 
Taking first frame of movie as vasculature map.') firstMovPath = os.path.join(dataFolder, [f for f in fileList if (dateRecorded+'JCamF'+str(fileNumList[0]) in f) and ('.npy' in f)][0]) vasMap = BinarySlicer(firstMovPath)[0,:,:] tf.imsave(dateRecorded+'_M'+mouseID+'_vasMap.tif',vasMap.astype(np.float32)) @@ -91,10 +91,10 @@ log = ft.loadFile(logPath) refreshRate = float(log['monitor']['refreshRate']) - if 'preGapFrameNum' in log['stimulation'].keys(): + if 'preGapFrameNum' in list(log['stimulation'].keys()): preGapDur = log['stimulation']['preGapFrameNum'] / refreshRate postGapDur = log['stimulation']['postGapFrameNum'] / refreshRate - elif 'preGapDur' in log['stimulation'].keys(): + elif 'preGapDur' in list(log['stimulation'].keys()): preGapDur = log['stimulation']['preGapDur'] postGapDur = log['stimulation']['postGapDur'] diff --git a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/Retinotopic_Mapping_Analysis_Template/2015-10-30_RetinotopicMappingAnalysisTemplate.ipynb b/corticalmapping/scripts/post_recording/RetinotopicMapping/Retinotopic_Mapping_Analysis_Template/2015-10-30_RetinotopicMappingAnalysisTemplate.ipynb similarity index 100% rename from corticalmapping/scripts/post_recording/analysis_retinotopicmapping/Retinotopic_Mapping_Analysis_Template/2015-10-30_RetinotopicMappingAnalysisTemplate.ipynb rename to corticalmapping/scripts/post_recording/RetinotopicMapping/Retinotopic_Mapping_Analysis_Template/2015-10-30_RetinotopicMappingAnalysisTemplate.ipynb diff --git a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/Retinotopic_Mapping_Analysis_Template/2015-10-31_RetinotopicMappingAnalysisTemplate.ipynb b/corticalmapping/scripts/post_recording/RetinotopicMapping/Retinotopic_Mapping_Analysis_Template/2015-10-31_RetinotopicMappingAnalysisTemplate.ipynb similarity index 100% rename from corticalmapping/scripts/post_recording/analysis_retinotopicmapping/Retinotopic_Mapping_Analysis_Template/2015-10-31_RetinotopicMappingAnalysisTemplate.ipynb rename to corticalmapping/scripts/post_recording/RetinotopicMapping/Retinotopic_Mapping_Analysis_Template/2015-10-31_RetinotopicMappingAnalysisTemplate.ipynb diff --git a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/Retinotopic_Mapping_Analysis_Template/2015-11-17_RetinotopicMappingAnalysisTemplate.ipynb b/corticalmapping/scripts/post_recording/RetinotopicMapping/Retinotopic_Mapping_Analysis_Template/2015-11-17_RetinotopicMappingAnalysisTemplate.ipynb similarity index 100% rename from corticalmapping/scripts/post_recording/analysis_retinotopicmapping/Retinotopic_Mapping_Analysis_Template/2015-11-17_RetinotopicMappingAnalysisTemplate.ipynb rename to corticalmapping/scripts/post_recording/RetinotopicMapping/Retinotopic_Mapping_Analysis_Template/2015-11-17_RetinotopicMappingAnalysisTemplate.ipynb diff --git a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/Retinotopic_Mapping_Analysis_Template/2016-10-26_RetinotopicMappingAnalysisTemplate-ManualFileEntry.ipynb b/corticalmapping/scripts/post_recording/RetinotopicMapping/Retinotopic_Mapping_Analysis_Template/2016-10-26_RetinotopicMappingAnalysisTemplate-ManualFileEntry.ipynb similarity index 100% rename from corticalmapping/scripts/post_recording/analysis_retinotopicmapping/Retinotopic_Mapping_Analysis_Template/2016-10-26_RetinotopicMappingAnalysisTemplate-ManualFileEntry.ipynb rename to 
corticalmapping/scripts/post_recording/RetinotopicMapping/Retinotopic_Mapping_Analysis_Template/2016-10-26_RetinotopicMappingAnalysisTemplate-ManualFileEntry.ipynb diff --git a/corticalmapping/scripts/post_recording/RetinotopicMapping/Thumbs.db b/corticalmapping/scripts/post_recording/RetinotopicMapping/Thumbs.db new file mode 100644 index 0000000..6247ede Binary files /dev/null and b/corticalmapping/scripts/post_recording/RetinotopicMapping/Thumbs.db differ diff --git a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_MarkPatches.py b/corticalmapping/scripts/post_recording/RetinotopicMapping/batch_MarkPatches.py similarity index 100% rename from corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_MarkPatches.py rename to corticalmapping/scripts/post_recording/RetinotopicMapping/batch_MarkPatches.py diff --git a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_PatchName.py b/corticalmapping/scripts/post_recording/RetinotopicMapping/batch_PatchName.py similarity index 91% rename from corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_PatchName.py rename to corticalmapping/scripts/post_recording/RetinotopicMapping/batch_PatchName.py index 9c612af..e38a7c6 100644 --- a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_PatchName.py +++ b/corticalmapping/scripts/post_recording/RetinotopicMapping/batch_PatchName.py @@ -30,7 +30,7 @@ trialPath = os.path.join(currFolder,trialName) trial, _ = rm.loadTrial(trialPath) finalPatches = getattr(trial,patchesToShow) - numOfPatches = len(finalPatches.keys()) + numOfPatches = len(list(finalPatches.keys())) rowNum = numOfPatches // columnNum + 1 f = plt.figure(figsize=(10,10)) f.suptitle(trialName) @@ -38,7 +38,7 @@ rm.plotPatches(finalPatches,plotaxis=ax,markersize=0) - for key,patch in finalPatches.iteritems(): + for key,patch in finalPatches.items(): center = patch.getCenter() ax.text(center[1],center[0],key,verticalalignment='center', horizontalalignment='center') diff --git a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_analyzeRetinotopicMapping_1.py b/corticalmapping/scripts/post_recording/RetinotopicMapping/batch_analyzeRetinotopicMapping_1.py similarity index 92% rename from corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_analyzeRetinotopicMapping_1.py rename to corticalmapping/scripts/post_recording/RetinotopicMapping/batch_analyzeRetinotopicMapping_1.py index 4be51a3..c377855 100644 --- a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_analyzeRetinotopicMapping_1.py +++ b/corticalmapping/scripts/post_recording/RetinotopicMapping/batch_analyzeRetinotopicMapping_1.py @@ -6,7 +6,7 @@ import matplotlib.pyplot as plt from toolbox.misc import BinarySlicer import warnings -import tifffile as tf +import imaging_behavior.core.tifffile as tf import corticalmapping.core.FileTools as ft import corticalmapping.core.TimingAnalysis as ta import corticalmapping.HighLevel as hl @@ -14,13 +14,13 @@ import corticalmapping.core.ImageAnalysis as ia -dateRecorded = '170720' # str 'yymmdd' -mouseID = '312805' # str, without 'M', for example: '214522' -userID = 'Jun' # user name, should be consistent withe the display log user name -mouseType='Rorb-Cre;Camk2a-tTA;Ai94(TITL-GCaMP6s)' -trialNum='1' # str -vasfileNums = [100, 101] # file numbers of vasculature images, should be a list -fileNum = 103 # file number of the imaged movie +dateRecorded = '171215' # str 'yymmdd' +mouseID = 
'321607' # str, without 'M', for example: '214522' +userID = 'mattv' # user name, should be consistent with the display log user name +mouseType='Slc17a7;Ai162(TITL-GCaMP6s)' +trialNum='4' # str +vasfileNums = [102] # file numbers of vasculature images, should be a list +fileNum = 102 # file number of the imaged movie FFTmode='peak' # detecting peak of valley of the signal, GCaMP:'peak'; intrinsic signal: 'valley' @@ -79,7 +79,7 @@ vasMap = hl.getVasMap(vasMapPaths,dtype=vasMapDtype,headerLength=vasMapHeaderLength,tailerLength=vasMapTailerLength, column=vasMapColumn,row=vasMapRow,frame=vasMapFrame,crop=vasMapCrop,mergeMethod=vasMapMergeMethod) else: - print 'No vasculature map find. Taking first frame of movie as vasculature map.' + print('No vasculature map found. Taking first frame of movie as vasculature map.') vasMap = BinarySlicer(movPath)[0,:,:] vasMap = ia.array_nor(vasMap).astype(np.float32) diff --git a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_analyzeRetinotopicMapping_2.py b/corticalmapping/scripts/post_recording/RetinotopicMapping/batch_analyzeRetinotopicMapping_2.py similarity index 100% rename from corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_analyzeRetinotopicMapping_2.py rename to corticalmapping/scripts/post_recording/RetinotopicMapping/batch_analyzeRetinotopicMapping_2.py diff --git a/corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_analyzeRetinotopicMapping_3.py b/corticalmapping/scripts/post_recording/RetinotopicMapping/batch_analyzeRetinotopicMapping_3.py similarity index 100% rename from corticalmapping/scripts/post_recording/analysis_retinotopicmapping/batch_analyzeRetinotopicMapping_3.py rename to corticalmapping/scripts/post_recording/RetinotopicMapping/batch_analyzeRetinotopicMapping_3.py diff --git a/corticalmapping/scripts/post_recording/analysis_database/0041_get_plane_dfs.py b/corticalmapping/scripts/post_recording/analysis_database/0041_get_plane_dfs.py deleted file mode 100644 index 93a5c25..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0041_get_plane_dfs.py +++ /dev/null @@ -1,427 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import time -import pandas as pd -import numpy as np -import h5py -import datetime -import corticalmapping.DatabaseTools as dt -from multiprocessing import Pool -from shutil import copyfile - -date_range = [180301, 190610] -database_folder = 'nwbs' -save_folder_n = "dataframes" -process_num = 8 -is_overwrite = False - -params = dt.ANALYSIS_PARAMS -params['trace_type'] = 'f_center_raw' -params['is_collapse_dire'] = False -params['is_collapse_sf'] = True -params['is_collapse_tf'] = False - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('pandas version: {}\n'.format(pd.__version__)) - -columns = [ - 'date', - 'mouse_id', - 'plane_n', - 'roi_n', - 'depth', # microns under pia, float - - # roi mask - 'roi_area', # square micron - 'roi_center_row', # center of roi mask in field of view, row - 'roi_center_col', # center of roi mask in field of view, column - - # trace skewness - 'skew_raw', # skewness of unfiltered trace (neuropil subtracted), float - 'skew_fil', # skewness of highpassed trace, float - - # receptive fields - 'rf_pos_on_peak_z', - 'rf_pos_on_area', - 'rf_pos_on_center_alt', - 'rf_pos_on_center_azi', - - 'rf_pos_off_peak_z', - 'rf_pos_off_area', - 'rf_pos_off_center_alt', - 'rf_pos_off_center_azi', - - 'rf_pos_onoff_peak_z', - 
'rf_pos_onoff_area', - 'rf_pos_onoff_center_alt', - 'rf_pos_onoff_center_azi', - - 'rf_pos_lsi', - - 'rf_neg_on_peak_z', - 'rf_neg_on_area', - 'rf_neg_on_center_alt', - 'rf_neg_on_center_azi', - - 'rf_neg_off_peak_z', - 'rf_neg_off_area', - 'rf_neg_off_center_alt', - 'rf_neg_off_center_azi', - - 'rf_neg_onoff_peak_z', - 'rf_neg_onoff_area', - 'rf_neg_onoff_center_alt', - 'rf_neg_onoff_center_azi', - - 'rf_neg_lsi', - - # drifting grating peak response - 'dgc_pos_peak_df', - 'dgc_neg_peak_df', - 'dgc_pos_p_ttest_df', - 'dgc_neg_p_ttest_df', - 'dgc_p_anova_df', - - 'dgc_pos_peak_dff', - 'dgc_neg_peak_dff', - 'dgc_pos_p_ttest_dff', - 'dgc_neg_p_ttest_dff', - 'dgc_p_anova_dff', - - 'dgc_pos_peak_z', - 'dgc_neg_peak_z', - 'dgc_pos_p_ttest_z', - 'dgc_neg_p_ttest_z', - 'dgc_p_anova_z', - - # direction / orientation tuning, pos, df - 'dgc_pos_osi_raw_df', - 'dgc_pos_dsi_raw_df', - 'dgc_pos_gosi_raw_df', - 'dgc_pos_gdsi_raw_df', - 'dgc_pos_osi_ele_df', - 'dgc_pos_dsi_ele_df', - 'dgc_pos_gosi_ele_df', - 'dgc_pos_gdsi_ele_df', - 'dgc_pos_osi_rec_df', - 'dgc_pos_dsi_rec_df', - 'dgc_pos_gosi_rec_df', - 'dgc_pos_gdsi_rec_df', - 'dgc_pos_peak_dire_raw_df', - 'dgc_pos_vs_dire_raw_df', - 'dgc_pos_vs_dire_ele_df', - 'dgc_pos_vs_dire_rec_df', - - # direction / orientation tuning, neg, df - 'dgc_neg_osi_raw_df', - 'dgc_neg_dsi_raw_df', - 'dgc_neg_gosi_raw_df', - 'dgc_neg_gdsi_raw_df', - 'dgc_neg_osi_ele_df', - 'dgc_neg_dsi_ele_df', - 'dgc_neg_gosi_ele_df', - 'dgc_neg_gdsi_ele_df', - 'dgc_neg_osi_rec_df', - 'dgc_neg_dsi_rec_df', - 'dgc_neg_gosi_rec_df', - 'dgc_neg_gdsi_rec_df', - 'dgc_neg_peak_dire_raw_df', - 'dgc_neg_vs_dire_raw_df', - 'dgc_neg_vs_dire_ele_df', - 'dgc_neg_vs_dire_rec_df', - - # direction / orientation tuning, pos, dff - 'dgc_pos_osi_raw_dff', - 'dgc_pos_dsi_raw_dff', - 'dgc_pos_gosi_raw_dff', - 'dgc_pos_gdsi_raw_dff', - 'dgc_pos_osi_ele_dff', - 'dgc_pos_dsi_ele_dff', - 'dgc_pos_gosi_ele_dff', - 'dgc_pos_gdsi_ele_dff', - 'dgc_pos_osi_rec_dff', - 'dgc_pos_dsi_rec_dff', - 'dgc_pos_gosi_rec_dff', - 'dgc_pos_gdsi_rec_dff', - 'dgc_pos_peak_dire_raw_dff', - 'dgc_pos_vs_dire_raw_dff', - 'dgc_pos_vs_dire_ele_dff', - 'dgc_pos_vs_dire_rec_dff', - - # direction / orientation tuning, neg, dff - 'dgc_neg_osi_raw_dff', - 'dgc_neg_dsi_raw_dff', - 'dgc_neg_gosi_raw_dff', - 'dgc_neg_gdsi_raw_dff', - 'dgc_neg_osi_ele_dff', - 'dgc_neg_dsi_ele_dff', - 'dgc_neg_gosi_ele_dff', - 'dgc_neg_gdsi_ele_dff', - 'dgc_neg_osi_rec_dff', - 'dgc_neg_dsi_rec_dff', - 'dgc_neg_gosi_rec_dff', - 'dgc_neg_gdsi_rec_dff', - 'dgc_neg_peak_dire_raw_dff', - 'dgc_neg_vs_dire_raw_dff', - 'dgc_neg_vs_dire_ele_dff', - 'dgc_neg_vs_dire_rec_dff', - - # direction / orientation tuning, pos, zscore - 'dgc_pos_osi_raw_z', - 'dgc_pos_dsi_raw_z', - 'dgc_pos_gosi_raw_z', - 'dgc_pos_gdsi_raw_z', - 'dgc_pos_osi_ele_z', - 'dgc_pos_dsi_ele_z', - 'dgc_pos_gosi_ele_z', - 'dgc_pos_gdsi_ele_z', - 'dgc_pos_osi_rec_z', - 'dgc_pos_dsi_rec_z', - 'dgc_pos_gosi_rec_z', - 'dgc_pos_gdsi_rec_z', - 'dgc_pos_peak_dire_raw_z', - 'dgc_pos_vs_dire_raw_z', - 'dgc_pos_vs_dire_ele_z', - 'dgc_pos_vs_dire_rec_z', - - # direction / orientation tuning, neg, zscore - 'dgc_neg_osi_raw_z', - 'dgc_neg_dsi_raw_z', - 'dgc_neg_gosi_raw_z', - 'dgc_neg_gdsi_raw_z', - 'dgc_neg_osi_ele_z', - 'dgc_neg_dsi_ele_z', - 'dgc_neg_gosi_ele_z', - 'dgc_neg_gdsi_ele_z', - 'dgc_neg_osi_rec_z', - 'dgc_neg_dsi_rec_z', - 'dgc_neg_gosi_rec_z', - 'dgc_neg_gdsi_rec_z', - 'dgc_neg_peak_dire_raw_z', - 'dgc_neg_vs_dire_raw_z', - 'dgc_neg_vs_dire_ele_z', - 'dgc_neg_vs_dire_rec_z', - - # sf tuning, pos, df - 
'dgc_pos_peak_sf_raw_df', - 'dgc_pos_weighted_sf_raw_df', - 'dgc_pos_weighted_sf_log_raw_df', - 'dgc_pos_weighted_sf_ele_df', - 'dgc_pos_weighted_sf_log_ele_df', - 'dgc_pos_weighted_sf_rec_df', - 'dgc_pos_weighted_sf_log_rec_df', - - # sf tuning, neg, df - 'dgc_neg_peak_sf_raw_df', - 'dgc_neg_weighted_sf_raw_df', - 'dgc_neg_weighted_sf_log_raw_df', - 'dgc_neg_weighted_sf_ele_df', - 'dgc_neg_weighted_sf_log_ele_df', - 'dgc_neg_weighted_sf_rec_df', - 'dgc_neg_weighted_sf_log_rec_df', - - # sf tuning, pos, dff - 'dgc_pos_peak_sf_raw_dff', - 'dgc_pos_weighted_sf_raw_dff', - 'dgc_pos_weighted_sf_log_raw_dff', - 'dgc_pos_weighted_sf_ele_dff', - 'dgc_pos_weighted_sf_log_ele_dff', - 'dgc_pos_weighted_sf_rec_dff', - 'dgc_pos_weighted_sf_log_rec_dff', - - # sf tuning, neg, dff - 'dgc_neg_peak_sf_raw_dff', - 'dgc_neg_weighted_sf_raw_dff', - 'dgc_neg_weighted_sf_log_raw_dff', - 'dgc_neg_weighted_sf_ele_dff', - 'dgc_neg_weighted_sf_log_ele_dff', - 'dgc_neg_weighted_sf_rec_dff', - 'dgc_neg_weighted_sf_log_rec_dff', - - # sf tuning, pos, zscore - 'dgc_pos_peak_sf_raw_z', - 'dgc_pos_weighted_sf_raw_z', - 'dgc_pos_weighted_sf_log_raw_z', - 'dgc_pos_weighted_sf_ele_z', - 'dgc_pos_weighted_sf_log_ele_z', - 'dgc_pos_weighted_sf_rec_z', - 'dgc_pos_weighted_sf_log_rec_z', - - # sf tuning, neg, zscore - 'dgc_neg_peak_sf_raw_z', - 'dgc_neg_weighted_sf_raw_z', - 'dgc_neg_weighted_sf_log_raw_z', - 'dgc_neg_weighted_sf_ele_z', - 'dgc_neg_weighted_sf_log_ele_z', - 'dgc_neg_weighted_sf_rec_z', - 'dgc_neg_weighted_sf_log_rec_z', - - # tf tuning, pos, df - 'dgc_pos_peak_tf_raw_df', - 'dgc_pos_weighted_tf_raw_df', - 'dgc_pos_weighted_tf_log_raw_df', - 'dgc_pos_weighted_tf_ele_df', - 'dgc_pos_weighted_tf_log_ele_df', - 'dgc_pos_weighted_tf_rec_df', - 'dgc_pos_weighted_tf_log_rec_df', - - # tf tuning, neg, df - 'dgc_neg_peak_tf_raw_df', - 'dgc_neg_weighted_tf_raw_df', - 'dgc_neg_weighted_tf_log_raw_df', - 'dgc_neg_weighted_tf_ele_df', - 'dgc_neg_weighted_tf_log_ele_df', - 'dgc_neg_weighted_tf_rec_df', - 'dgc_neg_weighted_tf_log_rec_df', - - # tf tuning, pos, dff - 'dgc_pos_peak_tf_raw_dff', - 'dgc_pos_weighted_tf_raw_dff', - 'dgc_pos_weighted_tf_log_raw_dff', - 'dgc_pos_weighted_tf_ele_dff', - 'dgc_pos_weighted_tf_log_ele_dff', - 'dgc_pos_weighted_tf_rec_dff', - 'dgc_pos_weighted_tf_log_rec_dff', - - # tf tuning, neg, dff - 'dgc_neg_peak_tf_raw_dff', - 'dgc_neg_weighted_tf_raw_dff', - 'dgc_neg_weighted_tf_log_raw_dff', - 'dgc_neg_weighted_tf_ele_dff', - 'dgc_neg_weighted_tf_log_ele_dff', - 'dgc_neg_weighted_tf_rec_dff', - 'dgc_neg_weighted_tf_log_rec_dff', - - # tf tuning, pos, zscore - 'dgc_pos_peak_tf_raw_z', - 'dgc_pos_weighted_tf_raw_z', - 'dgc_pos_weighted_tf_log_raw_z', - 'dgc_pos_weighted_tf_ele_z', - 'dgc_pos_weighted_tf_log_ele_z', - 'dgc_pos_weighted_tf_rec_z', - 'dgc_pos_weighted_tf_log_rec_z', - - # tf tuning, neg, zscore - 'dgc_neg_peak_tf_raw_z', - 'dgc_neg_weighted_tf_raw_z', - 'dgc_neg_weighted_tf_log_raw_z', - 'dgc_neg_weighted_tf_ele_z', - 'dgc_neg_weighted_tf_log_ele_z', - 'dgc_neg_weighted_tf_rec_z', - 'dgc_neg_weighted_tf_log_rec_z', -] - -def process_one_nwb_for_multi_thread(inputs): - - nwb_path, params, columns, save_folder, t0, nwb_i, nwb_f_num, is_overwrite = inputs - nwb_fn = os.path.splitext(os.path.split(nwb_path)[1])[0] - - nwb_f = h5py.File(nwb_path, 'r') - - plane_ns = [k for k in nwb_f['processing'].keys() if k[0:16] == 'rois_and_traces_'] - plane_ns = [k[16:] for k in plane_ns] - plane_ns.sort() - # print('total plane number: {}'.format(len(plane_ns))) - - for plane_n in plane_ns: - 
print('\tt: {:5.0f} minutes, processing {}, {} / {}, {} ...'.format((time.time() - t0) / 60., - nwb_fn, - nwb_i + 1, - nwb_f_num, - plane_n)) - - save_fn = '_'.join(nwb_fn.split('_')[0:2]) + '_' + plane_n + '.xlsx' - save_path = os.path.join(save_folder, save_fn) - if os.path.isfile(save_path): - - if is_overwrite: # overwrite existing xlsx files - print('\t{}, file already exists. Overwirite.'.format(os.path.split(save_path)[1])) - os.remove(save_path) - - - else: # do not overwrite existing xlsx files - print('\t{}, file already exists. Skip.'.format(os.path.split(save_path)[1])) - return - - roi_ns = nwb_f['processing/rois_and_traces_{}/ImageSegmentation/imaging_plane/roi_list'.format(plane_n)].value - roi_ns = [r.encode('utf-8') for r in roi_ns if r[0:4] == 'roi_'] - roi_ns.sort() - - df = pd.DataFrame(np.nan, index=range(len(roi_ns)), columns=columns) - - for roi_i, roi_n in enumerate(roi_ns): - # print('\t\t\troi: {} / {}'.format(roi_i+1, len(roi_ns))) - roi_properties, _, _, _, _, _, _, _, _, _, _, _, _, _ = \ - dt.get_everything_from_roi(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n, params=params) - for rp_name, rp_value in roi_properties.items(): - df.loc[roi_i, rp_name] = rp_value - - with pd.ExcelWriter(save_path, mode='w') as writer: - df.to_excel(writer, sheet_name='sheet1') - - -def run(): - - t0 = time.time() - - nwb_fns = [] - for fn in os.listdir(database_folder): - if fn[-4:] == '.nwb' and date_range[0] <= int(fn[0:6]) <= date_range[1]: - nwb_fns.append(fn) - nwb_fns.sort() - print('\nnwb file list:') - print('\n'.join(nwb_fns)) - - date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - save_folder = os.path.join(curr_folder, '{}_{}'.format(save_folder_n, date_str)) - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - copyfile(os.path.realpath(__file__), os.path.join(save_folder, 'script_log.py')) - - inputs_lst = [(os.path.join(curr_folder, database_folder, nwb_fn), - params, - columns, - save_folder, - t0, - nwb_i, - len(nwb_fns), - is_overwrite) for nwb_i, nwb_fn in enumerate(nwb_fns)] - - print('\nprocessing individual nwb files ...') - p = Pool(process_num) - p.map(process_one_nwb_for_multi_thread, inputs_lst) - # process_one_nwb_for_multi_thread(inputs_lst[0]) - - # print('\nConcatenating indiviudal dataframes ...') - # xlsx_fns = [f for f in os.listdir(os.path.join(curr_folder,save_folder)) if f[-5:] == '.xlsx'] - # xlsx_fns.sort() - # - # dfs = [] - # for xlsx_fn in xlsx_fns: - # curr_df = pd.read_excel(os.path.join(curr_folder, save_folder, xlsx_fn), sheetname='sheet1') - # # print(curr_df) - # dfs.append(curr_df) - # - # big_df = pd.concat(dfs, ignore_index=True) - # - # print('\nsaving ...') - # date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - # save_path = os.path.join(curr_folder, 'big_roi_table_{}.xlsx'.format(date_str)) - # - # if os.path.isfile(save_path): - # with pd.ExcelWriter(save_path, mode='a') as writer: - # big_df.to_excel(writer, sheet_name=params['trace_type']) - # else: - # with pd.ExcelWriter(save_path, mode='w') as writer: - # big_df.to_excel(writer, sheet_name=params['trace_type']) - - print('\ndone!') - - -if __name__ == "__main__": - run() diff --git a/corticalmapping/scripts/post_recording/analysis_database/0042_get_plane_dfs_single.py b/corticalmapping/scripts/post_recording/analysis_database/0042_get_plane_dfs_single.py deleted file mode 100644 index 14defd6..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0042_get_plane_dfs_single.py +++ /dev/null @@ -1,432 +0,0 @@ -import sys 
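# [editor's note] 0042 below is the single-list variant of 0041 above: it
# processes an explicit nwb_fns list instead of globbing nwb files by date
# range. The concatenation step that both scripts leave commented out would,
# in sketch form (assuming the per-plane .xlsx files written into save_folder,
# the os/pandas imports at the top of this script, and a pandas version that
# takes sheet_name rather than the deprecated sheetname):
#
#     xlsx_fns = sorted(f for f in os.listdir(save_folder) if f.endswith('.xlsx'))
#     dfs = [pd.read_excel(os.path.join(save_folder, f), sheet_name='sheet1')
#            for f in xlsx_fns]
#     big_df = pd.concat(dfs, ignore_index=True)  # one row per roi, all planes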
-sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import time -import pandas as pd -import numpy as np -import h5py -import datetime -import corticalmapping.DatabaseTools as dt -from multiprocessing import Pool -from shutil import copyfile - -nwb_fns = ['190510_M439939_110_repacked.nwb', - '190523_M439939_110_repacked.nwb', - '190524_M439939_110_repacked.nwb', - '190509_M439943_110_repacked.nwb', - '190521_M439943_110_repacked.nwb', - '190523_M439943_110_repacked.nwb',] -database_folder = 'nwbs' -save_folder_n = "dataframes" -process_num = 6 -is_overwrite = False - -params = dt.ANALYSIS_PARAMS -params['trace_type'] = 'f_center_subtracted' -params['is_collapse_dire'] = False -params['is_collapse_sf'] = True -params['is_collapse_tf'] = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('pandas version: {}\n'.format(pd.__version__)) - -columns = [ - 'date', - 'mouse_id', - 'plane_n', - 'roi_n', - 'depth', # microns under pia, float - - # roi mask - 'roi_area', # square micron - 'roi_center_row', # center of roi mask in field of view, row - 'roi_center_col', # center of roi mask in field of view, column - - # trace skewness - 'skew_raw', # skewness of unfiltered trace (neuropil subtracted), float - 'skew_fil', # skewness of highpassed trace, float - - # receptive fields - 'rf_pos_on_peak_z', - 'rf_pos_on_area', - 'rf_pos_on_center_alt', - 'rf_pos_on_center_azi', - - 'rf_pos_off_peak_z', - 'rf_pos_off_area', - 'rf_pos_off_center_alt', - 'rf_pos_off_center_azi', - - 'rf_pos_onoff_peak_z', - 'rf_pos_onoff_area', - 'rf_pos_onoff_center_alt', - 'rf_pos_onoff_center_azi', - - 'rf_pos_lsi', - - 'rf_neg_on_peak_z', - 'rf_neg_on_area', - 'rf_neg_on_center_alt', - 'rf_neg_on_center_azi', - - 'rf_neg_off_peak_z', - 'rf_neg_off_area', - 'rf_neg_off_center_alt', - 'rf_neg_off_center_azi', - - 'rf_neg_onoff_peak_z', - 'rf_neg_onoff_area', - 'rf_neg_onoff_center_alt', - 'rf_neg_onoff_center_azi', - - 'rf_neg_lsi', - - # drifting grating peak response - 'dgc_pos_peak_df', - 'dgc_neg_peak_df', - 'dgc_pos_p_ttest_df', - 'dgc_neg_p_ttest_df', - 'dgc_p_anova_df', - - 'dgc_pos_peak_dff', - 'dgc_neg_peak_dff', - 'dgc_pos_p_ttest_dff', - 'dgc_neg_p_ttest_dff', - 'dgc_p_anova_dff', - - 'dgc_pos_peak_z', - 'dgc_neg_peak_z', - 'dgc_pos_p_ttest_z', - 'dgc_neg_p_ttest_z', - 'dgc_p_anova_z', - - # direction / orientation tuning, pos, df - 'dgc_pos_osi_raw_df', - 'dgc_pos_dsi_raw_df', - 'dgc_pos_gosi_raw_df', - 'dgc_pos_gdsi_raw_df', - 'dgc_pos_osi_ele_df', - 'dgc_pos_dsi_ele_df', - 'dgc_pos_gosi_ele_df', - 'dgc_pos_gdsi_ele_df', - 'dgc_pos_osi_rec_df', - 'dgc_pos_dsi_rec_df', - 'dgc_pos_gosi_rec_df', - 'dgc_pos_gdsi_rec_df', - 'dgc_pos_peak_dire_raw_df', - 'dgc_pos_vs_dire_raw_df', - 'dgc_pos_vs_dire_ele_df', - 'dgc_pos_vs_dire_rec_df', - - # direction / orientation tuning, neg, df - 'dgc_neg_osi_raw_df', - 'dgc_neg_dsi_raw_df', - 'dgc_neg_gosi_raw_df', - 'dgc_neg_gdsi_raw_df', - 'dgc_neg_osi_ele_df', - 'dgc_neg_dsi_ele_df', - 'dgc_neg_gosi_ele_df', - 'dgc_neg_gdsi_ele_df', - 'dgc_neg_osi_rec_df', - 'dgc_neg_dsi_rec_df', - 'dgc_neg_gosi_rec_df', - 'dgc_neg_gdsi_rec_df', - 'dgc_neg_peak_dire_raw_df', - 'dgc_neg_vs_dire_raw_df', - 'dgc_neg_vs_dire_ele_df', - 'dgc_neg_vs_dire_rec_df', - - # direction / orientation tuning, pos, dff - 'dgc_pos_osi_raw_dff', - 'dgc_pos_dsi_raw_dff', - 'dgc_pos_gosi_raw_dff', - 'dgc_pos_gdsi_raw_dff', - 'dgc_pos_osi_ele_dff', - 'dgc_pos_dsi_ele_dff', - 'dgc_pos_gosi_ele_dff', - 'dgc_pos_gdsi_ele_dff', - 
'dgc_pos_osi_rec_dff', - 'dgc_pos_dsi_rec_dff', - 'dgc_pos_gosi_rec_dff', - 'dgc_pos_gdsi_rec_dff', - 'dgc_pos_peak_dire_raw_dff', - 'dgc_pos_vs_dire_raw_dff', - 'dgc_pos_vs_dire_ele_dff', - 'dgc_pos_vs_dire_rec_dff', - - # direction / orientation tuning, neg, dff - 'dgc_neg_osi_raw_dff', - 'dgc_neg_dsi_raw_dff', - 'dgc_neg_gosi_raw_dff', - 'dgc_neg_gdsi_raw_dff', - 'dgc_neg_osi_ele_dff', - 'dgc_neg_dsi_ele_dff', - 'dgc_neg_gosi_ele_dff', - 'dgc_neg_gdsi_ele_dff', - 'dgc_neg_osi_rec_dff', - 'dgc_neg_dsi_rec_dff', - 'dgc_neg_gosi_rec_dff', - 'dgc_neg_gdsi_rec_dff', - 'dgc_neg_peak_dire_raw_dff', - 'dgc_neg_vs_dire_raw_dff', - 'dgc_neg_vs_dire_ele_dff', - 'dgc_neg_vs_dire_rec_dff', - - # direction / orientation tuning, pos, zscore - 'dgc_pos_osi_raw_z', - 'dgc_pos_dsi_raw_z', - 'dgc_pos_gosi_raw_z', - 'dgc_pos_gdsi_raw_z', - 'dgc_pos_osi_ele_z', - 'dgc_pos_dsi_ele_z', - 'dgc_pos_gosi_ele_z', - 'dgc_pos_gdsi_ele_z', - 'dgc_pos_osi_rec_z', - 'dgc_pos_dsi_rec_z', - 'dgc_pos_gosi_rec_z', - 'dgc_pos_gdsi_rec_z', - 'dgc_pos_peak_dire_raw_z', - 'dgc_pos_vs_dire_raw_z', - 'dgc_pos_vs_dire_ele_z', - 'dgc_pos_vs_dire_rec_z', - - # direction / orientation tuning, neg, zscore - 'dgc_neg_osi_raw_z', - 'dgc_neg_dsi_raw_z', - 'dgc_neg_gosi_raw_z', - 'dgc_neg_gdsi_raw_z', - 'dgc_neg_osi_ele_z', - 'dgc_neg_dsi_ele_z', - 'dgc_neg_gosi_ele_z', - 'dgc_neg_gdsi_ele_z', - 'dgc_neg_osi_rec_z', - 'dgc_neg_dsi_rec_z', - 'dgc_neg_gosi_rec_z', - 'dgc_neg_gdsi_rec_z', - 'dgc_neg_peak_dire_raw_z', - 'dgc_neg_vs_dire_raw_z', - 'dgc_neg_vs_dire_ele_z', - 'dgc_neg_vs_dire_rec_z', - - # sf tuning, pos, df - 'dgc_pos_peak_sf_raw_df', - 'dgc_pos_weighted_sf_raw_df', - 'dgc_pos_weighted_sf_log_raw_df', - 'dgc_pos_weighted_sf_ele_df', - 'dgc_pos_weighted_sf_log_ele_df', - 'dgc_pos_weighted_sf_rec_df', - 'dgc_pos_weighted_sf_log_rec_df', - - # sf tuning, neg, df - 'dgc_neg_peak_sf_raw_df', - 'dgc_neg_weighted_sf_raw_df', - 'dgc_neg_weighted_sf_log_raw_df', - 'dgc_neg_weighted_sf_ele_df', - 'dgc_neg_weighted_sf_log_ele_df', - 'dgc_neg_weighted_sf_rec_df', - 'dgc_neg_weighted_sf_log_rec_df', - - # sf tuning, pos, dff - 'dgc_pos_peak_sf_raw_dff', - 'dgc_pos_weighted_sf_raw_dff', - 'dgc_pos_weighted_sf_log_raw_dff', - 'dgc_pos_weighted_sf_ele_dff', - 'dgc_pos_weighted_sf_log_ele_dff', - 'dgc_pos_weighted_sf_rec_dff', - 'dgc_pos_weighted_sf_log_rec_dff', - - # sf tuning, neg, dff - 'dgc_neg_peak_sf_raw_dff', - 'dgc_neg_weighted_sf_raw_dff', - 'dgc_neg_weighted_sf_log_raw_dff', - 'dgc_neg_weighted_sf_ele_dff', - 'dgc_neg_weighted_sf_log_ele_dff', - 'dgc_neg_weighted_sf_rec_dff', - 'dgc_neg_weighted_sf_log_rec_dff', - - # sf tuning, pos, zscore - 'dgc_pos_peak_sf_raw_z', - 'dgc_pos_weighted_sf_raw_z', - 'dgc_pos_weighted_sf_log_raw_z', - 'dgc_pos_weighted_sf_ele_z', - 'dgc_pos_weighted_sf_log_ele_z', - 'dgc_pos_weighted_sf_rec_z', - 'dgc_pos_weighted_sf_log_rec_z', - - # sf tuning, neg, zscore - 'dgc_neg_peak_sf_raw_z', - 'dgc_neg_weighted_sf_raw_z', - 'dgc_neg_weighted_sf_log_raw_z', - 'dgc_neg_weighted_sf_ele_z', - 'dgc_neg_weighted_sf_log_ele_z', - 'dgc_neg_weighted_sf_rec_z', - 'dgc_neg_weighted_sf_log_rec_z', - - # tf tuning, pos, df - 'dgc_pos_peak_tf_raw_df', - 'dgc_pos_weighted_tf_raw_df', - 'dgc_pos_weighted_tf_log_raw_df', - 'dgc_pos_weighted_tf_ele_df', - 'dgc_pos_weighted_tf_log_ele_df', - 'dgc_pos_weighted_tf_rec_df', - 'dgc_pos_weighted_tf_log_rec_df', - - # tf tuning, neg, df - 'dgc_neg_peak_tf_raw_df', - 'dgc_neg_weighted_tf_raw_df', - 'dgc_neg_weighted_tf_log_raw_df', - 'dgc_neg_weighted_tf_ele_df', - 
'dgc_neg_weighted_tf_log_ele_df', - 'dgc_neg_weighted_tf_rec_df', - 'dgc_neg_weighted_tf_log_rec_df', - - # tf tuning, pos, dff - 'dgc_pos_peak_tf_raw_dff', - 'dgc_pos_weighted_tf_raw_dff', - 'dgc_pos_weighted_tf_log_raw_dff', - 'dgc_pos_weighted_tf_ele_dff', - 'dgc_pos_weighted_tf_log_ele_dff', - 'dgc_pos_weighted_tf_rec_dff', - 'dgc_pos_weighted_tf_log_rec_dff', - - # tf tuning, neg, dff - 'dgc_neg_peak_tf_raw_dff', - 'dgc_neg_weighted_tf_raw_dff', - 'dgc_neg_weighted_tf_log_raw_dff', - 'dgc_neg_weighted_tf_ele_dff', - 'dgc_neg_weighted_tf_log_ele_dff', - 'dgc_neg_weighted_tf_rec_dff', - 'dgc_neg_weighted_tf_log_rec_dff', - - # tf tuning, pos, zscore - 'dgc_pos_peak_tf_raw_z', - 'dgc_pos_weighted_tf_raw_z', - 'dgc_pos_weighted_tf_log_raw_z', - 'dgc_pos_weighted_tf_ele_z', - 'dgc_pos_weighted_tf_log_ele_z', - 'dgc_pos_weighted_tf_rec_z', - 'dgc_pos_weighted_tf_log_rec_z', - - # tf tuning, neg, zscore - 'dgc_neg_peak_tf_raw_z', - 'dgc_neg_weighted_tf_raw_z', - 'dgc_neg_weighted_tf_log_raw_z', - 'dgc_neg_weighted_tf_ele_z', - 'dgc_neg_weighted_tf_log_ele_z', - 'dgc_neg_weighted_tf_rec_z', - 'dgc_neg_weighted_tf_log_rec_z', -] - -def process_one_nwb_for_multi_thread(inputs): - - nwb_path, params, columns, save_folder, t0, nwb_i, nwb_f_num, is_overwrite = inputs - nwb_fn = os.path.splitext(os.path.split(nwb_path)[1])[0] - - nwb_f = h5py.File(nwb_path, 'r') - - plane_ns = [k for k in nwb_f['processing'].keys() if k[0:16] == 'rois_and_traces_'] - plane_ns = [k[16:] for k in plane_ns] - plane_ns.sort() - # print('total plane number: {}'.format(len(plane_ns))) - - for plane_n in plane_ns: - print('\tt: {:5.0f} minutes, processing {}, {} / {}, {} ...'.format((time.time() - t0) / 60., - nwb_fn, - nwb_i + 1, - nwb_f_num, - plane_n)) - - save_fn = '_'.join(nwb_fn.split('_')[0:2]) + '_' + plane_n + '.xlsx' - save_path = os.path.join(save_folder, save_fn) - if os.path.isfile(save_path): - - if is_overwrite: # overwrite existing xlsx files - print('\t{}, file already exists. Overwirite.'.format(os.path.split(save_path)[1])) - os.remove(save_path) - - - else: # do not overwrite existing xlsx files - print('\t{}, file already exists. 
Skip.'.format(os.path.split(save_path)[1])) - return - - roi_ns = nwb_f['processing/rois_and_traces_{}/ImageSegmentation/imaging_plane/roi_list'.format(plane_n)].value - roi_ns = [r.encode('utf-8') for r in roi_ns if r[0:4] == 'roi_'] - roi_ns.sort() - - df = pd.DataFrame(np.nan, index=range(len(roi_ns)), columns=columns) - - for roi_i, roi_n in enumerate(roi_ns): - # print('\t\t\troi: {} / {}'.format(roi_i+1, len(roi_ns))) - roi_properties, _, _, _, _, _, _, _, _, _, _, _, _, _ = \ - dt.get_everything_from_roi(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n, params=params) - for rp_name, rp_value in roi_properties.items(): - df.loc[roi_i, rp_name] = rp_value - - with pd.ExcelWriter(save_path, mode='w') as writer: - df.to_excel(writer, sheet_name='sheet1') - - -def run(): - - t0 = time.time() - - # nwb_fns = [] - # for fn in os.listdir(database_folder): - # if fn[-4:] == '.nwb' and date_range[0] <= int(fn[0:6]) <= date_range[1]: - # nwb_fns.append(fn) - # nwb_fns.sort() - print('\nnwb file list:') - print('\n'.join(nwb_fns)) - - date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - save_folder = os.path.join(curr_folder, '{}_{}'.format(save_folder_n, date_str)) - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - copyfile(os.path.realpath(__file__), os.path.join(save_folder, 'script_log.py')) - - inputs_lst = [(os.path.join(curr_folder, database_folder, nwb_fn), - params, - columns, - save_folder, - t0, - nwb_i, - len(nwb_fns), - is_overwrite) for nwb_i, nwb_fn in enumerate(nwb_fns)] - - print('\nprocessing individual nwb files ...') - p = Pool(process_num) - p.map(process_one_nwb_for_multi_thread, inputs_lst) - # process_one_nwb_for_multi_thread(inputs_lst[0]) - - # print('\nConcatenating indiviudal dataframes ...') - # xlsx_fns = [f for f in os.listdir(os.path.join(curr_folder,save_folder)) if f[-5:] == '.xlsx'] - # xlsx_fns.sort() - # - # dfs = [] - # for xlsx_fn in xlsx_fns: - # curr_df = pd.read_excel(os.path.join(curr_folder, save_folder, xlsx_fn), sheetname='sheet1') - # # print(curr_df) - # dfs.append(curr_df) - # - # big_df = pd.concat(dfs, ignore_index=True) - # - # print('\nsaving ...') - # date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - # save_path = os.path.join(curr_folder, 'big_roi_table_{}.xlsx'.format(date_str)) - # - # if os.path.isfile(save_path): - # with pd.ExcelWriter(save_path, mode='a') as writer: - # big_df.to_excel(writer, sheet_name=params['trace_type']) - # else: - # with pd.ExcelWriter(save_path, mode='w') as writer: - # big_df.to_excel(writer, sheet_name=params['trace_type']) - - print('\ndone!') - - -if __name__ == "__main__": - run() diff --git a/corticalmapping/scripts/post_recording/analysis_database/0043_get_plane_meta.py b/corticalmapping/scripts/post_recording/analysis_database/0043_get_plane_meta.py deleted file mode 100644 index c43a175..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0043_get_plane_meta.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -import datetime -import pandas as pd -import h5py - -df_folder = 'dataframes_190529210731' -save_fn = 'plane_table' -nwb_folder = 'nwbs/small_nwbs' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -fns = [fn for fn in os.listdir(df_folder) if fn[-5:] == '.xlsx'] -print('\n'.join(fns)) - -df = pd.DataFrame(index=range(len(fns)), columns=['date', 'mouse_id', 'plane_n', 'volume_n', - 'depth', 'has_lsn', 'has_dgc']) - -for fn_i, fn in enumerate(fns): - print(fn) - - date = fn.split('_')[0] - mouse_id = fn.split('_')[1] 
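# [editor's note] the filenames parsed here follow the
# <date>_<mouseid>_<plane>.xlsx pattern written by the 0041/0042 scripts
# above, e.g. (illustrative) '190510_M439939_plane0.xlsx' -> date '190510',
# mouse_id 'M439939', and (next line) plane_n 'plane0' via
# fn.split('_')[-1][0:-5], which strips the '.xlsx' extension.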
- plane_n = fn.split('_')[-1][0:-5] - - nwb_path = os.path.join(curr_folder, nwb_folder, '{}_{}_110_repacked.nwb'.format(date, mouse_id)) - nwb_f = h5py.File(nwb_path, 'r') - depth = nwb_f['processing/rois_and_traces_{}/imaging_depth_micron'.format(plane_n)].value - nwb_f.close() - - df.loc[fn_i] = [date, mouse_id, plane_n, '', depth, True, True] - -df.sort_values(by=['mouse_id', 'date', 'plane_n'], inplace=True) -df.reset_index(inplace=True, drop=True) - -print(df) - -date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') -with pd.ExcelWriter('{}_{}.xlsx'.format(save_fn, date_str), mode='w') as writer: - df.to_excel(writer, sheet_name='sheet1') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/0044_reorganize_plane_meta.py b/corticalmapping/scripts/post_recording/analysis_database/0044_reorganize_plane_meta.py deleted file mode 100644 index 8ec4300..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0044_reorganize_plane_meta.py +++ /dev/null @@ -1,19 +0,0 @@ -import os -import datetime -import pandas as pd - -meta_fn = "plane_table_190530165648.xlsx" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -meta_df = pd.read_excel(meta_fn, sheet_name='sheet1') - -meta_df.sort_values(by=['mouse_id', 'volume_n', 'depth'], inplace=True) -meta_df.reset_index(inplace=True, drop=True) - -print(meta_df) - -date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') -with pd.ExcelWriter('plane_table_{}.xlsx'.format(date_str), mode='w') as writer: - meta_df.to_excel(writer, sheet_name='sheet1') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/0070_get_rf_maps.py b/corticalmapping/scripts/post_recording/analysis_database/0070_get_rf_maps.py deleted file mode 100644 index 483f8bb..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0070_get_rf_maps.py +++ /dev/null @@ -1,247 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import numpy as np -import h5py -import datetime -import pandas as pd -import corticalmapping.DatabaseTools as dt -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.SingleCellAnalysis as sca -from shutil import copyfile - -table_folder = 'dataframes_190529210731' -nwb_folder = 'nwbs' -save_folder = "intermediate_results" - -response_dir = 'pos' -skew_thr = 0.6 -analysis_params = dt.ANALYSIS_PARAMS - -notes = ''' - zscore receptive field maps of all significant rois. Spatial temporal receptive fields - are first converted to df/f. Then 2-d zscore maps are generated. Then the zscore maps are - 2d filtered to smooth and interpolated in to high resolution. After preprocessing, if the - peak value of zscore is larger than the threshold, the receptive field will be considered - as sigificant. - ''' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(save_folder, 'rf_maps_' + table_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -copyfile(os.path.realpath(__file__), - os.path.join(save_folder, - 'script_log_{}.py'.format(datetime.datetime.now().strftime('%y%m%d%H%M%S')))) - -table_fns = [f for f in os.listdir(table_folder) if f[-5:] == '.xlsx'] -table_fns.sort() -print('number of planes: {}'.format(len(table_fns))) - -for table_i, table_fn in enumerate(table_fns): - print('\nanalyzing {}, {} / {} ... 
'.format(table_fn, table_i+1, len(table_fns))) - - save_fn = table_fn[0:-5] + '_{}.hdf5'.format(response_dir) - - if os.path.isfile(os.path.join(save_folder, save_fn)): - print('\tAlready analyzed. Skip.') - continue - - df = pd.read_excel(os.path.join(table_folder, table_fn), sheetname='sheet1') - subdf = df[np.logical_not(df['rf_pos_on_peak_z'].isnull())] - subdf = subdf[subdf['skew_fil'] >= skew_thr] - - subdf = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) | - (subdf['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])] - - if len(subdf) > 0: - - save_f = h5py.File(os.path.join(save_folder, save_fn)) - - nwb_fn = table_fn[0:-11] + '110_repacked.nwb' - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - plane_n = table_fn[-11:-5] - - # S2 - s2_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s2_df) > 0: - s2_grp = save_f.create_group(table_fn[0:-5] + '_{}_ONOFF'.format(response_dir)) - s1_on_grp = save_f.create_group(table_fn[0:-5] + '_{}_ON'.format(response_dir)) - s1_off_grp = save_f.create_group(table_fn[0:-5] + '_{}_OFF'.format(response_dir)) - - for roi_i, roi_row in s2_df.iterrows(): - - print('\t s2 receptive fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i+1, len(s2_df))) - - if response_dir == 'pos': - _, _, _, srf_on, srf_off, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - elif response_dir == 'neg': - _, _, _, _, _, srf_on, srf_off, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - else: - raise ValueError - - rf_on_mask = rf_on_new.get_weighted_mask() - rf_off_mask = rf_off_new.get_weighted_mask() - rf_onoff_new = sca.SpatialReceptiveField(mask=np.max([rf_on_mask, rf_off_mask], axis=0), - altPos=rf_on_new.altPos, - aziPos=rf_on_new.aziPos, - sign='ON_OFF', - thr=analysis_params['rf_z_thr_abs']) - - curr_s2_grp = s2_grp.create_group(roi_row['roi_n']) - rf_onoff_new.to_h5_group(curr_s2_grp) - - curr_s1_on_grp = s1_on_grp.create_group(roi_row['roi_n']) - rf_on_new.to_h5_group(curr_s1_on_grp) - - curr_s1_off_grp = s1_off_grp.create_group(roi_row['roi_n']) - 
rf_off_new.to_h5_group(curr_s1_off_grp) - - - # positive S1 ON - s1_on_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] < analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s1_on_df) > 0: - - s1_on_grp_n = table_fn[0:-5] + '_{}_ON'.format(response_dir) - - if s1_on_grp_n in save_f.keys(): - s1_on_grp = save_f[s1_on_grp_n] - else: - s1_on_grp = save_f.create_group(s1_on_grp_n) - - for roi_i, roi_row in s1_on_df.iterrows(): - - print('\t s1 ON receptive fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i + 1, len(s1_on_df))) - - if response_dir == 'pos': - _, _, _, srf_on, _, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - elif response_dir == 'neg': - _, _, _, _, _, srf_on, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - else: - print(response_dir) - raise ValueError - - curr_s1_on_grp = s1_on_grp.create_group(roi_row['roi_n']) - rf_on_new.to_h5_group(curr_s1_on_grp) - - - # positive S1 OFF - s1_off_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] < analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s1_off_df) > 0: - - s1_off_grp_n = table_fn[0:-5] + '_{}_OFF'.format(response_dir) - - if s1_off_grp_n in save_f.keys(): - s1_off_grp = save_f[s1_off_grp_n] - else: - s1_off_grp = save_f.create_group(s1_off_grp_n) - - for roi_i, roi_row in s1_off_df.iterrows(): - - print('\t s1 OFF receptive fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i + 1, len(s1_off_df))) - - if response_dir == 'pos': - _, _, _, _, srf_off, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - elif response_dir == 'neg': - - _, _, _, _, _, _, srf_off, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - else: - raise ValueError - - curr_s1_off_grp = s1_off_grp.create_group(roi_row['roi_n']) - rf_off_new.to_h5_group(curr_s1_off_grp) - - - save_f.close() \ No newline at end of file diff --git 
a/corticalmapping/scripts/post_recording/analysis_database/0075_get_rf_maps_axon.py b/corticalmapping/scripts/post_recording/analysis_database/0075_get_rf_maps_axon.py deleted file mode 100644 index 3cc53f6..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0075_get_rf_maps_axon.py +++ /dev/null @@ -1,257 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import numpy as np -import h5py -# import datetime -import pandas as pd -import corticalmapping.DatabaseTools as dt -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.SingleCellAnalysis as sca -from shutil import copyfile - -df_fn = 'dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30.csv' -clu_folder = r'intermediate_results\bouton_clustering\AllStimuli_DistanceThr_1.30' -nwb_folder = 'nwbs' -save_folder = "intermediate_results" - -response_dir = 'pos' -skew_thr = 0.6 -analysis_params = dt.ANALYSIS_PARAMS - -notes = ''' - zscore receptive field maps of all significant rois. Spatial temporal receptive fields - are first converted to df/f. Then 2-d zscore maps are generated. Then the zscore maps are - 2d filtered for smoothing and interpolated into high resolution. After preprocessing, if the - peak value of the zscore map is larger than the threshold, the receptive field is considered - significant. - ''' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(save_folder, 'rf_maps_' + os.path.splitext(df_fn)[0]) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -copyfile(os.path.realpath(__file__), - os.path.join(save_folder, - 'script_log.py')) - -df_axon = pd.read_csv(df_fn) -df_axon = df_axon[np.logical_not(df_axon['rf_{}_on_peak_z'.format(response_dir)].isnull())] -df_axon = df_axon[df_axon['skew_fil'] >= skew_thr] - -df_axon = df_axon[(df_axon['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) | - (df_axon['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])] - -plane_df = df_axon[['date', 'mouse_id', 'plane_n']].drop_duplicates().reset_index() -print('total number of planes with lsn data: {}'.format(len(plane_df))) - -for plane_i, plane_row in plane_df.iterrows(): - date = int(plane_row['date']) - mid = plane_row['mouse_id'] - plane_n = plane_row['plane_n'] - - print('processing {}_{}_{}, {} / {}'.format(date, mid, plane_n, plane_i+1, len(plane_df))) - - subdf = df_axon[(df_axon['date'] == date) & - (df_axon['mouse_id'] == mid) & - (df_axon['plane_n'] == plane_n)] - - nwb_fn = '{}_{}_110_repacked.nwb'.format(date, mid) - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - - clu_fn = '{}_{}_{}_axon_grouping.hdf5'.format(date, mid, plane_n) - clu_f = h5py.File(os.path.join(clu_folder, clu_fn), 'r') - - save_fn = '{}_{}_{}_{}.hdf5'.format(date, mid, plane_n, response_dir) - save_f = h5py.File(os.path.join(save_folder, save_fn)) - - # S2 - s2_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s2_df) > 0: - s2_grp = save_f.create_group('{}_{}_{}_{}_ONOFF'.format(date, mid, plane_n, response_dir)) - s1_on_grp = save_f.create_group('{}_{}_{}_{}_ON'.format(date, mid, plane_n, response_dir)) - s1_off_grp = save_f.create_group('{}_{}_{}_{}_OFF'.format(date, mid, plane_n, response_dir)) - - for roi_i, roi_row in s2_df.iterrows(): - - print('\t s2 receptive fields, {}, {} / 
{} ...'.format(roi_row['roi_n'], roi_i + 1, len(s2_df))) - - if response_dir == 'pos': - _, _, _, srf_on, srf_off, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - elif response_dir == 'neg': - _, _, _, _, _, srf_on, srf_off, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - else: - raise ValueError - - rf_on_mask = rf_on_new.get_weighted_mask() - rf_off_mask = rf_off_new.get_weighted_mask() - rf_onoff_new = sca.SpatialReceptiveField(mask=np.max([rf_on_mask, rf_off_mask], axis=0), - altPos=rf_on_new.altPos, - aziPos=rf_on_new.aziPos, - sign='ON_OFF', - thr=analysis_params['rf_z_thr_abs']) - - curr_s2_grp = s2_grp.create_group(roi_row['roi_n']) - rf_onoff_new.to_h5_group(curr_s2_grp) - - curr_s1_on_grp = s1_on_grp.create_group(roi_row['roi_n']) - rf_on_new.to_h5_group(curr_s1_on_grp) - - curr_s1_off_grp = s1_off_grp.create_group(roi_row['roi_n']) - rf_off_new.to_h5_group(curr_s1_off_grp) - - # positive S1 ON - s1_on_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] < analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s1_on_df) > 0: - - s1_on_grp_n = '{}_{}_{}_{}_ON'.format(date, mid, plane_n, response_dir) - - if s1_on_grp_n in save_f.keys(): - s1_on_grp = save_f[s1_on_grp_n] - else: - s1_on_grp = save_f.create_group(s1_on_grp_n) - - for roi_i, roi_row in s1_on_df.iterrows(): - - print('\t s1 ON receptive fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i + 1, len(s1_on_df))) - - if response_dir == 'pos': - _, _, _, srf_on, _, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - elif response_dir == 'neg': - _, _, _, _, _, srf_on, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, 
rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - else: - print(response_dir) - raise ValueError - - curr_s1_on_grp = s1_on_grp.create_group(roi_row['roi_n']) - rf_on_new.to_h5_group(curr_s1_on_grp) - - # positive S1 OFF - s1_off_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] < analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s1_off_df) > 0: - - s1_off_grp_n = '{}_{}_{}_{}_OFF'.format(date, mid, plane_n, response_dir) - - if s1_off_grp_n in save_f.keys(): - s1_off_grp = save_f[s1_off_grp_n] - else: - s1_off_grp = save_f.create_group(s1_off_grp_n) - - for roi_i, roi_row in s1_off_df.iterrows(): - - print('\t s1 OFF receptive fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i + 1, len(s1_off_df))) - - if response_dir == 'pos': - _, _, _, _, srf_off, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - elif response_dir == 'neg': - - _, _, _, _, _, _, srf_off, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - else: - raise ValueError - - curr_s1_off_grp = s1_off_grp.create_group(roi_row['roi_n']) - rf_off_new.to_h5_group(curr_s1_off_grp) - - save_f.close() - - - - diff --git a/corticalmapping/scripts/post_recording/analysis_database/0080_plot_plane_rf_center.py b/corticalmapping/scripts/post_recording/analysis_database/0080_plot_plane_rf_center.py deleted file mode 100644 index d849ca4..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0080_plot_plane_rf_center.py +++ /dev/null @@ -1,262 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import numpy as np -import matplotlib.pyplot as plt -import pandas as pd -from matplotlib.backends.backend_pdf import PdfPages -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.DatabaseTools as dt - -table_name = 'big_roi_table_test.xlsx' -sheet_name = 'f_center_subtracted' - -response_dir = 'pos' -skew_thr = 0.6 -rf_peak_z_thr = 1.6 - -save_fn = 'plane_rf_centers.pdf' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -table_path = os.path.join(curr_folder, table_name) -df = pd.read_excel(table_path, sheetname=sheet_name) -subdf = df[df['skew_fil'] >= skew_thr] - -planes = subdf[['date', 'mouse_id', 'plane_n', 'depth']].drop_duplicates().reset_index() -print(planes) - -pdff = PdfPages(os.path.join('intermediate_figures', save_fn)) - -for plane_i, plane_row in planes.iterrows(): - - print('plotting {}_{}_{}, {} / {}'.format( - plane_row['date'], 
- plane_row['mouse_id'], - plane_row['plane_n'], - plane_i + 1, - len(planes))) - - planedf = subdf[(subdf['date'] == plane_row['date']) & \ - (subdf['mouse_id'] == plane_row['mouse_id']) & \ - (subdf['plane_n'] == plane_row['plane_n']) & \ - (subdf['depth'] == plane_row['depth'])] - - df_or = planedf[planedf['rf_{}_onoff_peak_z'.format(response_dir)] >= rf_peak_z_thr] - df_and = planedf[(planedf['rf_{}_on_peak_z'.format(response_dir)] >= rf_peak_z_thr) & \ - (planedf['rf_{}_off_peak_z'.format(response_dir)] >= rf_peak_z_thr)] - df_on = planedf[planedf['rf_{}_on_peak_z'.format(response_dir)] >= rf_peak_z_thr].drop(df_and.index) - df_off = planedf[planedf['rf_{}_off_peak_z'.format(response_dir)] >= rf_peak_z_thr].drop(df_and.index) - - df_or = df_or.reset_index() - df_and = df_and.reset_index() - df_on = df_on.reset_index() - df_off = df_off.reset_index() - - if len(df_or) == 0: - print('no significant receptive fields. skip.') - else: - print('\tnumber of rois with significant rf: {}'.format(len(df_or))) - print('\tnumber of rois with S1 ON: {}'.format(len(df_on))) - print('\tnumber of rois with S1 OFF: {}'.format(len(df_off))) - print('\tnumber of rois with S2 ON/OFF: {}'.format(len(df_and))) - - f = plt.figure(figsize=(11, 8.5)) - - f.suptitle('{}_{}_{}; {} um'.format(plane_row['date'], - plane_row['mouse_id'], - plane_row['plane_n'], - plane_row['depth'])) - - #=============================RF center============================================= - # ON/OFF - alt_min = int(np.min(df_or['rf_{}_onoff_center_alt'.format(response_dir)]) - 5) - alt_max = int(np.max(df_or['rf_{}_onoff_center_alt'.format(response_dir)]) + 5) - azi_min = int(np.min(df_or['rf_{}_onoff_center_azi'.format(response_dir)]) - 5) - azi_max = int(np.max(df_or['rf_{}_onoff_center_azi'.format(response_dir)]) + 5) - ax_or_scatter = f.add_subplot(4, 5, 1) - ax_or_scatter.plot(df_or['rf_{}_onoff_center_azi'.format(response_dir)], - df_or['rf_{}_onoff_center_alt'.format(response_dir)], - '.', color='#888888') - ax_or_scatter.set_xlim([azi_min, azi_max]) - ax_or_scatter.set_ylim([alt_min, alt_max]) - ax_or_scatter.set_title('RF center') - - # ON - ax_on_scatter = f.add_subplot(4, 5, 6) - ax_on_scatter.plot(df_off['rf_{}_off_center_azi'.format(response_dir)], - df_off['rf_{}_off_center_alt'.format(response_dir)], - '.', color='#aaaaaa') - ax_on_scatter.plot(df_on['rf_{}_on_center_azi'.format(response_dir)], - df_on['rf_{}_on_center_alt'.format(response_dir)], - '.', color='#ff0000') - ax_on_scatter.set_xlim([azi_min, azi_max]) - ax_on_scatter.set_ylim([alt_min, alt_max]) - - # OFF - ax_off_scatter = f.add_subplot(4, 5, 11) - ax_off_scatter.plot(df_on['rf_{}_on_center_azi'.format(response_dir)], - df_on['rf_{}_on_center_alt'.format(response_dir)], - '.', color='#aaaaaa') - ax_off_scatter.plot(df_off['rf_{}_off_center_azi'.format(response_dir)], - df_off['rf_{}_off_center_alt'.format(response_dir)], - '.', color='#0000ff') - ax_off_scatter.set_xlim([azi_min, azi_max]) - ax_off_scatter.set_ylim([alt_min, alt_max]) - - # ON-OFF - ax_and_scatter = f.add_subplot(4, 5, 16) - ax_and_scatter.plot(df_and['rf_{}_on_center_azi'.format(response_dir)], - df_and['rf_{}_on_center_alt'.format(response_dir)], - '.', color='#ff0000') - ax_and_scatter.plot(df_and['rf_{}_off_center_azi'.format(response_dir)], - df_and['rf_{}_off_center_alt'.format(response_dir)], - '.', color='#0000ff') - ax_and_scatter.set_xlim([azi_min, azi_max]) - ax_and_scatter.set_ylim([alt_min, alt_max]) - - # =============================pairwise 
distance============================================= - dis_or = ia.pairwise_distance(df_or[['rf_{}_onoff_center_azi'.format(response_dir), - 'rf_{}_onoff_center_alt'.format(response_dir)]].values) - ax_or_pd = f.add_subplot(4, 5, 2) - if len(dis_or) > 0: - ax_or_pd.hist(dis_or, range=[0, 80], bins=20, facecolor='#aaaaaa', edgecolor='none') - ax_or_pd.get_yaxis().set_ticks([]) - ax_or_pd.set_title('pw RF dis') # pairwise receptive field center distance - - dis_on = ia.pairwise_distance(df_on[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values) - ax_on_pd = f.add_subplot(4, 5, 7) - if len(dis_on) > 0: - ax_on_pd.hist(dis_on, range=[0, 80], bins=20, facecolor='#ff0000', edgecolor='none') - ax_on_pd.get_yaxis().set_ticks([]) - - dis_off = ia.pairwise_distance(df_off[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values) - ax_off_pd = f.add_subplot(4, 5, 12) - if len(dis_off) > 0: - ax_off_pd.hist(dis_off, range=[0, 80], bins=20, facecolor='#0000ff', edgecolor='none') - ax_off_pd.get_yaxis().set_ticks([]) - - dis_and_on = ia.pairwise_distance(df_and[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values) - dis_and_off = ia.pairwise_distance(df_and[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values) - ax_and_pd = f.add_subplot(4, 5, 17) - if len(dis_and_on) > 0: - ax_and_pd.hist(dis_and_on, range=[0, 80], bins=20, facecolor='#ff0000', edgecolor='none', alpha=0.5) - ax_and_pd.hist(dis_and_off, range=[0, 80], bins=20, facecolor='#0000ff', edgecolor='none', alpha=0.5) - ax_and_pd.get_yaxis().set_ticks([]) - - # =============================pairwise magnification============================================= - mag_or = ia.pairwise_magnification(df_or[['rf_{}_onoff_center_azi'.format(response_dir), - 'rf_{}_onoff_center_alt'.format(response_dir)]].values, - df_or[['roi_center_col', 'roi_center_row']].values) - ax_or_pm = f.add_subplot(4, 5, 3) - if len(mag_or) > 0: - mag_or = 0.00035 / mag_or # 0.35 um per pixel - ax_or_pm.hist(mag_or, range=[0, 0.2], bins=20, facecolor='#aaaaaa', edgecolor='none') - ax_or_pm.get_yaxis().set_ticks([]) - ax_or_pm.set_title('mm/deg') # pairwise magnification - # - mag_on = ia.pairwise_magnification(df_on[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values, - df_on[['roi_center_col', 'roi_center_row']].values) - ax_on_pm = f.add_subplot(4, 5, 8) - if len(mag_on) > 0: - mag_on = 0.00035 / mag_on # 0.35 um per pixel - ax_on_pm.hist(mag_on, range=[0, 0.2], bins=20, facecolor='#ff0000', edgecolor='none') - ax_on_pm.get_yaxis().set_ticks([]) - - mag_off = ia.pairwise_magnification(df_off[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values, - df_off[['roi_center_col', 'roi_center_row']].values) - ax_off_pm = f.add_subplot(4, 5, 13) - if len(mag_off) > 0: - mag_off = 0.00035 / mag_off # 0.35 um per pixel - ax_off_pm.hist(mag_off, range=[0, 0.2], bins=20, facecolor='#0000ff', edgecolor='none') - ax_off_pm.get_yaxis().set_ticks([]) - - mag_and_on = ia.pairwise_magnification(df_and[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values, - df_and[['roi_center_col', 'roi_center_row']].values) - - mag_and_off = ia.pairwise_magnification(df_and[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values, - 
df_and[['roi_center_col', 'roi_center_row']].values) - - ax_and_pm = f.add_subplot(4, 5, 18) - if len(mag_and_on) > 0: - mag_and_on = 0.00035 / mag_and_on # 0.35 um per pixel - mag_and_off = 0.00035 / mag_and_off # 0.35 um per pixel - ax_and_pm.hist(mag_and_on, range=[0, 0.2], bins=20, facecolor='#ff0000', edgecolor='none', alpha=0.5,) - ax_and_pm.hist(mag_and_off, range=[0, 0.2], bins=20, facecolor='#0000ff', edgecolor='none', alpha=0.5,) - ax_and_pm.get_yaxis().set_ticks([]) - - # =============================azi alt spatial distribution============================================= - ax_alt_or = f.add_subplot(4, 5, 4) - ax_alt_or.set_title('altitude') - ax_azi_or = f.add_subplot(4, 5, 5) - ax_azi_or.set_title('azimuth') - if len(df_or) > 0: - dt.plot_roi_retinotopy(coords_rf=df_or[['rf_{}_onoff_center_alt'.format(response_dir), - 'rf_{}_onoff_center_azi'.format(response_dir)]].values, - coords_roi=df_or[['roi_center_row', 'roi_center_col']].values, - ax_alt=ax_alt_or, - ax_azi=ax_azi_or, - cmap='viridis', - canvas_shape=(512, 512), - edgecolors='#000000', - linewidth=0.5) - else: - ax_alt_or.set_xticks([]) - ax_alt_or.set_yticks([]) - ax_azi_or.set_xticks([]) - ax_azi_or.set_yticks([]) - - ax_alt_on = f.add_subplot(4, 5, 9) - ax_azi_on = f.add_subplot(4, 5, 10) - if len(df_on) > 0: - dt.plot_roi_retinotopy(coords_rf=df_on[['rf_{}_on_center_alt'.format(response_dir), - 'rf_{}_on_center_azi'.format(response_dir)]].values, - coords_roi=df_on[['roi_center_row', 'roi_center_col']].values, - ax_alt=ax_alt_on, - ax_azi=ax_azi_on, - cmap='viridis', - canvas_shape=(512, 512), - edgecolors='#000000', - linewidth=0.5) - else: - ax_alt_on.set_xticks([]) - ax_alt_on.set_yticks([]) - ax_azi_on.set_xticks([]) - ax_azi_on.set_yticks([]) - - ax_alt_off = f.add_subplot(4, 5, 14) - ax_azi_off = f.add_subplot(4, 5, 15) - if len(df_off) > 0: - dt.plot_roi_retinotopy(coords_rf=df_off[['rf_{}_off_center_alt'.format(response_dir), - 'rf_{}_off_center_azi'.format(response_dir)]].values, - coords_roi=df_off[['roi_center_row', 'roi_center_col']].values, - ax_alt=ax_alt_off, - ax_azi=ax_azi_off, - cmap='viridis', - canvas_shape=(512, 512), - edgecolors='#000000', - linewidth=0.5) - else: - ax_alt_off.set_xticks([]) - ax_alt_off.set_yticks([]) - ax_azi_off.set_xticks([]) - ax_azi_off.set_yticks([]) - - # plt.tight_layout() - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - -pdff.close() - -print('for debug ...') diff --git a/corticalmapping/scripts/post_recording/analysis_database/0090_group_boutons_into_axons.py b/corticalmapping/scripts/post_recording/analysis_database/0090_group_boutons_into_axons.py deleted file mode 100644 index d34cbc9..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0090_group_boutons_into_axons.py +++ /dev/null @@ -1,70 +0,0 @@ -import os -import numpy as np -import h5py -import corticalmapping.DatabaseTools as dt -import corticalmapping.SingleCellAnalysis as sca -import matplotlib.pyplot as plt -import pandas as pd - - -nwb_folder = "nwbs" -save_folder = r"intermediate_results\bouton_clustering" -trace_type = 'f_center_subtracted' -trace_window = 'UniformContrast' # 'AllStimuli', 'UniformContrast', 'LocallySparseNoise', or 'DriftingGratingSpont' - -# BoutonClassifier parameters -skew_filter_sigma = 5. -skew_thr = 0.6 -lowpass_sigma=0.1 -detrend_sigma=3. -event_std_thr = 3. -peri_event_dur = (-1., 3.) -corr_len_thr = 30. -corr_abs_thr = 0.7 -corr_std_thr = 3. 
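The four parameters below configure the final agglomerative-clustering stage that groups correlated boutons into putative axons. What follows is a minimal sketch of that kind of step, assuming `traces` is a hypothetical (n_boutons, n_samples) activity array and reusing the euclidean/weighted/1.3 settings; the actual BoutonClassifier additionally applies the skewness, event, and correlation filters configured above.

import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import pdist

traces = np.random.rand(20, 1000)          # hypothetical bouton traces
dists = pdist(traces, metric='euclidean')  # condensed pairwise distance vector
tree = linkage(dists, method='weighted')   # agglomerative cluster tree
axon_labels = fcluster(tree, t=1.3, criterion='distance')  # one axon id per bouton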
-is_cosine_similarity = False -distance_metric = 'euclidean' -linkage_method = 'weighted' -distance_thr = 1.3 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(save_folder, '{}_DistanceThr_{:.2f}'.format(trace_window, distance_thr)) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fns = [f for f in os.listdir(nwb_folder) if f[-4:] == '.nwb'] -nwb_fns.sort() - -bc = dt.BoutonClassifier(skew_filter_sigma=skew_filter_sigma, - skew_thr=skew_thr, - lowpass_sigma=lowpass_sigma, - detrend_sigma=detrend_sigma, - event_std_thr=event_std_thr, - peri_event_dur=peri_event_dur, - corr_len_thr=corr_len_thr, - corr_abs_thr=corr_abs_thr, - corr_std_thr=corr_std_thr, - is_cosine_similarity=is_cosine_similarity, - distance_metric=distance_metric, - linkage_method=linkage_method, - distance_thr=distance_thr) - -for nwb_fi, nwb_fn in enumerate(nwb_fns): - - print('processing {}, {}/{}'.format(nwb_fn, nwb_fi + 1, len(nwb_fns))) - - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - - plane_ns = dt.get_plane_ns(nwb_f=nwb_f) - plane_ns.sort() - - for plane_i, plane_n in enumerate(plane_ns): - - print('\n\t{}, {}/{}'.format(plane_n, plane_i + 1, len(plane_ns))) - - bc.process_plane(nwb_f=nwb_f, save_folder=save_folder, plane_n=plane_n, trace_type=trace_type, - trace_window=trace_window) - - nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/analysis_database/0100_add_axon_strf.py b/corticalmapping/scripts/post_recording/analysis_database/0100_add_axon_strf.py deleted file mode 100644 index 585ea47..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0100_add_axon_strf.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import h5py -import corticalmapping.DatabaseTools as dt - -nwb_folder = "nwbs" -clu_folder = r"intermediate_results\bouton_clustering\AllStimuli_DistanceThr_1.30" -strf_t_win = [-0.5, 2.] 
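A minimal sketch of how a peri-stimulus window such as `strf_t_win` is typically applied when collecting trial responses around stimulus onsets; the frame rate, onset times, and trace below are hypothetical, and the internals of `add_axon_strf` may differ.

import numpy as np

fs = 30.                               # hypothetical imaging frame rate (Hz)
onsets = np.array([12.4, 30.1, 55.8])  # hypothetical stimulus onset times (s)
trace = np.random.rand(3000)           # hypothetical axon trace, 100 s at 30 Hz
t_win = [-0.5, 2.]                     # seconds around each onset
n0, n1 = int(t_win[0] * fs), int(t_win[1] * fs)
trials = np.array([trace[int(t * fs) + n0: int(t * fs) + n1] for t in onsets])
mean_response = trials.mean(axis=0)    # average time course, -0.5 s to +2 s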
- -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -clu_fns = [f for f in os.listdir(clu_folder) if f[-5:] == '.hdf5'] -clu_fns.sort() -print('total number of planes: {}'.format(len(clu_fns))) - -for clu_fi, clu_fn in enumerate(clu_fns): - - date, mid, plane_n, _, _ = clu_fn.split('_') - - print('processing {}_{}_{}, {} / {}'.format(date, mid, plane_n, clu_fi + 1, len(clu_fns))) - - nwb_fn = '{}_{}_110_repacked.nwb'.format(date, mid) - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - - clu_f = h5py.File(os.path.join(clu_folder, clu_fn)) - - bc = dt.BoutonClassifier() - bc.add_axon_strf(nwb_f=nwb_f, clu_f=clu_f, plane_n=plane_n, t_win=strf_t_win, verbose=False) - - nwb_f.close() - clu_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/0110_add_axon_dgcrm.py b/corticalmapping/scripts/post_recording/analysis_database/0110_add_axon_dgcrm.py deleted file mode 100644 index 68c304e..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0110_add_axon_dgcrm.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import h5py -import corticalmapping.DatabaseTools as dt - -nwb_folder = "nwbs" -clu_folder = r"intermediate_results\bouton_clustering\AllStimuli_DistanceThr_1.30" -dgcrm_t_win = [-1., 2.5] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -clu_fns = [f for f in os.listdir(clu_folder) if f[-5:] == '.hdf5'] -clu_fns.sort() -print('total number of planes: {}'.format(len(clu_fns))) - -for clu_fi, clu_fn in enumerate(clu_fns): - - date, mid, plane_n, _, _ = clu_fn.split('_') - - print('processing {}_{}_{}, {} / {}'.format(date, mid, plane_n, clu_fi + 1, len(clu_fns))) - - nwb_fn = '{}_{}_110_repacked.nwb'.format(date, mid) - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - - clu_f = h5py.File(os.path.join(clu_folder, clu_fn)) - - bc = dt.BoutonClassifier() - bc.add_axon_dgcrm(nwb_f=nwb_f, clu_f=clu_f, plane_n=plane_n, t_win=dgcrm_t_win, verbose=False) - - nwb_f.close() - clu_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/0120_get_axon_plane_df.py b/corticalmapping/scripts/post_recording/analysis_database/0120_get_axon_plane_df.py deleted file mode 100644 index 3df2aa4..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0120_get_axon_plane_df.py +++ /dev/null @@ -1,413 +0,0 @@ -import os -import corticalmapping.DatabaseTools as dt -import time -import pandas as pd -import numpy as np -import h5py -from multiprocessing import Pool -import shutil - -date_range = [180301, 190601] -nwb_folder = 'nwbs' -df_folder = r'other_dataframes\dataframes_190530171338' -clu_folder = r'intermediate_results\bouton_clustering\AllStimuli_DistanceThr_1.30' -process_num = 6 -is_overwrite = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('pandas version: {}\n'.format(pd.__version__)) - -columns = [ - 'date', - 'mouse_id', - 'plane_n', - 'roi_n', - 'depth', # microns under pia, float - - # roi mask - 'roi_area', # square micron - 'roi_center_row', # center of roi mask in field of view, row - 'roi_center_col', # center of roi mask in field of view, column - - # trace skewness - 'skew_raw', # skewness of unfiltered trace (neuropil subtracted), float - 'skew_fil', # skewness of highpassed trace, float - - # receptive fields - 'rf_pos_on_peak_z', - 'rf_pos_on_area', - 'rf_pos_on_center_alt', - 'rf_pos_on_center_azi', - - 'rf_pos_off_peak_z', - 
'rf_pos_off_area', - 'rf_pos_off_center_alt', - 'rf_pos_off_center_azi', - - 'rf_pos_onoff_peak_z', - 'rf_pos_onoff_area', - 'rf_pos_onoff_center_alt', - 'rf_pos_onoff_center_azi', - - 'rf_pos_lsi', - - 'rf_neg_on_peak_z', - 'rf_neg_on_area', - 'rf_neg_on_center_alt', - 'rf_neg_on_center_azi', - - 'rf_neg_off_peak_z', - 'rf_neg_off_area', - 'rf_neg_off_center_alt', - 'rf_neg_off_center_azi', - - 'rf_neg_onoff_peak_z', - 'rf_neg_onoff_area', - 'rf_neg_onoff_center_alt', - 'rf_neg_onoff_center_azi', - - 'rf_neg_lsi', - - # drifting grating peak response - 'dgc_pos_peak_df', - 'dgc_neg_peak_df', - 'dgc_pos_p_ttest_df', - 'dgc_neg_p_ttest_df', - 'dgc_p_anova_df', - - 'dgc_pos_peak_dff', - 'dgc_neg_peak_dff', - 'dgc_pos_p_ttest_dff', - 'dgc_neg_p_ttest_dff', - 'dgc_p_anova_dff', - - 'dgc_pos_peak_z', - 'dgc_neg_peak_z', - 'dgc_pos_p_ttest_z', - 'dgc_neg_p_ttest_z', - 'dgc_p_anova_z', - - # direction / orientation tuning, pos, df - 'dgc_pos_osi_raw_df', - 'dgc_pos_dsi_raw_df', - 'dgc_pos_gosi_raw_df', - 'dgc_pos_gdsi_raw_df', - 'dgc_pos_osi_ele_df', - 'dgc_pos_dsi_ele_df', - 'dgc_pos_gosi_ele_df', - 'dgc_pos_gdsi_ele_df', - 'dgc_pos_osi_rec_df', - 'dgc_pos_dsi_rec_df', - 'dgc_pos_gosi_rec_df', - 'dgc_pos_gdsi_rec_df', - 'dgc_pos_peak_dire_raw_df', - 'dgc_pos_vs_dire_raw_df', - 'dgc_pos_vs_dire_ele_df', - 'dgc_pos_vs_dire_rec_df', - - # direction / orientation tuning, neg, df - 'dgc_neg_osi_raw_df', - 'dgc_neg_dsi_raw_df', - 'dgc_neg_gosi_raw_df', - 'dgc_neg_gdsi_raw_df', - 'dgc_neg_osi_ele_df', - 'dgc_neg_dsi_ele_df', - 'dgc_neg_gosi_ele_df', - 'dgc_neg_gdsi_ele_df', - 'dgc_neg_osi_rec_df', - 'dgc_neg_dsi_rec_df', - 'dgc_neg_gosi_rec_df', - 'dgc_neg_gdsi_rec_df', - 'dgc_neg_peak_dire_raw_df', - 'dgc_neg_vs_dire_raw_df', - 'dgc_neg_vs_dire_ele_df', - 'dgc_neg_vs_dire_rec_df', - - # direction / orientation tuning, pos, dff - 'dgc_pos_osi_raw_dff', - 'dgc_pos_dsi_raw_dff', - 'dgc_pos_gosi_raw_dff', - 'dgc_pos_gdsi_raw_dff', - 'dgc_pos_osi_ele_dff', - 'dgc_pos_dsi_ele_dff', - 'dgc_pos_gosi_ele_dff', - 'dgc_pos_gdsi_ele_dff', - 'dgc_pos_osi_rec_dff', - 'dgc_pos_dsi_rec_dff', - 'dgc_pos_gosi_rec_dff', - 'dgc_pos_gdsi_rec_dff', - 'dgc_pos_peak_dire_raw_dff', - 'dgc_pos_vs_dire_raw_dff', - 'dgc_pos_vs_dire_ele_dff', - 'dgc_pos_vs_dire_rec_dff', - - # direction / orientation tuning, neg, dff - 'dgc_neg_osi_raw_dff', - 'dgc_neg_dsi_raw_dff', - 'dgc_neg_gosi_raw_dff', - 'dgc_neg_gdsi_raw_dff', - 'dgc_neg_osi_ele_dff', - 'dgc_neg_dsi_ele_dff', - 'dgc_neg_gosi_ele_dff', - 'dgc_neg_gdsi_ele_dff', - 'dgc_neg_osi_rec_dff', - 'dgc_neg_dsi_rec_dff', - 'dgc_neg_gosi_rec_dff', - 'dgc_neg_gdsi_rec_dff', - 'dgc_neg_peak_dire_raw_dff', - 'dgc_neg_vs_dire_raw_dff', - 'dgc_neg_vs_dire_ele_dff', - 'dgc_neg_vs_dire_rec_dff', - - # direction / orientation tuning, pos, zscore - 'dgc_pos_osi_raw_z', - 'dgc_pos_dsi_raw_z', - 'dgc_pos_gosi_raw_z', - 'dgc_pos_gdsi_raw_z', - 'dgc_pos_osi_ele_z', - 'dgc_pos_dsi_ele_z', - 'dgc_pos_gosi_ele_z', - 'dgc_pos_gdsi_ele_z', - 'dgc_pos_osi_rec_z', - 'dgc_pos_dsi_rec_z', - 'dgc_pos_gosi_rec_z', - 'dgc_pos_gdsi_rec_z', - 'dgc_pos_peak_dire_raw_z', - 'dgc_pos_vs_dire_raw_z', - 'dgc_pos_vs_dire_ele_z', - 'dgc_pos_vs_dire_rec_z', - - # direction / orientation tuning, neg, zscore - 'dgc_neg_osi_raw_z', - 'dgc_neg_dsi_raw_z', - 'dgc_neg_gosi_raw_z', - 'dgc_neg_gdsi_raw_z', - 'dgc_neg_osi_ele_z', - 'dgc_neg_dsi_ele_z', - 'dgc_neg_gosi_ele_z', - 'dgc_neg_gdsi_ele_z', - 'dgc_neg_osi_rec_z', - 'dgc_neg_dsi_rec_z', - 'dgc_neg_gosi_rec_z', - 'dgc_neg_gdsi_rec_z', - 'dgc_neg_peak_dire_raw_z', - 
'dgc_neg_vs_dire_raw_z', - 'dgc_neg_vs_dire_ele_z', - 'dgc_neg_vs_dire_rec_z', - - # sf tuning, pos, df - 'dgc_pos_peak_sf_raw_df', - 'dgc_pos_weighted_sf_raw_df', - 'dgc_pos_weighted_sf_log_raw_df', - 'dgc_pos_weighted_sf_ele_df', - 'dgc_pos_weighted_sf_log_ele_df', - 'dgc_pos_weighted_sf_rec_df', - 'dgc_pos_weighted_sf_log_rec_df', - - # sf tuning, neg, df - 'dgc_neg_peak_sf_raw_df', - 'dgc_neg_weighted_sf_raw_df', - 'dgc_neg_weighted_sf_log_raw_df', - 'dgc_neg_weighted_sf_ele_df', - 'dgc_neg_weighted_sf_log_ele_df', - 'dgc_neg_weighted_sf_rec_df', - 'dgc_neg_weighted_sf_log_rec_df', - - # sf tuning, pos, dff - 'dgc_pos_peak_sf_raw_dff', - 'dgc_pos_weighted_sf_raw_dff', - 'dgc_pos_weighted_sf_log_raw_dff', - 'dgc_pos_weighted_sf_ele_dff', - 'dgc_pos_weighted_sf_log_ele_dff', - 'dgc_pos_weighted_sf_rec_dff', - 'dgc_pos_weighted_sf_log_rec_dff', - - # sf tuning, neg, dff - 'dgc_neg_peak_sf_raw_dff', - 'dgc_neg_weighted_sf_raw_dff', - 'dgc_neg_weighted_sf_log_raw_dff', - 'dgc_neg_weighted_sf_ele_dff', - 'dgc_neg_weighted_sf_log_ele_dff', - 'dgc_neg_weighted_sf_rec_dff', - 'dgc_neg_weighted_sf_log_rec_dff', - - # sf tuning, pos, zscore - 'dgc_pos_peak_sf_raw_z', - 'dgc_pos_weighted_sf_raw_z', - 'dgc_pos_weighted_sf_log_raw_z', - 'dgc_pos_weighted_sf_ele_z', - 'dgc_pos_weighted_sf_log_ele_z', - 'dgc_pos_weighted_sf_rec_z', - 'dgc_pos_weighted_sf_log_rec_z', - - # sf tuning, neg, zscore - 'dgc_neg_peak_sf_raw_z', - 'dgc_neg_weighted_sf_raw_z', - 'dgc_neg_weighted_sf_log_raw_z', - 'dgc_neg_weighted_sf_ele_z', - 'dgc_neg_weighted_sf_log_ele_z', - 'dgc_neg_weighted_sf_rec_z', - 'dgc_neg_weighted_sf_log_rec_z', - - # tf tuning, pos, df - 'dgc_pos_peak_tf_raw_df', - 'dgc_pos_weighted_tf_raw_df', - 'dgc_pos_weighted_tf_log_raw_df', - 'dgc_pos_weighted_tf_ele_df', - 'dgc_pos_weighted_tf_log_ele_df', - 'dgc_pos_weighted_tf_rec_df', - 'dgc_pos_weighted_tf_log_rec_df', - - # tf tuning, neg, df - 'dgc_neg_peak_tf_raw_df', - 'dgc_neg_weighted_tf_raw_df', - 'dgc_neg_weighted_tf_log_raw_df', - 'dgc_neg_weighted_tf_ele_df', - 'dgc_neg_weighted_tf_log_ele_df', - 'dgc_neg_weighted_tf_rec_df', - 'dgc_neg_weighted_tf_log_rec_df', - - # tf tuning, pos, dff - 'dgc_pos_peak_tf_raw_dff', - 'dgc_pos_weighted_tf_raw_dff', - 'dgc_pos_weighted_tf_log_raw_dff', - 'dgc_pos_weighted_tf_ele_dff', - 'dgc_pos_weighted_tf_log_ele_dff', - 'dgc_pos_weighted_tf_rec_dff', - 'dgc_pos_weighted_tf_log_rec_dff', - - # tf tuning, neg, dff - 'dgc_neg_peak_tf_raw_dff', - 'dgc_neg_weighted_tf_raw_dff', - 'dgc_neg_weighted_tf_log_raw_dff', - 'dgc_neg_weighted_tf_ele_dff', - 'dgc_neg_weighted_tf_log_ele_dff', - 'dgc_neg_weighted_tf_rec_dff', - 'dgc_neg_weighted_tf_log_rec_dff', - - # tf tuning, pos, zscore - 'dgc_pos_peak_tf_raw_z', - 'dgc_pos_weighted_tf_raw_z', - 'dgc_pos_weighted_tf_log_raw_z', - 'dgc_pos_weighted_tf_ele_z', - 'dgc_pos_weighted_tf_log_ele_z', - 'dgc_pos_weighted_tf_rec_z', - 'dgc_pos_weighted_tf_log_rec_z', - - # tf tuning, neg, zscore - 'dgc_neg_peak_tf_raw_z', - 'dgc_neg_weighted_tf_raw_z', - 'dgc_neg_weighted_tf_log_raw_z', - 'dgc_neg_weighted_tf_ele_z', - 'dgc_neg_weighted_tf_log_ele_z', - 'dgc_neg_weighted_tf_rec_z', - 'dgc_neg_weighted_tf_log_rec_z', -] - -def process_one_nwb_for_multi_thread(inputs): - - nwb_path, df_folder, clu_folder, params, columns, save_folder, t0, nwb_i, nwb_f_num, is_overwrite = inputs - - nwb_fn = os.path.splitext(os.path.split(nwb_path)[1])[0] - - date, mid, _, _ = nwb_fn.split('_') - - nwb_f = h5py.File(nwb_path, 'r') - plane_ns = dt.get_plane_ns(nwb_f=nwb_f) - plane_ns.sort() - - for 
plane_n in plane_ns: - print('\tt: {:5.0f} minutes, processing {}, {} / {}, {} ...'.format((time.time() - t0) / 60., - nwb_fn, - nwb_i + 1, - nwb_f_num, - plane_n)) - - roi_df_fn = '{}_{}_{}.csv'.format(date, mid, plane_n) - roi_df = pd.read_csv(os.path.join(df_folder, roi_df_fn)) - - clu_fn = '{}_{}_{}_axon_grouping.hdf5'.format(date, mid, plane_n) - clu_f = h5py.File(os.path.join(clu_folder, clu_fn), 'r') - - axon_ns = clu_f['axons'].keys() - axon_ns.sort() - - axon_df = pd.DataFrame(np.nan, index=range(len(axon_ns)), columns=columns) - - for axon_i, axon_n in enumerate(axon_ns): - - roi_lst = clu_f['axons/{}'.format(axon_n)].value - - if len(roi_lst) == 1: - curr_roi_df = roi_df[roi_df['roi_n'] == roi_lst[0]].reset_index() - for col in columns: - axon_df.loc[axon_i, col] = curr_roi_df.loc[0, col] - axon_df.loc[axon_i, 'roi_n'] = axon_n - else: - axon_properties, _, _, _, _, _, _, _, _, _, _, _, _, _ = \ - dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=axon_n, - params=params, - verbose=False) - for rp_name, rp_value in axon_properties.items(): - axon_df.loc[axon_i, rp_name] = rp_value - - save_path = os.path.join(save_folder, '{}_{}_{}.csv'.format(date, mid, plane_n)) - - if os.path.isfile(save_path): - if is_overwrite: - os.remove(save_path) - axon_df.to_csv(save_path) - else: - raise IOError('Axon dataframe file already exists. \npath: {}'.format(save_path)) - else: - axon_df.to_csv(save_path) - -def run(): - - t0 = time.time() - - with open(os.path.join(df_folder, 'script_log.py')) as script_f: - script = script_f.readlines() - - for line in script: - if line[0:6] == 'params': - exec(line) - - nwb_fns = [] - for fn in os.listdir(os.path.realpath(nwb_folder)): - if fn[-4:] == '.nwb' and date_range[0] <= int(fn[0:6]) <= date_range[1]: - nwb_fns.append(fn) - nwb_fns.sort() - print('\nnwb file list:') - print('\n'.join(nwb_fns)) - - save_folder = df_folder + '_axon_' + os.path.split(clu_folder)[1] - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - shutil.copyfile(os.path.realpath(__file__), os.path.join(save_folder, 'script_log.py')) - - inputs_lst = [(os.path.join(curr_folder, nwb_folder, nwb_fn), - os.path.realpath(df_folder), - os.path.realpath(clu_folder), - params, - columns, - save_folder, - t0, - nwb_i, - len(nwb_fns), - is_overwrite) for nwb_i, nwb_fn in enumerate(nwb_fns)] - - print('\nprocessing individual nwb files ...') - p = Pool(process_num) - p.map(process_one_nwb_for_multi_thread, inputs_lst) - - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/0130_get_overall_csv.py b/corticalmapping/scripts/post_recording/analysis_database/0130_get_overall_csv.py deleted file mode 100644 index 5c70614..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0130_get_overall_csv.py +++ /dev/null @@ -1,54 +0,0 @@ -import os -import pandas as pd - -df_folder = 'other_dataframes' -# df_fn = 'dataframes_190530171338' -# df_fn = 'dataframes_190530171338_axon_AllStimuli_DistanceThr_0.50' -# df_fn = 'dataframes_190530171338_axon_AllStimuli_DistanceThr_1.00' -df_fn = 'dataframes_190530171338_axon_AllStimuli_DistanceThr_1.30' -plane_df_fn = 'plane_table_190530170648.xlsx' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -csv_fns = [fn for fn in os.listdir(os.path.join(df_folder, df_fn)) if fn[-4:] == '.csv'] -csv_fns.sort() - -plane_df = pd.read_excel(os.path.join(df_folder, plane_df_fn), 
sheetname='sheet1') - -df_all = [] - -for csv_fn in csv_fns: - print('reading {} ...'.format(csv_fn)) - df_all.append(pd.read_csv(os.path.join(df_folder, df_fn, csv_fn))) - -df_all = pd.concat(df_all, axis=0) - -try: - df_all.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis=1, inplace=True) -except KeyError: - pass - -print(df_all.columns) - -df_all['vol_n'] = '' - -for plane_i, plane_row in plane_df.iterrows(): - plane_ind = ((df_all['date'] == plane_row['date']) & - (df_all['mouse_id'] == plane_row['mouse_id']) & - (df_all['plane_n'] == plane_row['plane_n'])) - df_all.loc[plane_ind, 'vol_n'] = plane_row['volume_n'] - -print(df_all.vol_n.drop_duplicates()) - -df_all.sort_values(by=['vol_n', 'depth', 'roi_n'], inplace=True) -df_all.reset_index(inplace=True) -df_all.drop(['index'], axis=1, inplace=True) - -print(df_all[['date', 'mouse_id', 'plane_n', 'depth', 'vol_n']].drop_duplicates()) - -df_all.to_csv(df_fn.replace('dataframes', 'dataframe') + '.csv') - - - - diff --git a/corticalmapping/scripts/post_recording/analysis_database/0140_plot_dire_retinotopy.py b/corticalmapping/scripts/post_recording/analysis_database/0140_plot_dire_retinotopy.py deleted file mode 100644 index 54e5a6d..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0140_plot_dire_retinotopy.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -import corticalmapping.core.PlottingTools as pt -import corticalmapping.DatabaseTools as dt -from matplotlib.backends.backend_pdf import PdfPages - -# df_path = r"G:\bulk_LGN_database\dataframe_190530171338.csv" -df_path = r"G:\bulk_LGN_database\dataframe_190530171338_axon_AllStimuli_DistanceThr_1.00.csv" - -depths = [50, 100, 150, 200, 250, 300, 350, 400,] -mouse_ids = ['M360495', 'M376019', 'M386444', 'M426525', 'M439939', 'M439943'] -# mouse_ids = ['M439939'] -dire_type = 'peak_dire' # 'vs_dire' or 'peak_dire' -response_dir = 'pos' -response_type = 'dff' -post_process_type = 'ele' # 'raw', 'ele' or 'rec' -skew_thr = 0.6 -dgc_peak_z_thr = 3. 
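The `dsi_type = 'gdsi'` setting a few lines below selects the global (vector-sum) direction selectivity index. A minimal sketch of the conventional definitions on a hypothetical eight-direction tuning curve; the values stored in the dataframe columns may include additional baseline handling upstream.

import numpy as np

dires = np.arange(0, 360, 45)  # hypothetical tested directions (deg)
resp = np.array([1.0, 0.5, 0.2, 0.1, 0.1, 0.1, 0.2, 0.4])  # mean responses
theta = np.deg2rad(dires)
gdsi = np.abs(np.sum(resp * np.exp(1j * theta))) / np.sum(resp)
gosi = np.abs(np.sum(resp * np.exp(2j * theta))) / np.sum(resp)  # orientation analog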
-dgc_p_anova_thr = 0.01 -dsi_type = 'gdsi' -dsi_thr = 0.5 - -rf_z_thr = 1.6 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if dire_type == 'peak_dire' and (post_process_type == 'ele' or post_process_type == 'rec'): - dire_pp = 'raw' -else: - dire_pp = post_process_type - -print('loading csv file: {}'.format(df_path)) -df = pd.read_csv(df_path) -print('csv file loaded.') - -df = df[df['mouse_id'].isin(mouse_ids)] - -df = df[(df['skew_fil'] >= skew_thr) & - (df['dgc_{}_peak_z'.format(response_dir)] >= dgc_peak_z_thr) & - (df['dgc_p_anova_{}'.format(response_type)] <= dgc_p_anova_thr) & - (df['dgc_{}_{}_{}_{}'.format(response_dir, dsi_type, post_process_type, response_type)] >= dsi_thr)] - -pdff = PdfPages(os.path.join('intermediate_results', 'preferred_dire_depth.pdf')) - -f_all = plt.figure(figsize=(12, 8)) -ax_all = f_all.add_subplot(111) -ax_all.set_xlim([0, 90]) -ax_all.set_ylim([-30, 30]) -ax_all.set_aspect('equal') -ax_all.set_title('all depths') - -for depth_i, depth in enumerate(depths): - - depth_df = df[df['depth'] == depth] - print(len(depth_df)) - - f = plt.figure(figsize=(12, 8)) - ax = f.add_subplot(111) - ax.set_xlim([0, 90]) - ax.set_ylim([-30, 30]) - ax.set_aspect('equal') - ax.set_title('{} um'.format(depth)) - - for roi_i, roi_row in depth_df.iterrows(): - - if roi_row['rf_{}_on_peak_z'.format(response_dir)] >= rf_z_thr: - alt = roi_row['rf_{}_on_center_alt'.format(response_dir)] - azi = roi_row['rf_{}_on_center_azi'.format(response_dir)] - dire = roi_row['dgc_{}_{}_{}_{}'.format(response_dir, dire_type, dire_pp, response_type)] - # print('alt: {:6.2f}, azi: {:6.2f}, dire: {}'.format(alt, azi, dire)) - dire = dire * np.pi / 180. - bazi = azi - np.cos(dire) * 1. - dazi = np.cos(dire) * 2. - balt = alt - np.sin(dire) * 1. - dalt = np.sin(dire) * 2. - - ax.arrow(x=bazi, y=balt, dx=dazi, dy=dalt, length_includes_head=True, - head_width=0.5, head_length=1, ec='none', fc='r', alpha=0.5) - ax_all.arrow(x=bazi, y=balt, dx=dazi, dy=dalt, length_includes_head=True, - head_width=0.5, head_length=1, ec='none', fc='r', alpha=0.5) - - if roi_row['rf_{}_off_peak_z'.format(response_dir)] >= rf_z_thr: - alt = roi_row['rf_{}_off_center_alt'.format(response_dir)] - azi = roi_row['rf_{}_off_center_azi'.format(response_dir)] - dire = roi_row['dgc_{}_{}_{}_{}'.format(response_dir, dire_type, dire_pp, response_type)] - # print('alt: {:6.2f}, azi: {:6.2f}, dire: {}'.format(alt, azi, dire)) - dire = dire * np.pi / 180. - bazi = azi - np.cos(dire) * 1. # arrow components follow the same cos/sin convention as the ON branch - dazi = np.cos(dire) * 2. - balt = alt - np.sin(dire) * 1. - dalt = np.sin(dire) * 2. 
- - ax.arrow(x=bazi, y=balt, dx=dazi, dy=dalt, length_includes_head=True, - head_width=0.5, head_length=1, ec='none', fc='b', alpha=0.5) - ax_all.arrow(x=bazi, y=balt, dx=dazi, dy=dalt, length_includes_head=True, - head_width=0.5, head_length=1, ec='none', fc='b', alpha=0.5) - - - pdff.savefig(f) - f.clear() - plt.close(f) - -pdff.savefig(f_all) -pdff.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/0150_plot_ori_vs_rf_axis_DS.py b/corticalmapping/scripts/post_recording/analysis_database/0150_plot_ori_vs_rf_axis_DS.py deleted file mode 100644 index 8f04553..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0150_plot_ori_vs_rf_axis_DS.py +++ /dev/null @@ -1,139 +0,0 @@ -import os -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.SingleCellAnalysis as sca -import scipy.stats as stats -import h5py - -# df_path = r"G:\bulk_LGN_database\dataframe_190530171338.csv" -# rf_maps_folder = r"intermediate_results\rf_maps_dataframes_190529210731" - -df_path = r"G:\bulk_LGN_database\dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30.csv" -rf_maps_folder = r"G:\bulk_LGN_database\intermediate_results" \ - r"\rf_maps_dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30" - - -depths = [50, 100, 150, 200, 250, 300, 350, 400,] -mouse_ids = ['M360495', 'M376019', 'M386444', 'M426525', 'M439939', 'M439943'] -# mouse_ids = ['M439939'] -dire_type = 'peak_dire' # 'vs_dire' or 'peak_dire' -response_dir = 'pos' -response_type = 'dff' -post_process_type = 'ele' # 'raw', 'ele' or 'rec' -skew_thr = 0.6 -dgc_peak_z_thr = 3. -dgc_p_anova_thr = 0.01 -dsi_type = 'gdsi' -dsi_thr = 0.5 -osi_type = 'gosi' -osi_thr = 1. / 3. - -ellipse_aspect_thr = 1.0 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if dire_type == 'peak_dire' and (post_process_type == 'ele' or post_process_type == 'rec'): - dire_pp = 'raw' -else: - dire_pp = post_process_type - -print('loading csv file: {}'.format(df_path)) -df = pd.read_csv(df_path) -print('csv file loaded.') - -df = df[(df['mouse_id'].isin(mouse_ids)) & \ - (df['skew_fil'] >= skew_thr) & \ - (df['dgc_{}_peak_z'.format(response_dir)] >= dgc_peak_z_thr) & \ - (df['dgc_p_anova_{}'.format(response_type)] <= dgc_p_anova_thr) & \ - (np.isfinite(df['rf_{}_on_peak_z'.format(response_dir)]))] - -dsdf = df[(df['dgc_{}_{}_{}_{}'.format(response_dir, dsi_type, post_process_type, response_type)] >= dsi_thr)] - -ds_diff_onoff = [] -ds_diff_on = [] -ds_diff_off = [] -for roi_i, roi_row in dsdf.iterrows(): - date = int(roi_row['date']) - mid = roi_row['mouse_id'] - plane_n = roi_row['plane_n'] - roi_n = roi_row['roi_n'] - - map_fn = '{}_{}_{}_{}'.format(date, mid, plane_n, response_dir) - map_f = h5py.File(os.path.join(rf_maps_folder, map_fn + '.hdf5'), 'r') - - on_grp = map_f['{}_ON'.format(map_fn)] - off_grp = map_f['{}_OFF'.format(map_fn)] - - dire = roi_row['dgc_{}_{}_{}_{}'.format(response_dir, dire_type, dire_pp, response_type)] - ori = sca.dire2ori(dire) - - if roi_n in on_grp.keys() and roi_n in off_grp.keys(): - rf_on = sca.SpatialReceptiveField.from_h5_group(on_grp[roi_n]) - rf_off = sca.SpatialReceptiveField.from_h5_group(off_grp[roi_n]) - c_alt_on, c_azi_on = rf_on.get_weighted_rf_center() - c_alt_off, c_azi_off = rf_off.get_weighted_rf_center() - - onoff_ang = np.arctan((c_alt_on - c_alt_off) / (c_azi_on - c_azi_off)) - onoff_ang = onoff_ang * 180. 
/ np.pi - onoff_ang = sca.dire2ori(onoff_ang) - - curr_diff = abs(onoff_ang - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - - ds_diff_onoff.append(curr_diff) - - elif roi_n in on_grp.keys(): - rf_on = sca.SpatialReceptiveField.from_h5_group(on_grp[roi_n]) - ell_on = rf_on.ellipse_fitting(is_plot=False) - if ell_on is not None and ell_on.get_aspect_ratio() >= ellipse_aspect_thr: - curr_diff = abs(ell_on.angle - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - ds_diff_on.append(curr_diff) - - elif roi_n in off_grp.keys(): - rf_off = sca.SpatialReceptiveField.from_h5_group(off_grp[roi_n]) - ell_off = rf_off.ellipse_fitting(is_plot=False) - if ell_off is not None and ell_off.get_aspect_ratio() >= ellipse_aspect_thr: - curr_diff = abs(ell_off.angle - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - ds_diff_off.append(curr_diff) - -print('\nDirection Selective ROIs:') -print('\tWith ONOFF receptive fields:') -print('\t\tn={}'.format(len(ds_diff_onoff))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(ds_diff_onoff))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(ds_diff_onoff))) -chisq_ds_onoff, p_ds_onoff = stats.chisquare(np.histogram(ds_diff_onoff, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_ds_onoff, p_ds_onoff)) - -print('\tWith only ON receptive fields:') -print('\t\tn={}'.format(len(ds_diff_on))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(ds_diff_on))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(ds_diff_on))) -chisq_ds_on, p_ds_on = stats.chisquare(np.histogram(ds_diff_on, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_ds_on, p_ds_on)) - -print('\tWith only OFF receptive fields:') -print('\t\tn={}'.format(len(ds_diff_off))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(ds_diff_off))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(ds_diff_off))) -chisq_ds_off, p_ds_off = stats.chisquare(np.histogram(ds_diff_off, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_ds_off, p_ds_off)) - -ds_diff_all = ds_diff_onoff + ds_diff_on + ds_diff_off -print('\tWith all receptive fields:') -print('\t\tn={}'.format(len(ds_diff_all))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(ds_diff_all))) -print('\t\torie difference predicted vs. 
measured, std={}'.format(np.std(ds_diff_all))) -chisq_ds_all, p_ds_all = stats.chisquare(np.histogram(ds_diff_all, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_ds_all, p_ds_all)) - - -plt.hist([ds_diff_onoff, ds_diff_on, ds_diff_off], range=[0, 90], bins=20, stacked=True, - color=['purple', 'r', 'b'], ec='none', alpha=0.5) -plt.show() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/0160_plot_ori_vs_rf_axis_OS.py b/corticalmapping/scripts/post_recording/analysis_database/0160_plot_ori_vs_rf_axis_OS.py deleted file mode 100644 index 10cc5da..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0160_plot_ori_vs_rf_axis_OS.py +++ /dev/null @@ -1,139 +0,0 @@ -import os -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.SingleCellAnalysis as sca -import scipy.stats as stats -import h5py - -# df_path = r"G:\bulk_LGN_database\dataframe_190530171338.csv" -# rf_maps_folder = r"intermediate_results\rf_maps_dataframes_190529210731" - -df_path = r"G:\bulk_LGN_database\dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30.csv" -rf_maps_folder = r"G:\bulk_LGN_database\intermediate_results" \ - r"\rf_maps_dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30" - -depths = [50, 100, 150, 200, 250, 300, 350, 400,] -mouse_ids = ['M360495', 'M376019', 'M386444', 'M426525', 'M439939', 'M439943'] -# mouse_ids = ['M439939'] -dire_type = 'peak_dire' # 'vs_dire' or 'peak_dire' -response_dir = 'pos' -response_type = 'dff' -post_process_type = 'ele' # 'raw', 'ele' or 'rec' -skew_thr = 0.6 -dgc_peak_z_thr = 3. -dgc_p_anova_thr = 0.01 -dsi_type = 'gdsi' -dsi_thr = 0.5 -osi_type = 'gosi' -osi_thr = 1. / 3. 
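Like the DS script above, this script compares each unit's preferred orientation against the axis joining its ON and OFF subfield centers. A minimal sketch of that comparison with hypothetical centers; `np.arctan2` folded into [0, 180) is equivalent to the arctan-plus-`dire2ori` route used in the loop below, and it avoids division by zero when the two centers share an azimuth.

import numpy as np

c_alt_on, c_azi_on = 10.0, 60.0   # hypothetical ON subfield center (deg)
c_alt_off, c_azi_off = 6.0, 52.0  # hypothetical OFF subfield center (deg)
onoff_ang = np.rad2deg(np.arctan2(c_alt_on - c_alt_off, c_azi_on - c_azi_off)) % 180.
ori = 30.                         # hypothetical preferred orientation (deg)
ang_diff = abs(onoff_ang - ori)
if ang_diff > 90.:                # orientations are periodic over 180 deg
    ang_diff = 180. - ang_diff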
- -ellipse_aspect_thr = 1.0 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if dire_type == 'peak_dire' and (post_process_type == 'ele' or post_process_type == 'rec'): - dire_pp = 'raw' -else: - dire_pp = post_process_type - -print('loading csv file: {}'.format(df_path)) -df = pd.read_csv(df_path) -print('csv file loaded.') - -df = df[(df['mouse_id'].isin(mouse_ids)) & \ - (df['skew_fil'] >= skew_thr) & \ - (df['dgc_{}_peak_z'.format(response_dir)] >= dgc_peak_z_thr) & \ - (df['dgc_p_anova_{}'.format(response_type)] <= dgc_p_anova_thr) & \ - (np.isfinite(df['rf_{}_on_peak_z'.format(response_dir)]))] - -osdf = df[(df['dgc_{}_{}_{}_{}'.format(response_dir, osi_type, post_process_type, response_type)] >= osi_thr) & \ - (df['dgc_{}_{}_{}_{}'.format(response_dir, dsi_type, post_process_type, response_type)] < dsi_thr)] - -os_diff_onoff = [] -os_diff_on = [] -os_diff_off = [] -for roi_i, roi_row in osdf.iterrows(): - date = int(roi_row['date']) - mid = roi_row['mouse_id'] - plane_n = roi_row['plane_n'] - roi_n = roi_row['roi_n'] - - map_fn = '{}_{}_{}_{}'.format(date, mid, plane_n, response_dir) - map_f = h5py.File(os.path.join(rf_maps_folder, map_fn + '.hdf5'), 'r') - - on_grp = map_f['{}_ON'.format(map_fn)] - off_grp = map_f['{}_OFF'.format(map_fn)] - - dire = roi_row['dgc_{}_{}_{}_{}'.format(response_dir, dire_type, dire_pp, response_type)] - ori = sca.dire2ori(dire) - - if roi_n in on_grp.keys() and roi_n in off_grp.keys(): - rf_on = sca.SpatialReceptiveField.from_h5_group(on_grp[roi_n]) - rf_off = sca.SpatialReceptiveField.from_h5_group(off_grp[roi_n]) - c_alt_on, c_azi_on = rf_on.get_weighted_rf_center() - c_alt_off, c_azi_off = rf_off.get_weighted_rf_center() - - onoff_ang = np.arctan((c_alt_on - c_alt_off) / (c_azi_on - c_azi_off)) - onoff_ang = onoff_ang * 180. / np.pi - onoff_ang = sca.dire2ori(onoff_ang) - - curr_diff = abs(onoff_ang - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - - os_diff_onoff.append(curr_diff) - - elif roi_n in on_grp.keys(): - rf_on = sca.SpatialReceptiveField.from_h5_group(on_grp[roi_n]) - ell_on = rf_on.ellipse_fitting(is_plot=False) - if ell_on is not None and ell_on.get_aspect_ratio() >= ellipse_aspect_thr: - curr_diff = abs(ell_on.angle - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - os_diff_on.append(curr_diff) - - elif roi_n in off_grp.keys(): - rf_off = sca.SpatialReceptiveField.from_h5_group(off_grp[roi_n]) - ell_off = rf_off.ellipse_fitting(is_plot=False) - if ell_off is not None and ell_off.get_aspect_ratio() >= ellipse_aspect_thr: - curr_diff = abs(ell_off.angle - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - os_diff_off.append(curr_diff) - -print('\nOrientation Selective ROIs:') -print('\tWith ONOFF receptive fields:') -print('\t\tn={}'.format(len(os_diff_onoff))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(os_diff_onoff))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(os_diff_onoff))) -chisq_os_onoff, p_os_onoff = stats.chisquare(np.histogram(os_diff_onoff, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_os_onoff, p_os_onoff)) - -print('\tWith only ON receptive fields:') -print('\t\tn={}'.format(len(os_diff_on))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(os_diff_on))) -print('\t\torie difference predicted vs. 
measured, std={}'.format(np.std(os_diff_on))) -chisq_os_on, p_os_on = stats.chisquare(np.histogram(os_diff_on, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_os_on, p_os_on)) - -print('\tWith only OFF receptive fields:') -print('\t\tn={}'.format(len(os_diff_off))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(os_diff_off))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(os_diff_off))) -chisq_os_off, p_os_off = stats.chisquare(np.histogram(os_diff_off, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_os_off, p_os_off)) - -os_diff_all = os_diff_onoff + os_diff_on + os_diff_off -print('\tWith all receptive fields:') -print('\t\tn={}'.format(len(os_diff_all))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(os_diff_all))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(os_diff_all))) -chisq_os_all, p_os_all = stats.chisquare(np.histogram(os_diff_all, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_os_all, p_os_all)) - - -plt.hist([os_diff_onoff, os_diff_on, os_diff_off], range=[0, 90], bins=20, stacked=True, - color=['purple', 'r', 'b'], ec='none', alpha=0.5) -plt.show() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/0170_two_way_anova_dsi.py b/corticalmapping/scripts/post_recording/analysis_database/0170_two_way_anova_dsi.py deleted file mode 100644 index da6e4b5..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0170_two_way_anova_dsi.py +++ /dev/null @@ -1,90 +0,0 @@ -""" -effects of volume and depth on dsi measurement -two way anova -""" - -import os -import numpy as np -import pandas as pd - -import statsmodels.api as sm -from statsmodels.formula.api import ols -import statsmodels.stats.multicomp - - -df_path = r"G:\bulk_LGN_database\dataframe_190530171338.csv" -# df_path = r"G:\bulk_LGN_database\dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30.csv") - -depths = [50, 100, 150, 200, 250, 300, 350, 400,] -mouse_ids = ['M360495', 'M376019', 'M386444', 'M426525', 'M439939', 'M439943'] - -# depths = [50, 100, 150, 200, 250, 300, 350, 400,] -# mouse_ids = ['M439943'] - -# depths = [50, 100, 150, 200, 250, 300, 350] -# mouse_ids = ['M439939'] - -# depths = [50, 100, 150, 200, 250, 300] -# mouse_ids = ['M426525'] - -# depths = [100, 200, 300] -# mouse_ids = ['M386444'] - -# depths = [50, 100, 150, 200, 250] -# mouse_ids = ['M376019', 'M360495'] - -# dire_type = 'peak_dire' # 'vs_dire' or 'peak_dire' -response_dir = 'pos' -response_type = 'dff' -post_process_type = 'ele' # 'raw', 'ele' or 'rec' -skew_thr = 0.6 -dgc_peak_z_thr = 3. -dgc_p_anova_thr = 0.01 -dsi_type = 'gdsi' -# dsi_thr = 0.5 - -# nti_half_span = 45. 
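The `anova_table` helper defined below augments the statsmodels ANOVA table with eta-squared and omega-squared effect sizes. A hypothetical worked example of the two formulas it implements:

# eta_sq   = SS_effect / SS_total
# omega_sq = (SS_effect - df_effect * MS_residual) / (SS_total + MS_residual)
ss_effect, df_effect = 12.0, 3.0  # hypothetical effect sum of squares and df
ss_resid, df_resid = 48.0, 96.0   # hypothetical residual sum of squares and df
ms_resid = ss_resid / df_resid    # 0.5
ss_total = ss_effect + ss_resid   # 60.0
eta_sq = ss_effect / ss_total     # 0.20
omega_sq = (ss_effect - df_effect * ms_resid) / (ss_total + ms_resid)  # ~0.17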
-# nti_sum_thr = 10 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('loading csv file: {}'.format(df_path)) -df = pd.read_csv(df_path) -print('csv file loaded.') - - -df = df[(df['mouse_id'].isin(mouse_ids)) & - (df['depth'].isin(depths)) & - (df['skew_fil'] >= skew_thr) & - (df['dgc_{}_peak_z'.format(response_dir)] >= dgc_peak_z_thr) & - (df['dgc_p_anova_{}'.format(response_type)] <= dgc_p_anova_thr)] - -model = ols(f'dgc_{response_dir}_{dsi_type}_{post_process_type}_{response_type} ~ ' \ - f'C(depth)*C(vol_n)', df).fit() - -# model = ols(f'dgc_{response_dir}_{dsi_type}_{post_process_type}_{response_type} ~ ' \ - # f'C(depth)*C(mouse_id)', df).fit() - -# model = ols(f'dgc_{response_dir}_{dsi_type}_{post_process_type}_{response_type} ~ ' \ -# f'C(depth)*C(vol_n)*C(mouse_id)', df).fit() - -print(f"Overall model F({model.df_model: .0f},{model.df_resid: .0f})" \ - f"= {model.fvalue: .3f}, p = {model.f_pvalue: .4f}") - -print(model.summary()) - -res = sm.stats.anova_lm(model, typ=2) - -def anova_table(aov): - aov['mean_sq'] = aov[:]['sum_sq']/aov[:]['df'] - - aov['eta_sq'] = aov[:-1]['sum_sq']/sum(aov['sum_sq']) - - aov['omega_sq'] = (aov[:-1]['sum_sq']-(aov[:-1]['df']*aov['mean_sq'][-1]))/(sum(aov['sum_sq'])+aov['mean_sq'][-1]) - - cols = ['sum_sq', 'mean_sq', 'df', 'F', 'PR(>F)', 'eta_sq', 'omega_sq'] - aov = aov[cols] - return aov - -print(anova_table(res)) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/0180_two_way_anova_rf_area.py b/corticalmapping/scripts/post_recording/analysis_database/0180_two_way_anova_rf_area.py deleted file mode 100644 index 58abe3a..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0180_two_way_anova_rf_area.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -effects of volume and depth on rf_area measurement -two way anova -""" - -import os -import numpy as np -import pandas as pd - -import statsmodels.api as sm -from statsmodels.formula.api import ols -import statsmodels.stats.multicomp - - -df_path = r"G:\bulk_LGN_database\dataframe_190530171338.csv" -# df_path = r"G:\bulk_LGN_database\dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30.csv" - -# depths = [50, 100, 150, 200, 250, 300, 350, 400,] -# mouse_ids = ['M360495', 'M376019', 'M386444', 'M426525', 'M439939', 'M439943'] - -# depths = [50, 100, 150, 200, 250, 300, 350, 400,] -# mouse_ids = ['M439943'] - -# depths = [50, 100, 150, 200, 250, 300, 350] -# mouse_ids = ['M439939'] - -# depths = [50, 100, 150, 200, 250, 300] -# mouse_ids = ['M426525'] - -depths = [100, 200, 300] -mouse_ids = ['M386444'] - -# depths = [50, 100, 150, 200, 250] -# mouse_ids = ['M376019', 'M360495'] - -response_dir = 'pos' -response_type = 'dff' -post_process_type = 'ele' # 'raw', 'ele' or 'rec' -skew_thr = 0.6 -rf_z_thr_abs = 1.6 -polarity = 'off' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('loading csv file: {}'.format(df_path)) -df = pd.read_csv(df_path) -print('csv file loaded.') - -df = df[(df['mouse_id'].isin(mouse_ids)) & - (df['depth'].isin(depths)) & - (df['skew_fil'] >= skew_thr) & - (df[f'rf_{response_dir}_{polarity}_peak_z'] >= rf_z_thr_abs)] - -model = ols(f'rf_{response_dir}_{polarity}_area ~ C(depth)*C(vol_n)', df).fit() - -# model = ols(f'rf_{response_dir}_{polarity}_area ~ C(depth)*C(mouse_id)', df).fit() - -# model = ols(f'rf_{response_dir}_{polarity}_area ~ C(depth)*C(vol_n)*C(mouse_id)', df).fit() - -print(f"Overall model F({model.df_model: 
.0f},{model.df_resid: .0f})" \ - f"= {model.fvalue: .3f}, p = {model.f_pvalue: .4f}") - -print(model.summary()) - -res = sm.stats.anova_lm(model, typ=2) - -def anova_table(aov): - aov['mean_sq'] = aov[:]['sum_sq']/aov[:]['df'] - - aov['eta_sq'] = aov[:-1]['sum_sq']/sum(aov['sum_sq']) - - aov['omega_sq'] = (aov[:-1]['sum_sq']-(aov[:-1]['df']*aov['mean_sq'][-1]))/(sum(aov['sum_sq'])+aov['mean_sq'][-1]) - - cols = ['sum_sq', 'mean_sq', 'df', 'F', 'PR(>F)', 'eta_sq', 'omega_sq'] - aov = aov[cols] - return aov - -print(anova_table(res)) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/0190_get_axon_morphology.py b/corticalmapping/scripts/post_recording/analysis_database/0190_get_axon_morphology.py deleted file mode 100644 index 54bcdb1..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/0190_get_axon_morphology.py +++ /dev/null @@ -1,54 +0,0 @@ -import os -import h5py -import pandas as pd -import corticalmapping.DatabaseTools as dt - -nwb_folder = r"G:\bulk_LGN_database\nwbs" -clu_folder = r"intermediate_results\bouton_clustering\AllStimuli_DistanceThr_1.30" - -dfa_fn = "dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30.csv" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('reading csv file ...') -dfa = pd.read_csv(dfa_fn) -print('csv file loaded.') - -df_plane = dfa[['date', 'mouse_id', 'plane_n']].drop_duplicates().reset_index() -print(len(df_plane)) - -axon_morph_dict = {} - -ind = 0 -for plane_i, plane_row in df_plane.iterrows(): - date = int(plane_row['date']) - mid = plane_row['mouse_id'] - plane_n = plane_row['plane_n'] - - print('{}_{}_{}, {}/{}'.format(date, mid, plane_n, plane_i + 1, len(df_plane))) - - nwb_f = h5py.File(os.path.join(nwb_folder, '{}_{}_110_repacked.nwb'.format(date, mid)), 'r') - clu_f = h5py.File(os.path.join(clu_folder, '{}_{}_{}_axon_grouping.hdf5'.format(date, mid, plane_n)), 'r') - - curr_dfa = dfa[(dfa['date'] == date) & - (dfa['mouse_id'] == mid) & - (dfa['plane_n'] == plane_n)].reset_index() - - for axon_i, axon_row in curr_dfa.iterrows(): - - axon_morph = dt.get_axon_morphology(clu_f=clu_f, nwb_f=nwb_f, plane_n=plane_n, axon_n=axon_row['roi_n']) - axon_morph.update({'date': date, - 'mouse_id': mid, - 'plane_n': plane_n, - 'roi_n': axon_row['roi_n']}) - - axon_morph_dict[ind] = axon_morph - ind = ind + 1 - -dfa_morph = pd.DataFrame.from_dict(axon_morph_dict, orient='index') -print(dfa_morph) - -strs = os.path.splitext(dfa_fn)[0].split('_') -save_n = '{}_{}_axon_morphology_{}_{}.csv'.format(strs[0], strs[1], strs[-2], strs[-1]) -dfa_morph.to_csv(save_n) diff --git a/corticalmapping/scripts/post_recording/analysis_database/h5repack.exe b/corticalmapping/scripts/post_recording/analysis_database/h5repack.exe deleted file mode 100644 index 3ee5da8..0000000 Binary files a/corticalmapping/scripts/post_recording/analysis_database/h5repack.exe and /dev/null differ diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0041_get_plane_dfs.py b/corticalmapping/scripts/post_recording/analysis_database/old/0041_get_plane_dfs.py deleted file mode 100644 index 93a5c25..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0041_get_plane_dfs.py +++ /dev/null @@ -1,427 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import time -import pandas as pd -import numpy as np -import h5py -import datetime -import corticalmapping.DatabaseTools as dt -from multiprocessing import Pool 
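The plane-dataframe scripts below fan per-NWB-file work out to worker processes with multiprocessing.Pool. A minimal sketch of that fan-out pattern, with illustrative names (work_on_one_file, inputs_lst) rather than the scripts' own:

from multiprocessing import Pool

def work_on_one_file(inputs):
    # each worker receives one packed tuple of arguments
    path, file_i, file_num = inputs
    print('processing {}, {} / {}'.format(path, file_i + 1, file_num))

if __name__ == '__main__':
    paths = ['a.nwb', 'b.nwb', 'c.nwb']  # placeholder file list
    inputs_lst = [(p, i, len(paths)) for i, p in enumerate(paths)]
    with Pool(2) as p:  # two worker processes
        p.map(work_on_one_file, inputs_lst)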
-from shutil import copyfile - -date_range = [180301, 190610] -database_folder = 'nwbs' -save_folder_n = "dataframes" -process_num = 8 -is_overwrite = False - -params = dt.ANALYSIS_PARAMS -params['trace_type'] = 'f_center_raw' -params['is_collapse_dire'] = False -params['is_collapse_sf'] = True -params['is_collapse_tf'] = False - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('pandas version: {}\n'.format(pd.__version__)) - -columns = [ - 'date', - 'mouse_id', - 'plane_n', - 'roi_n', - 'depth', # microns under pia, float - - # roi mask - 'roi_area', # square micron - 'roi_center_row', # center of roi mask in field of view, row - 'roi_center_col', # center of roi mask in field of view, column - - # trace skewness - 'skew_raw', # skewness of unfiltered trace (neuropil subtracted), float - 'skew_fil', # skewness of highpassed trace, float - - # receptive fields - 'rf_pos_on_peak_z', - 'rf_pos_on_area', - 'rf_pos_on_center_alt', - 'rf_pos_on_center_azi', - - 'rf_pos_off_peak_z', - 'rf_pos_off_area', - 'rf_pos_off_center_alt', - 'rf_pos_off_center_azi', - - 'rf_pos_onoff_peak_z', - 'rf_pos_onoff_area', - 'rf_pos_onoff_center_alt', - 'rf_pos_onoff_center_azi', - - 'rf_pos_lsi', - - 'rf_neg_on_peak_z', - 'rf_neg_on_area', - 'rf_neg_on_center_alt', - 'rf_neg_on_center_azi', - - 'rf_neg_off_peak_z', - 'rf_neg_off_area', - 'rf_neg_off_center_alt', - 'rf_neg_off_center_azi', - - 'rf_neg_onoff_peak_z', - 'rf_neg_onoff_area', - 'rf_neg_onoff_center_alt', - 'rf_neg_onoff_center_azi', - - 'rf_neg_lsi', - - # drifting grating peak response - 'dgc_pos_peak_df', - 'dgc_neg_peak_df', - 'dgc_pos_p_ttest_df', - 'dgc_neg_p_ttest_df', - 'dgc_p_anova_df', - - 'dgc_pos_peak_dff', - 'dgc_neg_peak_dff', - 'dgc_pos_p_ttest_dff', - 'dgc_neg_p_ttest_dff', - 'dgc_p_anova_dff', - - 'dgc_pos_peak_z', - 'dgc_neg_peak_z', - 'dgc_pos_p_ttest_z', - 'dgc_neg_p_ttest_z', - 'dgc_p_anova_z', - - # direction / orientation tuning, pos, df - 'dgc_pos_osi_raw_df', - 'dgc_pos_dsi_raw_df', - 'dgc_pos_gosi_raw_df', - 'dgc_pos_gdsi_raw_df', - 'dgc_pos_osi_ele_df', - 'dgc_pos_dsi_ele_df', - 'dgc_pos_gosi_ele_df', - 'dgc_pos_gdsi_ele_df', - 'dgc_pos_osi_rec_df', - 'dgc_pos_dsi_rec_df', - 'dgc_pos_gosi_rec_df', - 'dgc_pos_gdsi_rec_df', - 'dgc_pos_peak_dire_raw_df', - 'dgc_pos_vs_dire_raw_df', - 'dgc_pos_vs_dire_ele_df', - 'dgc_pos_vs_dire_rec_df', - - # direction / orientation tuning, neg, df - 'dgc_neg_osi_raw_df', - 'dgc_neg_dsi_raw_df', - 'dgc_neg_gosi_raw_df', - 'dgc_neg_gdsi_raw_df', - 'dgc_neg_osi_ele_df', - 'dgc_neg_dsi_ele_df', - 'dgc_neg_gosi_ele_df', - 'dgc_neg_gdsi_ele_df', - 'dgc_neg_osi_rec_df', - 'dgc_neg_dsi_rec_df', - 'dgc_neg_gosi_rec_df', - 'dgc_neg_gdsi_rec_df', - 'dgc_neg_peak_dire_raw_df', - 'dgc_neg_vs_dire_raw_df', - 'dgc_neg_vs_dire_ele_df', - 'dgc_neg_vs_dire_rec_df', - - # direction / orientation tuning, pos, dff - 'dgc_pos_osi_raw_dff', - 'dgc_pos_dsi_raw_dff', - 'dgc_pos_gosi_raw_dff', - 'dgc_pos_gdsi_raw_dff', - 'dgc_pos_osi_ele_dff', - 'dgc_pos_dsi_ele_dff', - 'dgc_pos_gosi_ele_dff', - 'dgc_pos_gdsi_ele_dff', - 'dgc_pos_osi_rec_dff', - 'dgc_pos_dsi_rec_dff', - 'dgc_pos_gosi_rec_dff', - 'dgc_pos_gdsi_rec_dff', - 'dgc_pos_peak_dire_raw_dff', - 'dgc_pos_vs_dire_raw_dff', - 'dgc_pos_vs_dire_ele_dff', - 'dgc_pos_vs_dire_rec_dff', - - # direction / orientation tuning, neg, dff - 'dgc_neg_osi_raw_dff', - 'dgc_neg_dsi_raw_dff', - 'dgc_neg_gosi_raw_dff', - 'dgc_neg_gdsi_raw_dff', - 'dgc_neg_osi_ele_dff', - 'dgc_neg_dsi_ele_dff', - 'dgc_neg_gosi_ele_dff', - 
'dgc_neg_gdsi_ele_dff', - 'dgc_neg_osi_rec_dff', - 'dgc_neg_dsi_rec_dff', - 'dgc_neg_gosi_rec_dff', - 'dgc_neg_gdsi_rec_dff', - 'dgc_neg_peak_dire_raw_dff', - 'dgc_neg_vs_dire_raw_dff', - 'dgc_neg_vs_dire_ele_dff', - 'dgc_neg_vs_dire_rec_dff', - - # direction / orientation tuning, pos, zscore - 'dgc_pos_osi_raw_z', - 'dgc_pos_dsi_raw_z', - 'dgc_pos_gosi_raw_z', - 'dgc_pos_gdsi_raw_z', - 'dgc_pos_osi_ele_z', - 'dgc_pos_dsi_ele_z', - 'dgc_pos_gosi_ele_z', - 'dgc_pos_gdsi_ele_z', - 'dgc_pos_osi_rec_z', - 'dgc_pos_dsi_rec_z', - 'dgc_pos_gosi_rec_z', - 'dgc_pos_gdsi_rec_z', - 'dgc_pos_peak_dire_raw_z', - 'dgc_pos_vs_dire_raw_z', - 'dgc_pos_vs_dire_ele_z', - 'dgc_pos_vs_dire_rec_z', - - # direction / orientation tuning, neg, zscore - 'dgc_neg_osi_raw_z', - 'dgc_neg_dsi_raw_z', - 'dgc_neg_gosi_raw_z', - 'dgc_neg_gdsi_raw_z', - 'dgc_neg_osi_ele_z', - 'dgc_neg_dsi_ele_z', - 'dgc_neg_gosi_ele_z', - 'dgc_neg_gdsi_ele_z', - 'dgc_neg_osi_rec_z', - 'dgc_neg_dsi_rec_z', - 'dgc_neg_gosi_rec_z', - 'dgc_neg_gdsi_rec_z', - 'dgc_neg_peak_dire_raw_z', - 'dgc_neg_vs_dire_raw_z', - 'dgc_neg_vs_dire_ele_z', - 'dgc_neg_vs_dire_rec_z', - - # sf tuning, pos, df - 'dgc_pos_peak_sf_raw_df', - 'dgc_pos_weighted_sf_raw_df', - 'dgc_pos_weighted_sf_log_raw_df', - 'dgc_pos_weighted_sf_ele_df', - 'dgc_pos_weighted_sf_log_ele_df', - 'dgc_pos_weighted_sf_rec_df', - 'dgc_pos_weighted_sf_log_rec_df', - - # sf tuning, neg, df - 'dgc_neg_peak_sf_raw_df', - 'dgc_neg_weighted_sf_raw_df', - 'dgc_neg_weighted_sf_log_raw_df', - 'dgc_neg_weighted_sf_ele_df', - 'dgc_neg_weighted_sf_log_ele_df', - 'dgc_neg_weighted_sf_rec_df', - 'dgc_neg_weighted_sf_log_rec_df', - - # sf tuning, pos, dff - 'dgc_pos_peak_sf_raw_dff', - 'dgc_pos_weighted_sf_raw_dff', - 'dgc_pos_weighted_sf_log_raw_dff', - 'dgc_pos_weighted_sf_ele_dff', - 'dgc_pos_weighted_sf_log_ele_dff', - 'dgc_pos_weighted_sf_rec_dff', - 'dgc_pos_weighted_sf_log_rec_dff', - - # sf tuning, neg, dff - 'dgc_neg_peak_sf_raw_dff', - 'dgc_neg_weighted_sf_raw_dff', - 'dgc_neg_weighted_sf_log_raw_dff', - 'dgc_neg_weighted_sf_ele_dff', - 'dgc_neg_weighted_sf_log_ele_dff', - 'dgc_neg_weighted_sf_rec_dff', - 'dgc_neg_weighted_sf_log_rec_dff', - - # sf tuning, pos, zscore - 'dgc_pos_peak_sf_raw_z', - 'dgc_pos_weighted_sf_raw_z', - 'dgc_pos_weighted_sf_log_raw_z', - 'dgc_pos_weighted_sf_ele_z', - 'dgc_pos_weighted_sf_log_ele_z', - 'dgc_pos_weighted_sf_rec_z', - 'dgc_pos_weighted_sf_log_rec_z', - - # sf tuning, neg, zscore - 'dgc_neg_peak_sf_raw_z', - 'dgc_neg_weighted_sf_raw_z', - 'dgc_neg_weighted_sf_log_raw_z', - 'dgc_neg_weighted_sf_ele_z', - 'dgc_neg_weighted_sf_log_ele_z', - 'dgc_neg_weighted_sf_rec_z', - 'dgc_neg_weighted_sf_log_rec_z', - - # tf tuning, pos, df - 'dgc_pos_peak_tf_raw_df', - 'dgc_pos_weighted_tf_raw_df', - 'dgc_pos_weighted_tf_log_raw_df', - 'dgc_pos_weighted_tf_ele_df', - 'dgc_pos_weighted_tf_log_ele_df', - 'dgc_pos_weighted_tf_rec_df', - 'dgc_pos_weighted_tf_log_rec_df', - - # tf tuning, neg, df - 'dgc_neg_peak_tf_raw_df', - 'dgc_neg_weighted_tf_raw_df', - 'dgc_neg_weighted_tf_log_raw_df', - 'dgc_neg_weighted_tf_ele_df', - 'dgc_neg_weighted_tf_log_ele_df', - 'dgc_neg_weighted_tf_rec_df', - 'dgc_neg_weighted_tf_log_rec_df', - - # tf tuning, pos, dff - 'dgc_pos_peak_tf_raw_dff', - 'dgc_pos_weighted_tf_raw_dff', - 'dgc_pos_weighted_tf_log_raw_dff', - 'dgc_pos_weighted_tf_ele_dff', - 'dgc_pos_weighted_tf_log_ele_dff', - 'dgc_pos_weighted_tf_rec_dff', - 'dgc_pos_weighted_tf_log_rec_dff', - - # tf tuning, neg, dff - 'dgc_neg_peak_tf_raw_dff', - 'dgc_neg_weighted_tf_raw_dff', - 
'dgc_neg_weighted_tf_log_raw_dff', - 'dgc_neg_weighted_tf_ele_dff', - 'dgc_neg_weighted_tf_log_ele_dff', - 'dgc_neg_weighted_tf_rec_dff', - 'dgc_neg_weighted_tf_log_rec_dff', - - # tf tuning, pos, zscore - 'dgc_pos_peak_tf_raw_z', - 'dgc_pos_weighted_tf_raw_z', - 'dgc_pos_weighted_tf_log_raw_z', - 'dgc_pos_weighted_tf_ele_z', - 'dgc_pos_weighted_tf_log_ele_z', - 'dgc_pos_weighted_tf_rec_z', - 'dgc_pos_weighted_tf_log_rec_z', - - # tf tuning, neg, zscore - 'dgc_neg_peak_tf_raw_z', - 'dgc_neg_weighted_tf_raw_z', - 'dgc_neg_weighted_tf_log_raw_z', - 'dgc_neg_weighted_tf_ele_z', - 'dgc_neg_weighted_tf_log_ele_z', - 'dgc_neg_weighted_tf_rec_z', - 'dgc_neg_weighted_tf_log_rec_z', -] - -def process_one_nwb_for_multi_thread(inputs): - - nwb_path, params, columns, save_folder, t0, nwb_i, nwb_f_num, is_overwrite = inputs - nwb_fn = os.path.splitext(os.path.split(nwb_path)[1])[0] - - nwb_f = h5py.File(nwb_path, 'r') - - plane_ns = [k for k in nwb_f['processing'].keys() if k[0:16] == 'rois_and_traces_'] - plane_ns = [k[16:] for k in plane_ns] - plane_ns.sort() - # print('total plane number: {}'.format(len(plane_ns))) - - for plane_n in plane_ns: - print('\tt: {:5.0f} minutes, processing {}, {} / {}, {} ...'.format((time.time() - t0) / 60., - nwb_fn, - nwb_i + 1, - nwb_f_num, - plane_n)) - - save_fn = '_'.join(nwb_fn.split('_')[0:2]) + '_' + plane_n + '.xlsx' - save_path = os.path.join(save_folder, save_fn) - if os.path.isfile(save_path): - - if is_overwrite: # overwrite existing xlsx files - print('\t{}, file already exists. Overwrite.'.format(os.path.split(save_path)[1])) - os.remove(save_path) - - - else: # do not overwrite existing xlsx files - print('\t{}, file already exists. Skip.'.format(os.path.split(save_path)[1])) - return - - roi_ns = nwb_f['processing/rois_and_traces_{}/ImageSegmentation/imaging_plane/roi_list'.format(plane_n)].value - roi_ns = [r.encode('utf-8') for r in roi_ns if r[0:4] == 'roi_'] - roi_ns.sort() - - df = pd.DataFrame(np.nan, index=range(len(roi_ns)), columns=columns) - - for roi_i, roi_n in enumerate(roi_ns): - # print('\t\t\troi: {} / {}'.format(roi_i+1, len(roi_ns))) - roi_properties, _, _, _, _, _, _, _, _, _, _, _, _, _ = \ - dt.get_everything_from_roi(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n, params=params) - for rp_name, rp_value in roi_properties.items(): - df.loc[roi_i, rp_name] = rp_value - - with pd.ExcelWriter(save_path, mode='w') as writer: - df.to_excel(writer, sheet_name='sheet1') - - -def run(): - - t0 = time.time() - - nwb_fns = [] - for fn in os.listdir(database_folder): - if fn[-4:] == '.nwb' and date_range[0] <= int(fn[0:6]) <= date_range[1]: - nwb_fns.append(fn) - nwb_fns.sort() - print('\nnwb file list:') - print('\n'.join(nwb_fns)) - - date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - save_folder = os.path.join(curr_folder, '{}_{}'.format(save_folder_n, date_str)) - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - copyfile(os.path.realpath(__file__), os.path.join(save_folder, 'script_log.py')) - - inputs_lst = [(os.path.join(curr_folder, database_folder, nwb_fn), - params, - columns, - save_folder, - t0, - nwb_i, - len(nwb_fns), - is_overwrite) for nwb_i, nwb_fn in enumerate(nwb_fns)] - - print('\nprocessing individual nwb files ...') - p = Pool(process_num) - p.map(process_one_nwb_for_multi_thread, inputs_lst) - # process_one_nwb_for_multi_thread(inputs_lst[0]) - - # print('\nConcatenating individual dataframes ...') - # xlsx_fns = [f for f in os.listdir(os.path.join(curr_folder,save_folder)) if f[-5:] == '.xlsx']
- # xlsx_fns.sort() - # - # dfs = [] - # for xlsx_fn in xlsx_fns: - # curr_df = pd.read_excel(os.path.join(curr_folder, save_folder, xlsx_fn), sheetname='sheet1') - # # print(curr_df) - # dfs.append(curr_df) - # - # big_df = pd.concat(dfs, ignore_index=True) - # - # print('\nsaving ...') - # date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - # save_path = os.path.join(curr_folder, 'big_roi_table_{}.xlsx'.format(date_str)) - # - # if os.path.isfile(save_path): - # with pd.ExcelWriter(save_path, mode='a') as writer: - # big_df.to_excel(writer, sheet_name=params['trace_type']) - # else: - # with pd.ExcelWriter(save_path, mode='w') as writer: - # big_df.to_excel(writer, sheet_name=params['trace_type']) - - print('\ndone!') - - -if __name__ == "__main__": - run() diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0042_get_plane_dfs_single.py b/corticalmapping/scripts/post_recording/analysis_database/old/0042_get_plane_dfs_single.py deleted file mode 100644 index 14defd6..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0042_get_plane_dfs_single.py +++ /dev/null @@ -1,432 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import time -import pandas as pd -import numpy as np -import h5py -import datetime -import corticalmapping.DatabaseTools as dt -from multiprocessing import Pool -from shutil import copyfile - -nwb_fns = ['190510_M439939_110_repacked.nwb', - '190523_M439939_110_repacked.nwb', - '190524_M439939_110_repacked.nwb', - '190509_M439943_110_repacked.nwb', - '190521_M439943_110_repacked.nwb', - '190523_M439943_110_repacked.nwb',] -database_folder = 'nwbs' -save_folder_n = "dataframes" -process_num = 6 -is_overwrite = False - -params = dt.ANALYSIS_PARAMS -params['trace_type'] = 'f_center_subtracted' -params['is_collapse_dire'] = False -params['is_collapse_sf'] = True -params['is_collapse_tf'] = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('pandas version: {}\n'.format(pd.__version__)) - -columns = [ - 'date', - 'mouse_id', - 'plane_n', - 'roi_n', - 'depth', # microns under pia, float - - # roi mask - 'roi_area', # square micron - 'roi_center_row', # center of roi mask in field of view, row - 'roi_center_col', # center of roi mask in field of view, column - - # trace skewness - 'skew_raw', # skewness of unfiltered trace (neuropil subtracted), float - 'skew_fil', # skewness of highpassed trace, float - - # receptive fields - 'rf_pos_on_peak_z', - 'rf_pos_on_area', - 'rf_pos_on_center_alt', - 'rf_pos_on_center_azi', - - 'rf_pos_off_peak_z', - 'rf_pos_off_area', - 'rf_pos_off_center_alt', - 'rf_pos_off_center_azi', - - 'rf_pos_onoff_peak_z', - 'rf_pos_onoff_area', - 'rf_pos_onoff_center_alt', - 'rf_pos_onoff_center_azi', - - 'rf_pos_lsi', - - 'rf_neg_on_peak_z', - 'rf_neg_on_area', - 'rf_neg_on_center_alt', - 'rf_neg_on_center_azi', - - 'rf_neg_off_peak_z', - 'rf_neg_off_area', - 'rf_neg_off_center_alt', - 'rf_neg_off_center_azi', - - 'rf_neg_onoff_peak_z', - 'rf_neg_onoff_area', - 'rf_neg_onoff_center_alt', - 'rf_neg_onoff_center_azi', - - 'rf_neg_lsi', - - # drifting grating peak response - 'dgc_pos_peak_df', - 'dgc_neg_peak_df', - 'dgc_pos_p_ttest_df', - 'dgc_neg_p_ttest_df', - 'dgc_p_anova_df', - - 'dgc_pos_peak_dff', - 'dgc_neg_peak_dff', - 'dgc_pos_p_ttest_dff', - 'dgc_neg_p_ttest_dff', - 'dgc_p_anova_dff', - - 'dgc_pos_peak_z', - 'dgc_neg_peak_z', - 'dgc_pos_p_ttest_z', - 'dgc_neg_p_ttest_z', - 'dgc_p_anova_z', - - # direction / 
orientation tuning, pos, df - 'dgc_pos_osi_raw_df', - 'dgc_pos_dsi_raw_df', - 'dgc_pos_gosi_raw_df', - 'dgc_pos_gdsi_raw_df', - 'dgc_pos_osi_ele_df', - 'dgc_pos_dsi_ele_df', - 'dgc_pos_gosi_ele_df', - 'dgc_pos_gdsi_ele_df', - 'dgc_pos_osi_rec_df', - 'dgc_pos_dsi_rec_df', - 'dgc_pos_gosi_rec_df', - 'dgc_pos_gdsi_rec_df', - 'dgc_pos_peak_dire_raw_df', - 'dgc_pos_vs_dire_raw_df', - 'dgc_pos_vs_dire_ele_df', - 'dgc_pos_vs_dire_rec_df', - - # direction / orientation tuning, neg, df - 'dgc_neg_osi_raw_df', - 'dgc_neg_dsi_raw_df', - 'dgc_neg_gosi_raw_df', - 'dgc_neg_gdsi_raw_df', - 'dgc_neg_osi_ele_df', - 'dgc_neg_dsi_ele_df', - 'dgc_neg_gosi_ele_df', - 'dgc_neg_gdsi_ele_df', - 'dgc_neg_osi_rec_df', - 'dgc_neg_dsi_rec_df', - 'dgc_neg_gosi_rec_df', - 'dgc_neg_gdsi_rec_df', - 'dgc_neg_peak_dire_raw_df', - 'dgc_neg_vs_dire_raw_df', - 'dgc_neg_vs_dire_ele_df', - 'dgc_neg_vs_dire_rec_df', - - # direction / orientation tuning, pos, dff - 'dgc_pos_osi_raw_dff', - 'dgc_pos_dsi_raw_dff', - 'dgc_pos_gosi_raw_dff', - 'dgc_pos_gdsi_raw_dff', - 'dgc_pos_osi_ele_dff', - 'dgc_pos_dsi_ele_dff', - 'dgc_pos_gosi_ele_dff', - 'dgc_pos_gdsi_ele_dff', - 'dgc_pos_osi_rec_dff', - 'dgc_pos_dsi_rec_dff', - 'dgc_pos_gosi_rec_dff', - 'dgc_pos_gdsi_rec_dff', - 'dgc_pos_peak_dire_raw_dff', - 'dgc_pos_vs_dire_raw_dff', - 'dgc_pos_vs_dire_ele_dff', - 'dgc_pos_vs_dire_rec_dff', - - # direction / orientation tuning, neg, dff - 'dgc_neg_osi_raw_dff', - 'dgc_neg_dsi_raw_dff', - 'dgc_neg_gosi_raw_dff', - 'dgc_neg_gdsi_raw_dff', - 'dgc_neg_osi_ele_dff', - 'dgc_neg_dsi_ele_dff', - 'dgc_neg_gosi_ele_dff', - 'dgc_neg_gdsi_ele_dff', - 'dgc_neg_osi_rec_dff', - 'dgc_neg_dsi_rec_dff', - 'dgc_neg_gosi_rec_dff', - 'dgc_neg_gdsi_rec_dff', - 'dgc_neg_peak_dire_raw_dff', - 'dgc_neg_vs_dire_raw_dff', - 'dgc_neg_vs_dire_ele_dff', - 'dgc_neg_vs_dire_rec_dff', - - # direction / orientation tuning, pos, zscore - 'dgc_pos_osi_raw_z', - 'dgc_pos_dsi_raw_z', - 'dgc_pos_gosi_raw_z', - 'dgc_pos_gdsi_raw_z', - 'dgc_pos_osi_ele_z', - 'dgc_pos_dsi_ele_z', - 'dgc_pos_gosi_ele_z', - 'dgc_pos_gdsi_ele_z', - 'dgc_pos_osi_rec_z', - 'dgc_pos_dsi_rec_z', - 'dgc_pos_gosi_rec_z', - 'dgc_pos_gdsi_rec_z', - 'dgc_pos_peak_dire_raw_z', - 'dgc_pos_vs_dire_raw_z', - 'dgc_pos_vs_dire_ele_z', - 'dgc_pos_vs_dire_rec_z', - - # direction / orientation tuning, neg, zscore - 'dgc_neg_osi_raw_z', - 'dgc_neg_dsi_raw_z', - 'dgc_neg_gosi_raw_z', - 'dgc_neg_gdsi_raw_z', - 'dgc_neg_osi_ele_z', - 'dgc_neg_dsi_ele_z', - 'dgc_neg_gosi_ele_z', - 'dgc_neg_gdsi_ele_z', - 'dgc_neg_osi_rec_z', - 'dgc_neg_dsi_rec_z', - 'dgc_neg_gosi_rec_z', - 'dgc_neg_gdsi_rec_z', - 'dgc_neg_peak_dire_raw_z', - 'dgc_neg_vs_dire_raw_z', - 'dgc_neg_vs_dire_ele_z', - 'dgc_neg_vs_dire_rec_z', - - # sf tuning, pos, df - 'dgc_pos_peak_sf_raw_df', - 'dgc_pos_weighted_sf_raw_df', - 'dgc_pos_weighted_sf_log_raw_df', - 'dgc_pos_weighted_sf_ele_df', - 'dgc_pos_weighted_sf_log_ele_df', - 'dgc_pos_weighted_sf_rec_df', - 'dgc_pos_weighted_sf_log_rec_df', - - # sf tuning, neg, df - 'dgc_neg_peak_sf_raw_df', - 'dgc_neg_weighted_sf_raw_df', - 'dgc_neg_weighted_sf_log_raw_df', - 'dgc_neg_weighted_sf_ele_df', - 'dgc_neg_weighted_sf_log_ele_df', - 'dgc_neg_weighted_sf_rec_df', - 'dgc_neg_weighted_sf_log_rec_df', - - # sf tuning, pos, dff - 'dgc_pos_peak_sf_raw_dff', - 'dgc_pos_weighted_sf_raw_dff', - 'dgc_pos_weighted_sf_log_raw_dff', - 'dgc_pos_weighted_sf_ele_dff', - 'dgc_pos_weighted_sf_log_ele_dff', - 'dgc_pos_weighted_sf_rec_dff', - 'dgc_pos_weighted_sf_log_rec_dff', - - # sf tuning, neg, dff - 'dgc_neg_peak_sf_raw_dff', - 
'dgc_neg_weighted_sf_raw_dff', - 'dgc_neg_weighted_sf_log_raw_dff', - 'dgc_neg_weighted_sf_ele_dff', - 'dgc_neg_weighted_sf_log_ele_dff', - 'dgc_neg_weighted_sf_rec_dff', - 'dgc_neg_weighted_sf_log_rec_dff', - - # sf tuning, pos, zscore - 'dgc_pos_peak_sf_raw_z', - 'dgc_pos_weighted_sf_raw_z', - 'dgc_pos_weighted_sf_log_raw_z', - 'dgc_pos_weighted_sf_ele_z', - 'dgc_pos_weighted_sf_log_ele_z', - 'dgc_pos_weighted_sf_rec_z', - 'dgc_pos_weighted_sf_log_rec_z', - - # sf tuning, neg, zscore - 'dgc_neg_peak_sf_raw_z', - 'dgc_neg_weighted_sf_raw_z', - 'dgc_neg_weighted_sf_log_raw_z', - 'dgc_neg_weighted_sf_ele_z', - 'dgc_neg_weighted_sf_log_ele_z', - 'dgc_neg_weighted_sf_rec_z', - 'dgc_neg_weighted_sf_log_rec_z', - - # tf tuning, pos, df - 'dgc_pos_peak_tf_raw_df', - 'dgc_pos_weighted_tf_raw_df', - 'dgc_pos_weighted_tf_log_raw_df', - 'dgc_pos_weighted_tf_ele_df', - 'dgc_pos_weighted_tf_log_ele_df', - 'dgc_pos_weighted_tf_rec_df', - 'dgc_pos_weighted_tf_log_rec_df', - - # tf tuning, neg, df - 'dgc_neg_peak_tf_raw_df', - 'dgc_neg_weighted_tf_raw_df', - 'dgc_neg_weighted_tf_log_raw_df', - 'dgc_neg_weighted_tf_ele_df', - 'dgc_neg_weighted_tf_log_ele_df', - 'dgc_neg_weighted_tf_rec_df', - 'dgc_neg_weighted_tf_log_rec_df', - - # tf tuning, pos, dff - 'dgc_pos_peak_tf_raw_dff', - 'dgc_pos_weighted_tf_raw_dff', - 'dgc_pos_weighted_tf_log_raw_dff', - 'dgc_pos_weighted_tf_ele_dff', - 'dgc_pos_weighted_tf_log_ele_dff', - 'dgc_pos_weighted_tf_rec_dff', - 'dgc_pos_weighted_tf_log_rec_dff', - - # tf tuning, neg, dff - 'dgc_neg_peak_tf_raw_dff', - 'dgc_neg_weighted_tf_raw_dff', - 'dgc_neg_weighted_tf_log_raw_dff', - 'dgc_neg_weighted_tf_ele_dff', - 'dgc_neg_weighted_tf_log_ele_dff', - 'dgc_neg_weighted_tf_rec_dff', - 'dgc_neg_weighted_tf_log_rec_dff', -] - -def process_one_nwb_for_multi_thread(inputs): - - nwb_path, params, columns, save_folder, t0, nwb_i, nwb_f_num, is_overwrite = inputs - nwb_fn = os.path.splitext(os.path.split(nwb_path)[1])[0] - - nwb_f = h5py.File(nwb_path, 'r') - - plane_ns = [k for k in nwb_f['processing'].keys() if k[0:16] == 'rois_and_traces_'] - plane_ns = [k[16:] for k in plane_ns] - plane_ns.sort() - # print('total plane number: {}'.format(len(plane_ns))) - - for plane_n in plane_ns: - print('\tt: {:5.0f} minutes, processing {}, {} / {}, {} ...'.format((time.time() - t0) / 60., - nwb_fn, - nwb_i + 1, - nwb_f_num, - plane_n)) - - save_fn = '_'.join(nwb_fn.split('_')[0:2]) + '_' + plane_n + '.xlsx' - save_path = os.path.join(save_folder, save_fn) - if os.path.isfile(save_path): - - if is_overwrite: # overwrite existing xlsx files - print('\t{}, file already exists. Overwrite.'.format(os.path.split(save_path)[1])) - os.remove(save_path) - - - else: # do not overwrite existing xlsx files - print('\t{}, file already exists. 
Skip.'.format(os.path.split(save_path)[1])) - return - - roi_ns = nwb_f['processing/rois_and_traces_{}/ImageSegmentation/imaging_plane/roi_list'.format(plane_n)].value - roi_ns = [r.encode('utf-8') for r in roi_ns if r[0:4] == 'roi_'] - roi_ns.sort() - - df = pd.DataFrame(np.nan, index=range(len(roi_ns)), columns=columns) - - for roi_i, roi_n in enumerate(roi_ns): - # print('\t\t\troi: {} / {}'.format(roi_i+1, len(roi_ns))) - roi_properties, _, _, _, _, _, _, _, _, _, _, _, _, _ = \ - dt.get_everything_from_roi(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n, params=params) - for rp_name, rp_value in roi_properties.items(): - df.loc[roi_i, rp_name] = rp_value - - with pd.ExcelWriter(save_path, mode='w') as writer: - df.to_excel(writer, sheet_name='sheet1') - - -def run(): - - t0 = time.time() - - # nwb_fns = [] - # for fn in os.listdir(database_folder): - # if fn[-4:] == '.nwb' and date_range[0] <= int(fn[0:6]) <= date_range[1]: - # nwb_fns.append(fn) - # nwb_fns.sort() - print('\nnwb file list:') - print('\n'.join(nwb_fns)) - - date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - save_folder = os.path.join(curr_folder, '{}_{}'.format(save_folder_n, date_str)) - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - copyfile(os.path.realpath(__file__), os.path.join(save_folder, 'script_log.py')) - - inputs_lst = [(os.path.join(curr_folder, database_folder, nwb_fn), - params, - columns, - save_folder, - t0, - nwb_i, - len(nwb_fns), - is_overwrite) for nwb_i, nwb_fn in enumerate(nwb_fns)] - - print('\nprocessing individual nwb files ...') - p = Pool(process_num) - p.map(process_one_nwb_for_multi_thread, inputs_lst) - # process_one_nwb_for_multi_thread(inputs_lst[0]) - - # print('\nConcatenating individual dataframes ...') - # xlsx_fns = [f for f in os.listdir(os.path.join(curr_folder,save_folder)) if f[-5:] == '.xlsx'] - # xlsx_fns.sort() - # - # dfs = [] - # for xlsx_fn in xlsx_fns: - # curr_df = pd.read_excel(os.path.join(curr_folder, save_folder, xlsx_fn), sheetname='sheet1') - # # print(curr_df) - # dfs.append(curr_df) - # - # big_df = pd.concat(dfs, ignore_index=True) - # - # print('\nsaving ...') - # date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') - # save_path = os.path.join(curr_folder, 'big_roi_table_{}.xlsx'.format(date_str)) - # - # if os.path.isfile(save_path): - # with pd.ExcelWriter(save_path, mode='a') as writer: - # big_df.to_excel(writer, sheet_name=params['trace_type']) - # else: - # with pd.ExcelWriter(save_path, mode='w') as writer: - # big_df.to_excel(writer, sheet_name=params['trace_type']) - - print('\ndone!') - - -if __name__ == "__main__": - run() diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0043_get_plane_meta.py b/corticalmapping/scripts/post_recording/analysis_database/old/0043_get_plane_meta.py deleted file mode 100644 index c43a175..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0043_get_plane_meta.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -import datetime -import pandas as pd -import h5py - -df_folder = 'dataframes_190529210731' -save_fn = 'plane_table' -nwb_folder = 'nwbs/small_nwbs' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -fns = [fn for fn in os.listdir(df_folder) if fn[-5:] == '.xlsx'] -print('\n'.join(fns)) - -df = pd.DataFrame(index=range(len(fns)), columns=['date', 'mouse_id', 'plane_n', 'volume_n', - 'depth', 'has_lsn', 'has_dgc']) - -for fn_i, fn in enumerate(fns): - print(fn) - - date = fn.split('_')[0] - mouse_id = 
fn.split('_')[1] - plane_n = fn.split('_')[-1][0:-5] - - nwb_path = os.path.join(curr_folder, nwb_folder, '{}_{}_110_repacked.nwb'.format(date, mouse_id)) - nwb_f = h5py.File(nwb_path, 'r') - depth = nwb_f['processing/rois_and_traces_{}/imaging_depth_micron'.format(plane_n)].value - nwb_f.close() - - df.loc[fn_i] = [date, mouse_id, plane_n, '', depth, True, True] - -df.sort_values(by=['mouse_id', 'date', 'plane_n'], inplace=True) -df.reset_index(inplace=True, drop=True) - -print(df) - -date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') -with pd.ExcelWriter('{}_{}.xlsx'.format(save_fn, date_str), mode='w') as writer: - df.to_excel(writer, sheet_name='sheet1') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0044_reorganize_plane_meta.py b/corticalmapping/scripts/post_recording/analysis_database/old/0044_reorganize_plane_meta.py deleted file mode 100644 index 8ec4300..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0044_reorganize_plane_meta.py +++ /dev/null @@ -1,19 +0,0 @@ -import os -import datetime -import pandas as pd - -meta_fn = "plane_table_190530165648.xlsx" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -meta_df = pd.read_excel(meta_fn, sheet_name='sheet1') - -meta_df.sort_values(by=['mouse_id', 'volume_n', 'depth'], inplace=True) -meta_df.reset_index(inplace=True, drop=True) - -print(meta_df) - -date_str = datetime.datetime.now().strftime('%y%m%d%H%M%S') -with pd.ExcelWriter('plane_table_{}.xlsx'.format(date_str), mode='w') as writer: - meta_df.to_excel(writer, sheet_name='sheet1') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0070_get_rf_maps.py b/corticalmapping/scripts/post_recording/analysis_database/old/0070_get_rf_maps.py deleted file mode 100644 index 483f8bb..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0070_get_rf_maps.py +++ /dev/null @@ -1,247 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import numpy as np -import h5py -import datetime -import pandas as pd -import corticalmapping.DatabaseTools as dt -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.SingleCellAnalysis as sca -from shutil import copyfile - -table_folder = 'dataframes_190529210731' -nwb_folder = 'nwbs' -save_folder = "intermediate_results" - -response_dir = 'pos' -skew_thr = 0.6 -analysis_params = dt.ANALYSIS_PARAMS - -notes = ''' - zscore receptive field maps of all significant rois. Spatiotemporal receptive fields - are first converted to df/f. Then 2-d zscore maps are generated. Then the zscore maps are - 2d filtered to smooth and interpolated into high resolution. After preprocessing, if the - peak value of zscore is larger than the threshold, the receptive field will be considered - as significant. - ''' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(save_folder, 'rf_maps_' + table_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -copyfile(os.path.realpath(__file__), - os.path.join(save_folder, - 'script_log_{}.py'.format(datetime.datetime.now().strftime('%y%m%d%H%M%S')))) - -table_fns = [f for f in os.listdir(table_folder) if f[-5:] == '.xlsx'] -table_fns.sort() -print('number of planes: {}'.format(len(table_fns))) - -for table_i, table_fn in enumerate(table_fns): - print('\nanalyzing {}, {} / {} ... 
'.format(table_fn, table_i+1, len(table_fns))) - - save_fn = table_fn[0:-5] + '_{}.hdf5'.format(response_dir) - - if os.path.isfile(os.path.join(save_folder, save_fn)): - print('\tAlready analyzed. Skip.') - continue - - df = pd.read_excel(os.path.join(table_folder, table_fn), sheetname='sheet1') - subdf = df[np.logical_not(df['rf_pos_on_peak_z'].isnull())] - subdf = subdf[subdf['skew_fil'] >= skew_thr] - - subdf = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) | - (subdf['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])] - - if len(subdf) > 0: - - save_f = h5py.File(os.path.join(save_folder, save_fn)) - - nwb_fn = table_fn[0:-11] + '110_repacked.nwb' - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - plane_n = table_fn[-11:-5] - - # S2 - s2_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s2_df) > 0: - s2_grp = save_f.create_group(table_fn[0:-5] + '_{}_ONOFF'.format(response_dir)) - s1_on_grp = save_f.create_group(table_fn[0:-5] + '_{}_ON'.format(response_dir)) - s1_off_grp = save_f.create_group(table_fn[0:-5] + '_{}_OFF'.format(response_dir)) - - for roi_i, roi_row in s2_df.iterrows(): - - print('\t s2 receptive fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i+1, len(s2_df))) - - if response_dir == 'pos': - _, _, _, srf_on, srf_off, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - elif response_dir == 'neg': - _, _, _, _, _, srf_on, srf_off, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - else: - raise ValueError - - rf_on_mask = rf_on_new.get_weighted_mask() - rf_off_mask = rf_off_new.get_weighted_mask() - rf_onoff_new = sca.SpatialReceptiveField(mask=np.max([rf_on_mask, rf_off_mask], axis=0), - altPos=rf_on_new.altPos, - aziPos=rf_on_new.aziPos, - sign='ON_OFF', - thr=analysis_params['rf_z_thr_abs']) - - curr_s2_grp = s2_grp.create_group(roi_row['roi_n']) - rf_onoff_new.to_h5_group(curr_s2_grp) - - curr_s1_on_grp = s1_on_grp.create_group(roi_row['roi_n']) - rf_on_new.to_h5_group(curr_s1_on_grp) - - curr_s1_off_grp = s1_off_grp.create_group(roi_row['roi_n']) - 
rf_off_new.to_h5_group(curr_s1_off_grp) - - - # positive S1 ON - s1_on_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] < analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s1_on_df) > 0: - - s1_on_grp_n = table_fn[0:-5] + '_{}_ON'.format(response_dir) - - if s1_on_grp_n in save_f.keys(): - s1_on_grp = save_f[s1_on_grp_n] - else: - s1_on_grp = save_f.create_group(s1_on_grp_n) - - for roi_i, roi_row in s1_on_df.iterrows(): - - print('\t s1 ON receptive fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i + 1, len(s1_on_df))) - - if response_dir == 'pos': - _, _, _, srf_on, _, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - elif response_dir == 'neg': - _, _, _, _, _, srf_on, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - else: - print(response_dir) - raise ValueError - - curr_s1_on_grp = s1_on_grp.create_group(roi_row['roi_n']) - rf_on_new.to_h5_group(curr_s1_on_grp) - - - # positive S1 OFF - s1_off_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] < analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s1_off_df) > 0: - - s1_off_grp_n = table_fn[0:-5] + '_{}_OFF'.format(response_dir) - - if s1_off_grp_n in save_f.keys(): - s1_off_grp = save_f[s1_off_grp_n] - else: - s1_off_grp = save_f.create_group(s1_off_grp_n) - - for roi_i, roi_row in s1_off_df.iterrows(): - - print('\t s1 OFF receptive fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i + 1, len(s1_off_df))) - - if response_dir == 'pos': - _, _, _, _, srf_off, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - elif response_dir == 'neg': - - _, _, _, _, _, _, srf_off, _, _, _, _, _, _, \ - _ = dt.get_everything_from_roi(nwb_f=nwb_f, - plane_n=plane_n, - roi_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - else: - raise ValueError - - curr_s1_off_grp = s1_off_grp.create_group(roi_row['roi_n']) - rf_off_new.to_h5_group(curr_s1_off_grp) - - - save_f.close() \ No newline at end of file diff --git 
a/corticalmapping/scripts/post_recording/analysis_database/old/0075_get_rf_maps_axon.py b/corticalmapping/scripts/post_recording/analysis_database/old/0075_get_rf_maps_axon.py deleted file mode 100644 index 3cc53f6..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0075_get_rf_maps_axon.py +++ /dev/null @@ -1,257 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import numpy as np -import h5py -# import datetime -import pandas as pd -import corticalmapping.DatabaseTools as dt -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.SingleCellAnalysis as sca -from shutil import copyfile - -df_fn = 'dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30.csv' -clu_folder = r'intermediate_results\bouton_clustering\AllStimuli_DistanceThr_1.30' -nwb_folder = 'nwbs' -save_folder = "intermediate_results" - -response_dir = 'pos' -skew_thr = 0.6 -analysis_params = dt.ANALYSIS_PARAMS - -notes = ''' - zscore receptive field maps of all significant rois. Spatiotemporal receptive fields - are first converted to df/f. Then 2-d zscore maps are generated. Then the zscore maps are - 2d filtered to smooth and interpolated into high resolution. After preprocessing, if the - peak value of zscore is larger than the threshold, the receptive field will be considered - as significant. - ''' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(save_folder, 'rf_maps_' + os.path.splitext(df_fn)[0]) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -copyfile(os.path.realpath(__file__), - os.path.join(save_folder, - 'script_log.py')) - -df_axon = pd.read_csv(df_fn) -df_axon = df_axon[np.logical_not(df_axon['rf_{}_on_peak_z'.format(response_dir)].isnull())] -df_axon = df_axon[df_axon['skew_fil'] >= skew_thr] - -df_axon = df_axon[(df_axon['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) | - (df_axon['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])] - -plane_df = df_axon[['date', 'mouse_id', 'plane_n']].drop_duplicates().reset_index() -print('total number of planes with lsn data: {}'.format(len(plane_df))) - -for plane_i, plane_row in plane_df.iterrows(): - date = int(plane_row['date']) - mid = plane_row['mouse_id'] - plane_n = plane_row['plane_n'] - - print('processing {}_{}_{}, {} / {}'.format(date, mid, plane_n, plane_i+1, len(plane_df))) - - subdf = df_axon[(df_axon['date'] == date) & - (df_axon['mouse_id'] == mid) & - (df_axon['plane_n'] == plane_n)] - - nwb_fn = '{}_{}_110_repacked.nwb'.format(date, mid) - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - - clu_fn = '{}_{}_{}_axon_grouping.hdf5'.format(date, mid, plane_n) - clu_f = h5py.File(os.path.join(clu_folder, clu_fn), 'r') - - save_fn = '{}_{}_{}_{}.hdf5'.format(date, mid, plane_n, response_dir) - save_f = h5py.File(os.path.join(save_folder, save_fn)) - - # S2 - s2_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s2_df) > 0: - s2_grp = save_f.create_group('{}_{}_{}_{}_ONOFF'.format(date, mid, plane_n, response_dir)) - s1_on_grp = save_f.create_group('{}_{}_{}_{}_ON'.format(date, mid, plane_n, response_dir)) - s1_off_grp = save_f.create_group('{}_{}_{}_{}_OFF'.format(date, mid, plane_n, response_dir)) - - for roi_i, roi_row in s2_df.iterrows(): - - print('\t s2 receptive 
fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i + 1, len(s2_df))) - - if response_dir == 'pos': - _, _, _, srf_on, srf_off, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - elif response_dir == 'neg': - _, _, _, _, _, srf_on, srf_off, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - else: - raise ValueError - - rf_on_mask = rf_on_new.get_weighted_mask() - rf_off_mask = rf_off_new.get_weighted_mask() - rf_onoff_new = sca.SpatialReceptiveField(mask=np.max([rf_on_mask, rf_off_mask], axis=0), - altPos=rf_on_new.altPos, - aziPos=rf_on_new.aziPos, - sign='ON_OFF', - thr=analysis_params['rf_z_thr_abs']) - - curr_s2_grp = s2_grp.create_group(roi_row['roi_n']) - rf_onoff_new.to_h5_group(curr_s2_grp) - - curr_s1_on_grp = s1_on_grp.create_group(roi_row['roi_n']) - rf_on_new.to_h5_group(curr_s1_on_grp) - - curr_s1_off_grp = s1_off_grp.create_group(roi_row['roi_n']) - rf_off_new.to_h5_group(curr_s1_off_grp) - - # positive S1 ON - s1_on_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] < analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s1_on_df) > 0: - - s1_on_grp_n = '{}_{}_{}_{}_ON'.format(date, mid, plane_n, response_dir) - - if s1_on_grp_n in save_f.keys(): - s1_on_grp = save_f[s1_on_grp_n] - else: - s1_on_grp = save_f.create_group(s1_on_grp_n) - - for roi_i, roi_row in s1_on_df.iterrows(): - - print('\t s1 ON receptive fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i + 1, len(s1_on_df))) - - if response_dir == 'pos': - _, _, _, srf_on, _, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - elif response_dir == 'neg': - _, _, _, _, _, srf_on, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - 
params=analysis_params) - - _, rf_on_new = dt.get_rf_properties(srf=srf_on, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - else: - print(response_dir) - raise ValueError - - curr_s1_on_grp = s1_on_grp.create_group(roi_row['roi_n']) - rf_on_new.to_h5_group(curr_s1_on_grp) - - # positive S1 OFF - s1_off_df = subdf[(subdf['rf_{}_on_peak_z'.format(response_dir)] < analysis_params['rf_z_thr_abs']) & - (subdf['rf_{}_off_peak_z'.format(response_dir)] >= analysis_params['rf_z_thr_abs'])].reset_index() - - if len(s1_off_df) > 0: - - s1_off_grp_n = '{}_{}_{}_{}_OFF'.format(date, mid, plane_n, response_dir) - - if s1_off_grp_n in save_f.keys(): - s1_off_grp = save_f[s1_off_grp_n] - else: - s1_off_grp = save_f.create_group(s1_off_grp_n) - - for roi_i, roi_row in s1_off_df.iterrows(): - - print('\t s1 OFF receptive fields, {}, {} / {} ...'.format(roi_row['roi_n'], roi_i + 1, len(s1_off_df))) - - if response_dir == 'pos': - _, _, _, _, srf_off, _, _, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='positive', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - elif response_dir == 'neg': - - _, _, _, _, _, _, srf_off, _, _, _, _, _, _, \ - _ = dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=roi_row['roi_n'], - params=analysis_params) - - _, rf_off_new = dt.get_rf_properties(srf=srf_off, - polarity='negative', - sigma=analysis_params['gaussian_filter_sigma_rf'], - interpolate_rate=analysis_params['interpolate_rate_rf'], - z_thr_abs=analysis_params['rf_z_thr_abs'], - z_thr_rel=analysis_params['rf_z_thr_rel']) - - else: - raise ValueError - - curr_s1_off_grp = s1_off_grp.create_group(roi_row['roi_n']) - rf_off_new.to_h5_group(curr_s1_off_grp) - - save_f.close() - - - - diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0080_plot_plane_rf_center.py b/corticalmapping/scripts/post_recording/analysis_database/old/0080_plot_plane_rf_center.py deleted file mode 100644 index d849ca4..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0080_plot_plane_rf_center.py +++ /dev/null @@ -1,262 +0,0 @@ -import sys -sys.path.extend(['/home/junz/PycharmProjects/corticalmapping']) -import os -import numpy as np -import matplotlib.pyplot as plt -import pandas as pd -from matplotlib.backends.backend_pdf import PdfPages -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.DatabaseTools as dt - -table_name = 'big_roi_table_test.xlsx' -sheet_name = 'f_center_subtracted' - -response_dir = 'pos' -skew_thr = 0.6 -rf_peak_z_thr = 1.6 - -save_fn = 'plane_rf_centers.pdf' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -table_path = os.path.join(curr_folder, table_name) -df = pd.read_excel(table_path, sheetname=sheet_name) -subdf = df[df['skew_fil'] >= skew_thr] - -planes = subdf[['date', 'mouse_id', 'plane_n', 'depth']].drop_duplicates().reset_index() -print(planes) - -pdff = PdfPages(os.path.join('intermediate_figures', save_fn)) - -for plane_i, plane_row in planes.iterrows(): - - print('plotting 
{}_{}_{}, {} / {}'.format( - plane_row['date'], - plane_row['mouse_id'], - plane_row['plane_n'], - plane_i + 1, - len(planes))) - - planedf = subdf[(subdf['date'] == plane_row['date']) & \ - (subdf['mouse_id'] == plane_row['mouse_id']) & \ - (subdf['plane_n'] == plane_row['plane_n']) & \ - (subdf['depth'] == plane_row['depth'])] - - df_or = planedf[planedf['rf_{}_onoff_peak_z'.format(response_dir)] >= rf_peak_z_thr] - df_and = planedf[(planedf['rf_{}_on_peak_z'.format(response_dir)] >= rf_peak_z_thr) & \ - (planedf['rf_{}_off_peak_z'.format(response_dir)] >= rf_peak_z_thr)] - df_on = planedf[planedf['rf_{}_on_peak_z'.format(response_dir)] >= rf_peak_z_thr].drop(df_and.index) - df_off = planedf[planedf['rf_{}_off_peak_z'.format(response_dir)] >= rf_peak_z_thr].drop(df_and.index) - - df_or = df_or.reset_index() - df_and = df_and.reset_index() - df_on = df_on.reset_index() - df_off = df_off.reset_index() - - if len(df_or) == 0: - print('no receptive fields found. skip.') - else: - print('\tnumber of rois with significant rf: {}'.format(len(df_or))) - print('\tnumber of rois with S1 ON: {}'.format(len(df_on))) - print('\tnumber of rois with S1 OFF: {}'.format(len(df_off))) - print('\tnumber of rois with S2 ON/OFF: {}'.format(len(df_and))) - - f = plt.figure(figsize=(11, 8.5)) - - f.suptitle('{}_{}_{}; {} um'.format(plane_row['date'], - plane_row['mouse_id'], - plane_row['plane_n'], - plane_row['depth'])) - - #=============================RF center============================================= - # ON/OFF - alt_min = int(np.min(df_or['rf_{}_onoff_center_alt'.format(response_dir)]) - 5) - alt_max = int(np.max(df_or['rf_{}_onoff_center_alt'.format(response_dir)]) + 5) - azi_min = int(np.min(df_or['rf_{}_onoff_center_azi'.format(response_dir)]) - 5) - azi_max = int(np.max(df_or['rf_{}_onoff_center_azi'.format(response_dir)]) + 5) - ax_or_scatter = f.add_subplot(4, 5, 1) - ax_or_scatter.plot(df_or['rf_{}_onoff_center_azi'.format(response_dir)], - df_or['rf_{}_onoff_center_alt'.format(response_dir)], - '.', color='#888888') - ax_or_scatter.set_xlim([azi_min, azi_max]) - ax_or_scatter.set_ylim([alt_min, alt_max]) - ax_or_scatter.set_title('RF center') - - # ON - ax_on_scatter = f.add_subplot(4, 5, 6) - ax_on_scatter.plot(df_off['rf_{}_off_center_azi'.format(response_dir)], - df_off['rf_{}_off_center_alt'.format(response_dir)], - '.', color='#aaaaaa') - ax_on_scatter.plot(df_on['rf_{}_on_center_azi'.format(response_dir)], - df_on['rf_{}_on_center_alt'.format(response_dir)], - '.', color='#ff0000') - ax_on_scatter.set_xlim([azi_min, azi_max]) - ax_on_scatter.set_ylim([alt_min, alt_max]) - - # OFF - ax_off_scatter = f.add_subplot(4, 5, 11) - ax_off_scatter.plot(df_on['rf_{}_on_center_azi'.format(response_dir)], - df_on['rf_{}_on_center_alt'.format(response_dir)], - '.', color='#aaaaaa') - ax_off_scatter.plot(df_off['rf_{}_off_center_azi'.format(response_dir)], - df_off['rf_{}_off_center_alt'.format(response_dir)], - '.', color='#0000ff') - ax_off_scatter.set_xlim([azi_min, azi_max]) - ax_off_scatter.set_ylim([alt_min, alt_max]) - - # ON-OFF - ax_and_scatter = f.add_subplot(4, 5, 16) - ax_and_scatter.plot(df_and['rf_{}_on_center_azi'.format(response_dir)], - df_and['rf_{}_on_center_alt'.format(response_dir)], - '.', color='#ff0000') - ax_and_scatter.plot(df_and['rf_{}_off_center_azi'.format(response_dir)], - df_and['rf_{}_off_center_alt'.format(response_dir)], - '.', color='#0000ff') - ax_and_scatter.set_xlim([azi_min, azi_max]) - ax_and_scatter.set_ylim([alt_min, alt_max]) - - # 
=============================pairwise distance============================================= - dis_or = ia.pairwise_distance(df_or[['rf_{}_onoff_center_azi'.format(response_dir), - 'rf_{}_onoff_center_alt'.format(response_dir)]].values) - ax_or_pd = f.add_subplot(4, 5, 2) - if len(dis_or) > 0: - ax_or_pd.hist(dis_or, range=[0, 80], bins=20, facecolor='#aaaaaa', edgecolor='none') - ax_or_pd.get_yaxis().set_ticks([]) - ax_or_pd.set_title('pw RF dis') # pairwise receptive field center distance - - dis_on = ia.pairwise_distance(df_on[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values) - ax_on_pd = f.add_subplot(4, 5, 7) - if len(dis_on) > 0: - ax_on_pd.hist(dis_on, range=[0, 80], bins=20, facecolor='#ff0000', edgecolor='none') - ax_on_pd.get_yaxis().set_ticks([]) - - dis_off = ia.pairwise_distance(df_off[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values) - ax_off_pd = f.add_subplot(4, 5, 12) - if len(dis_off) > 0: - ax_off_pd.hist(dis_off, range=[0, 80], bins=20, facecolor='#0000ff', edgecolor='none') - ax_off_pd.get_yaxis().set_ticks([]) - - dis_and_on = ia.pairwise_distance(df_and[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values) - dis_and_off = ia.pairwise_distance(df_and[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values) - ax_and_pd = f.add_subplot(4, 5, 17) - if len(dis_and_on) > 0: - ax_and_pd.hist(dis_and_on, range=[0, 80], bins=20, facecolor='#ff0000', edgecolor='none', alpha=0.5) - ax_and_pd.hist(dis_and_off, range=[0, 80], bins=20, facecolor='#0000ff', edgecolor='none', alpha=0.5) - ax_and_pd.get_yaxis().set_ticks([]) - - # =============================pairwise magnification============================================= - mag_or = ia.pairwise_magnification(df_or[['rf_{}_onoff_center_azi'.format(response_dir), - 'rf_{}_onoff_center_alt'.format(response_dir)]].values, - df_or[['roi_center_col', 'roi_center_row']].values) - ax_or_pm = f.add_subplot(4, 5, 3) - if len(mag_or) > 0: - mag_or = 0.00035 / mag_or # 0.35 um per pixel - ax_or_pm.hist(mag_or, range=[0, 0.2], bins=20, facecolor='#aaaaaa', edgecolor='none') - ax_or_pm.get_yaxis().set_ticks([]) - ax_or_pm.set_title('mm/deg') # pairwise magnification - # - mag_on = ia.pairwise_magnification(df_on[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values, - df_on[['roi_center_col', 'roi_center_row']].values) - ax_on_pm = f.add_subplot(4, 5, 8) - if len(mag_on) > 0: - mag_on = 0.00035 / mag_on # 0.35 um per pixel - ax_on_pm.hist(mag_on, range=[0, 0.2], bins=20, facecolor='#ff0000', edgecolor='none') - ax_on_pm.get_yaxis().set_ticks([]) - - mag_off = ia.pairwise_magnification(df_off[['rf_{}_off_center_azi'.format(response_dir), - 'rf_{}_off_center_alt'.format(response_dir)]].values, - df_off[['roi_center_col', 'roi_center_row']].values) - ax_off_pm = f.add_subplot(4, 5, 13) - if len(mag_off) > 0: - mag_off = 0.00035 / mag_off # 0.35 um per pixel - ax_off_pm.hist(mag_off, range=[0, 0.2], bins=20, facecolor='#0000ff', edgecolor='none') - ax_off_pm.get_yaxis().set_ticks([]) - - mag_and_on = ia.pairwise_magnification(df_and[['rf_{}_on_center_azi'.format(response_dir), - 'rf_{}_on_center_alt'.format(response_dir)]].values, - df_and[['roi_center_col', 'roi_center_row']].values) - - mag_and_off = ia.pairwise_magnification(df_and[['rf_{}_off_center_azi'.format(response_dir), - 
'rf_{}_off_center_alt'.format(response_dir)]].values, - df_and[['roi_center_col', 'roi_center_row']].values) - - ax_and_pm = f.add_subplot(4, 5, 18) - if len(mag_and_on) > 0: - mag_and_on = 0.00035 / mag_and_on # 0.35 um per pixel - mag_and_off = 0.00035 / mag_and_off # 0.35 um per pixel - ax_and_pm.hist(mag_and_on, range=[0, 0.2], bins=20, facecolor='#ff0000', edgecolor='none', alpha=0.5,) - ax_and_pm.hist(mag_and_off, range=[0, 0.2], bins=20, facecolor='#0000ff', edgecolor='none', alpha=0.5,) - ax_and_pm.get_yaxis().set_ticks([]) - - # =============================azi alt spatial distribution============================================= - ax_alt_or = f.add_subplot(4, 5, 4) - ax_alt_or.set_title('altitude') - ax_azi_or = f.add_subplot(4, 5, 5) - ax_azi_or.set_title('azimuth') - if len(df_or) > 0: - dt.plot_roi_retinotopy(coords_rf=df_or[['rf_{}_onoff_center_alt'.format(response_dir), - 'rf_{}_onoff_center_azi'.format(response_dir)]].values, - coords_roi=df_or[['roi_center_row', 'roi_center_col']].values, - ax_alt=ax_alt_or, - ax_azi=ax_azi_or, - cmap='viridis', - canvas_shape=(512, 512), - edgecolors='#000000', - linewidth=0.5) - else: - ax_alt_or.set_xticks([]) - ax_alt_or.set_yticks([]) - ax_azi_or.set_xticks([]) - ax_azi_or.set_yticks([]) - - ax_alt_on = f.add_subplot(4, 5, 9) - ax_azi_on = f.add_subplot(4, 5, 10) - if len(df_on) > 0: - dt.plot_roi_retinotopy(coords_rf=df_on[['rf_{}_on_center_alt'.format(response_dir), - 'rf_{}_on_center_azi'.format(response_dir)]].values, - coords_roi=df_on[['roi_center_row', 'roi_center_col']].values, - ax_alt=ax_alt_on, - ax_azi=ax_azi_on, - cmap='viridis', - canvas_shape=(512, 512), - edgecolors='#000000', - linewidth=0.5) - else: - ax_alt_on.set_xticks([]) - ax_alt_on.set_yticks([]) - ax_azi_on.set_xticks([]) - ax_azi_on.set_yticks([]) - - ax_alt_off = f.add_subplot(4, 5, 14) - ax_azi_off = f.add_subplot(4, 5, 15) - if len(df_off) > 0: - dt.plot_roi_retinotopy(coords_rf=df_off[['rf_{}_off_center_alt'.format(response_dir), - 'rf_{}_off_center_azi'.format(response_dir)]].values, - coords_roi=df_off[['roi_center_row', 'roi_center_col']].values, - ax_alt=ax_alt_off, - ax_azi=ax_azi_off, - cmap='viridis', - canvas_shape=(512, 512), - edgecolors='#000000', - linewidth=0.5) - else: - ax_alt_off.set_xticks([]) - ax_alt_off.set_yticks([]) - ax_azi_off.set_xticks([]) - ax_azi_off.set_yticks([]) - - # plt.tight_layout() - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - -pdff.close() - -print('for debug ...') diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0090_group_boutons_into_axons.py b/corticalmapping/scripts/post_recording/analysis_database/old/0090_group_boutons_into_axons.py deleted file mode 100644 index d34cbc9..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0090_group_boutons_into_axons.py +++ /dev/null @@ -1,70 +0,0 @@ -import os -import numpy as np -import h5py -import corticalmapping.DatabaseTools as dt -import corticalmapping.SingleCellAnalysis as sca -import matplotlib.pyplot as plt -import pandas as pd - - -nwb_folder = "nwbs" -save_folder = r"intermediate_results\bouton_clustering" -trace_type = 'f_center_subtracted' -trace_window = 'UniformContrast' # 'AllStimuli', 'UniformContrast', 'LocallySparseNoise', or 'DriftingGratingSpont' - -# BoutonClassifier parameters -skew_filter_sigma = 5. -skew_thr = 0.6 -lowpass_sigma=0.1 -detrend_sigma=3. -event_std_thr = 3. -peri_event_dur = (-1., 3.) -corr_len_thr = 30. -corr_abs_thr = 0.7 -corr_std_thr = 3. 
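BoutonClassifier is implemented in DatabaseTools and is not part of this diff; the sketch below only illustrates the correlate-then-cluster idea its parameters point at: compute pairwise distances between bouton activity traces, build a hierarchical linkage tree, and cut it at a fixed distance threshold. The synthetic traces and the 1 - correlation distance are assumptions for illustration:

import numpy as np
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
traces = rng.standard_normal((10, 500))  # 10 bouton traces, 500 samples each

dmat = pdist(traces, metric='correlation')  # condensed 1 - Pearson r distances
tree = linkage(dmat, method='weighted')
axon_labels = fcluster(tree, t=1.3, criterion='distance')
print(axon_labels)  # boutons sharing a label form one putative axon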
-is_cosine_similarity = False -distance_metric = 'euclidean' -linkage_method = 'weighted' -distance_thr = 1.3 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(save_folder, '{}_DistanceThr_{:.2f}'.format(trace_window, distance_thr)) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fns = [f for f in os.listdir(nwb_folder) if f[-4:] == '.nwb'] -nwb_fns.sort() - -bc = dt.BoutonClassifier(skew_filter_sigma=skew_filter_sigma, - skew_thr=skew_thr, - lowpass_sigma=lowpass_sigma, - detrend_sigma=detrend_sigma, - event_std_thr=event_std_thr, - peri_event_dur=peri_event_dur, - corr_len_thr=corr_len_thr, - corr_abs_thr=corr_abs_thr, - corr_std_thr=corr_std_thr, - is_cosine_similarity=is_cosine_similarity, - distance_metric=distance_metric, - linkage_method=linkage_method, - distance_thr=distance_thr) - -for nwb_fi, nwb_fn in enumerate(nwb_fns): - - print('processing {}, {}/{}'.format(nwb_fn, nwb_fi + 1, len(nwb_fns))) - - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - - plane_ns = dt.get_plane_ns(nwb_f=nwb_f) - plane_ns.sort() - - for plane_i, plane_n in enumerate(plane_ns): - - print('\n\t{}, {}/{}'.format(plane_n, plane_i + 1, len(plane_ns))) - - bc.process_plane(nwb_f=nwb_f, save_folder=save_folder, plane_n=plane_n, trace_type=trace_type, - trace_window=trace_window) - - nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0100_add_axon_strf.py b/corticalmapping/scripts/post_recording/analysis_database/old/0100_add_axon_strf.py deleted file mode 100644 index 585ea47..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0100_add_axon_strf.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import h5py -import corticalmapping.DatabaseTools as dt - -nwb_folder = "nwbs" -clu_folder = r"intermediate_results\bouton_clustering\AllStimuli_DistanceThr_1.30" -strf_t_win = [-0.5, 2.] 
- -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -clu_fns = [f for f in os.listdir(clu_folder) if f[-5:] == '.hdf5'] -clu_fns.sort() -print('total number of planes: {}'.format(len(clu_fns))) - -for clu_fi, clu_fn in enumerate(clu_fns): - - date, mid, plane_n, _, _ = clu_fn.split('_') - - print('processing {}_{}_{}, {} / {}'.format(date, mid, plane_n, clu_fi + 1, len(clu_fns))) - - nwb_fn = '{}_{}_110_repacked.nwb'.format(date, mid) - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - - clu_f = h5py.File(os.path.join(clu_folder, clu_fn)) - - bc = dt.BoutonClassifier() - bc.add_axon_strf(nwb_f=nwb_f, clu_f=clu_f, plane_n=plane_n, t_win=strf_t_win, verbose=False) - - nwb_f.close() - clu_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0110_add_axon_dgcrm.py b/corticalmapping/scripts/post_recording/analysis_database/old/0110_add_axon_dgcrm.py deleted file mode 100644 index 68c304e..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0110_add_axon_dgcrm.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import h5py -import corticalmapping.DatabaseTools as dt - -nwb_folder = "nwbs" -clu_folder = r"intermediate_results\bouton_clustering\AllStimuli_DistanceThr_1.30" -dgcrm_t_win = [-1., 2.5] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -clu_fns = [f for f in os.listdir(clu_folder) if f[-5:] == '.hdf5'] -clu_fns.sort() -print('total number of planes: {}'.format(len(clu_fns))) - -for clu_fi, clu_fn in enumerate(clu_fns): - - date, mid, plane_n, _, _ = clu_fn.split('_') - - print('processing {}_{}_{}, {} / {}'.format(date, mid, plane_n, clu_fi + 1, len(clu_fns))) - - nwb_fn = '{}_{}_110_repacked.nwb'.format(date, mid) - nwb_f = h5py.File(os.path.join(nwb_folder, nwb_fn), 'r') - - clu_f = h5py.File(os.path.join(clu_folder, clu_fn)) - - bc = dt.BoutonClassifier() - bc.add_axon_dgcrm(nwb_f=nwb_f, clu_f=clu_f, plane_n=plane_n, t_win=dgcrm_t_win, verbose=False) - - nwb_f.close() - clu_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0120_get_axon_plane_df.py b/corticalmapping/scripts/post_recording/analysis_database/old/0120_get_axon_plane_df.py deleted file mode 100644 index 3df2aa4..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0120_get_axon_plane_df.py +++ /dev/null @@ -1,413 +0,0 @@ -import os -import corticalmapping.DatabaseTools as dt -import time -import pandas as pd -import numpy as np -import h5py -from multiprocessing import Pool -import shutil - -date_range = [180301, 190601] -nwb_folder = 'nwbs' -df_folder = r'other_dataframes\dataframes_190530171338' -clu_folder = r'intermediate_results\bouton_clustering\AllStimuli_DistanceThr_1.30' -process_num = 6 -is_overwrite = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -print('pandas version: {}\n'.format(pd.__version__)) - -columns = [ - 'date', - 'mouse_id', - 'plane_n', - 'roi_n', - 'depth', # microns under pia, float - - # roi mask - 'roi_area', # square micron - 'roi_center_row', # center of roi mask in field of view, row - 'roi_center_col', # center of roi mask in field of view, column - - # trace skewness - 'skew_raw', # skewness of unfiltered trace (neuropil subtracted), float - 'skew_fil', # skewness of highpassed trace, float - - # receptive fields - 'rf_pos_on_peak_z', - 'rf_pos_on_area', - 'rf_pos_on_center_alt', - 'rf_pos_on_center_azi', - 
- 'rf_pos_off_peak_z', - 'rf_pos_off_area', - 'rf_pos_off_center_alt', - 'rf_pos_off_center_azi', - - 'rf_pos_onoff_peak_z', - 'rf_pos_onoff_area', - 'rf_pos_onoff_center_alt', - 'rf_pos_onoff_center_azi', - - 'rf_pos_lsi', - - 'rf_neg_on_peak_z', - 'rf_neg_on_area', - 'rf_neg_on_center_alt', - 'rf_neg_on_center_azi', - - 'rf_neg_off_peak_z', - 'rf_neg_off_area', - 'rf_neg_off_center_alt', - 'rf_neg_off_center_azi', - - 'rf_neg_onoff_peak_z', - 'rf_neg_onoff_area', - 'rf_neg_onoff_center_alt', - 'rf_neg_onoff_center_azi', - - 'rf_neg_lsi', - - # drifting grating peak response - 'dgc_pos_peak_df', - 'dgc_neg_peak_df', - 'dgc_pos_p_ttest_df', - 'dgc_neg_p_ttest_df', - 'dgc_p_anova_df', - - 'dgc_pos_peak_dff', - 'dgc_neg_peak_dff', - 'dgc_pos_p_ttest_dff', - 'dgc_neg_p_ttest_dff', - 'dgc_p_anova_dff', - - 'dgc_pos_peak_z', - 'dgc_neg_peak_z', - 'dgc_pos_p_ttest_z', - 'dgc_neg_p_ttest_z', - 'dgc_p_anova_z', - - # direction / orientation tuning, pos, df - 'dgc_pos_osi_raw_df', - 'dgc_pos_dsi_raw_df', - 'dgc_pos_gosi_raw_df', - 'dgc_pos_gdsi_raw_df', - 'dgc_pos_osi_ele_df', - 'dgc_pos_dsi_ele_df', - 'dgc_pos_gosi_ele_df', - 'dgc_pos_gdsi_ele_df', - 'dgc_pos_osi_rec_df', - 'dgc_pos_dsi_rec_df', - 'dgc_pos_gosi_rec_df', - 'dgc_pos_gdsi_rec_df', - 'dgc_pos_peak_dire_raw_df', - 'dgc_pos_vs_dire_raw_df', - 'dgc_pos_vs_dire_ele_df', - 'dgc_pos_vs_dire_rec_df', - - # direction / orientation tuning, neg, df - 'dgc_neg_osi_raw_df', - 'dgc_neg_dsi_raw_df', - 'dgc_neg_gosi_raw_df', - 'dgc_neg_gdsi_raw_df', - 'dgc_neg_osi_ele_df', - 'dgc_neg_dsi_ele_df', - 'dgc_neg_gosi_ele_df', - 'dgc_neg_gdsi_ele_df', - 'dgc_neg_osi_rec_df', - 'dgc_neg_dsi_rec_df', - 'dgc_neg_gosi_rec_df', - 'dgc_neg_gdsi_rec_df', - 'dgc_neg_peak_dire_raw_df', - 'dgc_neg_vs_dire_raw_df', - 'dgc_neg_vs_dire_ele_df', - 'dgc_neg_vs_dire_rec_df', - - # direction / orientation tuning, pos, dff - 'dgc_pos_osi_raw_dff', - 'dgc_pos_dsi_raw_dff', - 'dgc_pos_gosi_raw_dff', - 'dgc_pos_gdsi_raw_dff', - 'dgc_pos_osi_ele_dff', - 'dgc_pos_dsi_ele_dff', - 'dgc_pos_gosi_ele_dff', - 'dgc_pos_gdsi_ele_dff', - 'dgc_pos_osi_rec_dff', - 'dgc_pos_dsi_rec_dff', - 'dgc_pos_gosi_rec_dff', - 'dgc_pos_gdsi_rec_dff', - 'dgc_pos_peak_dire_raw_dff', - 'dgc_pos_vs_dire_raw_dff', - 'dgc_pos_vs_dire_ele_dff', - 'dgc_pos_vs_dire_rec_dff', - - # direction / orientation tuning, neg, dff - 'dgc_neg_osi_raw_dff', - 'dgc_neg_dsi_raw_dff', - 'dgc_neg_gosi_raw_dff', - 'dgc_neg_gdsi_raw_dff', - 'dgc_neg_osi_ele_dff', - 'dgc_neg_dsi_ele_dff', - 'dgc_neg_gosi_ele_dff', - 'dgc_neg_gdsi_ele_dff', - 'dgc_neg_osi_rec_dff', - 'dgc_neg_dsi_rec_dff', - 'dgc_neg_gosi_rec_dff', - 'dgc_neg_gdsi_rec_dff', - 'dgc_neg_peak_dire_raw_dff', - 'dgc_neg_vs_dire_raw_dff', - 'dgc_neg_vs_dire_ele_dff', - 'dgc_neg_vs_dire_rec_dff', - - # direction / orientation tuning, pos, zscore - 'dgc_pos_osi_raw_z', - 'dgc_pos_dsi_raw_z', - 'dgc_pos_gosi_raw_z', - 'dgc_pos_gdsi_raw_z', - 'dgc_pos_osi_ele_z', - 'dgc_pos_dsi_ele_z', - 'dgc_pos_gosi_ele_z', - 'dgc_pos_gdsi_ele_z', - 'dgc_pos_osi_rec_z', - 'dgc_pos_dsi_rec_z', - 'dgc_pos_gosi_rec_z', - 'dgc_pos_gdsi_rec_z', - 'dgc_pos_peak_dire_raw_z', - 'dgc_pos_vs_dire_raw_z', - 'dgc_pos_vs_dire_ele_z', - 'dgc_pos_vs_dire_rec_z', - - # direction / orientation tuning, neg, zscore - 'dgc_neg_osi_raw_z', - 'dgc_neg_dsi_raw_z', - 'dgc_neg_gosi_raw_z', - 'dgc_neg_gdsi_raw_z', - 'dgc_neg_osi_ele_z', - 'dgc_neg_dsi_ele_z', - 'dgc_neg_gosi_ele_z', - 'dgc_neg_gdsi_ele_z', - 'dgc_neg_osi_rec_z', - 'dgc_neg_dsi_rec_z', - 'dgc_neg_gosi_rec_z', - 'dgc_neg_gdsi_rec_z', - 
'dgc_neg_peak_dire_raw_z', - 'dgc_neg_vs_dire_raw_z', - 'dgc_neg_vs_dire_ele_z', - 'dgc_neg_vs_dire_rec_z', - - # sf tuning, pos, df - 'dgc_pos_peak_sf_raw_df', - 'dgc_pos_weighted_sf_raw_df', - 'dgc_pos_weighted_sf_log_raw_df', - 'dgc_pos_weighted_sf_ele_df', - 'dgc_pos_weighted_sf_log_ele_df', - 'dgc_pos_weighted_sf_rec_df', - 'dgc_pos_weighted_sf_log_rec_df', - - # sf tuning, neg, df - 'dgc_neg_peak_sf_raw_df', - 'dgc_neg_weighted_sf_raw_df', - 'dgc_neg_weighted_sf_log_raw_df', - 'dgc_neg_weighted_sf_ele_df', - 'dgc_neg_weighted_sf_log_ele_df', - 'dgc_neg_weighted_sf_rec_df', - 'dgc_neg_weighted_sf_log_rec_df', - - # sf tuning, pos, dff - 'dgc_pos_peak_sf_raw_dff', - 'dgc_pos_weighted_sf_raw_dff', - 'dgc_pos_weighted_sf_log_raw_dff', - 'dgc_pos_weighted_sf_ele_dff', - 'dgc_pos_weighted_sf_log_ele_dff', - 'dgc_pos_weighted_sf_rec_dff', - 'dgc_pos_weighted_sf_log_rec_dff', - - # sf tuning, neg, dff - 'dgc_neg_peak_sf_raw_dff', - 'dgc_neg_weighted_sf_raw_dff', - 'dgc_neg_weighted_sf_log_raw_dff', - 'dgc_neg_weighted_sf_ele_dff', - 'dgc_neg_weighted_sf_log_ele_dff', - 'dgc_neg_weighted_sf_rec_dff', - 'dgc_neg_weighted_sf_log_rec_dff', - - # sf tuning, pos, zscore - 'dgc_pos_peak_sf_raw_z', - 'dgc_pos_weighted_sf_raw_z', - 'dgc_pos_weighted_sf_log_raw_z', - 'dgc_pos_weighted_sf_ele_z', - 'dgc_pos_weighted_sf_log_ele_z', - 'dgc_pos_weighted_sf_rec_z', - 'dgc_pos_weighted_sf_log_rec_z', - - # sf tuning, neg, zscore - 'dgc_neg_peak_sf_raw_z', - 'dgc_neg_weighted_sf_raw_z', - 'dgc_neg_weighted_sf_log_raw_z', - 'dgc_neg_weighted_sf_ele_z', - 'dgc_neg_weighted_sf_log_ele_z', - 'dgc_neg_weighted_sf_rec_z', - 'dgc_neg_weighted_sf_log_rec_z', - - # tf tuning, pos, df - 'dgc_pos_peak_tf_raw_df', - 'dgc_pos_weighted_tf_raw_df', - 'dgc_pos_weighted_tf_log_raw_df', - 'dgc_pos_weighted_tf_ele_df', - 'dgc_pos_weighted_tf_log_ele_df', - 'dgc_pos_weighted_tf_rec_df', - 'dgc_pos_weighted_tf_log_rec_df', - - # tf tuning, neg, df - 'dgc_neg_peak_tf_raw_df', - 'dgc_neg_weighted_tf_raw_df', - 'dgc_neg_weighted_tf_log_raw_df', - 'dgc_neg_weighted_tf_ele_df', - 'dgc_neg_weighted_tf_log_ele_df', - 'dgc_neg_weighted_tf_rec_df', - 'dgc_neg_weighted_tf_log_rec_df', - - # tf tuning, pos, dff - 'dgc_pos_peak_tf_raw_dff', - 'dgc_pos_weighted_tf_raw_dff', - 'dgc_pos_weighted_tf_log_raw_dff', - 'dgc_pos_weighted_tf_ele_dff', - 'dgc_pos_weighted_tf_log_ele_dff', - 'dgc_pos_weighted_tf_rec_dff', - 'dgc_pos_weighted_tf_log_rec_dff', - - # tf tuning, neg, dff - 'dgc_neg_peak_tf_raw_dff', - 'dgc_neg_weighted_tf_raw_dff', - 'dgc_neg_weighted_tf_log_raw_dff', - 'dgc_neg_weighted_tf_ele_dff', - 'dgc_neg_weighted_tf_log_ele_dff', - 'dgc_neg_weighted_tf_rec_dff', - 'dgc_neg_weighted_tf_log_rec_dff', - - # tf tuning, pos, zscore - 'dgc_pos_peak_tf_raw_z', - 'dgc_pos_weighted_tf_raw_z', - 'dgc_pos_weighted_tf_log_raw_z', - 'dgc_pos_weighted_tf_ele_z', - 'dgc_pos_weighted_tf_log_ele_z', - 'dgc_pos_weighted_tf_rec_z', - 'dgc_pos_weighted_tf_log_rec_z', - - # tf tuning, neg, zscore - 'dgc_neg_peak_tf_raw_z', - 'dgc_neg_weighted_tf_raw_z', - 'dgc_neg_weighted_tf_log_raw_z', - 'dgc_neg_weighted_tf_ele_z', - 'dgc_neg_weighted_tf_log_ele_z', - 'dgc_neg_weighted_tf_rec_z', - 'dgc_neg_weighted_tf_log_rec_z', -] - -def process_one_nwb_for_multi_thread(inputs): - - nwb_path, df_folder, clu_folder, params, columns, save_folder, t0, nwb_i, nwb_f_num, is_overwrite = inputs - - nwb_fn = os.path.splitext(os.path.split(nwb_path)[1])[0] - - date, mid, _, _ = nwb_fn.split('_') - - nwb_f = h5py.File(nwb_path, 'r') - plane_ns = dt.get_plane_ns(nwb_f=nwb_f) 
- plane_ns.sort() - - for plane_n in plane_ns: - print('\tt: {:5.0f} minutes, processing {}, {} / {}, {} ...'.format((time.time() - t0) / 60., - nwb_fn, - nwb_i + 1, - nwb_f_num, - plane_n)) - - roi_df_fn = '{}_{}_{}.csv'.format(date, mid, plane_n) - roi_df = pd.read_csv(os.path.join(df_folder, roi_df_fn)) - - clu_fn = '{}_{}_{}_axon_grouping.hdf5'.format(date, mid, plane_n) - clu_f = h5py.File(os.path.join(clu_folder, clu_fn), 'r') - - axon_ns = clu_f['axons'].keys() - axon_ns.sort() - - axon_df = pd.DataFrame(np.nan, index=range(len(axon_ns)), columns=columns) - - for axon_i, axon_n in enumerate(axon_ns): - - roi_lst = clu_f['axons/{}'.format(axon_n)].value - - if len(roi_lst) == 1: - curr_roi_df = roi_df[roi_df['roi_n'] == roi_lst[0]].reset_index() - for col in columns: - axon_df.loc[axon_i, col] = curr_roi_df.loc[0, col] - axon_df.loc[axon_i, 'roi_n'] = axon_n - else: - axon_properties, _, _, _, _, _, _, _, _, _, _, _, _, _ = \ - dt.get_everything_from_axon(nwb_f=nwb_f, - clu_f=clu_f, - plane_n=plane_n, - axon_n=axon_n, - params=params, - verbose=False) - for rp_name, rp_value in axon_properties.items(): - axon_df.loc[axon_i, rp_name] = rp_value - - save_path = os.path.join(save_folder, '{}_{}_{}.csv'.format(date, mid, plane_n)) - - if os.path.isfile(save_path): - if is_overwrite: - os.remove(save_path) - axon_df.to_csv(save_path) - else: - raise IOError('Axon dataframe file already exists. \npath: {}'.format(save_path)) - else: - axon_df.to_csv(save_path) - -def run(): - - t0 = time.time() - - with open(os.path.join(df_folder, 'script_log.py')) as script_f: - script = script_f.readlines() - - for line in script: - if line[0:6] == 'params': - exec(line) - - nwb_fns = [] - for fn in os.listdir(os.path.realpath(nwb_folder)): - if fn[-4:] == '.nwb' and date_range[0] <= int(fn[0:6]) <= date_range[1]: - nwb_fns.append(fn) - nwb_fns.sort() - print('\nnwb file list:') - print('\n'.join(nwb_fns)) - - save_folder = df_folder + '_axon_' + os.path.split(clu_folder)[1] - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - shutil.copyfile(os.path.realpath(__file__), os.path.join(save_folder, 'script_log.py')) - - inputs_lst = [(os.path.join(curr_folder, nwb_folder, nwb_fn), - os.path.realpath(df_folder), - os.path.realpath(clu_folder), - params, - columns, - save_folder, - t0, - nwb_i, - len(nwb_fns), - is_overwrite) for nwb_i, nwb_fn in enumerate(nwb_fns)] - - print('\nprocessing individual nwb files ...') - p = Pool(process_num) - p.map(process_one_nwb_for_multi_thread, inputs_lst) - - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0130_get_overall_csv.py b/corticalmapping/scripts/post_recording/analysis_database/old/0130_get_overall_csv.py deleted file mode 100644 index 5c70614..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0130_get_overall_csv.py +++ /dev/null @@ -1,54 +0,0 @@ -import os -import pandas as pd - -df_folder = 'other_dataframes' -# df_fn = 'dataframes_190530171338' -# df_fn = 'dataframes_190530171338_axon_AllStimuli_DistanceThr_0.50' -# df_fn = 'dataframes_190530171338_axon_AllStimuli_DistanceThr_1.00' -df_fn = 'dataframes_190530171338_axon_AllStimuli_DistanceThr_1.30' -plane_df_fn = 'plane_table_190530170648.xlsx' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -csv_fns = [fn for fn in os.listdir(os.path.join(df_folder, df_fn)) if fn[-4:] == '.csv'] -csv_fns.sort() - -plane_df = 
pd.read_excel(os.path.join(df_folder, plane_df_fn), sheetname='sheet1') - -df_all = [] - -for csv_fn in csv_fns: - print('reading {} ...'.format(csv_fn)) - df_all.append(pd.read_csv(os.path.join(df_folder, df_fn, csv_fn))) - -df_all = pd.concat(df_all, axis=0) - -try: - df_all.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis=1, inplace=True) -except KeyError: - pass - -print(df_all.columns) - -df_all['vol_n'] = '' - -for plane_i, plane_row in plane_df.iterrows(): - plane_ind = ((df_all['date'] == plane_row['date']) & - (df_all['mouse_id'] == plane_row['mouse_id']) & - (df_all['plane_n'] == plane_row['plane_n'])) - df_all.loc[plane_ind, 'vol_n'] = plane_row['volume_n'] - -print(df_all.vol_n.drop_duplicates()) - -df_all.sort_values(by=['vol_n', 'depth', 'roi_n'], inplace=True) -df_all.reset_index(inplace=True) -df_all.drop(['index'], axis=1, inplace=True) - -print(df_all[['date', 'mouse_id', 'plane_n', 'depth', 'vol_n']].drop_duplicates()) - -df_all.to_csv(df_fn.replace('dataframes', 'dataframe') + '.csv') - - - - diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0140_plot_dire_retinotopy.py b/corticalmapping/scripts/post_recording/analysis_database/old/0140_plot_dire_retinotopy.py deleted file mode 100644 index 54e5a6d..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0140_plot_dire_retinotopy.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -import corticalmapping.core.PlottingTools as pt -import corticalmapping.DatabaseTools as dt -from matplotlib.backends.backend_pdf import PdfPages - -# df_path = r"G:\bulk_LGN_database\dataframe_190530171338.csv" -df_path = r"G:\bulk_LGN_database\dataframe_190530171338_axon_AllStimuli_DistanceThr_1.00.csv" - -depths = [50, 100, 150, 200, 250, 300, 350, 400,] -mouse_ids = ['M360495', 'M376019', 'M386444', 'M426525', 'M439939', 'M439943'] -# mouse_ids = ['M439939'] -dire_type = 'peak_dire' # 'vs_dire' or 'peak_dire' -response_dir = 'pos' -response_type = 'dff' -post_process_type = 'ele' # 'raw', 'ele' or 'rec' -skew_thr = 0.6 -dgc_peak_z_thr = 3. 
-dgc_p_anova_thr = 0.01 -dsi_type = 'gdsi' -dsi_thr = 0.5 - -rf_z_thr = 1.6 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if dire_type == 'peak_dire' and (post_process_type == 'ele' or post_process_type == 'rec'): - dire_pp = 'raw' -else: - dire_pp = post_process_type - -print('loading csv file: {}'.format(df_path)) -df = pd.read_csv(df_path) -print('csv file loaded.') - -df = df[df['mouse_id'].isin(mouse_ids)] - -df = df[(df['skew_fil'] >= skew_thr) & - (df['dgc_{}_peak_z'.format(response_dir)] >= dgc_peak_z_thr) & - (df['dgc_p_anova_{}'.format(response_type)] <= dgc_p_anova_thr) & - (df['dgc_{}_{}_{}_{}'.format(response_dir, dsi_type, post_process_type, response_type)] >= dsi_thr)] - -pdff = PdfPages(os.path.join('intermediate_results', 'preferred_dire_depth.pdf')) - -f_all = plt.figure(figsize=(12, 8)) -ax_all = f_all.add_subplot(111) -ax_all.set_xlim([0, 90]) -ax_all.set_ylim([-30, 30]) -ax_all.set_aspect('equal') -ax_all.set_title('all depths') - -for depth_i, depth in enumerate(depths): - - depth_df = df[df['depth'] == depth] - print(len(depth_df)) - - f = plt.figure(figsize=(12, 8)) - ax = f.add_subplot(111) - ax.set_xlim([0, 90]) - ax.set_ylim([-30, 30]) - ax.set_aspect('equal') - ax.set_title('{} um'.format(depth)) - - for roi_i, roi_row in depth_df.iterrows(): - - if roi_row['rf_{}_on_peak_z'.format(response_dir)] >= rf_z_thr: - alt = roi_row['rf_{}_on_center_alt'.format(response_dir)] - azi = roi_row['rf_{}_on_center_azi'.format(response_dir)] - dire = roi_row['dgc_{}_{}_{}_{}'.format(response_dir, dire_type, dire_pp, response_type)] - # print('alt: {:6.2f}, azi: {:6.2f}, dire: {}'.format(alt, azi, dire)) - dire = dire * np.pi / 180. - bazi = azi - np.cos(dire) * 1. - dazi = np.cos(dire) * 2. - balt = alt - np.sin(dire) * 1. - dalt = np.sin(dire) * 2. - - ax.arrow(x=bazi, y=balt, dx=dazi, dy=dalt, length_includes_head=True, - head_width=0.5, head_length=1, ec='none', fc='r', alpha=0.5) - ax_all.arrow(x=bazi, y=balt, dx=dazi, dy=dalt, length_includes_head=True, - head_width=0.5, head_length=1, ec='none', fc='r', alpha=0.5) - - if roi_row['rf_{}_off_peak_z'.format(response_dir)] >= rf_z_thr: - alt = roi_row['rf_{}_off_center_alt'.format(response_dir)] - azi = roi_row['rf_{}_off_center_azi'.format(response_dir)] - dire = roi_row['dgc_{}_{}_{}_{}'.format(response_dir, dire_type, dire_pp, response_type)] - # print('alt: {:6.2f}, azi: {:6.2f}, dire: {}'.format(alt, azi, dire)) - dire = dire * np.pi / 180. - bazi = azi - np.sin(dire) * 1. - dazi = np.sin(dire) * 2. - balt = alt - np.cos(dire) * 1. - dalt = np.cos(dire) * 2. 
- - ax.arrow(x=bazi, y=balt, dx=dazi, dy=dalt, length_includes_head=True, - head_width=0.5, head_length=1, ec='none', fc='b', alpha=0.5) - ax_all.arrow(x=bazi, y=balt, dx=dazi, dy=dalt, length_includes_head=True, - head_width=0.5, head_length=1, ec='none', fc='b', alpha=0.5) - - - pdff.savefig(f) - f.clear() - plt.close(f) - -pdff.savefig(f_all) -pdff.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0150_plot_ori_vs_rf_axis_DS.py b/corticalmapping/scripts/post_recording/analysis_database/old/0150_plot_ori_vs_rf_axis_DS.py deleted file mode 100644 index 8f04553..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0150_plot_ori_vs_rf_axis_DS.py +++ /dev/null @@ -1,139 +0,0 @@ -import os -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.SingleCellAnalysis as sca -import scipy.stats as stats -import h5py - -# df_path = r"G:\bulk_LGN_database\dataframe_190530171338.csv" -# rf_maps_folder = r"intermediate_results\rf_maps_dataframes_190529210731" - -df_path = r"G:\bulk_LGN_database\dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30.csv" -rf_maps_folder = r"G:\bulk_LGN_database\intermediate_results" \ - r"\rf_maps_dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30" - - -depths = [50, 100, 150, 200, 250, 300, 350, 400,] -mouse_ids = ['M360495', 'M376019', 'M386444', 'M426525', 'M439939', 'M439943'] -# mouse_ids = ['M439939'] -dire_type = 'peak_dire' # 'vs_dire' or 'peak_dire' -response_dir = 'pos' -response_type = 'dff' -post_process_type = 'ele' # 'raw', 'ele' or 'rec' -skew_thr = 0.6 -dgc_peak_z_thr = 3. -dgc_p_anova_thr = 0.01 -dsi_type = 'gdsi' -dsi_thr = 0.5 -osi_type = 'gosi' -osi_thr = 1. / 3. - -ellipse_aspect_thr = 1.0 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if dire_type == 'peak_dire' and (post_process_type == 'ele' or post_process_type == 'rec'): - dire_pp = 'raw' -else: - dire_pp = post_process_type - -print('loading csv file: {}'.format(df_path)) -df = pd.read_csv(df_path) -print('csv file loaded.') - -df = df[(df['mouse_id'].isin(mouse_ids)) & \ - (df['skew_fil'] >= skew_thr) & \ - (df['dgc_{}_peak_z'.format(response_dir)] >= dgc_peak_z_thr) & \ - (df['dgc_p_anova_{}'.format(response_type)] <= dgc_p_anova_thr) & \ - (np.isfinite(df['rf_{}_on_peak_z'.format(response_dir)]))] - -dsdf = df[(df['dgc_{}_{}_{}_{}'.format(response_dir, dsi_type, post_process_type, response_type)] >= dsi_thr)] - -ds_diff_onoff = [] -ds_diff_on = [] -ds_diff_off = [] -for roi_i, roi_row in dsdf.iterrows(): - date = int(roi_row['date']) - mid = roi_row['mouse_id'] - plane_n = roi_row['plane_n'] - roi_n = roi_row['roi_n'] - - map_fn = '{}_{}_{}_{}'.format(date, mid, plane_n, response_dir) - map_f = h5py.File(os.path.join(rf_maps_folder, map_fn + '.hdf5'), 'r') - - on_grp = map_f['{}_ON'.format(map_fn)] - off_grp = map_f['{}_OFF'.format(map_fn)] - - dire = roi_row['dgc_{}_{}_{}_{}'.format(response_dir, dire_type, dire_pp, response_type)] - ori = sca.dire2ori(dire) - - if roi_n in on_grp.keys() and roi_n in off_grp.keys(): - rf_on = sca.SpatialReceptiveField.from_h5_group(on_grp[roi_n]) - rf_off = sca.SpatialReceptiveField.from_h5_group(off_grp[roi_n]) - c_alt_on, c_azi_on = rf_on.get_weighted_rf_center() - c_alt_off, c_azi_off = rf_off.get_weighted_rf_center() - - onoff_ang = np.arctan((c_alt_on - c_alt_off) / (c_azi_on - c_azi_off)) - onoff_ang = onoff_ang * 180. 
/ np.pi - onoff_ang = sca.dire2ori(onoff_ang) - - curr_diff = abs(onoff_ang - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - - ds_diff_onoff.append(curr_diff) - - elif roi_n in on_grp.keys(): - rf_on = sca.SpatialReceptiveField.from_h5_group(on_grp[roi_n]) - ell_on = rf_on.ellipse_fitting(is_plot=False) - if ell_on is not None and ell_on.get_aspect_ratio() >= ellipse_aspect_thr: - curr_diff = abs(ell_on.angle - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - ds_diff_on.append(curr_diff) - - elif roi_n in off_grp.keys(): - rf_off = sca.SpatialReceptiveField.from_h5_group(off_grp[roi_n]) - ell_off = rf_off.ellipse_fitting(is_plot=False) - if ell_off is not None and ell_off.get_aspect_ratio() >= ellipse_aspect_thr: - curr_diff = abs(ell_off.angle - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - ds_diff_off.append(curr_diff) - -print('\nDirection Selective ROIs:') -print('\tWith ONOFF receptive fields:') -print('\t\tn={}'.format(len(ds_diff_onoff))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(ds_diff_onoff))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(ds_diff_onoff))) -chisq_ds_onoff, p_ds_onoff = stats.chisquare(np.histogram(ds_diff_onoff, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_ds_onoff, p_ds_onoff)) - -print('\tWith only ON receptive fields:') -print('\t\tn={}'.format(len(ds_diff_on))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(ds_diff_on))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(ds_diff_on))) -chisq_ds_on, p_ds_on = stats.chisquare(np.histogram(ds_diff_on, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_ds_on, p_ds_on)) - -print('\tWith only OFF receptive fields:') -print('\t\tn={}'.format(len(ds_diff_off))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(ds_diff_off))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(ds_diff_off))) -chisq_ds_off, p_ds_off = stats.chisquare(np.histogram(ds_diff_off, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_ds_off, p_ds_off)) - -ds_diff_all = ds_diff_onoff + ds_diff_on + ds_diff_off -print('\tWith all receptive fields:') -print('\t\tn={}'.format(len(ds_diff_all))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(ds_diff_all))) -print('\t\torie difference predicted vs. 
measured, std={}'.format(np.std(ds_diff_all))) -chisq_ds_all, p_ds_all = stats.chisquare(np.histogram(ds_diff_all, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_ds_all, p_ds_all)) - - -plt.hist([ds_diff_onoff, ds_diff_on, ds_diff_off], range=[0, 90], bins=20, stacked=True, - color=['purple', 'r', 'b'], ec='none', alpha=0.5) -plt.show() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_database/old/0160_plot_ori_vs_rf_axis_OS.py b/corticalmapping/scripts/post_recording/analysis_database/old/0160_plot_ori_vs_rf_axis_OS.py deleted file mode 100644 index 10cc5da..0000000 --- a/corticalmapping/scripts/post_recording/analysis_database/old/0160_plot_ori_vs_rf_axis_OS.py +++ /dev/null @@ -1,139 +0,0 @@ -import os -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.SingleCellAnalysis as sca -import scipy.stats as stats -import h5py - -# df_path = r"G:\bulk_LGN_database\dataframe_190530171338.csv" -# rf_maps_folder = r"intermediate_results\rf_maps_dataframes_190529210731" - -df_path = r"G:\bulk_LGN_database\dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30.csv" -rf_maps_folder = r"G:\bulk_LGN_database\intermediate_results" \ - r"\rf_maps_dataframe_190530171338_axon_AllStimuli_DistanceThr_1.30" - -depths = [50, 100, 150, 200, 250, 300, 350, 400,] -mouse_ids = ['M360495', 'M376019', 'M386444', 'M426525', 'M439939', 'M439943'] -# mouse_ids = ['M439939'] -dire_type = 'peak_dire' # 'vs_dire' or 'peak_dire' -response_dir = 'pos' -response_type = 'dff' -post_process_type = 'ele' # 'raw', 'ele' or 'rec' -skew_thr = 0.6 -dgc_peak_z_thr = 3. -dgc_p_anova_thr = 0.01 -dsi_type = 'gdsi' -dsi_thr = 0.5 -osi_type = 'gosi' -osi_thr = 1. / 3. 
- -ellipse_aspect_thr = 1.0 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if dire_type == 'peak_dire' and (post_process_type == 'ele' or post_process_type == 'rec'): - dire_pp = 'raw' -else: - dire_pp = post_process_type - -print('loading csv file: {}'.format(df_path)) -df = pd.read_csv(df_path) -print('csv file loaded.') - -df = df[(df['mouse_id'].isin(mouse_ids)) & \ - (df['skew_fil'] >= skew_thr) & \ - (df['dgc_{}_peak_z'.format(response_dir)] >= dgc_peak_z_thr) & \ - (df['dgc_p_anova_{}'.format(response_type)] <= dgc_p_anova_thr) & \ - (np.isfinite(df['rf_{}_on_peak_z'.format(response_dir)]))] - -osdf = df[(df['dgc_{}_{}_{}_{}'.format(response_dir, osi_type, post_process_type, response_type)] >= osi_thr) & \ - (df['dgc_{}_{}_{}_{}'.format(response_dir, dsi_type, post_process_type, response_type)] < dsi_thr)] - -os_diff_onoff = [] -os_diff_on = [] -os_diff_off = [] -for roi_i, roi_row in osdf.iterrows(): - date = int(roi_row['date']) - mid = roi_row['mouse_id'] - plane_n = roi_row['plane_n'] - roi_n = roi_row['roi_n'] - - map_fn = '{}_{}_{}_{}'.format(date, mid, plane_n, response_dir) - map_f = h5py.File(os.path.join(rf_maps_folder, map_fn + '.hdf5'), 'r') - - on_grp = map_f['{}_ON'.format(map_fn)] - off_grp = map_f['{}_OFF'.format(map_fn)] - - dire = roi_row['dgc_{}_{}_{}_{}'.format(response_dir, dire_type, dire_pp, response_type)] - ori = sca.dire2ori(dire) - - if roi_n in on_grp.keys() and roi_n in off_grp.keys(): - rf_on = sca.SpatialReceptiveField.from_h5_group(on_grp[roi_n]) - rf_off = sca.SpatialReceptiveField.from_h5_group(off_grp[roi_n]) - c_alt_on, c_azi_on = rf_on.get_weighted_rf_center() - c_alt_off, c_azi_off = rf_off.get_weighted_rf_center() - - onoff_ang = np.arctan((c_alt_on - c_alt_off) / (c_azi_on - c_azi_off)) - onoff_ang = onoff_ang * 180. / np.pi - onoff_ang = sca.dire2ori(onoff_ang) - - curr_diff = abs(onoff_ang - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - - os_diff_onoff.append(curr_diff) - - elif roi_n in on_grp.keys(): - rf_on = sca.SpatialReceptiveField.from_h5_group(on_grp[roi_n]) - ell_on = rf_on.ellipse_fitting(is_plot=False) - if ell_on is not None and ell_on.get_aspect_ratio() >= ellipse_aspect_thr: - curr_diff = abs(ell_on.angle - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - os_diff_on.append(curr_diff) - - elif roi_n in off_grp.keys(): - rf_off = sca.SpatialReceptiveField.from_h5_group(off_grp[roi_n]) - ell_off = rf_off.ellipse_fitting(is_plot=False) - if ell_off is not None and ell_off.get_aspect_ratio() >= ellipse_aspect_thr: - curr_diff = abs(ell_off.angle - ori) - if curr_diff > 90.: - curr_diff = 180 - curr_diff - os_diff_off.append(curr_diff) - -print('\nOrientation Selective ROIs:') -print('\tWith ONOFF receptive fields:') -print('\t\tn={}'.format(len(os_diff_onoff))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(os_diff_onoff))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(os_diff_onoff))) -chisq_os_onoff, p_os_onoff = stats.chisquare(np.histogram(os_diff_onoff, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_os_onoff, p_os_onoff)) - -print('\tWith only ON receptive fields:') -print('\t\tn={}'.format(len(os_diff_on))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(os_diff_on))) -print('\t\torie difference predicted vs. 
measured, std={}'.format(np.std(os_diff_on))) -chisq_os_on, p_os_on = stats.chisquare(np.histogram(os_diff_on, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_os_on, p_os_on)) - -print('\tWith only OFF receptive fields:') -print('\t\tn={}'.format(len(os_diff_off))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(os_diff_off))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(os_diff_off))) -chisq_os_off, p_os_off = stats.chisquare(np.histogram(os_diff_off, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_os_off, p_os_off)) - -os_diff_all = os_diff_onoff + os_diff_on + os_diff_off -print('\tWith all receptive fields:') -print('\t\tn={}'.format(len(os_diff_all))) -print('\t\torie difference predicted vs. measured, mean={}'.format(np.mean(os_diff_all))) -print('\t\torie difference predicted vs. measured, std={}'.format(np.std(os_diff_all))) -chisq_os_all, p_os_all = stats.chisquare(np.histogram(os_diff_all, range=[0., 90.], bins=20)[0]) -print('\t\tagainst uniform distribution: chi-squared={}, p={}'.format(chisq_os_all, p_os_all)) - - -plt.hist([os_diff_onoff, os_diff_on, os_diff_off], range=[0, 90], bins=20, stacked=True, - color=['purple', 'r', 'b'], ec='none', alpha=0.5) -plt.show() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_get_vasmap/010_get_vasmap_2p.py b/corticalmapping/scripts/post_recording/analysis_pipeline_get_vasmap/010_get_vasmap_2p.py deleted file mode 100644 index 9267e9c..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_get_vasmap/010_get_vasmap_2p.py +++ /dev/null @@ -1,67 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import matplotlib.pyplot as plt -import cv2 - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\181213-M421211-2p" -scope = 'sutter' # 'sutter', 'deepscope' or 'scientifica' -identifier = "vasmap_2p" -channels = ['green', 'red'] -is_equalize = False # equalize histogram - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -vasmaps = {} -for chn in channels: - vasmaps.update({chn: []}) - -file_ns = [f for f in os.listdir(data_folder) if identifier in f] -file_ns.sort() -print('\n'.join(file_ns)) - -for file_n in file_ns: - print(file_n) - - curr_vasmap = tf.imread(os.path.join(data_folder, file_n)) - - if len(curr_vasmap.shape) == 2: - if len(channels) == 1: - vasmaps[channels[0]].append(np.array([curr_vasmap])) - else: - raise ValueError('recorded file is 2d, cannot be deinterleved into {} channels.'.format(len(channels))) - else: - if len(curr_vasmap.shape) != 3: - raise ValueError('shape of recorded file: {}. 
should be either 2d or 3d.'.format(curr_vasmap.shape)) - - for ch_i, ch_n in enumerate(channels): - curr_vasmap_ch = curr_vasmap[ch_i::len(channels)] - curr_vasmap_ch = ia.array_nor(np.mean(curr_vasmap_ch, axis=0)) - if is_equalize: - curr_vasmap_ch = (curr_vasmap_ch * 255).astype(np.uint8) - curr_vasmap_ch = cv2.equalizeHist(curr_vasmap_ch).astype(np.float32) - vasmaps[ch_n].append(curr_vasmap_ch) - -for ch_n, ch_vasmap in vasmaps.items(): - # save_vasmap = np.concatenate(ch_vasmap, axis=0) - # print(save_vasmap.shape) - # save_vasmap = ia.array_nor(np.mean(save_vasmap, axis=0)) - # print(save_vasmap.shape) - - save_vasmap = ia.array_nor(np.mean(ch_vasmap, axis=0)) - - if scope == 'scientifica': - save_vasmap_r = save_vasmap[::-1, :] - save_vasmap_r = ia.rigid_transform_cv2_2d(save_vasmap_r, rotation=135) - elif scope == 'sutter': - save_vasmap_r = save_vasmap.transpose()[::-1, :] - elif scope == 'deepscope': - save_vasmap_r = ia.rigid_transform_cv2(save_vasmap, rotation=140)[:, ::-1] - else: - raise LookupError("Do not understand scope type. Should be 'sutter' or 'deepscope' or 'scientifica'.") - - tf.imsave('{}_{}.tif'.format(identifier, ch_n), save_vasmap.astype(np.float32)) - tf.imsave('{}_{}_rotated.tif'.format(identifier, ch_n), save_vasmap_r.astype(np.float32)) diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_get_vasmap/020_get_vasmap_wf_deepscope.py b/corticalmapping/scripts/post_recording/analysis_pipeline_get_vasmap/020_get_vasmap_wf_deepscope.py deleted file mode 100644 index 97cf956..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_get_vasmap/020_get_vasmap_wf_deepscope.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import skimage.io as io -import skimage.color as color -import matplotlib.pyplot as plt -import corticalmapping.core.ImageAnalysis as ia - -vasmap_wf_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\181102-M412052-deepscope\vasmap_wf" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -map_fns = [f for f in os.listdir(vasmap_wf_folder) if f[-4:]=='.tif'] -map_fns.sort() -print('\n'.join(map_fns)) - -map_wf = [] -for map_fn in map_fns: - curr_map = tf.imread(os.path.join(vasmap_wf_folder, map_fn)).astype(np.float32) - map_wf.append(ia.array_nor(curr_map)) - -map_wf = ia.array_nor(np.mean(map_wf, axis=0)) -map_wf_r = ia.array_nor(ia.rigid_transform_cv2(map_wf, rotation=140)[:, ::-1]) - -f = plt.figure(figsize=(12, 6)) -ax_wf = f.add_subplot(121) -ax_wf.imshow(map_wf, vmin=0., vmax=1., cmap='gray', interpolation='nearest') -ax_wf.set_axis_off() -ax_wf.set_title('vasmap widefield') - -ax_wf_r = f.add_subplot(122) -ax_wf_r.imshow(map_wf_r, vmin=0., vmax=1., cmap='gray', interpolation='nearest') -ax_wf_r.set_axis_off() -ax_wf_r.set_title('vasmap widefield rotated') - -plt.show() - -tf.imsave('vasmap_wf.tif', map_wf.astype(np.float32)) -tf.imsave('vasmap_wf_rotated.tif', map_wf_r.astype(np.float32)) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_get_vasmap/030_get_vasmap_wf_sutter.py b/corticalmapping/scripts/post_recording/analysis_pipeline_get_vasmap/030_get_vasmap_wf_sutter.py deleted file mode 100644 index a6fd568..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_get_vasmap/030_get_vasmap_wf_sutter.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import numpy as np -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia 
-import tifffile as tf - -save_name = 'vasmap_wf' -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\190228-M426525-2p\vasmap_wf" - -saveFolder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(saveFolder) - -vasmap_fns = [f for f in os.listdir(data_folder) if 'JCam' in f] -vasmap_fns.sort() -print('\n'.join(vasmap_fns)) - -vasmaps = [] - -for vasmap_fn in vasmap_fns: - - vasmap_focused, _, _ = ft.importRawJCamF(os.path.join(data_folder, vasmap_fn), column=1024, row=1024, - headerLength=116, tailerLength=452) # try 452 if 218 does not work - vasmap_focused = vasmap_focused[2:] - vasmap_focused[vasmap_focused > 50000] = 400 - vasmap_focused = np.mean(vasmap_focused, axis=0) - vasmaps.append(ia.array_nor(vasmap_focused)) - -vasmap = ia.array_nor(np.mean(vasmaps, axis=0)) -vasmap_r = vasmap[::-1, :] - -tf.imsave('{}.tif'.format(save_name), vasmap.astype(np.float32)) -tf.imsave('{}_rotated.tif'.format(save_name), vasmap_r.astype(np.float32)) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_histology_big_tiffs/01_get_thumbnail.py b/corticalmapping/scripts/post_recording/analysis_pipeline_histology_big_tiffs/01_get_thumbnail.py deleted file mode 100644 index a167681..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_histology_big_tiffs/01_get_thumbnail.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia - -channels = ['DAPI', 'GCaMP', 'mRuby', 'NeuN'] -downsample_rate = 0.1 - -curr_folder = os.path.realpath(os.path.dirname(__file__)) -os.chdir(curr_folder) - -fns = [f for f in os.listdir(curr_folder) if f[-4:] == '.btf'] -fns.sort() -print('\n'.join(fns)) - -for fn in fns: - print('\nprocessing {} ...'.format(fn)) - big_img = tf.imread(fn) - fname = os.path.splitext(fn)[0] - print('shape: {}'.format(big_img.shape)) - print('dtype: {}'.format(big_img.dtype)) - - # comb_img = [] - - for chi, chn in enumerate(channels): - print('\tchannel: {}'.format(chn)) - down_img_ch = ia.rigid_transform_cv2(big_img[chi], zoom=downsample_rate).astype(np.uint16)[::-1, :] - tf.imsave('thumbnail_{}_{:02d}_{}.tif'.format(fname, chi, chn), down_img_ch) - # comb_img.append(down_img_ch) - - # comb_img = np.array(comb_img) - # tf.imsave('{}_downsampled.tif'.format(fname), comb_img) diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_histology_big_tiffs/02_get_section.py b/corticalmapping/scripts/post_recording/analysis_pipeline_histology_big_tiffs/02_get_section.py deleted file mode 100644 index 5b499a7..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_histology_big_tiffs/02_get_section.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia - -base_name = '363669_1_01' -save_name = '363669_1_section02.tif' -thumbnail_region = [100, 1125, 1673, 3176] - -channels = ['mRuby', 'GCaMP', 'DAPI', 'NeuN'] -d_rate = 0.5 - -thumbnail_d_rate = 0.1 # down sample rate of thumbnail - -curr_folder = os.path.realpath(os.path.dirname(__file__)) -os.chdir(curr_folder) - -big_region = (np.array(thumbnail_region) / thumbnail_d_rate).astype(np.uint64) -print('region in big image: {}'.format(big_region)) - -thumbnail_fns = [f for f in os.listdir(curr_folder) if base_name in f and f[-4:] == '.tif'] -ch_lst = [] -for chn in channels: - curr_chi = [int(f.split('_')[-2]) for f in thumbnail_fns if chn in f] - if len(curr_chi) != 1: - raise 
LookupError - ch_lst.append(curr_chi[0]) - -print('channel index list: {}'.format(ch_lst)) - -big_img = tf.imread(base_name + '.btf') -print('reading the big image: {}.btf ...'.format(base_name)) - -section_img = [] - -for ch_i in ch_lst: - curr_img = big_img[ch_i][::-1, :][big_region[0]: big_region[1], big_region[2]: big_region[3]] - curr_img = ia.rigid_transform_cv2(curr_img, zoom=d_rate).astype(np.uint16) - section_img.append(curr_img) - -section_img = np.array(section_img) - -print('saving {} ...'.format(save_name)) -tf.imsave(save_name, section_img) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_(optional)_check_deepscope_file_creation_time.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_(optional)_check_deepscope_file_creation_time.py deleted file mode 100644 index 87f5cbe..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_(optional)_check_deepscope_file_creation_time.py +++ /dev/null @@ -1,28 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\190822-M471944-deepscope\movie" -identifier = '110_LSNDGCUC' - -fns = np.array([f for f in os.listdir(data_folder) if f[-4:] == '.tif' and identifier in f]) -f_nums = [int(os.path.splitext(fn)[0].split('_')[-2]) for fn in fns] -fns = fns[np.argsort(f_nums)] -print('total file number: {}'.format(len(fns))) - -ctimes = [] - -for fn in fns: - ctimes.append(os.path.getctime(os.path.join(data_folder, fn))) - -ctime_diff = np.diff(ctimes) -max_ind = np.argmax(ctime_diff) -print('maximum creation gap: {}'.format(ctime_diff[max_ind])) - -fis = np.arange(21, dtype=np.int) - 10 + max_ind - -for fi in fis: - print('{}, ctime: {}s, duration: {}s'.format(fns[fi], ctimes[fi], ctime_diff[fi])) - -plt.plot(ctime_diff) -plt.show() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_(optional)_check_deepscope_filename.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_(optional)_check_deepscope_filename.py deleted file mode 100644 index 4a47cf1..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_(optional)_check_deepscope_filename.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -import numpy as np - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\190822-M471944-deepscope\movie" -identifier = '110_LSNDGCUC' - -fns = np.array([f for f in os.listdir(data_folder) if f[-4:] == '.tif' and identifier in f]) -f_nums = [int(os.path.splitext(fn)[0].split('_')[-2]) for fn in fns] -fns = fns[np.argsort(f_nums)] -print('total file number: {}'.format(len(fns))) - -for i in range(1, len(fns) + 1): - - if i < 100000: - if fns[i-1] != '{}_{:05d}_00001.tif'.format(identifier, i): - print('{}th file, name: {}, do not match!'.format(i, fns[i])) - break - elif i < 1000000: - if fns[i - 1] != '{}_{:06d}_00001.tif'.format(identifier, i): - print('{}th file, name: {}, do not match!'.format(i, fns[i])) - break - elif i < 10000000: - if fns[i - 1] != '{}_{:07d}_00001.tif'.format(identifier, i): - print('{}th file, name: {}, do not match!'.format(i, fns[i])) - break diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_(optional)_plot_volume.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_(optional)_plot_volume.py deleted file mode 100644 index 5324453..0000000 --- 
a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_(optional)_plot_volume.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.core.ImageAnalysis as ia - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\190822-M471944-deepscope\movie" -identifier = '110_LSNDGCUC' -start_ind = 121228 -frame_num = 3 - -fns = [] - -for ind in np.arange(frame_num, dtype=np.int) + start_ind: - - if ind < 100000: - fns.append('{}_{:05d}_00001.tif'.format(identifier, ind)) - elif ind < 1000000: - fns.append('{}_{:06d}_00001.tif'.format(identifier, ind)) - elif ind < 10000000: - fns.append('{}_{:07d}_00001.tif'.format(identifier, ind)) - -f = plt.figure(figsize=(5, 12)) -for frame_i in range(frame_num): - ax = f.add_subplot(frame_num, 1, frame_i+1) - ax.imshow(ia.array_nor(tf.imread(os.path.join(data_folder, fns[frame_i]))), cmap='gray', - vmin=0, vmax=0.5, interpolation='nearest') - ax.set_title(fns[frame_i]) - ax.set_axis_off() - -plt.tight_layout() -plt.show() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_reorganize_movie_2p_deepscope.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_reorganize_movie_2p_deepscope.py deleted file mode 100644 index c0c1501..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_reorganize_movie_2p_deepscope.py +++ /dev/null @@ -1,97 +0,0 @@ -import os -import numpy as np -import tifffile as tf - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180928-M393857-deepscope\movie_mp" -identifier = '110_LSNDGC' -channels = ['green', 'red'] -plane_num = 3 -temporal_downsample_rate = 4 -frame_each_file = 500 -low_thr = -500 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -fns = np.array([f for f in os.listdir(data_folder) if f[-4:] == '.tif' and identifier in f]) -f_nums = [int(os.path.splitext(fn)[0].split('_')[-2]) for fn in fns] -fns = fns[np.argsort(f_nums)] -print('total file number: {}'.format(len(fns))) - -# print('\n'.join(fns)) - -save_folders = [] -for i in range(plane_num): - curr_save_folder = os.path.join(data_folder, identifier + '_reorged', 'plane{}'.format(i)) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - save_folders.append(curr_save_folder) - -# frame_per_plane = len(fns) // plane_num -for plane_ind in range(plane_num): - print('\nprocessing plane: {}'.format(plane_ind)) - curr_fns = fns[plane_ind::plane_num] - - total_frames_down = len(curr_fns) // temporal_downsample_rate - curr_fns = curr_fns[: total_frames_down * temporal_downsample_rate].reshape((total_frames_down, temporal_downsample_rate)) - - print(curr_fns.shape) - - print('current file ind: 000') - curr_file_ind = 0 - curr_frame_ind = 0 - curr_mov = {} - for ch_n in channels: - curr_mov.update({ch_n : []}) - - for fgs in curr_fns: - - frame_grp = [] - - for fn in fgs: - cf = tf.imread(os.path.join(data_folder, fn)) - # remove extreme negative pixels - cf[cf < low_thr] = low_thr - if len(cf.shape) == 2: - cf = np.array([cf]) - frame_grp.append(cf) - - curr_frame = {} - - for ch_i, ch_n in enumerate(channels): - ch_frame_grp = np.array([f[ch_i::len(channels)][0] for f in frame_grp]) - # print ch_frame_grp.shape - ch_frame = np.mean(ch_frame_grp, axis=0).astype(np.int16) - # ch_frame = ch_frame.transpose()[::-1, ::-1] - curr_frame.update({ch_n: ch_frame}) - - if curr_frame_ind < 
frame_each_file: - - for ch_n in channels: - curr_mov[ch_n].append(curr_frame[ch_n]) - - curr_frame_ind = curr_frame_ind + 1 - - else: - for ch_n in channels: - curr_mov_ch = np.array(curr_mov[ch_n], dtype=np.int16) - save_name = '{}_{:05d}_reorged.tif'.format(identifier, curr_file_ind) - save_folder_ch = os.path.join(save_folders[plane_ind], ch_n) - if not os.path.isdir(save_folder_ch): - os.makedirs(save_folder_ch) - tf.imsave(os.path.join(save_folder_ch, save_name), curr_mov_ch) - curr_mov[ch_n] = [curr_frame[ch_n]] - print('current file ind: {:05d}; channel: {}'.format(curr_file_ind, ch_n)) - curr_file_ind += 1 - curr_frame_ind = 1 - - for ch_n in channels: - curr_mov_ch = np.array(curr_mov[ch_n], dtype=np.int16) - save_name = '{}_{:05d}_reorged.tif'.format(identifier, curr_file_ind) - save_folder_ch = os.path.join(save_folders[plane_ind], ch_n) - if not os.path.isdir(save_folder_ch): - os.makedirs(save_folder_ch) - tf.imsave(os.path.join(save_folder_ch, save_name), curr_mov_ch) - print('current file ind: {:05d}; channel: {}'.format(curr_file_ind, ch_n)) - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_reorganize_movie_2p_sutter.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_reorganize_movie_2p_sutter.py deleted file mode 100644 index 48233e7..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/000_reorganize_movie_2p_sutter.py +++ /dev/null @@ -1,98 +0,0 @@ -import os -import numpy as np -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -import tifffile as tf -import warnings -data_folder = r'\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\190503-M439939-2p\movie' -file_identifier = '110_LSNDGC' -ch_ns = ['green', 'red'] -frames_per_file = 500 -td_rate = 1 # temporal downsample rate - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -file_list = [f for f in os.listdir(data_folder) if file_identifier in f and f[-4:] == '.tif'] -file_list.sort() -print('\n'.join(file_list)) - -file_paths = [os.path.join(data_folder, f) for f in file_list] - -save_folders = [] -save_ids = [0 for ch in ch_ns] -total_movs = [None for ch in ch_ns] -for ch_n in ch_ns: - curr_save_folder = os.path.join(data_folder, file_identifier + '_reorged', 'plane0', ch_n) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - save_folders.append(curr_save_folder) - -for file_path in file_paths: - print('\nprocessing {} ...'.format(os.path.split(file_path)[1])) - - curr_mov = tf.imread(file_path) - - if curr_mov.shape[0] % len(ch_ns) != 0: - raise ValueError('\ttotal frame number of current movie ({}) cannot be divided by number of ' - 'channels ({})!'.format(curr_mov.shape[0], len(ch_ns))) - - # curr_mov = curr_mov.transpose((0, 2, 1))[:, ::-1, :] - - for ch_i, ch_n in enumerate(ch_ns): - print('\n\tprocessing channel: {}'.format(ch_n)) - - curr_mov_ch = curr_mov[ch_i::len(ch_ns)] - - if total_movs[ch_i] is None: - total_movs[ch_i] = curr_mov_ch - else: - total_movs[ch_i] = np.concatenate((total_movs[ch_i], curr_mov_ch), axis=0) - - while (total_movs[ch_i] is not None) and \ - (total_movs[ch_i].shape[0] >= frames_per_file * td_rate): - - num_file_to_save = total_movs[ch_i].shape[0] // (frames_per_file * td_rate) - - for save_file_id in range(num_file_to_save): - save_chunk = total_movs[ch_i][save_file_id * (frames_per_file * td_rate) : - (save_file_id + 1) * (frames_per_file * td_rate)] - save_path = os.path.join(save_folders[ch_i], 
'{}_{:05d}_reorged.tif'.format(file_identifier, - save_ids[ch_i])) - if td_rate != 1: - print('\tdown sampling for {} ...'.format(os.path.split(save_path)[1])) - save_chunk = ia.z_downsample(save_chunk, downSampleRate=td_rate, is_verbose=False) - - print('\tsaving {} ...'.format(os.path.split(save_path)[1])) - tf.imsave(save_path, save_chunk) - save_ids[ch_i] = save_ids[ch_i] + 1 - - if total_movs[ch_i].shape[0] % (frames_per_file * td_rate) == 0: - total_movs[ch_i] = None - else: - frame_num_left = total_movs[ch_i].shape[0] % (frames_per_file * td_rate) - total_movs[ch_i] = total_movs[ch_i][-frame_num_left:] - -print('\nprocessing residual frames ...') - -for ch_i, ch_n in enumerate(ch_ns): - - if total_movs[ch_i] is not None: - print('\n\tprocessing channel: {}'.format(ch_n)) - - save_path = os.path.join(save_folders[ch_i], '{}_{:05d}_reorged.tif'.format(file_identifier, save_ids[ch_i])) - - curr_mov_ch = total_movs[ch_i] - - if td_rate != 1: - if curr_mov_ch.shape[0] % td_rate !=0: - warning_msg = '\tthe residual frame number ({}) cannot be divided by temporal down sample rate ({}).' \ - ' Drop last few frames.'.format(curr_mov_ch.shape[0], td_rate) - print(warning_msg) - print('\tdown sampling for {} ...'.format(os.path.split(save_path)[1])) - curr_mov_ch = ia.z_downsample(curr_mov_ch, downSampleRate=td_rate, is_verbose=False) - - print('\tsaving {} ...'.format(os.path.split(save_path)[1])) - tf.imsave(save_path, curr_mov_ch) - -print('\nDone!') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/050_motion_correction.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/050_motion_correction.py deleted file mode 100644 index d85047f..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/050_motion_correction.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -import stia.motion_correction as mc - -date_recorded = '190503' -mouse_id = 'M439939' -data_folder_n = '110_LSNDGC_reorged' -imaging_mode = '2p' # '2p' or 'deepscope' - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\{}-{}-{}" \ - r"\{}".format(date_recorded, mouse_id, imaging_mode, data_folder_n) - -def correct(data_folder): - - ref_ch_n = 'red' - apply_ch_ns = ['green', 'red'] - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - ref_data_folder = os.path.join(data_folder, ref_ch_n) - - mc.motion_correction(input_folder=ref_data_folder, - input_path_identifier='.tif', - process_num=6, - output_folder=os.path.join(ref_data_folder, 'corrected'), - anchor_frame_ind_chunk=10, - anchor_frame_ind_projection=0, - iteration_chunk=10, - iteration_projection=10, - max_offset_chunk=(100., 100.), - max_offset_projection=(100., 100.), - align_func=mc.phase_correlation, - preprocessing_type=6, - fill_value=0.) 
- - offsets_path = os.path.join(ref_data_folder, 'corrected', 'correction_offsets.hdf5') - ref_fns = [f for f in os.listdir(ref_data_folder) if f[-4:] == '.tif'] - ref_fns.sort() - ref_paths = [os.path.join(ref_data_folder, f) for f in ref_fns] - print('\nreference paths:') - print('\n'.join(ref_paths)) - - for apply_ch_i, apply_ch_n in enumerate(apply_ch_ns): - apply_data_folder = os.path.join(data_folder, apply_ch_n) - apply_fns = [f for f in os.listdir(apply_data_folder) if f[-4:] == '.tif'] - apply_fns.sort() - apply_paths = [os.path.join(apply_data_folder, f) for f in apply_fns] - print('\napply paths:') - print('\n'.join(apply_paths)) - - mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=zip(ref_paths, apply_paths), - output_folder=os.path.join(apply_data_folder, 'corrected'), - process_num=6, - fill_value=0., - avi_downsample_rate=10, - is_equalizing_histogram=False) - -def run(): - - plane_folders = [f for f in os.listdir(data_folder) if f[0:5] == 'plane' and - os.path.isdir(os.path.join(data_folder, f))] - plane_folders.sort() - print('folders to be corrected:') - print('\n'.join(plane_folders)) - - for plane_folder in plane_folders: - correct(os.path.join(data_folder, plane_folder)) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/051_(optional)_reapply_motion_correction_deepscope.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/051_(optional)_reapply_motion_correction_deepscope.py deleted file mode 100644 index 8ae7049..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/051_(optional)_reapply_motion_correction_deepscope.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import stia.motion_correction as mc - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\190118-M417949-deepscope\movie\110_LSNDGC" - -# apply the correction offsets from one plane to other planes -reference_plane = 'plane1' -apply_plane_ns = ['plane0', 'plane2'] -ref_ch_n = 'red' -apply_ch_ns = ['green', 'red'] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -def run(): - ref_folder = os.path.join(data_folder, reference_plane, ref_ch_n) - offsets_path = os.path.join(ref_folder, 'corrected', 'correction_offsets.hdf5') - ref_paths = [f for f in os.listdir(ref_folder) if f[-4:] == '.tif'] - ref_paths.sort() - ref_paths = [os.path.join(ref_folder, f) for f in ref_paths] - print('\nreference paths:') - print('\n'.join(ref_paths)) - - - for apply_plane_n in apply_plane_ns: - for apply_ch_n in apply_ch_ns: - print('\n\tapply to {}, channel: {}'.format(apply_plane_n, apply_ch_n)) - working_folder = os.path.join(data_folder, apply_plane_n, apply_ch_n) - apply_paths = [f for f in os.listdir(working_folder) if f[-4:] == '.tif'] - apply_paths.sort() - apply_paths = [os.path.join(working_folder, f) for f in apply_paths] - print('\n\tapply paths:') - print('\t'+'\n\t'.join(apply_paths)) - - mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=zip(ref_paths, apply_paths), - output_folder=os.path.join(working_folder, 'corrected'), - process_num=6, - fill_value=0., - avi_downsample_rate=10, - is_equalizing_histogram=False) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/052_(optional)_downsample_corrected_files.py 
b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/052_(optional)_downsample_corrected_files.py deleted file mode 100644 index 103e305..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/052_(optional)_downsample_corrected_files.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -import numpy as np -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -import tifffile as tf -import warnings -import shutil - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\190104-M417949-2p\110_LSNDGC_reorged" -td_rate = 5 -file_identifier = '110_LSNDGC' -frames_per_file = 500 - -def downsample_folder(working_folder, - td_rate, - file_identifier, - frames_per_file=500): - - file_list = [f for f in os.listdir(working_folder) if file_identifier in f and f[-14:] == '_corrected.tif'] - file_list.sort() - print('\t\tall files:') - print('\n'.join(['\t\t' + f for f in file_list])) - - print('\n\t\tmoving files to "not_downsampled" folder:') - file_paths = [os.path.join(working_folder, f) for f in file_list] - print('') - - not_downsampled_folder = os.path.join(working_folder, 'not_downsampled') - os.mkdir(not_downsampled_folder) - for file_path in file_paths: - fn = os.path.split(file_path)[1] - shutil.move(file_path, os.path.join(not_downsampled_folder, fn)) - - file_paths_original = [os.path.join(not_downsampled_folder, fn) for fn in file_list] - file_paths_original.sort() - - - save_id = 0 - total_mov = None - for file_path_o in file_paths_original: - print('\t\tprocessing {} ...'.format(os.path.split(file_path_o)[1])) - curr_mov = tf.imread(file_path_o) - - if total_mov is None: - total_mov = curr_mov - else: - total_mov = np.concatenate((total_mov, curr_mov), axis=0) - - while total_mov is not None and \ - (total_mov.shape[0] >= frames_per_file * td_rate): - - num_file_to_save = total_mov.shape[0] // (frames_per_file * td_rate) - - for save_file_id in range(num_file_to_save): - save_chunk = total_mov[save_file_id * (frames_per_file * td_rate) : - (save_file_id + 1) * (frames_per_file * td_rate)] - save_path = os.path.join(working_folder, '{}_{:05d}_corrected_downsampled.tif'.format(file_identifier, - save_id)) - save_chunk = ia.z_downsample(save_chunk, downSampleRate=td_rate, is_verbose=False) - - print('\t\t\tsaving {} ...'.format(os.path.split(save_path)[1])) - tf.imsave(save_path, save_chunk) - save_id = save_id + 1 - - if total_mov.shape[0] % (frames_per_file * td_rate) == 0: - total_mov = None - else: - frame_num_left = total_mov.shape[0] % (frames_per_file * td_rate) - total_mov = total_mov[-frame_num_left:] - - if total_mov is not None: - save_path = os.path.join(working_folder, '{}_{:05d}_corrected_downsampled.tif'.format(file_identifier, save_id)) - save_chunk = ia.z_downsample(total_mov, downSampleRate=td_rate, is_verbose=False) - print('\t\t\tsaving {} ...'.format(os.path.split(save_path)[1])) - tf.imsave(save_path, save_chunk) - - return - -if td_rate == 1: - raise ValueError('Downsample rate should not be 1!') - -plane_ns = [f for f in os.listdir(data_folder) if f[0:5] == 'plane'] -plane_ns.sort() -print('all planes:') -print('\n'.join(plane_ns)) -print('') - -for plane_n in plane_ns: - print('current plane: {}'.format(plane_n)) - plane_folder = os.path.join(data_folder, plane_n) - ch_ns = [f for f in os.listdir(plane_folder)] - ch_ns.sort() - print('\tall channels: {}'.format(ch_ns)) - - for ch_n in ch_ns: - print('') - print('\tcurrent channel: {}'.format(ch_n)) - ch_folder = os.path.join(plane_folder, ch_n) - 
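# ia.z_downsample (from corticalmapping.core.ImageAnalysis) is called above but
# its body is not part of this diff; assuming it block-averages every `td_rate`
# consecutive frames, which is consistent with the chunks above being kept at
# multiples of frames_per_file * td_rate, a minimal numpy equivalent would be:
import numpy as np

def z_downsample_block_mean(mov, td_rate):
    """Average every td_rate consecutive frames of a (t, y, x) movie."""
    n_keep = (mov.shape[0] // td_rate) * td_rate      # drop residual frames
    return mov[:n_keep].reshape((-1, td_rate) + mov.shape[1:]).mean(axis=1)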
- downsample_folder(working_folder=os.path.join(ch_folder, 'corrected'), - td_rate=td_rate, - file_identifier=file_identifier, - frames_per_file=frames_per_file) - -print('\nDone!') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/055_downsample_from_server.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/055_downsample_from_server.py deleted file mode 100644 index 9e3a003..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/055_downsample_from_server.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia - -date_recorded = '190503' -mouse_id = 'M439939' -xy_downsample_rate = 2 -t_downsample_rate = 10 -ch_ns = ['green', 'red'] -data_folder_n = '110_LSNDGC_reorged' -imaging_mode = '2p' # '2p' or 'deepscope' -identifier = '110_LSNDGC' - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\{}-{}-{}" \ - r"\{}".format(date_recorded, mouse_id, imaging_mode, data_folder_n) - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -plane_ns = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f)) and f[:5] == 'plane'] -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('\nprocessing plane: {}'.format(plane_n)) - - save_folder = os.path.join(curr_folder, plane_n) - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - for ch_n in ch_ns: - print('\n\tprocessing channel: {}'.format(ch_n)) - plane_folder = os.path.join(data_folder, plane_n, ch_n, 'corrected') - - # f_ns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - f_ns = [f for f in os.listdir(plane_folder) if f[-4:] == '.tif' and identifier in f] - f_ns.sort() - print('\t\t'+'\n\t\t'.join(f_ns) + '\n') - - mov_d = [] - - for f_n in f_ns: - print('\t\tprocessing {} ...'.format(f_n)) - curr_mov = tf.imread(os.path.join(plane_folder, f_n)) - curr_mov_d = ia.rigid_transform_cv2(img=curr_mov, zoom=(1.
/ xy_downsample_rate)) - curr_mov_d = ia.z_downsample(curr_mov_d, downSampleRate=t_downsample_rate, is_verbose=False) - mov_d.append(curr_mov_d) - - mov_d = np.concatenate(mov_d, axis=0) - save_n = '{}_{}_{}_downsampled.tif'.format(os.path.split(data_folder)[1], plane_n, ch_n) - tf.imsave(os.path.join(save_folder, save_n), mov_d) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/060_get_image_data.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/060_get_image_data.py deleted file mode 100644 index 88ce9b1..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/060_get_image_data.py +++ /dev/null @@ -1,78 +0,0 @@ -import os -import h5py -import numpy as np -import skimage.external.tifffile as tf - -date_recorded = '190503' -mouse_id = 'M439939' -sess_id = '110' -channel = 'green' -data_folder_n = '110_LSNDGC_reorged' -imaging_mode = '2p' # '2p' or 'deepscope' -identifier = '110_LSNDGC' - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\{}-{}-{}" \ - r"\{}".format(date_recorded, mouse_id, imaging_mode, data_folder_n) - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -file_prefix = '{}_{}_{}'.format(date_recorded, mouse_id, sess_id) - -plane_fns = [f for f in os.listdir(data_folder) if f[:5] == 'plane'] -plane_fns.sort() -print('\n'.join(plane_fns)) - -data_f = h5py.File(file_prefix + '_2p_movies.hdf5') - -for plane_fn in plane_fns: - print('\nprocessing {} ...'.format(plane_fn)) - plane_folder = os.path.join(data_folder, plane_fn, channel, 'corrected') - # mov_fns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - mov_fns = [f for f in os.listdir(plane_folder) if f[-4:] == '.tif' and identifier in f] - mov_fns.sort() - print('\n'.join(mov_fns)) - - # get shape of concatenated movie - z1, y, x = tf.imread(os.path.join(plane_folder, mov_fns[0])).shape - z0, _, _ = tf.imread(os.path.join(plane_folder, mov_fns[-1])).shape - z = z0 + z1 * (len(mov_fns) - 1) - - # for mov_fn in mov_fns: - # print('reading {} ...'.format(mov_fn)) - # curr_z, curr_y, curr_x = tf.imread(os.path.join(plane_folder, mov_fn)).shape - # - # if y is None: - # y = curr_y - # else: - # if y != curr_y: - # raise ValueError('y dimension ({}) of file "{}" does not agree with previous file(s) ({}).' - # .format(curr_y, mov_fn, y)) - # - # if x is None: - # x = curr_x - # else: - # if x != curr_x: - # raise ValueError('x dimension ({}) of file "{}" does not agree with previous file(s) ({}).' - # .format(curr_x, mov_fn, x)) - # - # z = z + curr_z - - print((z,y,x)) - dset = data_f.create_dataset(plane_fn, (z, y, x), dtype=np.int16, compression='lzf') - - start_frame = 0 - end_frame = 0 - for mov_fn in mov_fns: - print('reading {} ...'.format(mov_fn)) - curr_mov = tf.imread(os.path.join(plane_folder, mov_fn)) - end_frame = start_frame + curr_mov.shape[0] - dset[start_frame : end_frame] = curr_mov - start_frame = end_frame - - dset.attrs['conversion'] = 1. - dset.attrs['resolution'] = 1. 
- dset.attrs['unit'] = 'arbitrary_unit' - -data_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/070_get_hdf5_files_for_caiman_soma.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/070_get_hdf5_files_for_caiman_soma.py deleted file mode 100644 index 15442fe..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/070_get_hdf5_files_for_caiman_soma.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import h5py - -date_recorded = '190809' -mouse_id = 'M471944' -sess_id = '110' -t_downsample_rate = 3 -channel = 'green' -data_folder_n = '110_LSNDGC_reorged' -imaging_mode = '2p' # '2p' or 'deepscope' -identifier = '110_LSNDGC' - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\{}-{}-{}" \ - r"\{}".format(date_recorded, mouse_id, imaging_mode, data_folder_n) - -plane_ns = [p for p in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, p))] -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('\nprocessing {} ...'.format(plane_n)) - - plane_folder = os.path.join(data_folder, plane_n, channel, 'corrected') - os.chdir(plane_folder) - - # f_ns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - f_ns = [f for f in os.listdir(plane_folder) if f[-4:] == '.tif' and identifier in f] - f_ns.sort() - print('\n'.join(f_ns)) - - mov_join = [] - for f_n in f_ns: - print('processing plane: {}; file: {} ...'.format(plane_n, f_n)) - - curr_mov = tf.imread(os.path.join(plane_folder, f_n)) - - if curr_mov.shape[0] % t_downsample_rate != 0: - print('the frame number of {} ({}) is not divisible by t_downsample_rate ({}).'
- .format(f_n, curr_mov.shape[0], t_downsample_rate)) - - curr_mov_d = ia.z_downsample(curr_mov, downSampleRate=t_downsample_rate, is_verbose=False) - mov_join.append(curr_mov_d) - - mov_join = np.concatenate(mov_join, axis=0) - - save_name = '{}_{}_{}_{}_downsampled_for_caiman.hdf5'.format(date_recorded, mouse_id, sess_id, plane_n) - save_f = h5py.File(os.path.join(plane_folder, save_name)) - save_f.create_dataset('mov', data=mov_join) - save_f.close() - - # save_name = '{}_d1_{}_d2_{}_d3_1_order_C_frames_{}_.mmap'\ - # .format(base_name, mov_join.shape[2], mov_join.shape[1], mov_join.shape[0]) - # - # mov_join = mov_join.reshape((mov_join.shape[0], mov_join.shape[1] * mov_join.shape[2]), order='F').transpose() - # mov_join_mmap = np.memmap(os.path.join(plane_folder, save_name), shape=mov_join.shape, order='C', - # dtype=np.float32, mode='w+') - # mov_join_mmap[:] = mov_join + add_to_mov - # mov_join_mmap.flush() - # del mov_join_mmap - -print('done!') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/070_get_mmap_files_for_caiman_bouton.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/070_get_mmap_files_for_caiman_bouton.py deleted file mode 100644 index 963e2b3..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/070_get_mmap_files_for_caiman_bouton.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import h5py - -date_recorded = '190503' -mouse_id = 'M439939' -sess_id = '110' -t_downsample_rate = 5 -channel = 'green' -data_folder_n = '110_LSNDGC_reorged' -imaging_mode = '2p' # '2p' or 'deepscope' -identifier = '110_LSNDGC' - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\{}-{}-{}" \ - r"\{}".format(date_recorded, mouse_id, imaging_mode, data_folder_n) -base_name = '{}_{}_{}'.format(date_recorded, mouse_id, sess_id) - -plane_ns = [p for p in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, p))] -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('\nprocessing {} ...'.format(plane_n)) - - plane_folder = os.path.join(data_folder, plane_n, channel, 'corrected') - os.chdir(plane_folder) - - # f_ns = [f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'] - f_ns = [f for f in os.listdir(plane_folder) if f[-4:] == '.tif' and identifier in f] - f_ns.sort() - print('\n'.join(f_ns)) - - mov_join = [] - for f_n in f_ns: - print('processing plane: {}; file: {} ...'.format(plane_n, f_n)) - - curr_mov = tf.imread(os.path.join(plane_folder, f_n)) - - if curr_mov.shape[0] % t_downsample_rate != 0: - print('the frame number of {} ({}) is not divisible by t_downsample_rate ({}).' 
- .format(f_n, curr_mov.shape[0], t_downsample_rate)) - - curr_mov_d = ia.z_downsample(curr_mov, downSampleRate=t_downsample_rate, is_verbose=False) - mov_join.append(curr_mov_d) - - mov_join = np.concatenate(mov_join, axis=0) - add_to_mov = 10 - np.amin(mov_join) - - save_name = '{}_d1_{}_d2_{}_d3_1_order_C_frames_{}_.mmap'\ - .format(base_name, mov_join.shape[2], mov_join.shape[1], mov_join.shape[0]) - - mov_join = mov_join.reshape((mov_join.shape[0], mov_join.shape[1] * mov_join.shape[2]), order='F').transpose() - mov_join_mmap = np.memmap(os.path.join(plane_folder, save_name), shape=mov_join.shape, order='C', - dtype=np.float32, mode='w+') - mov_join_mmap[:] = mov_join + add_to_mov - mov_join_mmap.flush() - del mov_join_mmap - - save_file = h5py.File(os.path.join(plane_folder, 'caiman_segmentation_results.hdf5')) - save_file['bias_added_to_movie'] = add_to_mov - save_file.close() - -print('done!') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/110_caiman_segmentation_bouton.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/110_caiman_segmentation_bouton.py deleted file mode 100644 index 6ba8a0f..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/110_caiman_segmentation_bouton.py +++ /dev/null @@ -1,126 +0,0 @@ -""" -run it in command line with old caiman v1.0 - ->>> activate caiman -""" - -import sys; print('Python %s on %s' % (sys.version, sys.platform)) -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import os -import numpy as np -import caiman as cm -import matplotlib.pyplot as plt -from caiman.source_extraction.cnmf import cnmf as cnmf -import h5py -from shutil import copyfile - -def run(): - date_recorded = '190503' - mouse_id = 'M439939' - play_movie = False - resolution = 512 - channel = 'green' - - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\{}-{}-2p" \ - r"\110_LSNDGC_reorged".format(date_recorded, mouse_id) - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - - plane_ns = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f)) and f[:5] == 'plane'] - plane_ns.sort() - print('planes:') - print('\n'.join(plane_ns)) - - # %% start cluster - c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=6, single_thread=False) - - for plane_n in plane_ns: - - print('\nsegmenting plane: {}'.format(plane_n)) - - plane_folder = os.path.join(data_folder, plane_n, channel, 'corrected') - os.chdir(plane_folder) - - fn = [f for f in os.listdir(plane_folder) if f[-5:] == '.mmap'] - if len(fn) > 1: - print('\n'.join(fn)) - raise LookupError('more than one file found.') - elif len(fn) == 0: - raise LookupError('no file found.') - else: - fn = fn[0] - - fn_parts = fn.split('_') - d1 = int(fn_parts[fn_parts.index('d1') + 1]) # column, x - d2 = int(fn_parts[fn_parts.index('d2') + 1]) # row, y - d3 = int(fn_parts[fn_parts.index('d3') + 1]) # channel - d4 = int(fn_parts[fn_parts.index('frames') + 1]) # frame, T - order = fn_parts[fn_parts.index('order') + 1] - - print('loading {} ...'.format(fn)) - - mov = np.memmap(filename=fn, shape=(d1, d2, d4), order=order, dtype=np.float32, mode='r') - mov = mov.transpose((2, 1, 0)) - - print('shape of joined movie: {}.'.format(mov.shape)) - - #%% play movie, press q to quit - if play_movie: - cm.movie(mov).play(fr=50, magnification=1, gain=2.) - - #%% movie cannot be negative!
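# Why the movie cannot be negative: method_init='sparse_nmf' (used below)
# factorizes the data into non-negative components, so the mmap writer in
# 070_get_mmap_files_for_caiman_bouton.py above lifts the movie by
# add_to_mov = 10 - np.amin(mov_join), stores the offset, and the traces are
# de-biased after fitting (save_fn['traces'] = cnm.C - bias below). A minimal
# sketch of that round trip (helper names here are hypothetical):
import numpy as np

def bias_for_nonneg(mov, floor=10.):
    """Constant offset that lifts the movie minimum to `floor` (> 0)."""
    return floor - float(np.amin(mov))

def debias_traces(traces_fit, bias_added_to_movie):
    """Undo the constant offset on the fitted temporal components."""
    return traces_fit - bias_added_to_movie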
- mov_min = float(np.amin(mov)) - print('minimum pixel value: {}.'.format(mov_min)) - if mov_min < 0: - raise Exception('Movie too negative, add_to_movie should be larger') - - #%% correlation image. From here infer neuron size and density - Cn = cm.movie(mov).local_correlations(swap_dim=False) - # plt.imshow(Cn, cmap='gray') - # plt.show() - - cnm = cnmf.CNMF(n_processes, - k=10, # number of neurons expected per patch - gSig=[5, 5], # expected half size of neurons - merge_thresh=0.9, # merging threshold, max correlation allowed - p=2, # order of the autoregressive system - dview=dview, - Ain=None, - method_deconvolution='oasis', - rolling_sum=False, - method_init='sparse_nmf', - alpha_snmf=10e1, - ssub=1, - tsub=1, - p_ssub=1, - p_tsub=1, - rf=int(resolution / 2), # half-size of the patches in pixels - border_pix=0, - do_merge=False) - cnm = cnm.fit(mov) - A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn - #%% - # crd = cm.utils.visualization.plot_contours(cnm.A, Cn) - # plt.show() - # input("Press enter to continue ...") - - roi_num = cnm.A.shape[1] - save_fn = h5py.File('caiman_segmentation_results.hdf5') - bias = save_fn['bias_added_to_movie'].value - save_fn['masks'] = np.array(cnm.A.todense()).T.reshape((roi_num, 512, 512), order='F') - save_fn['traces'] = cnm.C - bias - save_fn.close() - - copyfile(os.path.join(plane_folder, 'caiman_segmentation_results.hdf5'), - os.path.join(curr_folder, plane_n, 'caiman_segmentation_results.hdf5')) - - plt.close('all') - - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/110_caiman_segmentation_soma.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/110_caiman_segmentation_soma.py deleted file mode 100644 index e5e4c8f..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/110_caiman_segmentation_soma.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -run it in command line with new caiman v1.5.3 - ->>> activate caiman_new -""" - -import os -import numpy as np -from caiman.source_extraction import cnmf as cnmf -import h5py -from shutil import copyfile - - -date_recorded = '190814' -mouse_id = 'M471944' -resolution = (512, 512) -channel = 'green' -data_folder_n = '110_LSNDGCUC_reorg' -imaging_mode = 'deepscope' # '2p' or 'deepscope' - - -# caiman parameters -fr = 2 # frame rate (Hz) -decay_time = 0.5 # approximate length of transient event in seconds -gSig = (8, 8) # expected half size of neurons -p = 1 # order of AR indicator dynamics -min_SNR = 1 # minimum SNR for accepting new components -rval_thr = 0.80 # correlation threshold for new component inclusion -ds_factor = 1 # spatial downsampling factor (increases speed but may lose some fine structure) -gnb = 2 # number of background components -gSig = tuple(np.ceil(np.array(gSig) / ds_factor).astype('int')) # recompute gSig if downsampling is involved -mot_corr = False # flag for online motion correction -pw_rigid = False # flag for pw-rigid motion correction (slower but potentially more accurate) -max_shifts_online = np.ceil(10.
/ ds_factor).astype('int') # maximum allowed shift during motion correction -sniper_mode = True # flag using a CNN to detect new neurons (o/w space correlation is used) -init_batch = 200 # number of frames for initialization (presumably from the first file) -expected_comps = 500 # maximum number of expected components used for memory pre-allocation (exaggerate here) -dist_shape_update = True # flag for updating shapes in a distributed way -min_num_trial = 10 # number of candidate components per frame -K = 2 # initial number of components -epochs = 2 # number of passes over the data -show_movie = False # show the movie with the results as the data gets processed - -curr_folder = os.path.dirname(os.path.realpath(__file__)) - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\{}-{}-{}" \ - r"\{}".format(date_recorded, mouse_id, imaging_mode, data_folder_n) - - -plane_ns = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f)) and f[:5] == 'plane'] -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - - print('\nsegmenting plane: {}'.format(plane_n)) - - plane_folder = os.path.join(data_folder, plane_n, channel, 'corrected') - os.chdir(plane_folder) - - fn = [f for f in os.listdir(plane_folder) if len(f) > 16 and f[-16:] == '_for_caiman.hdf5'] - if len(fn) > 1: - print('\n'.join(fn)) - raise LookupError('more than one file found.') - elif len(fn) == 0: - raise LookupError('no file found.') - else: - fn = fn[0] - - fp = os.path.join(os.path.realpath(plane_folder), fn) - - params_dict = {'fnames': [fp], - 'fr': fr, - 'decay_time': decay_time, - 'gSig': gSig, - 'p': p, - 'min_SNR': min_SNR, - 'rval_thr': rval_thr, - 'ds_factor': ds_factor, - 'nb': gnb, - 'motion_correct': mot_corr, - 'init_batch': init_batch, - 'init_method': 'bare', - 'normalize': True, - 'expected_comps': expected_comps, - 'sniper_mode': sniper_mode, - 'dist_shape_update': dist_shape_update, - 'min_num_trial': min_num_trial, - 'K': K, - 'epochs': epochs, - 'max_shifts_online': max_shifts_online, - 'pw_rigid': pw_rigid, - 'show_movie': show_movie} - - opts = cnmf.params.CNMFParams(params_dict=params_dict) - - cnm = cnmf.online_cnmf.OnACID(params=opts) - cnm.fit_online() - - roi_num = cnm.estimates.A.shape[1] - print('saving ...') - save_f = h5py.File('caiman_segmentation_results.hdf5') - save_f['masks'] = np.array(cnm.estimates.A.todense()).T.reshape((roi_num, resolution[0], resolution[1]), order='F') - save_f['traces'] = cnm.estimates.C - save_f.close() - - copyfile(os.path.join(plane_folder, 'caiman_segmentation_results.hdf5'), - os.path.join(curr_folder, plane_n, 'caiman_segmentation_results.hdf5')) diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/200_generate_nwb.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/200_generate_nwb.py deleted file mode 100644 index 7105a41..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/200_generate_nwb.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import corticalmapping.NwbTools as nt - -date_recorded = '190503' -mouse_id = '439939' -sess_num = '110' - -experimenter = 'Jun' -genotype = 'Vipr2-IRES2-Cre-neo' -sex = 'Male' -age = '147' -indicator = 'GCaMP6s' -imaging_rate = 30. # deepscope 37.
-imaging_depth = '150 microns' # deepscope [150, 100, 50] or [300, 250, 200] -imaging_location = 'visual cortex' -imaging_device = 'Sutter' # or DeepScope -imaging_excitation_lambda = '920 nanometers' # deepscope 940 nanometers - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -notebook_path = os.path.join(curr_folder, 'notebook.txt') -with open(notebook_path, 'r') as ff: - notes = ff.read() - -general = nt.DEFAULT_GENERAL -general['experimenter'] = experimenter -general['subject']['subject_id'] = mouse_id -general['subject']['genotype'] = genotype -general['subject']['sex'] = sex -general['subject']['age'] = age -general['optophysiology'].update({'imaging_plane_0': {}}) -general['optophysiology']['imaging_plane_0'].update({'indicator': indicator}) -general['optophysiology']['imaging_plane_0'].update({'imaging_rate': imaging_rate}) -general['optophysiology']['imaging_plane_0'].update({'imaging_depth': imaging_depth}) -general['optophysiology']['imaging_plane_0'].update({'location': imaging_location}) -general['optophysiology']['imaging_plane_0'].update({'device': imaging_device}) -general['optophysiology']['imaging_plane_0'].update({'excitation_lambda': imaging_excitation_lambda}) -general['notes'] = notes - -file_name = date_recorded + '_M' + mouse_id + '_' + sess_num + '.nwb' - -rf = nt.RecordedFile(os.path.join(curr_folder, file_name), identifier=file_name[:-4], description='') -rf.add_general(general=general) -rf.close() - - - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/210_add_vasmap.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/210_add_vasmap.py deleted file mode 100644 index 25a86fa..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/210_add_vasmap.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import corticalmapping.NwbTools as nt -import corticalmapping.core.ImageAnalysis as ia -import matplotlib.pyplot as plt -import tifffile as tf - -is_plot = False -vasmap_dict = { - 'vasmap_wf': 'wide field surface vasculature map through cranial window original', - 'vasmap_wf_rotated': 'wide field surface vasculature map through cranial window rotated', - 'vasmap_2p_green': '2p surface vasculature map through cranial window green original, zoom1', - 'vasmap_2p_green_rotated': '2p surface vasculature map through cranial window green rotated, zoom1', - 'vasmap_2p_red': '2p surface vasculature map through cranial window red original, zoom1', - 'vasmap_2p_red_rotated': '2p surface vasculature map through cranial window red rotated, zoom1' - } - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -for mn, des in vasmap_dict.items(): - try: - curr_m = ia.array_nor(tf.imread(mn + '.tif')) - - if is_plot: - f = plt.figure(figsize=(10, 10)) - ax = f.add_subplot(111) - ax.imshow(curr_m, vmin=0., vmax=1., cmap='gray', interpolation='nearest') - ax.set_axis_off() - ax.set_title(mn) - plt.show() - - print('adding {} to nwb file.'.format(mn)) - nwb_f.add_acquisition_image(mn, curr_m, description=des) - - except Exception as e: - print(e) - -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/220_add_sync_data.py
b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/220_add_sync_data.py deleted file mode 100644 index 2e26ef3..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/220_add_sync_data.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import corticalmapping.NwbTools as nt - -record_date = '190503' -mouse_id = '439939' -session_id = '110' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = record_date + '_M' + mouse_id + '_' + session_id + '.nwb' - -sync_fn = [f for f in os.listdir(curr_folder) if f[-3:] == '.h5' and record_date in f and 'M' + mouse_id in f] -if len(sync_fn) == 0: - raise LookupError('Did not find sync .h5 file.') -elif len(sync_fn) > 1: - raise LookupError('More than one sync .h5 file found.') -else: - sync_fn = sync_fn[0] - -nwb_f = nt.RecordedFile(nwb_fn) -nwb_f.add_sync_data(sync_fn) -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/221_(optional)_check_2p_timestamps_deepscope.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/221_(optional)_check_2p_timestamps_deepscope.py deleted file mode 100644 index e211042..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/221_(optional)_check_2p_timestamps_deepscope.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import numpy as np -import h5py -import matplotlib.pyplot as plt - -ts_2p_key = 'digital_vsync_2p_rise' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -ts_2p = nwb_f['acquisition/timeseries/{}/timestamps'.format(ts_2p_key)].value -print('number of 2p frame timestamps: {}'.format(len(ts_2p))) - -dur_2p = np.diff(ts_2p) -max_ind = np.argmax(dur_2p) -print('maximum 2p frame duration: {}'.format(dur_2p[max_ind])) - -# fis = np.arange(21, dtype=np.int) - 10 + max_ind -# -# for fi in fis: -# print('{}, ctime: {}s, duration: {}s'.format(fns[fi], ctimes[fi], ctime_diff[fi])) - -plt.plot(dur_2p) -plt.show() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/230_add_image_data.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/230_add_image_data.py deleted file mode 100644 index a913bff..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/230_add_image_data.py +++ /dev/null @@ -1,69 +0,0 @@ -import os -import h5py -import corticalmapping.NwbTools as nt - -dset_ns = ['plane0'] # ['plane0', 'plane1', 'plane2'] -imaging_depths = [150] -temporal_downsample_rate = 5 # product of the temporal downsample rates applied before and after motion correction -scope = 'sutter' # 'sutter' or 'DeepScope' -zoom = 4 - -description = '2-photon imaging data' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if scope == 'DeepScope': - pixel_size = 0.000000967 / zoom # meter -elif scope == 'sutter': - pixel_size = 0.0000014 / zoom # meter -else: - raise LookupError('do not understand scope type') - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -ts_2p_tot = nwb_f.file_pointer['/acquisition/timeseries/digital_vsync_2p_rise/timestamps'].value - -# if scope == 'sutter': -# ts_2p_tot = nwb_f.file_pointer['/acquisition/timeseries/digital_vsync_2p_rise/timestamps'].value -# elif scope == 'DeepScope': -# ts_2p_tot = 
nwb_f.file_pointer['/acquisition/timeseries/digital_2p_vsync_rise/timestamps'].value -# else: -# raise LookupError('do not understand scope type') -# print('total 2p timestamps count: {}'.format(len(ts_2p_tot))) - -mov_fn = os.path.splitext(nwb_fn)[0] + '_2p_movies.hdf5' -mov_f = h5py.File(mov_fn, 'r') - -for mov_i, mov_dn in enumerate(dset_ns): - - if mov_dn is not None: - - curr_dset = mov_f[mov_dn] - if mov_dn is not None: - mov_ts = ts_2p_tot[mov_i::len(dset_ns)] - print('\n{}: total 2p timestamps count: {}'.format(mov_dn, len(mov_ts))) - - mov_ts_d = mov_ts[::temporal_downsample_rate] - print('{}: downsampled 2p timestamps count: {}'.format(mov_dn, len(mov_ts_d))) - print('{}: downsampled 2p movie frame num: {}'.format(mov_dn, curr_dset.shape[0])) - - # if len(mov_ts_d) == curr_dset.shape[0]: - # pass - # elif len(mov_ts_d) == curr_dset.shape[0] + 1: - # mov_ts_d = mov_ts_d[0: -1] - # else: - # raise ValueError('the timestamp count of {} movie ({}) does not equal (or is not greater by one) ' - # 'the frame count in the movie ({})'.format(mov_dn, len(mov_ts_d), curr_dset.shape[0])) - mov_ts_d = mov_ts_d[:curr_dset.shape[0]] - - curr_description = '{}. Imaging depth: {} micron.'.format(description, imaging_depths[mov_i]) - nwb_f.add_acquired_image_series_as_remote_link('2p_movie_' + mov_dn, image_file_path=mov_fn, - dataset_path=mov_dn, timestamps=mov_ts_d, - description=curr_description, comments='', - data_format='zyx', pixel_size=[pixel_size, pixel_size], - pixel_size_unit='meter') - -mov_f.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/240_add_motion_correction_module.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/240_add_motion_correction_module.py deleted file mode 100644 index 8532270..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/240_add_motion_correction_module.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import h5py -import corticalmapping.NwbTools as nt -import corticalmapping.core.ImageAnalysis as ia - -movie_2p_fn = '190503_M439939_110_2p_movies.hdf5' -plane_num = 1 -temporal_downsample_rate = 1 # downsample rate after motion correction - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -input_parameters = [] - -for i in range(plane_num): - - plane_n = 'plane{}'.format(i) - - offsets_path = os.path.join(plane_n, 'correction_offsets.hdf5') - offsets_f = h5py.File(offsets_path, 'r') - offsets_keys = offsets_f.keys() - if 'path_list' in offsets_keys: - offsets_keys.remove('path_list') - - offsets_keys.sort() - offsets = [] - for offsets_key in offsets_keys: - offsets.append(offsets_f[offsets_key].value) - offsets = np.concatenate(offsets, axis=0) - offsets = np.array(zip(offsets[:, 1], offsets[:, 0])) - offsets_f.close() - - mean_projection = tf.imread(os.path.join(plane_n, 'corrected_mean_projection.tif')) - max_projection = tf.imread(os.path.join(plane_n, 'corrected_max_projections.tif')) - max_projection = ia.array_nor(np.max(max_projection, axis=0)) - - input_dict = {'field_name': plane_n, - 'original_timeseries_path': '/acquisition/timeseries/2p_movie_plane' + str(i), - 'corrected_file_path': movie_2p_fn, - 'corrected_dataset_path': plane_n, - 'xy_translation_offsets': offsets, - 'mean_projection': mean_projection, - 'max_projection': max_projection, - 'description': '', - 'comments': '', - 'source': ''} - - input_parameters.append(input_dict) - -nwb_fn = [f for f
in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -nwb_f.add_muliple_dataset_to_motion_correction_module(input_parameters=input_parameters, - module_name='motion_correction', - temporal_downsample_rate=temporal_downsample_rate) -nwb_f.close() - - - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/250_get_photodiode_onset.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/250_get_photodiode_onset.py deleted file mode 100644 index 7344453..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/250_get_photodiode_onset.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.HighLevel as hl - -# photodiode -digitizeThr = 0.15 # deepscope: 0.15 or 0.055, sutter: -0.15 -filterSize = 0.01 # deepscope: 0.01, sutter: 0.01 -segmentThr = 0.02 # deepscope: 0.02, sutter: 0.01 -smallestInterval = 0.05 # deepscope: 0.05, sutter: 0.05 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] - -nwb_f = nt.RecordedFile(nwb_fn) -pd, pd_t = nwb_f.get_analog_data(ch_n='analog_photodiode') -fs = 1. / np.mean(np.diff(pd_t)) -# print fs - -pd_onsets = hl.segmentPhotodiodeSignal(pd, digitizeThr=digitizeThr, filterSize=filterSize, - segmentThr=segmentThr, Fs=fs, smallestInterval=smallestInterval) - -raw_input('press enter to continue ...') - -pdo_ts = nwb_f.create_timeseries('TimeSeries', 'digital_photodiode_rise', modality='other') -pdo_ts.set_time(pd_onsets) -pdo_ts.set_data([], unit='', conversion=np.nan, resolution=np.nan) -pdo_ts.set_value('digitize_threshold', digitizeThr) -pdo_ts.set_value('filter_size', filterSize) -pdo_ts.set_value('segment_threshold', segmentThr) -pdo_ts.set_value('smallest_interval', smallestInterval) -pdo_ts.set_description('Real Timestamps (master acquisition clock) of photodiode onset. 
' - 'Extracted from analog photodiode signal by the function: ' - 'corticalmapping.HighLevel.segmentPhotodiodeSignal() using parameters saved in the ' - 'current timeseries.') -pdo_ts.set_path('/analysis') -pdo_ts.finalize() - -nwb_f.close() - - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/260_add_visual_stimuli_retinotopic_mapping.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/260_add_visual_stimuli_retinotopic_mapping.py deleted file mode 100644 index 6f021ad..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/260_add_visual_stimuli_retinotopic_mapping.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import retinotopic_mapping.DisplayLogAnalysis as dla -import corticalmapping.NwbTools as nt - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -stim_pkl_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.pkl'][0] -stim_log = dla.DisplayLogAnalyzer(stim_pkl_fn) - -nwb_f.add_visual_display_log_retinotopic_mapping(stim_log=stim_log) -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/270_analyze_photodiode_onsets.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/270_analyze_photodiode_onsets.py deleted file mode 100644 index af32220..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/270_analyze_photodiode_onsets.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import retinotopic_mapping.DisplayLogAnalysis as dla -import corticalmapping.core.TimingAnalysis as ta - -# for deepscope -vsync_frame_path = 'acquisition/timeseries/digital_vsync_stim_rise' - -# for sutter -# vsync_frame_path = 'acquisition/timeseries/digital_vsync_visual_rise' - -pd_ts_pd_path = 'analysis/digital_photodiode_rise' -pd_thr = -0.5 # this is color threshold, not analog photodiode threshold -ccg_t_range = (0., 0.1) -ccg_bins = 100 -is_plot = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -stim_pkl_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.pkl'][0] -stim_log = dla.DisplayLogAnalyzer(stim_pkl_fn) - -# get display lag -display_delay = nwb_f.get_display_delay_retinotopic_mapping(stim_log=stim_log, indicator_color_thr=pd_thr, - ccg_t_range=ccg_t_range, ccg_bins=ccg_bins, - is_plot=is_plot, pd_onset_ts_path=pd_ts_pd_path, - vsync_frame_ts_path=vsync_frame_path) - -# analyze photodiode onset -stim_dict = stim_log.get_stim_dict() -pd_onsets_seq = stim_log.analyze_photodiode_onsets_sequential(stim_dict=stim_dict, pd_thr=pd_thr) -pd_onsets_com = stim_log.analyze_photodiode_onsets_combined(pd_onsets_seq=pd_onsets_seq, - is_dgc_blocked=True) -nwb_f.add_photodiode_onsets_combined_retinotopic_mapping(pd_onsets_com=pd_onsets_com, - display_delay=display_delay, - vsync_frame_path=vsync_frame_path) -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/275_add_eyetracking.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/275_add_eyetracking.py deleted file mode 100644 index 228502f..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/275_add_eyetracking.py +++ /dev/null @@ -1,68 +0,0 @@ -import os 
-import numpy as np -import h5py -import corticalmapping.NwbTools as nt - -diagonal_length = 9.0 # mm, the length of diagonal line of eyetracking field of view -side = 'right' # right eye or left eye -scope = 'deepscope' # 'sutter' or 'deepscope' - -comments = 'small_x=temporal; big_x=nasal; small_y=dorsal; big_y=ventral' - -if scope == 'sutter': - eyetracking_ts_name = 'digital_vsync_right_eye_mon_rise' - frame_shape = (658, 492) -elif scope == 'deepscope': - eyetracking_ts_name = 'digital_cam_eye_rise' - frame_shape = (640, 480) -else: - raise LookupError('do not understand scope type.') - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -eye_folder = os.path.join(curr_folder, 'videomon') -fn = [f for f in os.listdir(eye_folder) if f[-12:] == '_output.hdf5'] -if len(fn) != 1: - raise LookupError('could not find a unique processed eyetracking data file.') -else: - fn = fn[0] - -pixel_size = diagonal_length / np.sqrt(frame_shape[0] ** 2 + frame_shape[1] ** 2) -print('eyetracking pixel size: {:5.3f} mm/pix'.format(pixel_size)) - -eye_file = h5py.File(os.path.join(eye_folder, fn), 'r') -led_pos = eye_file['led_positions'].value -pup_pos = eye_file['pupil_positions'].value -pup_shape = eye_file['pupil_shapes'].value -pup_shape[:, 0] = pup_shape[:, 0] * pixel_size -pup_shape[:, 1] = pup_shape[:, 1] * pixel_size -pup_shape_meta = 'format: {}; unit: [millimeter, millimeter, degree]'.format(eye_file['pupil_shapes'].attrs['format']) - -if scope == 'sutter': - pup_x = frame_shape[0] - pup_pos[:, 1] -elif scope == 'deepscope': - pup_x = pup_pos[:, 1] -else: - raise LookupError('do not understand scope type.') - -pup_y = pup_pos[:, 0] - -pup_pos = (pup_pos - led_pos) * pixel_size -pup_area = pup_shape[:, 0] * pup_shape[:, 1] * np.pi # pupil axes already converted to millimeters above - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) -nwb_f.add_eyetracking_data(ts_path=eyetracking_ts_name, - pupil_x=pup_x, - pupil_y=pup_y, - pupil_area=pup_area, - module_name='eye_tracking', - unit='millimeter', - side=side, - comments=comments, - description='', - source="Jun's eyetracker with adaptive thresholding", - pupil_shape=pup_shape, - pupil_shape_meta=pup_shape_meta) -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/280_add_rois_and_traces_caiman_segmentation.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/280_add_rois_and_traces_caiman_segmentation.py deleted file mode 100644 index 2044bf7..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/280_add_rois_and_traces_caiman_segmentation.py +++ /dev/null @@ -1,173 +0,0 @@ -import os -import h5py -import numpy as np -import matplotlib.pyplot as plt -import tifffile as tf -import corticalmapping.NwbTools as nt -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia - -# for deepscope -# plane_ns = ['plane0', 'plane1', 'plane2'] -# plane_depths = [150, 100, 50] # or [300, 250, 200] - -# for single plane -plane_ns = ['plane0'] -plane_depths = [150] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -def add_rois_and_traces(data_folder, nwb_f, plane_n, imaging_depth, - mov_path='/processing/motion_correction/MotionCorrection'): - - mov_grp = nwb_f.file_pointer[mov_path + '/' + plane_n + '/corrected'] - - data_f = h5py.File(os.path.join(data_folder, 'rois_and_traces.hdf5'), 'r') - mask_arr_c = data_f['masks_center'].value - mask_arr_s = data_f['masks_surround'].value - traces_center_raw = 
data_f['traces_center_raw'].value - # traces_center_demixed = data_f['traces_center_demixed'].value - traces_center_subtracted = data_f['traces_center_subtracted'].value - # traces_center_dff = data_f['traces_center_dff'].value - traces_surround_raw = data_f['traces_surround_raw'].value - neuropil_r = data_f['neuropil_r'].value - neuropil_err = data_f['neuropil_err'].value - data_f.close() - - - if traces_center_raw.shape[1] != mov_grp['num_samples'].value: - raise ValueError('number of trace time points ({}) does not match frame number of ' - 'corresponding movie ({}).'.format(traces_center_raw.shape[1], mov_grp['num_samples'].value)) - - # traces_center_raw = traces_center_raw[:, :mov_grp['num_samples'].value] - # traces_center_subtracted = traces_center_subtracted[:, :mov_grp['num_samples'].value] - # traces_surround_raw = traces_surround_raw[:, :mov_grp['num_samples'].value] - - rf_img_max = tf.imread(os.path.join(data_folder, 'corrected_max_projection.tif')) - rf_img_mean = tf.imread(os.path.join(data_folder, 'corrected_mean_projection.tif')) - - print('adding segmentation results ...') - rt_mo = nwb_f.create_module('rois_and_traces_' + plane_n) - rt_mo.set_value('imaging_depth_micron', imaging_depth) - is_if = rt_mo.create_interface('ImageSegmentation') - is_if.create_imaging_plane('imaging_plane', description='') - is_if.add_reference_image('imaging_plane', 'max_projection', rf_img_max) - is_if.add_reference_image('imaging_plane', 'mean_projection', rf_img_mean) - - for i in range(mask_arr_c.shape[0]): - curr_cen = mask_arr_c[i] - curr_cen_n = 'roi_' + ft.int2str(i, 4) - curr_cen_roi = ia.WeightedROI(curr_cen) - curr_cen_pixels_yx = curr_cen_roi.get_pixel_array() - curr_cen_pixels_xy = np.array([curr_cen_pixels_yx[:, 1], curr_cen_pixels_yx[:, 0]]).transpose() - is_if.add_roi_mask_pixels(image_plane='imaging_plane', roi_name=curr_cen_n, desc='', - pixel_list=curr_cen_pixels_xy, weights=curr_cen_roi.weights, width=512, height=512) - - curr_sur = mask_arr_s[i] - curr_sur_n = 'surround_' + ft.int2str(i, 4) - curr_sur_roi = ia.ROI(curr_sur) - curr_sur_pixels_yx = curr_sur_roi.get_pixel_array() - curr_sur_pixels_xy = np.array([curr_sur_pixels_yx[:, 1], curr_sur_pixels_yx[:, 0]]).transpose() - is_if.add_roi_mask_pixels(image_plane='imaging_plane', roi_name=curr_sur_n, desc='', - pixel_list=curr_sur_pixels_xy, weights=None, width=512, height=512) - is_if.finalize() - - - - trace_f_if = rt_mo.create_interface('Fluorescence') - seg_if_path = '/processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane' - # print(seg_if_path) - ts_path = mov_path + '/' + plane_n + '/corrected' - - print('adding center fluorescence raw') - trace_raw_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_raw') - trace_raw_ts.set_data(traces_center_raw, unit='au', conversion=np.nan, resolution=np.nan) - trace_raw_ts.set_value('data_format', 'roi (row) x time (column)') - trace_raw_ts.set_value('data_range', '[-8192, 8191]') - trace_raw_ts.set_description('fluorescence traces extracted from the center region of each roi') - trace_raw_ts.set_time_as_link(ts_path) - trace_raw_ts.set_value_as_link('segmentation_interface', seg_if_path) - roi_names = ['roi_' + ft.int2str(ind, 4) for ind in range(traces_center_raw.shape[0])] - trace_raw_ts.set_value('roi_names', roi_names) - trace_raw_ts.set_value('num_samples', traces_center_raw.shape[1]) - trace_f_if.add_timeseries(trace_raw_ts) - trace_raw_ts.finalize() - - print('adding neuropil fluorescence raw') - trace_sur_ts = 
nwb_f.create_timeseries('RoiResponseSeries', 'f_surround_raw') - trace_sur_ts.set_data(traces_surround_raw, unit='au', conversion=np.nan, resolution=np.nan) - trace_sur_ts.set_value('data_format', 'roi (row) x time (column)') - trace_sur_ts.set_value('data_range', '[-8192, 8191]') - trace_sur_ts.set_description('neuropil traces extracted from the surround region of each roi') - trace_sur_ts.set_time_as_link(ts_path) - trace_sur_ts.set_value_as_link('segmentation_interface', seg_if_path) - sur_names = ['surround_' + ft.int2str(ind, 4) for ind in range(traces_center_raw.shape[0])] - trace_sur_ts.set_value('roi_names', sur_names) - trace_sur_ts.set_value('num_samples', traces_surround_raw.shape[1]) - trace_f_if.add_timeseries(trace_sur_ts) - trace_sur_ts.finalize() - - roi_center_n_path = '/processing/rois_and_traces_' + plane_n + '/Fluorescence/f_center_raw/roi_names' - # print('adding center fluorescence demixed') - # trace_demix_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_demixed') - # trace_demix_ts.set_data(traces_center_demixed, unit='au', conversion=np.nan, resolution=np.nan) - # trace_demix_ts.set_value('data_format', 'roi (row) x time (column)') - # trace_demix_ts.set_description('center traces after overlapping demixing for each roi') - # trace_demix_ts.set_time_as_link(mov_path + '/' + plane_n + '/corrected') - # trace_demix_ts.set_value_as_link('segmentation_interface', seg_if_path) - # trace_demix_ts.set_value('roi_names', roi_names) - # trace_demix_ts.set_value('num_samples', traces_center_demixed.shape[1]) - # trace_f_if.add_timeseries(trace_demix_ts) - # trace_demix_ts.finalize() - - print('adding center fluorescence after neuropil subtraction') - trace_sub_ts = nwb_f.create_timeseries('RoiResponseSeries', 'f_center_subtracted') - trace_sub_ts.set_data(traces_center_subtracted, unit='au', conversion=np.nan, resolution=np.nan) - trace_sub_ts.set_value('data_format', 'roi (row) x time (column)') - trace_sub_ts.set_description('center traces after overlap demixing and neuropil subtraction for each roi') - trace_sub_ts.set_time_as_link(mov_path + '/' + plane_n + '/corrected') - trace_sub_ts.set_value_as_link('segmentation_interface', seg_if_path) - trace_sub_ts.set_value_as_link('roi_names', roi_center_n_path) - trace_sub_ts.set_value('num_samples', traces_center_subtracted.shape[1]) - trace_sub_ts.set_value('r', neuropil_r, dtype='float32') - trace_sub_ts.set_value('rmse', neuropil_err, dtype='float32') - trace_sub_ts.set_comments('value "r": neuropil contribution ratio for each roi. ' - 'value "rmse": RMS error of neuropil subtraction for each roi') - trace_f_if.add_timeseries(trace_sub_ts) - trace_sub_ts.finalize() - - trace_f_if.finalize() - - # print('adding global dF/F traces for each roi') - # trace_dff_if = rt_mo.create_interface('DfOverF') - # - # trace_dff_ts = nwb_f.create_timeseries('RoiResponseSeries', 'dff_center') - # trace_dff_ts.set_data(traces_center_dff, unit='au', conversion=np.nan, resolution=np.nan) - # trace_dff_ts.set_value('data_format', 'roi (row) x time (column)') - # trace_dff_ts.set_description('global df/f traces for each roi center, input fluorescence is the trace after demixing' - # ' and neuropil subtraction. 
global df/f is calculated by ' - # 'allensdk.brain_observatory.dff.compute_dff() function.') - # trace_dff_ts.set_time_as_link(ts_path) - # trace_dff_ts.set_value_as_link('segmentation_interface', seg_if_path) - # trace_dff_ts.set_value('roi_names', roi_names) - # trace_dff_ts.set_value('num_samples', traces_center_dff.shape[1]) - # trace_dff_if.add_timeseries(trace_dff_ts) - # trace_dff_ts.finalize() - # trace_dff_if.finalize() - - rt_mo.finalize() - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -for plane_i, plane_n in enumerate(plane_ns): - - print('\n\n' + plane_n) - - data_folder = os.path.join(curr_folder, plane_n) - add_rois_and_traces(data_folder, nwb_f, plane_n, imaging_depth=plane_depths[plane_i]) - -nwb_f.close() - - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/300_get_response_tables.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/300_get_response_tables.py deleted file mode 100644 index 198aedf..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/300_get_response_tables.py +++ /dev/null @@ -1,24 +0,0 @@ -import os -import corticalmapping.NwbTools as nt -import corticalmapping.core.TimingAnalysis as ta - - -strf_t_win = [-0.5, 2.] -dgc_t_win = [-1., 2.5] - -lsn_stim_name = '001_LocallySparseNoiseRetinotopicMapping' -dgc_stim_name = '003_DriftingGratingCircleRetinotopicMapping' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -nwb_f.get_drifting_grating_response_table_retinotopic_mapping(stim_name=dgc_stim_name, time_window=dgc_t_win) -nwb_f.get_spatial_temporal_receptive_field_retinotopic_mapping(stim_name=lsn_stim_name, time_window=strf_t_win) - -nwb_f.close() - - - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/310_plot_STRFs.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/310_plot_STRFs.py deleted file mode 100644 index 7fc800d..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/310_plot_STRFs.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import h5py -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages -import corticalmapping.DatabaseTools as dt - -trace_type = 'sta_f_center_subtracted' -save_folder = 'figures' -bias = 1. 
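# The `bias` above feeds the add_to_trace computation below: dF/F divides by a
# baseline, which misbehaves when a raw trace dips to or near zero, so each
# roi's trace is shifted until its minimum is at least `bias` before
# get_local_dff_strf normalizes it. A minimal sketch of the same guard
# (safe_dff and baseline_slice are hypothetical, not pipeline functions):
import numpy as np

def safe_dff(trace, baseline_slice, bias=1.):
    trace = np.asarray(trace, dtype=float)
    if trace.min() < bias:
        trace = trace + (bias - trace.min())   # same shift as add_to_trace
    f0 = trace[baseline_slice].mean()          # baseline fluorescence F0
    return (trace - f0) / f0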
- -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/strf_001_LocallySparseNoiseRetinotopicMapping'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - plane_grp = strf_grp[plane_n] - pdff = PdfPages(os.path.join(save_folder, 'STRFs_' + plane_n + '.pdf')) - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - for roi_ind, roi_n in enumerate(roi_lst): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_lst))) - - curr_trace, _ = dt.get_single_trace(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n) - if np.min(curr_trace) < bias: - add_to_trace = -np.min(curr_trace) + bias - else: - add_to_trace = 0. - - curr_strf = sca.get_strf_from_nwb(h5_grp=strf_grp[plane_n], roi_ind=roi_ind, trace_type=trace_type) - - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - - v_min, v_max = curr_strf_dff.get_data_range() - f = curr_strf_dff.plot_traces(yRange=(v_min, v_max * 1.1), figSize=(16, 10), - columnSpacing=0.002, rowSpacing=0.002) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/320_plot_zscore_RFs.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/320_plot_zscore_RFs.py deleted file mode 100644 index fece3db..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/320_plot_zscore_RFs.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import h5py -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages -import corticalmapping.DatabaseTools as dt - -trace_type = 'sta_f_center_subtracted' -save_folder = 'figures' -is_local_dff = True -zscore_range = [-4., 4.] -t_window = [0., 0.5] -bias = 1. 
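# get_zscore_receptive_field is defined in corticalmapping.SingleCellAnalysis
# and its exact formula is not shown in this diff; one common convention
# (assumed here) z-scores each probe location's mean response in t_window
# against its trial-to-trial spread, computed separately for ON and OFF probes:
import numpy as np

def zscore_rf(responses):
    """responses: trials x n_altitudes x n_azimuths mean responses in t_window."""
    return responses.mean(axis=0) / (responses.std(axis=0) + 1e-12)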
- -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/strf_001_LocallySparseNoiseRetinotopicMapping'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - plane_grp = strf_grp[plane_n] - pdff = PdfPages(os.path.join(save_folder, 'zscore_RFs_' + plane_n + '.pdf')) - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - for roi_ind, roi_n in enumerate(roi_lst): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_lst))) - - curr_trace, _ = dt.get_single_trace(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n) - if np.min(curr_trace) < bias: - add_to_trace = -np.min(curr_trace) + bias - else: - add_to_trace = 0. - - curr_strf = sca.get_strf_from_nwb(h5_grp=strf_grp[plane_n], roi_ind=roi_ind, trace_type=trace_type) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - # v_min, v_max = curr_strf_dff.get_data_range() - - rf_on, rf_off = curr_strf_dff.get_zscore_receptive_field(timeWindow=t_window) - f = plt.figure(figsize=(15, 4)) - f.suptitle('{}: t_window: {}'.format(roi_n, t_window)) - ax_on = f.add_subplot(121) - rf_on.plot_rf(plot_axis=ax_on, is_colorbar=True, cmap='RdBu_r', vmin=zscore_range[0], vmax=zscore_range[1]) - ax_on.set_title('ON zscore RF') - ax_off = f.add_subplot(122) - rf_off.plot_rf(plot_axis=ax_off, is_colorbar=True, cmap='RdBu_r', vmin=zscore_range[0], vmax=zscore_range[1]) - ax_off.set_title('OFF zscore RF') - - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/330_plot_RF_contours.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/330_plot_RF_contours.py deleted file mode 100644 index 3233965..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/330_plot_RF_contours.py +++ /dev/null @@ -1,113 +0,0 @@ -import os -import numpy as np -import h5py -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages -import corticalmapping.DatabaseTools as dt - -trace_type = 'sta_f_center_subtracted' -roi_t_window = [0., 0.5] -zscore_range = [0., 4.] -save_folder = 'figures' -bias = 1. - -# plot control -thr_ratio = 0.4 -filter_sigma = 1. 
-interpolate_rate = 10 -absolute_thr = 1.6 -level_num = 1 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/strf_001_LocallySparseNoiseRetinotopicMapping'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -X = None -Y = None - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - plane_grp = strf_grp[plane_n] - - f_all = plt.figure(figsize=(10, 10)) - f_all.suptitle('t window: {}; z threshold: {}'.format(roi_t_window, absolute_thr / thr_ratio)) - ax_all = f_all.add_subplot(111) - - pdff = PdfPages(os.path.join(save_folder, 'RF_contours_' + plane_n + '.pdf')) - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - for roi_ind, roi_n in enumerate(roi_lst): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_lst))) - - curr_trace, _ = dt.get_single_trace(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n) - if np.min(curr_trace) < bias: - add_to_trace = -np.min(curr_trace) + bias - else: - add_to_trace = 0. - - curr_strf = sca.get_strf_from_nwb(h5_grp=strf_grp[plane_n], roi_ind=roi_ind, trace_type=trace_type) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - rf_on, rf_off, _ = curr_strf_dff.get_zscore_thresholded_receptive_fields(timeWindow=roi_t_window, - thr_ratio=thr_ratio, - filter_sigma=filter_sigma, - interpolate_rate=interpolate_rate, - absolute_thr=absolute_thr) - - if X is None and Y is None: - X, Y = np.meshgrid(np.arange(len(rf_on.aziPos)), - np.arange(len(rf_on.altPos))) - - levels_on = [np.max(rf_on.get_weighted_mask().flat) * thr_ratio] - levels_off = [np.max(rf_off.get_weighted_mask().flat) * thr_ratio] - ax_all.contour(X, Y, rf_on.get_weighted_mask(), levels=levels_on, colors='r', lw=5) - ax_all.contour(X, Y, rf_off.get_weighted_mask(), levels=levels_off, colors='b', lw=5) - - f_single = plt.figure(figsize=(10, 10)) - ax_single = f_single.add_subplot(111) - ax_single.contour(X, Y, rf_on.get_weighted_mask(), levels=levels_on, colors='r', lw=5) - ax_single.contour(X, Y, rf_off.get_weighted_mask(), levels=levels_off, colors='b', lw=5) - ax_single.set_xticks(range(len(rf_on.aziPos))[::20]) - ax_single.set_xticklabels(['{:3d}'.format(int(round(l))) for l in rf_on.aziPos[::20]]) - ax_single.set_yticks(range(len(rf_on.altPos))[::20]) - ax_single.set_yticklabels(['{:3d}'.format(int(round(l))) for l in rf_on.altPos[::-1][::20]]) - ax_single.set_aspect('equal') - ax_single.set_title('{}: {}. 
t_window: {}; ON thr:{}; OFF thr:{}.'.format(plane_n, roi_n, roi_t_window, - rf_on.thr, rf_off.thr)) - pdff.savefig(f_single) - f_single.clear() - plt.close(f_single) - - pdff.close() - - ax_all.set_xticks(range(len(rf_on.aziPos))[::20]) - ax_all.set_xticklabels(['{:3d}'.format(int(round(l))) for l in rf_on.aziPos[::20]]) - ax_all.set_yticks(range(len(rf_on.altPos))[::20]) - ax_all.set_yticklabels(['{:3d}'.format(int(round(l))) for l in rf_on.altPos[::-1][::20]]) - ax_all.set_aspect('equal') - ax_all.set_title('{}, abs_zscore_thr:{}'.format(plane_n, absolute_thr)) - - f_all.savefig(os.path.join(save_folder, 'RF_contours_' + plane_n + '_all.pdf'), dpi=300) - -nwb_f.close() - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/340_plot_dgc_response_all.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/340_plot_dgc_response_all.py deleted file mode 100644 index 21275e9..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/340_plot_dgc_response_all.py +++ /dev/null @@ -1,169 +0,0 @@ -import os -import h5py -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.core.PlottingTools as pt -from matplotlib.backends.backend_pdf import PdfPages -import matplotlib.gridspec as gridspec -import corticalmapping.DatabaseTools as dt - -trace_type = 'f_center_subtracted' -response_table_path = 'analysis/response_table_003_DriftingGratingCircleRetinotopicMapping' - -baseline_span = [-0.5, 0.] -response_span = [0., 1.] -bias = 1. - -face_cmap = 'RdBu_r' - -def get_dff(traces, t_axis, response_span, baseline_span): - """ - - :param traces: dimension, trial x timepoint - :param t_axis: - :return: - """ - - baseline_ind = np.logical_and(t_axis > baseline_span[0], t_axis <= baseline_span[1]) - response_ind = np.logical_and(t_axis > response_span[0], t_axis <= response_span[1]) - baseline = np.mean(traces[:, baseline_ind], axis=1, keepdims=True) - dff_traces = (traces - baseline) / baseline - - trace_mean = np.mean(traces, axis=0) - baseline_mean = np.mean(trace_mean[baseline_ind]) - dff_trace_mean = (trace_mean - baseline_mean) / baseline_mean - dff_mean = np.mean(dff_trace_mean[response_ind]) - - return dff_traces, dff_trace_mean, dff_mean - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, 'figures') -if not os.path.isdir(save_folder): - os.mkdir(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -print(nwb_fn) -nwb_f = h5py.File(nwb_fn, 'r') - -plane_ns = nwb_f[response_table_path].keys() -plane_ns.sort() - -for plane_n in plane_ns: - - print('\nprocessing {} ...'.format(plane_n)) - - res_grp = nwb_f['{}/{}'.format(response_table_path, plane_n)] - t_axis = res_grp.attrs['sta_timestamps'] - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - grating_ns = res_grp.keys() - - # remove blank sweep - grating_ns = [gn for gn in grating_ns if gn[-37:] != '_sf0.00_tf00.0_dire000_con0.00_rad000'] - - dire_lst = np.array(list(set([str(gn[38:41]) for gn in grating_ns]))) - dire_lst.sort() - tf_lst = np.array(list(set([str(gn[29:33]) for gn in grating_ns]))) - tf_lst.sort() - sf_lst = np.array(list(set([str(gn[22:26]) for gn in grating_ns]))) - sf_lst.sort() - - print('\nall directions (deg): {}'.format(dire_lst)) - print('all temporal frequencies (Hz): {}'.format(tf_lst)) - 
print('all spatial frequencies (dpd): {}\n'.format(sf_lst)) - - pdff = PdfPages(os.path.join(save_folder, 'STA_DriftingGrating_' + plane_n + '_all.pdf')) - - for roi_i, roi_n in enumerate(roi_lst): - print('plotting: {} ...'.format(roi_n)) - - curr_trace, _ = dt.get_single_trace(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n) - if np.min(curr_trace) < bias: - add_to_trace = -np.min(curr_trace) + bias - else: - add_to_trace = 0. - - f = plt.figure(figsize=(8.5, 11)) - gs_out = gridspec.GridSpec(len(tf_lst), 1) - gs_in_dict = {} - for gs_ind, gs_o in enumerate(gs_out): - curr_gs_in = gridspec.GridSpecFromSubplotSpec(len(sf_lst), len(dire_lst), subplot_spec=gs_o, - wspace=0.0, hspace=0.0) - gs_in_dict[gs_ind] = curr_gs_in - - v_max = 0 - v_min = 0 - dff_mean_max=0 - dff_mean_min=0 - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - dff_traces, dff_trace_mean, dff_mean = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, - baseline_span=baseline_span) - v_max = max([np.amax(dff_traces), v_max]) - v_min = min([np.amin(dff_traces), v_min]) - dff_mean_max = max([dff_mean, dff_mean_max]) - dff_mean_min = min([dff_mean, dff_mean_min]) - - dff_mean_max = max([abs(dff_mean_max), abs(dff_mean_min)]) - dff_mean_min = - dff_mean_max - - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - dff_traces, dff_trace_mean, dff_mean = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, - baseline_span=baseline_span) - - curr_tf = grating_n[29:33] - tf_i = np.where(tf_lst == curr_tf)[0][0] - curr_sf = grating_n[22:26] - sf_i = np.where(sf_lst == curr_sf)[0][0] - curr_dire = grating_n[38:41] - dire_i = np.where(dire_lst == curr_dire)[0][0] - ax = plt.Subplot(f, gs_in_dict[tf_i][sf_i * len(dire_lst) + dire_i]) - f_color = pt.value_2_rgb(value=(dff_mean - dff_mean_min) / (dff_mean_max - dff_mean_min), - cmap=face_cmap) - - # f_color = pt.value_2_rgb(value=dff_mean / dff_mean_max, cmap=face_cmap) - - # print f_color - ax.set_axis_bgcolor(f_color) - ax.set_xticks([]) - ax.set_yticks([]) - for sp in ax.spines.values(): - sp.set_visible(False) - ax.axhline(y=0, ls='--', color='#888888', lw=1) - ax.axvspan(response_span[0], response_span[1], alpha=0.5, color='#888888', ec='none') - for t in dff_traces: - ax.plot(t_axis, t, '-', color='#888888', lw=0.5) - ax.plot(t_axis, dff_trace_mean, '-r', lw=1) - f.add_subplot(ax) - - all_axes = f.get_axes() - for ax in all_axes: - ax.set_ylim([v_min, v_max]) - ax.set_xlim([t_axis[0], t_axis[-1]]) - - f.suptitle('roi:{:04d}; trace type:{}; baseline:{}; response:{}; \ntrace range:{}; color range:{}' - .format(roi_i, trace_type, baseline_span, response_span, [v_min, v_max], - [dff_mean_min, dff_mean_max]), fontsize=8) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() -nwb_f.close() - -print('done!') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/350_plot_dgc_response_mean.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/350_plot_dgc_response_mean.py deleted file mode 100644 index f8967cc..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/350_plot_dgc_response_mean.py +++ /dev/null @@ -1,158 +0,0 @@ -import os -import h5py -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.core.PlottingTools as pt 
-from matplotlib.backends.backend_pdf import PdfPages -import matplotlib.gridspec as gridspec -import corticalmapping.DatabaseTools as dt - -trace_type = 'f_center_subtracted' -response_table_path = 'analysis/response_table_003_DriftingGratingCircleRetinotopicMapping' - -baseline_span = [-0.5, 0.] -response_span = [0., 1.] -bias = 1. - -face_cmap = 'RdBu_r' - -def get_dff(traces, t_axis, response_span, baseline_span): - """ - - :param traces: dimension, trial x timepoint - :param t_axis: - :return: - """ - - trace_mean = np.mean(traces, axis=0) - trace_std = np.std(traces, axis=0) - trace_sem = trace_std / np.sqrt(traces.shape[0]) - - baseline_ind = np.logical_and(t_axis > baseline_span[0], t_axis <= baseline_span[1]) - response_ind = np.logical_and(t_axis > response_span[0], t_axis <= response_span[1]) - baseline = np.mean(trace_mean[baseline_ind]) - dff_trace_mean = (trace_mean - baseline) / baseline - dff_trace_std = trace_std / baseline - dff_trace_sem = trace_sem / baseline - dff_mean = np.mean(dff_trace_mean[response_ind]) - - return dff_trace_mean, dff_trace_std, dff_trace_sem, dff_mean - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, 'figures') -if not os.path.isdir(save_folder): - os.mkdir(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -print(nwb_fn) -nwb_f = h5py.File(nwb_fn, 'r') - -plane_ns = nwb_f[response_table_path].keys() -plane_ns.sort() - -for plane_n in plane_ns: - - print('\nprocessing {} ...'.format(plane_n)) - - res_grp = nwb_f['{}/{}'.format(response_table_path, plane_n)] - t_axis = res_grp.attrs['sta_timestamps'] - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - grating_ns = res_grp.keys() - # remove blank sweep - grating_ns = [gn for gn in grating_ns if gn[-37:] != '_sf0.00_tf00.0_dire000_con0.00_rad000'] - - dire_lst = np.array(list(set([str(gn[38:41]) for gn in grating_ns]))) - dire_lst.sort() - tf_lst = np.array(list(set([str(gn[29:33]) for gn in grating_ns]))) - tf_lst.sort() - sf_lst = np.array(list(set([str(gn[22:26]) for gn in grating_ns]))) - sf_lst.sort() - - pdff = PdfPages(os.path.join(save_folder, 'STA_DriftingGrating_' + plane_n + '_mean.pdf')) - - for roi_i, roi_n in enumerate(roi_lst): - print(roi_n) - - curr_trace, _ = dt.get_single_trace(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n) - if np.min(curr_trace) < bias: - add_to_trace = -np.min(curr_trace) + bias - else: - add_to_trace = 0. 
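The figure assembly that follows (and the matching block in 340_plot_dgc_response_all.py above) lays conditions out with a nested gridspec: one outer band per temporal frequency, and inside each band a spatial-frequency by direction grid of bare axes, one tile per grating condition. A stripped-down sketch of just that layout, assuming 2 temporal frequencies, 3 spatial frequencies and 4 directions:

import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

n_tf, n_sf, n_dire = 2, 3, 4                     # assumed condition counts
f = plt.figure(figsize=(8.5, 11))
gs_out = gridspec.GridSpec(n_tf, 1)              # one outer band per temporal frequency
for tf_i in range(n_tf):
    gs_in = gridspec.GridSpecFromSubplotSpec(n_sf, n_dire, subplot_spec=gs_out[tf_i],
                                             wspace=0.05, hspace=0.05)
    for sf_i in range(n_sf):
        for dire_i in range(n_dire):
            # row-major flat index into the inner grid: sf row, direction column
            ax = f.add_subplot(gs_in[sf_i * n_dire + dire_i])
            ax.set_xticks([])
            ax.set_yticks([])
f.suptitle('one tile per (tf, sf, direction) condition')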
- - f = plt.figure(figsize=(8.5, 11)) - gs_out = gridspec.GridSpec(len(tf_lst), 1) - gs_in_dict = {} - for gs_ind, gs_o in enumerate(gs_out): - curr_gs_in = gridspec.GridSpecFromSubplotSpec(len(sf_lst), len(dire_lst), subplot_spec=gs_o, - wspace=0.05, hspace=0.05) - gs_in_dict[gs_ind] = curr_gs_in - - v_max = 0 - v_min = 0 - dff_mean_max=0 - dff_mean_min=0 - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - _ = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, baseline_span=baseline_span) - dff_trace_mean, dff_trace_std, dff_trace_sem, dff_mean = _ - v_max = max([np.amax(dff_trace_mean + dff_trace_sem), v_max]) - v_min = min([np.amin(dff_trace_mean - dff_trace_sem), v_min]) - dff_mean_max = max([dff_mean, dff_mean_max]) - dff_mean_min = min([dff_mean, dff_mean_min]) - dff_mean_max = max([abs(dff_mean_max), abs(dff_mean_min)]) - dff_mean_min = - dff_mean_max - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - _ = get_dff(traces=curr_sta, t_axis=t_axis, response_span=response_span, baseline_span=baseline_span) - dff_trace_mean, dff_trace_std, dff_trace_sem, dff_mean = _ - curr_tf = grating_n[29:33] - tf_i = np.where(tf_lst == curr_tf)[0][0] - curr_sf = grating_n[22:26] - sf_i = np.where(sf_lst == curr_sf)[0][0] - curr_dire = grating_n[38:41] - dire_i = np.where(dire_lst == curr_dire)[0][0] - ax = plt.Subplot(f, gs_in_dict[tf_i][sf_i * len(dire_lst) + dire_i]) - f_color = pt.value_2_rgb(value=(dff_mean - dff_mean_min) / (dff_mean_max - dff_mean_min), - cmap=face_cmap) - - # f_color = pt.value_2_rgb(value=dff_mean / dff_mean_max, cmap=face_cmap) - - # print f_color - ax.set_axis_bgcolor(f_color) - ax.set_xticks([]) - ax.set_yticks([]) - for sp in ax.spines.values(): - sp.set_visible(False) - ax.axhline(y=0, ls='--', color='#888888', lw=1) - ax.axvspan(response_span[0], response_span[1], alpha=0.5, color='#888888', ec='none') - ax.fill_between(t_axis, dff_trace_mean - dff_trace_sem, dff_trace_mean + dff_trace_sem, edgecolor='none', - facecolor='#880000', alpha=0.5) - ax.plot(t_axis, dff_trace_mean, '-r', lw=1) - f.add_subplot(ax) - - all_axes = f.get_axes() - for ax in all_axes: - ax.set_ylim([v_min, v_max]) - ax.set_xlim([t_axis[0], t_axis[-1]]) - - f.suptitle('roi:{:04d}; trace type:{}; baseline:{}; response:{}; \ntrace range:{}; color range:{}' - .format(roi_i, trace_type, baseline_span, response_span, [v_min, v_max], - [dff_mean_min, dff_mean_max]), fontsize=8) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/360_plot_dgc_tuning_curves.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/360_plot_dgc_tuning_curves.py deleted file mode 100644 index 1efaf2f..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/360_plot_dgc_tuning_curves.py +++ /dev/null @@ -1,198 +0,0 @@ -import os -import h5py -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages -import corticalmapping.DatabaseTools as dt - -trace_type = 'f_center_subtracted' -response_table_path = 'analysis/response_table_003_DriftingGratingCircleRetinotopicMapping' - -baseline_span = [-0.5, 0.] -response_span = [0., 1.5] -bias = 1. 
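The script that follows collects one row per grating condition into a pandas DataFrame and picks the preferred condition with a boolean mask, res_tab[res_tab.dff_mean == max(res_tab.dff_mean)], which returns a one-row DataFrame and, if two conditions tie, more than one row, breaking the float(top_condition.sf) casts further down. A slightly more robust spelling under the same column names uses idxmax, which always yields exactly one row:

import pandas as pd

# toy response table with the columns the script builds (values illustrative)
res_tab = pd.DataFrame({'tf':  [1.0, 2.0, 4.0],
                        'sf':  [0.04, 0.04, 0.08],
                        'dire': [0, 90, 180],
                        'dff_mean': [0.12, 0.35, 0.35]})

top_condition = res_tab.loc[res_tab['dff_mean'].idxmax()]  # a Series; first max wins ties
print(top_condition['tf'], top_condition['sf'], top_condition['dire'])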
- -def get_response(traces, t_axis, response_span, baseline_span): - """ - - :param traces: dimension, trial x timepoint - :param t_axis: - :return: - """ - - baseline_ind = np.logical_and(t_axis > baseline_span[0], t_axis <= baseline_span[1]) - response_ind = np.logical_and(t_axis > response_span[0], t_axis <= response_span[1]) - - trace_mean = np.mean(traces, axis=0) - baseline_mean = np.mean(trace_mean[baseline_ind]) - dff_trace_mean = (trace_mean - baseline_mean) / baseline_mean - dff_mean = np.mean(dff_trace_mean[response_ind]) - - baselines = np.mean(traces[:, baseline_ind], axis=1, keepdims=True) - dff_traces = (traces - baselines) / baselines - dffs = np.mean(dff_traces[:, response_ind], axis=1) - dff_std = np.std(dffs) - dff_sem = dff_std / np.sqrt(traces.shape[0]) - - return dff_mean, dff_std, dff_sem - - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, 'figures') -if not os.path.isdir(save_folder): - os.mkdir(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -print(nwb_fn) -nwb_f = h5py.File(nwb_fn, 'r') - -plane_ns = nwb_f[response_table_path].keys() -plane_ns.sort() - -for plane_n in plane_ns: - - print('\nprocessing {} ...'.format(plane_n)) - - res_grp = nwb_f['{}/{}'.format(response_table_path, plane_n)] - t_axis = res_grp.attrs['sta_timestamps'] - - roi_lst = nwb_f['processing/rois_and_traces_' + plane_n + '/ImageSegmentation/imaging_plane/roi_list'].value - roi_lst = [r for r in roi_lst if r[:4] == 'roi_'] - roi_lst.sort() - - grating_ns = res_grp.keys() - - # remove blank sweep - grating_ns = [gn for gn in grating_ns if gn[-37:] != '_sf0.00_tf00.0_dire000_con0.00_rad000'] - - dire_lst = np.array(list(set([str(gn[38:41]) for gn in grating_ns]))) - dire_lst.sort() - tf_lst = np.array(list(set([str(gn[29:33]) for gn in grating_ns]))) - tf_lst.sort() - sf_lst = np.array(list(set([str(gn[22:26]) for gn in grating_ns]))) - sf_lst.sort() - - pdff = PdfPages(os.path.join(save_folder, 'tuning_curve_DriftingGrating_' + plane_n + '_mean.pdf')) - - for roi_i, roi_n in enumerate(roi_lst): - print(roi_n) - - curr_trace, _ = dt.get_single_trace(nwb_f=nwb_f, plane_n=plane_n, roi_n=roi_n) - if np.min(curr_trace) < bias: - add_to_trace = -np.min(curr_trace) + bias - else: - add_to_trace = 0. 
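The response-table block below recovers stimulus parameters from each condition-group name by position after split('_'), and the STA scripts above slice fixed character offsets instead (gn[22:26] for sf, gn[29:33] for tf, gn[38:41] for direction); both parses silently break if the name template, e.g. ..._sf0.08_tf02.0_dire090_con0.80_rad030, ever changes width. A regex makes the same parse self-describing; a sketch assuming only the tag names visible in the blank-sweep suffix filtered out above:

import re

GRATING_RE = re.compile(r'_sf(?P<sf>\d+\.\d+)_tf(?P<tf>\d+\.\d+)'
                        r'_dire(?P<dire>\d+)_con(?P<con>\d+\.\d+)_rad(?P<rad>\d+)')

def parse_grating_name(grating_n):
    # Returns (sf in cpd, tf in Hz, direction in deg, contrast, radius in deg).
    m = GRATING_RE.search(grating_n)
    if m is None:
        raise ValueError('not a grating condition name: {}'.format(grating_n))
    return (float(m.group('sf')), float(m.group('tf')),
            int(m.group('dire')), float(m.group('con')), int(m.group('rad')))

# the blank sweep parses too and can be dropped by checking sf == tf == con == 0
print(parse_grating_name('_sf0.00_tf00.0_dire000_con0.00_rad000'))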
- - # get response table - res_tab = pd.DataFrame(columns=['con', 'tf', 'sf', 'dire', 'dff_mean', 'dff_std', 'dff_sem']) - row_ind = 0 - - for grating_n in grating_ns: - grating_grp = res_grp[grating_n] - curr_sta = grating_grp['sta_' + trace_type].value[roi_i] + add_to_trace - _ = get_response(traces=curr_sta, t_axis=t_axis, response_span=response_span, baseline_span=baseline_span) - dff_mean, dff_std, dff_sem = _ - - con = float(grating_n.split('_')[5][3:]) - tf = float(grating_n.split('_')[3][2:]) - sf = float(grating_n.split('_')[2][2:]) - dire = int(grating_n.split('_')[4][4:]) - - res_tab.loc[row_ind] = [con, tf, sf, dire, dff_mean, dff_std, dff_sem] - row_ind += 1 - - # find the preferred condition - top_condition = res_tab[res_tab.dff_mean == max(res_tab.dff_mean)] - - # make figure - f = plt.figure(figsize=(8.5, 11)) - - # get tf plot - tf_conditions = res_tab[(res_tab.sf == float(top_condition.sf)) & \ - (res_tab.dire == int(top_condition.dire))] - tf_conditions = tf_conditions.sort_values(by='tf') - - tf_log = np.log(tf_conditions.tf) - - ax_tf = f.add_subplot(311) - ax_tf.fill_between(x=tf_log, y1=tf_conditions.dff_mean + tf_conditions.dff_sem, - y2=tf_conditions.dff_mean - tf_conditions.dff_sem, edgecolor='none', - facecolor='#888888', alpha=0.5) - ax_tf.axhline(y=0, ls='--', color='k', lw=1) - ax_tf.plot(tf_log, tf_conditions.dff_mean, 'r-', lw=2) - ax_tf.set_title('temporal frequency tuning', rotation='vertical', x=-0.4, y=0.5, va='center', ha='center', - size=10) - ax_tf.set_xticks(tf_log) - ax_tf.set_xticklabels(list(tf_conditions.tf)) - ax_tf.set_xlim(np.log([0.9, 16])) - ax_tf_xrange = ax_tf.get_xlim()[1] - ax_tf.get_xlim()[0] - ax_tf_yrange = ax_tf.get_ylim()[1] - ax_tf.get_ylim()[0] - ax_tf.set_aspect(aspect=(ax_tf_xrange / ax_tf_yrange)) - ax_tf.set_ylabel('mean df/f', size=10) - ax_tf.set_xlabel('temporal freqency (Hz)', size=10) - ax_tf.tick_params(axis='both', which='major', labelsize=10) - - # get sf plot - sf_conditions = res_tab[(res_tab.tf == float(top_condition.tf)) & \ - (res_tab.dire == int(top_condition.dire))] - sf_conditions = sf_conditions.sort_values(by='sf') - - sf_log = np.log(sf_conditions.sf) - - ax_sf = f.add_subplot(312) - ax_sf.fill_between(x=sf_log, y1=sf_conditions.dff_mean + sf_conditions.dff_sem, - y2=sf_conditions.dff_mean - sf_conditions.dff_sem, edgecolor='none', - facecolor='#888888', alpha=0.5) - ax_sf.axhline(y=0, ls='--', color='k', lw=1) - ax_sf.plot(sf_log, sf_conditions.dff_mean, '-r', lw=2) - ax_sf.set_title('spatial frequency tuning', rotation='vertical', x=-0.4, y=0.5, va='center', ha='center', - size=10) - ax_sf.set_xticks(sf_log) - ax_sf.set_xticklabels(['{:04.2f}'.format(s) for s in list(sf_conditions.sf)]) - ax_sf.set_xlim(np.log([0.008, 0.4])) - ax_sf_xrange = ax_sf.get_xlim()[1] - ax_sf.get_xlim()[0] - ax_sf_yrange = ax_sf.get_ylim()[1] - ax_sf.get_ylim()[0] - ax_sf.set_aspect(aspect=(ax_sf_xrange / ax_sf_yrange)) - ax_sf.set_ylabel('mean df/f', size=10) - ax_sf.set_xlabel('spatial freqency (cpd)', size=10) - ax_sf.tick_params(axis='both', which='major', labelsize=10) - - # get dire plot - dire_conditions = res_tab[(res_tab.tf == float(top_condition.tf)) & \ - (res_tab.sf == float(top_condition.sf))] - dire_conditions = dire_conditions.sort_values(by='dire') - dire_arc = list(dire_conditions.dire * np.pi / 180.) - dire_arc.append(dire_arc[0]) - dire_dff = np.array(dire_conditions.dff_mean) - dire_dff[dire_dff < 0.] = 0. 
- dire_dff = list(dire_dff) - dire_dff.append(dire_dff[0]) - dire_dff_sem = list(dire_conditions.dff_sem) - dire_dff_sem.append(dire_dff_sem[0]) - dire_dff_low = np.array(dire_dff) - np.array(dire_dff_sem) - dire_dff_low[dire_dff_low < 0.] = 0. - dire_dff_high = np.array(dire_dff) + np.array(dire_dff_sem) - - r_ticks = [0, round(max(dire_dff) * 10000.) / 10000.] - - ax_dire = f.add_subplot(313, projection='polar') - ax_dire.fill_between(x=dire_arc, y1=dire_dff_low, y2=dire_dff_high, edgecolor='none', facecolor='#888888', - alpha=0.5) - ax_dire.plot(dire_arc, dire_dff, '-r', lw=2) - ax_dire.set_title('orientation tuning', rotation='vertical', x=-0.4, y=0.5, va='center', ha='center', size=10) - ax_dire.set_rticks(r_ticks) - ax_dire.set_xticks(dire_arc) - ax_dire.tick_params(axis='both', which='major', labelsize=10) - - f.suptitle('roi:{:04d}; trace type:{}; baseline:{}; response:{}' - .format(roi_i, trace_type, baseline_span, response_span), fontsize=10) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/batch_generate_nwb.bat b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/batch_generate_nwb.bat deleted file mode 100644 index 878d0e8..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/batch_generate_nwb.bat +++ /dev/null @@ -1,16 +0,0 @@ -call activate bigmess - -set PYTHONPATH=%PYTHONPATH%;E:\data\python_packages\corticalmapping;E:\data\python_packages\allensdk_internal;E:\data\python_packages\ainwb\ainwb;E:\data\github_packages\retinotopic_mapping; - -python 200_generate_nwb.py -python 210_add_vasmap.py -python 220_add_sync_data.py -python 230_add_image_data.py -python 240_add_motion_correction_module.py -python 250_get_photodiode_onset.py -python 260_add_visual_stimuli_retinotopic_mapping.py -python 270_analyze_photodiode_onsets.py -python 275_add_eyetracking.py -python 280_add_rois_and_traces_caiman_segmentation.py - -PAUSE \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/090_(optional)_show_mmap_movie.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/090_(optional)_show_mmap_movie.py deleted file mode 100644 index e38d1c4..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/090_(optional)_show_mmap_movie.py +++ /dev/null @@ -1,35 +0,0 @@ -import sys; print('Python %s on %s' % (sys.version, sys.platform)) -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import os -import numpy as np -import caiman as cm -import matplotlib.pyplot as plt - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180813-M386444-2p\movie_2p\110_LSN_reorged\plane0\green\corrected" -fn = '180813_M386444_110_d1_512_d2_512_d3_1_order_C_frames_3300_.mmap' - -fn_parts = fn.split('_') -d1 = int(fn_parts[fn_parts.index('d1') + 1]) # column, x -d2 = int(fn_parts[fn_parts.index('d2') + 1]) # row, y -d3 = int(fn_parts[fn_parts.index('d3') + 1]) # channel -d4 = int(fn_parts[fn_parts.index('frames') + 1]) # frame, T -order = fn_parts[fn_parts.index('order') + 1] - -mov = np.memmap(filename=os.path.join(data_folder, fn), shape=(d1, d2, d4), order=order, dtype=np.float32, mode='r') -mov = mov.transpose((2, 1, 0)) - -print('movie shape: {}'.format(mov.shape)) - -f = plt.figure(figsize=(8, 5)) -ax = f.add_subplot(111) -fig = ax.imshow(np.mean(mov, axis=0), vmin=300, vmax=1500, 
cmap='inferno', interpolation='nearest') -f.colorbar(fig) -plt.show() - -input("Press enter to continue ...") - -print('playing {} ...'.format(fn)) -cm.movie(mov).play(fr=30,magnification=1,gain=2.) - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/290_get_drifting_grating_response_tables.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/290_get_drifting_grating_response_tables.py deleted file mode 100644 index 6d8f02e..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/290_get_drifting_grating_response_tables.py +++ /dev/null @@ -1,18 +0,0 @@ -import os -import h5py -import numpy as np -import corticalmapping.NwbTools as nt - -# plane_ns = ['plane0', 'plane1', 'plane2', 'plane3', 'plane4'] -stim_name = '003_DriftingGratingCircleRetinotopicMapping' -t_win = [-1., 2.5] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -nwb_f.get_drifting_grating_response_table_retinotopic_mapping(stim_name=stim_name, time_window=t_win) - -nwb_f.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/300_get_STRFs.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/300_get_STRFs.py deleted file mode 100644 index 3abfef7..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/300_get_STRFs.py +++ /dev/null @@ -1,101 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -stim_name = '001_LocallySparseNoiseRetinotopicMapping' -trace_source = 'f_center_subtracted' -start_time = -0.5 -end_time = 2. 
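The body of old/300_get_STRFs.py, which follows, converts start_time/end_time into frame counts using the mean imaging frame interval, finds the frame nearest each photodiode probe onset, and slices a fixed-length chunk of every ROI trace around it, discarding chunks that would run past either end of the recording. A standalone sketch of that chunking, substituting np.searchsorted for the pipeline's ta.find_nearest helper (the function name here is illustrative):

import numpy as np

def peri_event_chunks(traces, ts, event_ts, start_time=-0.5, end_time=2.):
    # traces: (n_roi, n_frame) fluorescence; ts: (n_frame,) sorted frame timestamps.
    dt = np.mean(np.diff(ts))                        # mean frame duration
    n_chunk = int(np.ceil((end_time - start_time) / dt))
    f_start = int(np.floor(start_time / dt))         # negative: frames before onset
    chunks = []
    for et in event_ts:
        i = int(np.searchsorted(ts, et))             # first frame at/after the event
        if i == len(ts) or (i > 0 and et - ts[i - 1] < ts[i] - et):
            i -= 1                                   # step back to the nearer frame
        s = i + f_start
        if s >= 0 and s + n_chunk <= len(ts):        # drop windows running off the ends
            chunks.append(traces[:, s:s + n_chunk])
    return np.array(chunks)                          # (n_events_kept, n_roi, n_chunk)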
- -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = nt.RecordedFile(nwb_fn) - -# deleting existing strf group -if 'STRFs' in nwb_f.file_pointer['analysis']: - del nwb_f.file_pointer['analysis/STRFs'] - -probe_grp = nwb_f.file_pointer['analysis/photodiode_onsets/' + stim_name] -probe_ns = probe_grp.keys() -probe_ns.sort() - -probe_locations = [[float(pn[3: 9]), float(pn[13: 19])] for pn in probe_ns] -probe_signs = [float(pn[-2:]) for pn in probe_ns] -# print(probe_locations) - -plane_ns = nwb_f.file_pointer['processing'].keys() -plane_ns = [pn.split('_')[-1] for pn in plane_ns if 'rois_and_traces_plane' in pn] -plane_ns.sort() -print('\n'.join(plane_ns)) - -strf_grp = nwb_f.file_pointer['analysis'].create_group('STRFs') - -for plane_n in plane_ns: - print('\ngetting STRFs for {} ...'.format(plane_n)) - - roi_ns = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + - '/ImageSegmentation/imaging_plane/roi_list'].value - roi_ns = [rn for rn in roi_ns if rn[0: 4] == 'roi_'] - roi_ns.sort() - roi_num = len(roi_ns) - - plane_strf_grp = strf_grp.create_group(plane_n) - plane_traces = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + '/Fluorescence/' + - trace_source + '/data'].value - plane_trace_ts = nwb_f.file_pointer['processing/rois_and_traces_' + plane_n + '/Fluorescence/' + - trace_source + '/timestamps'].value - - plane_mean_frame_dur = np.mean(np.diff(plane_trace_ts)) - plane_chunk_frame_dur = int(np.ceil((end_time - start_time) / plane_mean_frame_dur)) - plane_chunk_frame_start = int(np.floor(start_time / plane_mean_frame_dur)) - plane_t = (np.arange(plane_chunk_frame_dur) + plane_chunk_frame_start) * plane_mean_frame_dur - print '{}: STRF time axis: \n{}'.format(plane_n, plane_t) - - plane_roi_traces = [] - trigger_ts_lst = [] - - for probe_ind, probe_n in enumerate(probe_ns): - - probe_ts = probe_grp[probe_n]['pd_onset_ts_sec'].value - trigger_ts_lst.append(probe_ts) - probe_traces = [] - for curr_probe_ts in probe_ts: - curr_frame_start = ta.find_nearest(plane_trace_ts, curr_probe_ts) + plane_chunk_frame_start - curr_frame_end = curr_frame_start + plane_chunk_frame_dur - if curr_frame_start >= 0 and curr_frame_end <= len(plane_trace_ts): - probe_traces.append(plane_traces[:, curr_frame_start: curr_frame_end]) - - plane_roi_traces.append(np.array(probe_traces)) - print('probe: {} / {}; shape: {}'.format(probe_ind + 1, len(probe_ns), np.array(probe_traces).shape)) - - # plane_roi_traces = np.array(plane_roi_traces) - - print('saving ...') - for roi_ind in range(roi_num): - - print "roi: {} / {}".format(roi_ind + 1, roi_num) - curr_unit_traces = [pt[:, roi_ind, :] for pt in plane_roi_traces] - curr_unit_traces = [list(t) for t in curr_unit_traces] - curr_strf = sca.SpatialTemporalReceptiveField(locations=probe_locations, - signs=probe_signs, - traces=curr_unit_traces, - time=plane_t, - # trigger_ts=trigger_ts_lst, - trigger_ts=None, - name='roi_{:04d}'.format(roi_ind), - trace_data_type=trace_source) - - curr_strf_grp = plane_strf_grp.create_group('strf_roi_{:04d}'.format(roi_ind)) - curr_strf.to_h5_group(curr_strf_grp) - -nwb_f.close() - - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/310_plot_STRFs.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/310_plot_STRFs.py deleted file mode 100644 index b227207..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/310_plot_STRFs.py 
+++ /dev/null @@ -1,61 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import h5py -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -save_folder = 'figures' -is_add_to_traces = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File(os.path.join(plane_n, "caiman_segmentation_results.hdf5"), - 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0. - - plane_grp = strf_grp[plane_n] - pdff = PdfPages(os.path.join(save_folder, 'STRFs_' + plane_n + '.pdf')) - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - - v_min, v_max = curr_strf_dff.get_data_range() - f = curr_strf_dff.plot_traces(yRange=(v_min, v_max * 1.1), figSize=(16, 10), - columnSpacing=0.002, rowSpacing=0.002) - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/320_plot_zscore_RFs.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/320_plot_zscore_RFs.py deleted file mode 100644 index f870f5f..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/320_plot_zscore_RFs.py +++ /dev/null @@ -1,72 +0,0 @@ -import os -import numpy as np -import matplotlib.pyplot as plt -import h5py -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -save_folder = 'figures' -is_local_dff = True -zscore_range = [-4., 4.] -t_window = [0., 0.5] -is_add_to_traces = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0] -nwb_f = h5py.File(nwb_fn, 'r') - -strf_grp = nwb_f['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File(os.path.join(plane_n, "caiman_segmentation_results.hdf5"), - 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0. 
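The is_add_to_traces branch above opens plane_n/caiman_segmentation_results.hdf5 inline and never closes the handle; old/310 before it and old/330 after it repeat the same pattern, and a leaked handle can block later writes to that file, especially on Windows. A tidier equivalent read, assuming the same file layout (the helper name is illustrative):

import os
import h5py

def read_movie_bias(plane_n, fn='caiman_segmentation_results.hdf5'):
    # Scalar bias that was added to the movie before caiman segmentation.
    with h5py.File(os.path.join(plane_n, fn), 'r') as f:
        return f['bias_added_to_movie'][()]  # [()] reads a scalar; .value is removed in h5py >= 3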
- - plane_grp = strf_grp[plane_n] - pdff = PdfPages(os.path.join(save_folder, 'zscore_RFs_' + plane_n + '.pdf')) - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - # v_min, v_max = curr_strf_dff.get_data_range() - - rf_on, rf_off = curr_strf_dff.get_zscore_receptive_field(timeWindow=t_window) - f = plt.figure(figsize=(15, 4)) - f.suptitle('{}: t_window: {}'.format(roi_n, t_window)) - ax_on = f.add_subplot(121) - rf_on.plot_rf(plot_axis=ax_on, is_colorbar=True, cmap='RdBu_r', vmin=zscore_range[0], vmax=zscore_range[1]) - ax_on.set_title('ON zscore RF') - ax_off = f.add_subplot(122) - rf_off.plot_rf(plot_axis=ax_off, is_colorbar=True, cmap='RdBu_r', vmin=zscore_range[0], vmax=zscore_range[1]) - ax_off.set_title('OFF zscore RF') - plt.close() - - # plt.show() - pdff.savefig(f) - f.clear() - plt.close(f) - - pdff.close() - -nwb_f.close() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/330_plot_RF_contours.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/330_plot_RF_contours.py deleted file mode 100644 index d45003b..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/old/330_plot_RF_contours.py +++ /dev/null @@ -1,115 +0,0 @@ -import os -import numpy as np -import h5py -import matplotlib.pyplot as plt -import corticalmapping.NwbTools as nt -import corticalmapping.core.TimingAnalysis as ta -import corticalmapping.SingleCellAnalysis as sca -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -from matplotlib.backends.backend_pdf import PdfPages - -roi_t_window = [0., 0.5] -zscore_range = [0., 4.] -save_folder = 'figures' -is_add_to_traces = True - -# plot control -thr_ratio = 0.4 -filter_sigma = 1. -interpolate_rate = 10 -absolute_thr = 1.6 -level_num = 1 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -save_folder = os.path.join(curr_folder, save_folder) -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -nwb_fn = [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'] -print('\n'.join(nwb_fn)) - -if len(nwb_fn) != 1: - raise LookupError - -nwb_fn = nwb_fn[0] -rff = h5py.File(nwb_fn, 'r') - -strf_grp = rff['analysis/STRFs'] -plane_ns = strf_grp.keys() -plane_ns.sort() -print('planes:') -print('\n'.join(plane_ns)) - -X = None -Y = None - -for plane_n in plane_ns: - print('plotting rois in {} ...'.format(plane_n)) - - if is_add_to_traces: - add_to_trace = h5py.File(os.path.join(plane_n, "caiman_segmentation_results.hdf5"), - 'r')['bias_added_to_movie'].value - else: - add_to_trace = 0. 
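old/330 above is the only script in this pipeline that checks how many .nwb files it found; the others take [f for f in os.listdir(curr_folder) if f[-4:] == '.nwb'][0], which dies with a bare IndexError when the file is missing and silently picks an arbitrary session when several are present, and even old/330 raises LookupError without a message. A small helper making both failure modes explicit (the name is illustrative):

import os

def find_single_nwb(folder):
    # Return the unique .nwb filename in folder, or fail loudly.
    nwb_fns = [f for f in os.listdir(folder) if f.endswith('.nwb')]
    if len(nwb_fns) != 1:
        raise LookupError('expected exactly one .nwb file in {}, found {}: {}'
                          .format(folder, len(nwb_fns), nwb_fns))
    return nwb_fns[0]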
- - plane_grp = strf_grp[plane_n] - - roi_ns = [rn[-8:] for rn in plane_grp.keys()] - roi_ns.sort() - - f_all = plt.figure(figsize=(10, 10)) - f_all.suptitle('t window: {}; z threshold: {}'.format(roi_t_window, absolute_thr/thr_ratio)) - ax_all = f_all.add_subplot(111) - - pdff = PdfPages(os.path.join(save_folder, 'RF_contours_' + plane_n + '.pdf')) - - for roi_ind, roi_n in enumerate(roi_ns): - print('roi: {} / {}'.format(roi_ind + 1, len(roi_ns))) - curr_strf = sca.SpatialTemporalReceptiveField.from_h5_group(plane_grp['strf_' + roi_n]) - curr_strf_dff = curr_strf.get_local_dff_strf(is_collaps_before_normalize=True, add_to_trace=add_to_trace) - rf_on, rf_off, _ = curr_strf_dff.get_zscore_thresholded_receptive_fields(timeWindow=roi_t_window, - thr_ratio=thr_ratio, - filter_sigma=filter_sigma, - interpolate_rate=interpolate_rate, - absolute_thr=absolute_thr) - - if X is None and Y is None: - X, Y = np.meshgrid(np.arange(len(rf_on.aziPos)), - np.arange(len(rf_on.altPos))) - - levels_on = [np.max(rf_on.get_weighted_mask().flat) * thr_ratio] - levels_off = [np.max(rf_off.get_weighted_mask().flat) * thr_ratio] - ax_all.contour(X, Y, rf_on.get_weighted_mask(), levels=levels_on, colors='r', lw=5) - ax_all.contour(X, Y, rf_off.get_weighted_mask(), levels=levels_off, colors='b', lw=5) - - f_single = plt.figure(figsize=(10, 10)) - ax_single = f_single.add_subplot(111) - ax_single.contour(X, Y, rf_on.get_weighted_mask(), levels=levels_on, colors='r', lw=5) - ax_single.contour(X, Y, rf_off.get_weighted_mask(), levels=levels_off, colors='b', lw=5) - ax_single.set_xticks(range(len(rf_on.aziPos))[::20]) - ax_single.set_xticklabels(['{:3d}'.format(int(round(l))) for l in rf_on.aziPos[::20]]) - ax_single.set_yticks(range(len(rf_on.altPos))[::20]) - ax_single.set_yticklabels(['{:3d}'.format(int(round(l))) for l in rf_on.altPos[::-1][::20]]) - ax_single.set_aspect('equal') - ax_single.set_title('{}: {}. t_window: {}; ON thr:{}; OFF thr:{}.'.format(plane_n, roi_n, roi_t_window, - rf_on.thr, rf_off.thr)) - pdff.savefig(f_single) - f_single.clear() - plt.close(f_single) - - pdff.close() - - ax_all.set_xticks(range(len(rf_on.aziPos))[::20]) - ax_all.set_xticklabels(['{:3d}'.format(int(round(l))) for l in rf_on.aziPos[::20]]) - ax_all.set_yticks(range(len(rf_on.altPos))[::20]) - ax_all.set_yticklabels(['{:3d}'.format(int(round(l))) for l in rf_on.altPos[::-1][::20]]) - ax_all.set_aspect('equal') - ax_all.set_title('{}, abs_zscore_thr:{}'.format(plane_n, absolute_thr)) - - f_all.savefig(os.path.join(save_folder, 'RF_contours_' + plane_n + '_all.pdf'), dpi=300) - -rff.close() - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/120_get_cells_file_bouton.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/120_get_cells_file_bouton.py deleted file mode 100644 index 7366ec2..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/120_get_cells_file_bouton.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import numpy as np -import h5py -import tifffile as tf -import allensdk_internal.brain_observatory.mask_set as mask_set -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import scipy.ndimage as ni -import matplotlib.pyplot as plt - - -isSave = True -is_filter = True - -filter_sigma = 0. # parameters only used if filter the rois -# dilation_iterations = 1. 
# parameters only used if filter the rois -cut_thr = 2.5 # low for more rois, high for less rois - -bg_fn = "corrected_mean_projections.tif" -save_folder = 'figures' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -data_f = h5py.File('caiman_segmentation_results.hdf5') -masks = data_f['masks'].value -data_f.close() - -bg = ia.array_nor(np.max(tf.imread(bg_fn), axis=0)) - -final_roi_dict = {} - -for i, mask in enumerate(masks): - - if is_filter: - mask_nor = (mask - np.mean(mask.flatten())) / np.abs(np.std(mask.flatten())) - mask_nor_f = ni.filters.gaussian_filter(mask_nor, filter_sigma) - mask_bin = np.zeros(mask_nor_f.shape, dtype=np.uint8) - mask_bin[mask_nor_f > cut_thr] = 1 - - else: - mask_bin = np.zeros(mask.shape, dtype=np.uint8) - mask_bin[mask > 0] = 1 - - mask_labeled, mask_num = ni.label(mask_bin) - curr_mask_dict = ia.get_masks(labeled=mask_labeled, keyPrefix='caiman_mask_{:03d}'.format(i), labelLength=5) - for roi_key, roi_mask in curr_mask_dict.items(): - final_roi_dict.update({roi_key: ia.WeightedROI(roi_mask * mask)}) - -print 'Total number of ROIs:',len(final_roi_dict) - -f = plt.figure(figsize=(15, 8)) -ax1 = f.add_subplot(121) -ax1.imshow(bg, vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') -colors1 = pt.random_color(masks.shape[0]) -for i, mask in enumerate(masks): - pt.plot_mask_borders(mask, plotAxis=ax1, color=colors1[i]) -ax1.set_title('original ROIs') -ax1.set_axis_off() -ax2 = f.add_subplot(122) -ax2.imshow(ia.array_nor(bg), vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') -colors2 = pt.random_color(len(final_roi_dict)) -i = 0 -for roi in final_roi_dict.values(): - pt.plot_mask_borders(roi.get_binary_mask(), plotAxis=ax2, color=colors2[i]) - i = i + 1 -ax2.set_title('filtered ROIs') -ax2.set_axis_off() -plt.show() - -if isSave: - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - f.savefig(os.path.join(save_folder, 'caiman_segmentation_filtering.pdf'), dpi=300) - - cell_file = h5py.File('cells.hdf5', 'w') - - i = 0 - for key, value in sorted(final_roi_dict.iteritems()): - curr_grp = cell_file.create_group('cell{:04d}'.format(i)) - curr_grp.attrs['name'] = key - value.to_h5_group(curr_grp) - i += 1 - - cell_file.close() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/120_get_cells_file_soma.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/120_get_cells_file_soma.py deleted file mode 100644 index e97e5d5..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/120_get_cells_file_soma.py +++ /dev/null @@ -1,81 +0,0 @@ -import os -import numpy as np -import h5py -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import scipy.ndimage as ni -import matplotlib.pyplot as plt -import corticalmapping.HighLevel as hl - -plt.ioff() - -def run(): - isSave = True - - filter_sigma = 2. 
# parameters only used if filter the rois - thr_high = 0.0 - thr_low = 0.1 - - bg_fn = "corrected_mean_projections.tif" - save_folder = 'figures' - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - data_f = h5py.File('caiman_segmentation_results.hdf5') - masks = data_f['masks'].value - data_f.close() - - bg = ia.array_nor(np.max(tf.imread(bg_fn), axis=0)) - - final_roi_dict = {} - - roi_ind = 0 - for i, mask in enumerate(masks): - mask_dict = hl.threshold_mask_by_energy(mask, sigma=filter_sigma, thr_high=thr_high, thr_low=thr_low) - for mask_roi in mask_dict.values(): - final_roi_dict.update({'roi_{:04d}'.format(roi_ind): mask_roi}) - roi_ind += 1 - - print 'Total number of ROIs:',len(final_roi_dict) - - f = plt.figure(figsize=(15, 8)) - ax1 = f.add_subplot(121) - ax1.imshow(bg, vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') - colors1 = pt.random_color(masks.shape[0]) - for i, mask in enumerate(masks): - pt.plot_mask_borders(mask, plotAxis=ax1, color=colors1[i]) - ax1.set_title('original ROIs') - ax1.set_axis_off() - ax2 = f.add_subplot(122) - ax2.imshow(ia.array_nor(bg), vmin=0, vmax=0.5, cmap='gray', interpolation='nearest') - colors2 = pt.random_color(len(final_roi_dict)) - i = 0 - for roi in final_roi_dict.values(): - pt.plot_mask_borders(roi.get_binary_mask(), plotAxis=ax2, color=colors2[i]) - i = i + 1 - ax2.set_title('filtered ROIs') - ax2.set_axis_off() - # plt.show() - - if isSave: - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - f.savefig(os.path.join(save_folder, 'caiman_segmentation_filtering.pdf'), dpi=300) - - cell_file = h5py.File('cells.hdf5', 'w') - - i = 0 - for key, value in sorted(final_roi_dict.iteritems()): - curr_grp = cell_file.create_group('cell{:04d}'.format(i)) - curr_grp.attrs['name'] = key - value.to_h5_group(curr_grp) - i += 1 - - cell_file.close() - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/130_refine_cells_bouton.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/130_refine_cells_bouton.py deleted file mode 100644 index 925d1dd..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/130_refine_cells_bouton.py +++ /dev/null @@ -1,173 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Jun 30 17:44:42 2015 - -@author: junz -""" -import os -import h5py -import numpy as np -import operator -import matplotlib.pyplot as plt -import scipy.ndimage as ni -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.FileTools as ft -import corticalmapping.core.PlottingTools as pt -import corticalmapping.SingleCellAnalysis as sca - -plt.ioff() - -# pixels, masks with center location within this pixel region at the image border will be discarded -center_margin = [10, 10, 10, 20] # [top margin, bottom margin, left margin, right margin] - -# area range, range of number of pixels of a valid roi -area_range = [10, 100] - -# for the two masks that are overlapping, if the ratio between overlap and the area of the smaller mask is larger than -# this value, the smaller mask will be discarded. 
-overlap_thr = 0.2 - -save_folder = 'figures' - -data_file_name = 'cells.hdf5' -save_file_name = 'cells_refined.hdf5' -background_file_name = "corrected_mean_projections.tif" - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -if not os.path.isdir(save_folder): - os.makedirs(save_folder) - -# read cells -dfile = h5py.File(data_file_name) -cells = {} -for cellname in dfile.iterkeys(): - cells.update({cellname:ia.WeightedROI.from_h5_group(dfile[cellname])}) - -print 'total number of cells:', len(cells) - -# get the names of cells which are on the edge -edge_cells = [] -for cellname, cellmask in cells.iteritems(): - dimension = cellmask.dimension - center = cellmask.get_center() - if center[0] < center_margin[0] or \ - center[0] > dimension[0] - center_margin[1] or \ - center[1] < center_margin[2] or \ - center[1] > dimension[1] - center_margin[3]: - - # cellmask.plot_binary_mask_border(color='#ff0000', borderWidth=1) - # plt.title(cellname) - # plt.show() - - edge_cells.append(cellname) - -print '\ncells to be removed because they are on the edges:' -print '\n'.join(edge_cells) - -# remove edge cells -for edge_cell in edge_cells: - _ = cells.pop(edge_cell) - -# get dictionary of cell areas -cell_areas = {} -for cellname, cellmask in cells.iteritems(): - cell_areas.update({cellname: cellmask.get_binary_area()}) - - -# remove cellnames that have area outside of the area_range -invalid_cell_ns = [] -for cellname, cellarea in cell_areas.items(): - if cellarea < area_range[0] or cellarea > area_range[1]: - invalid_cell_ns.append(cellname) -print "cells to be removed because they do not meet area criterion:" -print "\n".join(invalid_cell_ns) -for invalid_cell_n in invalid_cell_ns: - cell_areas.pop(invalid_cell_n) - - -# sort cells with their binary area -cell_areas_sorted = sorted(cell_areas.items(), key=operator.itemgetter(1)) -cell_areas_sorted.reverse() -cell_names_sorted = [c[0] for c in cell_areas_sorted] -# print '\n'.join([str(c) for c in cell_areas_sorted]) - -# get the name of cells that needs to be removed because of overlapping -retain_cells = [] -remove_cells = [] -for cell1_name in cell_names_sorted: - cell1_mask = cells[cell1_name] - is_remove = 0 - cell1_area = cell1_mask.get_binary_area() - for cell2_name in retain_cells: - cell2_mask = cells[cell2_name] - cell2_area = cell2_mask.get_binary_area() - curr_overlap = cell1_mask.binary_overlap(cell2_mask) - - if float(curr_overlap) / cell1_area > overlap_thr: - remove_cells.append(cell1_name) - is_remove = 1 - print cell1_name, ':', cell1_mask.get_binary_area(), ': removed' - - # f = plt.figure(figsize=(10,10)) - # ax = f.add_subplot(111) - # cell1_mask.plot_binary_mask_border(plotAxis=ax, color='#ff0000', borderWidth=1) - # cell2_mask.plot_binary_mask_border(plotAxis=ax, color='#0000ff', borderWidth=1) - # ax.set_title('red:'+cell1_name+'; blue:'+cell2_name) - # plt.show() - break - - if is_remove == 0: - retain_cells.append(cell1_name) - print cell1_name, ':', cell1_mask.get_binary_area(), ': retained' - -print '\ncells to be removed because of overlapping:' -print '\n'.join(remove_cells) - -print '\ntotal number of reatined cells:', len(retain_cells) - -# plotting -colors = pt.random_color(len(cells.keys())) -bgImg = ia.array_nor(np.max(tf.imread(background_file_name), axis=0)) - -f = plt.figure(figsize=(10, 10)) -ax = f.add_subplot(111) -ax.imshow(ia.array_nor(bgImg), cmap='gray', vmin=0, vmax=0.5, interpolation='nearest') - -f2 = plt.figure(figsize=(10, 10)) -ax2 = f2.add_subplot(111) 
-ax2.imshow(np.zeros(bgImg.shape, dtype=np.uint8), vmin=0, vmax=1, cmap='gray', interpolation='nearest') - -i = 0 -for retain_cell in retain_cells: - cells[retain_cell].plot_binary_mask_border(plotAxis=ax, color=colors[i], borderWidth=1) - cells[retain_cell].plot_binary_mask_border(plotAxis=ax2, color=colors[i], borderWidth=1) - i += 1 -plt.show() - -# save figures -pt.save_figure_without_borders(f, os.path.join(save_folder, '2P_refined_ROIs_with_background.png'), dpi=300) -pt.save_figure_without_borders(f2, os.path.join(save_folder, '2P_refined_ROIs_without_background.png'), dpi=300) - -# save h5 file -save_file = h5py.File(save_file_name, 'w') -i = 0 -for retain_cell in retain_cells: - print retain_cell, ':', cells[retain_cell].get_binary_area() - - currGroup = save_file.create_group('cell' + ft.int2str(i, 4)) - currGroup.attrs['name'] = retain_cell - roiGroup = currGroup.create_group('roi') - cells[retain_cell].to_h5_group(roiGroup) - i += 1 - -for attr, value in dfile.attrs.iteritems(): - save_file.attrs[attr] = value - -save_file.close() -dfile.close() - - - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/130_refine_cells_soma.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/130_refine_cells_soma.py deleted file mode 100644 index f25d873..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/130_refine_cells_soma.py +++ /dev/null @@ -1,175 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Jun 30 17:44:42 2015 - -@author: junz -""" -import os -import h5py -import numpy as np -import operator -import matplotlib.pyplot as plt -import scipy.ndimage as ni -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.FileTools as ft -import corticalmapping.core.PlottingTools as pt -import corticalmapping.SingleCellAnalysis as sca - -plt.ioff() - -def run(): - # pixels, masks with center location within this pixel region at the image border will be discarded - center_margin = [10, 20, 25, 10] # [top margin, bottom margin, left margin, right margin] - - # area range, range of number of pixels of a valid roi - area_range = [150, 1000] - - # for the two masks that are overlapping, if the ratio between overlap and the area of the smaller mask is larger than - # this value, the smaller mask will be discarded. 
- overlap_thr = 0.2 - - save_folder = 'figures' - - data_file_name = 'cells.hdf5' - save_file_name = 'cells_refined.hdf5' - background_file_name = "corrected_mean_projections.tif" - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - # read cells - dfile = h5py.File(data_file_name) - cells = {} - for cellname in dfile.iterkeys(): - cells.update({cellname:ia.WeightedROI.from_h5_group(dfile[cellname])}) - - print 'total number of cells:', len(cells) - - # get the names of cells which are on the edge - edge_cells = [] - for cellname, cellmask in cells.iteritems(): - dimension = cellmask.dimension - center = cellmask.get_center() - if center[0] < center_margin[0] or \ - center[0] > dimension[0] - center_margin[1] or \ - center[1] < center_margin[2] or \ - center[1] > dimension[1] - center_margin[3]: - - # cellmask.plot_binary_mask_border(color='#ff0000', borderWidth=1) - # plt.title(cellname) - # plt.show() - - edge_cells.append(cellname) - - print '\ncells to be removed because they are on the edges:' - print '\n'.join(edge_cells) - - # remove edge cells - for edge_cell in edge_cells: - _ = cells.pop(edge_cell) - - # get dictionary of cell areas - cell_areas = {} - for cellname, cellmask in cells.iteritems(): - cell_areas.update({cellname: cellmask.get_binary_area()}) - - - # remove cellnames that have area outside of the area_range - invalid_cell_ns = [] - for cellname, cellarea in cell_areas.items(): - if cellarea < area_range[0] or cellarea > area_range[1]: - invalid_cell_ns.append(cellname) - print "cells to be removed because they do not meet area criterion:" - print "\n".join(invalid_cell_ns) - for invalid_cell_n in invalid_cell_ns: - cell_areas.pop(invalid_cell_n) - - - # sort cells with their binary area - cell_areas_sorted = sorted(cell_areas.items(), key=operator.itemgetter(1)) - cell_areas_sorted.reverse() - cell_names_sorted = [c[0] for c in cell_areas_sorted] - # print '\n'.join([str(c) for c in cell_areas_sorted]) - - # get the name of cells that needs to be removed because of overlapping - retain_cells = [] - remove_cells = [] - for cell1_name in cell_names_sorted: - cell1_mask = cells[cell1_name] - is_remove = 0 - cell1_area = cell1_mask.get_binary_area() - for cell2_name in retain_cells: - cell2_mask = cells[cell2_name] - cell2_area = cell2_mask.get_binary_area() - curr_overlap = cell1_mask.binary_overlap(cell2_mask) - - if float(curr_overlap) / cell1_area > overlap_thr: - remove_cells.append(cell1_name) - is_remove = 1 - print cell1_name, ':', cell1_mask.get_binary_area(), ': removed' - - # f = plt.figure(figsize=(10,10)) - # ax = f.add_subplot(111) - # cell1_mask.plot_binary_mask_border(plotAxis=ax, color='#ff0000', borderWidth=1) - # cell2_mask.plot_binary_mask_border(plotAxis=ax, color='#0000ff', borderWidth=1) - # ax.set_title('red:'+cell1_name+'; blue:'+cell2_name) - # plt.show() - break - - if is_remove == 0: - retain_cells.append(cell1_name) - print cell1_name, ':', cell1_mask.get_binary_area(), ': retained' - - print '\ncells to be removed because of overlapping:' - print '\n'.join(remove_cells) - - print '\ntotal number of reatined cells:', len(retain_cells) - - # plotting - colors = pt.random_color(len(cells.keys())) - bgImg = ia.array_nor(np.max(tf.imread(background_file_name), axis=0)) - - f = plt.figure(figsize=(10, 10)) - ax = f.add_subplot(111) - ax.imshow(ia.array_nor(bgImg), cmap='gray', vmin=0, vmax=0.5, interpolation='nearest') - - f2 = 
plt.figure(figsize=(10, 10)) - ax2 = f2.add_subplot(111) - ax2.imshow(np.zeros(bgImg.shape, dtype=np.uint8), vmin=0, vmax=1, cmap='gray', interpolation='nearest') - - i = 0 - for retain_cell in retain_cells: - cells[retain_cell].plot_binary_mask_border(plotAxis=ax, color=colors[i], borderWidth=1) - cells[retain_cell].plot_binary_mask_border(plotAxis=ax2, color=colors[i], borderWidth=1) - i += 1 - # plt.show() - - # save figures - pt.save_figure_without_borders(f, os.path.join(save_folder, '2P_refined_ROIs_with_background.png'), dpi=300) - pt.save_figure_without_borders(f2, os.path.join(save_folder, '2P_refined_ROIs_without_background.png'), dpi=300) - - # save h5 file - save_file = h5py.File(save_file_name, 'w') - i = 0 - for retain_cell in retain_cells: - print retain_cell, ':', cells[retain_cell].get_binary_area() - - currGroup = save_file.create_group('cell' + ft.int2str(i, 4)) - currGroup.attrs['name'] = retain_cell - roiGroup = currGroup.create_group('roi') - cells[retain_cell].to_h5_group(roiGroup) - i += 1 - - for attr, value in dfile.attrs.iteritems(): - save_file.attrs[attr] = value - - save_file.close() - dfile.close() - -if __name__ == '__main__': - run() - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/135_generate_marked_avi.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/135_generate_marked_avi.py deleted file mode 100644 index 5122b93..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/135_generate_marked_avi.py +++ /dev/null @@ -1,132 +0,0 @@ -import os -import numpy as np -import h5py -import scipy.ndimage as ni -import matplotlib.pyplot as plt -from multiprocessing import Pool -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import cv2 -import PIL -from cStringIO import StringIO - -plt.ioff() - -chunk_size = 1000 -process_num = 5 -downsample_r = 10 -frame_size = 8 # inch - -def downsample_for_multiprocessing(params): - - nwb_path, dset_path, frame_start_i, frame_end_i, dr = params - - print('\tdownsampling frame {} - {}'.format(frame_start_i, frame_end_i)) - - ff = h5py.File(nwb_path, 'r') - chunk = ff[dset_path][frame_start_i:frame_end_i, :, :] - ff.close() - chunk_d = ia.z_downsample(chunk, downSampleRate=dr, is_verbose=False) - return chunk_d - -def downsample_mov(nwb_path, dset_path, dr): - - ff = h5py.File(nwb_path, 'r') - frame_num = ff[dset_path].shape[0] - print('\tshape of movie: {}'.format(ff[dset_path].shape)) - chunk_starts = np.array(range(0, frame_num, chunk_size)) - chunk_ends = chunk_starts + chunk_size - chunk_ends[-1] = frame_num - - params = [] - for i, chunk_start in enumerate(chunk_starts): - params.append((nwb_path, dset_path, chunk_start, chunk_ends[i], dr)) - - p = Pool(process_num) - mov_d = p.map(downsample_for_multiprocessing, params) - - return np.concatenate(mov_d, axis=0) - -def run(): - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - print('getting total mask ...') - cell_f = h5py.File('cells_refined.hdf5', 'r') - h, w = cell_f['cell0000']['roi'].attrs['dimension'] - total_mask = np.zeros((h, w), dtype=np.uint8) - for cell_n, cell_grp in cell_f.items(): - curr_roi = ia.WeightedROI.from_h5_group(cell_grp['roi']) - curr_mask = curr_roi.get_binary_mask() - total_mask = np.logical_or(total_mask, curr_mask) - cell_f.close() - total_mask = ni.binary_dilation(total_mask, iterations=1) - # plt.imshow(total_mask) - # 
plt.title('total_mask') - # plt.show() - - nwb_folder = os.path.dirname(curr_folder) - nwb_fn = [f for f in os.listdir(nwb_folder) if f[-4:] == '.nwb'][0] - nwb_path = os.path.join(nwb_folder, nwb_fn) - - plane_n = os.path.split(curr_folder)[1] - dset_path = 'processing/motion_correction/MotionCorrection/{}/corrected/data'.format(plane_n) - - print('downsampling movie ...') - print('\tnwb_path: {}'.format(nwb_path)) - print('\tdset_path: {}'.format(dset_path)) - - nwb_f = h5py.File(nwb_path, 'r') - dset = nwb_f[dset_path] - print('\ttotal shape: {}'.format(dset.shape)) - nwb_f.close() - - mov_d = downsample_mov(nwb_path=nwb_path, dset_path=dset_path, dr=downsample_r) - v_min = np.amin(mov_d) - v_max = np.amax(mov_d) - print('\tshape of downsampled movie: {}'.format(mov_d.shape)) - - print('\n\tgenerating avi ...') - - if cv2.__version__[0:3] == '3.1': - codex = 'XVID' - fourcc = cv2.VideoWriter_fourcc(*codex) - out = cv2.VideoWriter('marked_mov.avi', fourcc, 30, (frame_size * 100, frame_size * 100), isColor=True) - elif cv2.__version__[0:6] == '2.4.11': - out = cv2.VideoWriter('marked_mov.avi', -1, 30, (frame_size * 100, frame_size * 100), isColor=True) - elif cv2.__version__[0:3] == '2.4': - codex = 'XVID' - fourcc = cv2.cv.CV_FOURCC(*codex) - out = cv2.VideoWriter('marked_mov.avi', fourcc, 30, (frame_size * 100, frame_size * 100), isColor=True) - else: - raise EnvironmentError('Do not understand opencv cv2 version: {}.'.format(cv2.__version__)) - - f = plt.figure(figsize=(frame_size, frame_size)) - for frame_i, frame in enumerate(mov_d): - print('\tframe: {} / {}'.format(frame_i, mov_d.shape[0])) - f.clear() - ax = f.add_subplot(111) - ax.imshow(frame, vmin=v_min, vmax=v_max*0.5, cmap='gray', interpolation='nearest') - pt.plot_mask_borders(total_mask, plotAxis=ax, color='#ff0000', zoom=1, borderWidth=1) - ax.set_aspect('equal') - # plt.show() - - buffer_ = StringIO() - pt.save_figure_without_borders(f, buffer_, dpi=100) - buffer_.seek(0) - image = PIL.Image.open(buffer_) - curr_frame = np.asarray(image) - r, g, b, a = np.rollaxis(curr_frame, axis=-1) - curr_frame = (np.dstack((b, g, r))) - # print(r.dtype) - # print(curr_frame.shape) - - out.write(curr_frame) - - out.release() - cv2.destroyAllWindows() - print('\t.avi movie generated.') - -if __name__ == '__main__': - run() diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/140_get_weighted_rois_and_surrounds.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/140_get_weighted_rois_and_surrounds.py deleted file mode 100644 index 787c122..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/140_get_weighted_rois_and_surrounds.py +++ /dev/null @@ -1,127 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Jun 30 17:44:42 2015 - -@author: junz -""" - -import os -import numpy as np -import h5py -import tifffile as tf -import allensdk_internal.brain_observatory.mask_set as mask_set -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import scipy.ndimage as ni -import matplotlib.pyplot as plt - -plt.ioff() - - -def run(): - data_file_name = 'cells_refined.hdf5' - background_file_name = "corrected_mean_projections.tif" - save_folder = 'figures' - - overlap_threshold = 0.9 - surround_limit = [1, 8] - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - if not os.path.isdir(save_folder): - os.makedirs(save_folder) - - print 'reading cells 
file ...' - data_f = h5py.File(data_file_name, 'r') - - cell_ns = data_f.keys() - cell_ns.sort() - - binary_mask_array = [] - weight_mask_array = [] - - for cell_n in cell_ns: - curr_roi = ia.ROI.from_h5_group(data_f[cell_n]['roi']) - binary_mask_array.append(curr_roi.get_binary_mask()) - weight_mask_array.append(curr_roi.get_weighted_mask()) - - data_f.close() - binary_mask_array = np.array(binary_mask_array) - weight_mask_array = np.array(weight_mask_array) - print 'starting mask_array shape:', weight_mask_array.shape - - print 'getting total mask ...' - total_mask = np.zeros((binary_mask_array.shape[1], binary_mask_array.shape[2]), dtype=np.uint8) - for curr_mask in binary_mask_array: - total_mask = np.logical_or(total_mask, curr_mask) - total_mask = np.logical_not(total_mask) - - plt.imshow(total_mask, interpolation='nearest') - plt.title('total_mask') - # plt.show() - - print 'getting surround masks ...' - binary_surround_array = [] - for binary_center in binary_mask_array: - curr_surround = np.logical_xor(ni.binary_dilation(binary_center, iterations=surround_limit[1]), - ni.binary_dilation(binary_center, iterations=surround_limit[0])) - curr_surround = np.logical_and(curr_surround, total_mask).astype(np.uint8) - binary_surround_array.append(curr_surround) - # plt.imshow(curr_surround) - # plt.show() - binary_surround_array = np.array(binary_surround_array) - - print "saving rois ..." - center_areas = [] - surround_areas = [] - for mask_ind in range(binary_mask_array.shape[0]): - center_areas.append(np.sum(binary_mask_array[mask_ind].flat)) - surround_areas.append(np.sum(binary_surround_array[mask_ind].flat)) - roi_f = h5py.File('rois_and_traces.hdf5') - roi_f['masks_center'] = weight_mask_array - roi_f['masks_surround'] = binary_surround_array - - roi_f.close() - print 'minimum surround area:', min(surround_areas), 'pixels.' - - f = plt.figure(figsize=(10, 10)) - ax_center = f.add_subplot(211) - ax_center.hist(center_areas, bins=30) - ax_center.set_title('roi center area distribution') - ax_surround = f.add_subplot(212) - ax_surround.hist(surround_areas, bins=30) - ax_surround.set_title('roi surround area distribution') - # plt.show() - - print 'plotting ...' - colors = pt.random_color(weight_mask_array.shape[0]) - bg = ia.array_nor(np.max(tf.imread(background_file_name), axis=0)) - - f_c_bg = plt.figure(figsize=(10, 10)) - ax_c_bg = f_c_bg.add_subplot(111) - ax_c_bg.imshow(bg, cmap='gray', vmin=0, vmax=0.5, interpolation='nearest') - f_c_nbg = plt.figure(figsize=(10, 10)) - ax_c_nbg = f_c_nbg.add_subplot(111) - ax_c_nbg.imshow(np.zeros(bg.shape,dtype=np.uint8),vmin=0,vmax=1,cmap='gray',interpolation='nearest') - f_s_nbg = plt.figure(figsize=(10, 10)) - ax_s_nbg = f_s_nbg.add_subplot(111) - ax_s_nbg.imshow(np.zeros(bg.shape,dtype=np.uint8),vmin=0,vmax=1,cmap='gray',interpolation='nearest') - - i = 0 - for mask_ind in range(binary_mask_array.shape[0]): - pt.plot_mask_borders(binary_mask_array[mask_ind], plotAxis=ax_c_bg, color=colors[i], borderWidth=1) - pt.plot_mask_borders(binary_mask_array[mask_ind], plotAxis=ax_c_nbg, color=colors[i], borderWidth=1) - pt.plot_mask_borders(binary_surround_array[mask_ind], plotAxis=ax_s_nbg, color=colors[i], borderWidth=1) - i += 1 - - # plt.show() - - print 'saving figures ...' 
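- # save the center-ROI overlays (with and without the background image), the surround-ROI overlay, and the area-distribution histograms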
- pt.save_figure_without_borders(f_c_bg, os.path.join(save_folder, '2P_ROIs_with_background.png'), dpi=300) - pt.save_figure_without_borders(f_c_nbg, os.path.join(save_folder, '2P_ROIs_without_background.png'), dpi=300) - pt.save_figure_without_borders(f_s_nbg, os.path.join(save_folder, '2P_ROI_surrounds_background.png'), dpi=300) - f.savefig(os.path.join(save_folder, 'roi_area_distribution.pdf'), dpi=300) - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/150_get_raw_center_and_surround_traces.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/150_get_raw_center_and_surround_traces.py deleted file mode 100644 index 0e57582..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/150_get_raw_center_and_surround_traces.py +++ /dev/null @@ -1,129 +0,0 @@ -import os -import numpy as np -import h5py -import time -import corticalmapping.core.ImageAnalysis as ia -import corticalmapping.core.PlottingTools as pt -import corticalmapping.core.FileTools as ft -import corticalmapping.NwbTools as nt -import matplotlib.pyplot as plt -from multiprocessing import Pool - -CHUNK_SIZE = 2000 -PROCESS_NUM = 5 -def get_chunk_frames(frame_num, chunk_size): - chunk_num = frame_num // chunk_size - if frame_num % chunk_size > 0: - chunk_num = chunk_num + 1 - - print("total number of frames:", frame_num) - print("total number of chunks:", chunk_num) - - chunk_ind = [] - chunk_starts = [] - chunk_ends = [] - - for chunk_i in range(chunk_num): - chunk_ind.append(chunk_i) - chunk_starts.append(chunk_i * chunk_size) - - if chunk_i < chunk_num - 1: - chunk_ends.append((chunk_i + 1) * chunk_size) - else: - chunk_ends.append(frame_num) - - return zip(chunk_ind, chunk_starts, chunk_ends) - -def get_traces(params): - t0 = time.time() - - chunk_ind, chunk_start, chunk_end, nwb_path, data_path, curr_folder, center_array, surround_array = params - - nwb_f = h5py.File(nwb_path, 'r') - print('\nstart analyzing chunk: {}'.format(chunk_ind)) - curr_mov = nwb_f[data_path][chunk_start: chunk_end] - nwb_f.close() - - # print 'extracting traces' - curr_traces_center = np.empty((center_array.shape[0], curr_mov.shape[0]), dtype=np.float32) - curr_traces_surround = np.empty((center_array.shape[0], curr_mov.shape[0]), dtype=np.float32) - for i in range(center_array.shape[0]): - curr_center = ia.WeightedROI(center_array[i]) - curr_surround = ia.ROI(surround_array[i]) - curr_traces_center[i, :] = curr_center.get_weighted_trace_pixelwise(curr_mov) - - # scale surround trace to be similar to the center trace - mean_center_weight = curr_center.get_mean_weight() - curr_traces_surround[i, :] = curr_surround.get_binary_trace_pixelwise(curr_mov) * mean_center_weight - - # print 'saving chunk {} ...'.format(chunk_ind) - chunk_folder = os.path.join(curr_folder, 'chunks') - if not os.path.isdir(chunk_folder): - os.mkdir(chunk_folder) - chunk_f = h5py.File(os.path.join(chunk_folder, 'chunk_temp_' + ft.int2str(chunk_ind, 4) + '.hdf5')) - chunk_f['traces_center'] = curr_traces_center - chunk_f['traces_surround'] = curr_traces_surround - chunk_f.close() - - print('\n\t{:06d} seconds: chunk: {}; demixing finished.'.format(int(time.time() - t0), chunk_ind)) - - return None - -def run(): - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - plane_n = os.path.split(curr_folder)[1] - print(plane_n) - - print('getting masks ...') - rois_f = 
h5py.File('rois_and_traces.hdf5') - center_array = rois_f['masks_center'].value - surround_array = rois_f['masks_surround'].value - - print('\nanalyzing movie in chunks of size:', CHUNK_SIZE , 'frames.') - - nwb_folder = os.path.dirname(curr_folder) - nwb_fn = [f for f in os.listdir(nwb_folder) if f[-4:] == '.nwb'][0] - nwb_path = os.path.join(nwb_folder, nwb_fn) - print('\n' + nwb_path) - data_path = '/processing/motion_correction/MotionCorrection/' + plane_n + '/corrected/data' - - nwb_f = h5py.File(nwb_path, 'r') - total_frame = nwb_f[data_path].shape[0] - nwb_f.close() - - chunk_frames = get_chunk_frames(total_frame, CHUNK_SIZE) - chunk_params = [(cf[0], cf[1], cf[2], nwb_path, data_path, - curr_folder, center_array, surround_array) for cf in chunk_frames] - - p = Pool(PROCESS_NUM) - p.map(get_traces, chunk_params) - - chunk_folder = os.path.join(curr_folder, 'chunks') - chunk_fns = [f for f in os.listdir(chunk_folder) if f[0:11] == 'chunk_temp_'] - chunk_fns.sort() - print('\nreading chunks files ...') - print('\n'.join(chunk_fns)) - - traces_raw = [] - traces_surround = [] - - for chunk_fn in chunk_fns: - curr_chunk_f = h5py.File(os.path.join(chunk_folder, chunk_fn)) - traces_raw.append(curr_chunk_f['traces_center'].value) - traces_surround.append(curr_chunk_f['traces_surround'].value) - - print("saving ...") - traces_raw = np.concatenate(traces_raw, axis=1) - traces_surround = np.concatenate(traces_surround, axis=1) - rois_f['traces_center_raw'] = traces_raw - rois_f['traces_surround_raw'] = traces_surround - print('done.') - - -if __name__ == '__main__': - run() - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/160_get_neuropil_subtracted_traces.py b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/160_get_neuropil_subtracted_traces.py deleted file mode 100644 index 3605fdf..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/160_get_neuropil_subtracted_traces.py +++ /dev/null @@ -1,120 +0,0 @@ -import sys -import os -import h5py -import numpy as np -import corticalmapping.HighLevel as hl -import corticalmapping.core.FileTools as ft -import matplotlib.pyplot as plt -from multiprocessing import Pool - - -lam = 0.05 -plot_chunk_size = 5000 -process_num = 5 - - -def plot_traces_chunks(traces, labels, chunk_size, roi_ind): - """ - - :param traces: np.array, shape=[trace_type, t_num] - :param labels: - :param chunk_size: - :param figures_folder: - :param roi_ind: - :return: - """ - - t_num = traces.shape[1] - chunk_num = t_num // chunk_size - - chunks = [] - for chunk_ind in range(chunk_num): - chunks.append([chunk_ind * chunk_size, (chunk_ind + 1) * chunk_size]) - - if t_num % chunk_size != 0: - chunks.append([chunk_num * chunk_size, t_num]) - - v_max = np.amax(traces) - v_min = np.amin(traces) - - fig = plt.figure(figsize=(75, 20)) - fig.suptitle('neuropil subtraction for ROI: {}'.format(roi_ind)) - for chunk_ind, chunk in enumerate(chunks): - curr_ax = fig.add_subplot(len(chunks), 1, chunk_ind + 1) - for trace_ind in range(traces.shape[0]): - curr_ax.plot(traces[trace_ind, chunk[0]: chunk[1]], label=labels[trace_ind]) - - curr_ax.set_xlim([0, chunk_size]) - curr_ax.set_ylim([v_min, v_max * 1.2]) - curr_ax.legend() - - return fig - -def plot_traces_for_multi_process(params): - - curr_traces, plot_chunk_size, roi_ind, figures_folder = params - - print('roi_{:04d}'.format(roi_ind)) - - curr_fig = plot_traces_chunks(traces=curr_traces, - labels=['center', 
'surround', 'subtracted'], - chunk_size=plot_chunk_size, - roi_ind=roi_ind) - curr_fig.savefig(os.path.join(figures_folder, 'neuropil_subtraction_ROI_{:04d}.png'.format(roi_ind))) - curr_fig.clear() - plt.close(curr_fig) - -def run(): - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - data_f = h5py.File('rois_and_traces.hdf5') - traces_raw = data_f['traces_center_raw'].value - traces_surround = data_f['traces_surround_raw'].value - - traces_subtracted = np.zeros(traces_raw.shape, np.float32) - ratio = np.zeros(traces_raw.shape[0], np.float32) - err = np.zeros(traces_raw.shape[0], np.float32) - - for i in range(traces_raw.shape[0]): - curr_trace_c = traces_raw[i] - curr_trace_s = traces_surround[i] - curr_r, curr_err, curr_trace_sub = hl.neural_pil_subtraction(curr_trace_c, curr_trace_s, lam=lam) - print "roi_%s \tr = %.4f; error = %.4f." % (ft.int2str(i, 5), curr_r, curr_err) - traces_subtracted[i] = curr_trace_sub - ratio[i] = curr_r - err[i] = curr_err - - print('\nplotting neuropil subtraction results ...') - figures_folder = 'figures/neuropil_subtraction_lam_{}'.format(lam) - if not os.path.isdir(figures_folder): - os.makedirs(figures_folder) - - params = [] - for roi_ind in range(traces_raw.shape[0]): - - curr_traces = np.array([traces_raw[roi_ind], traces_surround[roi_ind], traces_subtracted[roi_ind]]) - - params.append((curr_traces, plot_chunk_size, roi_ind, figures_folder)) - - p = Pool(process_num) - p.map(plot_traces_for_multi_process, params) - - # wait for keyboard confirmation - # msg = raw_input('Do you want to save? (y/n)\n') - # while True: - # if msg == 'y': - # break - # elif msg == 'n': - # sys.exit('Stop process without saving.') - # else: - # msg = raw_input('Do you want to save? (y/n)\n') - - data_f['traces_center_subtracted'] = traces_subtracted - data_f['neuropil_r'] = ratio - data_f['neuropil_err'] = err - - data_f.close() - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/batch_processing_bouton.bat b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/batch_processing_bouton.bat deleted file mode 100644 index 74aa492..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/batch_processing_bouton.bat +++ /dev/null @@ -1,10 +0,0 @@ -call activate bigmess - -set PYTHONPATH=%PYTHONPATH%;E:\data\python_packages\corticalmapping;E:\data\python_packages\allensdk_internal;E:\data\python_packages\ainwb\ainwb - -python 120_get_cells_file_bouton.py -python 130_refine_cells_bouton.py -python 140_get_weighted_rois_and_surrounds.py -python 150_get_raw_center_and_surround_traces.py -python 160_get_neuropil_subtracted_traces.py -python 135_generate_marked_avi.py \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/batch_processing_soma.bat b/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/batch_processing_soma.bat deleted file mode 100644 index 790c20d..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_movie/within_plane_folder/batch_processing_soma.bat +++ /dev/null @@ -1,10 +0,0 @@ -call activate bigmess - -set PYTHONPATH=%PYTHONPATH%;E:\data\python_packages\corticalmapping;E:\data\python_packages\allensdk_internal;E:\data\python_packages\ainwb\ainwb - -python 120_get_cells_file_soma.py -python 130_refine_cells_soma.py -python 
140_get_weighted_rois_and_surrounds.py -python 150_get_raw_center_and_surround_traces.py -python 160_get_neuropil_subtracted_traces.py -python 135_generate_marked_avi.py \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/000_reorganize_files.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/000_reorganize_files.py deleted file mode 100644 index 6d101e1..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/000_reorganize_files.py +++ /dev/null @@ -1,54 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\181102-M412052-deepscope\zstack1" -file_identifier = 'zstack1' -ch_ns = ['red'] -frames_per_step = 200 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -file_ns = [f for f in os.listdir(data_folder) if f[-4:] == '.tif' and file_identifier in f] -file_ns.sort() -print('\n'.join(file_ns)) - -save_folders = [] -for ch_n in ch_ns: - curr_save_folder = os.path.join(data_folder, file_identifier, ch_n) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - save_folders.append(curr_save_folder) - -curr_step = 0 - -for file_n in file_ns: - curr_mov = tf.imread(os.path.join(data_folder, file_n)) - - - curr_frame_num = curr_mov.shape[0] / len(ch_ns) - - if curr_frame_num % frames_per_step != 0: - raise ValueError('{}: total frame number is not divisible by frames per step.'.format(file_n)) - - curr_mov_chs = [] - for ch_i in range(len(ch_ns)): - curr_mov_chs.append(curr_mov[ch_i::len(ch_ns)]) - - steps = curr_frame_num // frames_per_step - for step_ind in range(steps): - - print ('current step: {}'.format(curr_step)) - - for ch_i in range(len(ch_ns)): - curr_step_mov_ch = curr_mov_chs[ch_i][step_ind * frames_per_step:(step_ind + 1) * frames_per_step,:,:] - curr_step_n = 'step_' + ft.int2str(curr_step, 4) - curr_step_folder = os.path.join(save_folders[ch_i], curr_step_n) - os.mkdir(curr_step_folder) - tf.imsave(os.path.join(curr_step_folder, curr_step_n + '.tif'), curr_step_mov_ch) - - curr_step += 1 diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/050_motion_correction_multichannel.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/050_motion_correction_multichannel.py deleted file mode 100644 index afe5f13..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/050_motion_correction_multichannel.py +++ /dev/null @@ -1,98 +0,0 @@ -import os -import numpy as np -import h5py -import tifffile as tf -import stia.motion_correction as mc -from warnings import warn -from multiprocessing import Pool - -def run(): - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180816-M376019-zstack\zstack_2p_zoom2" - ref_ch_n = 'red' - n_process = 8 - - anchor_frame_ind_chunk = 10 - iteration_chunk = 10 - max_offset_chunk = (50., 50.) - preprocessing_type = 0 - fill_value = 0. 
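- # when is_apply is True, the offsets computed on the reference channel are applied to the same movies right away in correct_single_step below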
- - is_apply = True - avi_downsample_rate = None - is_equalizing_histogram = False - - curr_folder = os.path.dirname(os.path.realpath(__file__)) - os.chdir(curr_folder) - - ref_data_folder = os.path.join(data_folder, ref_ch_n) - - steps = [f for f in os.listdir(ref_data_folder) if os.path.isdir(os.path.join(ref_data_folder, f)) - and f[0:5] == 'step_'] - steps.sort() - print('\n'.join(steps)) - - params = [] - for step in steps: - - folder_ref = os.path.join(data_folder, ref_ch_n, step) - params.append((folder_ref, anchor_frame_ind_chunk, iteration_chunk, max_offset_chunk, preprocessing_type, - fill_value, is_apply, avi_downsample_rate, is_equalizing_histogram)) - - chunk_p = Pool(n_process) - chunk_p.map(correct_single_step, params) - - -def correct_single_step(param): - - folder_ref, anchor_frame_ind_chunk, iteration_chunk, max_offset_chunk, preprocessing_type, fill_value,\ - is_apply, avi_downsample_rate, is_equalizing_histogram = param - - step_n = os.path.split(folder_ref)[1] - print('\nStart correcting step {} ...'.format(step_n)) - - mov_ref_n = [f for f in os.listdir(folder_ref) if f[-4:] == '.tif' and step_n in f] - if len(mov_ref_n) != 1: - warn('step {}: number of reference channel movies does not equal 1.'.format(step_n)) - return - - mov_paths, _ = mc.motion_correction(input_folder=folder_ref, - input_path_identifier='.tif', - process_num=1, - output_folder=folder_ref, - anchor_frame_ind_chunk=anchor_frame_ind_chunk, - anchor_frame_ind_projection=0, - iteration_chunk=iteration_chunk, - iteration_projection=10, - max_offset_chunk=max_offset_chunk, - max_offset_projection=(30., 30.), - align_func=mc.phase_correlation, - preprocessing_type=preprocessing_type, - fill_value=fill_value) - - if is_apply: - - offsets_path = os.path.join(folder_ref, 'correction_offsets.hdf5') - offsets_f = h5py.File(offsets_path) - ref_path = offsets_f['file_0000'].attrs['path'] - offsets_f.close() - - movie_path = mov_paths[0] - - mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=[[ref_path, movie_path]], - output_folder=folder_ref, - process_num=1, - fill_value=fill_value, - avi_downsample_rate=avi_downsample_rate, - is_equalizing_histogram=is_equalizing_histogram) - - - -if __name__ == "__main__": - run() - - - - - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/060_apply_offsets_multichannel.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/060_apply_offsets_multichannel.py deleted file mode 100644 index af0e46d..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/060_apply_offsets_multichannel.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import stia.motion_correction as mc -import numpy as np -import h5py -import tifffile as tf -from multiprocessing import Pool - -def run(): - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180816-M376019-zstack\zstack_2p_zoom2" - ref_ch_n = 'red' - apply_ch_ns = ['red'] - n_process = 8 - - step_ns = [f for f in os.listdir(os.path.join(data_folder, ref_ch_n))] - step_ns = [f for f in step_ns if os.path.isdir(os.path.join(data_folder, ref_ch_n, f))] - step_ns.sort() - - chunk_p = Pool(n_process) - - for ch_n in apply_ch_ns: - mc_params = [] - for step_n in step_ns: - movie_path = os.path.join(data_folder, ch_n, step_n, step_n + '.tif') - offsets_path = os.path.join(data_folder, ref_ch_n, step_n, 'correction_offsets.hdf5') - mc_params.append((movie_path, offsets_path)) - - chunk_p.map(apply_offset_single, mc_params) - - -def 
apply_offset_single(param): - - movie_path, offsets_path = param - - offsets_f = h5py.File(offsets_path) - ref_path = offsets_f['file_0000'].attrs['path'] - offsets_f.close() - - mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=[[ref_path, movie_path]], - output_folder=os.path.split(os.path.realpath(movie_path))[0], - process_num=1, - fill_value=0., - avi_downsample_rate=None, - is_equalizing_histogram=False) - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/065_get_uncorrected_zstack_from_server.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/065_get_uncorrected_zstack_from_server.py deleted file mode 100644 index d4938f7..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/065_get_uncorrected_zstack_from_server.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import numpy as np -import tifffile as tf - -data_folder = r"\\sd2\SD2\jun_backup\raw_data_temp\181217-M421211-2p\zstack" - -chns = ['green', 'red'] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -for chn in chns: - print('processing channel: {} ...'.format(chn)) - ch_folder = os.path.join(data_folder, chn) - steps = [f for f in os.listdir(ch_folder) if os.path.isdir(os.path.join(ch_folder, f)) and f[0:5] == 'step_'] - steps.sort() - print('\ntotal number of steps: {}'.format(len(steps))) - - zstack = [] - for step in steps: - print("\t{}".format(step)) - movie = tf.imread(os.path.join(ch_folder, step, step + '.tif')) - zstack.append(np.mean(movie, axis=0)) - - zstack = np.array(zstack, dtype=np.float32) - save_n = os.path.split(data_folder)[1] + '_uncorrected_' + chn + '.tif' - tf.imsave(save_n, zstack) diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/070_get_zstack_from_server.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/070_get_zstack_from_server.py deleted file mode 100644 index 404feb4..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/070_get_zstack_from_server.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import numpy as np -import tifffile as tf - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180816-M376019-zstack\zstack_2p_zoom2" - -chns = ['green', 'red'] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -for chn in chns: - print('processing channel: {} ...'.format(chn)) - ch_folder = os.path.join(data_folder, chn) - steps = [f for f in os.listdir(ch_folder) if os.path.isdir(os.path.join(ch_folder, f)) and f[0:5] == 'step_'] - steps.sort() - print('\ntotal number of steps: {}'.format(len(steps))) - - zstack = [] - for step in steps: - print("\t{}".format(step)) - zstack.append(tf.imread(os.path.join(ch_folder, step, 'corrected_mean_projection.tif'))) - - zstack = np.array(zstack) - save_n = os.path.split(data_folder)[1] + '_' + chn + '.tif' - tf.imsave(save_n, zstack) diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/080_remove_uncorrected_files.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/080_remove_uncorrected_files.py deleted file mode 100644 index 10cc96e..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/080_remove_uncorrected_files.py +++ /dev/null @@ -1,26 +0,0 @@ -import os - -base_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - 
r"\180619-M386444-2p\zstack_zoom2\zstack_zoom2" - -channels = ['green', 'red'] - -for ch_n in channels: - print('remove uncorrected files for channle: {}'.format(ch_n)) - - step_fns = [f for f in os.listdir(os.path.join(base_folder, ch_n)) if f.split('_')[-2] == 'step'] - step_fns.sort() - print('\n'.join(step_fns)) - - for step_fn in step_fns: - - print('\n' + step_fn) - step_folder = os.path.join(base_folder, ch_n, step_fn) - - fns = os.listdir(step_folder) - - if step_fn + '.tif' in fns: - os.remove(os.path.join(step_folder, step_fn + '.tif')) - else: - print('Cannot find uncorrected file. Skip.') - \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/090_remove_corrected_files.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/090_remove_corrected_files.py deleted file mode 100644 index caa207f..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/090_remove_corrected_files.py +++ /dev/null @@ -1,46 +0,0 @@ -import os - -base_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\181112-M424454-2p\zstack" -channels = ['green', 'red'] -is_remove_img = False - -for ch in channels: - print('processing channel: {} ...'.format(ch)) - ch_folder = os.path.join(base_folder, ch) - - step_fns = [f for f in os.listdir(ch_folder) if f.split('_')[-2] == 'step'] - step_fns.sort() - print('\n'.join(step_fns)) - - for step_fn in step_fns: - - print('\n' + step_fn) - step_folder = os.path.join(ch_folder, step_fn) - fns = os.listdir(step_folder) - - - if is_remove_img: - if 'corrected_max_projection.tif' in fns: - print('removing corrected_max_projection.tif') - os.remove(os.path.join(step_folder, 'corrected_max_projection.tif')) - - if 'corrected_max_projections.tif' in fns: - print('removing corrected_max_projections.tif') - os.remove(os.path.join(step_folder, 'corrected_max_projections.tif')) - - if 'corrected_mean_projection.tif' in fns: - print('removing corrected_mean_projection.tif') - os.remove(os.path.join(step_folder, 'corrected_mean_projection.tif')) - - if 'corrected_mean_projections.tif' in fns: - print('removing corrected_mean_projections.tif') - os.remove(os.path.join(step_folder, 'corrected_mean_projections.tif')) - - if 'correction_offsets.hdf5' in fns: - print('removing correction_offsets.hdf5') - os.remove(os.path.join(step_folder, 'correction_offsets.hdf5')) - - fn_cor = [f for f in fns if f[-14:] == '_corrected.tif'] - if len(fn_cor) == 1: - print('removing ' + fn_cor[0]) - os.remove(os.path.join(step_folder, fn_cor[0])) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/095_split_channel.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/095_split_channel.py deleted file mode 100644 index c240344..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/095_split_channel.py +++ /dev/null @@ -1,18 +0,0 @@ -import os -import tifffile as tf - -data_fn = 'zstack_zoom2_00001_00001.tif' -ch_ns = ['green', 'red'] -save_prefix = 'zstack' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -ch_num = len(ch_ns) - -stack = tf.imread(data_fn) - -for ch_i, ch_n in enumerate(ch_ns): - tf.imsave('{}_{}.tif'.format(save_prefix, ch_n), stack[ch_i::ch_num]) - -print('done.') \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/100_get_fine_zstack.py 
b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/100_get_fine_zstack.py deleted file mode 100644 index 68f4747..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/100_get_fine_zstack.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import h5py -import numpy as np -import tifffile as tf -import stia.motion_correction as mc -import stia.utility.image_analysis as ia - -identifier = 'zstack1' -ch_ref = 'red' -ch_app = ['green', 'red'] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -stack_ref = tf.imread('{}_{}.tif'.format(identifier, ch_ref)) - -step_offsets = [[0., 0.]] # offsets between adjacent steps - -print('calculating step offsets ...') -for step_i in range(1, stack_ref.shape[0]): - curr_offset = mc.phase_correlation(stack_ref[step_i], stack_ref[step_i - 1]) - step_offsets.append(curr_offset) -step_offsets = np.array([np.array(so) for so in step_offsets], dtype=np.float32) -print('\nsetp offsets:') -print(step_offsets) - -print('\ncalculating final offsets ...') -final_offsets_y = np.cumsum(step_offsets[:, 0]) -final_offsets_x = np.cumsum(step_offsets[:, 1]) -final_offsets = np.array([final_offsets_x, final_offsets_y], dtype=np.float32).transpose() - -middle_frame_ind = stack_ref.shape[0] // 2 -middle_offsets = final_offsets[middle_frame_ind: middle_frame_ind + 1] -final_offsets = final_offsets - middle_offsets -print('\nfinal offsets:') -print(final_offsets) - -print('applying final offsets ...') - -for ch in ch_app: - - stack_app = tf.imread('{}_{}.tif'.format(identifier, ch)) - stack_aligned = [] - - for step_i in range(stack_app.shape[0]): - curr_offset = final_offsets[step_i] - frame = stack_app[step_i] - frame_aligned = ia.rigid_transform_cv2_2d(frame, offset=curr_offset, fill_value=0).astype(np.float32) - stack_aligned.append(frame_aligned) - - stack_aligned = np.array(stack_aligned, dtype=np.float32) - - tf.imsave('{}_{}_aligned.tif'.format(identifier, ch), stack_aligned) - # tf.imsave('{}_{}_max_projection.tif'.format(identifier, ch), np.max(stack_aligned, axis=0)) - - - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/110_rotate_zstack.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/110_rotate_zstack.py deleted file mode 100644 index e7f14de..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/110_rotate_zstack.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import stia.motion_correction as mc -import stia.utility.image_analysis as ia - -fn = 'stack1_final.tif' -scope = 'deepscope' # 'sutter' or 'deepscope' or 'scientifica' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -stack = tf.imread(fn) - -if scope == 'sutter': - stack_r = stack.transpose((0, 2, 1))[:, ::-1, :] - -elif scope == 'deepscope': - h_new = int(stack.shape[1] * np.sqrt(2)) - w_new = int(stack.shape[2] * np.sqrt(2)) - stack_r = ia.rigid_transform_cv2(stack, rotation=140, output_shape=(h_new, w_new))[:, :, ::-1] - -elif scope == 'scientifica': - h_new = int(stack.shape[1] * np.sqrt(2)) - w_new = int(stack.shape[2] * np.sqrt(2)) - stack_r = ia.rigid_transform_cv2(stack[:,::-1,:], rotation=135, output_shape=(h_new, w_new)) - -else: - raise LookupError("Do not understand scope type. 
Should be 'sutter' or 'deepscope' or 'scientifica'.") - -tf.imsave(os.path.splitext(fn)[0] + '_rotated.tif', stack_r.astype(stack.dtype)) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/120_get_depth_profile.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/120_get_depth_profile.py deleted file mode 100644 index 2748ae1..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/120_get_depth_profile.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import matplotlib.pyplot as plt -import corticalmapping.core.ImageAnalysis as ia - -data_fn = 'zstack_2p_zoom2_red_aligned.tif' -save_fn = '2018-08-16-M376019-depth-profile-red.png' -start_depth = 50 # micron -step_depth = 2 # micron -pix_size = 0.7 # sutter scope, zoom2, 512 x 512 -resolution = 512 - -curr_folder = os.path.dirname(os.path.abspath(__file__)) -os.chdir(curr_folder) - -data = tf.imread(data_fn) -dp = ia.array_nor(np.mean(data, axis=1)) - -depth_i = np.array(range(0, dp.shape[0], 50)) -depth_l = depth_i * step_depth + start_depth - -f = plt.figure(figsize=(8, 8)) -ax = f.add_subplot(111) -ax.imshow(dp, vmin=0, vmax=1, cmap='magma', aspect=step_depth / pix_size) -ax.set_xticks([0, resolution-1]) -ax.set_xticklabels(['0', '{:7.2f}'.format(resolution*pix_size)]) -ax.set_yticks(depth_i) -ax.set_yticklabels(depth_l) -ax.set_xlabel('horizontal dis (um)') -ax.set_ylabel('depth (um)') - -plt.show() - -f.savefig(save_fn) diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/130_combine_channels.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/130_combine_channels.py deleted file mode 100644 index 704e5d4..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/130_combine_channels.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import cv2 - -fn_lst = ['FOV2_projection_site_zstack_green_aligned.tif', - 'FOV2_projection_site_zstack_red_aligned.tif'] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -mov = [] - -for fn in fn_lst: - curr_mov = tf.imread(fn).astype(np.float32) - - curr_mov_adjust = [] - # histogram equalization - for frame in curr_mov: - # display_frame = (frame - np.amin(frame)) / (np.amax(frame) - np.amin(frame)) - # display_frame = (display_frame * 255).astype(np.uint8) - # display_frame = cv2.equalizeHist(display_frame) - - display_frame = frame - np.mean(frame[:]) - curr_mov_adjust.append(display_frame) - - curr_mov_adjust = np.array(curr_mov_adjust) - mov.append(curr_mov_adjust) - -mov = np.concatenate(mov, axis=2) - -tf.imsave('zstack_aligned_combined.tif', mov) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/000_reorganize_files.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/000_reorganize_files.py deleted file mode 100644 index 7ae2a62..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/000_reorganize_files.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.FileTools as ft - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180605-M391355-2p\zstack" -file_identifier = 'zstack_zoom2' -ch_ns = ['green', 'red'] -frames_per_step = 300 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) 
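- # work from this script's own folder; the outputs below are still written under data_folder via absolute paths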
-os.chdir(curr_folder) - -file_ns = [f for f in os.listdir(data_folder) if f[-4:] == '.tif' and file_identifier in f] -file_ns.sort() -print('\n'.join(file_ns)) - -save_folders = [] -for ch_n in ch_ns: - curr_save_folder = os.path.join(data_folder, file_identifier, ch_n) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - save_folders.append(curr_save_folder) - -curr_step = 0 - -for file_n in file_ns: - curr_mov = tf.imread(os.path.join(data_folder, file_n)) - - # reorient movie - curr_mov = curr_mov.transpose((0, 2, 1))[:, ::-1, :] - - curr_frame_num = curr_mov.shape[0] / len(ch_ns) - - if curr_frame_num % frames_per_step != 0: - raise ValueError('{}: total frame number is not divisible by frames per step.'.format(file_n)) - - curr_mov_chs = [] - for ch_i in range(len(ch_ns)): - curr_mov_chs.append(curr_mov[ch_i::len(ch_ns)]) - - steps = curr_frame_num // frames_per_step - for step_ind in range(steps): - - print ('current step: {}'.format(curr_step)) - - for ch_i in range(len(ch_ns)): - curr_step_mov_ch = curr_mov_chs[ch_i][step_ind * frames_per_step:(step_ind + 1) * frames_per_step,:,:] - curr_step_n = 'step_' + ft.int2str(curr_step, 4) - curr_step_folder = os.path.join(save_folders[ch_i], curr_step_n) - os.mkdir(curr_step_folder) - tf.imsave(os.path.join(curr_step_folder, curr_step_n + '.tif'), curr_step_mov_ch) - - curr_step += 1 diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/010_motion_correction_zstack_caiman_multichannel.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/010_motion_correction_zstack_caiman_multichannel.py deleted file mode 100644 index fae38c7..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/010_motion_correction_zstack_caiman_multichannel.py +++ /dev/null @@ -1,107 +0,0 @@ -import sys -sys.path.extend([r"E:\data\github_packages\CaImAn"]) - -import caiman as cm -import numpy as np -import os -from caiman.motion_correction import MotionCorrect, tile_and_correct, motion_correction_piecewise -import tifffile as tf -import h5py -import warnings -from multiprocessing import Pool - -base_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180605-M391355-2p\zstack\zstack_zoom2" - -reference_chn = 'green' - -n_processes = 5 - -def correct_single_movie(folder_path): - - #=======================================setup parameters============================================== - # number of iterations for rigid motion correction - niter_rig = 5 - - # maximum allowed rigid shift in pixels (view the movie to get a sense of motion) - max_shifts = (30, 30) - - # for parallelization split the movies in num_splits chunks across time - # if none all the splits are processed and the movie is saved - splits_rig = 56 - - # intervals at which patches are laid out for motion correction - # num_splits_to_process_rig = None - - # create a new patch every x pixels for pw-rigid correction - strides = (48, 48) - - # overlap between patches (size of patch strides+overlaps) - overlaps = (24, 24) - - # for parallelization split the movies in num_splits chunks across time - splits_els = 56 - - # num_splits_to_process_els = [28, None] - - # upsample factor to avoid smearing when merging patches - upsample_factor_grid = 4 - - # maximum deviation allowed for patch with respect to rigid shifts - max_deviation_rigid = 3 - - # if True, apply shifts fast way (but smoothing results) by using opencv - shifts_opencv = True - - # if True, 
make the SAVED movie and template mostly nonnegative by removing min_mov from movie - nonneg_movie = False - # =======================================setup parameters============================================== - - - offset_mov = 0. - - file_path = [f for f in os.listdir(folder_path) if f[-4:] == '.tif'] - if len(file_path) == 0: - raise LookupError('no tif file found in folder: {}'.format(folder_path)) - elif len(file_path) > 1: - raise LookupError('more than one tif files found in folder: {}'.format(folder_path)) - else: - file_path = os.path.join(folder_path, file_path[0]) - - # create a motion correction object - mc = MotionCorrect(file_path, offset_mov, - dview=None, max_shifts=max_shifts, niter_rig=niter_rig, - splits_rig=splits_rig, strides=strides, overlaps=overlaps, - splits_els=splits_els, upsample_factor_grid=upsample_factor_grid, - max_deviation_rigid=max_deviation_rigid, - shifts_opencv=shifts_opencv, nonneg_movie=nonneg_movie) - - mc.motion_correct_rigid(save_movie=True) - # load motion corrected movie - m_rig = cm.load(mc.fname_tot_rig) - m_rig = m_rig.astype(np.int16) - save_name = os.path.splitext(file_path)[0] + '_corrected.tif' - tf.imsave(os.path.join(folder_path, save_name), m_rig) - tf.imsave(os.path.join(folder_path, 'corrected_mean_projection.tif'), - np.mean(m_rig, axis=0).astype(np.float32)) - tf.imsave(os.path.join(folder_path, 'corrected_max_projection.tif'), - np.max(m_rig, axis=0).astype(np.float32)) - - offset_f = h5py.File(os.path.join(folder_path, 'correction_offsets.hdf5')) - offsets = mc.shifts_rig - offsets = np.array([np.array(o) for o in offsets]).astype(np.float32) - offset_dset = offset_f.create_dataset(name='file_0000', data=offsets) - offset_dset.attrs['format'] = 'height, width' - offset_dset.attrs['path'] = file_path - - os.remove(mc.fname_tot_rig[0]) - - -if __name__ == '__main__': - data_folder = os.path.join(base_folder, reference_chn) - chunk_p = Pool(n_processes) - folder_list = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f))] - folder_list.sort() - print('\n'.join(folder_list)) - folder_list = [os.path.join(data_folder, f) for f in folder_list] - chunk_p.map(correct_single_movie, folder_list) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/015_apply_offsets_multichannel.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/015_apply_offsets_multichannel.py deleted file mode 100644 index dc91988..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/015_apply_offsets_multichannel.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import stia.motion_correction as mc -import numpy as np -import h5py -import tifffile as tf -from multiprocessing import Pool - -def run(): - data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180605-M391355-2p\zstack\zstack_zoom2" - ref_ch_n = 'green' - apply_ch_ns = ['green', 'red'] - n_process = 5 - - step_ns = [f for f in os.listdir(os.path.join(data_folder, ref_ch_n))] - step_ns = [f for f in step_ns if os.path.isdir(os.path.join(data_folder, ref_ch_n, f))] - step_ns.sort() - print('\n'.join(step_ns)) - - chunk_p = Pool(n_process) - - for ch_n in apply_ch_ns: - mc_params = [] - for step_n in step_ns: - movie_path = os.path.join(data_folder, ch_n, step_n, step_n + '.tif') - offsets_path = os.path.join(data_folder, ref_ch_n, step_n, 'correction_offsets.hdf5') - mc_params.append((movie_path, 
offsets_path)) - # print(mc_params) - - chunk_p.map(apply_offset_single, mc_params) - - -def apply_offset_single(params): - - movie_path, offsets_path = params - - offsets_f = h5py.File(offsets_path) - ref_path = offsets_f['file_0000'].attrs['path'] - offsets_f.close() - - mc.apply_correction_offsets(offsets_path=offsets_path, - path_pairs=[[ref_path, movie_path]], - output_folder=os.path.split(os.path.realpath(movie_path))[0], - process_num=1, - fill_value=0., - avi_downsample_rate=None, - is_equalizing_histogram=False) - - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/020_get_zstack_from_server.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/020_get_zstack_from_server.py deleted file mode 100644 index 8d4d7c5..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/020_get_zstack_from_server.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import numpy as np -import tifffile as tf - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \ - r"\180605-M391355-2p\zstack\zstack_zoom2" - -chns = ['green', 'red'] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -for chn in chns: - print('processing channel: {} ...'.format(chn)) - ch_folder = os.path.join(data_folder, chn) - steps = [f for f in os.listdir(ch_folder) if os.path.isdir(os.path.join(ch_folder, f)) and f[0:5] == 'step_'] - steps.sort() - print('\n'.join(steps)) - - zstack = [] - for step in steps: - zstack.append(tf.imread(os.path.join(ch_folder, step, 'corrected_mean_projection.tif'))) - - zstack = np.array(zstack) - save_n = os.path.split(data_folder)[1] + '_' + chn + '.tif' - tf.imsave(save_n, zstack) diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/030_get_fine_zstack.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/030_get_fine_zstack.py deleted file mode 100644 index 147bb6c..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/030_get_fine_zstack.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import h5py -import numpy as np -import tifffile as tf -import stia.motion_correction as mc -import stia.utility.image_analysis as ia - -zstack_fn = 'FOV2_projection_site_zstack_red.tif' - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -zstack = tf.imread(zstack_fn) - -step_offsets = [[0., 0.]] # offsets between adjacent steps - -print('calculating step offsets ...') -for step_i in range(1, zstack.shape[0]): - curr_offset = mc.phase_correlation(zstack[step_i], zstack[step_i - 1]) - step_offsets.append(curr_offset) -step_offsets = np.array([np.array(so) for so in step_offsets], dtype=np.float32) -print('\nstep offsets:') -print(step_offsets) - -print('\ncalculating final offsets ...') -final_offsets_y = np.cumsum(step_offsets[:, 0]) -final_offsets_x = np.cumsum(step_offsets[:, 1]) -final_offsets = np.array([final_offsets_x, final_offsets_y], dtype=np.float32).transpose() -print('\nfinal offsets:') -print(final_offsets) - -print('applying final offsets ...') - -zstack_f = [] # fine zstack - -for step_i in range(zstack.shape[0]): - - curr_offset = final_offsets[step_i] - - frame = zstack[step_i] - frame_f = ia.rigid_transform_cv2_2d(frame, offset=curr_offset, fill_value=0.).astype(np.float32) - zstack_f.append(frame_f) - -zstack_f = np.array(zstack_f, 
dtype=np.float32) - -tf.imsave(os.path.splitext(zstack_fn)[0] + '_aligned.tif', zstack_f) -tf.imsave(os.path.splitext(zstack_fn)[0] + '_max_projection.tif', np.max(zstack_f, axis=0)) - - diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/040_combine_channels.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/040_combine_channels.py deleted file mode 100644 index 704e5d4..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/040_combine_channels.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import cv2 - -fn_lst = ['FOV2_projection_site_zstack_green_aligned.tif', - 'FOV2_projection_site_zstack_red_aligned.tif'] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -mov = [] - -for fn in fn_lst: - curr_mov = tf.imread(fn).astype(np.float32) - - curr_mov_adjust = [] - # histogram equalization - for frame in curr_mov: - # display_frame = (frame - np.amin(frame)) / (np.amax(frame) - np.amin(frame)) - # display_frame = (display_frame * 255).astype(np.uint8) - # display_frame = cv2.equalizeHist(display_frame) - - display_frame = frame - np.mean(frame[:]) - curr_mov_adjust.append(display_frame) - - curr_mov_adjust = np.array(curr_mov_adjust) - mov.append(curr_mov_adjust) - -mov = np.concatenate(mov, axis=2) - -tf.imsave('zstack_aligned_combined.tif', mov) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/050_get_2p_vas_maps.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/050_get_2p_vas_maps.py deleted file mode 100644 index 359d533..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/050_get_2p_vas_maps.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import matplotlib.pyplot as plt - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180605-M391355-2p\vasmap_2p" -file_ns = ["vasmap_2p_zoom2_00001_00001.tif"] - -save_name = 'vasmap_2p_zoom2' - -channels = ['green', 'red'] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -vasmaps = {} -for chn in channels: - vasmaps.update({chn: []}) - -for file_n in file_ns: - print(file_n) - - curr_vasmap = tf.imread(os.path.join(data_folder, file_n)) - - for ch_i, ch_n in enumerate(channels): - curr_vasmap_ch = curr_vasmap[ch_i::len(channels)] - vasmaps[ch_n].append(curr_vasmap_ch.transpose((0, 2, 1))[:, ::-1, :]) - # print(curr_vasmap_ch.shape) - -for ch_n, ch_vasmap in vasmaps.items(): - save_vasmap = np.concatenate(ch_vasmap, axis=0) - # print(save_vasmap.shape) - save_vasmap = ia.array_nor(np.mean(save_vasmap, axis=0)) - # print(save_vasmap.shape) - tf.imsave('{}_{}.tif'.format(save_name, ch_n), save_vasmap.astype(np.float32)) diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/060_get_wf_vas_maps.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/060_get_wf_vas_maps.py deleted file mode 100644 index 7a2ab89..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/060_get_wf_vas_maps.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import numpy as np -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -import tifffile as tf - - -vas_map_paths= 
[r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180605-M391355-2p\vasmap_wf\180605JCamF100"] - -saveFolder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(saveFolder) - -vas_maps = [] - -for vas_map_path in vas_map_paths: - - vas_map_focused, _, _ = ft.importRawJCamF(vas_map_path, column=1024, row=1024, headerLength = 116, - tailerLength=452) - vas_map_focused = vas_map_focused[2:] - vas_map_focused = vas_map_focused[:, ::-1, :] - vas_map_focused[vas_map_focused > 50000] = 400 - vas_map_focused = np.mean(vas_map_focused, axis=0) - vas_maps.append(ia.array_nor(vas_map_focused)) - -vas_map = ia.array_nor(np.mean(vas_maps, axis=0)) - -tf.imsave('vas_map_focused_wf_green.tif', vas_map.astype(np.float32)) \ No newline at end of file diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/deepscope/000_reorganize_files.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/deepscope/000_reorganize_files.py deleted file mode 100644 index ccd9742..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/deepscope/000_reorganize_files.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\181102-M412052-deepscope\zstack2" -file_identifier = 'zstack2' -ch_ns = ['red'] -frames_per_step = 200 -is_rotate = True - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -file_ns = [f for f in os.listdir(data_folder) if f[-4:] == '.tif' and file_identifier in f] -file_ns.sort() -print('\n'.join(file_ns)) - -save_folders = [] -for ch_n in ch_ns: - curr_save_folder = os.path.join(data_folder, file_identifier, ch_n) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - save_folders.append(curr_save_folder) - -curr_step = 0 - -for file_n in file_ns: - curr_mov = tf.imread(os.path.join(data_folder, file_n)) - - # reorient movie - if is_rotate: - h_new = int(curr_mov.shape[1] * np.sqrt(2)) - w_new = int(curr_mov.shape[2] * np.sqrt(2)) - curr_mov = ia.rigid_transform_cv2(curr_mov, rotation=140, outputShape=(h_new, w_new))[:, :, ::-1] - - curr_frame_num = curr_mov.shape[0] / len(ch_ns) - - if curr_frame_num % frames_per_step != 0: - raise ValueError('{}: total frame number is not divisible by frames per step.'.format(file_n)) - - curr_mov_chs = [] - for ch_i in range(len(ch_ns)): - curr_mov_chs.append(curr_mov[ch_i::len(ch_ns)]) - - steps = curr_frame_num // frames_per_step - for step_ind in range(steps): - - print ('current step: {}'.format(curr_step)) - - for ch_i in range(len(ch_ns)): - curr_step_mov_ch = curr_mov_chs[ch_i][step_ind * frames_per_step:(step_ind + 1) * frames_per_step,:,:] - curr_step_n = 'step_' + ft.int2str(curr_step, 4) - curr_step_folder = os.path.join(save_folders[ch_i], curr_step_n) - os.mkdir(curr_step_folder) - tf.imsave(os.path.join(curr_step_folder, curr_step_n + '.tif'), curr_step_mov_ch) - - curr_step += 1 diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/scientifica/000_reorganize_files.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/scientifica/000_reorganize_files.py deleted file mode 100644 index 3bb77ea..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/scientifica/000_reorganize_files.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -import 
numpy as np -import tifffile as tf -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180816-M376019-zstack" -file_identifier = 'zstack_2p_zoom2' -ch_ns = ['green', 'red'] -frames_per_step = 300 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -file_ns = [f for f in os.listdir(data_folder) if f[-4:] == '.tif' and file_identifier in f] -file_ns.sort() -print('\n'.join(file_ns)) - -save_folders = [] -for ch_n in ch_ns: - curr_save_folder = os.path.join(data_folder, file_identifier, ch_n) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - save_folders.append(curr_save_folder) - -curr_step = 0 - -for file_n in file_ns: - curr_mov = tf.imread(os.path.join(data_folder, file_n)) - - # reorient movie - curr_mov = ia.rigid_transform_cv2_2d(curr_mov[:, ::-1, :], rotation=135) - - curr_frame_num = curr_mov.shape[0] / len(ch_ns) - - if curr_frame_num % frames_per_step != 0: - raise ValueError('{}: total frame number is not divisible by frames per step.'.format(file_n)) - - curr_mov_chs = [] - for ch_i in range(len(ch_ns)): - curr_mov_chs.append(curr_mov[ch_i::len(ch_ns)]) - - steps = curr_frame_num // frames_per_step - for step_ind in range(steps): - - print ('current step: {}'.format(curr_step)) - - for ch_i in range(len(ch_ns)): - curr_step_mov_ch = curr_mov_chs[ch_i][step_ind * frames_per_step:(step_ind + 1) * frames_per_step,:,:] - curr_step_n = 'step_' + ft.int2str(curr_step, 4) - curr_step_folder = os.path.join(save_folders[ch_i], curr_step_n) - os.mkdir(curr_step_folder) - tf.imsave(os.path.join(curr_step_folder, curr_step_n + '.tif'), curr_step_mov_ch) - - curr_step += 1 diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/sutter_regular_2p/000_reorganize_files.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/sutter_regular_2p/000_reorganize_files.py deleted file mode 100644 index 354c8c2..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/sutter_regular_2p/000_reorganize_files.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.FileTools as ft - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180816-M376019-zstack" -file_identifier = 'zstack_2p_zoom2' -ch_ns = ['green', 'red'] -frames_per_step = 300 - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -file_ns = [f for f in os.listdir(data_folder) if f[-4:] == '.tif' and file_identifier in f] -file_ns.sort() -print('\n'.join(file_ns)) - -save_folders = [] -for ch_n in ch_ns: - curr_save_folder = os.path.join(data_folder, file_identifier, ch_n) - if not os.path.isdir(curr_save_folder): - os.makedirs(curr_save_folder) - save_folders.append(curr_save_folder) - -curr_step = 0 - -for file_n in file_ns: - curr_mov = tf.imread(os.path.join(data_folder, file_n)) - - # reorient movie - curr_mov = curr_mov.transpose((0, 2, 1))[:, ::-1, :] - - curr_frame_num = curr_mov.shape[0] / len(ch_ns) - - if curr_frame_num % frames_per_step != 0: - raise ValueError('{}: total frame number is not divisible by frames per step.'.format(file_n)) - - curr_mov_chs = [] - for ch_i in range(len(ch_ns)): - curr_mov_chs.append(curr_mov[ch_i::len(ch_ns)]) - - steps = curr_frame_num // frames_per_step - for step_ind in range(steps): - - print ('current step: 
{}'.format(curr_step)) - - for ch_i in range(len(ch_ns)): - curr_step_mov_ch = curr_mov_chs[ch_i][step_ind * frames_per_step:(step_ind + 1) * frames_per_step,:,:] - curr_step_n = 'step_' + ft.int2str(curr_step, 4) - curr_step_folder = os.path.join(save_folders[ch_i], curr_step_n) - os.mkdir(curr_step_folder) - tf.imsave(os.path.join(curr_step_folder, curr_step_n + '.tif'), curr_step_mov_ch) - - curr_step += 1 diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/sutter_regular_2p/050_get_2p_vas_maps.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/sutter_regular_2p/050_get_2p_vas_maps.py deleted file mode 100644 index 2da226a..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/sutter_regular_2p/050_get_2p_vas_maps.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import numpy as np -import tifffile as tf -import corticalmapping.core.ImageAnalysis as ia -import matplotlib.pyplot as plt - - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180816-M376019-zstack" -file_ns = ["vasmap_2p_zoom1_00001_00001.tif", - "vasmap_2p_zoom1_00002_00001.tif", - "vasmap_2p_zoom1_00003_00001.tif"] - -save_name = 'vasmap_2p_zoom1' - -channels = ['green', 'red'] - -curr_folder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(curr_folder) - -vasmaps = {} -for chn in channels: - vasmaps.update({chn: []}) - -for file_n in file_ns: - print(file_n) - - curr_vasmap = tf.imread(os.path.join(data_folder, file_n)) - - for ch_i, ch_n in enumerate(channels): - curr_vasmap_ch = curr_vasmap[ch_i::len(channels)] - vasmaps[ch_n].append(curr_vasmap_ch.transpose((0, 2, 1))[:, ::-1, :]) - # print(curr_vasmap_ch.shape) - -for ch_n, ch_vasmap in vasmaps.items(): - save_vasmap = np.concatenate(ch_vasmap, axis=0) - # print(save_vasmap.shape) - save_vasmap = ia.array_nor(np.mean(save_vasmap, axis=0)) - # print(save_vasmap.shape) - tf.imsave('{}_{}.tif'.format(save_name, ch_n), save_vasmap.astype(np.float32)) diff --git a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/sutter_regular_2p/060_get_wf_vas_maps.py b/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/sutter_regular_2p/060_get_wf_vas_maps.py deleted file mode 100644 index 538d884..0000000 --- a/corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/sutter_regular_2p/060_get_wf_vas_maps.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import numpy as np -import corticalmapping.core.FileTools as ft -import corticalmapping.core.ImageAnalysis as ia -import tifffile as tf - -data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project\180816-M376019-zstack" - -vas_map_fns= ["180816JCamF100",] - -saveFolder = os.path.dirname(os.path.realpath(__file__)) -os.chdir(saveFolder) - -vas_map_paths = [os.path.join(data_folder, f) for f in vas_map_fns] - -vas_maps = [] - -for vas_map_path in vas_map_paths: - - # if not work, try: tailerLength=218 - vas_map_focused, _, _ = ft.importRawJCamF(vas_map_path, column=1024, row=1024, headerLength = 116, - tailerLength=452) - vas_map_focused = vas_map_focused[2:] - vas_map_focused = vas_map_focused[:, ::-1, :] - vas_map_focused[vas_map_focused > 50000] = 400 - vas_map_focused = np.mean(vas_map_focused, axis=0) - vas_maps.append(ia.array_nor(vas_map_focused)) - -vas_map = ia.array_nor(np.mean(vas_maps, axis=0)) - -tf.imsave('vas_map_focused_wf_green.tif', vas_map.astype(np.float32)) \ No newline at end of file
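Note on the retired pipeline above: the three deleted 000_reorganize_files.py variants (deepscope, scientifica, sutter_regular_2p) differ only in how they reorient the raw movie; the channel de-interleaving and per-step saving logic is identical across them. A minimal sketch of that shared pattern, assuming frame-interleaved channels; split_zstack, save_root and start_step are illustrative names, not helpers from this repository:

import os
import tifffile as tf

def split_zstack(mov, ch_ns, frames_per_step, save_root, start_step=0):
    # mov: (n_frames, height, width) stack with channels interleaved frame by frame.
    frames_per_ch = mov.shape[0] // len(ch_ns)
    if frames_per_ch % frames_per_step != 0:
        raise ValueError('total frame number is not divisible by frames per step.')
    curr_step = start_step
    for step_ind in range(frames_per_ch // frames_per_step):
        for ch_i, ch_n in enumerate(ch_ns):
            mov_ch = mov[ch_i::len(ch_ns)]  # every len(ch_ns)-th frame belongs to this channel
            chunk = mov_ch[step_ind * frames_per_step:(step_ind + 1) * frames_per_step]
            step_n = 'step_{:04d}'.format(curr_step)
            step_folder = os.path.join(save_root, ch_n, step_n)
            os.makedirs(step_folder)
            tf.imsave(os.path.join(step_folder, step_n + '.tif'), chunk)
        curr_step += 1
    return curr_step  # returned so a caller can carry the step count across files

A caller would loop over the sorted TIFF files, apply the rig-specific reorientation (rotation, transpose, or flip), and pass each movie through split_zstack, chaining curr_step from file to file as the original scripts did.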
diff --git a/corticalmapping/scripts/post_recording/00_old/batch_analyzeDetrend.py b/corticalmapping/scripts/post_recording/batch_analyzeDetrend.py similarity index 100% rename from corticalmapping/scripts/post_recording/00_old/batch_analyzeDetrend.py rename to corticalmapping/scripts/post_recording/batch_analyzeDetrend.py diff --git a/corticalmapping/scripts/recording/batch_displayKSstimAllDir.py b/corticalmapping/scripts/recording/batch_displayKSstimAllDir.py index 5ffec14..d8e3ed0 100644 --- a/corticalmapping/scripts/recording/batch_displayKSstimAllDir.py +++ b/corticalmapping/scripts/recording/batch_displayKSstimAllDir.py @@ -1,30 +1,36 @@ import matplotlib.pyplot as plt -import corticalmapping.VisualStim as vs +import CorticalMapping.corticalmapping.VisualStim as vs -mouseID = 'TEST' #'147861' #'TEST' -userID = 'Jun' -numOfTrials = 2 # 20 +mouseID = '326981' #'147861' #'TEST' +userID = 'Natalia' +numOfTrials = 40 # 20 logFolder = r'C:\data' - +isTriggered = False +isRemoteSync = False +psychopyMonitor = 'smartTVgamma' +backupFolder = r'\\W7DTMJ03jgl2\data' +remoteSyncIP = 'w7dtmj19vtx' +remoteSyncPort = 11001 +syncOutputFolder = None mon=vs.Monitor(resolution=(1080, 1920), - dis=15.3, - monWcm=88.8, - monHcm=50.1, - C2Tcm=31.1, - C2Acm=41.91, - monTilt=26.56, + dis=9.5, + monWcm=52.0, + monHcm=30.25, + C2Tcm=15.125, + C2Acm=25.5, + monTilt=0.0, downSampleRate=5) #mon.plot_map() #plt.show() indicator=vs.Indicator(mon, - width_cm=3., - height_cm=3., + width_cm=6., + height_cm=6., position = 'southeast', isSync=True, freq=1.) @@ -44,14 +50,14 @@ ds = vs.DisplaySequence(logdir=logFolder, backupdir=None, displayIteration=numOfTrials, - psychopyMonitor='testMonitor', + psychopyMonitor=psychopyMonitor, displayOrder=1, mouseid=mouseID, userid=userID, isInterpolate=False, isRemoteSync=False, - remoteSyncIP='localhost', - remoteSyncPort=10003, + remoteSyncIP=remoteSyncIP, + remoteSyncPort=remoteSyncPort, remoteSyncTriggerEvent="positiveEdge", remoteSyncSaveWaitTime=5., isTriggered=False, @@ -63,7 +69,7 @@ syncPulseNIDev='Dev1', syncPulseNIPort=1, syncPulseNILine=1, - displayScreen=0, + displayScreen=1, initialBackgroundColor=0., isVideoRecord=False, videoRecordIP='w7dtmj007lhu',
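In the batch_displayKSstimAllDir.py hunks above, the new top-level assignments (isTriggered, isRemoteSync, backupFolder, syncOutputFolder) are introduced, but the DisplaySequence call still hardcodes isTriggered=False and isRemoteSync=False, so those flags are currently unused. A sketch of the presumably intended wiring, using only keyword arguments that already appear in the call; whether DisplaySequence also accepts a syncOutputFolder argument is not shown in this diff, so it is left out:

# Sketch only: pass the module-level flags through instead of hardcoding them.
ds = vs.DisplaySequence(logdir=logFolder,
                        backupdir=backupFolder,      # was backupdir=None
                        displayIteration=numOfTrials,
                        psychopyMonitor=psychopyMonitor,
                        displayOrder=1,
                        mouseid=mouseID,
                        userid=userID,
                        isInterpolate=False,
                        isRemoteSync=isRemoteSync,   # was hardcoded to False
                        remoteSyncIP=remoteSyncIP,
                        remoteSyncPort=remoteSyncPort,
                        isTriggered=isTriggered,     # was hardcoded to False
                        displayScreen=1,
                        initialBackgroundColor=0.,
                        isVideoRecord=False)
# remaining keyword arguments (trigger NI lines, sync pulse settings,
# videoRecordIP, etc.) unchanged from the script above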
diff --git a/corticalmapping/setup.py b/corticalmapping/setup.py new file mode 100644 index 0000000..1a37804 --- /dev/null +++ b/corticalmapping/setup.py @@ -0,0 +1,74 @@ +__author__ = 'junz' + +from setuptools import setup, find_packages +from setuptools.command.test import test as TestCommand +import io +import os +import sys + +here = os.path.abspath(os.path.dirname(__file__)) + +def read(*filenames, **kwargs): + encoding = kwargs.get('encoding', 'utf-8') + sep = kwargs.get('sep', '\n') + buf = [] + for filename in filenames: + with io.open(filename, encoding=encoding) as f: + buf.append(f.read()) + return sep.join(buf) + +long_description = read('README.md') + +def prepend_find_packages(*roots): + ''' + Recursively traverse nested packages under the root directories + ''' + packages = [] + + for root in roots: + packages += [root] + packages += [root + '.' + s for s in find_packages(root)] + + return packages + +class PyTest(TestCommand): + def finalize_options(self): + TestCommand.finalize_options(self) + self.test_args = ['--junitxml=result.xml'] + self.test_args_cov = self.test_args + ['--cov=corticalmapping', '--cov-report=term', '--cov-report=html'] + self.test_suite = True + + def run_tests(self): + import pytest + + try: + errcode = pytest.main(self.test_args_cov) + except Exception: + errcode = pytest.main(self.test_args) + sys.exit(errcode) + +setup( + name='corticalmapping', + version = '2.0.0', + url='http://stash.corp.alleninstitute.org/users/junz/repos/corticalmapping/', + author='Jun Zhuang', + tests_require=['pytest'], + install_requires=['numpy','scipy','opencv-python','PyDAQmx','scikit-image','tifffile'], + cmdclass={'test': PyTest}, + author_email='junz@alleninstitute.org', + description='cortical mapping tools', + long_description=long_description, + packages=prepend_find_packages('corticalmapping'), + include_package_data=True, + package_data={'':['*.md', '*.txt', '*.cfg', '*.hdf5']}, + platforms='any', + classifiers = [ 'Programming Language :: Python', 'Development Status :: 4 - Beta', 'Natural Language :: English', 'Operating System :: OS Independent', ], + extras_require={ 'testing': ['pytest'], } +) \ No newline at end of file
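prepend_find_packages in the new setup.py exists because find_packages(root) returns package names relative to root ('core' rather than 'corticalmapping.core'); the helper re-prefixes each name so setuptools receives fully qualified packages. A quick illustration, assuming the function is defined as above and the working directory is the repository root; the subpackage list shown is illustrative and depends on the checkout:

print(prepend_find_packages('corticalmapping'))
# e.g. ['corticalmapping', 'corticalmapping.core', 'corticalmapping.ephys', ...]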
diff --git a/corticalmapping/test/data/test.hdf5 b/corticalmapping/test/data/test.hdf5 index f02a396..98c4300 100644 Binary files a/corticalmapping/test/data/test.hdf5 and b/corticalmapping/test/data/test.hdf5 differ diff --git a/corticalmapping/test/test_ImageAnalysis.py b/corticalmapping/test/test_ImageAnalysis.py index 4f5c1b1..63d8db0 100644 --- a/corticalmapping/test/test_ImageAnalysis.py +++ b/corticalmapping/test/test_ImageAnalysis.py @@ -30,6 +30,7 @@ def test_getTrace(self): trace4 = ia.get_trace(mov, mask4, maskMode='weightedNan') assert(trace4[2] == 58) + def test_ROI_binary_overlap(self): roi1 = np.zeros((10, 10)) roi1[4:8, 3:7] = 1 @@ -39,99 +40,7 @@ def test_ROI_binary_overlap(self): roi2 = ia.ROI(roi2) assert(roi1.binary_overlap(roi2) == 6) - def test_ROI(self): - a = np.zeros((10, 10)) - a[5:7, 3:6] = 1 - a[8:9, 7:10] = np.nan - roi = ia.ROI(a) - # plt.imshow(roi.get_binary_mask(),interpolation='nearest') - assert (list(roi.get_center()) == [5.5, 4.]) - - def test_ROI_getBinaryTrace(self): - mov = np.random.rand(5, 4, 4) - mask = np.zeros((4, 4)) - mask[2, 3] = 1 - trace1 = mov[:, 2, 3] - roi = ia.ROI(mask) - trace2 = roi.get_binary_trace(mov) - assert (np.array_equal(trace1, trace2)) - - def test_WeigthedROI_getWeightedCenter(self): - aa = np.random.rand(5, 5); - mask = np.zeros((5, 5)) - mask[2, 3] = aa[2, 3]; - mask[1, 4] = aa[1, 4]; - mask[3, 4] = aa[3, 4] - roi = ia.WeightedROI(mask) - center = roi.get_weighted_center() - assert (center[0] == (2 * aa[2, 3] + 1 * aa[1, 4] + 3 * aa[3, 4]) / (aa[2, 3] + aa[1, 4] + aa[3, 4])) - - def test_plot_ROIs(self): - aa = np.zeros((50, 50)); - aa[15:20, 30:35] = np.random.rand(5, 5) - roi1 = ia.ROI(aa) - _ = roi1.plot_binary_mask_border(); - _ = roi1.plot_binary_mask() - roi2 = ia.WeightedROI(aa) - _ = roi2.plot_binary_mask_border(); - _ = roi2.plot_binary_mask(); - _ = roi2.plot_weighted_mask() - - def test_WeightedROI_getWeightedCenterInCoordinate(self): - aa = np.zeros((5, 5)); - aa[1:3, 2:4] = 0.5 - roi = ia.WeightedROI(aa) - assert (list(roi.get_weighted_center_in_coordinate(range(2, 7), range(1, 6))) == [3.5, 3.5]) - - def test_mergeROIs(self): - - import corticalmapping.core.ImageAnalysis as ia - roi1 = ia.WeightedROI(np.arange(9).reshape((3, 3))) - roi2 = ia.WeightedROI(np.arange(1, 10).reshape((3, 3))) - - merged_ROI = ia.merge_weighted_rois(roi1, roi2) - merged_ROI2 = ia.merge_binary_rois(roi1, roi2) - - assert (np.array_equal(merged_ROI.get_weighted_mask(), np.arange(1, 18, 2).reshape((3, 3)))) - assert (np.array_equal(merged_ROI2.get_binary_mask(), np.ones((3, 3)))) - - def test_get_circularity(self): - aa = np.zeros((10, 10)) - aa[3:5, 3:5] = 1 - cir1 = ia.get_circularity(aa, is_skimage=False) - # print(cir1) - assert(0.7853981633974483 - 1e-15 < cir1 < 0.7853981633974483 + 1e-15) - - print(ia.get_circularity(aa, is_skimage=True)) - - aa[3:5, 5] = 1 - cir2 = ia.get_circularity(aa, is_skimage=False) - # print(cir2) - assert (0.7539822368615503 - 1e-15 < cir2 < 0.7539822368615503 + 1e-15) - - def test_fit_ellipse(self): - - mask = np.zeros((100, 100), dtype=np.uint8) - mask[20:50, 30:80] = 255 - mask[40:50, 70:80] = 0 - mask[20:30, 30:40] = 0 - - # import matplotlib.pyplot as plt - # f = plt.figure(figsize=(4, 4)) - # ax = f.add_subplot(111) - # ax.set_aspect('equal') - # ax.imshow(mask, interpolation='nearest') - # plt.show() - - ell = ia.fit_ellipse(mask) - print(ell.info()) - - assert((np.round(ell.angle * 100) / 100) % 180. == 44.16) - import cv2 - img = np.array([mask, mask, mask]).transpose((1, 2, 0)).copy() - img = ell.draw(img=img, thickness=1) - img = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB) - import matplotlib.pyplot as plt - plt.imshow(img, interpolation='nearest') - plt.show() \ No newline at end of file +if __name__ == "__main__": + TestImageAnalysis().test_getTrace() + TestImageAnalysis().test_ROI_binary_overlap() \ No newline at end of file diff --git a/corticalmapping/test/test_SingleCellAnalysis.py b/corticalmapping/test/test_SingleCellAnalysis.py index 2fce240..26abee7 100644 --- a/corticalmapping/test/test_SingleCellAnalysis.py +++ b/corticalmapping/test/test_SingleCellAnalysis.py @@ -1,3 +1,8 @@ +import corticalmapping.core.ImageAnalysis + +__author__ = 'junz' + + import os import h5py import numpy as np @@ -16,7 +21,7 @@ STRFDataPath = os.path.join(testDataFolder,'cellsSTRF.hdf5') -print sparseNoiseDisplayLogPath +print(sparseNoiseDisplayLogPath) def test_mergeROIs(): roi1 = corticalmapping.core.ImageAnalysis.WeightedROI(np.arange(9).reshape((3, 3))) roi2 = corticalmapping.core.ImageAnalysis.WeightedROI(np.arange(1, 10).reshape((3, 3))) @@ -41,6 +46,37 @@ def test_SpatialTemporalReceptiveField_from_h5_group(): assert((float(trace[4, 8])+0.934942364693) < 1e-10) # STRF.plot_traces(figSize=(15,10),yRange=[-5,50],columnSpacing=0.002,rowSpacing=0.002) +def test_ROI(): + a = np.zeros((10,10)) + a[5:7,3:6]=1 + a[8:9,7:10]=np.nan + roi = corticalmapping.core.ImageAnalysis.ROI(a) + # plt.imshow(roi.get_binary_mask(),interpolation='nearest') + assert(list(roi.get_center()) == [5.5, 4.]) + +def test_ROI_getBinaryTrace(): + mov = np.random.rand(5,4,4); mask = np.zeros((4,4)); mask[2,3]=1; trace1 = mov[:,2,3] + roi = corticalmapping.core.ImageAnalysis.ROI(mask); trace2 = roi.get_binary_trace(mov) + assert(np.array_equal(trace1,trace2)) + +def test_WeightedROI_getWeightedCenter(): + aa = np.random.rand(5,5); mask = np.zeros((5,5)) + mask[2,3]=aa[2,3]; mask[1,4]=aa[1,4]; mask[3,4]=aa[3,4] + roi = corticalmapping.core.ImageAnalysis.WeightedROI(mask); center = roi.get_weighted_center() + assert(center[0] == (2*aa[2,3]+1*aa[1,4]+3*aa[3,4])/(aa[2,3]+aa[1,4]+aa[3,4])) + +def test_plot_ROIs(): + aa = np.zeros((50,50)); aa[15:20,30:35] = np.random.rand(5,5) + roi1 = 
corticalmapping.core.ImageAnalysis.ROI(aa) + _ = roi1.plot_binary_mask_border(); _ = roi1.plot_binary_mask() + roi2 = corticalmapping.core.ImageAnalysis.WeightedROI(aa) + _ = roi2.plot_binary_mask_border(); _ = roi2.plot_binary_mask(); _ = roi2.plot_weighted_mask() + +def test_WeightedROI_getWeightedCenterInCoordinate(): + aa = np.zeros((5,5));aa[1:3,2:4] = 0.5 + roi = corticalmapping.core.ImageAnalysis.WeightedROI(aa) + assert(list(roi.get_weighted_center_in_coordinate(list(range(2, 7)), list(range(1, 6)))) == [3.5, 3.5]) + def test_SpatialTemporalReceptiveField(): locations = [[3.0, 4.0], [3.0, 5.0], [2.0, 4.0], [2.0, 5.0],[3.0, 4.0], [3.0, 5.0], [2.0, 4.0], [2.0, 5.0]] signs = [1,1,1,1,-1,-1,-1,-1] @@ -135,90 +171,8 @@ def test_SpatialReceptiveField_interpolate(): SRF.interpolate(5) assert(SRF.get_weighted_mask().shape == (20, 20)) -def test_get_orientation_properties(): - import pandas as pd - dires = np.arange(8) * 45 - resps = np.ones(8) - resps[2] = 2. - dire_tuning = pd.DataFrame() - dire_tuning['dire'] = dires - dire_tuning['resp_mean'] = resps - # print(dire_tuning) - - OSI_raw, DSI_raw, gOSI_raw, gDSI_raw, OSI_ele, DSI_ele, gOSI_ele, \ - gDSI_ele, OSI_rec, DSI_rec, gOSI_rec, gDSI_rec, peak_dire_raw, vs_dire_raw, \ - vs_dire_ele, vs_dire_rec = sca.DriftingGratingResponseTable.get_dire_tuning_properties(dire_tuning=dire_tuning, - response_dir='pos', - elevation_bias=0.) - - # print('\nOSI_raw: {}'.format(OSI_raw)) - # print('DSI_raw: {}'.format(DSI_raw)) - # print('gOSI_raw: {}'.format(gOSI_raw)) - # print('gDSI_raw: {}'.format(gDSI_raw)) - # print('\nOSI_ele: {}'.format(OSI_ele)) - # print('DSI_ele: {}'.format(DSI_ele)) - # print('gOSI_ele: {}'.format(gOSI_ele)) - # print('gDSI_ele: {}'.format(gDSI_ele)) - # print('\nOSI_rec: {}'.format(OSI_rec)) - # print('DSI_rec: {}'.format(DSI_rec)) - # print('gOSI_rec: {}'.format(gOSI_rec)) - # print('gDSI_rec: {}'.format(gDSI_rec)) - # print('\npeak_dire_raw: {}'.format(peak_dire_raw)) - # print('\nvs_dire_raw: {}'.format(vs_dire_raw)) - # print('vs_orie_raw: {}'.format(vs_orie_raw)) - # print('\nvs_dire_ele: {}'.format(vs_dire_ele)) - - assert (OSI_raw == OSI_ele == OSI_rec == 1. / 3.) - assert (DSI_raw == DSI_ele == DSI_rec == 1. / 3.) - - assert (gOSI_raw == gOSI_ele == gOSI_rec == 1. / 9.) - assert (gDSI_raw == gDSI_ele == gDSI_rec == 1. / 9.) - - assert (peak_dire_raw == int(vs_dire_raw) == int(vs_dire_ele) == int(vs_dire_rec) == 90) - - dire_tuning.loc[6, 'resp_mean'] = -1. - # print(dire_tuning) - - OSI_raw, DSI_raw, gOSI_raw, gDSI_raw, OSI_ele, DSI_ele, gOSI_ele, \ - gDSI_ele, OSI_rec, DSI_rec, gOSI_rec, gDSI_rec, peak_dire_raw, vs_dire_raw, \ - vs_dire_ele, vs_dire_rec = sca.DriftingGratingResponseTable.get_dire_tuning_properties(dire_tuning=dire_tuning, - response_dir='pos', - elevation_bias=0.) 
- - # print('\nOSI_raw: {}'.format(OSI_raw)) - # print('DSI_raw: {}'.format(DSI_raw)) - # print('gOSI_raw: {}'.format(gOSI_raw)) - # print('gDSI_raw: {}'.format(gDSI_raw)) - # print('\nOSI_ele: {}'.format(OSI_ele)) - # print('DSI_ele: {}'.format(DSI_ele)) - # print('gOSI_ele: {}'.format(gOSI_ele)) - # print('gDSI_ele: {}'.format(gDSI_ele)) - # print('\nOSI_rec: {}'.format(OSI_rec)) - # print('DSI_rec: {}'.format(DSI_rec)) - # print('gOSI_rec: {}'.format(gOSI_rec)) - # print('gDSI_rec: {}'.format(gDSI_rec)) - # print('\npeak_dire_raw: {}'.format(peak_dire_raw)) - # print('\nvs_dire_raw: {}'.format(vs_dire_raw)) - # print('\nvs_dire_ele: {}'.format(vs_dire_ele)) - # print('\nvs_dire_rec: {}'.format(vs_dire_rec)) - - assert (OSI_raw == OSI_rec == 1. / 3.) - assert (DSI_raw == 3.0) - assert (DSI_ele == DSI_rec == 1.0) - assert (OSI_ele == 0.2) - assert (gOSI_raw < (1. / 7. + 1E-7)) - assert (gOSI_raw > (1. / 7. - 1E-7)) - assert (gDSI_raw == 3. / 7.) - assert (gOSI_ele < (1. / 15. + 1E-7)) - assert (gOSI_ele > (1. / 15. - 1E-7)) - assert (gDSI_ele == 3. / 15.) - assert (gOSI_rec == 0.) - assert (gDSI_rec == 2. / 8.) - - assert (peak_dire_raw == int(vs_dire_raw) == int(vs_dire_ele) == int(vs_dire_rec) == 90) - plt.show() if __name__ == '__main__': - test_get_orientation_properties() + test_SpatialTemporalReceptiveField_getAmpLitudeMap() diff --git a/corticalmapping/test/test_TimingAnalysis.py b/corticalmapping/test/test_TimingAnalysis.py index 6158f20..aa272c2 100644 --- a/corticalmapping/test/test_TimingAnalysis.py +++ b/corticalmapping/test/test_TimingAnalysis.py @@ -49,8 +49,8 @@ def test_get_event_with_pre_iei(self): assert (np.array_equal(ts5, np.array([2, 6, 8]))) np.random.shuffle(ts3) ts6 = ta.get_event_with_pre_iei(ts3, iei=0.5) - print ts3 - print ts6 + print(ts3) + print(ts6) assert (np.array_equal(ts6, np.array([2, 3, 4, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))) ts7 = ta.get_event_with_pre_iei(ts3, iei=1) assert (np.array_equal(ts7, np.array([2, 6, 8]))) @@ -92,20 +92,6 @@ def test_haramp(self): assert (len(har) == 4) assert (round(1000. * har[1] / har[0]) / 1000. == 2.) - def test_threshold_to_intervals(self): - trace = np.array([2.3, 4.5, 6.7, 5.5, 3.3, 9.2, 4.4, 3.2, 1.0, 0.8, 5.5]) - - intvs1 = ta.threshold_to_intervals(trace=trace, thr=5.0, comparison='>=') - # print(intvs1) - for intv in intvs1: - # print(trace[intv[0]: intv[1]]) - assert(np.min(trace[intv[0]: intv[1]]) >= 5.0) - - intvs2 = ta.threshold_to_intervals(trace=trace, thr=3.0, comparison='<') - for intv in intvs2: - # print(trace[intv[0]: intv[1]]) - assert(np.max(trace[intv[0]: intv[1]]) < 3.0) - if __name__ == '__main__': TestTimingAnalysis.test_get_onset_time_stamps()
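The PyTest command class in the new setup.py runs the suite with JUnit XML output and falls back to a plain run when the coverage plugin is unavailable; python setup.py test reaches it through the cmdclass hook. The equivalent direct invocation, assuming pytest (and optionally pytest-cov) is installed:

import sys
import pytest

# Mirrors PyTest.run_tests in setup.py: try the coverage-enabled run first,
# then fall back to the bare JUnit-XML run.
test_args = ['--junitxml=result.xml']
test_args_cov = test_args + ['--cov=corticalmapping', '--cov-report=term', '--cov-report=html']
try:
    errcode = pytest.main(test_args_cov)
except Exception:
    errcode = pytest.main(test_args)
sys.exit(errcode)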