-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: Create_Mousecam_Widefield_Sample_Video.py
272 lines (199 loc) · 9.72 KB
/
Create_Mousecam_Widefield_Sample_Video.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from matplotlib.colors import Normalize
import h5py
import tables
from scipy import signal, ndimage, stats
import os
import cv2
from datetime import datetime
from matplotlib.colors import LinearSegmentedColormap
from sklearn.decomposition import PCA, FactorAnalysis, TruncatedSVD
from skimage.transform import resize
from scipy.ndimage import gaussian_filter
from tqdm import tqdm
import time
from matplotlib.backends.backend_agg import FigureCanvasAgg
import pickle
from Widefield_Utils import widefield_utils
def load_downsampled_mask(base_directory):
    """Load the session's generous mask, downsample it to the 300 x 304
    widefield frame size, and return the flat indices of in-mask pixels.

    Returns a tuple (mask_indices, frame_height, frame_width).
    """
    raw_mask = np.load(os.path.join(base_directory, "Generous_Mask.npy"))

    # Downsample to the working frame size (nearest-neighbour, values preserved).
    raw_mask = resize(raw_mask, (300, 304), preserve_range=True, order=0, anti_aliasing=True)
    frame_height, frame_width = np.shape(raw_mask)[0], np.shape(raw_mask)[1]

    # Binarise: any pixel above the 0.1 threshold counts as inside the mask.
    binary_mask = np.where(raw_mask > 0.1, 1, 0).astype(int)

    # Flatten and collect the indices of in-mask pixels as a flat int array.
    mask_indices = np.argwhere(np.ndarray.flatten(binary_mask))
    mask_indices = np.ndarray.flatten(mask_indices.astype(int))

    return mask_indices, frame_height, frame_width
def get_delta_f_sample_from_svd(base_directory, sample_start, sample_end, window_size=3):
    """Reconstruct a smoothed delta-F movie from the SVD decomposition.

    Parameters
    ----------
    base_directory : str
        Session directory containing the "Churchland_Preprocessing" folder
        with "Corrected_SVT.npy" (components x time) and "U.npy"
        (height x width x components).
    sample_start, sample_end : int
        Widefield frame range to reconstruct.
    window_size : int
        Number of consecutive frames averaged into each output frame.

    Returns
    -------
    np.ndarray of shape (n_frames, height, width) — temporally averaged,
    spatially smoothed delta-F frames.
    """

    # Load the temporal (SVT) and spatial (U) components.
    corrected_svt = np.load(os.path.join(base_directory, "Churchland_Preprocessing", "Corrected_SVT.npy"))
    print("SVT Shape", np.shape(corrected_svt))
    u = np.load(os.path.join(base_directory, "Churchland_Preprocessing", "U.npy"))
    print("U Shape", np.shape(u))

    # Start the sample window_size frames early (clamped at 0) so every output
    # frame of the moving average has real data, mirroring get_delta_f_sample.
    # Previously only [sample_start:sample_end] was loaded while the loop below
    # ran for (length + window_size) frames, so the trailing windows averaged
    # over empty slices and emitted NaN frames.
    window_start = max(sample_start - window_size, 0)

    # Project temporal components through the spatial components:
    # (height, width, components) . (components, frames) -> (height, width, frames).
    delta_f_sample = np.dot(u, corrected_svt[:, window_start:sample_end])
    print("Delta F Sample", np.shape(delta_f_sample))
    delta_f_sample = np.moveaxis(delta_f_sample, 2, 0)  # -> (frames, height, width)
    print("Delta F Sample", np.shape(delta_f_sample))

    # Moving-average each window of frames, then smooth spatially.
    reconstructed_delta_f = []
    number_of_frames = sample_end - window_start
    for frame_index in range(number_of_frames):
        frame_data = delta_f_sample[frame_index:frame_index + window_size]
        frame_data = np.mean(frame_data, axis=0)
        frame_data = ndimage.gaussian_filter(frame_data, sigma=1)
        reconstructed_delta_f.append(frame_data)

    reconstructed_delta_f = np.array(reconstructed_delta_f)
    return reconstructed_delta_f
def get_delta_f_sample(base_directory, sample_start, sample_end, window_size=3):
    """Load a delta-F sample from the downsampled HDF5 file, denoise it with
    PCA, and reconstruct smoothed 2D frames through the downsampled mask.

    Returns an array of shape (n_frames, height, width).
    """

    # Pull the requested frames, plus a leading window for the moving average.
    sample_path = os.path.join(base_directory, "Downsampled_Delta_F.h5")
    file_container = tables.open_file(sample_path, mode="r")
    raw_sample = file_container.root["Data"][sample_start - window_size:sample_end]
    raw_sample = np.nan_to_num(raw_sample)

    # Denoise by projecting onto the first 150 principal components and back.
    pca_model = PCA(n_components=150)
    raw_sample = pca_model.inverse_transform(pca_model.fit_transform(raw_sample))

    mask_indices, frame_height, frame_width = load_downsampled_mask(base_directory)

    # Moving-average each window, scatter into the 2D frame, and smooth.
    reconstructed_frames = []
    for frame_index in range((sample_end - sample_start) + window_size):
        window_mean = np.mean(raw_sample[frame_index:frame_index + window_size], axis=0)
        frame = np.zeros(frame_height * frame_width)
        frame[mask_indices] = window_mean
        frame = np.reshape(frame, (frame_height, frame_width))
        reconstructed_frames.append(ndimage.gaussian_filter(frame, sigma=1))

    reconstructed_frames = np.array(reconstructed_frames)
    file_container.close()
    return reconstructed_frames
def extract_mousecam_data(video_file, frame_list):
    """Extract the listed frames from a mousecam video as single-channel images.

    Parameters
    ----------
    video_file : str
        Path to the video file.
    frame_list : iterable of int
        Frame numbers to extract; the capture is seeked to frame_number - 1,
        i.e. the numbers are treated as 1-based.

    Returns
    -------
    np.ndarray of shape (n_frames, height, width) — channel 0 of each frame.

    Raises
    ------
    ValueError
        If a requested frame cannot be read from the video.
    """
    # Open Video File
    cap = cv2.VideoCapture(video_file)

    # Extract Selected Frames
    extracted_data = []
    try:
        for frame_number in frame_list:
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number - 1)
            ret, frame = cap.read()
            if not ret or frame is None:
                # Previously an unreadable frame crashed later with an opaque
                # TypeError on None; fail here with a clear message instead.
                raise ValueError(f"Could not read frame {frame_number} from {video_file}")
            # Keep only channel 0 (assumes greyscale content replicated
            # across channels — TODO confirm against the recordings).
            extracted_data.append(frame[:, :, 0])
    finally:
        # Release the capture even if a read fails.
        cap.release()

    extracted_data = np.array(extracted_data)
    return extracted_data
def get_mousecam_sample(base_directory, mousecam_filename, sample_start, sample_end):
    """Return mousecam frames corresponding to widefield frames
    [sample_start, sample_end)."""

    # Mapping from widefield frame index to mousecam frame index.
    # NOTE: "widfield" (sic) matches the file name the pipeline writes.
    dict_path = os.path.join(base_directory, "Stimuli_Onsets", "widfield_to_mousecam_frame_dict.npy")
    widefield_to_mousecam = np.load(dict_path, allow_pickle=True)[()]

    # Translate each widefield frame in the sample to its mousecam counterpart.
    mousecam_frames = [widefield_to_mousecam[widefield_frame]
                       for widefield_frame in range(sample_start, sample_end)]

    # Extract the corresponding mousecam frames.
    return extract_mousecam_data(os.path.join(base_directory, mousecam_filename), mousecam_frames)
def create_sample_video_with_mousecam(delta_f_directory, bodycam_directory, output_directory):
    """Render a side-by-side video of widefield delta-F, bodycam, eyecam and
    running speed.

    Parameters
    ----------
    delta_f_directory : str
        Session directory containing the SVD-preprocessed delta-F data.
    bodycam_directory : str
        Session directory containing the mousecam videos, the
        widefield-to-mousecam frame dictionary and the downsampled AI matrix.
    output_directory : str
        Directory to write "Brain_Behaviour_Video.avi" into.
    """

    sample_start = 5000
    sample_length = 5000
    sample_end = sample_start + sample_length

    # Get Delta F Sample
    print("Getting Delta F Sample", datetime.now())
    delta_f_sample = get_delta_f_sample_from_svd(delta_f_directory, sample_start, sample_end)
    print("Finished Getting Delta F Sample", datetime.now())

    # Get Mousecam Sample.  NOTE: these lookups previously read the
    # module-level global "base_directory" instead of the bodycam_directory
    # parameter, silently ignoring the caller's argument.
    print("Getting Mousecam Sample", datetime.now())
    bodycam_filename = widefield_utils.get_bodycam_filename(bodycam_directory)
    eyecam_filename = widefield_utils.get_eyecam_filename(bodycam_directory)
    bodycam_sample = get_mousecam_sample(bodycam_directory, bodycam_filename, sample_start, sample_end)
    eyecam_sample = get_mousecam_sample(bodycam_directory, eyecam_filename, sample_start, sample_end)
    print("Finished Getting Mousecam Sample", datetime.now())

    # Get Running Trace
    downsampled_ai_matrix = np.load(os.path.join(bodycam_directory, "Downsampled_AI_Matrix_Framewise.npy"))
    stimuli_dictionary = widefield_utils.create_stimuli_dictionary()
    running_trace = downsampled_ai_matrix[stimuli_dictionary["Running"]]

    # Create Colourmaps
    widefield_colourmap = plt.cm.ScalarMappable(norm=Normalize(vmin=-0.05, vmax=0.05), cmap=widefield_utils.get_musall_cmap())
    mousecam_colourmap = plt.cm.ScalarMappable(norm=Normalize(vmin=0, vmax=255), cmap=cm.get_cmap('Greys_r'))

    # Load Mask (background pixels kept in case brain-frame masking is re-enabled)
    indicies, image_height, image_width = widefield_utils.load_tight_mask()
    background_pixels = widefield_utils.get_background_pixels(indicies, image_height, image_width)

    # Create Video File — 1500 x 500 px matches the 15 x 5 inch figure at the
    # default 100 dpi, so the written frames fit the declared frameSize.
    video_name = os.path.join(output_directory, "Brain_Behaviour_Video.avi")
    video_codec = cv2.VideoWriter_fourcc(*'DIVX')
    video = cv2.VideoWriter(video_name, video_codec, frameSize=(1500, 500), fps=30)

    figure_1 = plt.figure(figsize=(15, 5))
    canvas = FigureCanvasAgg(figure_1)
    rows = 1
    columns = 4

    # Loop-invariant running-plot x axis: +/- 50 frames, 36 ms per frame
    # (presumably the widefield frame period — TODO confirm frame rate).
    running_sample_window = 50
    running_x_values = np.multiply(list(range(-running_sample_window, running_sample_window)), 36)

    for frame_index in tqdm(range(sample_length)):

        brain_axis = figure_1.add_subplot(rows, columns, 1)
        body_axis = figure_1.add_subplot(rows, columns, 2)
        eye_axis = figure_1.add_subplot(rows, columns, 3)
        running_axis = figure_1.add_subplot(rows, columns, 4)

        # Extract Frames
        brain_frame = delta_f_sample[frame_index]
        body_frame = bodycam_sample[frame_index]
        eye_frame = eyecam_sample[frame_index]

        # Set Colours
        brain_frame = widefield_colourmap.to_rgba(brain_frame)
        body_frame = mousecam_colourmap.to_rgba(body_frame)
        eye_frame = mousecam_colourmap.to_rgba(eye_frame)

        # Running speed over a window centred on the current frame.
        running_sample_start = frame_index + sample_start - running_sample_window
        running_sample_stop = frame_index + sample_start + running_sample_window
        running_sample_start = np.clip(running_sample_start, a_min=0, a_max=None)
        running_sample = running_trace[running_sample_start:running_sample_stop]
        # If the window was clipped at the start of the trace, trim the x axis
        # to match so plot() always receives equal-length arrays.
        running_axis.plot(running_x_values[-len(running_sample):], running_sample)
        running_axis.axvline(x=0, c='k', linestyle='dashed')
        running_axis.set_ylim([0, 1.5])
        running_axis.set_title("Running Speed")

        # Display Images
        brain_axis.imshow(brain_frame)
        body_axis.imshow(body_frame)
        eye_axis.imshow(eye_frame)

        # Remove Axis
        brain_axis.axis('off')
        body_axis.axis('off')
        eye_axis.axis('off')

        # Render once and write to video.  buffer_rgba() yields a 4-channel
        # RGBA image, so convert with COLOR_RGBA2BGR (the previous
        # COLOR_RGB2BGR assumed a 3-channel buffer).
        canvas.draw()
        image_from_plot = np.asarray(canvas.buffer_rgba())
        image_from_plot = cv2.cvtColor(image_from_plot, cv2.COLOR_RGBA2BGR)
        video.write(image_from_plot)

        # Clear this figure explicitly (plt.clf() only clears the *current*
        # figure, which is fragile if other figures exist).
        figure_1.clf()

    video.release()
    plt.close(figure_1)
# Session paths.  The NRXN78.1A assignments below are superseded by the
# NXAK7.1B base_directory reassignment; kept for reference.
delta_f_directory = r"//media/matthew/Expansion/Control_Data/NRXN78.1A/2020_11_02_Spontaneous"
base_directory = r"//media/matthew/Expansion/Control_Data/NRXN78.1A/2020_11_02_Spontaneous"
output_directory = r"//media/matthew/Expansion/Control_Data/NRXN78.1A/2020_11_02_Spontaneous"
base_directory = r"/media/matthew/Expansion/Control_Data/NXAK7.1B/2021_01_25_Spontaneous"

# Guard the entry point so importing this module does not start a
# multi-thousand-frame render as a side effect.
if __name__ == "__main__":
    create_sample_video_with_mousecam(base_directory, base_directory, base_directory)