%% introduction
% s_DeepDTI_prepData.m
%
% A script for preparing input and output data for convolutional neural
% network in DeepDTI.
%
% Source code:
% https://github.com/qiyuantian/DeepDTI/blob/main/s_DeepDTI_prepData.m
%
% HTML file can be automatically generated using the command:
% publish('s_DeepDTI_prepData.m', 'html');
%
% Reference:
% [1] Tian Q, Bilgic B, Fan Q, Liao C, Ngamsombat C, Hu Y, Witzel T,
% Setsompop K, Polimeni JR, Huang SY. DeepDTI: High-fidelity
% six-direction diffusion tensor imaging using deep learning.
% NeuroImage. 2020;219:117017.
%
% [2] Tian Q, Li Z, Fan Q, Ngamsombat C, Hu Y, Liao C, Wang F,
% Setsompop K, Polimeni JR, Bilgic B, Huang SY. SRDTI: Deep
% learning-based super-resolution for diffusion tensor MRI. arXiv
% preprint. 2021; arXiv:2102.09069.
%
% (c) Qiyuan Tian, Harvard, 2021
%% load data
clear, clc, close all
tmp = load('data.mat'); % example data are saved in uint16 to save space for sharing on GitHub
data = double(tmp.data); % 18 b = 0 volumes and 90 dwi volumes
% due to the file size limit of GitHub, only 70 axial slices are included
% here as example data. an actual implementation should use whole-brain data.
% the diffusion data were already corrected for intensity bias, which can be
% performed for new data using the dwibiascorrect script from MRtrix3:
% https://mrtrix.readthedocs.io/en/latest/reference/commands/dwibiascorrect.html
% we did not evaluate the effect of the bias field on CNN performance, but
% do not expect a significant difference in the results.
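% a minimal example of the MRtrix3 bias-correction call (run in a shell,
% not MATLAB; file names below are hypothetical), assuming the ANTs-based
% algorithm is available:
%   dwibiascorrect ants dwi.mif dwi_unbiased.mif -fslgrad bvecs bvals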
bvals = tmp.bvals; % b = 1000 s/mm^2
bvecs = tmp.bvecs;
mask = tmp.mask;
b0s = data(:, :, :, bvals < 100); % all 18 b = 0 images
meanb0 = mean(b0s, 4); % mean b = 0 image
dwis = data(:, :, :, bvals > 100); % all 90 dwis
bvals_dwi = bvals(bvals > 100); % bvals for dwis
bvecs_dwi = bvecs(bvals > 100, :); % bvecs for dwis
dirs = bvecs(bvals > 100, :); % 90 diffusion-encoding directions
dirs_vis = dirs .* sign(dirs(:, 3)); % directions flipped to z > 0 for visualization
sz_data = size(data);
figure; % display image data
subplot(1, 2, 1)
imshow(rot90(data(:, :, 35, 1)), [0, 10000])
title('b = 0 image');
subplot(1, 2, 2)
imshow(rot90(data(:, :, 35, 2)), [0, 4000])
title('diffusion weighted image');
figure; % display diffusion encoding directions, all flipped to z > 0
plot3(dirs_vis(:, 1), dirs_vis(:, 2), dirs_vis(:, 3), '.');
grid on, axis equal
zlim([-1, 1])
title('90 diffusion encoding directions');
%% select optimized encoding directions
% 6 optimized directions from the DSM scheme, which minimize the condition
% number of the diffusion tensor transformation matrix (see the sketch of
% this matrix below) while being as uniform as possible.
% from S. Skare et al., J Magn Reson. 2000;147(2):340-52.
dsm6 = [0.91,   0.416,  0; ...
        0,      0.91,   0.416; ...
        0.416,  0,      0.91; ...
        0.91,  -0.416,  0; ...
        0,      0.91,  -0.416; ...
       -0.416,  0,      0.91];
dsm6_norm = dsm6 ./ sqrt(dsm6(:, 1) .^ 2 + dsm6(:, 2) .^ 2 + dsm6(:, 3) .^ 2); % normalize vectors
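% amatrix.m (provided alongside this script) builds the diffusion tensor
% transformation matrix referred to above. a minimal sketch of such a
% matrix, assuming the tensor components are ordered as
% [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz] (the actual ordering is whatever
% amatrix.m defines), so that log(s / s0) = -b * A * D:
%   A = [g(:, 1) .^ 2, g(:, 2) .^ 2, g(:, 3) .^ 2, ...
%        2 * g(:, 1) .* g(:, 2), 2 * g(:, 1) .* g(:, 3), 2 * g(:, 2) .* g(:, 3)];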
% randomly rotate the DSM6 dirs, select their 6 nearest dirs among the
% acquired dirs, and keep those sets with low condition number and angle error.
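% rot3d.m (provided alongside this script) returns a 3x3 rotation matrix
% from the three random angles. a minimal sketch, assuming rotations are
% composed about the x, y, then z axes:
%   Rx = [1, 0, 0; 0, cos(a), -sin(a); 0, sin(a), cos(a)];
%   Ry = [cos(b), 0, sin(b); 0, 1, 0; -sin(b), 0, cos(b)];
%   Rz = [cos(c), -sin(c), 0; sin(c), cos(c), 0; 0, 0, 1];
%   R = Rz * Ry * Rx;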
rotang_all = [];
angerr_all = [];
condnum_all = [];
ind_all = [];
for ii = 1 : 100000 % number of iterations can be increased in an actual implementation
    rotangs = rand(1, 3) * 2 * pi; % random angles to rotate around the x, y, z axes
    R = rot3d(rotangs); % rotation matrix
    dsm6_rot = (R * dsm6_norm')'; % rotated directions
    % find the 6 nearest dirs among the acquired dirs
    angerrors = acosd(abs(dsm6_rot * dirs')); % angles between rotated DSM6 dirs and acquired dirs
    [minerrors, ind] = min(angerrors, [], 2); % 6 dirs with min angles compared to rotated DSM6 dirs
    meanangerr = mean(minerrors); % mean angle error of the selected dirs
    condnum = cond(amatrix(dirs(ind, :))); % cond number of tensor tx matrix of the selected dirs
    if meanangerr < 5 && condnum < 1.6 % only use dirs with low angle error and cond number
        if isempty(ind_all) || ~any(sum(ind_all == sort(ind'), 2) == 6) % make sure there is no repetition
            % record params for qualifying sets
            angerr_all = cat(1, angerr_all, meanangerr);
            condnum_all = cat(1, condnum_all, condnum);
            ind_all = cat(1, ind_all, sort(ind'));
            rotang_all = cat(1, rotang_all, rotangs);
        end
    end
end
% here we only select the 5 sets of directions with the lowest condition numbers.
% in practice, it is better to ensure all dwis can be selected and used equally for training.
[~, ind_sort] = sort(condnum_all);
ind_use = ind_all(ind_sort(1 : 5), :);
condnum_use = condnum_all(ind_sort(1 : 5));
angerr_use = angerr_all(ind_sort(1 : 5));
rotang_use = rotang_all(ind_sort(1 : 5), :);
figure; % display two selected sets of 6 optimal directions
for ii = 1 : 2
    subplot(1, 2, ii)
    plot3(dirs_vis(:, 1), dirs_vis(:, 2), dirs_vis(:, 3), '.'); % all dirs
    hold on
    visdirs_use = dirs_vis(ind_use(ii, :), :);
    plot3(visdirs_use(:, 1), visdirs_use(:, 2), visdirs_use(:, 3), 'o'); % selected dirs
    R = rot3d(rotang_use(ii, :));
    dsm6_rot = (R * dsm6_norm')';
    dsm6_rot_vis = dsm6_rot .* sign(dsm6_rot(:, 3));
    plot3(dsm6_rot_vis(:, 1), dsm6_rot_vis(:, 2), dsm6_rot_vis(:, 3), 'x'); % rotated DSM6 dirs
    grid on, axis equal
    xlim([-1, 1])
    ylim([-1, 1])
    zlim([-1, 1])
    title(['cond num=' num2str(condnum_use(ii))]);
end
%% generate input data for the CNN
input_all = {};
tensor_all = {};
bval_synth = 1000; % b value for synthesized dwis
for ii = 1 : 5
    b0 = b0s(:, :, :, ii); % a single b = 0 image for each set of 6 selected dwis
    dwis6 = dwis(:, :, :, ind_use(ii, :));
    bvals6 = bvals_dwi(ind_use(ii, :));
    bvecs6 = bvecs_dwi(ind_use(ii, :), :);
    % compute apparent diffusion coefficients
    adcs6 = log(dwis6 ./ b0); % s = s0 * exp(-b * adc), so adc = -log(s / s0) / b
    for jj = 1 : length(bvals6)
        adcs6(:, :, :, jj) = adcs6(:, :, :, jj) / (-bvals6(jj)); % in case b-values differ across acquired dwis
    end
    % compute tensors
    adcs6_vec = reshape(adcs6, sz_data(1)*sz_data(2)*sz_data(3), size(adcs6, 4)); % transform volume data to vectors
    A = amatrix(bvecs6); % tensor transformation matrix
    tensor_vec = A \ adcs6_vec'; % i.e., inv(A) * adcs6_vec'; solve Ax = b
    tensor = reshape(tensor_vec', [sz_data(1:3), 6]); % transform tensors in vector form back to a volume
    tensor(isnan(tensor)) = 0;
    tensor(isinf(tensor)) = 0;
    tensor_all{ii} = tensor;
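    % optional sanity check (not part of the original pipeline): with 6
    % directions the tensor fit is exactly determined, so reconstructing
    % the ADCs from the fitted tensor should match within the brain mask:
    %   adcs6_fit = (A * tensor_vec)';                                % predicted ADCs, voxels x 6
    %   resid = adcs6_fit(mask(:) > 0, :) - adcs6_vec(mask(:) > 0, :); % ~0 up to numerical precision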
    % synthesize dwis along the DSM6 dirs
    dwis6norm_vec_synth = exp(-bval_synth .* amatrix(dsm6_norm) * tensor_vec); % normalized dwis
    dwis6_synth = b0 .* reshape(dwis6norm_vec_synth', [sz_data(1:3), size(dwis6norm_vec_synth, 1)]);
    dwis6_synth(isnan(dwis6_synth)) = 0;
    dwis6_synth(isinf(dwis6_synth)) = 0;
    diff_input = cat(4, b0, dwis6_synth);
    input_all{ii} = diff_input;
end
figure; % display synthesized dwis
for ii = 1 : 2
    diff_input = input_all{ii};
    subplot(1, 2, ii)
    imshow(rot90(diff_input(:, :, 35, 2)), [0, 4000])
    title('synthesized dwi');
end
figure; % display fractional anisotropy maps
for ii = 1 : 2
    tensor = tensor_all{ii};
    subplot(1, 2, ii)
    dtimetrics = decompose_tensor(tensor, mask);
    fa = dtimetrics.fa;
    imshow(rot90(fa(:, :, 35)), [0, 1])
    title('fractional anisotropy');
end
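% decompose_tensor.m (provided alongside this script) derives DTI metrics
% such as FA from the 6 tensor coefficients. a minimal sketch of the FA
% computation for a single voxel (variable names are hypothetical, and the
% component ordering must match amatrix.m):
%   L = eig([dxx, dxy, dxz; dxy, dyy, dyz; dxz, dyz, dzz]); % eigenvalues
%   fa = sqrt(3 / 2) * norm(L - mean(L)) / norm(L);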
%% generate ground-truth data for the CNN
% compute apparent diffusion coefficients
adcs = log(dwis ./ meanb0); % s = s0 * exp(-b * adc)
for ii = 1 : size(adcs, 4)
    adcs(:, :, :, ii) = adcs(:, :, :, ii) / (-bvals_dwi(ii)); % in case b-values differ across acquired dwis
end
adcs_vec = reshape(adcs, sz_data(1)*sz_data(2)*sz_data(3), size(adcs, 4)); % tx volume data to vectors
tensor_gt_vec = amatrix(bvecs_dwi) \ adcs_vec'; % solve tensors
tensor_gt = reshape(tensor_gt_vec', [sz_data(1:3), 6]); % tx vectors to volume
tensor_gt(isnan(tensor_gt)) = 0;
tensor_gt(isinf(tensor_gt)) = 0;
% synthesize dwis along DSM6 dirs
dwis6norm_vec_gt = exp(-bval_synth .* amatrix(dsm6_norm) * tensor_gt_vec);
dwis6_gt = meanb0 .* reshape(dwis6norm_vec_gt', [sz_data(1:3), size(dwis6norm_vec_gt, 1)]); % scale by meanb0, consistent with the ADC computation above
dwis6_gt(isnan(dwis6_gt)) = 0;
dwis6_gt(isinf(dwis6_gt)) = 0;
diff_gt = cat(4, meanb0, dwis6_gt);
figure; % display ground-truth dwi and fa
subplot(1, 2, 1)
imshow(rot90(diff_gt(:, :, 35, 2)), [0, 4000])
title('ground-truth dwi');
dtimetrics = decompose_tensor(tensor_gt, mask);
fa = dtimetrics.fa;
subplot(1, 2, 2)
imshow(rot90(fa(:, :, 35)), [0, 1])
title('ground-truth fa');
figure; % display residuals between input dwis and ground truth
for ii = 1 : 2
    diff_input = input_all{ii};
    subplot(1, 2, ii)
    imagesc(rot90(diff_gt(:, :, 35, 2)) - rot90(diff_input(:, :, 35, 2)), [-1000, 1000])
    axis image
    title('residual image');
    colormap(bgr_colormap);
    colorbar;
end
%% save data
% data are saved in uint16 to save space for sharing on GitHub.
% an actual implementation should use floating point to maintain precision.
% these input and output data can be used to train any CNN for denoising.
diff_input1 = uint16(input_all{1});
diff_input2 = uint16(input_all{2});
diff_input3 = uint16(input_all{3});
diff_input4 = uint16(input_all{4});
diff_input5 = uint16(input_all{5});
diff_gt = uint16(diff_gt);
save('cnn_inout.mat', 'diff_input1', 'diff_input2', 'diff_input3', ...
'diff_input4', 'diff_input5', 'diff_gt', 'mask');
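% to reuse the saved data later, e.g., for CNN training, convert back to
% floating point after loading (a minimal sketch):
%   tmp = load('cnn_inout.mat');
%   diff_input1 = double(tmp.diff_input1);
%   diff_gt = double(tmp.diff_gt);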