forked from mhaghighat/ccaFuse
ccaFuse.m
function [trainZ,testZ] = ccaFuse(trainX, trainY, testX, testY, mode)
% CCAFUSE applies feature level fusion using a method based on Canonical
% Correlation Analysis (CCA).
% Feature fusion is the process of combining two feature vectors into a
% single feature vector that is more discriminative than either of the
% input feature vectors alone.
% CCAFUSE gets the train and test data matrices from two modalities X & Y,
% and consolidates them into a single feature set Z.
%
%
% Details can be found in:
%
% M. Haghighat, M. Abdel-Mottaleb, W. Alhalabi, "Fully Automatic Face
% Normalization and Single Sample Face Recognition in Unconstrained
% Environments," Expert Systems With Applications, vol. 47, pp. 23-34,
% April 2016.
% http://dx.doi.org/10.1016/j.eswa.2015.10.047
%
%
% Inputs:
%       trainX  :   n x p matrix containing the first set of training data
%                   n:  number of training samples
%                   p:  dimensionality of the first feature set
%
%       trainY  :   n x q matrix containing the second set of training data
%                   q:  dimensionality of the second feature set
%
%       testX   :   m x p matrix containing the first set of test data
%                   m:  number of test samples
%
%       testY   :   m x q matrix containing the second set of test data
%
%       mode    :   fusion mode: 'concat' or 'sum' (default: 'sum')
%
% Outputs:
%       trainZ  :   matrix containing the fused training data
%       testZ   :   matrix containing the fused test data
%
%
% Sample use:
% [trainZ,testZ] = ccaFuse(trainX, trainY, testX, testY, 'sum');
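%
%   A fuller illustrative sketch (the sizes and random matrices below are
%   made up purely for demonstration and are not part of the original
%   interface):
%     trainX = rand(100,50);  trainY = rand(100,80);   % 100 training samples
%     testX  = rand(20,50);   testY  = rand(20,80);    % 20 test samples
%     [trainZ,testZ] = ccaFuse(trainX, trainY, testX, testY, 'concat');
%     size(trainZ)    % one fused feature vector per training sample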
%
%
% (C) Mohammad Haghighat, University of Miami
% haghighat@ieee.org
% PLEASE CITE THE ABOVE PAPER IF YOU USE THIS CODE.

[n,p] = size(trainX);
if size(trainY,1) ~= n
    error('trainX and trainY must have the same number of samples.');
elseif n == 1
    error('trainX and trainY must have more than one sample.');
end

q = size(trainY,2);

if size(testX,2) ~= p
    error('trainX and testX must have the same dimensions.');
end

if size(testY,2) ~= q
    error('trainY and testY must have the same dimensions.');
end

if size(testX,1) ~= size(testY,1)
    error('testX and testY must have the same number of samples.');
end

if ~exist('mode', 'var')
    mode = 'sum';   % Default fusion mode
end

%% Center the variables
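% Note: the test data are centered with the training means, so the test set
% is mapped into the same space without using any test-set statistics.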
meanX = mean(trainX);
meanY = mean(trainY);
trainX = bsxfun(@minus, trainX, meanX);
testX = bsxfun(@minus, testX, meanX);
trainY = bsxfun(@minus, trainY, meanY);
testY = bsxfun(@minus, testY, meanY);
%% Dimensionality reduction using PCA for the first data X
% Calculate the covariance matrix
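% When n < p the p-by-p covariance matrix is rank-deficient, so we work with
% the smaller n-by-n Gram matrix trainX*trainX' instead; its nonzero
% eigenvalues are the same as those of trainX'*trainX.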
if n >= p
    C = trainX' * trainX;   % pxp
else
    C = trainX * trainX';   % nxn
end
% Perform eigenvalue decomposition
[eigVecs, eigVals] = eig(C);
eigVals = abs(diag(eigVals));
% Ignore zero eigenvalues
maxEigVal = max(eigVals);
zeroEigIndex = find((eigVals/maxEigVal)<1e-6);
eigVals(zeroEigIndex) = [];
eigVecs(:,zeroEigIndex) = [];
% Sort in descending order
[~,index] = sort(eigVals,'descend');
eigVals = eigVals(index);
eigVecs = eigVecs(:,index);
% Obtain the projection matrix
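% In the n < p case, the covariance eigenvectors are recovered from the Gram
% matrix eigenvectors V as trainX' * V * diag(1./sqrt(eigVals)), which also
% rescales them to unit length.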
if n >= p
    Wxpca = eigVecs;
else
    Wxpca = trainX' * eigVecs * diag(1 ./ sqrt(eigVals));
end
clear C eigVecs eigVals maxEigVal zeroEigIndex
% Update the first train and test data
trainX = trainX * Wxpca;
testX = testX * Wxpca;
%% Dimensionality reduction using PCA for the second data Y
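% (This mirrors the PCA step for X above, applied to the second feature set.)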
% Calculate the covariance matrix
if n >= q
    C = trainY' * trainY;   % qxq
else
    C = trainY * trainY';   % nxn
end
% Perform eigenvalue decomposition
[eigVecs, eigVals] = eig(C);
eigVals = abs(diag(eigVals));
% Ignore zero eigenvalues
maxEigVal = max(eigVals);
zeroEigIndex = find((eigVals/maxEigVal)<1e-6);
eigVals(zeroEigIndex) = [];
eigVecs(:,zeroEigIndex) = [];
% Sort in descending order
[~,index] = sort(eigVals,'descend');
eigVals = eigVals(index);
eigVecs = eigVecs(:,index);
% Obtain the projection matrix
if n >= q
    Wypca = eigVecs;
else
    Wypca = trainY' * eigVecs * diag(1 ./ sqrt(eigVals));
end
clear C eigVecs eigVals maxEigVal zeroEigIndex
% Update the second train and test data
trainY = trainY * Wypca;
testY = testY * Wypca;
%% Fusion using Canonical Correlation Analysis (CCA)
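% canoncorr (Statistics and Machine Learning Toolbox) returns coefficient
% matrices Wxcca and Wycca such that the projections trainX*Wxcca and
% trainY*Wycca are maximally correlated, pair by pair.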
[Wxcca,Wycca] = canoncorr(trainX,trainY);
trainXcca = trainX * Wxcca;
trainYcca = trainY * Wycca;
testXcca = testX * Wxcca;
testYcca = testY * Wycca;
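% Two fusion options: concatenation doubles the feature dimension, while
% summation keeps the dimension of the canonical subspace.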
if strcmp(mode, 'concat')   % Fusion by concatenation (Z1)
    trainZ = [trainXcca, trainYcca];
    testZ  = [testXcca, testYcca];
else                        % Fusion by summation (Z2)
    trainZ = trainXcca + trainYcca;
    testZ  = testXcca + testYcca;
end