% DIY_PCA_SVM dataset 2
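% Overview of the steps below (both settings follow the same pipeline):
%   1) L2-normalize each feature column of the source and target batches.
%   2) Fit PCA on the source batch, keep enough components to reach >= 99%
%      explained variance, and project the target batch into that PCA space.
%   3) Grid-search the SVM parameters c and gamma (RBF kernel, the LIBSVM
%      default), training on the source scores and evaluating on the
%      projected target scores.
%   4) Record the best target accuracy per target batch and report the mean.
% Requires the LIBSVM MATLAB interface (svmtrain/svmpredict) on the path.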
%% setting 1: train on batch1, test on batch2 and batch3 in turn
name_class={'1','2','3','4','5','6'};
acc1 = zeros(1,2);
for SL = 2:3
clearvars -except SL acc1 acc2   % keep only the loop index and the accuracy records
clc
load('D:\A_\Enose_datasets\4个月原始数据\datasetB_prep.mat')
Source = batch1;
if SL == 2
Target = batch2;
Tbatch_label = batch2_label;
elseif SL == 3
Target = batch3;
Tbatch_label = batch3_label;
end
Source_label=(vec2ind(batch1_label'))';
Target_label=(vec2ind(Tbatch_label'))';
% Normalize: scale each feature (column) to unit L2 norm
Source=Source./repmat(sqrt(sum(Source.^2,1)),size(Source,1),1);
Target=Target./repmat(sqrt(sum(Target.^2,1)),size(Target,1),1);
% PCA
[COEFF_train,SCORE_train,latent,tsquare] = pca(Source);
% Percentage of total variance explained by each principal component
latent_percent=latent./sum(latent).*100;
sumcounts=1;
sum_score=latent_percent(sumcounts,1);
while(sum_score<99)
sumcounts=sumcounts+1;
sum_score=sum_score+latent_percent(sumcounts,1);
end
npc=sumcounts; % npc = number of principal components needed to reach 99% explained variance
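% Equivalent one-line alternative (a sketch, using latent_percent as defined above):
%   npc = find(cumsum(latent_percent) >= 99, 1, 'first');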
Re_Source=SCORE_train(:,1:npc); % PCA scores of the training (source) samples
% Project the test (target) samples into the PCA space learned from the source
SCORE_test= bsxfun(@minus,Target,mean(Source,1))*COEFF_train; % center with the source mean, then multiply by the source loadings
Re_Target=SCORE_test(:,1:npc);
error1=0;
counter=0;
for m=-5:0.2:5
c=10^m;
for n=-5:0.2:5
gama=10^n;
counter=counter+1;
try
% SVM:
cmd=[' -c ',num2str(c),' -g ',num2str(gama)]; % LIBSVM svmtrain options: cost c and RBF kernel width gamma
model = svmtrain(Source_label,Re_Source,cmd);
[predict_label_test, ~, ~] = svmpredict(Target_label,Re_Target, model);
% compute_confusion_matrix (custom helper) expects num_in_class, which is not
% defined anywhere in this script, so the call below always throws and sends
% every iteration into the catch branch (accur_test = 0). Define num_in_class
% (samples per class) before re-enabling it.
% [confusion_matrix]=compute_confusion_matrix(Target_label,num_in_class,name_class);
% Fraction of target samples classified correctly
accur_test = mean(predict_label_test == Target_label);
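% Note: svmpredict's second output already reports accuracy; an equivalent
% sketch would be
%   [predict_label_test, acc_vec, ~] = svmpredict(Target_label, Re_Target, model);
%   accur_test = acc_vec(1) / 100;   % acc_vec(1) is classification accuracy in percent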
catch
error1 =error1 + 1;
accur_test = 0;
end
result(counter,1)=m;
result(counter,2)=n;
result(counter,3)=accur_test;
end
end
[best_acc, ~] = max(result(:,3)); % avoid shadowing the built-in max
acc1(SL-1) = best_acc;
end
acc1_mean = mean(acc1)
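% Optional sketch (not used above, which instead reports the best accuracy on
% the target grid): LIBSVM's '-v k' flag runs k-fold cross-validation on the
% training data and returns the CV accuracy instead of a model, so c and gamma
% could be chosen from the source batch alone, e.g.
%   cv_acc = svmtrain(Source_label, Re_Source, sprintf('-c %g -g %g -v 5', c, gama));
% and the (c, gamma) pair with the highest cv_acc would then be used to train
% the final model on the full source set.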
%% setting 2: train on the previous batch (batch1 or batch2), test on the next batch (batch2 or batch3)
acc2 = zeros(1,2);
for SL = 2:3
clearvars -except SL acc1 acc2 acc1_mean   % keep only the loop index and the accuracy records
clc
load('D:\A_\Enose_datasets\4个月原始数据\datasetB_prep.mat')
if SL-1 == 1
Source = batch1;
Sbatch_label = batch1_label;
elseif SL-1 == 2
Source = batch2;
Sbatch_label = batch2_label;
end
if SL == 2
Target = batch2;
Tbatch_label = batch2_label;
elseif SL == 3
Target = batch3;
Tbatch_label = batch3_label;
end
Source_label=(vec2ind(Sbatch_label'))';
Target_label=(vec2ind(Tbatch_label'))';
% Normalize: scale each feature (column) to unit L2 norm
Source=Source./repmat(sqrt(sum(Source.^2,1)),size(Source,1),1);
Target=Target./repmat(sqrt(sum(Target.^2,1)),size(Target,1),1);
% PCA
[COEFF_train,SCORE_train,latent,tsquare] = pca(Source);
% Percentage of total variance explained by each principal component
latent_percent=latent./sum(latent).*100;
sumcounts=1;
sum_score=latent_percent(sumcounts,1);
while(sum_score<99)
sumcounts=sumcounts+1;
sum_score=sum_score+latent_percent(sumcounts,1);
end
npc=sumcounts; % npc = number of principal components needed to reach 99% explained variance
Re_Source=SCORE_train(:,1:npc); % PCA scores of the training (source) samples
% Project the test (target) samples into the PCA space learned from the source
SCORE_test= bsxfun(@minus,Target,mean(Source,1))*COEFF_train; % center with the source mean, then multiply by the source loadings
Re_Target=SCORE_test(:,1:npc);
error1=0;
counter=0;
for m=-6:0.2:6
c=10^m;
for n=-6:0.2:6
gama=10^n;
counter=counter+1;
try
% SVM:
cmd=[' -c ',num2str(c),' -g ',num2str(gama)]; % LIBSVM svmtrain options: cost c and RBF kernel width gamma
model = svmtrain(Source_label,Re_Source,cmd);
[predict_label_test, ~, ~] = svmpredict(Target_label,Re_Target, model);
% Fraction of target samples classified correctly
accur_test = mean(predict_label_test == Target_label);
catch
error1 =error1 + 1;
accur_test = 0;
end
result(counter,1)=m;
result(counter,2)=n;
result(counter,3)=accur_test;
end
end
[best_acc, ~] = max(result(:,3)); % avoid shadowing the built-in max
acc2(SL-1) = best_acc;
end
acc2_mean = mean(acc2)