%% Use_precomputed_kernelForLibsvm_example
% faruto
% last modified: 2011.07.07
%%
tic;
clear;
clc;
close all;
format compact;
%%
%load heart_scale.mat;
% Split Data
%load letterr.mat;
% Alternative dataset: wine (178 samples); the heart_scale split below supersedes it.
%load wine.mat
%train_data = wine_data(1:95,:);
%train_label = wine_label(1:95,:);
%test_data = wine_data(96:178,:);
%test_label = wine_label(96:178,:);
%train_data = wine_data([1:30 60:90 131:145],:);
%load testData.mat;
%load trainData.mat;
% Active dataset: heart_scale (270 samples), first 30 for training, remaining 240 for testing
load heart_scale.mat
train_data = heart_scale_inst(1:30,:);
train_label = heart_scale_label(1:30,:);
test_data = heart_scale_inst(31:270,:);
test_label = heart_scale_label(31:270,:);
%test_data = TrainData(201:400,:);
%test_label = trainLabel(201:400,:);
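% A minimal sketch (not in the original flow) of a randomized split via randperm,
% in case the fixed 1:30 / 31:270 split is not wanted; kept commented out so the
% results below stay reproducible:
% n = size(heart_scale_inst, 1);              % 270 samples in heart_scale
% rand_idx = randperm(n);
% train_data  = heart_scale_inst(rand_idx(1:30), :);
% train_label = heart_scale_label(rand_idx(1:30), :);
% test_data   = heart_scale_inst(rand_idx(31:end), :);
% test_label  = heart_scale_label(rand_idx(31:end), :);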
%model = svmtrain(train_label,train_data,'-s 0 -t 2 -c 1 -g 1/13');
%model = svmtrain(train_label,train_data,'-s 0 -t 2');
%[predictlabel,accuracy] = svmpredict(test_label ,test_data,model);
%accuracy
%% Linear Kernel
model_linear = svmtrain(train_label, train_data, '-t 0');
[predict_label_L, accuracy_L, dec_values_L] = svmpredict(test_label, test_data, model_linear);
accuracy_L
%85 85 92.7711
%% Polynomial kernel
model_poly = svmtrain(train_label, train_data, '-t 1');
[predict_label_poly, accuracy_poly, dec_values_poly] = svmpredict(test_label, test_data, model_poly);
accuracy_poly
%82.5
%% RBF kernel
model_rbf = svmtrain(train_label, train_data, '-t 2');
% default gamma = 1/num_features
[predict_label_rbf, accuracy_rbf, dec_values_rbf] = svmpredict(test_label, test_data, model_rbf);
accuracy_rbf
% 84.1667
%% Sigmoid kernel
model_sig = svmtrain(train_label, train_data, '-t 3');
[predict_label_sig, accuracy_sig, dec_values_sig] = svmpredict(test_label, test_data, model_sig);
accuracy_sig
%84.1667
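% A compact alternative (sketch, not part of the original script): the four runs
% above differ only in the '-t' value, so they can be driven by one loop; the
% variable names kernel_names / acc_all are illustrative only.
% kernel_names = {'linear', 'polynomial', 'RBF', 'sigmoid'};
% acc_all = zeros(4,1);
% for kt = 0:3
%     m = svmtrain(train_label, train_data, sprintf('-t %d', kt));
%     [p, acc, d] = svmpredict(test_label, test_data, m);
%     acc_all(kt+1) = acc(1);
%     fprintf('%s kernel accuracy: %.4f%%\n', kernel_names{kt+1}, acc_all(kt+1));
% end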
%% Precomputed Kernel One
% Kernel used: K(x,x') = x * x'   (linear kernel supplied as a precomputed matrix)
% Kernel matrix: in '-t 4' mode LIBSVM expects the sample serial number in the first column
ktrain1 = full(train_data*train_data');   % full() in case heart_scale_inst is stored sparse
Ktrain1 = [(1:30)', ktrain1];
model_precomputed1 = svmtrain(train_label, Ktrain1, '-t 4');
ktest1 = full(test_data*train_data');
Ktest1 = [(1:240)', ktest1];
[predict_label_P1, accuracy_P1, dec_values_P1] = svmpredict(test_label, Ktest1, model_precomputed1);
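% Sanity-check sketch: with default parameters, the precomputed linear kernel in
% '-t 4' mode is expected to reproduce the built-in '-t 0' result up to numerical
% noise; uncomment to verify.
% if abs(accuracy_P1(1) - accuracy_L(1)) > 1e-6
%     warning('Precomputed linear kernel does not match the built-in linear kernel.');
% end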
%
%function K = kfun_rbf(U, V, gamma)
% RBF kernel: Gram matrix over the training data (illustration only; not used below)
gamma = 1/13;                 % same gamma as the precomputed kernels below
[m1, n1] = size(train_data);
[m2, n2] = size(test_data);
K = zeros(m1, m1);
for ii = 1:m1
    for jj = 1:m1
        % K(ii,jj) = U(ii,:)*U(jj,:)';   % linear kernel (when used inside kfun_rbf)
        K(ii,jj) = exp(-gamma*norm(train_data(ii,:)-train_data(jj,:))^2);
    end
end
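% For reuse, the loop above can live in a separate file; a minimal sketch of the
% kfun_rbf helper whose signature is hinted at in the comment above (save it as
% kfun_rbf.m -- in 2011-era MATLAB a function cannot be defined inside a script):
%
% function K = kfun_rbf(U, V, gamma)
% % RBF Gram matrix: K(ii,jj) = exp(-gamma * ||U(ii,:) - V(jj,:)||^2)
% K = zeros(size(U,1), size(V,1));
% for ii = 1:size(U,1)
%     for jj = 1:size(V,1)
%         K(ii,jj) = exp(-gamma * norm(U(ii,:) - V(jj,:))^2);
%     end
% end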
%% Precomputed Kernel Two
% Kernel used (active line): a mixed kernel, 0.1*RBF + 0.9*linear
% Commented alternatives below: pure RBF, pure linear, cosine, and ||x|| * ||x'||
% Kernel matrix
gamma = 1/13;
ktrain2 = ones(30,30);
%[m1, n1] = size(train_data);
%K = zeros(m1, m1);
%for ii = 1:m1
%    for jj = 1:m1
%        K(ii,jj) = U(ii,:)*U(jj,:)';                                       % linear kernel
%        K(ii,jj) = exp(-gamma*norm(train_data(ii,:)-train_data(jj,:))^2);  % RBF kernel
%    end
%end
for i = 1:30
    for j = 1:30
        % ktrain2(i,j) = 0.3*exp( -gamma * norm(train_data(i,:)-train_data(j,:))^2 ) + 0.7*train_data(i,:)*train_data(j,:)';
        % ktrain2(i,j) = exp( -gamma * norm(train_data(i,:)-train_data(j,:))^2 );
        ktrain2(i,j) = 0.1*exp( -gamma * norm(train_data(i,:)-train_data(j,:))^2 ) + 0.9*train_data(i,:)*train_data(j,:)';
        % ktrain2(i,j) = train_data(i,:)*train_data(j,:)';
        % ktrain2(i,j) = train_data(i,:)*train_data(j,:)'/(sum(train_data(i,:).^2)^0.5 * sum(train_data(j,:).^2)^0.5);
    end
end   % mixed kernel: 81.25 / 81.6667
%for i = 1:150
%    for j = 1:150
%        ktrain2(i,j) = sum(train_data(i,:).^2)^0.5 * sum(train_data(j,:).^2)^0.5;
%    end
%end
Ktrain2 = [(1:30)', ktrain2];
model_precomputed2 = svmtrain(train_label, Ktrain2, '-t 4');
ktest2 = ones(240,30);
for i = 1:240
    for j = 1:30
        % ktest2(i,j) = sum(test_data(i,:).^2)^0.5 * sum(train_data(j,:).^2)^0.5;
        % ktest2(i,j) = max(exp( -gamma * norm(test_data(i,:)-train_data(j,:))^2 ), test_data(i,:)*train_data(j,:)');
        q = norm(test_data(i,:)-train_data(j,:));
        %display('q is:');
        %q
        if q < 2.5   % around 5 is the cut-off for wine; 0.85 / 50 / 100 for the modified dataset
            ktest2(i,j) = 10*exp( -gamma * norm(test_data(i,:)-train_data(j,:))^2 );
            % ktest2(i,j) = 0.1*exp( -gamma * norm(test_data(i,:)-train_data(j,:))^2 ) + 0.9*test_data(i,:)*train_data(j,:)';
        elseif all(test_data(i,:) < train_data(j,:))   % 'if' on a vector is true only when every element is
            ktest2(i,j) = -test_data(i,:)*train_data(j,:)';
        elseif all(test_data(i,:) > train_data(j,:))
            ktest2(i,j) = test_data(i,:)*train_data(j,:)';
        end
        %display('hehe');
        % ktest2(i,j) = 0.3*exp( -gamma * norm(test_data(i,:)-train_data(j,:))^2 ) + 0.7*test_data(i,:)*train_data(j,:)';
        % ktest2(i,j) = test_data(i,:)*train_data(j,:)'/(sum(test_data(i,:).^2)^0.5 * sum(train_data(j,:).^2)^0.5);
    end
end
Ktest2 = [(1:240)', ktest2];
[predict_label_P2, accuracy_P2, dec_values_P2] = svmpredict(test_label, Ktest2, model_precomputed2);
accuracy_P2
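% Diagnostic sketch: LIBSVM does not check positive semi-definiteness of a
% precomputed kernel, but a custom/mixed kernel is only a valid Mercer kernel if
% its Gram matrix is PSD; the 1e-8 tolerance below is an assumption. Uncomment to check.
% eig_ktrain2 = eig((ktrain2 + ktrain2')/2);   % symmetrize before eig
% if min(eig_ktrain2) < -1e-8
%     fprintf('Note: ktrain2 is not PSD (smallest eigenvalue = %g)\n', min(eig_ktrain2));
% end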
%% Precomputed Kernel Three
% Kernel used: K(x,x') = (x * x') / (||x|| * ||x'||)   (cosine similarity)
% Kernel matrix
ktrain3 = ones(30,30);
for i = 1:30
    for j = 1:30
        ktrain3(i,j) = ...
            train_data(i,:)*train_data(j,:)'/(sum(train_data(i,:).^2)^0.5 * sum(train_data(j,:).^2)^0.5);
    end
end
Ktrain3 = [(1:30)', ktrain3];
model_precomputed3 = svmtrain(train_label, Ktrain3, '-t 4');
ktest3 = ones(240,30);
for i = 1:240
    for j = 1:30
        ktest3(i,j) = ...
            test_data(i,:)*train_data(j,:)'/(sum(test_data(i,:).^2)^0.5 * sum(train_data(j,:).^2)^0.5);
    end
end
Ktest3 = [(1:240)', ktest3];
[predict_label_P3, accuracy_P3, dec_values_P3] = svmpredict(test_label, Ktest3, model_precomputed3);
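% Vectorized sketch of the same cosine kernel (equivalent up to rounding error);
% full() guards against heart_scale_inst being stored as a sparse matrix:
% row_norm_train = sqrt(sum(full(train_data).^2, 2));
% row_norm_test  = sqrt(sum(full(test_data).^2, 2));
% Ktrain3_vec = [(1:30)',  full(train_data*train_data') ./ (row_norm_train*row_norm_train')];
% Ktest3_vec  = [(1:240)', full(test_data*train_data')  ./ (row_norm_test*row_norm_train')];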
%% Display the accuracy
accuracyL = accuracy_L(1) % Display the accuracy using linear kernel
accuracyP1 = accuracy_P1(1) % Display the accuracy using precomputed kernel One
accuracyP2 = accuracy_P2(1) % Display the accuracy using precomputed kernel Two
accuracyP3 = accuracy_P3(1) % Display the accuracy using precomputed kernel Three
%%
toc;