# -*- coding: utf-8 -*-
"""
Created on Thu Aug 10 16:42:38 2017
@author: 飘的心
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model, svm
# sklearn.cross_validation was removed in scikit-learn 0.20; fall back to
# sklearn.model_selection, which provides the same train_test_split API.
try:
    from sklearn import cross_validation
except ImportError:
    from sklearn import model_selection as cross_validation
def load_data_regression():
    """Load the diabetes regression dataset and return a 75/25 train/test split.

    Returns (x_train, x_test, y_train, y_test); random_state=0 makes the
    split reproducible across runs.
    """
    dataset = datasets.load_diabetes()
    return cross_validation.train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=0)
def load_data_classification():
    """Load the iris classification dataset and return a 75/25 train/test split.

    Returns (x_train, x_test, y_train, y_test); random_state=0 makes the
    split reproducible across runs.
    """
    iris = datasets.load_iris()
    features, labels = iris.data, iris.target
    return cross_validation.train_test_split(
        features, labels, test_size=0.25, random_state=0)
def test_LinearSVC(*data):
    """Fit a default LinearSVC classifier and print its test-set accuracy.

    data: the 4-tuple (x_train, x_test, y_train, y_test).
    """
    x_train, x_test, y_train, y_test = data
    classifier = svm.LinearSVC()
    classifier.fit(x_train, y_train)
    print(classifier.score(x_test, y_test))
# Load the iris splits once; these module-level names are reused by all of
# the classification tests that follow.
x_train,x_test,y_train,y_test=load_data_classification()
test_LinearSVC(x_train,x_test,y_train,y_test)
# Different loss functions
def test_LinearSVC_loss(*data):
    """Compare LinearSVC test accuracy for each supported loss function.

    data: the 4-tuple (x_train, x_test, y_train, y_test).
    """
    x_train, x_test, y_train, y_test = data
    for loss in ('hinge', 'squared_hinge'):
        model = svm.LinearSVC(loss=loss)
        model.fit(x_train, y_train)
        print('loss :{},accuracy:{}'.format(loss, model.score(x_test, y_test)))
# Compare the two LinearSVC loss functions on the same iris splits.
test_LinearSVC_loss(x_train,x_test,y_train,y_test)
# Different penalty terms
def test_LinearSVC_l(*data):
    """Compare LinearSVC test accuracy under the L1 and L2 penalties.

    data: the 4-tuple (x_train, x_test, y_train, y_test).
    """
    x_train, x_test, y_train, y_test = data
    for penalty in ('l1', 'l2'):
        # dual=False is required when penalty='l1' (the dual formulation
        # does not support L1 regularization).
        model = svm.LinearSVC(penalty=penalty, dual=False)
        model.fit(x_train, y_train)
        print('l:{},accuracy:{}'.format(penalty, model.score(x_test, y_test)))
# Compare the L1 and L2 penalties on the same iris splits.
test_LinearSVC_l(x_train,x_test,y_train,y_test)
def test_LinearSVC_C(*data):
    """Sweep the regularization strength C over a log grid and plot the
    LinearSVC train/test accuracy curves.

    data: the 4-tuple (x_train, x_test, y_train, y_test).
    """
    x_train, x_test, y_train, y_test = data
    cs = np.logspace(-2, 1)
    train_score, test_score = [], []
    for c in cs:
        model = svm.LinearSVC(C=c)
        model.fit(x_train, y_train)
        train_score.append(model.score(x_train, y_train))
        test_score.append(model.score(x_test, y_test))
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(cs, train_score, label='Train score')
    ax.plot(cs, test_score, label='test score')
    ax.set_xlabel('c')
    ax.set_ylabel('score')
    ax.legend(loc='best')
    ax.set_xscale('log')
    plt.show()
# Plot accuracy as a function of the regularization strength C.
test_LinearSVC_C(x_train,x_test,y_train,y_test)
# Linear regression SVR
def test_LinearSVR(*data):
    """Fit a default LinearSVR regressor and print its test-set R^2 score.

    data: the 4-tuple (x_train, x_test, y_train, y_test).
    """
    x_train, x_test, y_train, y_test = data
    model = svm.LinearSVR()
    model.fit(x_train, y_train)
    print(model.score(x_test, y_test))
# Rebind the shared split names to the diabetes regression data; all of the
# SVR tests below reuse these.
x_train,x_test,y_train,y_test=load_data_regression()
test_LinearSVR(x_train,x_test,y_train,y_test)# a negative score means the prediction performance is very poor
def test_LinearSVR_loss(*data):
    """Compare LinearSVR test R^2 scores for each supported loss function.

    data: the 4-tuple (x_train, x_test, y_train, y_test).
    """
    x_train, x_test, y_train, y_test = data
    for loss in ('epsilon_insensitive', 'squared_epsilon_insensitive'):
        model = svm.LinearSVR(loss=loss)
        model.fit(x_train, y_train)
        print('loss :{},score:{}'.format(loss, model.score(x_test, y_test)))
# Compare the two LinearSVR loss functions on the diabetes splits.
test_LinearSVR_loss(x_train,x_test,y_train,y_test)
def test_LinearSVR_epsilon(*data):
    """Sweep epsilon (the insensitive-tube width) over a log grid and plot
    the LinearSVR train/test R^2 curves.

    data: the 4-tuple (x_train, x_test, y_train, y_test).
    """
    x_train, x_test, y_train, y_test = data
    epsilons = np.logspace(-2, 2)
    scores = {'train': [], 'test': []}
    for eps in epsilons:
        model = svm.LinearSVR(epsilon=eps, loss='squared_epsilon_insensitive')
        model.fit(x_train, y_train)
        scores['train'].append(model.score(x_train, y_train))
        scores['test'].append(model.score(x_test, y_test))
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(epsilons, scores['train'], label='train')
    ax.plot(epsilons, scores['test'], label='test')
    ax.set_title('linear svr')
    ax.set_xscale('log')
    # Clamp the y-axis: badly-fit models can have arbitrarily negative R^2.
    ax.set_ylim(-1, 1.05)
    ax.legend(loc='best')
    plt.show()
# Plot R^2 as a function of epsilon.
test_LinearSVR_epsilon(x_train,x_test,y_train,y_test)
# Examine the penalty coefficient
def test_LinearSVR_C(*data):
    """Sweep the penalty coefficient C over a log grid and plot the
    LinearSVR train/test R^2 curves.

    data: the 4-tuple (x_train, x_test, y_train, y_test).
    """
    x_train, x_test, y_train, y_test = data
    cs = np.logspace(-2, 1)
    train_scores = []
    test_scores = []
    for c in cs:
        regr = svm.LinearSVR(C=c, epsilon=0.1, loss='squared_epsilon_insensitive')
        regr.fit(x_train, y_train)
        train_scores.append(regr.score(x_train, y_train))
        test_scores.append(regr.score(x_test, y_test))
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(cs, train_scores, label='train')
    ax.plot(cs, test_scores, label='test')
    ax.set_xscale('log')
    ax.set_title('linear svr')
    # BUG FIX: the original called ax.set_xlable('c'), a misspelling that
    # raises AttributeError at runtime; the correct method is set_xlabel.
    ax.set_xlabel('c')
    ax.set_ylabel('score')
    # Clamp the y-axis: badly-fit models can have arbitrarily negative R^2.
    ax.set_ylim(-1, 1.05)
    ax.legend(loc='best', framealpha=0.5)
    plt.show()
# Plot R^2 as a function of the penalty coefficient C.
test_LinearSVR_C(x_train,x_test,y_train,y_test)
# python进行机器学习中的SVM  (SVM for machine learning in Python — article title)
# 最新推荐文章于 2023-11-11 10:35:16 发布  (page-scrape footer)
# NOTE(review): these two lines are residue from the web page this code was
# copied from; they are commented out so the file remains valid Python.