Iris Classification (Principal Component Analysis)

```python
import numpy as np
from matplotlib import colors
from sklearn import svm
from sklearn.svm import SVC
from sklearn import model_selection
import matplotlib.pyplot as plt
import matplotlib as mpl

# Data preparation
data_path = r'D:\python learning material\iris.data'  # raw string, so the backslashes in the Windows path are not treated as escapes
selection = [0, 2]

def iris_type(s):
    # Map the species-name strings in the last column to integer labels
    it = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
    return it[s]

def datadivision(data_path, selection):
    # Load the comma-separated file; column 4 (the species name) is converted with iris_type
    data = np.loadtxt(data_path, dtype=float, delimiter=',', converters={4: iris_type})
    # data is a 2-D array, data.shape == (150, 5)
    # Split along axis=1 (by columns) at position 4: the first four columns are the features, the last is the label
    x, y = np.split(data, (4,), axis=1)
    x = x[:, selection]  # keep only the two selected features
    # print(x)  # print(x.shape)
    # 70% training / 30% test; the fixed random_state makes the split reproducible
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        x, y, random_state=1, test_size=0.3)
    return x, x_train, x_test, y_train, y_test, y
```
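The loader above depends on a local copy of iris.data at a hard-coded Windows path. If that file is not at hand, the same features and labels can be taken from scikit-learn's bundled copy of the dataset; the sketch below is an illustrative stand-in for datadivision() (the name load_iris_builtin is made up for this example), assuming the bundled column order sepal length, sepal width, petal length, petal width.

```python
from sklearn import datasets, model_selection

def load_iris_builtin(selection):
    # Illustrative alternative to datadivision(): no local iris.data file needed
    iris = datasets.load_iris()
    x = iris.data[:, selection]   # keep only the two selected features
    y = iris.target               # labels are already the integers 0/1/2
    # Same split as above: 30% test, fixed random_state for reproducibility
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        x, y, random_state=1, test_size=0.3)
    return x, x_train, x_test, y_train, y_test, y
```

Note that y here is a 1-D array of shape (150,) rather than the (150, 1) column produced by np.split, so the later calls to y_train.ravel() and np.squeeze(y) behave the same either way.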
```python
# Model definition
def classifier():
    clf = svm.SVC(C=0.5,                          # penalty parameter of the error term (default 1.0)
                  kernel='linear',                # linear kernel; kernel='rbf' would use a Gaussian kernel
                  decision_function_shape='ovr')  # one-vs-rest decision function
    return clf

def train(clf, x_train, y_train):
    clf.fit(x_train,          # training feature vectors
            y_train.ravel())  # training labels; ravel() flattens the (n, 1) column to 1-D (flatten() also works)
```
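The comment above mentions kernel='rbf' as the Gaussian alternative to the linear kernel. A minimal sketch of such a non-linear classifier, with C and gamma left at illustrative, untuned values:

```python
def classifier_rbf():
    # Gaussian (RBF) kernel variant of classifier(); C and gamma are illustrative, not tuned
    clf = svm.SVC(C=1.0,
                  kernel='rbf',
                  gamma='scale',                  # bandwidth heuristic: 1 / (n_features * X.var())
                  decision_function_shape='ovr')
    return clf
```

Swapping classifier() for classifier_rbf() in the execution section below is enough to compare the two kernels on the same split.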
    

```python
# Model evaluation
def show_accuracy(a, b, tip):
    acc = a.ravel() == b.ravel()
    print('%s accuracy: %.3f' % (tip, np.mean(acc)))

def print_accuracy(clf, x_train, y_train, x_test, y_test):
    # score(X, y) returns the model's mean accuracy on (X, y), printed for the training and the test set
    print('training prediction: %.3f' % (clf.score(x_train, y_train)))
    print('test data prediction: %.3f' % (clf.score(x_test, y_test)))
    # Compare the true labels with predict(), which returns the predicted class of each sample
    show_accuracy(clf.predict(x_train), y_train, 'training data')
    show_accuracy(clf.predict(x_test), y_test, 'testing data')
    # decision_function() returns the signed distance of each sample to each separating hyperplane
    # (one column per class with decision_function_shape='ovr')
    # print('decision_function:\n', clf.decision_function(x_train))
```
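The element-wise comparison in show_accuracy() can be cross-checked with scikit-learn's metrics module, which also gives per-class figures; a small sketch, assuming clf, x_test and y_test as defined above:

```python
from sklearn import metrics

def report(clf, x_test, y_test):
    # Per-class precision/recall/F1 and the confusion matrix complement the overall accuracy
    y_true = y_test.ravel()          # flatten the (n, 1) column to 1-D
    y_pred = clf.predict(x_test)
    print('accuracy: %.3f' % metrics.accuracy_score(y_true, y_pred))
    print(metrics.classification_report(y_true, y_pred, digits=3))
    print('confusion matrix:\n', metrics.confusion_matrix(y_true, y_pred))
```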

```python
# Plotting
def draw(clf, x, y, selection):
    iris_feature = 'sepal length', 'sepal width', 'petal length', 'petal width'
    x1_min, x1_max = x[:, 0].min(), x[:, 0].max()              # range of the first selected feature
    x2_min, x2_max = x[:, 1].min(), x[:, 1].max()              # range of the second selected feature
    x1, x2 = np.mgrid[x1_min:x1_max:200j, x2_min:x2_max:200j]  # 200 x 200 grid of sampling points
    # x1 and x2 are both 200 x 200 matrices
    grid_test = np.stack((x1.flat, x2.flat), axis=1)           # stack the flattened grids into a (40000, 2) array
    # print('grid_test:\n', grid_test)

    # Signed distance of each grid point to the decision boundaries (only used by the optional print below)
    z = clf.decision_function(grid_test)
    # print('the distance to decision plane:\n', z)

    grid_hat = clf.predict(grid_test)       # predicted class (0, 1 or 2) for every grid point
    print('grid_hat:\n', grid_hat)
    grid_hat = grid_hat.reshape(x1.shape)   # reshape back to (200, 200) so it matches x1

    cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
    cm_dark = mpl.colors.ListedColormap(['g', 'b', 'r'])

    plt.pcolormesh(x1, x2, grid_hat, cmap=cm_light)  # coloured background: the region predicted for each class
    plt.scatter(x[:, 0], x[:, 1], c=np.squeeze(y), edgecolor='k', s=50, cmap=cm_dark)  # the samples
    # plt.scatter(x_test[:, 0], x_test[:, 1], s=120, facecolor='none', zorder=10)      # highlight test points
    plt.xlabel(iris_feature[selection[0]], fontsize=20)
    plt.ylabel(iris_feature[selection[1]], fontsize=20)
    plt.xlim(x1_min, x1_max)
    plt.ylim(x2_min, x2_max)
    plt.title('svm in iris data classification', fontsize=30)
    plt.grid()
    plt.show()
```
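The grid construction inside draw() is the least obvious step, so here is the same np.mgrid / np.stack pattern on a tiny 3 x 3 grid, which makes the intermediate shapes easy to inspect:

```python
import numpy as np

# A 3 x 3 grid over [0, 1] x [10, 20] instead of the 200 x 200 grid used in draw()
g1, g2 = np.mgrid[0:1:3j, 10:20:3j]
print(g1.shape, g2.shape)                       # (3, 3) (3, 3)
points = np.stack((g1.flat, g2.flat), axis=1)   # one row per grid point
print(points.shape)                             # (9, 2) -- the shape clf.predict() expects
print(points)
```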
```python
# Run
data_path = r'D:\python learning material\iris.data'
selection = [0, 1]
clf = classifier()
# Call datadivision() once and unpack everything it returns
x, x_train, x_test, y_train, y_test, y = datadivision(data_path, selection)
train(clf, x_train, y_train)
print_accuracy(clf, x_train, y_train, x_test, y_test)
draw(clf, x, y, selection)
```
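A single 70/30 split can be lucky or unlucky. As a sanity check, the same two-feature classifier can be scored with 5-fold cross-validation on the full x and y returned by datadivision(); a sketch, reusing the names defined above:

```python
from sklearn import model_selection

scores = model_selection.cross_val_score(classifier(), x, y.ravel(), cv=5)
print('cross-validation accuracies:', scores)
print('mean accuracy: %.3f' % scores.mean())
```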