19 划分训练集与测试集
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.model_selection as ms
import sklearn.naive_bayes as nb
import matplotlib.pyplot as mp
x, y = [], []
with open('../../data/multiple1.txt', 'r') as f:
for line in f.readlines():
data = [float(substr)
for substr in line.split(',')]
x.append(data[:-1])
y.append(data[-1])
x = np.array(x)
y = np.array(y, dtype=int)
train_x, test_x, train_y, test_y = ms.train_test_split(
x, y, test_size=0.25, random_state=5)
# 创建朴素贝叶斯分类器模型
model = nb.GaussianNB()
model.fit(train_x, train_y)
l, r, h = x[:, 0].min() - 1, x[:, 0].max() + 1, 0.005
b, t, v = x[:, 1].min() - 1, x[:, 1].max() + 1, 0.005
grid_x = np.meshgrid(
np.arange(l, r, h),
np.arange(b, t, v))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)
pred_test_y = model.predict(test_x)
print((pred_test_y == test_y).sum() / pred_test_y.size)
mp.figure(num='Naive Bayes Classification',
facecolor='lightgray')
mp.title('Naive Bayes Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
cmap='brg')
mp.scatter(train_x[:, 0], train_x[:, 1], s=80,
c=train_y, cmap='RdYlBu')
mp.scatter(test_x[:, 0], test_x[:, 1], s=80,
marker='D', c=test_y, cmap='RdYlBu')
mp.scatter(test_x[:, 0], test_x[:, 1], s=80,
marker='x', c=pred_test_y, cmap='RdYlBu')
mp.show()
20 交叉验证
from __future__ import unicode_literals
import numpy as np
import sklearn.model_selection as ms
import sklearn.naive_bayes as nb
import matplotlib.pyplot as mp
x, y = [], []
with open('../../data/multiple1.txt', 'r') as f:
for line in f.readlines():
data = [float(substr)
for substr in line.split(',')]
x.append(data[:-1])
y.append(data[-1])
x = np.array(x)
y = np.array(y, dtype=int)
train_x, test_x, train_y, test_y = ms.train_test_split(
x, y, test_size=0.25, random_state=5)
# 创建朴素贝叶斯分类器模型
model = nb.GaussianNB()
f1_scores = ms.cross_val_score(model, x, y, cv=10,
scoring='f1_weighted')
print(f1_scores.mean())
model.fit(train_x, train_y)
l, r, h = x[:, 0].min() - 1, x[:, 0].max() + 1, 0.005
b, t, v = x[:, 1].min() - 1, x[:, 1].max() + 1, 0.005
grid_x = np.meshgrid(
np.arange(l, r, h),
np.arange(b, t, v))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)
pred_test_y = model.predict(test_x)
print((pred_test_y == test_y).sum() / pred_test_y.size)
mp.figure(num='Naive Bayes Classification',
facecolor='lightgray')
mp.title('Naive Bayes Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
cmap='brg')
mp.scatter(train_x[:, 0], train_x[:, 1], s=80,
c=train_y, cmap='RdYlBu')
mp.scatter(test_x[:, 0], test_x[:, 1], s=80,
marker='D', c=test_y, cmap='RdYlBu')
mp.scatter(test_x[:, 0], test_x[:, 1], s=80,
marker='x', c=pred_test_y, cmap='RdYlBu')
mp.show()
21 混淆矩阵
混淆矩阵中每行表示一个实际分类的样本,每列表示预测分类的样本,主对角线上的值表示被正确预测的样本数,用主对角线上的值比上其所在列各元素之和就是该类别的查准率,用主对角线上的值比上其所在行各元素之和就是该类别的召回率。理想情况是所有的非零元素都落在主对角线上。
sklearn.metrics.confusion_matrix(实际输出, 预测输出)->混淆矩阵
sklearn.metrics.classification_report(实际输出, 预测输出)->性能报告
代码:cm.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.model_selection as ms
import sklearn.naive_bayes as nb
import sklearn.metrics as sm
import matplotlib.pyplot as mp
x, y = [], []
with open('../../data/multiple1.txt', 'r') as f:
for line in f.readlines():
data = [float(substr)
for substr in line.split(',')]
x.append(data[:-1])
y.append(data[-1])
x = np.array(x)
y = np.array(y, dtype=int)
train_x, test_x, train_y, test_y = ms.train_test_split(
x, y, test_size=0.25, random_state=5)
# 创建朴素贝叶斯分类器模型
model = nb.GaussianNB()
f1_scores = ms.cross_val_score(model, x, y, cv=10,
scoring='f1_weighted')
print(f1_scores.mean())
model.fit(train_x, train_y)
l, r, h = x[:, 0].min() - 1, x[:, 0].max() + 1, 0.005
b, t, v = x[:, 1].min() - 1, x[:, 1].max() + 1, 0.005
grid_x = np.meshgrid(
np.arange(l, r, h),
np.arange(b, t, v))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)
pred_test_y = model.predict(test_x)
print((pred_test_y == test_y).sum() / pred_test_y.size)
cm = sm.confusion_matrix(test_y, pred_test_y)
print(cm)
cr = sm.classification_report(test_y, pred_test_y)
print(cr)
mp.figure(num='Naive Bayes Classification',
facecolor='lightgray')
mp.title('Naive Bayes Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
cmap='brg')
mp.scatter(train_x[:, 0], train_x[:, 1], s=80,
c=train_y, cmap='RdYlBu')
mp.scatter(test_x[:, 0], test_x[:, 1], s=80,
marker='D', c=test_y, cmap='RdYlBu')
mp.scatter(test_x[:, 0], test_x[:, 1], s=80,
marker='x', c=pred_test_y, cmap='RdYlBu')
mp.figure(num='Confusion Matrix', facecolor='lightgray')
mp.title('Confusion Matrix', fontsize=20)
mp.xlabel('Predicted Class', fontsize=14)
mp.ylabel('True Class', fontsize=14)
mp.tick_params(labelsize=10)
mp.imshow(cm, cmap='jet')
mp.show()
0.9949874686716793
0.98
[[23 0 0 0]
[ 0 20 1 0]
[ 0 0 34 0]
[ 1 0 0 21]]
precision recall f1-score support
0 0.96 1.00 0.98 23
1 1.00 0.95 0.98 21
2 0.97 1.00 0.99 34
3 1.00 0.95 0.98 22
avg / total 0.98 0.98 0.98 100
22 基于随机森林分类器的汽车品质评估
代码:car.py
(代码待补充)