Machine Learning Notes -- Xiao Yang

"""
demo01_scale.py - Mean removal (standardization)
"""
import numpy as np
import sklearn.preprocessing as sp

samples = np.array([[17, 100, 4000],
                    [20, 80, 5000],
                    [23, 60, 5500]])

# Standardize every column to zero mean and unit standard deviation
r_samples = sp.scale(samples)
print(r_samples)
print(r_samples.mean(axis=0))  # ~0 for each column
print(r_samples.std(axis=0))   # 1 for each column
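
As a sanity check, sp.scale can be reproduced by hand: subtract each column's mean and divide by its standard deviation. A minimal sketch, assuming the samples array defined above:

# Manual equivalent of sp.scale()
manual = (samples - samples.mean(axis=0)) / samples.std(axis=0)
print(np.allclose(manual, r_samples))  # expected: True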

"""
demo02_mms.py - Min-max (range) scaling
"""
import numpy as np
import sklearn.preprocessing as sp

samples = np.array([[17., 100., 4000.],
                    [20., 80., 5000.],
                    [23., 60., 5500.]])

mms = sp.MinMaxScaler(feature_range=(0, 1))
r_samples = mms.fit_transform(samples)
print(r_samples)

# Implement min-max scaling manually, column by column
for col in samples.T:
    col_min = col.min()
    col_max = col.max()
    # Solve k*col_min + b = 0 and k*col_max + b = 1 for (k, b)
    A = np.array([[col_min, 1], [col_max, 1]])
    B = np.array([0, 1])
    X = np.linalg.lstsq(A, B, rcond=None)[0]
    col *= X[0]
    col += X[1]
print(samples)

"""
demo03_normalization.py - Normalization
"""
import numpy as np
import sklearn.preprocessing as sp

samples = np.array([[17., 100., 4000.],
                    [20., 80., 5000.],
                    [23., 60., 5500.]])

r_samples = sp.normalize(samples, norm='l1')
print(r_samples)
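
With norm='l1', each row is divided by the sum of its absolute values, so every row of the result should sum to 1; a quick check, assuming r_samples from above:

print(np.abs(r_samples).sum(axis=1))  # expected: [1. 1. 1.]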

"""
demo04_bin.py - Binarization
"""
import numpy as np
import sklearn.preprocessing as sp

samples = np.array([[17., 100., 4000.],
                    [20., 80., 5000.],
                    [23., 60., 5500.]])

# Values above the threshold become 1, the rest become 0
binarizer = sp.Binarizer(threshold=80)
r_samples = binarizer.transform(samples)
print(r_samples)

# Manual binarization with boolean masks
samples[samples <= 80] = 0
samples[samples > 80] = 1
print(samples)

"""
demo05_ohe.py - One-hot encoding
"""
import numpy as np
import sklearn.preprocessing as sp

samples = np.array([[1, 3, 2],
                    [7, 5, 4],
                    [1, 8, 6],
                    [7, 3, 9]])

# Build the one-hot encoder object
# (note: scikit-learn >= 1.2 renames the sparse argument to sparse_output)
ohe = sp.OneHotEncoder(sparse=True, dtype='i4')
r_samples = ohe.fit_transform(samples)
print(r_samples, type(r_samples))

# Equivalent two-step form: fit first, then transform
encoder_dict = ohe.fit(samples)
r_samples = encoder_dict.transform(samples)
print(r_samples, type(r_samples))
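
Because sparse=True, fit_transform returns a SciPy sparse matrix; it can be densified for inspection:

print(r_samples.toarray())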

"""
demo06_lbe.py - Label encoder
"""
import numpy as np
import sklearn.preprocessing as sp

samples = np.array(['audi', 'ford', 'audi',
                    'toyota', 'ford', 'bmw', 'toyota',
                    'audi', 'redflag'])

# Get a label encoder object
lbe = sp.LabelEncoder()
r_samples = lbe.fit_transform(samples)
print(r_samples)

# Decode integer labels back to the original strings
r_returns = [0, 3, 3, 2, 4, 1]
returns = lbe.inverse_transform(r_returns)
print(returns)

"""
demo07_LOSS.py - Plot the loss-function surface
"""
import numpy as np
import matplotlib.pyplot as mp
from mpl_toolkits.mplot3d import axes3d

n = 1000
w0, w1 = np.meshgrid(np.linspace(-30, 30, n),
                     np.linspace(-30, 30, n))

# Loop over the 5 samples and accumulate the total sample error
loss = 0
xs = [0.5, 0.6, 0.8, 1.1, 1.4]
ys = [5.0, 5.5, 6.0, 6.8, 7.0]
for x, y in zip(xs, ys):
    loss += (w0 + w1 * x - y) ** 2 / 2

# Plot the surface
mp.figure('3D Surface', facecolor='lightgray')
mp.title('3D Surface')
ax3d = mp.gca(projection='3d')  # newer Matplotlib: fig.add_subplot(projection='3d')
ax3d.set_xlabel('w0')
ax3d.set_ylabel('w1')
ax3d.set_zlabel('loss')
ax3d.plot_surface(w0, w1, loss, rstride=30,
                  cstride=30, cmap='jet')
mp.show()

import numpy as np
import matplotlib.pyplot as mp

train_x = np.array([0.5, 0.6, 0.8, 1.1, 1.4])
train_y = np.array([5.0, 5.5, 6.0, 6.8, 7.0])

times = 1000  # number of gradient-descent iterations
lrate = 0.01  # learning rate for each update step
w0, w1 = [1], [1]
for i in range(1, times + 1):
    # d0 is the partial derivative of the loss with respect to w0
    d0 = (w0[-1] + w1[-1] * train_x - train_y).sum()
    # d1 is the partial derivative of the loss with respect to w1
    d1 = (((w0[-1] + w1[-1] * train_x) - train_y) * train_x).sum()
    # Keep updating w0 and w1
    w0.append(w0[-1] - lrate * d0)
    w1.append(w1[-1] - lrate * d1)

pred_train_y = w0[-1] + w1[-1] * train_x
mp.figure('Linear Regression', facecolor='lightgray')
mp.title('Linear Regression', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.scatter(train_x, train_y, marker='s', c='dodgerblue', alpha=0.5, s=80, label='Training')
mp.plot(train_x, pred_train_y, '--', c='limegreen', label='Regression', linewidth=1)
mp.legend()
mp.show()
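
As a quick sanity check (my addition, not part of the original notes), the closed-form least-squares line should land close to the gradient-descent result after enough iterations:

k, b = np.polyfit(train_x, train_y, 1)  # slope and intercept of the least-squares fit
print('gradient descent: w0=%.4f, w1=%.4f' % (w0[-1], w1[-1]))
print('polyfit:          w0=%.4f, w1=%.4f' % (b, k))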

"""
demo01_lr.py - Linear regression via gradient descent
"""
import numpy as np
import matplotlib.pyplot as mp

train_x = np.array([0.5, 0.6, 0.8, 1.1, 1.4])
train_y = np.array([5.0, 5.5, 6.0, 6.8, 7.0])

times = 1000  # number of gradient-descent iterations
lrate = 0.01  # learning rate for each update step
epoches = []  # iteration indices recorded along the way
w0, w1, losses = [1], [1], []
for i in range(1, times + 1):
    epoches.append(i)
    # Evaluate the loss function
    loss = ((w0[-1] + w1[-1] * train_x - train_y) ** 2).sum() / 2
    losses.append(loss)
    # Report the state of epoches, w0, w1 and loss
    print('{:4}> w0={:.6f}, w1={:.6f}, loss={:.6f}'.format(
        epoches[-1], w0[-1], w1[-1], losses[-1]))
    # d0 is the partial derivative of the loss with respect to w0
    d0 = (w0[-1] + w1[-1] * train_x - train_y).sum()
    # d1 is the partial derivative of the loss with respect to w1
    d1 = (((w0[-1] + w1[-1] * train_x) - train_y) * train_x).sum()
    # Keep updating w0 and w1
    w0.append(w0[-1] - lrate * d0)
    w1.append(w1[-1] - lrate * d1)

pred_train_y = w0[-1] + w1[-1] * train_x
mp.figure('Linear Regression', facecolor='lightgray')
mp.title('Linear Regression', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.scatter(train_x, train_y, marker='s', c='dodgerblue', alpha=0.5, s=80, label='Training')
mp.plot(train_x, pred_train_y, '--', c='limegreen', label='Regression', linewidth=1)
mp.legend()

# Plot how w0, w1 and loss change with each gradient-descent step
w0 = w0[:-1]
w1 = w1[:-1]

mp.figure('Training Progress', facecolor='lightgray')
mp.subplot(311)
mp.title('Training Progress', fontsize=16)
mp.ylabel('w0', fontsize=13)
mp.grid(linestyle=':')
mp.plot(epoches, w0, color='dodgerblue', label='w0')
mp.legend()
mp.subplot(312)
mp.ylabel('w1', fontsize=13)
mp.grid(linestyle=':')
mp.plot(epoches, w1, color='orangered', label='w1')
mp.legend()
mp.subplot(313)
mp.ylabel('loss', fontsize=13)
mp.grid(linestyle=':')
mp.plot(epoches, losses, color='red', label='loss')
mp.legend()
mp.tight_layout()

# Plot every point of the gradient-descent path on the 3-D loss surface
import mpl_toolkits.mplot3d as axes3d

grid_w0, grid_w1 = np.meshgrid(
    np.linspace(0, 9, 500),
    np.linspace(0, 3.5, 500))
grid_loss = np.zeros_like(grid_w0)
for x, y in zip(train_x, train_y):
    grid_loss += (grid_w0 + grid_w1 * x - y) ** 2 / 2

# Plot
mp.figure('Loss Function')
ax3d = mp.gca(projection='3d')
ax3d.set_xlabel('w0', fontsize=14)
ax3d.set_ylabel('w1', fontsize=14)
ax3d.set_zlabel('loss', fontsize=14)
ax3d.plot_surface(grid_w0, grid_w1, grid_loss,
                  rstride=30, cstride=30, cmap='jet')
ax3d.plot(w0, w1, losses, 'o-',
          color='orangered', label='BGD')

# Draw the gradient-descent path on a contour plot
mp.figure('Batch Gradient Descent', facecolor='lightgray')
mp.title('Batch Gradient Descent', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.contourf(grid_w0, grid_w1, grid_loss, 10, cmap='jet')
cntr = mp.contour(grid_w0, grid_w1, grid_loss, 10,
                  colors='black', linewidths=0.5)
mp.clabel(cntr, inline_spacing=0.1, fmt='%.2f',
          fontsize=8)
mp.plot(w0, w1, 'o-', c='orangered', label='BGD')
mp.legend()

mp.tight_layout()
mp.show()

"""
demo02_linearRegression.py - Linear regression with scikit-learn
"""
import numpy as np
import matplotlib.pyplot as mp
import sklearn.linear_model as lm

# Collect the data
x, y = np.loadtxt('../ml_data/single.txt',
                  delimiter=',', usecols=(0, 1),
                  unpack=True)

# Train the model
x = x.reshape(-1, 1)  # reshape x into n rows, 1 column
model = lm.LinearRegression()
model.fit(x, y)

# Predict: feed the sample x back into the model
pred_y = model.predict(x)

# Plot
mp.figure('Linear Regression', facecolor='lightgray')
mp.title('Linear Regression', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.scatter(x, y, c='dodgerblue', alpha=0.75,
           s=60, label='Sample')
mp.plot(x, pred_y, c='orangered',
        label='Regression Line')
mp.legend()
mp.show()

"""
demo03_matrics.py - Regression model evaluation APIs
"""
import numpy as np
import matplotlib.pyplot as mp
import sklearn.linear_model as lm
import sklearn.metrics as sm

# Collect the data
x, y = np.loadtxt('../ml_data/single.txt',
                  delimiter=',', usecols=(0, 1),
                  unpack=True)

# Train the model
x = x.reshape(-1, 1)  # reshape x into n rows, 1 column
model = lm.LinearRegression()
model.fit(x, y)

# Predict: feed the sample x back into the model
pred_y = model.predict(x)

# ----------------------------------
# Model evaluation
# ----------------------------------
m1 = sm.mean_absolute_error(y, pred_y)
m2 = sm.mean_squared_error(y, pred_y)
m3 = sm.median_absolute_error(y, pred_y)
r2 = sm.r2_score(y, pred_y)
print('mean_absolute_error:', m1)
print('mean_squared_error:', m2)
print('median_absolute_error:', m3)
print('r2:', r2)

# Plot
mp.figure('Linear Regression', facecolor='lightgray')
mp.title('Linear Regression', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.scatter(x, y, c='dodgerblue', alpha=0.75,
           s=60, label='Sample')
mp.plot(x, pred_y, c='orangered',
        label='Regression Line')
mp.legend()
mp.show()

"""
demo04_dump.py - Save the model
"""
import numpy as np
import sklearn.linear_model as lm
import pickle

# Collect the data
x, y = np.loadtxt('../ml_data/single.txt',
                  delimiter=',', usecols=(0, 1),
                  unpack=True)

# Train the model
x = x.reshape(-1, 1)  # reshape x into n rows, 1 column
model = lm.LinearRegression()
model.fit(x, y)

# Save the model to disk
with open('../ml_data/lm.pkl', 'wb') as f:
    pickle.dump(model, f)

print('dump success!')
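
Besides pickle, scikit-learn models are commonly persisted with joblib, which handles estimators holding large NumPy arrays more efficiently. A minimal sketch (the lm.joblib filename is my own choice):

import joblib

joblib.dump(model, '../ml_data/lm.joblib')   # save the trained model
model = joblib.load('../ml_data/lm.joblib')  # load it back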

"""
demo05_load.py - Load the saved model
"""
import numpy as np
import matplotlib.pyplot as mp
import sklearn.metrics as sm
import pickle

# Collect the data
x, y = np.loadtxt('../ml_data/single.txt',
                  delimiter=',', usecols=(0, 1),
                  unpack=True)

x = x.reshape(-1, 1)  # reshape x into n rows, 1 column

# Load the model
with open('../ml_data/lm.pkl', 'rb') as f:
    model = pickle.load(f)

# Predict: feed the sample x into the loaded model
pred_y = model.predict(x)

# ----------------------------------
# Model evaluation
# ----------------------------------
m1 = sm.mean_absolute_error(y, pred_y)
m2 = sm.mean_squared_error(y, pred_y)
m3 = sm.median_absolute_error(y, pred_y)
r2 = sm.r2_score(y, pred_y)
print('mean_absolute_error:', m1)
print('mean_squared_error:', m2)
print('median_absolute_error:', m3)
print('r2:', r2)

# Plot
mp.figure('Linear Regression', facecolor='lightgray')
mp.title('Linear Regression', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.scatter(x, y, c='dodgerblue', alpha=0.75,
           s=60, label='Sample')
mp.plot(x, pred_y, c='orangered',
        label='Regression Line')
mp.legend()
mp.show()

"""
demo06_ridge.py - Ridge regression
"""
import numpy as np
import matplotlib.pyplot as mp
import sklearn.linear_model as lm
import sklearn.metrics as sm

# Collect the data
x, y = np.loadtxt('../ml_data/abnormal.txt',
                  delimiter=',', usecols=(0, 1),
                  unpack=True)

# Train the model
x = x.reshape(-1, 1)  # reshape x into n rows, 1 column

model = lm.Ridge(150, fit_intercept=False,
                 max_iter=1000)
model.fit(x, y)

# Predict: feed the sample x back into the model
pred_y = model.predict(x)

# ----------------------------------
# Model evaluation
# ----------------------------------
m1 = sm.mean_absolute_error(y, pred_y)
m2 = sm.mean_squared_error(y, pred_y)
m3 = sm.median_absolute_error(y, pred_y)
r2 = sm.r2_score(y, pred_y)
print('mean_absolute_error:', m1)
print('mean_squared_error:', m2)
print('median_absolute_error:', m3)
print('r2:', r2)

# Plot
mp.figure('Linear Regression', facecolor='lightgray')
mp.title('Linear Regression', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.scatter(x, y, c='dodgerblue', alpha=0.75,
           s=60, label='Sample')
mp.plot(x, pred_y, c='orangered',
        label='Regression Line')
mp.legend()
mp.show()

"""
demo07_poly.py - Polynomial regression
"""
import numpy as np
import matplotlib.pyplot as mp
import sklearn.linear_model as lm
import sklearn.preprocessing as sp
import sklearn.pipeline as pl
import sklearn.metrics as sm

# Collect the data
x, y = np.loadtxt('../ml_data/single.txt',
                  delimiter=',', usecols=(0, 1),
                  unpack=True)

x = x.reshape(-1, 1)  # reshape x into n rows, 1 column

# Train a polynomial regression model: degree-10 features + linear regression
model = pl.make_pipeline(
    sp.PolynomialFeatures(10),
    lm.LinearRegression())
model.fit(x, y)

# Predict: feed the sample x back into the model
pred_y = model.predict(x)
print(sm.r2_score(y, pred_y))

# Dense grid of x values for a smooth regression curve
px = np.linspace(x.min(), x.max(), 1000)
px = px.reshape(-1, 1)
py = model.predict(px)

# Polynomial models extrapolate badly: predictions outside the
# training range of x are unreliable, as these samples show
cx = np.array([[-8], [-9], [-10], [-20]])
cy = model.predict(cx)
print(cx)
print(cy)

# Plot
mp.figure('Linear Regression', facecolor='lightgray')
mp.title('Linear Regression', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.scatter(x, y, c='dodgerblue', alpha=0.75,
           s=60, label='Sample')
mp.plot(px, py, c='orangered',
        label='Regression Line')
mp.legend()
mp.show()

"""
demo08_dt.py - Decision tree regression
"""
import sklearn.datasets as sd
import sklearn.utils as su
import sklearn.tree as st
import sklearn.metrics as sm

# Load the dataset
boston = sd.load_boston()
print(boston.data.shape, boston.data[0])      # inputs
print(boston.target.shape, boston.target[0])  # outputs
print(boston.feature_names)

# Shuffle the dataset
x, y = su.shuffle(boston.data, boston.target,
                  random_state=7)

# Split training and test sets
train_size = int(len(x) * 0.8)
train_x, test_x, train_y, test_y = \
    x[:train_size], x[train_size:], \
    y[:train_size], y[train_size:]

# Build and train the decision tree model
model = st.DecisionTreeRegressor(max_depth=4)
model.fit(train_x, train_y)

# Predict
pred_test_y = model.predict(test_x)
print(sm.r2_score(test_y, pred_test_y))
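
Note: load_boston was removed from scikit-learn in version 1.2, so the demos using it only run on older installs. On a recent version, the same shuffle/split/fit workflow can be pointed at another built-in regression dataset; a minimal sketch using the California housing data (different features, same steps):

import sklearn.datasets as sd

housing = sd.fetch_california_housing()  # downloads the data on first use
print(housing.data.shape, housing.target.shape)
print(housing.feature_names)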

"""
demo09_se.py - Ensemble methods (AdaBoost)
"""
import sklearn.datasets as sd
import sklearn.utils as su
import sklearn.tree as st
import sklearn.metrics as sm
import sklearn.ensemble as se

# Load the dataset
boston = sd.load_boston()
print(boston.data.shape, boston.data[0])      # inputs
print(boston.target.shape, boston.target[0])  # outputs
print(boston.feature_names)

# Shuffle the dataset
x, y = su.shuffle(boston.data, boston.target,
                  random_state=7)

# Split training and test sets
train_size = int(len(x) * 0.8)
train_x, test_x, train_y, test_y = \
    x[:train_size], x[train_size:], \
    y[:train_size], y[train_size:]

# Base decision tree model
model = st.DecisionTreeRegressor(max_depth=4)

# Wrap it in an AdaBoost (boosted) regressor and train
model = se.AdaBoostRegressor(model,
    n_estimators=400, random_state=7)
model.fit(train_x, train_y)

# Predict
pred_test_y = model.predict(test_x)
print(sm.r2_score(test_y, pred_test_y))

"""
demo01_fi.py - Feature importances
"""
import sklearn.datasets as sd
import sklearn.utils as su
import sklearn.tree as st
import sklearn.metrics as sm
import matplotlib.pyplot as mp
import numpy as np
import sklearn.ensemble as se

# Load the dataset
boston = sd.load_boston()
print(boston.data.shape)    # inputs
print(boston.target.shape)  # outputs
fnames = boston.feature_names
print(fnames)

# Shuffle the dataset
x, y = su.shuffle(boston.data, boston.target,
                  random_state=7)

# Split training and test sets
train_size = int(len(x) * 0.8)
train_x, test_x, train_y, test_y = \
    x[:train_size], x[train_size:], \
    y[:train_size], y[train_size:]

# Build and train the decision tree model
model = st.DecisionTreeRegressor(max_depth=4)
model.fit(train_x, train_y)

# Predict
pred_test_y = model.predict(test_x)
print(sm.r2_score(test_y, pred_test_y))

# Feature importances of the single decision tree
dt_fi = model.feature_importances_

# Feature importances of the AdaBoost (boosted) decision tree
model = se.AdaBoostRegressor(model,
    n_estimators=400, random_state=7)
model.fit(train_x, train_y)
ad_fi = model.feature_importances_

# Show the feature importances as bar charts
mp.figure('Feature Importance', facecolor='lightgray')
mp.subplot(211)
mp.title('Decision Tree', fontsize=16)
mp.ylabel('Importance', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(axis='y', linestyle=':')
x = np.arange(13)
sorted_indices = np.argsort(dt_fi)[::-1]
mp.bar(x, dt_fi[sorted_indices], 0.8,
       color='dodgerblue', label='DT FI')
mp.xticks(x, fnames[sorted_indices])
mp.legend()

mp.subplot(212)
mp.title('AdaBoostRegressor', fontsize=16)
mp.ylabel('Importance', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(axis='y', linestyle=':')
sorted_indices = np.argsort(ad_fi)[::-1]
mp.bar(x, ad_fi[sorted_indices], 0.8,
       color='orangered', label='AdaBoost FI')
mp.xticks(x, fnames[sorted_indices])
mp.legend()
mp.show()

"""
demo02_rf.py - Random forest regressor
"""
import numpy as np
import matplotlib.pyplot as mp
import sklearn.utils as su
import sklearn.ensemble as se
import sklearn.metrics as sm

# Read the bike_day.csv dataset
headers = None
data = []
with open('../ml_data/bike_day.csv', 'r') as f:
    for i, line in enumerate(f.readlines()):
        if i == 0:
            headers = line.split(',')[2:]
        else:
            data.append(line.split(',')[2:])
headers = np.array(headers)
data = np.array(data, dtype='f8')

# Arrange the dataset
x = data[:, 0:11]
y = data[:, -1]

# Split training and test sets
x, y = su.shuffle(x, y, random_state=7)
train_size = int(len(x) * 0.9)
train_x, test_x, train_y, test_y = \
    x[:train_size], x[train_size:], \
    y[:train_size], y[train_size:]

# Build and train the random forest regressor
model = se.RandomForestRegressor(max_depth=10,
    n_estimators=1000, min_samples_split=2)
model.fit(train_x, train_y)

# Predict on the test set and report the score
pred_test_y = model.predict(test_x)
print('r2 for day.csv:', sm.r2_score(test_y, pred_test_y))

# Feature importances
day_fi = model.feature_importances_
day_headers = headers[0:11]
print(day_headers)

# Read the bike_hour.csv dataset
headers = None
data = []
with open('../ml_data/bike_hour.csv', 'r') as f:
    for i, line in enumerate(f.readlines()):
        if i == 0:
            headers = line.split(',')[2:]
        else:
            data.append(line.split(',')[2:])
headers = np.array(headers)
data = np.array(data, dtype='f8')

# Arrange the dataset
x = data[:, 0:12]
y = data[:, -1]

# Split training and test sets
x, y = su.shuffle(x, y, random_state=7)
train_size = int(len(x) * 0.9)
train_x, test_x, train_y, test_y = \
    x[:train_size], x[train_size:], \
    y[:train_size], y[train_size:]

# Build and train the random forest regressor
model = se.RandomForestRegressor(max_depth=10,
    n_estimators=1000, min_samples_split=2)
model.fit(train_x, train_y)

# Predict on the test set and report the score
pred_test_y = model.predict(test_x)
print('r2 for hour.csv:', sm.r2_score(test_y, pred_test_y))

# Feature importances
hour_fi = model.feature_importances_
hour_headers = headers[0:12]

# Show the feature importances as bar charts
mp.figure('Feature Importance', facecolor='lightgray')
mp.subplot(211)
mp.title('Feature Importances', fontsize=16)
mp.ylabel('Importance', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(axis='y', linestyle=':')
x = np.arange(day_fi.size)
sorted_indices = np.argsort(day_fi)[::-1]
mp.bar(x, day_fi[sorted_indices], 0.8,
       color='dodgerblue', label='day FI')
mp.xticks(x, day_headers[sorted_indices])
mp.legend()

mp.subplot(212)
mp.ylabel('Importance', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(axis='y', linestyle=':')
x = np.arange(hour_fi.size)
sorted_indices = np.argsort(hour_fi)[::-1]
mp.bar(x, hour_fi[sorted_indices], 0.8,
       color='orangered', label='hour FI')
mp.xticks(x, hour_headers[sorted_indices])
mp.legend()
mp.show()

"""
demo03_sc.py - Simple hand-made classification
"""
import numpy as np
import matplotlib.pyplot as mp

x = np.array([
    [3, 1],
    [2, 5],
    [1, 8],
    [6, 4],
    [5, 2],
    [3, 5],
    [4, 7],
    [4, -1]])
y = np.array([0, 1, 1, 0, 0, 1, 1, 0])

# Split the plane into a 500x500 grid
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x, grid_y = np.meshgrid(
    np.linspace(l, r, 500),
    np.linspace(b, t, 500))

# grid_z holds the class label (0 / 1) of every grid point
grid_z = np.piecewise(grid_x,
    [grid_x > grid_y, grid_x < grid_y],
    [0, 1])

# Scatter plot
mp.figure('Simple Classification', facecolor='lightgray')
mp.title('Simple Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)

# Fill a background color for every grid cell
mp.pcolormesh(grid_x, grid_y, grid_z, cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=y, cmap='brg', s=80)
mp.show()

"""
demo04_lr.py - Logistic classification
"""
import numpy as np
import matplotlib.pyplot as mp
import sklearn.linear_model as lm

x = np.array([
    [3, 1],
    [2, 5],
    [1, 8],
    [6, 4],
    [5, 2],
    [3, 5],
    [4, 7],
    [4, -1]])
y = np.array([0, 1, 1, 0, 0, 1, 1, 0])

# Split the plane into a 500x500 grid
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x, grid_y = np.meshgrid(
    np.linspace(l, r, 500),
    np.linspace(b, t, 500))

# Flatten grid_x and grid_y and stack them as two columns: the test inputs
mesh_x = np.column_stack((grid_x.ravel(),
                          grid_y.ravel()))

# Build the model and predict an output for every grid point
model = lm.LogisticRegression(
    solver='liblinear', C=1)
model.fit(x, y)
mesh_y = model.predict(mesh_x)

# Reshape the predictions back to 500x500 to draw the class boundary
grid_z = mesh_y.reshape(grid_x.shape)

# Scatter plot
mp.figure('Simple Classification', facecolor='lightgray')
mp.title('Simple Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)

# Fill a background color for every grid cell
mp.pcolormesh(grid_x, grid_y, grid_z, cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=y, cmap='brg', s=80)
mp.show()

"""
demo05_nb.py - Naive Bayes classification
"""
import sklearn.naive_bayes as nb
import numpy as np
import matplotlib.pyplot as mp

data = np.loadtxt('../ml_data/multiple1.txt',
                  delimiter=',', usecols=(0, 1, 2),
                  unpack=False)
print(data.shape)
x = data[:, :2]
y = data[:, 2]

# Create and train the model
model = nb.GaussianNB()
model.fit(x, y)

# Paint the background to show the class boundary:
# split the plane into a 500x500 grid
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x, grid_y = np.meshgrid(
    np.linspace(l, r, 500),
    np.linspace(b, t, 500))

# Flatten grid_x and grid_y and stack them as two columns: the test inputs
mesh_x = np.column_stack((grid_x.ravel(),
                          grid_y.ravel()))
mesh_y = model.predict(mesh_x)
grid_z = mesh_y.reshape(grid_x.shape)

# Draw all the sample points
mp.figure('Naive Bayes Classification', facecolor='lightgray')
mp.title('Naive Bayes Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x, grid_y, grid_z, cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=y, cmap='brg', s=80)
mp.show()

"""
demo04_mlr.py - Multiclass logistic classification
"""
import numpy as np
import matplotlib.pyplot as mp
import sklearn.linear_model as lm

x = np.array([
    [4, 7],
    [3.5, 8],
    [3.1, 6.2],
    [0.5, 1],
    [1, 2],
    [1.2, 1.9],
    [6, 2],
    [5.7, 1.5],
    [5.4, 2.2]])
y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])

# Split the plane into a 500x500 grid
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x, grid_y = np.meshgrid(
    np.linspace(l, r, 500),
    np.linspace(b, t, 500))

# Flatten grid_x and grid_y and stack them as two columns: the test inputs
mesh_x = np.column_stack((grid_x.ravel(),
                          grid_y.ravel()))

# Build the model and predict an output for every grid point
model = lm.LogisticRegression(
    solver='liblinear', C=200)
model.fit(x, y)
mesh_y = model.predict(mesh_x)

# Reshape the predictions back to 500x500 to draw the class boundaries
grid_z = mesh_y.reshape(grid_x.shape)

# Scatter plot
mp.figure('Simple Classification', facecolor='lightgray')
mp.title('Simple Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)

# Fill a background color for every grid cell
mp.pcolormesh(grid_x, grid_y, grid_z, cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=y, cmap='brg', s=80)
mp.show()

"""
demo01_tts.py - Training/test split and cross-validation
"""
import sklearn.naive_bayes as nb
import numpy as np
import matplotlib.pyplot as mp
import sklearn.model_selection as ms

data = np.loadtxt('../ml_data/multiple1.txt',
                  delimiter=',', usecols=(0, 1, 2),
                  unpack=False)
print(data.shape)
x = data[:, :2]
y = data[:, 2]

# Split the training and test sets
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=7)

# Create the model
model = nb.GaussianNB()

# Cross-validate the freshly created model
ac = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='accuracy')
print(ac.mean())
pw = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='precision_weighted')
print(pw.mean())
rw = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='recall_weighted')
print(rw.mean())
f1 = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='f1_weighted')
print(f1.mean())

model.fit(train_x, train_y)

# Apply the trained model to the test set and report accuracy
pred_test_y = model.predict(test_x)
print((test_y == pred_test_y).sum() / test_y.size)

# Paint the background to show the class boundary:
# split the plane into a 500x500 grid
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x, grid_y = np.meshgrid(
    np.linspace(l, r, 500),
    np.linspace(b, t, 500))

# Flatten grid_x and grid_y and stack them as two columns: the test inputs
mesh_x = np.column_stack((grid_x.ravel(),
                          grid_y.ravel()))
mesh_y = model.predict(mesh_x)
grid_z = mesh_y.reshape(grid_x.shape)

# Draw all the sample points
mp.figure('Naive Bayes Classification', facecolor='lightgray')
mp.title('Naive Bayes Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x, grid_y, grid_z, cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.show()

"""
demo03_cm.py - Confusion matrix
"""
import sklearn.naive_bayes as nb
import numpy as np
import matplotlib.pyplot as mp
import sklearn.model_selection as ms
import sklearn.metrics as sm

data = np.loadtxt('../ml_data/multiple1.txt',
                  delimiter=',', usecols=(0, 1, 2),
                  unpack=False)
print(data.shape)
x = data[:, :2]
y = data[:, 2]

# Split the training and test sets
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=7)

# Create the model
model = nb.GaussianNB()

# Cross-validate the freshly created model
ac = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='accuracy')
print(ac.mean())
pw = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='precision_weighted')
print(pw.mean())
rw = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='recall_weighted')
print(rw.mean())
f1 = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='f1_weighted')
print(f1.mean())

model.fit(train_x, train_y)

# Apply the trained model to the test set
pred_test_y = model.predict(test_x)

# Report the confusion matrix
cm = sm.confusion_matrix(test_y, pred_test_y)
print(cm)

mp.imshow(cm, cmap='gray')
mp.show()
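
Per-class precision and recall can be read straight off the confusion matrix; a quick sketch using the cm computed above (scikit-learn puts true labels on the rows and predictions on the columns):

precision = cm.diagonal() / cm.sum(axis=0)  # TP / predicted-positive per class
recall = cm.diagonal() / cm.sum(axis=1)     # TP / actual-positive per class
print('precision per class:', precision)
print('recall per class:', recall)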

"""
demo03_cr.py - Classification report
"""
import sklearn.naive_bayes as nb
import numpy as np
import sklearn.model_selection as ms
import sklearn.metrics as sm

data = np.loadtxt('../ml_data/multiple1.txt',
                  delimiter=',', usecols=(0, 1, 2),
                  unpack=False)
print(data.shape)
x = data[:, :2]
y = data[:, 2]

# Split the training and test sets
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=7)

# Create the model
model = nb.GaussianNB()

# Cross-validate the freshly created model
ac = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='accuracy')
print(ac.mean())
pw = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='precision_weighted')
print(pw.mean())
rw = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='recall_weighted')
print(rw.mean())
f1 = ms.cross_val_score(model, train_x, train_y,
                        cv=5, scoring='f1_weighted')
print(f1.mean())

model.fit(train_x, train_y)

# Apply the trained model to the test set
pred_test_y = model.predict(test_x)

# Report the confusion matrix
cm = sm.confusion_matrix(test_y, pred_test_y)
print(cm)

# Report the classification report
cr = sm.classification_report(test_y, pred_test_y)
print(cr)

"""
demo05_car.py - Car evaluation with a random forest
"""
import numpy as np
import sklearn.preprocessing as sp
import sklearn.ensemble as se
import sklearn.model_selection as ms

def f(s):
    return str(s, encoding='utf-8')

# Read the file
data = np.loadtxt('../ml_data/car.txt',
    delimiter=',', dtype='U20',
    converters={0: f, 1: f, 2: f, 3: f, 4: f, 5: f, 6: f})

# Arrange the training inputs and outputs
data = data.T
train_x, train_y = [], []
encoders = []
for col in range(len(data)):
    lbe = sp.LabelEncoder()
    if col < len(data) - 1:  # not the last column
        train_x.append(lbe.fit_transform(data[col]))
    else:
        train_y = lbe.fit_transform(data[col])
    encoders.append(lbe)  # keep each column's label encoder

train_x = np.array(train_x).T
print(train_x)

# Cross-validate, then train the model
model = se.RandomForestClassifier(max_depth=6,
    n_estimators=200, random_state=7)
cv = ms.cross_val_score(model, train_x, train_y,
    cv=5, scoring='f1_weighted')
model.fit(train_x, train_y)

# Test the model with hand-made samples
data = [
    ['high', 'med', '5more', '4', 'big', 'low', 'unacc'],
    ['high', 'high', '4', '4', 'med', 'med', 'acc'],
    ['low', 'low', '2', '4', 'small', 'high', 'good'],
    ['low', 'med', '3', '4', 'med', 'high', 'vgood']]

# All LabelEncoders must be kept from training; at test time each
# column is encoded with the same encoder, then predictions are made
data = np.array(data).T
test_x, test_y = [], []
for col in range(len(data)):
    encoder = encoders[col]
    if col < len(data) - 1:
        test_x.append(encoder.transform(data[col]))
    else:
        test_y = encoder.transform(data[col])
test_x = np.array(test_x).T
pred_test_y = model.predict(test_x)
print(encoders[-1].inverse_transform(pred_test_y))
print(encoders[-1].inverse_transform(test_y))

"""
demo06_vc.py - Validation curve (n_estimators)
"""
import numpy as np
import sklearn.preprocessing as sp
import sklearn.ensemble as se
import sklearn.model_selection as ms
import matplotlib.pyplot as mp

def f(s):
    return str(s, encoding='utf-8')

# Read the file
data = np.loadtxt('../ml_data/car.txt',
    delimiter=',', dtype='U20',
    converters={0: f, 1: f, 2: f, 3: f, 4: f, 5: f, 6: f})

# Arrange the training inputs and outputs
data = data.T
train_x, train_y = [], []
encoders = []
for col in range(len(data)):
    lbe = sp.LabelEncoder()
    if col < len(data) - 1:  # not the last column
        train_x.append(lbe.fit_transform(data[col]))
    else:
        train_y = lbe.fit_transform(data[col])
    encoders.append(lbe)  # keep each column's label encoder

train_x = np.array(train_x).T
print(train_x)

# Build the model
model = se.RandomForestClassifier(max_depth=6,
    n_estimators=140, random_state=7)

# Use a validation curve to pick the best n_estimators
# (newer scikit-learn requires the keywords param_name= and param_range=)
train_scores, test_scores = ms.validation_curve(
    model, train_x, train_y, 'n_estimators',
    np.arange(100, 200, 10), cv=5)

# Plot hyperparameter value against model performance
x = np.arange(100, 200, 10)
y = test_scores.mean(axis=1)
mp.figure('n_estimators', facecolor='lightgray')
mp.title('n_estimators', fontsize=20)
mp.xlabel('n_estimators', fontsize=14)
mp.ylabel('F1 Score', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(x, y, 'o-', c='dodgerblue', label='Training')
mp.xticks(x)
mp.legend()
mp.show()

model.fit(train_x, train_y)

# Test the model with hand-made samples
data = [
    ['high', 'med', '5more', '4', 'big', 'low', 'unacc'],
    ['high', 'high', '4', '4', 'med', 'med', 'acc'],
    ['low', 'low', '2', '4', 'small', 'high', 'good'],
    ['low', 'med', '3', '4', 'med', 'high', 'vgood']]

# All LabelEncoders must be kept from training; at test time each
# column is encoded with the same encoder, then predictions are made
data = np.array(data).T
test_x, test_y = [], []
for col in range(len(data)):
    encoder = encoders[col]
    if col < len(data) - 1:
        test_x.append(encoder.transform(data[col]))
    else:
        test_y = encoder.transform(data[col])
test_x = np.array(test_x).T
pred_test_y = model.predict(test_x)
print(encoders[-1].inverse_transform(pred_test_y))
print(encoders[-1].inverse_transform(test_y))

"""
demo06_vc.py - Validation curve (max_depth)
"""
import numpy as np
import sklearn.preprocessing as sp
import sklearn.ensemble as se
import sklearn.model_selection as ms
import matplotlib.pyplot as mp

def f(s):
    return str(s, encoding='utf-8')

# Read the file
data = np.loadtxt('../ml_data/car.txt',
    delimiter=',', dtype='U20',
    converters={0: f, 1: f, 2: f, 3: f, 4: f, 5: f, 6: f})

# Arrange the training inputs and outputs
data = data.T
train_x, train_y = [], []
encoders = []
for col in range(len(data)):
    lbe = sp.LabelEncoder()
    if col < len(data) - 1:  # not the last column
        train_x.append(lbe.fit_transform(data[col]))
    else:
        train_y = lbe.fit_transform(data[col])
    encoders.append(lbe)  # keep each column's label encoder

train_x = np.array(train_x).T
print(train_x)

# Build the model
model = se.RandomForestClassifier(max_depth=9,
    n_estimators=140, random_state=7)

# Use a validation curve to pick the best max_depth
# (newer scikit-learn requires the keywords param_name= and param_range=)
train_scores, test_scores = ms.validation_curve(
    model, train_x, train_y, 'max_depth',
    np.arange(5, 15), cv=5)

# Plot hyperparameter value against model performance
x = np.arange(5, 15)
y = test_scores.mean(axis=1)
mp.figure('max_depth', facecolor='lightgray')
mp.title('max_depth', fontsize=20)
mp.xlabel('max_depth', fontsize=14)
mp.ylabel('F1 Score', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(x, y, 'o-', c='dodgerblue', label='Training')
mp.xticks(x)
mp.legend()
mp.show()

model.fit(train_x, train_y)

# Test the model with hand-made samples
data = [
    ['high', 'med', '5more', '4', 'big', 'low', 'unacc'],
    ['high', 'high', '4', '4', 'med', 'med', 'acc'],
    ['low', 'low', '2', '4', 'small', 'high', 'good'],
    ['low', 'med', '3', '4', 'med', 'high', 'vgood']]

# All LabelEncoders must be kept from training; at test time each
# column is encoded with the same encoder, then predictions are made
data = np.array(data).T
test_x, test_y = [], []
for col in range(len(data)):
    encoder = encoders[col]
    if col < len(data) - 1:
        test_x.append(encoder.transform(data[col]))
    else:
        test_y = encoder.transform(data[col])
test_x = np.array(test_x).T
pred_test_y = model.predict(test_x)
print(encoders[-1].inverse_transform(pred_test_y))
print(encoders[-1].inverse_transform(test_y))

"""
demo08_lc.py - Learning curve
"""
import numpy as np
import sklearn.preprocessing as sp
import sklearn.ensemble as se
import sklearn.model_selection as ms
import matplotlib.pyplot as mp
import sklearn.metrics as sm

def f(s):
    return str(s, encoding='utf-8')

# Read the file
data = np.loadtxt('../ml_data/car.txt',
    delimiter=',', dtype='U20',
    converters={0: f, 1: f, 2: f, 3: f, 4: f, 5: f, 6: f})

# Arrange the training inputs and outputs
data = data.T
train_x, train_y = [], []
encoders = []
for col in range(len(data)):
    lbe = sp.LabelEncoder()
    if col < len(data) - 1:  # not the last column
        train_x.append(lbe.fit_transform(data[col]))
    else:
        train_y = lbe.fit_transform(data[col])
    encoders.append(lbe)  # keep each column's label encoder

train_x = np.array(train_x).T
print(train_x)

# Build the model
model = se.RandomForestClassifier(max_depth=9,
    n_estimators=140, random_state=7)

# Use the learning curve to pick a good training-set size
train_sizes = np.arange(0.4, 0.8, 0.05)
_, train_scores, test_scores = ms.learning_curve(
    model, train_x, train_y,
    train_sizes=train_sizes, cv=5)

# Plot training-set size against model performance
y = test_scores.mean(axis=1)
mp.figure('Learning Curve', facecolor='lightgray')
mp.title('Learning Curve', fontsize=20)
mp.xlabel('Training Set Size', fontsize=14)
mp.ylabel('F1 Score', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(train_sizes, y, 'o-', c='dodgerblue', label='Training')
mp.xticks(train_sizes)
mp.legend()
mp.show()

# The learning curve suggests ~70% of the data works well as the training set
train_x, test_x, train_y, test_y = ms.train_test_split(
    train_x, train_y, test_size=0.3, random_state=7)
model.fit(train_x, train_y)

# Test on the held-out test set
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

# Test the model with hand-made samples
data = [
    ['high', 'med', '5more', '4', 'big', 'low', 'unacc'],
    ['high', 'high', '4', '4', 'med', 'med', 'acc'],
    ['low', 'low', '2', '4', 'small', 'high', 'good'],
    ['low', 'med', '3', '4', 'med', 'high', 'vgood']]

# All LabelEncoders must be kept from training; at test time each
# column is encoded with the same encoder, then predictions are made
data = np.array(data).T
test_x, test_y = [], []
for col in range(len(data)):
    encoder = encoders[col]
    if col < len(data) - 1:
        test_x.append(encoder.transform(data[col]))
    else:
        test_y = encoder.transform(data[col])
test_x = np.array(test_x).T
pred_test_y = model.predict(test_x)
print(encoders[-1].inverse_transform(pred_test_y))
print(encoders[-1].inverse_transform(test_y))

"""
demo09_svm.py - Support vector machine
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

data = np.loadtxt('../ml_data/multiple2.txt',
                  delimiter=',', dtype='f8')
x = data[:, :-1]
y = data[:, -1]

# Classify with an SVM
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)
model = svm.SVC(kernel='rbf')
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

# Draw the classification boundary
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
                     np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)

mp.figure('SVM RBF Classification', facecolor='lightgray')
mp.title('SVM RBF Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
              cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.show()

"""
demo01_poly.py - SVM with a polynomial kernel
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

data = np.loadtxt('../ml_data/multiple2.txt',
                  delimiter=',', dtype='f8')
x = data[:, :-1]
y = data[:, -1]

# Classify with an SVM
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)
model = svm.SVC(kernel='poly', degree=3)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

# Draw the classification boundary
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
                     np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)

mp.figure('SVM Poly Classification', facecolor='lightgray')
mp.title('SVM Poly Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
              cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.show()

"""
demo02_rbf.py - SVM with an RBF kernel
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

data = np.loadtxt('../ml_data/multiple2.txt',
                  delimiter=',', dtype='f8')
x = data[:, :-1]
y = data[:, -1]

# Classify with an SVM
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)
model = svm.SVC(kernel='rbf', gamma=0.01, C=600,
                probability=True)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

# New samples
prob_x = np.array([
    [2, 1.5],
    [8, 9],
    [4.8, 5.2],
    [4, 4],
    [2.5, 7],
    [7.6, 2],
    [5.4, 5.9]])
pred_prob_y = model.predict(prob_x)
probs = model.predict_proba(prob_x)
print(probs)

# Draw the classification boundary
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
                     np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)

mp.figure('Probability', facecolor='lightgray')
mp.title('Probability', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
              cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.scatter(prob_x[:, 0], prob_x[:, 1], c=pred_prob_y,
           cmap='jet_r', s=80, marker='D')
for i in range(len(probs)):
    mp.annotate(
        '{}% {}%'.format(
            round(probs[i, 0] * 100, 2),
            round(probs[i, 1] * 100, 2)),
        xy=(prob_x[i, 0], prob_x[i, 1]),
        xytext=(12, -12),
        textcoords='offset points',
        horizontalalignment='left',
        verticalalignment='top',
        fontsize=9,
        bbox={'boxstyle': 'round,pad=0.6',
              'fc': 'orange', 'alpha': 0.8})
mp.show()

"""
demo03_balance.py - Class balancing
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

data = np.loadtxt('../ml_data/imbalance.txt',
                  delimiter=',', dtype='f8')
x = data[:, :-1]
y = data[:, -1]

# Classify with an SVM, weighting classes inversely to their frequency
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)
model = svm.SVC(kernel='linear',
                class_weight='balanced')
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

# Draw the classification boundary
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
                     np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)

mp.figure('Class Balanced', facecolor='lightgray')
mp.title('Class Balanced', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
              cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=y, cmap='brg', s=80)
mp.show()

"""
demo04_prob.py - Confidence probabilities
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

data = np.loadtxt('../ml_data/multiple2.txt',
                  delimiter=',', dtype='f8')
x = data[:, :-1]
y = data[:, -1]

# Classify with an SVM; probability=True enables predict_proba
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)
model = svm.SVC(kernel='rbf', gamma=0.01, C=600,
                probability=True)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

# New samples
prob_x = np.array([
    [2, 1.5],
    [8, 9],
    [4.8, 5.2],
    [4, 4],
    [2.5, 7],
    [7.6, 2],
    [5.4, 5.9]])
pred_prob_y = model.predict(prob_x)
probs = model.predict_proba(prob_x)
print(probs)

# Draw the classification boundary
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
                     np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)

mp.figure('Probability', facecolor='lightgray')
mp.title('Probability', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
              cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.scatter(prob_x[:, 0], prob_x[:, 1], c=pred_prob_y,
           cmap='jet_r', s=80, marker='D')
for i in range(len(probs)):
    mp.annotate(
        '{}% {}%'.format(
            round(probs[i, 0] * 100, 2),
            round(probs[i, 1] * 100, 2)),
        xy=(prob_x[i, 0], prob_x[i, 1]),
        xytext=(12, -12),
        textcoords='offset points',
        horizontalalignment='left',
        verticalalignment='top',
        fontsize=9,
        bbox={'boxstyle': 'round,pad=0.6',
              'fc': 'orange', 'alpha': 0.8})
mp.show()

"""
demo05_gridsearch.py - Grid search
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

data = np.loadtxt('../ml_data/multiple2.txt',
                  delimiter=',', dtype='f8')
x = data[:, :-1]
y = data[:, -1]

# Classify with an SVM
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)
model = svm.SVC(probability=True)

# Pick the best model with a grid search
params = [{'kernel': ['linear'], 'C': [1, 10, 100, 1000]},
          {'kernel': ['poly'], 'C': [1], 'degree': [2, 3]},
          {'kernel': ['rbf'], 'C': [1, 10, 100, 1000],
           'gamma': [1, 0.1, 0.01, 0.001]}]
model = ms.GridSearchCV(model, params, cv=5)
model.fit(train_x, train_y)

print(model.best_params_)
print(model.best_score_)
print(model.best_estimator_)

# Report every hyperparameter combination and its score
for param, score in zip(
        model.cv_results_['params'],
        model.cv_results_['mean_test_score']):
    print(param, '->', score)

pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

# New samples
prob_x = np.array([
    [2, 1.5],
    [8, 9],
    [4.8, 5.2],
    [4, 4],
    [2.5, 7],
    [7.6, 2],
    [5.4, 5.9]])
pred_prob_y = model.predict(prob_x)
probs = model.predict_proba(prob_x)
print(probs)

# Draw the classification boundary
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
                     np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)

mp.figure('Probability', facecolor='lightgray')
mp.title('Probability', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
              cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.scatter(prob_x[:, 0], prob_x[:, 1], c=pred_prob_y,
           cmap='jet_r', s=80, marker='D')
for i in range(len(probs)):
    mp.annotate(
        '{}% {}%'.format(
            round(probs[i, 0] * 100, 2),
            round(probs[i, 1] * 100, 2)),
        xy=(prob_x[i, 0], prob_x[i, 1]),
        xytext=(12, -12),
        textcoords='offset points',
        horizontalalignment='left',
        verticalalignment='top',
        fontsize=9,
        bbox={'boxstyle': 'round,pad=0.6',
              'fc': 'orange', 'alpha': 0.8})
mp.show()

"""
demo06_event.py - Event prediction

1. Read the file and arrange a 2-D array: data shape (5040, 5)
2. Parse data; after preprocessing, arrange the input and output sets
   x: (5040, 4)  y: (5040,)
   Preprocessing: non-numeric string features get label encoding,
   numeric string features get a plain numeric conversion
3. Split the training and test sets
4. Build and train an SVM model; test it on the test set
5. Evaluate the model
6. Apply it to the business case
"""
import numpy as np
import sklearn.preprocessing as sp
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm

class DigitEncoder():
    # A digit encoder mimicking LabelEncoder's interface

    def fit_transform(self, y):
        return y.astype('i4')

    def transform(self, y):
        return y.astype('i4')

    def inverse_transform(self, y):
        return y.astype('str')

data = []
with open('../ml_data/events.txt', 'r') as f:
    for line in f.readlines():
        data.append(line.split(','))
data = np.array(data)
data = np.delete(data, 1, axis=1)
cols = data.shape[1]  # total number of columns
x, y = [], []
encoders = []
for i in range(cols):
    col = data[:, i]
    # Does the current column hold digit strings?
    if col[0].isdigit():
        encoder = DigitEncoder()
    else:
        encoder = sp.LabelEncoder()
    # Encode the column with the chosen encoder
    if i < cols - 1:
        x.append(encoder.fit_transform(col))
    else:
        y = encoder.fit_transform(col)
    encoders.append(encoder)

x = np.array(x).T
y = np.array(y)

# Split the training and test sets
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=7)

# Build the model
model = svm.SVC(kernel='rbf', class_weight='balanced')
model.fit(train_x, train_y)

# Test
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

# Business application
data = [['Tuesday', '13:30:00', '21', '23']]
data = np.array(data).T
x = []
for row in range(len(data)):
    encoder = encoders[row]
    x.append(encoder.transform(data[row]))
x = np.array(x).T
pred_y = model.predict(x)
print(encoders[-1].inverse_transform(pred_y))

"""
demo07_07.py - Traffic-volume prediction (regression)
"""
import numpy as np
import sklearn.preprocessing as sp
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm

class DigitEncoder():
    def fit_transform(self, y):
        return y.astype(int)

    def transform(self, y):
        return y.astype(int)

    def inverse_transform(self, y):
        return y.astype(str)

# Regression dataset
data = np.loadtxt('../ml_data/traffic.txt',
                  delimiter=',', dtype='U20')
data = data.T
encoders, x = [], []
for row in range(len(data)):
    if data[row][0].isdigit():
        encoder = DigitEncoder()
    else:
        encoder = sp.LabelEncoder()
    if row < len(data) - 1:
        x.append(encoder.fit_transform(data[row]))
    else:
        y = encoder.fit_transform(data[row])
    encoders.append(encoder)
x = np.array(x).T
train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)

# Support vector regressor
model = svm.SVR(kernel='rbf', C=10, epsilon=0.2)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
print(sm.r2_score(test_y, pred_test_y))

# Business application
data = [['Tuesday', '13:35', 'San Francisco', 'yes']]
data = np.array(data).T
x = []
for row in range(len(data)):
    encoder = encoders[row]
    x.append(encoder.transform(data[row]))
x = np.array(x).T
pred_y = model.predict(x)
print(int(pred_y))

"""
demo08_kmeans.py - K-means clustering
"""
import numpy as np
import sklearn.cluster as sc
import matplotlib.pyplot as mp

x = np.loadtxt('../ml_data/multiple3.txt',
               delimiter=',')

# Build the clustering model
model = sc.KMeans(n_clusters=4)
model.fit(x)

# Cluster label of each sample: 0/1/2/3
pred_y = model.labels_

# All the cluster centers
centers = model.cluster_centers_
print(centers)

# Draw the cluster boundaries
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
                     np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)

mp.figure('K-Means Cluster', facecolor='lightgray')
mp.title('K-Means Cluster', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
              cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=pred_y,
           cmap='brg', s=80)
mp.scatter(centers[:, 0], centers[:, 1],
           c='red', marker='+', s=500)

mp.show()

import numpy as np
import sklearn.cluster as sc
import matplotlib.pyplot as mp
import scipy.misc as sm

# Quantize the colors in an image with K-means clustering
def quant(image, n_clusters):
    # x: array of the brightness values of all pixels
    x = image.reshape(-1, 1)
    model = sc.KMeans(n_clusters=n_clusters)
    model.fit(x)
    # Cluster label of each pixel's brightness: 0/1/2/3
    # y = [0, 1, 3, 0, 1, 2, 3, 1, 2, 0, 3, 2, 1, 3, 1, 0, 2 ...]
    y = model.labels_
    # Cluster centers, e.g. [30, 70, 110, 190]
    centers = model.cluster_centers_.ravel()
    return centers[y].reshape(image.shape)

original = sm.imread('../ml_data/lily.jpg', True)
quant4 = quant(original, 4)
quant3 = quant(original, 3)
quant2 = quant(original, 2)
mp.figure('Image Quant', facecolor='lightgray')
mp.subplot(221)
mp.title('Original', fontsize=16)
mp.axis('off')
mp.imshow(original, cmap='gray')
mp.subplot(222)
mp.title('Quant-4', fontsize=16)
mp.axis('off')
mp.imshow(quant4, cmap='gray')
mp.subplot(223)
mp.title('Quant-3', fontsize=16)
mp.axis('off')
mp.imshow(quant3, cmap='gray')
mp.subplot(224)
mp.title('Quant-2', fontsize=16)
mp.axis('off')
mp.imshow(quant2, cmap='gray')
mp.tight_layout()
mp.show()
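
scipy.misc.imread was removed in SciPy 1.2, so on a recent SciPy the demo above needs a different image reader. A minimal sketch using Matplotlib's imread plus a standard luma conversion (my own substitution, not from the original notes):

import numpy as np
import matplotlib.pyplot as mp

rgb = mp.imread('../ml_data/lily.jpg').astype(float)
# Weighted average of the R, G, B channels gives a grayscale image
gray = rgb @ np.array([0.299, 0.587, 0.114])
quant4 = quant(gray, 4)  # then proceed as above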

"""
demo10_meanshift.py - Mean shift clustering
"""
import numpy as np
import sklearn.cluster as sc
import matplotlib.pyplot as mp

x = np.loadtxt('../ml_data/multiple3.txt',
               delimiter=',')

# Build the clustering model
bw = sc.estimate_bandwidth(x, n_samples=len(x),
                           quantile=0.2)
model = sc.MeanShift(bandwidth=bw, bin_seeding=True)
model.fit(x)

# Cluster label of each sample: 0/1/2/3
pred_y = model.labels_

# All the cluster centers
centers = model.cluster_centers_
print(centers)

# Draw the cluster boundaries
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
                     np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)

mp.figure('Mean Shift Cluster', facecolor='lightgray')
mp.title('Mean Shift Cluster', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
              cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=pred_y,
           cmap='brg', s=80)
mp.scatter(centers[:, 0], centers[:, 1],
           c='red', marker='+', s=500)

mp.show()

"""
demo01_amc.py - Agglomerative (hierarchical) clustering
"""
import numpy as np
import sklearn.cluster as sc
import matplotlib.pyplot as mp

x = np.loadtxt('../ml_data/multiple3.txt',
               delimiter=',')

# Agglomerative clusterer
model = sc.AgglomerativeClustering(n_clusters=4)
pred_y = model.fit_predict(x)
mp.figure('Agglomerative Cluster', facecolor='lightgray')
mp.title('Agglomerative Cluster', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.scatter(x[:, 0], x[:, 1], c=pred_y,
           cmap='brg', s=80)
mp.show()

"""
Agglomerative clustering with a nearest-neighbor connectivity constraint
"""
import numpy as np
import sklearn.cluster as sc
import sklearn.neighbors as nb
import matplotlib.pyplot as mp

n_samples = 500
x = np.linspace(-1, 1, n_samples)
y = np.sin(x * 2 * np.pi)
n = 0.3 * np.random.rand(n_samples, 2)
x = np.column_stack((x, y)) + n

# Agglomerative clusterer without a connectivity constraint
model_nonc = sc.AgglomerativeClustering(linkage='average', n_clusters=3)
pred_y_nonc = model_nonc.fit_predict(x)

# Nearest-neighbor connectivity graph
conn = nb.kneighbors_graph(x, 10, include_self=False)

# Agglomerative clusterer with a connectivity constraint
model_conn = sc.AgglomerativeClustering(
    linkage='average', n_clusters=3, connectivity=conn)
pred_y_conn = model_conn.fit_predict(x)

mp.figure('Nonconnectivity', facecolor='lightgray')
mp.title('Nonconnectivity', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.scatter(x[:, 0], x[:, 1], c=pred_y_nonc, cmap='brg', alpha=0.5, s=30)
mp.figure('Connectivity', facecolor='lightgray')
mp.title('Connectivity', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.scatter(x[:, 0], x[:, 1], c=pred_y_conn, cmap='brg', alpha=0.5, s=30)
mp.show()

"""
demo03_silhouette.py - Silhouette score
"""
import numpy as np
import sklearn.cluster as sc
import matplotlib.pyplot as mp
import sklearn.metrics as sm

x = np.loadtxt('../ml_data/multiple3.txt',
               delimiter=',')

# Build the clustering model
model = sc.KMeans(n_clusters=4)
model.fit(x)

# Cluster label of each sample: 0/1/2/3
pred_y = model.labels_

# Report the silhouette score
s = sm.silhouette_score(x, pred_y, sample_size=len(x),
                        metric='euclidean')
print(s)

# All the cluster centers
centers = model.cluster_centers_
print(centers)

# Draw the cluster boundaries
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
                     np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)

mp.figure('K-Means Cluster', facecolor='lightgray')
mp.title('K-Means Cluster', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
              cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=pred_y,
           cmap='brg', s=80)
mp.scatter(centers[:, 0], centers[:, 1],
           c='red', marker='+', s=500)

mp.show()

"""
demo04_dbscan.py - DBSCAN clustering
"""
import numpy as np
import sklearn.cluster as sc
import sklearn.metrics as sm
import matplotlib.pyplot as mp

x = np.loadtxt('../ml_data/perf.txt', delimiter=',')
epsilons, scores, models = np.linspace(0.3, 1.2, 10), [], []
for epsilon in epsilons:
    # DBSCAN clusterer
    model = sc.DBSCAN(eps=epsilon, min_samples=5)
    model.fit(x)
    score = sm.silhouette_score(
        x, model.labels_, sample_size=len(x), metric='euclidean')
    scores.append(score)
    models.append(model)
scores = np.array(scores)
best_index = scores.argmax()
best_epsilon = epsilons[best_index]
print(best_epsilon)
best_score = scores[best_index]
print(best_score)
best_model = models[best_index]

# Separate core, periphery and offset (noise) samples
# and draw each group with a different marker
core_mask = np.zeros(len(x), dtype=bool)
core_mask[best_model.core_sample_indices_] = True
offset_mask = best_model.labels_ == -1
periphery_mask = ~(core_mask | offset_mask)
mp.figure('DBSCAN Cluster', facecolor='lightgray')
mp.title('DBSCAN Cluster', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
labels = best_model.labels_
mp.scatter(x[core_mask][:, 0], x[core_mask][:, 1], c=labels[core_mask],
           cmap='brg', s=80, label='Core')
mp.scatter(x[periphery_mask][:, 0], x[periphery_mask][:, 1], alpha=0.5,
           c=labels[periphery_mask], cmap='brg', marker='s', s=80, label='Periphery')
mp.scatter(x[offset_mask][:, 0], x[offset_mask][:, 1],
           c=labels[offset_mask], cmap='brg', marker='x', s=80, label='Offset')
mp.legend()
mp.show()

"""
demo05_movie.py - Movie recommendation
"""
import json
import numpy as np

with open('../ml_data/ratings.json', 'r') as f:
    ratings = json.loads(f.read())

# Get the list of all users
users = list(ratings.keys())

# Matrix of user-to-user similarity scores
scmat = []
for user1 in users:
    scrow = []  # similarity scores of user1 against every user
    for user2 in users:
        movies = set()  # movies both users have rated
        for movie in ratings[user1]:
            if movie in ratings[user2]:
                movies.add(movie)
        # Compute a similarity score from the shared movies
        if len(movies) == 0:
            score = 0
        else:
            # a: user1's ratings, b: user2's ratings for the shared movies
            a, b = [], []
            for movie in movies:
                a.append(ratings[user1][movie])
                b.append(ratings[user2][movie])
            # Pearson correlation as the similarity score
            a = np.array(a)
            b = np.array(b)
            score = np.corrcoef(a, b)[0, 1]
            # score = 1 / (1 + np.sqrt(((a - b) ** 2).sum()))  # Euclidean-distance score
        scrow.append(score)
    scmat.append(scrow)

print(np.round(scmat, 2))

scmat = np.array(scmat)
users = np.array(users)

# Rank each user's similar users from most to least similar
for i, user in enumerate(users):
    sorted_indices = scmat[i].argsort()[::-1]
    # Drop the user itself, keep similar users sorted high to low
    sorted_indices = sorted_indices[sorted_indices != i]
    sim_users = users[sorted_indices]
    sim_scores = scmat[i, sorted_indices]
    # print(user, sim_users, sim_scores)
    # Mask of positively correlated similarity scores
    positive_mask = sim_scores > 0
    sim_users = sim_users[positive_mask]
    # {'name1': [2.5, 3.0], 'name2': [1.0, 4.5] ...}
    recom_movies = {}
    for j, sim_user in enumerate(sim_users):
        for movie, score in ratings[sim_user].items():
            # Movies the similar user saw but the current user has not
            if movie not in ratings[user].keys():
                if movie not in recom_movies:
                    recom_movies[movie] = [score]
                else:
                    recom_movies[movie].append(score)

    print(user)
    # Sort candidate movies by average score, high to low
    movie_list = sorted(recom_movies.items(),
                        key=lambda x: np.average(x[1]),
                        reverse=True)
    print(movie_list)

"""
demo06_tk.py - Tokenizers
"""
import nltk.tokenize as tk

doc = ("Are you curious about tokenization? "
       "Let's see how it works! "
       "We need to analyze a couple of sentences "
       "with punctuations to see it in action.")
print(doc)
tokens = tk.sent_tokenize(doc)
for i, token in enumerate(tokens):
    print("%2d" % (i + 1), token)
print('-' * 15)
tokens = tk.word_tokenize(doc)
for i, token in enumerate(tokens):
    print("%2d" % (i + 1), token)
print('-' * 15)
tokenizer = tk.WordPunctTokenizer()
tokens = tokenizer.tokenize(doc)
for i, token in enumerate(tokens):
    print("%2d" % (i + 1), token)

"""
demo07_stem.py - Stemmers
"""
import nltk.stem.porter as pt
import nltk.stem.lancaster as lc
import nltk.stem.snowball as sb

words = ['table', 'probably', 'wolves', 'playing',
         'is', 'dog', 'the', 'beaches', 'grounded',
         'dreamt', 'envision']
pt_stemmer = pt.PorterStemmer()
lc_stemmer = lc.LancasterStemmer()
sb_stemmer = sb.SnowballStemmer('english')
for word in words:
    pt_stem = pt_stemmer.stem(word)
    lc_stem = lc_stemmer.stem(word)
    sb_stem = sb_stemmer.stem(word)
    print('%8s %8s %8s %8s' % (
        word, pt_stem, lc_stem, sb_stem))

# Lemmatization: reduce words to dictionary form, by part of speech
import nltk.stem as ns
words = ['table', 'probably', 'wolves', 'playing',
         'is', 'dog', 'the', 'beaches', 'grounded',
         'dreamt', 'envision']
lemmatizer = ns.WordNetLemmatizer()
for word in words:
    n_lemma = lemmatizer.lemmatize(word, pos='n')
    v_lemma = lemmatizer.lemmatize(word, pos='v')
    print('%8s %8s %8s' % (word, n_lemma, v_lemma))

"""
demo09_cv.py  Bag-of-words model
"""
import nltk.tokenize as tk
import sklearn.feature_extraction.text as ft
import numpy as np

doc = 'The brown dog is running. ' \
      'The black dog is in the black room. ' \
      'Running in the room is forbidden.'

Split doc into sentences

sents = tk.sent_tokenize(doc)

Build the bag-of-words model

cv = ft.CountVectorizer()
bow = cv.fit_transform(sents)
print(bow.toarray())
print(cv.get_feature_names())

TF-IDF

tt = ft.TfidfTransformer()
tfidf = tt.fit_transform(bow)
print(np.round(tfidf.toarray(), 2))
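
The two-step CountVectorizer + TfidfTransformer chain can be collapsed into sklearn's TfidfVectorizer, which fits the vocabulary and the idf weights in one call; a minimal sketch on the same sents:

tv = ft.TfidfVectorizer()
tfidf2 = tv.fit_transform(sents)
print(np.round(tfidf2.toarray(), 2))  # matches the two-step result with default settings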

"""
demo11_ztsb.py  Topic identification
"""
import numpy as np
import sklearn.datasets as sd
import sklearn.feature_extraction.text as ft
import sklearn.naive_bayes as nb

train = sd.load_files('../ml_data/20news',
                      encoding='latin1', shuffle=True,
                      random_state=7)

train.data: 2968 samples; each sample is one mail document

print(np.array(train.data).shape)

train.target: 2968 samples; each is the category index of the corresponding document

print(np.array(train.target).shape)
print(train.target_names)

Build the TF-IDF matrix from the train.data training set

cv = ft.CountVectorizer()
bow = cv.fit_transform(train.data)
tt = ft.TfidfTransformer()
tfidf = tt.fit_transform(bow)
print(tfidf.shape)

Train a naive Bayes model on the TF-IDF matrix

model = nb.MultinomialNB()
model.fit(tfidf, train.target)

Test with a custom-built test set

test_data = [
    'The curveballs of right handed pitchers tend to curve to the left',
    'Caesar cipher is an ancient form of encryption',
    'This two-wheeler is really good on slippery roads']

Prediction must use exactly the same transformations as training: transform (not fit_transform) with the already-fitted cv and tt

bow = cv.transform(test_data)
tfidf = tt.transform(bow)
pred_y = model.predict(tfidf)

for sent, index in zip(test_data, pred_y):
    print(sent, '->', train.target_names[index])
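
The "transform exactly as trained" rule can also be enforced automatically by wrapping the three steps into an sklearn pipeline; a minimal sketch, assuming the same train and test_data objects as above:

import sklearn.pipeline as pl

pipe = pl.make_pipeline(ft.CountVectorizer(),
                        ft.TfidfTransformer(),
                        nb.MultinomialNB())
pipe.fit(train.data, train.target)  # fits the vectorizers and the classifier together
print([train.target_names[i] for i in pipe.predict(test_data)])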

"""
demo01_nltkc.py  Sentiment analysis with an NLTK classifier
"""
import nltk.corpus as nc
import nltk.classify as cf
import nltk.classify.util as cu

Store all the positive samples.

pdata: [({word: True, ...}, 'POSITIVE'), (), () ...]

pdata = []

Paths of the files in the pos folder

fileids = nc.movie_reviews.fileids('pos')

print(len(fileids))

Collect the words of every positive review into the pdata list

for fileid in fileids:
    sample = {}
    # words: tokenize the current document
    words = nc.movie_reviews.words(fileid)
    for word in words:
        sample[word] = True
    pdata.append((sample, 'POSITIVE'))

Collect all negative samples into the ndata list

ndata = []
fileids = nc.movie_reviews.fileids('neg')
for fileid in fileids:
    sample = {}
    words = nc.movie_reviews.words(fileid)
    for word in words:
        sample[word] = True
    ndata.append((sample, 'NEGATIVE'))

Split into training and test sets (80% for training)

pnumb, nnumb = int(0.8 * len(pdata)), int(0.8 * len(ndata))
train_data = pdata[:pnumb] + ndata[:nnumb]
test_data = pdata[pnumb:] + ndata[nnumb:]

Train a naive Bayes classifier and check its accuracy on the test set

model = cf.NaiveBayesClassifier.train(train_data)
ac = cu.accuracy(model, test_data)
print(ac)

Simulate a real business scenario

reviews = [
    'It is an amazing movie.',
    'This is a dull movie. I would never recommend it to anyone.',
    'The cinematography is pretty great in this movie.',
    'The direction was terrible and the story was all over the place.']
for review in reviews:
    sample = {}
    words = review.split()
    for word in words:
        sample[word] = True
    pcls = model.classify(sample)
    print(review, '->', pcls)
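
NLTK's naive Bayes classifier can also report which word features most strongly separate the two classes, a useful sanity check on what the model actually learned; one call on the model trained above:

model.show_most_informative_features(10)  # top 10 POSITIVE/NEGATIVE indicator words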

import nltk.tokenize as tk
import nltk.corpus as nc
import nltk.stem.snowball as sb
import gensim.models.ldamodel as gm
import gensim.corpora as gc

doc = []
with open('../ml_data/topic.txt', 'r') as f:
    for line in f.readlines():
        doc.append(line[:-1])
tokenizer = tk.WordPunctTokenizer()
stopwords = nc.stopwords.words('english')
signs = [',', '.', '!']
stemmer = sb.SnowballStemmer('english')
lines_tokens = []
for line in doc:
    tokens = tokenizer.tokenize(line.lower())
    line_tokens = []
    for token in tokens:
        if token not in stopwords and token not in signs:
            token = stemmer.stem(token)
            line_tokens.append(token)
    lines_tokens.append(line_tokens)

Store every word from lines_tokens in a gensim Dictionary object, which assigns each word an integer code.

dic = gc.Dictionary(lines_tokens)

Traverse each line and build the bag-of-words list

bow = []
for line_tokens in lines_tokens:
    row = dic.doc2bow(line_tokens)
    bow.append(row)
n_topics = 2

Build the LDA model from the bag-of-words corpus, the number of topics, the dictionary, and the number of training passes

model = gm.LdaModel(bow, num_topics=n_topics, id2word=dic, passes=25)

Print the 4 words that contribute most to each topic

topics = model.print_topics(num_topics=n_topics, num_words=4)
for label, words in topics:
    print(label, '->', words)
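
A trained LDA model can also score unseen text against the learned topics. A minimal sketch; the sample sentence is made up, and it must pass through the same tokenize/stopword/stem pipeline as the corpus:

new_line = 'the cryptography of encrypted messages'  # hypothetical input
new_tokens = [stemmer.stem(t)
              for t in tokenizer.tokenize(new_line.lower())
              if t not in stopwords and t not in signs]
# List of (topic_id, probability) pairs for the new document
print(model.get_document_topics(dic.doc2bow(new_tokens)))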

"""
demo03_freq.py
"""
import numpy as np
import numpy.fft as nf
import scipy.io.wavfile as wf
import matplotlib.pyplot as mp

sample_rate, sigs = wf.read('../ml_data/freq.wav')
print(sample_rate)
print(sigs.shape, sigs.dtype)
sigs = sigs / 2 ** 15
times = np.arange(len(sigs)) / sample_rate
freqs = nf.fftfreq(sigs.size, 1 / sample_rate)
ffts = nf.fft(sigs)
pows = np.abs(ffts)
mp.figure('Audio', facecolor='lightgray')
mp.subplot(121)
mp.title('Time Domain', fontsize=16)
mp.xlabel('Time', fontsize=12)
mp.ylabel('Signal', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(times, sigs, c='dodgerblue', label='Signal')
mp.legend()
mp.subplot(122)
mp.title('Frequency Domain', fontsize=16)
mp.xlabel('Frequency', fontsize=12)
mp.ylabel('Power', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(freqs[freqs >= 0], pows[freqs >= 0], c='orangered', label='Power')
mp.legend()
mp.tight_layout()
mp.show()
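
The same spectrum supports frequency-domain filtering: zero out unwanted components and invert the FFT. A minimal sketch, where the power threshold and the output file name are arbitrary illustrations:

filtered_ffts = ffts.copy()
filtered_ffts[pows < pows.max() * 0.1] = 0           # hypothetical threshold
filtered_sigs = nf.ifft(filtered_ffts).real          # back to the time domain
wf.write('../ml_data/freq_filtered.wav', sample_rate,
         (filtered_sigs * 2 ** 15).astype('int16'))  # undo the earlier scaling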

"""
demo04_mfcc.py  MFCC matrix
"""
import scipy.io.wavfile as wf
import python_speech_features as sf
import matplotlib.pyplot as mp

sample_rate, sigs = wf.read(
    '../ml_data/speeches/training/banana/banana01.wav')
mfcc = sf.mfcc(sigs, sample_rate)

mp.matshow(mfcc.T, cmap='gist_rainbow')
mp.show()

"""
demo05_speech.py  Speech recognition

1. Read the training audio samples from the training folder;
   each audio file yields one MFCC matrix, and each MFCC has a
   category label (e.g. apple).

2. Merge all MFCCs whose category is apple into one training set:
   | mfcc |       |
   | mfcc | apple |
   | mfcc |       |

   This training set can train one HMM that matches "apple".

3. Train 7 HMMs, one per fruit category, and keep them in a container.

4. Read the test samples from the testing folder and arrange them as:
   | mfcc | apple |
   | mfcc | lime  |

5. For every test sample:
   1. Compute a score against each of the 7 HMMs.
   2. Take the category of the highest-scoring model as the prediction.
"""

import os
import numpy as np
import scipy.io.wavfile as wf
import python_speech_features as sf
import hmmlearn.hmm as hl

# 1. Read the training audio samples from the training folder; each audio
#    file yields one MFCC matrix, and each MFCC has a label (e.g. apple).
def search_file(directory):
    # Normalize the path for the current operating system.
    # Returns {'apple': [url, url, url ...], 'banana': [...]}
    directory = os.path.normpath(directory)
    objects = {}
    # curdir: current directory
    # subdirs: all subdirectories of the current directory
    # files: all file names in the current directory
    for curdir, subdirs, files in os.walk(directory):
        for file in files:
            if file.endswith('.wav'):
                label = curdir.split(os.path.sep)[-1]
                if label not in objects:
                    objects[label] = []
                # Append this path to the list for its label
                path = os.path.join(curdir, file)
                objects[label].append(path)
    return objects

# Read the training set
train_samples = search_file('../ml_data/speeches/training')

'''
2. Merge all MFCCs whose category is apple into one training set:
   | mfcc |       |
   | mfcc | apple |
   | mfcc |       |

   This training set can train one HMM that matches "apple".
'''
train_x, train_y = [], []

Loop 7 times: apple / banana / ...

for label, filenames in train_samples.items():
    mfccs = np.array([])
    for filename in filenames:
        sample_rate, sigs = wf.read(filename)
        mfcc = sf.mfcc(sigs, sample_rate)
        if len(mfccs) == 0:
            mfccs = mfcc
        else:
            mfccs = np.append(mfccs, mfcc, axis=0)
    train_x.append(mfccs)
    train_y.append(label)
'''
Training set:
train_x    train_y
-----------------
| mfcc |        |
| mfcc | apple  |
| mfcc |        |
-----------------
| mfcc |        |
| mfcc | banana |
| mfcc |        |
-----------------
| mfcc |        |
| mfcc | lime   |
| mfcc |        |
-----------------
'''

models: {'apple': object, 'banana': object, ...}

models = {}
for mfccs, label in zip(train_x, train_y):
    model = hl.GaussianHMM(n_components=4,
                           covariance_type='diag', n_iter=1000)
    models[label] = model.fit(mfccs)

'''
4. Read the test samples from the testing folder. For every test sample:
   1. Compute a score against each of the 7 HMMs.
   2. Take the category of the highest-scoring model as the prediction.
'''
# Read the test set
test_samples = search_file('../ml_data/speeches/testing')

test_x, test_y = [], []
for label, filenames in test_samples.items():
    mfccs = np.array([])
    for filename in filenames:
        sample_rate, sigs = wf.read(filename)
        mfcc = sf.mfcc(sigs, sample_rate)
        if len(mfccs) == 0:
            mfccs = mfcc
        else:
            mfccs = np.append(mfccs, mfcc, axis=0)
    test_x.append(mfccs)
    test_y.append(label)

'''
Test set:
test_x    test_y
-----------------
| mfcc | apple  |
-----------------
| mfcc | banana |
-----------------
| mfcc | lime   |
-----------------
'''
pred_test_y = []
for mfccs in test_x:
    # Find which HMM matches these MFCCs best
    best_score, best_label = None, None
    for label, model in models.items():
        score = model.score(mfccs)
        if (best_score is None) or (best_score < score):
            best_score = score
            best_label = label
    pred_test_y.append(best_label)

print(test_y)
print(pred_test_y)
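
A quick overall accuracy for the two printed lists (a small addition, not part of the original output):

acc = np.mean(np.array(test_y) == np.array(pred_test_y))
print('accuracy:', acc)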

import json
import numpy as np
import scipy.io.wavfile as wf

with open('../ml_data/12.json', 'r') as f:
    freqs = json.loads(f.read())
tones = [
    ('G5', 1.5),
    ('A5', 0.5),
    ('G5', 1.5),
    ('E5', 0.5),
    ('D5', 0.5),
    ('E5', 0.25),
    ('D5', 0.25),
    ('C5', 0.5),
    ('A4', 0.5),
    ('C5', 0.75)]
sample_rate = 44100
music = np.empty(shape=0)
for tone, duration in tones:
    times = np.linspace(0, duration, int(duration * sample_rate))
    sound = np.sin(2 * np.pi * freqs[tone] * times)
    music = np.append(music, sound)
music *= 2 ** 15
music = music.astype(np.int16)
wf.write('../ml_data/music.wav', sample_rate, music)
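
The hard edges between consecutive tones produce audible clicks. A short, hedged variation: apply a linear fade-out to each tone before appending it (the 10 ms fade length is an arbitrary choice):

def fade_out(sound, sample_rate, fade=0.01):
    # Ramp the last `fade` seconds linearly down to zero
    n = int(fade * sample_rate)
    env = np.ones(len(sound))
    env[-n:] = np.linspace(1, 0, n)
    return sound * env

# Inside the loop above: music = np.append(music, fade_out(sound, sample_rate))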

"""
demo07_opencv.py  OpenCV basics
"""

import numpy as np
import cv2 as cv

Read an image and display it

original = cv.imread('../ml_data/forest.jpg')
cv.imshow('Original', original)

Display individual color channels of the image

blue = np.zeros_like(original)
blue[:, :, 0] = original[:, :, 0]  # channel 0: blue
cv.imshow('Blue', blue)
green = np.zeros_like(original)
green[:, :, 1] = original[:, :, 1]  # channel 1: green
cv.imshow('Green', green)
red = np.zeros_like(original)
red[:, :, 2] = original[:, :, 2]  # channel 2: red
cv.imshow('Red', red)

Image cropping

h, w = original.shape[:2]
l, t = int(w / 4), int(h / 4)
r, b = int(w * 3 / 4), int(h * 3 / 4)
cropped = original[t:b, l:r]
cv.imshow('Cropped', cropped)
# Image scaling; interpolation=linear
scaled1 = cv.resize(original, (int(w / 4), int(h / 4)),
                    interpolation=cv.INTER_LINEAR)
cv.imshow('Scaled1', scaled1)
scaled2 = cv.resize(
    scaled1, None, fx=4, fy=4,
    interpolation=cv.INTER_LINEAR)
cv.imshow('Scaled2', scaled2)
cv.waitKey()

Save the images to files

cv.imwrite('../ml_data/blue.jpg', blue)
cv.imwrite('../ml_data/green.jpg', green)
cv.imwrite('../ml_data/red.jpg', red)
cv.imwrite('../ml_data/cropped.jpg', cropped)
cv.imwrite('../ml_data/scaled1.jpg', scaled1)
cv.imwrite('../ml_data/scaled2.jpg', scaled2)

"""
demo08_canny.py  Edge detection
"""
import cv2 as cv

original = cv.imread('../ml_data/chair.jpg',
                     cv.IMREAD_GRAYSCALE)
cv.imshow('Original', original)
hsobel = cv.Sobel(original, cv.CV_64F, 1, 0, ksize=5)
cv.imshow('H-Sobel', hsobel)
vsobel = cv.Sobel(original, cv.CV_64F, 0, 1, ksize=5)
cv.imshow('V-Sobel', vsobel)
sobel = cv.Sobel(original, cv.CV_64F, 1, 1, ksize=5)
cv.imshow('Sobel', sobel)
laplacian = cv.Laplacian(original, cv.CV_64F)
cv.imshow('Laplacian', laplacian)
canny = cv.Canny(original, 50, 80)
cv.imshow('Canny', canny)
cv.waitKey()

"""
demo09_hist.py  Histogram equalization
"""
import cv2 as cv

original = cv.imread('../ml_data/sunrise.jpg')
cv.imshow('Original', original)
gray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
equalized_gray = cv.equalizeHist(gray)
cv.imshow('Equalized Gray', equalized_gray)

YUV: Y is the luminance channel; U and V carry the chrominance

yuv = cv.cvtColor(original, cv.COLOR_BGR2YUV)
yuv[..., 0] = cv.equalizeHist(yuv[..., 0])
equalized_color = cv.cvtColor(yuv, cv.COLOR_YUV2BGR)
cv.imshow('Equalized Color', equalized_color)
cv.waitKey()

"""
demo10_harris.py  Harris corner detection
"""
import cv2 as cv

original = cv.imread('../ml_data/box.png')
cv.imshow('Original', original)
gray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
corners = cv.cornerHarris(gray, 7, 5, 0.04)

Mark the strongest corners on a copy of the image

mixture = original.copy()
mixture[corners > corners.max() * 0.01] = [0, 0, 255]
cv.imshow('Corner', mixture)
cv.waitKey()

"""
demo11_star.py  STAR keypoint detector
"""
import cv2 as cv

original = cv.imread('../ml_data/table.jpg')
cv.imshow('Original', original)
gray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
star = cv.xfeatures2d.StarDetector_create()
keypoints = star.detect(gray)
mixture = original.copy()
cv.drawKeypoints(
    original, keypoints, mixture,
    flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv.imshow('Mixture', mixture)
cv.waitKey()

"""
demo12_sift.py  SIFT keypoint detector
"""
import cv2 as cv

original = cv.imread('../ml_data/table.jpg')
cv.imshow('Original', original)
gray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
sift = cv.xfeatures2d.SIFT_create()
keypoints = sift.detect(gray)
mixture = original.copy()
cv.drawKeypoints(original, keypoints, mixture,
                 flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv.imshow('Mixture', mixture)
cv.waitKey()

"""
demo13_desc.py  Feature descriptor matrix
"""
import cv2 as cv
import matplotlib.pyplot as mp

original = cv.imread('../ml_data/table.jpg')
cv.imshow('Original', original)
gray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
sift = cv.xfeatures2d.SIFT_create()
keypoints = sift.detect(gray)
_, desc = sift.compute(gray, keypoints)
print(desc.shape)
mp.matshow(desc.T, cmap='jet', fignum='Description')
mp.title('Description', fontsize=20)
mp.xlabel('Feature', fontsize=14)
mp.ylabel('Sample', fontsize=14)
mp.tick_params(which='both', top=False, labeltop=False, labelbottom=True, labelsize=10)
mp.show()

'''
1. Read the training image samples from the training folder;
   each image yields one desc matrix, and each desc has a
   category label (e.g. car).

2. Merge all descs whose category is car into one training set:
   | desc |     |
   | desc | car |
   | desc |     |

   This training set can train one HMM that matches "car".

3. Train 3 HMMs, one per object category, and keep them in a container.

4. Read the test samples from the testing folder and arrange them as:
   | desc | car  |
   | desc | moto |

5. For every test sample:
   1. Compute a score against each of the 3 HMMs.
   2. Take the category of the highest-scoring model as the prediction.
'''
import os
import numpy as np
import cv2 as cv
import hmmlearn.hmm as hl

def search_files(directory):
    directory = os.path.normpath(directory)
    objects = {}
    for curdir, subdirs, files in os.walk(directory):
        for file in files:
            if file.endswith('.jpg'):
                label = curdir.split(os.path.sep)[-1]
                if label not in objects:
                    objects[label] = []
                path = os.path.join(curdir, file)
                objects[label].append(path)
    return objects

# Load the training samples, train one model per label, and store the models
train_objects = search_files('../ml_data/objects/training')
train_x, train_y = [], []
for label, filenames in train_objects.items():
    descs = np.array([])
    for filename in filenames:
        image = cv.imread(filename)
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        # Resize so the shorter side is 200 px, keeping the
        # descriptor counts comparable across samples
        h, w = gray.shape[:2]
        f = 200 / min(h, w)
        gray = cv.resize(gray, None, fx=f, fy=f)
        sift = cv.xfeatures2d.SIFT_create()
        keypoints = sift.detect(gray)
        _, desc = sift.compute(gray, keypoints)
        if len(descs) == 0:
            descs = desc
        else:
            descs = np.append(descs, desc, axis=0)
    train_x.append(descs)
    train_y.append(label)
models = {}
for descs, label in zip(train_x, train_y):
    model = hl.GaussianHMM(n_components=4, covariance_type='diag', n_iter=100)
    models[label] = model.fit(descs)

# Test the models
test_objects = search_files('../ml_data/objects/testing')
test_x, test_y = [], []
for label, filenames in test_objects.items():
    descs = np.array([])
    for filename in filenames:
        image = cv.imread(filename)
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        sift = cv.xfeatures2d.SIFT_create()
        keypoints = sift.detect(gray)
        _, desc = sift.compute(gray, keypoints)
        if len(descs) == 0:
            descs = desc
        else:
            descs = np.append(descs, desc, axis=0)
    test_x.append(descs)
    test_y.append(label)

Traverse all test samples; score each one with every model and keep the best match

pred_y = []
for descs, test_label in zip(test_x, test_y):
    best_score, best_label = None, None
    for pred_label, model in models.items():
        score = model.score(descs)
        if (best_score is None) or (best_score < score):
            best_score = score
            best_label = pred_label
    print(test_label, '->', best_label, best_score)
    pred_y.append(best_label)

print(test_y)
print(pred_y)

"""
demo15_vc.py  Video capture
"""
import cv2 as cv

Open the video capture device

video_capture = cv.VideoCapture(0)

while True:
    frame = video_capture.read()[1]
    cv.imshow('frame', frame)
    # Refresh the frame every 33 ms; press ESC (27) to quit
    if cv.waitKey(33) == 27:
        break

video_capture.release()
cv.destroyAllWindows()

import cv2 as cv

Haar cascade locators for face, eyes and nose

fd = cv.CascadeClassifier('../ml_data/haar/face.xml')
ed = cv.CascadeClassifier('../ml_data/haar/eye.xml')
nd = cv.CascadeClassifier('../ml_data/haar/nose.xml')

vc = cv.VideoCapture(0)
while True:
    frame = vc.read()[1]
    faces = fd.detectMultiScale(frame, 1.3, 5)
    for l, t, w, h in faces:
        a, b = int(w / 2), int(h / 2)
        cv.ellipse(frame, (l + a, t + b), (a, b), 0, 0, 360, (255, 0, 255), 2)
        # Crop the face, then locate the eyes and nose inside it
        face = frame[t:t + h, l:l + w]
        eyes = ed.detectMultiScale(face, 1.3, 5)
        for l, t, w, h in eyes:
            a, b = int(w / 2), int(h / 2)
            cv.ellipse(face, (l + a, t + b), (a, b), 0, 0, 360, (0, 255, 0), 2)
        noses = nd.detectMultiScale(face, 1.3, 5)
        for l, t, w, h in noses:
            a, b = int(w / 2), int(h / 2)
            cv.ellipse(face, (l + a, t + b), (a, b), 0, 0, 360, (0, 255, 255), 2)
    cv.imshow('VideoCapture', frame)
    if cv.waitKey(33) == 27:
        break

vc.release()
cv.destroyAllWindows()

"""
1. Read the sample images and collect the list of image paths.

2. Read each image and crop each face with the haar detector; put the
   face data into train_x as training data.
   When building train_y, the labels Bob, Sala and Roy are strings,
   so they must be label-encoded with a LabelEncoder.

3. Traverse the training set and train an LBPH face-recognition model.

4. Read the test data and collect the list of image paths.

5. Traverse each test image, crop the face the same way, and let the
   LBPH model predict the category of each face.

6. Show the results as images.
"""
import os
import numpy as np
import cv2 as cv
import sklearn.preprocessing as sp

fd = cv.CascadeClassifier('../ml_data/haar/face.xml')

def search_faces(directory):
    directory = os.path.normpath(directory)
    faces = {}
    for curdir, subdirs, files in os.walk(directory):
        for jpeg in (file for file in files
                     if file.endswith('.jpg')):
            path = os.path.join(curdir, jpeg)
            label = path.split(os.path.sep)[-2]
            if label not in faces:
                faces[label] = []
            faces[label].append(path)
    return faces

train_faces = search_faces(
    '../ml_data/faces/training')
codec = sp.LabelEncoder()
codec.fit(list(train_faces.keys()))

train_x, train_y = [], []
for label, filenames in train_faces.items():
    for filename in filenames:
        image = cv.imread(filename)
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        faces = fd.detectMultiScale(gray, 1.1, 2,
                                    minSize=(100, 100))
        for l, t, w, h in faces:
            train_x.append(
                gray[t:t + h, l:l + w])
            train_y.append(
                codec.transform([label])[0])
train_y = np.array(train_y)
'''
Training set layout:
train_x   train_y
-----------------
| face  |   0   |
| face  |   1   |
| face  |   2   |
| face  |   1   |
-----------------
'''

LBPH (Local Binary Patterns Histograms) face recognizer

model = cv.face.LBPHFaceRecognizer_create()
model.train(train_x, train_y)

Test the model

test_faces = search_faces(
    '../ml_data/faces/testing')
test_x, test_y, test_z = [], [], []
for label, filenames in test_faces.items():
    for filename in filenames:
        image = cv.imread(filename)
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        faces = fd.detectMultiScale(gray, 1.1, 2,
                                    minSize=(100, 100))
        for l, t, w, h in faces:
            test_x.append(
                gray[t:t + h, l:l + w])
            test_y.append(
                codec.transform([label])[0])
            a, b = int(w / 2), int(h / 2)
            cv.ellipse(image, (l + a, t + b),
                       (a, b), 0, 0, 360,
                       (255, 0, 255), 2)
        test_z.append(image)
test_y = np.array(test_y)
pred_test_y = []
for face in test_x:
    pred_code = model.predict(face)[0]
    pred_test_y.append(pred_code)

print(codec.inverse_transform(test_y))
print(codec.inverse_transform(pred_test_y))
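
model.predict also returns a confidence value next to the label (for LBPH, lower means a closer match); a small sketch printing it for each test face:

for face in test_x:
    pred_code, confidence = model.predict(face)
    # Lower confidence = closer LBPH histogram match
    print(codec.inverse_transform([pred_code])[0], round(confidence, 1))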

escape = False
while not escape:
    for code, pred_code, image in zip(
            test_y, pred_test_y, test_z):
        label, pred_label = \
            codec.inverse_transform([code, pred_code])
        text = '{} {} {}'.format(
            label,
            '==' if code == pred_code else '!=',
            pred_label)
        cv.putText(image, text, (10, 60),
                   cv.FONT_HERSHEY_SIMPLEX, 2,
                   (255, 255, 255), 6)
        cv.imshow('Recognizing...', image)
        if cv.waitKey(1000) == 27:
            escape = True
            break
