目录
1. 导入包、加载数据
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from scipy.io import loadmat

# Load the first example dataset (MATLAB .mat format) and show its raw contents.
raw_data = loadmat(r'E:\MachineLearning\PythonSet\spam_data1.mat')
raw_data
2. 绘制散点图
# Put the two features and the label into one DataFrame.
data = pd.DataFrame(raw_data['X'], columns=['X1', 'X2'])
data['y'] = raw_data['y']

# Split the samples by class so each class gets its own marker.
positive = data[data['y'] == 1]
negative = data[data['y'] == 0]

fig, ax = plt.subplots(figsize=(6, 4))
ax.scatter(positive['X1'], positive['X2'], s=50, marker='x', label='Positive')
ax.scatter(negative['X1'], negative['X2'], s=50, marker='o', label='Negative')
ax.legend()
plt.show()
3. 线性SVM训练数据
from sklearn import svm

# SVC: Support Vector Classification.
# sklearn.svm.LinearSVC(penalty='l2', loss='squared_hinge', dual=True, tol=0.0001,
#                       C=1.0, multi_class='ovr', fit_intercept=True,
#                       intercept_scaling=1, class_weight=None, verbose=0,
#                       random_state=None, max_iter=1000)
# 'hinge' is the standard SVM loss; 'squared_hinge' is the square of the hinge loss.
features = data[['X1', 'X2']]
labels = data['y']

svc = svm.LinearSVC(C=1, loss='hinge', max_iter=1000)
svc.fit(features, labels)
svc.score(features, labels)
0.9803921568627451
若将C=100,则得到的分数为:0.9411764705882353(此处得到的分数更接近0.95,所以分类效果更好;但C越大,正则化越弱,越容易发生过拟合)
3.1 置信水平
何为置信水平?
置信水平表示从同一个总体中反复抽取样本时包括总体参数的区间所占的百分比。通常,置信水平为 95% 。这表明,如果收集 100 个样本,并创建 100 个 95% 置信区间,则应当会有大约 95% 的区间包含总体参数。
何为置信区间?
误差范围在统计概率中可以称为置信区间
# Signed distance to the separating hyperplane; the magnitude acts as a
# confidence measure for each prediction.
confidence = svc.decision_function(data[['X1', 'X2']])
data['SVM 1 Confidence'] = confidence

fig, ax = plt.subplots(figsize=(6, 4))
ax.scatter(data['X1'], data['X2'], s=50, c=data['SVM 1 Confidence'], cmap='seismic')
ax.set_title('SVM (C=100) Decision Confidence')
plt.show()
对比图(C=1 vs.100):
4. 高斯核函数SVM
def gaussian_kernel(x1, x2, sigma):
    """Gaussian (RBF) kernel similarity between vectors x1 and x2.

    Computes exp(-||x1 - x2||^2 / (2 * sigma^2)), a value in (0, 1]
    that approaches 1 as the two vectors get closer together.
    """
    diff = x1 - x2
    return np.exp(-diff.dot(diff) / (2.0 * sigma ** 2))
# Sanity check with the course's reference vectors; expected value ~0.3247.
x1 = np.array([1.0, 2.0, 1.0])
x2 = np.array([0.0, 4.0, -1.0])
sigma = 2
gaussian_kernel(x1, x2, sigma)
0.32465246735834974
5.非线性分类数据加载
# Second dataset: the two classes are not linearly separable.
raw_data = loadmat(r'E:\MachineLearning\PythonSet\spam_data2.mat')
data = pd.DataFrame(raw_data['X'], columns=['X1', 'X2'])
data['y'] = raw_data['y']

positive = data[data['y'] == 1]
negative = data[data['y'] == 0]

fig, ax = plt.subplots(figsize=(6, 4))
ax.scatter(positive['X1'], positive['X2'], s=30, marker='x', label='Positive')
ax.scatter(negative['X1'], negative['X2'], s=30, marker='o', label='Negative')
ax.set_title('Gaussian kernel function ()')
ax.legend()
plt.show()
5.1 训练SVM
# SVC defaults to the RBF kernel; probability=True enables predict_proba later.
training_inputs = data[['X1', 'X2']]

svc = svm.SVC(C=100, gamma=10, probability=True)
svc.fit(training_inputs, data['y'])
svc.score(training_inputs, data['y'])
0.9698725376593279
5.2 置信水平
# Per-sample class probability used to colour the scatter plot.
# predict_proba columns follow svc.classes_ (sorted label order), so
# column 0 corresponds to the smallest label — class 0 here.
data['Probability'] = svc.predict_proba(data[['X1', 'X2']])[:, 0]

fig, ax = plt.subplots(figsize=(6, 4))
# Fix: 'Orenges' is not a registered matplotlib colormap and raises
# ValueError; the built-in sequential colormap is spelled 'Oranges'.
ax.scatter(data['X1'], data['X2'], s=30, c=data['Probability'], cmap='Oranges')
ax.set_title('Decision Confidence(C=100)')
plt.show()
6. 寻找最合适的参数(C、$\sigma$)
# Third dataset: comes with a cross-validation split (Xval, yval).
raw_data = loadmat(r'E:\MachineLearning\PythonSet\spam_data3.mat')
X = raw_data['X']
Xval = raw_data['Xval']
y = raw_data['y'].ravel()
yval = raw_data['yval'].ravel()

# Same candidate grid for both hyper-parameters (roughly factor-of-3 steps).
candidates = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100]

best_score = 0
best_params = {'C': None, 'gamma': None}

# Grid search: fit on the training set, score on the validation set, and
# keep the first (C, gamma) pair that achieves the highest validation score.
for C in candidates:
    for gamma in candidates:
        model = svm.SVC(C=C, gamma=gamma)
        model.fit(X, y)
        current = model.score(Xval, yval)
        if current > best_score:
            best_score = current
            best_params['C'] = C
            best_params['gamma'] = gamma

best_score, best_params
(0.965, {'C': 0.3, 'gamma': 100})
7.垃圾邮件分类
7.1 数据导入
# Pre-extracted spam feature matrices with separate train and test files.
spam_train = loadmat(r'E:\MachineLearning\PythonSet\spamTrain.mat')
spam_test = loadmat(r'E:\MachineLearning\PythonSet\spamTest.mat')
spam_train

X = spam_train['X']
Xtest = spam_test['Xtest']
y = spam_train['y'].ravel()        # flatten the (n, 1) label column to (n,)
ytest = spam_test['ytest'].ravel()

X.shape, y.shape, Xtest.shape, ytest.shape
((4000, 1899), (4000,), (1000, 1899), (1000,))
7.2 训练SVM分类器
# A default SVC (RBF kernel, C=1.0) is sufficient for this task.
svc = svm.SVC()
svc.fit(X, y)

train_accuracy = np.round(svc.score(X, y) * 100, 2)
print('训练精度 = {0}%'.format(train_accuracy))
训练精度 = 99.32%
7.3 测试分类效果
# Accuracy on the held-out test set.
test_accuracy = np.round(svc.score(Xtest, ytest) * 100, 2)
print('测试精度 = {0}%'.format(test_accuracy))
测试精度 = 98.7%