import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Fetch the optdigits training and test splits from the UCI repository.
# Each row holds 64 pixel-intensity features followed by the digit label in column 64.
digits_train = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/optdigits/optdigits.tra',header=None)
digits_test = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/optdigits/optdigits.tes',header=None)

# Separate the 64-dimensional pixel features from the 1-dimensional digit target.
X_digits = digits_train.iloc[:, :64]
y_digits = digits_train[64]
# NOTE(review): the triple-quoted string below is commented-out demo code that
# projects the digits onto their first two principal components and scatter-plots
# the 10 classes. It relies on the long-removed pandas `Series.as_matrix()` API
# (would need `.to_numpy()` on modern pandas) — confirm before re-enabling.
'''
#导入PCA
from sklearn.decomposition import PCA
#初始化一个将高纬度特征向量压缩至两个维度的PCA
estimator = PCA(n_components=2)
X_pca = estimator.fit_transform(X_digits)
#显示10类手写体数字图片经PCA压缩后的2维空间分布
def plot_pca_scatter():
colors = ['black','blue','purple','yellow','white','red','lime','cyan','orange','gray']
for i in range(len(colors)):
px = X_pca[:,0][y_digits.as_matrix()==i]
py = X_pca[:,1][y_digits.as_matrix()==i]
plt.scatter(px, py, c = colors[i])
plt.legend(np.arange(0,10).astype(str))
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
plot_pca_scatter()
'''
# Separate the 64 pixel features and the digit target for both splits.
X_train = digits_train.iloc[:, :64]   # training features
y_train = digits_train[64]            # training labels
X_test = digits_test.iloc[:, :64]     # test features
y_test = digits_test[64]              # test labels

# Baseline: a linear-kernel support vector classifier on the raw 64-D pixels.
from sklearn.svm import LinearSVC

svc = LinearSVC()
svc.fit(X_train, y_train)
y_predict = svc.predict(X_test)
# Compress the raw 64-D pixel features to 20 dimensions with PCA, then train a
# second linear SVM on the compressed representation for comparison.
from sklearn.decomposition import PCA
estimator = PCA(n_components=20)  # reduce 64 dims -> 20 dims
pca_X_train = estimator.fit_transform(X_train)  # fit the projection on training data only
pca_X_test = estimator.transform(X_test)        # reuse the fitted projection on the test set

# BUG FIX: the PCA-based classifier must be trained and evaluated on the
# PCA-transformed features. The original code passed X_train/X_test here, so
# pca_X_train/pca_X_test were never used and both models were identical,
# making the printed comparison meaningless.
pca_svc = LinearSVC()
pca_svc.fit(pca_X_train, y_train)
pca_y_predict = pca_svc.predict(pca_X_test)

# Report accuracy and per-class precision/recall/F1 for both models on the
# held-out test split.
from sklearn.metrics import classification_report
print('The high dimension score is :', svc.score(X_test, y_test))
print(classification_report(y_test, y_predict))
print('The low dimension score is :', pca_svc.score(pca_X_test, y_test))
print(classification_report(y_test, pca_y_predict))
# Output:
# Analysis: although the features compressed and reconstructed by PCA lose roughly
# 2% of prediction accuracy, compared with the original 64-dimensional features
# the PCA compression reduces the dimensionality by 68.75%.