PCA (principal component analysis) reduces dimensionality by looking for a low-dimensional subspace in which the projected samples have the largest possible variance, so that the samples stay as distinguishable as possible. In two dimensions, for example, you can find a single line (one dimension) such that projecting the points onto it spreads them out the most (maximum variance, maximum separability)!
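To make this concrete, here is a minimal NumPy sketch (my own illustration, not from the original post): the direction of maximum variance is the top eigenvector of the covariance matrix, and projecting onto it gives a 1-D representation whose variance equals the largest eigenvalue.

import numpy as np

# A few toy 2-D points (rows are samples); illustration only
X = np.array([[1.0, 2.1], [2.0, 3.9], [3.0, 6.2], [4.0, 7.8], [5.0, 10.1]])
Xc = X - X.mean(axis=0)                  # center the data first
cov = np.cov(Xc, rowvar=False)           # 2x2 covariance matrix
eigvals, eigvecs = np.linalg.eigh(cov)   # eigh returns eigenvalues in ascending order
w = eigvecs[:, -1]                       # unit vector of the max-variance direction
proj = Xc @ w                            # project every sample onto that line
print(proj.var(ddof=1))                  # equals the largest eigenvalue
print(Xc.var(axis=0, ddof=1))            # variance along each raw axis is smaller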
Problems PCA can help with (Andrew Ng mentioned these when teaching PCA):
1) Reducing the memory and disk space needed to store data (see the sketch after this list)
2) Speeding up training
3) Visualizing high-dimensional data
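For use (1), here is a quick sketch of the compression idea (my own example, reusing the sklearn digits data that appears later in this post): store only the low-dimensional projection, and rebuild a lossy approximation with inverse_transform when the original representation is needed.

from sklearn.datasets import load_digits
from sklearn.decomposition import PCA

X, _ = load_digits(return_X_y=True)        # 1797 samples x 64 features
pca = PCA(n_components=16).fit(X)          # 16 is an arbitrary choice for this sketch
X_small = pca.transform(X)                 # store 16 numbers per sample instead of 64
X_approx = pca.inverse_transform(X_small)  # lossy reconstruction back to 64-D
print(X_small.shape, X_approx.shape)       # (1797, 16) (1797, 64)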
PCA example code
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import numpy as np

# An arbitrary 4-dimensional array to experiment with
DF = np.array([[0, 1, 4, 3],
               [1, 2, 8, 9],
               [2, 4, 16, 81],
               [2, 5, 20, 243],
               [4, 6, 24, 729]])

# Note: 'mle' needs at least as many samples as features
# If you don't know how many dimensions to keep, let 'mle' pick for you
pca_mle = PCA(n_components='mle').fit(DF)  # maximum likelihood estimation of the number of components
print(pca_mle.explained_variance_ratio_)
cumsum = np.cumsum(pca_mle.explained_variance_ratio_)  # cumulative share of variance explained by the kept components
print(cumsum)
data_dr = pca_mle.transform(DF)
print(data_dr)  # each dimension of the output is zero-centered
print(data_dr.shape)

# Plot the cumulative explained variance curve to judge how many components to keep
pca_line = PCA().fit(DF)  # with no arguments, n_components defaults to min(n_samples, n_features), usually the original feature count
print(pca_line.explained_variance_ratio_)
cumsum = np.cumsum(pca_line.explained_variance_ratio_)  # cumulative share of explained variance
print(cumsum)  # cumulative ratio from 1 component up to 4
plt.plot(range(1, DF.shape[1] + 1), cumsum)  # x: number of components, y: cumulative ratio
plt.xticks(range(1, DF.shape[1] + 1))  # integer ticks on the x axis
plt.xlabel('number of components after DR')
plt.ylabel('cumulative explained variance')
plt.show()
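Once you have this curve, a common rule of thumb (my own suggestion, not part of the code above) is to keep the smallest number of components whose cumulative ratio crosses a threshold you care about, e.g. 95%:

# Continuing from pca_line above: first component count whose cumulative ratio >= 0.95
cumsum = np.cumsum(pca_line.explained_variance_ratio_)
n_keep = int(np.argmax(cumsum >= 0.95)) + 1
print(n_keep)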
I recommend running this in Anaconda's Jupyter Notebook.
A supplementary example:
from sklearn.decomposition import PCA
from sklearn.datasets import load_digits  # 8x8 handwritten digit images
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import time

X, y = load_digits(return_X_y=True)
print('X shape:', X.shape)  # X has 64 features
X_train, X_test, y_train, y_test = train_test_split(X, y)

# Baseline: KNN on the raw 64-dimensional data
tic = time.time()
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_train)
print(knn_clf.score(X_test, y_test))
toc = time.time()
print('Time of method[exec_without_pca] costs:' + str(toc - tic))
print('----' * 10)

# With PCA: keep enough components to explain 95% of the variance
tic = time.time()
knn_clf = KNeighborsClassifier()
pca = PCA(n_components=0.95)  # a float in (0, 1) means "explain this share of the variance"
pca.fit(X_train)  # fit PCA on the training set only
X_train_reduction = pca.transform(X_train)
X_test_reduction = pca.transform(X_test)
knn_clf.fit(X_train_reduction, y_train)
print(knn_clf.score(X_test_reduction, y_test))
toc = time.time()
print('Time of method[exec_with_pca] costs:' + str(toc - tic))
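The same transform-then-fit sequence can be written more compactly with sklearn's Pipeline, which ties the PCA fit to the training data so the test set cannot accidentally leak into it (a sketch of an equivalent setup, not from the original post):

from sklearn.pipeline import make_pipeline

# Reusing X_train, X_test, y_train, y_test from the split above
pipe = make_pipeline(PCA(n_components=0.95), KNeighborsClassifier())
pipe.fit(X_train, y_train)         # PCA statistics come from the training set only
print(pipe.score(X_test, y_test))  # the test set is transformed automatically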
import matplotlib.pyplot as plt
%matplotlib inline
def draw_graph():
    # Project the full dataset down to 2 dimensions for plotting
    pca = PCA(n_components=2)
    pca.fit(X)
    X_reduction = pca.transform(X)
    for i in range(10):  # one scatter per digit class 0-9
        plt.scatter(X_reduction[y == i, 0], X_reduction[y == i, 1], alpha=0.8, label='%s' % i)
    plt.legend()
    plt.show()

draw_graph()
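Calling draw_graph() colors each of the ten digit classes differently; even with only two principal components, several classes already form visibly separate clusters, which is exactly use (3) from the list at the top: visualizing high-dimensional data.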