# 矩阵的主成分分析与基于奇异值的主成分分析 (PCA via the correlation matrix vs. SVD-based PCA)
from sklearn.preprocessing import scale
def _6pca2(X :np.array) ->tuple :
'''
主成分分析。内部已标准化。后续降维可选取返回的特征向量与原数据相乘。
依赖:sklearn.preprocessing.scale, np.corrcoef, np.linalg.eig
:param X:数据矩阵,假定行为数据,列为属性
:return:依次返回特征值(降序排列)与特征向量矩阵(向量按列排列),可根据需要自行选择保留
'''
X_std = scale(X, with_mean=True, with_std=True, axis=0)
covmtx_X = np.corrcoef(X_std.T)
eig_val, eig_vec = np.linalg.eig(covmtx_X)
return eig_val, eig_vec
import sklearn
def _7svd_pca(X:np.array)->tuple:
'''
奇异值分解,解决特征维数较低时的问题,其不计算协方差矩阵。
:param X: 数据矩阵,行代表数据,列代表属性
:return: 左奇异矩阵,奇异值,标准化后的X。可自行取奇异矩阵中的向量构建新的降维数据矩阵。
'''
X_std = sklearn.preprocessing.scale(X, with_std=True, with_mean=True)
Uvec, Sval, Vvec = np.linalg.svd(X,full_matrices=False)
'''
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
'''
return Uvec, Sval, X_std
# sklearn库的PCA与基于核函数的PCA (sklearn's plain PCA and kernel PCA)
def kernel_pca_test():
    """Explore sklearn's kernel PCA versus plain PCA on the iris data.

    Fits both models on the iris features and scatter-plots the first two
    components of each projection, colored by class label.
    :return:
    """
    from sklearn.decomposition import KernelPCA, PCA
    from sklearn.datasets import load_iris

    kernel_model = KernelPCA(kernel='poly', gamma=10)
    plain_model = PCA(n_components=2)

    iris = load_iris()
    features = iris['data']
    labels = iris['target']

    # Kernel PCA projection, shown in figure 2.
    projected_k = kernel_model.fit(X=features).transform(features)
    plt.figure(2)
    plt.title('kpca')
    plt.scatter(projected_k[:, 0], projected_k[:, 1], c=labels)

    # Plain PCA projection, shown in figure 1.
    plain_model.fit(features)
    projected_p = plain_model.transform(features)
    plt.figure(1)
    plt.title('pca')
    plt.scatter(projected_p[:, 0], projected_p[:, 1], c=labels)

    plt.show()