Plotting PCA Biplots, Scree Plots, Variable Loading Plots, and Variable Contribution Plots

This post shows how to run principal component analysis (PCA) with Python's sklearn library, covering data preprocessing (standardization) and how to draw biplots, scree plots (showing the explained variance ratio of each principal component), and variable loading plots. It also stresses the importance of standardization in PCA, especially when the variables are measured on different scales.

1. Biplot

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn import datasets


# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)

iris = datasets.load_iris()
data = iris.data
y = iris.target

data = (data-np.mean(data,axis=0))/np.std(data,axis=0) # scale each variable to mean 0 and standard deviation 1; StandardScaler works as well
# In general, it's a good idea to scale the data prior to PCA.
# scaler = StandardScaler()
# scaler.fit(data)
# data = scaler.transform(data)
pca = PCA()
x_new = pca.fit_transform(data)

def myplot(score,coeff,labels=None):
    xs = score[:,0]
    ys = score[:,1]
    n = coeff.shape[0]
    scalex = 1.0/(xs.max() - xs.min())
    scaley = 1.0/(ys.max() - ys.min())
    plt.scatter(xs * scalex,ys * scaley, c=y)
    for i in range(n):
        plt.arrow(0, 0, coeff[i,0], coeff[i,1],color='r',alpha = 1,
                  head_width=0.04,head_length=0.03,overhang=1)
        if labels is None:
            plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, "Var"+str(i+1), color = 'g', ha = 'center', va = 'center')
        else:
            plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, labels[i], color = 'g', ha = 'center', va = 'center')
    plt.xlim(-1,1)
    plt.ylim(-1,1)
    plt.xlabel("PC{}".format(1))
    plt.ylabel("PC{}".format(2))
    plt.grid()

#Call the function. Use only the 2 PCs.
myplot(x_new[:,0:2],np.transpose(pca.components_[0:2, :]),
       ["a1","a2","a3","a4","a5","a6","a7","a8","a9","a10"])
plt.show()
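A small variant worth noting: the iris data has only 4 features, so only the first four of the "a1"…"a10" labels above are actually drawn. A minimal sketch that passes the real feature names instead (iris.feature_names is provided by the sklearn loader):

# Label the arrows with the actual iris feature names.
myplot(x_new[:, 0:2], np.transpose(pca.components_[0:2, :]),
       labels=list(iris.feature_names))
plt.show()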

With a legend

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn import datasets

# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)

iris = datasets.load_iris()
data = iris.data
y = iris.target

data = (data-np.mean(data,axis=0))/np.std(data,axis=0) # scale each variable to mean 0 and standard deviation 1; StandardScaler works as well
# In general, it's a good idea to scale the data prior to PCA.
# scaler = StandardScaler()
# scaler.fit(data)
# data = scaler.transform(data)
pca = PCA()
x_new = pca.fit_transform(data)


def myplot(score, coeff, labels=None):
    xs = score[:, 0]
    ys = score[:, 1]
    n = coeff.shape[0]
    scalex = 1.0 / (xs.max() - xs.min())
    scaley = 1.0 / (ys.max() - ys.min())
    for i in range(3):
        plt.scatter(xs[y == i] * scalex,
                    ys[y == i] * scaley,
                    linewidth=0.01,label=i)
    for i in range(n):
        plt.arrow(0, 0, coeff[i, 0], coeff[i, 1], color='r', alpha=1,
                  head_width=0.04, head_length=0.03, overhang=1)
        if labels is None:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, "Var" + str(i + 1), color='g', ha='center', va='center')
        else:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, labels[i], color='g', ha='center', va='center')
    plt.xlim(-1, 1)
    plt.ylim(-1, 1)
    plt.xlabel("PC{}".format(1))
    plt.ylabel("PC{}".format(2))
    plt.grid()

# Call the function. Use only the 2 PCs.
myplot(x_new[:, 0:2], np.transpose(pca.components_[0:2, :]),
       ["a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8", "a9", "a10"])
plt.legend()
plt.show()
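With this version the legend entries are the numeric class codes 0/1/2. A minimal sketch for readable class names (relying on iris.target_names from the sklearn loader), which relabels the legend handles before showing the figure:

# Re-draw the biplot and relabel the legend with the iris class names.
myplot(x_new[:, 0:2], np.transpose(pca.components_[0:2, :]),
       list(iris.feature_names))
handles, _ = plt.gca().get_legend_handles_labels()
plt.legend(handles, iris.target_names)  # 'setosa', 'versicolor', 'virginica'
plt.show()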

Marking 95% confidence ellipses (the code below uses nstd = 3, i.e. 3-standard-deviation ellipses; see the note after the code for the exact 95% scaling)

from matplotlib.patches import Ellipse
from sklearn.decomposition import PCA
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

def plot_point_cov(points, nstd=3, ax=None, **kwargs):
    # use the mean of all points as the center of the confidence ellipse
    pos = points.mean(axis=0)
    # covariance matrix of the points
    cov = np.cov(points, rowvar=False)

    return plot_cov_ellipse(cov, pos, nstd, ax, **kwargs)

def plot_cov_ellipse(cov, pos, nstd=3, ax=None, **kwargs):
    def eigsorted(cov):
        cov = np.array(cov)
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]

    if ax is None:
        ax = plt.gca()
    vals, vecs = eigsorted(cov)

    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    width, height = 2 * nstd * np.sqrt(vals)
    ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
    ax.add_artist(ellip)
    return ellip

# draw the confidence ellipses
def show_ellipse(X_pca, y, pca, feature_label=None):
    # three classes here, so three colors and three class names
    # (the names below are placeholders; for the iris data use e.g. iris.target_names)
    colors = ['tab:blue', 'tab:orange', 'seagreen']
    category_label = ['Ethiopia', 'Somalia', 'Kenya']

    # figure resolution and size
    plt.figure(dpi=100, figsize=(8, 6))

    xs = X_pca[:, 0]
    ys = X_pca[:, 1]
    scalex = 1.0 / (xs.max() - xs.min())
    scaley = 1.0 / (ys.max() - ys.min())

    xs = xs * scalex
    ys = ys * scaley
    data = np.concatenate((xs[:,None],ys[:,None]),1)
    for i in range(max(y)+1):
        plt.plot(data[:,0][y == i],data[:,1][y == i],'.',color=colors[i], label=category_label[i], markersize=8)
        plot_point_cov(data[y == i], nstd=3, alpha=0.25, color=colors[i])

    plt.plot([0,0], [-1,1], '--', lw=1, color='#cccccc')
    plt.plot([-1, 1], [0, 0], '--', lw=1, color='#cccccc')

    coeff = np.transpose(pca.components_[0:2, :])
    for i in range(coeff.shape[0]):
        plt.arrow(0, 0, coeff[i, 0], coeff[i, 1], color='r', alpha=1,
                  head_width=0.04, head_length=0.03, overhang=1)
        if feature_label is None:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, "Var" + str(i + 1), color='g', ha='center', va='center')
        else:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, feature_label[i], color='g', ha='center', va='center')

    # axis limits and labels
    plt.xlim(-1, 1)
    plt.ylim(-1, 1)
    plt.xticks(size=10, family='Times New Roman')
    plt.yticks(size=10, family='Times New Roman')
    font = {'family': 'Times New Roman', 'size': 10}
    plt.xlabel('PC1 ({} %)'.format(round(pca.explained_variance_ratio_[0] * 100, 2)), font)
    plt.ylabel('PC2 ({} %)'.format(round(pca.explained_variance_ratio_[1] * 100, 2)), font)
    plt.legend(prop={"family": "Times New Roman", "size": 8}, loc='upper right')
    plt.show()

if __name__ == '__main__':
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    X = (X-np.mean(X,axis=0))/np.std(X,axis=0) # scale each variable to mean 0 and standard deviation 1; StandardScaler works as well

    pca = PCA()
    x_new = pca.fit_transform(X)
    show_ellipse(x_new, y, pca)
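A note on the 95% level: with nstd=3 the ellipse covers roughly 99% of a bivariate normal sample, which is wider than a 95% region. A minimal sketch of the exact scaling factor, derived from the chi-square distribution with 2 degrees of freedom (uses scipy, which the code above does not import):

from scipy.stats import chi2
import numpy as np

# For a 2-D Gaussian, the ellipse containing 95% of the probability mass has
# semi-axes sqrt(chi2.ppf(0.95, df=2)) times the std along the principal axes.
nstd_95 = np.sqrt(chi2.ppf(0.95, df=2))
print(nstd_95)  # ≈ 2.45
# e.g. plot_point_cov(points, nstd=nstd_95, alpha=0.25, color=colors[i])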

2. Scree plot

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets


# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)

iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data-np.mean(data,axis=0))/np.std(data,axis=0) # scale each variable to mean 0 and standard deviation 1; StandardScaler works as well

# bootstrap resampling: draw 100 rows (with replacement) from the first 100 samples
np.random.seed(0)
sample = data[np.random.randint(0,100,100)]
var = []
for i in range(500):
    sample_n = sample[np.random.randint(0,100,100)]
    pca = PCA()
    pca.fit(sample_n)
    var.append(pca.explained_variance_ratio_)
var = np.array(var)
plt.errorbar(np.linspace(1,data.shape[1],data.shape[1]),np.mean(var,axis=0),yerr=np.std(var,axis=0),
             lw=2,elinewidth=1.5,ms=5,capsize=3,fmt='b-o') # fmt 'b-o': 'b' sets the line color, 'o' the marker style

# print(pca.components_)
# print(pca.explained_variance_ratio_)
# print(np.mean(pca.components_,axis=1).sum())
# plt.plot(pca.explained_variance_ratio_,marker='o')
# plt.legend()
plt.show()

With jittered scatter points overlaid

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets

data = np.random.random((1000,10))
y = np.random.randint(0,6,1000)

# iris = datasets.load_iris()
# data = iris.data
# y = iris.target
data = (data-np.mean(data,axis=0))/np.std(data,axis=0) # scale each variable to mean 0 and standard deviation 1; StandardScaler works as well

# bootstrap resampling: draw 100 rows (with replacement) from the first 100 samples
np.random.seed(0)
sample = data[np.random.randint(0, 100, 100)]
var = []
for i in range(500):
    sample_n = sample[np.random.randint(0, 100, 100)]
    pca = PCA()
    pca.fit(sample_n)
    var.append(pca.explained_variance_ratio_)
var = np.array(var)
plt.errorbar(np.linspace(1, data.shape[1], data.shape[1]), np.mean(var, axis=0), yerr=np.std(var, axis=0),
             lw=2, elinewidth=1.5, ms=5, capsize=5, fmt='b-o')  # fmt 'b-o': 'b' sets the line color, 'o' the marker style

# scatter the individual bootstrap results with horizontal jitter
x_jittered = np.random.uniform(-0.1,0.1,size=var.shape[0]*var.shape[1])
cc = np.repeat(np.linspace(1, data.shape[1], data.shape[1]),var.shape[0])+x_jittered
plt.scatter(cc,var.T.reshape(-1),c="#cccccc",marker=".",alpha=0.5,linewidths=0)
"""
# alternatively:
for i, d in enumerate(var.T):
    x_ = (i+1)+np.random.uniform(-0.1, 0.1, size=var.shape[0])
    plt.scatter(x_, d, c="#cccccc", marker=".", alpha=0.5, linewidths=0)
"""

# print(pca.components_)
# print(pca.explained_variance_ratio_)
# print(np.mean(pca.components_,axis=1).sum())
# plt.plot(pca.explained_variance_ratio_,marker='o')
# plt.legend()
plt.show()
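Besides the bootstrap scree plot, the number of components to keep is often read off the cumulative explained variance. A minimal sketch using standard sklearn calls (not part of the original code):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets

iris = datasets.load_iris()
data = iris.data
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)

pca = PCA().fit(data)
cum_var = np.cumsum(pca.explained_variance_ratio_)
plt.plot(range(1, data.shape[1] + 1), cum_var, 'b-o')
plt.axhline(0.95, ls='--', color='gray')  # e.g. keep enough PCs to explain 95% of the variance
plt.xlabel("Number of components")
plt.ylabel("Cumulative explained variance")
plt.show()

# Equivalently, PCA(n_components=0.95) keeps the smallest number of components
# whose cumulative explained variance reaches 95%.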

3. Variable loading plot

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets

# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)

iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data-np.mean(data,axis=0))/np.std(data,axis=0) # scale each variable to mean 0 and standard deviation 1; StandardScaler works as well

# fix the random seed for reproducibility
np.random.seed(0)

pca = PCA()
x_new = pca.fit_transform(data)

# plot the loadings of each variable on the chosen principal component
fig, ax = plt.subplots()
b = ax.barh(range(1, data.shape[1]+1), pca.components_[0], color='#6699CC') # first principal component
# b = ax.barh(range(1, data.shape[1]+1), pca.components_[1], color='#6699CC') # second principal component
plt.show()
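The bars above are only numbered 1 to 4. A minimal sketch (assuming the same iris variables) that labels them with the feature names and annotates the axis:

# Label the bars with the iris feature names.
fig, ax = plt.subplots()
ax.barh(range(1, data.shape[1]+1), pca.components_[0], color='#6699CC')
ax.set_yticks(range(1, data.shape[1]+1))
ax.set_yticklabels(iris.feature_names)
ax.set_xlabel("Loading on PC1")
plt.show()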

4. Variable contribution plot

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets

# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)

iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data-np.mean(data,axis=0))/np.std(data,axis=0) # scale each variable to mean 0 and standard deviation 1; StandardScaler works as well

# fix the random seed for reproducibility
np.random.seed(0)

pca = PCA()
x_new = pca.fit_transform(data)

# explained variance ratio of each principal component
explained_variance_ratio = pca.explained_variance_ratio_

# weight the squared loadings by the explained variance ratio of each PC
variable_contribution = np.multiply(explained_variance_ratio[:, np.newaxis], pca.components_ ** 2)

def contri(x):
    total_ = np.sum(x,axis=1,keepdims=True)
    return x/total_

# convert to percentages (the contributions to each PC sum to 100)
variable_contribution = contri(variable_contribution)*100

# plot the variable contributions
fig, ax = plt.subplots()
b = ax.barh(range(1, data.shape[1]+1), variable_contribution[0,:], color='#6699CC') # first principal component
# b = ax.barh(range(1, data.shape[1]+1), variable_contribution[1,:], color='#6699CC') # second principal component
plt.show()

Note:

The variable contribution plot is essentially based on the loadings drawn as arrows in the biplot, i.e. the rows of pca.components_. To plot contributions it is enough to square the loadings (which also guarantees non-negative values). The calculation above is correct as well and gives the same result, because each row of pca.components_ ** 2 already sums to 1, so the row-wise normalization cancels the explained-variance weighting.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets

# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)

iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to mean 0 and standard deviation 1; StandardScaler works as well

# fix the random seed for reproducibility
np.random.seed(0)

pca = PCA()
x_new = pca.fit_transform(data)

# explained variance ratio of each principal component
explained_variance_ratio = pca.explained_variance_ratio_

# plot the variable contributions directly from the squared loadings
fig, ax = plt.subplots()
b = ax.barh(range(1, data.shape[1] + 1), (pca.components_ ** 2)[0, :], color='#6699CC')  # first principal component
# b = ax.barh(range(1, data.shape[1]+1), (pca.components_ ** 2)[1,:], color='#6699CC') # second principal component
plt.show()

# pca.components_ ** 2 agrees (up to the ×100 percentage factor) with the variable_contribution computed in the previous snippet
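To check this equivalence numerically (a small sketch that continues from the two snippets above):

# variable_contribution (in %) from the first snippet equals the squared loadings
# times 100, because each row of pca.components_ ** 2 already sums to 1.
variable_contribution = explained_variance_ratio[:, np.newaxis] * pca.components_ ** 2
variable_contribution = variable_contribution / variable_contribution.sum(axis=1, keepdims=True) * 100
print(np.allclose(variable_contribution, pca.components_ ** 2 * 100))  # True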

5. Appendix

The plot style follows a figure from a Nature Communications paper.

Reference: Leaf-level coordination principles propagate to the ecosystem scale (https://doi.org/10.1038/s41467-023-39572-5); principal component analysis figure.

R code and data related to this figure: PCA biplot and scree plot R code and data.

6. Note (important)

In principal component analysis, variables are often scaled (i.e. standardized). This is especially recommended when the variables are measured on different scales (e.g. kilograms, kilometers, centimeters, ...); otherwise, the PCA output will be strongly distorted.

The goal is to make the variables comparable. Typically, variables are scaled to have a standard deviation of 1 and a mean of zero.

Standardizing the data is widely used before PCA and clustering in gene expression analysis, and it is also advisable whenever the means and/or standard deviations of the variables differ greatly.

When scaling, each variable x is transformed as (x - mean(x)) / sd(x), where mean(x) is the mean of x and sd(x) is its standard deviation.

Note that PCA tools in R and in Origin usually standardize the data automatically by default, whereas sklearn's PCA in Python only centers the data and does not scale it, so we have to standardize manually to keep the results consistent.
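A minimal sketch of this point: manual scaling and StandardScaler give identical results, and scaling clearly changes the explained variance ratios:

import numpy as np
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

X = datasets.load_iris().data

# Manual standardization and StandardScaler agree (both use the population std).
X_manual = (X - X.mean(axis=0)) / X.std(axis=0)
X_scaled = StandardScaler().fit_transform(X)
print(np.allclose(X_manual, X_scaled))  # True

# Without scaling, variables with larger variance dominate the leading components.
print(PCA().fit(X).explained_variance_ratio_)         # unscaled data
print(PCA().fit(X_scaled).explained_variance_ratio_)  # standardized data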
