1. Introduction and Basic Principles of K-means
1) Introduction
2) Basic principle
K-means alternates two steps until the assignments stop changing: assign every sample to its nearest cluster center (centroid), then move each centroid to the mean of the samples assigned to it.
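Since the original post's illustrations are not reproduced here, the principle can also be stated compactly as an objective (the standard formulation): K-means seeks centroids $\mu_1, \dots, \mu_k$ that minimize the within-cluster sum of squared distances over the $m$ samples:

$$
J = \sum_{i=1}^{m} \min_{1 \le j \le k} \lVert x^{(i)} - \mu_j \rVert^2
$$

Neither the assignment step nor the update step can increase $J$, which is why the iteration settles into a (local) optimum.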
2. Distance Metric for K-means
The Euclidean distance is generally used as the distance metric.
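For two $n$-dimensional points $x$ and $y$ it is defined as

$$
d(x, y) = \sqrt{\sum_{j=1}^{n} (x_j - y_j)^2}
$$

The from-scratch implementation in the next section compares squared distances and drops the square root; the square root is monotonic, so the nearest centroid is unchanged.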
3. K-means Implementation from Scratch
# K-means implementation from scratch
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np

# Load the data
def loaddata():
    data = np.loadtxt('data/cluster_data.csv', delimiter=',')
    return data

# Randomly initialize the centroids by picking k points from X
def kMeansInitCentroids(X, k):
    # sample k distinct row indices; replace=False avoids picking the
    # same point twice, which would create duplicate centroids
    index = np.random.choice(X.shape[0], k, replace=False)
    return X[index]

# Compute the distance from each sample to every centroid and
# assign the sample to the closest one
def findClosestCentroids(X, centroids):
    # idx[i] holds the index of the centroid that X[i] belongs to
    idx = np.zeros(X.shape[0]).reshape(X.shape[0], -1)
    for i in range(len(X)):
        index = 0
        # start with an infinitely large distance
        minDistance = float('inf')
        for k in range(len(centroids)):
            # squared Euclidean distance (the square root is omitted,
            # which does not change which centroid is closest)
            distance = np.sum(np.power(X[i] - centroids[k], 2))
            # keep the smallest distance and its centroid index
            if distance < minDistance:
                minDistance = distance
                index = k
        idx[i] = index
    return idx

# Recompute the centroid positions
def computeCentroids(X, idx):
    # collect the distinct cluster indices that actually occur;
    # tolist() converts the array to a plain Python list
    k = set(np.ravel(idx).tolist())
    k = list(k)
    # allocate an array with len(k) rows and X.shape[1] columns
    centroids = np.ndarray((len(k), X.shape[1]))
    for i in range(len(k)):
        # select the samples of X assigned to cluster k[i]
        data = X[np.where(idx == k[i])[0]]
        # the new centroid is the mean of those samples
        centroids[i] = (np.sum(data, axis=0)) / len(data)
    return centroids

# Combine the helpers above into the K-means algorithm
def k_means(X, k, max_iters):
    # initialize the cluster centers
    centroids = kMeansInitCentroids(X, k)
    for i in range(max_iters):
        # assign each sample to its closest centroid
        idx = findClosestCentroids(X, centroids)
        # recompute the centroids
        centroids = computeCentroids(X, idx)
    return idx, centroids

if __name__ == '__main__':
    X = loaddata()
    plt.scatter(X[:, 0], X[:, 1], s=20)
    idx, centroids = k_means(X, 3, 8)
    # print the assignments and the cluster centers
    print('Cluster assignments:\n', idx)
    print('Cluster centers:\n', centroids)
    # plot the clustered samples and the centers
    cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
    plt.scatter(X[:, 0], X[:, 1], c=np.ravel(idx), cmap=cm_dark, s=20)
    plt.scatter(centroids[:, 0], centroids[:, 1], c=np.arange(len(centroids)), cmap=cm_dark, s=500)
    plt.show()
Cluster centers:
[[3.04367119 1.01541041]
[1.95399466 5.02557006]
[6.03366736 3.00052511]]
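One caveat about the random seeding above: different runs can converge to different local optima. A common remedy is k-means++ seeding, which spreads the initial centroids out by sampling each new centroid with probability proportional to its squared distance from the nearest centroid already chosen. A minimal sketch, not part of the original post (kMeansPlusPlusInit is a hypothetical drop-in replacement for kMeansInitCentroids):

import numpy as np

def kMeansPlusPlusInit(X, k):
    # choose the first centroid uniformly at random
    centroids = [X[np.random.randint(X.shape[0])]]
    for _ in range(k - 1):
        # squared distance from each sample to its nearest chosen centroid
        d2 = np.min([np.sum((X - c) ** 2, axis=1) for c in centroids], axis=0)
        # sample the next centroid proportionally to that squared distance
        probs = d2 / d2.sum()
        centroids.append(X[np.random.choice(X.shape[0], p=probs)])
    return np.array(centroids)

sklearn's KMeans, used in the next section, applies this seeding by default (init='k-means++').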
4. Sklearn Implementation of K-means
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans

# Load the data
def loaddata():
    data = np.loadtxt('data/cluster_data.csv', delimiter=',')
    return data

if __name__ == '__main__':
    X = loaddata()
    # n_clusters is the number of clusters; max_iter caps the iterations of one run
    model = KMeans(n_clusters=3, max_iter=10)
    model.fit(X)
    print('Cluster centers:\n', model.cluster_centers_)
    print('Cluster label of each sample:\n', model.labels_)
    # plot
    cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
    plt.scatter(X[:, 0], X[:, 1], c=model.labels_, cmap=cm_dark, s=20)
    plt.scatter(model.cluster_centers_[:, 0], model.cluster_centers_[:, 1], c=np.arange(len(model.cluster_centers_)),
                marker='*', s=100)
    plt.show()
Cluster centers (the same three centers as in Section 3, in a different order):
[[6.03366736 3.00052511]
[1.95399466 5.02557006]
[3.04367119 1.01541041]]
Cluster label of each sample:
[1 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 1]
5. Hierarchical Clustering
1) Principle and distance computation
Agglomerative (bottom-up) hierarchical clustering starts with every sample as its own cluster and repeatedly merges the two closest clusters. The linkage criterion defines the distance between clusters: single (minimum distance), complete (maximum distance), average (average distance), or ward (minimum variance).
2) Example of hierarchical clustering
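The original post illustrates this with figures that are not reproduced here; as a stand-in, a minimal sketch that builds and plots a dendrogram with SciPy (assuming SciPy is available and using the same cluster_data.csv):

import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage

X = np.loadtxt('data/cluster_data.csv', delimiter=',')
# build the merge tree with complete linkage (maximum inter-cluster distance)
Z = linkage(X, method='complete')
# every merge is drawn as a bar whose height is the merge distance
dendrogram(Z)
plt.show()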
6. Sklearn Implementation of Hierarchical Clustering
# import the hierarchical-clustering class
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering

# Load the data
def loaddata():
    data = np.loadtxt('data/cluster_data.csv', delimiter=',')
    return data

if __name__ == '__main__':
    X = loaddata()
    # linkage selects how the inter-cluster distance is computed:
    # ward: minimum variance; complete: maximum distance;
    # average: average distance; single: minimum distance
    # affinity is the point-wise distance metric, 'euclidean' by default
    # (newer sklearn versions rename this parameter to metric)
    model = AgglomerativeClustering(n_clusters=3, affinity='euclidean', linkage='complete')
    model.fit(X)
    print('Cluster label of each sample:\n', model.labels_)
    # plot
    cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
    plt.scatter(X[:, 0], X[:, 1], c=model.labels_, cmap=cm_dark, s=20)
    plt.show()
Cluster label of each sample:
[1 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 1]
7. Density-Based Clustering
Density-based clustering groups samples according to how tightly they are packed together.
It looks at the connectivity between samples from a density standpoint and grows clusters by repeatedly absorbing density-connected samples, which yields the final clustering.
Key concepts (a minimal sketch of the core-object test follows this list):
the eps-neighborhood of an object
core object
density-reachable
directly density-reachable
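In DBSCAN terms, an object is a core object when its eps-neighborhood, the set of points within distance eps of it, contains at least min_samples points. A minimal sketch of that test (an illustration under those definitions, not the library's implementation):

import numpy as np

def is_core_object(X, i, eps, min_samples):
    # eps-neighborhood: all samples within distance eps of X[i],
    # including X[i] itself
    dists = np.linalg.norm(X - X[i], axis=1)
    return np.sum(dists <= eps) >= min_samples

A cluster is then grown outward from core objects through their directly density-reachable neighbors; points reachable from no core object are labeled noise.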
8. Sklearn Implementation of Density-Based Clustering
# density clustering does not need the number of clusters up front
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN

# Load the data
def loaddata():
    data = np.loadtxt('data/cluster_data.csv', delimiter=',')
    return data

if __name__ == '__main__':
    X = loaddata()
    # eps: neighborhood radius; min_samples: neighbors needed for a core object
    model = DBSCAN(eps=0.5, min_samples=5, metric='euclidean')
    model.fit(X)
    print('Cluster label of each sample:', model.labels_)
    # plot (label -1 marks noise points)
    cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b', 'c'])
    plt.scatter(X[:, 0], X[:, 1], c=model.labels_, cmap=cm_dark, s=20)
    plt.show()
Cluster label of each sample (-1 marks noise): [ 0 -1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -1 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 -1 0 0 0 0 0 0 0 0 0 0 -1
0 0 0 0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 -1 2 -1 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 -1 -1 1 1 1 1 1 1 1 1 1 1 1 1 -1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 0]
9. Gaussian Mixture Model
1) Introduction to the Gaussian mixture model
A Gaussian mixture model describes the data as a weighted combination of several Gaussian components.
2) Parameter estimation for the Gaussian mixture model
The parameters are estimated with the EM algorithm; gamma(i, k) denotes the probability (the responsibility) that sample i belongs to component k.
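For reference, these are the standard EM updates that the from-scratch code in the next section implements, with the E-step responsibilities first and the M-step re-estimation second:

$$
\gamma(i,k) = \frac{\pi_k \, \mathcal{N}(x_i \mid \mu_k, \Sigma_k)}{\sum_j \pi_j \, \mathcal{N}(x_i \mid \mu_j, \Sigma_j)}
$$

$$
\mu_k = \frac{\sum_i \gamma(i,k)\, x_i}{\sum_i \gamma(i,k)}, \qquad
\Sigma_k = \frac{\sum_i \gamma(i,k)\, (x_i - \mu_k)(x_i - \mu_k)^\top}{\sum_i \gamma(i,k)}, \qquad
\pi_k = \frac{1}{n} \sum_i \gamma(i,k)
$$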
10. GMM Implementation from Scratch
# Gaussian mixture model from scratch
import numpy as np
# multivariate normal distribution
from scipy.stats import multivariate_normal

# Generate some data to experiment with
# 1. male heights: mean 1.71, standard deviation 0.056
np.random.seed(0)
mu_m = 1.71      # mean
sigma_m = 0.056  # standard deviation
num_m = 10000    # number of samples
# normal distribution with the given mean and standard deviation
rand_data_m = np.random.normal(mu_m, sigma_m, num_m)
# labels
y_m = np.ones(num_m)
# 2. female heights: mean 1.58, standard deviation 0.051
np.random.seed(0)
mu_w = 1.58
sigma_w = 0.051
num_w = 10000
rand_data_w = np.random.normal(mu_w, sigma_w, num_w)
# labels
y_w = np.zeros(num_w)
# concatenate the male and female data
data = np.append(rand_data_m, rand_data_w)
# reshape into a column vector
data = data.reshape(-1, 1)
y = np.append(y_m, y_w)
print(data)
print(y)

# EM for the Gaussian mixture model
num_iter = 1000
# n is the number of samples, d the dimensionality
n, d = data.shape
# initialize the parameters
mu1 = data.min(axis=0)
mu2 = data.max(axis=0)
# np.identity(N) creates an N x N identity matrix (ones on the diagonal)
sigma1 = np.identity(d)
sigma2 = np.identity(d)
pi = 0.5
for i in range(num_iter):
    # E-step: compute the responsibilities gamma
    # densities of the two multivariate normal components
    norm1 = multivariate_normal(mu1, sigma1)
    norm2 = multivariate_normal(mu2, sigma2)
    tau1 = pi * norm1.pdf(data)
    tau2 = (1 - pi) * norm2.pdf(data)
    gamma = tau1 / (tau1 + tau2)
    # M-step: update mu1
    mu1 = np.dot(gamma, data) / np.sum(gamma)
    # update mu2
    mu2 = np.dot((1 - gamma), data) / np.sum((1 - gamma))
    # update sigma1
    sigma1 = np.dot(gamma * (data - mu1).T, data - mu1) / np.sum(gamma)
    # update sigma2
    sigma2 = np.dot((1 - gamma) * (data - mu2).T, (data - mu2)) / np.sum(1 - gamma)
    # update the mixing weight pi
    pi = np.sum(gamma) / n
print(u'Component weight:\t', pi)
print(u'Means:\t', mu1, mu2)
print(u'Variances:\n', sigma1, '\n\n', sigma2, '\n')
[[1.80878693]
[1.7324088 ]
[1.76480933]
...
[1.60636048]
[1.57832104]
[1.64620368]]
[1. 1. 1. ... 0. 0. 0.]
Component weight: 0.48738846392845536
Means: [1.57749047] [1.70726384]
Variances:
[[0.00244834]]
[[0.00315184]]
These estimates essentially recover the generating parameters: the means are close to 1.58 and 1.71, and the variances are close to 0.051² ≈ 0.0026 and 0.056² ≈ 0.0031.
11. Sklearn Implementation of GMM
# GMM with sklearn
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.mixture import GaussianMixture

# Generate some data to experiment with
# 1. male heights: mean 1.71, standard deviation 0.056
np.random.seed(0)
mu_m = 1.71      # mean
sigma_m = 0.056  # standard deviation
num_m = 10000    # number of samples
# normal distribution with the given mean and standard deviation
rand_data_m = np.random.normal(mu_m, sigma_m, num_m)
# labels
y_m = np.ones(num_m)
# 2. female heights: mean 1.58, standard deviation 0.051
np.random.seed(0)
mu_w = 1.58
sigma_w = 0.051
num_w = 10000
rand_data_w = np.random.normal(mu_w, sigma_w, num_w)
# labels
y_w = np.zeros(num_w)
# concatenate the male and female data
data = np.append(rand_data_m, rand_data_w)
# reshape into a column vector
data = data.reshape(-1, 1)
y = np.append(y_m, y_w)
print(data)
print(y)

# Train the model
# n_components: number of mixture components; max_iter: EM iteration cap;
# 'full': each component gets its own full covariance matrix
g = GaussianMixture(n_components=2, covariance_type='full', tol=1e-6, max_iter=1000)
g.fit(data)
print(u'Component weight:\t', g.weights_[0])
print(u'Means:\n', g.means_, '\n')
print(u'Variances:\n', g.covariances_, '\n')
y_hat = g.predict(data)
# note: the order of the learned components is arbitrary; if the labels come
# out swapped relative to y, the reported accuracy will be close to 0, and
# 1 - accuracy is then the meaningful figure
print('Accuracy:', accuracy_score(y, y_hat))
12. Case Study: Clustering Asian Football Teams
# Cluster analysis of Asian football teams
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

# the index column '国家' holds the country names
df = pd.read_csv('data/football_team_data.csv', delimiter=',', index_col='国家')
# print(df)
X = df.values
# Standardize: transform each column to zero mean and unit variance
std = StandardScaler()
X = std.fit_transform(X)
# print(X)
# Train the model
model = KMeans(n_clusters=3, max_iter=10)
model.fit(X)
print('Cluster labels:\n', model.labels_)
# append the cluster labels as a new column ('聚类结果' = clustering result)
df['聚类结果'] = model.labels_
# print(df)
Standardized features:
[[-0.5842676 0.05223517 -0.64677721]
[-0.97679881 -2.12423024 -1.03291285]
[-0.9466041 -1.77599577 -1.61211632]
[-1.76186121 -1.86305439 -0.83984503]
[-0.76543585 -1.16658546 -0.06757374]
[-0.04076286 0.05223517 -1.22598067]
[ 0.26118422 0.05223517 0.51162973]
[-0.34270994 0.05223517 -0.83984503]
[-0.13134698 0.05223517 -0.45370938]
[ 0.89527309 0.05223517 1.28390102]
[ 0.29137893 0.92282133 1.28390102]
[-0.16154169 0.92282133 0.31856191]
[ 0.71410485 0.92282133 0.12549408]
[ 0.5329366 0.92282133 0.70469755]
[ 2.16345083 0.92282133 1.28390102]
[-1.58069297 -0.81835099 -1.80518414]
[-0.49368348 0.05223517 1.28390102]
[ 0.77449426 0.92282133 -0.26064156]
[ 2.042672 0.92282133 0.89776537]
[ 0.11021068 0.92282133 1.0908332 ]]
Cluster labels:
[2 0 0 0 2 2 1 2 2 1 1 1 1 1 1 0 1 1 1 1]
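To read the result more comfortably, the teams in each cluster can be listed by grouping on the label column; a small follow-up sketch (assuming the script above has just run, so df still holds the '聚类结果' column):

# list the teams belonging to each cluster
for label, group in df.groupby('聚类结果'):
    print('Cluster', label, ':', group.index.tolist())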