案例
1.读取数据,并简单处理一下
import pandas as pd

# Load the scraped job-postings table; drop the two leftover positional
# index columns produced by earlier DataFrame-to-CSV round-trips.
csv_path = r'data0507.csv'
data = pd.read_csv(csv_path, engine='python', encoding='utf-8')
data = data.drop(columns=["Unnamed: 0", 'Unnamed: 0.1'])
data.head(1)
由于所有数据均为文本,不能直接进行聚类分析,所以分析前需先将字符型数据转为数值型,例如我们将根据职位的薪酬(两个特征:薪酬范围上限与薪酬范围下限)进行分类
2.数据预处理
- 实际上就是基于 “薪资” 这一列衍生出 “up” 、“down” 这两列,分别代表薪酬的上限与下限
- 所有计量单位统一转为:元/月
- 如果薪酬需面议,则暂不做处理
Result_up = []
Result_down = []


def _parse_salary(salary):
    """Parse one raw salary string into a (down, up) pair in yuan/month.

    Returns the literal string '面议' for both bounds when the salary is
    negotiable, and (0, 0) for anything that cannot be parsed.
    """
    try:
        w = salary.replace('/', '').replace('以下', '').replace('以上', '')
        for token in ('万', '千', '元', '天', '月', '年'):
            w = w.replace(token, '')
        if w == '面议':
            # Negotiable: pass the marker through; it is filtered later.
            return w, w
        sep = w.find('-')
        if sep != -1:
            down = float(w[:sep])
            up = float(w[sep + 1:])
        else:
            down = up = float(w)
    # Narrow exceptions instead of a bare except: AttributeError/TypeError
    # cover non-string cells (e.g. NaN), ValueError covers unparseable text.
    except (AttributeError, TypeError, ValueError):
        return 0, 0
    # Base multiplier from the unit word (万=10000, 千=1000).
    if '万' in salary:
        scale = 10000
    elif '千' in salary:
        scale = 1000
    else:
        scale = 1
    result_down = down * scale
    result_up = up * scale
    # Mirror the original rules exactly: per-day (*30) and per-year (/12)
    # rescaling is only applied when a 万/千 unit is present.
    if scale != 1:
        if '天' in salary:
            result_down *= 30
            result_up *= 30
        elif '年' in salary:
            result_down /= 12
            result_up /= 12
    return result_down, result_up


# Derive the 'down'/'up' salary-bound columns from the raw text column.
for salary in data['薪资']:
    result_down, result_up = _parse_salary(salary)
    Result_down.append(result_down)
    Result_up.append(result_up)
data['down'] = Result_down
data['up'] = Result_up
由于工资面议的话,薪酬范围实在不好自定义,所以直接删掉
# Negotiable ("面议") salaries have no usable numeric range, so drop those
# rows. A vectorized boolean mask replaces the original row-by-row iloc
# loop (which copied every kept row into a Python list of Series) and
# preserves the index of the remaining rows just the same.
data = data[data['up'] != '面议']
data.head()
3.聚类,并可视化结果
import numpy as np
from sklearn import cluster
import matplotlib.pyplot as plt

# Cluster every posting by its (lower, upper) monthly-salary pair and
# record the assigned cluster id back on the frame, then scatter-plot
# each cluster in its own color.
features = np.array(data[['down', 'up']])
model = cluster.KMeans()
model.fit(features)
predicted_X2 = model.predict(features)
data['聚类类别2'] = predicted_X2
cluster_ids = np.unique(predicted_X2)

fig = plt.figure(figsize=(15, 10))
# Switch to a CJK-capable font so the Chinese axis labels render.
plt.rcParams['font.family'] = ['sans-serif']
plt.rcParams['font.sans-serif'] = ['SimHei']
ax = fig.add_subplot(1, 1, 1)
palette = 'rgbyckm'
for idx, cid in enumerate(cluster_ids):
    mask = predicted_X2 == cid
    ax.scatter(features[mask, 0], features[mask, 1],
               label='cluster %d' % cid,
               color=palette[idx % len(palette)], s=6)
ax.legend(loc='best', framealpha=0.5)
ax.set_xlabel('薪资下限')
ax.set_ylabel('薪资上限')
ax.set_title('聚类结果')
plt.show()
书本内容打卡
模型原型
sklearn.cluster.KMeans(n_clusters=8,init='k-means++',n_init=10,max_iter=300,tol=0.0001, precompute_distances='auto',verbose=0,random_state=None,copy_x=True)
参数
- n_clusters:分类簇的数量
- init:指定初始均值向量的策略
- 'k-means++':该初始化策略选择的初始均值向量相互之间都距离较远,它的效果较好
- ‘random’:从数据集中随机选择K个样本作为初始均值向量
- 提供一个数组(形状为 (n_clusters, n_features)),该数组作为初始均值向量
- n_init:k均值算法运行的次数
- max_iter:单轮k均值算法中最大的迭代次数
- tol
- precompute_distances:是否提前计算好样本之间的距离(如果提前计算好距离,则需要更多的内存,但算法会运行得更快)
- ‘auto’:如果n_samples*n_clusters>12million则提前计算
- True:提前计算
- False:不提前计算
- verbose:
- 0:不输出日志信息
- 1:每隔一段时间打印一次日志信息
- 大于1:打印日志信息更频繁了
- random_state
- copy_x:主要用于precompute_distances=True的情况
- True:计算距离的时候,不修改原始数据
- False:计算距离的时候,会修改原始数据用于节省内存;然后当算法结束的时候会将原始数据返回(可能会因为浮点数的表示,而有一些精度误差)
属性
- cluster_centers_:分类簇的均值向量
- labels_:每个样本所属簇的标记
- inertia_:每个样本与最近簇中心的距离之和
方法
- fit(X[,y])
- fit_predict(X[,y])
- predict(X)
- score(X[,y])
import numpy as np
import matplotlib.pyplot as plt
# sklearn.datasets.samples_generator was removed in scikit-learn 0.24;
# make_blobs is imported directly from sklearn.datasets.
from sklearn.datasets import make_blobs
from sklearn import cluster
from sklearn.metrics import adjusted_rand_score
from sklearn import mixture
产生数据
def create_data(centers, num=100, std=0.7):
    """Draw isotropic Gaussian blobs around the given centers.

    Returns the sample matrix and the ground-truth cluster labels.
    """
    samples, truth = make_blobs(n_samples=num, centers=centers,
                                cluster_std=std)
    return samples, truth
查看生成的样本点
def plot_data(*data):
    """Scatter-plot the generated samples, one color per true cluster."""
    X, labels_true = data
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    palette = 'rgbyckm'
    for idx, lab in enumerate(np.unique(labels_true)):
        members = labels_true == lab
        ax.scatter(X[members, 0], X[members, 1],
                   label='cluster %d' % lab,
                   color=palette[idx % len(palette)])
    ax.legend(loc='best', framealpha=0.5)
    ax.set_xlabel('X[0]')
    ax.set_ylabel('Y[1]')
    ax.set_title('data')
    plt.show()
# Generate 1000 samples around four centers (three overlapping near the
# origin, one far away at (10, 20)) with std 0.5, then plot ground truth.
X,labels_true=create_data([[1,1],[2,2],[1,2],[10,20]],1000,0.5)
plot_data(X,labels_true)
使用Kmeans
def test_Kmeans(*data):
    """Fit a default KMeans model and report ARI and total inertia."""
    X, labels_true = data
    model = cluster.KMeans()
    model.fit(X)
    preds = model.predict(X)
    print('ARI:%s' % adjusted_rand_score(labels_true, preds))
    print('Sum center distance %s' % model.inertia_)


test_Kmeans(X, labels_true)
簇的数量的影响
def test_Kmeans_nclusters(*data):
    """Plot how ARI and inertia evolve as n_clusters sweeps 1..49."""
    X, labels_true = data
    cluster_counts = range(1, 50)
    ari_scores = []
    inertias = []
    for k in cluster_counts:
        model = cluster.KMeans(n_clusters=k)
        model.fit(X)
        preds = model.predict(X)
        ari_scores.append(adjusted_rand_score(labels_true, preds))
        inertias.append(model.inertia_)
    fig = plt.figure()
    ax = fig.add_subplot(1, 2, 1)
    ax.plot(cluster_counts, ari_scores, marker='+')
    ax.set_xlabel('n_clusters')
    ax.set_ylabel('ARI')
    ax = fig.add_subplot(1, 2, 2)
    ax.plot(cluster_counts, inertias, marker='o')
    ax.set_xlabel('n_clusters')
    ax.set_ylabel('inertia_')
    fig.suptitle('KMeans')
    plt.show()


test_Kmeans_nclusters(X, labels_true)
k均值算法运行的次数和选择初始中心向量策略的影响
def test_Kmeans_n_init(*data):
    """Compare 'k-means++' vs 'random' init across n_init values 1..49."""
    X, labels_true = data
    run_counts = range(1, 50)
    fig = plt.figure()
    # strategy -> ([ARI per run count], [inertia per run count])
    metrics = {'k-means++': ([], []), 'random': ([], [])}
    for n in run_counts:
        # Fit k-means++ first, then random, matching the original order.
        for strategy in ('k-means++', 'random'):
            model = cluster.KMeans(n_init=n, init=strategy)
            model.fit(X)
            preds = model.predict(X)
            aris, dists = metrics[strategy]
            aris.append(adjusted_rand_score(labels_true, preds))
            dists.append(model.inertia_)
    ax = fig.add_subplot(1, 2, 1)
    ax.plot(run_counts, metrics['k-means++'][0], marker='+', label='k-means++')
    ax.plot(run_counts, metrics['random'][0], marker='+', label='random')
    ax.set_xlabel('n_init')
    ax.set_ylabel('ARI')
    ax.set_ylim(0, 1)
    ax.legend(loc='best')
    ax = fig.add_subplot(1, 2, 2)
    ax.plot(run_counts, metrics['k-means++'][1], marker='+', label='k-means++')
    ax.plot(run_counts, metrics['random'][1], marker='+', label='random')
    ax.set_xlabel('n_init')
    ax.set_ylabel('inertia_')
    ax.legend(loc='best')
    fig.suptitle('Kmeans')
    plt.show()


test_Kmeans_n_init(X, labels_true)