机器学习实战——K-均值聚类算法

K-Means聚类算法属于无监督学习算法
python实现(机器学习实战)

import numpy as np
import matplotlib.pyplot as plt
def loadDataSet(filename):
    """Load a tab-delimited text file into a list of float rows.

    Each line of the file is split on tabs and every field converted to
    float, so the result is a list of equal-length lists of floats.

    Args:
        filename: path to a tab-separated numeric data file.

    Returns:
        list[list[float]]: one inner list per line of the file.
    """
    # Use a context manager so the file handle is closed even on error
    # (the original left the file open).
    with open(filename) as fr:
        return [list(map(float, line.strip().split('\t')))
                for line in fr]
#欧氏距离:dist = np.linalg.norm(vec1-vec2)
def distEclud(vecA, vecB):
    """Euclidean distance between two row vectors (as noted above,
    equivalent to np.linalg.norm of their difference)."""
    return np.linalg.norm(vecA - vecB)

def randCent(dataSet, k):
    """Build k random centroids inside the bounding box of dataSet.

    For every feature column the centroid coordinates are drawn
    uniformly between that column's min and max (np.random.rand gives
    uniform values in [0, 1)).

    Args:
        dataSet: (m, n) numpy matrix of data points.
        k: number of centroids to generate.

    Returns:
        (k, n) numpy matrix of random centroids.
    """
    numFeatures = np.shape(dataSet)[1]
    centroids = np.mat(np.zeros((k, numFeatures)))
    for col in range(numFeatures):
        colMin = dataSet[:, col].min()
        span = float(dataSet[:, col].max() - colMin)
        # one uniform draw per centroid, scaled into [colMin, colMin + span)
        centroids[:, col] = colMin + span * np.random.rand(k, 1)
    return centroids

# Demo: load the sample data set and scatter-plot the raw (unclustered)
# points.  NOTE(review): the path is a hard-coded local Windows path to the
# book's Ch10 data — adjust it for your machine.
dataMat = np.mat(loadDataSet('F:/机器学习实战/machinelearninginaction/Ch10/testSet.txt'))
fig=plt.figure()
ax=fig.add_subplot(111)
# .flatten().A[0] converts a matrix column into a flat 1-D array for scatter
ax.scatter(dataMat[:,0].flatten().A[0],dataMat[:,1].flatten().A[0])
plt.show()

def kMeans(dataSet, k, distMeans = distEclud, createCent = randCent):
    """Standard K-means clustering.

    Repeatedly assigns every point to its nearest centroid and moves each
    centroid to the mean of its assigned points, until no assignment
    changes.

    Args:
        dataSet: (m, n) numpy matrix of data points.
        k: number of clusters.
        distMeans: distance function taking two row vectors (default:
            Euclidean distance).
        createCent: centroid initializer taking (dataSet, k) (default:
            random centroids inside the data's bounding box).

    Returns:
        tuple (centroids, clusterAssment):
            centroids: (k, n) matrix of final cluster centers.
            clusterAssment: (m, 2) matrix; column 0 is the assigned
                cluster index, column 1 is the squared distance (error)
                to that centroid.
    """
    m = np.shape(dataSet)[0]
    # column 0: cluster index; column 1: squared error to that centroid
    clusterAssment = np.mat(np.zeros((m, 2)))
    centroids = createCent(dataSet, k)
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        # Assignment step: each point goes to its nearest centroid.
        for i in range(m):
            minDist = np.inf
            minIndex = -1
            for j in range(k):
                distJI = distMeans(centroids[j, :], dataSet[i, :])
                if distJI < minDist:
                    minDist = distJI
                    minIndex = j
            if clusterAssment[i, 0] != minIndex:
                clusterChanged = True
            clusterAssment[i, :] = minIndex, minDist ** 2
        # Update step: move each centroid to the mean of its points.
        for cent in range(k):
            ptsInClust = dataSet[np.nonzero(clusterAssment[:, 0].A == cent)[0]]
            # BUGFIX: np.mean over an empty slice is NaN and would poison the
            # centroid forever; keep the previous centroid if the cluster is
            # empty this round.
            if len(ptsInClust) > 0:
                centroids[cent, :] = np.mean(ptsInClust, axis=0)
    return centroids, clusterAssment

# Demo: run K-means with k=4 on the loaded data and plot each cluster in a
# different color/marker, with the centroids drawn as cyan '+' markers.
mycentroids,clusterAssing=kMeans(dataMat,4)
# Select the rows assigned to each cluster; the ==0/1/2/3 compares against
# the cluster index stored in column 0 of clusterAssing.
dataMat1 = dataMat[np.nonzero(clusterAssing[:,0].A==0)[0]]
dataMat2 = dataMat[np.nonzero(clusterAssing[:,0].A==1)[0]]
dataMat3 = dataMat[np.nonzero(clusterAssing[:,0].A==2)[0]]
dataMat4 = dataMat[np.nonzero(clusterAssing[:,0].A==3)[0]]
fig=plt.figure()
ax1=fig.add_subplot(111)
ax1.scatter(dataMat1[:,0].flatten().A[0],dataMat1[:,1].flatten().A[0],c='m',marker='o')
ax1.scatter(dataMat2[:,0].flatten().A[0],dataMat2[:,1].flatten().A[0],c='r',marker='*')
ax1.scatter(dataMat3[:,0].flatten().A[0],dataMat3[:,1].flatten().A[0],c='g',marker='s')
ax1.scatter(dataMat4[:,0].flatten().A[0],dataMat4[:,1].flatten().A[0],c='y',marker='^')
ax1.scatter(mycentroids[0,0],mycentroids[0,1],c='c',marker='+')
ax1.scatter(mycentroids[1,0],mycentroids[1,1],c='c',marker='+')
ax1.scatter(mycentroids[2,0],mycentroids[2,1],c='c',marker='+')
ax1.scatter(mycentroids[3,0],mycentroids[3,1],c='c',marker='+')
plt.show()

上述是最基础的聚类算法:K-均值聚类算法。该算法假设将一个数据集分为K簇,因此先初始化K个质心,然后对于数据集中的每一个点都计算与所有质心的距离,选取距离最近的对应的质心,并把该点分到该簇中,直到任一点的簇分配结果不发生改变时,迭代终止,最后质心的取值就是给定簇数据集的均值。

但是K-均值聚类算法也有不少缺陷,首先K是人为指定的,如果K不恰当的话,可能聚类效果就不是很好,如果K选取的较大,那么将会带来很大的计算复杂度。

因此二分K-均值算法被提出,主要思想就是先将数据集中所有的点都作为一簇,然后对其进行二分类,接着选择其中一个簇进行划分,划分之后计算划分的误差和剩余数据误差之和,统计误差,再对另一簇进行划分,也统计误差,比较对哪一簇划分的总误差最小,就选择对该簇进行划分。

def biKmeans(dataSet, k, distMeas=distEclud):
    """Bisecting K-means clustering.

    Starts with all points in one cluster, then repeatedly runs 2-means
    on each existing cluster and keeps the split that yields the lowest
    total SSE (sum of squared errors), until k clusters exist.

    Args:
        dataSet: (m, n) numpy matrix of data points.
        k: target number of clusters.
        distMeas: distance function taking two row vectors (default:
            Euclidean distance).

    Returns:
        tuple (centroids, clusterAssment):
            centroids: (k, n) matrix of final cluster centers.
            clusterAssment: (m, 2) matrix; column 0 is the assigned
                cluster index, column 1 is the squared error.
    """
    m = np.shape(dataSet)[0]
    # column 0: cluster index; column 1: squared error to that cluster's centroid
    clusterAssment = np.mat(np.zeros((m,2)))
    # Initial single cluster: its centroid is the mean of all points.
    centroid0 = np.mean(dataSet, axis=0).tolist()[0]
    centList =[centroid0] #create a list with one centroid
    for j in range(m):#calc initial Error
        clusterAssment[j,1] = distMeas(np.mat(centroid0), dataSet[j,:])**2
    while (len(centList) < k):
        lowestSSE = np.inf
        # Try bisecting every current cluster; keep the best split.
        for i in range(len(centList)):
            ptsInCurrCluster = dataSet[np.nonzero(clusterAssment[:,0].A==i)[0],:]#get the data points currently in cluster i
            centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
            sseSplit = sum(splitClustAss[:,1])#compare the SSE to the currrent minimum
            # SSE of all points NOT in cluster i (unchanged by this split).
            sseNotSplit = sum(clusterAssment[np.nonzero(clusterAssment[:,0].A!=i)[0],1])
            print ("sseSplit, and notSplit: ",sseSplit,sseNotSplit)
            if (sseSplit + sseNotSplit) < lowestSSE:
                bestCentToSplit = i
                bestNewCents = centroidMat
                bestClustAss = splitClustAss.copy()
                lowestSSE = sseSplit + sseNotSplit
        # Relabel the 2-means sub-clusters: label 1 becomes a brand-new
        # cluster index, label 0 keeps the index of the cluster we split.
        bestClustAss[np.nonzero(bestClustAss[:,0].A == 1)[0],0] = len(centList) #change 1 to 3,4, or whatever
        bestClustAss[np.nonzero(bestClustAss[:,0].A == 0)[0],0] = bestCentToSplit
        print ('the bestCentToSplit is: ',bestCentToSplit)
        print ('the len of bestClustAss is: ', len(bestClustAss))
        centList[bestCentToSplit] = bestNewCents[0,:].tolist()[0]#replace a centroid with two best centroids 
        print("--------------",centList)
        centList.append(bestNewCents[1,:].tolist()[0])
        # Write the new assignments back for the points of the split cluster.
        clusterAssment[np.nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:]= bestClustAss#reassign new clusters, and SSE
    return np.mat(centList), clusterAssment

# Demo: run bisecting K-means with k=3 on a second sample file and plot the
# three clusters plus their centroids.  NOTE(review): hard-coded local path —
# adjust for your machine.
dataMat3 = np.mat(loadDataSet('F:/机器学习实战/machinelearninginaction/Ch10/testSet2.txt'))
centList,myNewAssments = biKmeans(dataMat3,3)

# Select rows by assigned cluster index (column 0 of myNewAssments).
dataMat1 = dataMat3[np.nonzero(myNewAssments[:,0].A==0)[0]]
dataMat2 = dataMat3[np.nonzero(myNewAssments[:,0].A==1)[0]]
# NOTE(review): this rebinds dataMat3, shadowing the full data set loaded
# above — works here only because dataMat3 is not used again afterwards.
dataMat3 = dataMat3[np.nonzero(myNewAssments[:,0].A==2)[0]]
fig=plt.figure()
ax2=fig.add_subplot(111)
ax2.scatter(dataMat1[:,0].flatten().A[0],dataMat1[:,1].flatten().A[0],c='m',marker='o')
ax2.scatter(dataMat2[:,0].flatten().A[0],dataMat2[:,1].flatten().A[0],c='r',marker='*')
ax2.scatter(dataMat3[:,0].flatten().A[0],dataMat3[:,1].flatten().A[0],c='g',marker='s')
ax2.scatter(centList[0,0],centList[0,1],c='c',marker='+')
ax2.scatter(centList[1,0],centList[1,1],c='c',marker='+')
ax2.scatter(centList[2,0],centList[2,1],c='c',marker='+')
plt.show()

就这样。

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值