Machine Learning: Implementing the kMeans Algorithm in Python

Code and dataset download: K-means
Clustering is a form of unsupervised learning that groups similar objects into the same cluster, a bit like fully automatic classification. The more similar the objects within a cluster and the more different the objects between clusters, the better the clustering result.
1. The k-means clustering algorithm
k-means partitions the data into k clusters, each described by its centroid, i.e. the center (mean) of all the points in that cluster. First, k initial points are chosen at random as centroids, and every data point is assigned to the cluster with the nearest centroid. Each cluster's centroid is then updated to the mean of the points assigned to it, and the points are re-assigned. These two steps repeat until the cluster assignments no longer change.
Pseudocode:

Randomly create k cluster centroids
While any point's cluster assignment changes:
    For each data point in the dataset:
        For each centroid:
            Compute the distance from the point to the centroid
        Assign the point to the cluster of the nearest centroid
    For each cluster, compute the mean of all points in it and use that mean as the new centroid
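
As a tiny illustration of one assignment-and-update pass (the three 1-D points and the two starting centroids below are made-up toy values, not part of the dataset used later):

import numpy as np

points = np.array([[1.0], [1.2], [5.0]])      # three toy 1-D points (invented for illustration)
centroids = np.array([[0.0], [6.0]])          # two starting centroids

# Assignment step: each point goes to the nearest centroid
dists = np.abs(points - centroids.T)          # (3, 2) table of point-to-centroid distances
labels = np.argmin(dists, axis=1)             # -> [0, 0, 1]

# Update step: each centroid becomes the mean of the points assigned to it
for j in range(2):
    centroids[j] = points[labels == j].mean(axis=0)
print(centroids)                              # [[1.1], [5.0]]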

Python implementation

import numpy as np
import matplotlib.pyplot as plt

def loadDataSet(fileName):
    # Read a tab-separated text file of numbers into an (m, n) float array
    dataMat = []
    with open(fileName) as f:
        for line in f:
            line = line.strip().split('\t')
            dataMat.append(line)
    dataMat = np.array(dataMat).astype(np.float32)
    return dataMat


def distEclud(vecA, vecB):
    # Euclidean distance between two row vectors
    return np.sqrt(np.sum(np.power((vecA - vecB), 2)))

def randCent(dataSet, k):
    # Create k random centroids, each coordinate drawn uniformly
    # from the [min, max] range of that dimension in the data
    n = np.shape(dataSet)[1]
    center = np.mat(np.ones((k, n)))
    for i in range(n):
        centmin = min(dataSet[:, i])
        centmax = max(dataSet[:, i])
        center[:, i] = centmin + (centmax - centmin) * np.random.rand(k, 1)
    return center
def kMeans(dataSet, k, distMeans=distEclud, createCent=randCent):
    m = np.shape(dataSet)[0]
    # clusterAssment: column 0 = cluster index, column 1 = squared distance to that centroid
    clusterAssment = np.mat(np.zeros((m, 2)))
    centroids = createCent(dataSet, k)
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        # Assignment step: put every point into the cluster of its nearest centroid
        for i in range(m):
            minDist = np.inf
            minIndex = -1
            for j in range(k):
                distJI = distMeans(dataSet[i, :], centroids[j, :])
                if distJI < minDist:
                    minDist = distJI
                    minIndex = j
            if clusterAssment[i, 0] != minIndex:
                clusterChanged = True
            clusterAssment[i, :] = minIndex, minDist**2
        # Update step: move each centroid to the mean of the points assigned to it
        for cent in range(k):
            ptsInClust = dataSet[np.nonzero(clusterAssment[:, 0].A == cent)[0]]
            if len(ptsInClust) > 0:      # skip empty clusters to avoid a NaN centroid
                centroids[cent, :] = np.mean(ptsInClust, axis=0)
    return centroids, clusterAssment



data = loadDataSet('testSet.txt')
muCentroids, clusterAssing = kMeans(data, 4)

fig = plt.figure(0)
ax = fig.add_subplot(111)
# Color each point by its cluster index (flattened to a 1-D array for scatter)
ax.scatter(data[:, 0], data[:, 1], c=clusterAssing[:, 0].A.ravel())
plt.show()

print(clusterAssing)
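
The script reads testSet.txt, a plain text file with one point per line and the coordinates separated by tabs, which comes with the download above. If you don't have the file, a small sketch like the following writes a made-up substitute in the same format (the four Gaussian blobs are invented sample data, not the original dataset):

import numpy as np

# Write a made-up testSet.txt: 4 Gaussian blobs of 2-D points, tab-separated,
# matching the format that loadDataSet() expects
np.random.seed(0)
centers = [(-3, -3), (-3, 3), (3, -3), (3, 3)]
rows = []
for cx, cy in centers:
    blob = np.random.randn(20, 2) * 0.8 + np.array([cx, cy])
    rows.append(blob)
points = np.vstack(rows)
with open('testSet.txt', 'w') as f:
    for x, y in points:
        f.write('%f\t%f\n' % (x, y))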

2. The bisecting k-means algorithm
The k-means algorithm may converge to a local minimum rather than the global minimum. A common measure of clustering quality is the sum of squared errors (SSE); because the errors are squared, points far away from their centroid are weighted more heavily. To overcome the local-minimum problem, the bisecting k-means algorithm was proposed.
It starts by treating all points as one cluster and splitting that cluster in two. It then repeatedly chooses, among all current clusters, the one whose split reduces the SSE the most, until the required number of clusters is reached.
Pseudocode:

Treat all points as one cluster
Compute the SSE
While the number of clusters is less than k:
    For each cluster:
        Compute the total error
        Run k-means with k = 2 on the points in that cluster
        Compute the total error after splitting this cluster in two
    Split the cluster whose split gives the lowest total error
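
The "total error" in the pseudocode is the SSE. Since kMeans above already stores each point's squared distance to its assigned centroid in the second column of clusterAssment, the SSE of the whole clustering, or of a single cluster, is just a sum over that column. For example, assuming the muCentroids and clusterAssing returned by the first listing are still in scope:

sseTotal = np.sum(clusterAssing[:, 1])                      # SSE of the whole clustering
inCluster0 = np.nonzero(clusterAssing[:, 0].A == 0)[0]      # row indices of points in cluster 0
sseCluster0 = np.sum(clusterAssing[inCluster0, 1])          # SSE of cluster 0 alone
print(sseTotal, sseCluster0)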
import numpy as np
import matplotlib.pyplot as plt

# loadDataSet, distEclud, randCent and kMeans are identical to the first listing above.

def biKmeans(dataSet, k, distMeans=distEclud):
    m = np.shape(dataSet)[0]
    # clusterAssment: column 0 = cluster index, column 1 = squared distance to that centroid
    clusterAssment = np.mat(np.zeros((m, 2)))
    # Start with a single cluster whose centroid is the mean of all points
    centroid0 = np.mean(dataSet, axis=0).tolist()
    centList = [centroid0]
    for j in range(m):
        clusterAssment[j, 1] = distMeans(dataSet[j, :], np.mat(centroid0))**2
    while (len(centList) < k):
        lowestSSE = np.inf
        # Try splitting each existing cluster with 2-means and keep the split
        # that gives the lowest overall SSE
        for i in range(len(centList)):
            ptsInCurrCluster = dataSet[np.nonzero(clusterAssment[:, 0].A == i)[0], :]
            centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeans)
            sseSplit = np.sum(splitClustAss[:, 1])          # SSE of the cluster after splitting
            sseNotSplit = np.sum(clusterAssment[np.nonzero(clusterAssment[:, 0].A != i)[0], 1])
            if (sseSplit + sseNotSplit) < lowestSSE:
                bestCentToSplit = i
                bestNewCents = centroidMat.copy()
                bestClustAss = splitClustAss.copy()
                lowestSSE = sseSplit + sseNotSplit
        print('the best cent to split is ', bestCentToSplit)
        # Relabel the two halves: label 1 becomes a brand-new cluster index,
        # label 0 keeps the index of the cluster that was split
        bestClustAss[np.nonzero(bestClustAss[:, 0].A == 1)[0], 0] = len(centList)
        bestClustAss[np.nonzero(bestClustAss[:, 0].A == 0)[0], 0] = bestCentToSplit
        clusterAssment[np.nonzero(clusterAssment[:, 0].A == bestCentToSplit)[0], :] = bestClustAss.copy()
        # Replace the old centroid and append the new one
        centList[bestCentToSplit] = bestNewCents[0, :].tolist()[0]
        centList.append(bestNewCents[1, :].tolist()[0])
    return np.mat(centList), clusterAssment







data = loadDataSet('testSet2.txt')
muCentroids, clusterAssing = biKmeans(data, 3)

fig = plt.figure(0)
ax = fig.add_subplot(111)
# Color each point by its cluster index and mark the final centroids with '+'
ax.scatter(data[:, 0], data[:, 1], c=clusterAssing[:, 0].A.ravel(), cmap=plt.cm.Paired)
ax.scatter(muCentroids[:, 0].A.ravel(), muCentroids[:, 1].A.ravel(), marker='+', s=100, c='k')
plt.show()

print(clusterAssing)
print(muCentroids)
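
As a sanity check, the same data can be clustered with scikit-learn's KMeans (this assumes scikit-learn is installed; it is not part of the original code). The centroids it finds should be close to muCentroids, although the cluster labels may be permuted:

from sklearn.cluster import KMeans

km = KMeans(n_clusters=3, n_init=10, random_state=0)
labels = km.fit_predict(data)        # 'data' is the array loaded from testSet2.txt above
print(km.cluster_centers_)           # compare with muCentroids (label order may differ)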