k_means Clustering Algorithm

1. Clustering is a form of unsupervised learning that groups similar objects into the same cluster; it works somewhat like fully automatic categorization. Clustering can be applied to almost any kind of object, and the more similar the objects within a cluster, the better the clustering result.
2. The biggest difference between clustering and classification is that in classification the target categories are known in advance, while in clustering they are not. Because the outcome looks the same as classification, just without predefined categories, clustering is sometimes called unsupervised classification.
3. The K-means algorithm:
     3.1 A classic clustering algorithm and one of the top ten classic data-mining algorithms.
     3.2 The algorithm takes a parameter k and partitions the n input data objects into k clusters such that
           objects within the same cluster have high similarity, while objects in different clusters have low similarity.
     3.3 Algorithm idea:
           Cluster around k points in space, assigning each object to the nearest center. Iteratively update the
           value of each cluster center until the best clustering result is obtained.
     3.4 Algorithm description:

           (1) Choose suitable initial centers for the k classes;
           (2) In each iteration, for every sample, compute its distance to each of the k centers and assign the
                   sample to the class whose center is closest;
           (3) Update each class center, e.g. by taking the mean of its members;
           (4) If, after updating via steps (2) and (3), none of the k cluster centers change, stop iterating;
                    otherwise continue.
     3.5 Algorithm flow (a NumPy sketch of steps (2) and (3) follows this list):

           Input: k, data[n];
           (1) Choose k initial centers, e.g. c[0]=data[0], … c[k-1]=data[k-1];
           (2) For data[0]…data[n-1], compare each point with c[0]…c[k-1]; if the difference to c[i] is the smallest, label the point i;
           (3) For all points labeled i, recompute c[i] = (sum of all data[j] labeled i) / (number of points labeled i);
           (4) Repeat (2) and (3) until the change in every c[i] is below a given threshold.
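
Before the full implementations below, here is a minimal vectorized sketch of steps (2) and (3). It is my own illustration, not from any of the sources cited later; kmeans_step is a made-up name, and it assumes Euclidean distance and that no cluster ends up empty.

import numpy as np

def kmeans_step(X, centers):
    # X: (n, d) data array; centers: (k, d) current centers
    # step (2): squared distance from every point to every center, shape (n, k)
    dists = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    labels = dists.argmin(axis=1)  # index of the nearest center for each point
    # step (3): each new center is the mean of its assigned points
    # (assumes every cluster keeps at least one point)
    newCenters = np.array([X[labels == i].mean(axis=0) for i in range(len(centers))])
    return labels, newCenters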


Code following along with a video course from 麦子学院:

# coding: utf-8
import numpy as np

def kmeans(X, k, maxIt):  # X: data matrix; k: number of clusters; maxIt: maximum number of iterations
    numPoints, numDim = X.shape  # row and column counts of the matrix
    dataSet = np.zeros((numPoints, numDim + 1))  # dataSet has one more column than X, holding the cluster label
    dataSet[:, :-1] = X

    centroids = dataSet[np.random.randint(numPoints, size=k), :]  # random rows as initial centroids (rows may repeat)
    # centroids = dataSet[0:2, :]  # alternative: just take the first two rows
    centroids[:, -1] = range(1, k + 1)  # attach labels 1..k to the centroids
    oldCentroids = None  # centroids from the previous iteration
    iterations = 0
    while not shouldStop(oldCentroids, centroids, iterations, maxIt):
        print("iteration: \n", iterations)
        print("dataSet: \n", dataSet)
        print("centroids: \n", centroids)
        # copy the matrix: a plain assignment would make oldCentroids track every change to centroids
        oldCentroids = np.copy(centroids)
        iterations += 1
        updateLabels(dataSet, centroids)  # reassign each point to its nearest centroid
        centroids = getCentroids(dataSet, k)  # recompute the centroids

    return dataSet
def shouldStop(oldCentroids, centroids, iterations, maxIt):  # termination test
    if iterations > maxIt:  # 1. the iteration count exceeded the limit
        return True
    return np.array_equal(oldCentroids, centroids)  # 2. the centroid values did not change
            
def updateLabels(dataSet, centroids):
    
    numPoints,numDim = dataSet.shape
    
    for i in range(numPoints):
        dataSet[i,-1] =  getLabelFromClosestCentroid(dataSet[i, :-1], centroids)
        
def getCentroids(dataSet, k):
    result = np.zeros((k, dataSet.shape[1]))
    for i in range(1, k + 1):
        oneCluster = dataSet[dataSet[:, -1] == i, :-1]  # boolean indexing: all rows currently labeled i (quite elegant)
        result[i - 1, :-1] = np.mean(oneCluster, axis=0)  # axis=0 averages each column, yielding one row
        result[i - 1, -1] = i
    return result
def getLabelFromClosestCentroid(setRow, centroids):
    label = centroids[0, -1]
    minDist = np.linalg.norm(setRow - centroids[0, :-1])  # Euclidean distance via numpy

    for i in range(1, centroids.shape[0]):  # find the nearest centroid
        dist = np.linalg.norm(setRow - centroids[i, :-1])
        if dist < minDist:
            minDist = dist
            label = centroids[i, -1]
    return label
x1 = np.array([1,1])
x2 = np.array([2,1])
x3 = np.array([4,3])
x4 = np.array([5,5])
TestX = np.vstack((x1,x2,x3,x4))

data = kmeans(TestX, 2, 10)
print "final result"
print data
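
For this four-point example the algorithm typically converges to the clusters {x1, x2} and {x3, x4}, with centroids near (1.5, 1) and (4.5, 4), though a particularly unlucky random initialization can yield a different split.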


Data

1.658985	4.285136
-3.453687	3.424321
4.838138	-1.151539
-5.379713	-3.362104
0.972564	2.924086
-3.567919	1.531611
0.450614	-3.302219
-3.487105	-1.724432
2.668759	1.594842
-3.156485	3.191137
3.165506	-3.999838
-2.786837	-3.099354
4.208187	2.984927
-2.123337	2.943366
0.704199	-0.479481
-0.392370	-3.963704
2.831667	1.574018
-0.790153	3.343144
2.943496	-3.357075
-3.195883	-2.283926
2.336445	2.875106
-1.786345	2.554248
2.190101	-1.906020
-3.403367	-2.778288
1.778124	3.880832
-1.688346	2.230267
2.592976	-2.054368
-4.007257	-3.207066
2.257734	3.387564
-2.679011	0.785119
0.939512	-4.023563
-3.674424	-2.261084
2.046259	2.735279
-3.189470	1.780269
4.372646	-0.822248
-2.579316	-3.497576
1.889034	5.190400
-0.798747	2.185588
2.836520	-2.658556
-3.837877	-3.253815
2.096701	3.886007
-2.709034	2.923887
3.367037	-3.184789
-2.121479	-4.232586
2.329546	3.179764
-3.284816	3.273099
3.091414	-3.815232
-3.762093	-2.432191
3.542056	2.778832
-1.736822	4.241041
2.127073	-2.983680
-4.323818	-3.938116
3.792121	5.135768
-4.786473	3.358547
2.624081	-3.260715
-4.009299	-2.978115
2.493525	1.963710
-2.513661	2.642162
1.864375	-3.176309
-3.171184	-3.572452
2.894220	2.489128
-2.562539	2.884438
3.491078	-3.947487
-2.565729	-2.012114
3.332948	3.983102
-1.616805	3.573188
2.280615	-2.559444
-2.651229	-3.103198
2.321395	3.154987
-1.685703	2.939697
3.031012	-3.620252
-4.599622	-2.185829
4.196223	1.126677
-2.133863	3.093686
4.668892	-2.562705
-2.793241	-2.149706
2.884105	3.043438
-2.967647	2.848696
4.479332	-1.764772
-4.905566	-2.911070


The code below follows the book Machine Learning in Action (机器学习实战); the plotting part there is written rather crudely…

# coding: utf-8
from numpy import *
import matplotlib.pyplot as plt
from matplotlib.pyplot import scatter

# load the data
def loadDataSet(filename):
    fr = open(filename)
    datamat = []
    for line in fr.readlines():
        curline = line.strip().split('\t')
        frline = list(map(float, curline))  # convert the text fields to floats
        datamat.append(frline)
    return datamat
# Euclidean distance
def disEclud(vecA, vecB):
    return sqrt(sum(power(vecA - vecB, 2)))

# generate random centroids
def randcent(dataSet, k):
    n = shape(dataSet)[1]  # number of columns
    centroids = mat(zeros((k, n)))  # initialize the centroids

    for j in range(n):
        minJ = min(dataSet[:, j])
        maxJ = max(dataSet[:, j])
        rangeJ = float(maxJ - minJ)
        # keep each centroid coordinate within the min/max range of that column
        centroids[:, j] = minJ + rangeJ * random.rand(k, 1)

    return centroids
def kmeans(dataSet, k, distMeas=disEclud, createCent=randcent):
    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m, 2)))  # per point: [cluster index, squared distance to its centroid]
    centroids = createCent(dataSet, k)
    clusterchanged = True
    while clusterchanged:
        clusterchanged = False
        for i in range(m):  # outer loop: visit every point
            index = -1
            mindis = inf
            for j in range(k):  # inner loop: visit every centroid
                dist = distMeas(dataSet[i], centroids[j])
                if mindis > dist:  # found a closer centroid
                    mindis = dist
                    index = j
            if clusterAssment[i, 0] != index:  # an assignment changed, so keep iterating
                clusterchanged = True
            clusterAssment[i, 0] = index
            clusterAssment[i, 1] = mindis ** 2  # squared distance to the nearest centroid
        print(centroids)
        # move each centroid to the mean of its points
        for cent in range(k):
            ptsInClust = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]
            centroids[cent, :] = mean(ptsInClust, axis=0)
    return centroids, clusterAssment
            
    
def plot(dataSet, clusterAssment):  # scatter plot, one color per cluster
    colors = ["black", "red", "green", "blue"]
    for i in range(len(colors)):
        pts = dataSet[nonzero(clusterAssment[:, 0].A == i)[0]].A  # rows of cluster i, as a plain ndarray
        scatter(pts[:, 0], pts[:, 1], color=colors[i], marker='s', s=30)
    plt.show()

dataSet = loadDataSet(r"C:\Users\QAQ\Desktop\testSet.txt")
datMat = mat(dataSet)
a, b = kmeans(datMat, 4)
plot(datMat, b)
# print("centroids", a)
# print("b", b)

        

In k-means clustering the number of clusters k is a parameter the user defines in advance, so how can the user know whether the choice of k is correct? And how can we tell which of the generated clusters are good? The matrix holding the cluster assignments also stores the error of each point, namely the squared distance from that point to its cluster centroid.


The reason k-means can converge yet still cluster poorly is that the algorithm has converged to a local minimum rather than the global minimum (a local minimum means the result is acceptable but not the best possible; the global minimum is the best possible result). A cheap mitigation is to rerun the algorithm from several random initializations and keep the best run, as in the sketch below.
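
A minimal sketch of that mitigation (my own, not from the book; kmeansRestarts and nRestarts are names made up here). It assumes the kmeans and randcent defined above; note that an unlucky initialization can still leave a cluster empty and yield nan centroids.

def kmeansRestarts(dataSet, k, nRestarts=10):
    # run kmeans several times with different random centroids, keep the lowest-error run
    bestCentroids, bestAssment, bestErr = None, None, inf
    for _ in range(nRestarts):
        centroids, clusterAssment = kmeans(dataSet, k)  # random init happens inside randcent
        err = sum(clusterAssment[:, 1])  # total squared error of this run
        if err < bestErr:
            bestCentroids, bestAssment, bestErr = centroids, clusterAssment, err
    return bestCentroids, bestAssment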

SSE (Sum of Squared Error): a metric for measuring clustering quality; here it corresponds to the sum of the second column of the clusterAssment matrix. A smaller SSE means the points sit closer to their centroids. One way that is guaranteed to lower SSE is to increase the number of clusters, as the sketch below demonstrates, but that defeats the purpose: the goal of clustering is to improve cluster quality while keeping the number of clusters fixed.
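
To see that concretely, a small illustrative sketch (reusing the kmeans and datMat defined above) prints the SSE for increasing k; the SSE shrinks as k grows even though the larger k values are not necessarily better clusterings:

for k in (2, 3, 4, 5):
    _, assment = kmeans(datMat, k)
    print(k, sum(assment[:, 1]))  # SSE drops as k grows, regardless of cluster quality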


To overcome the problem of k-means converging to local minima, the bisecting k-means algorithm was proposed.

   The algorithm first treats all the points as one cluster and splits that cluster in two. It then repeatedly picks one cluster to split further, choosing the cluster whose split most reduces the SSE.


# coding: utf-8
from numpy import *
import matplotlib.pyplot as plt
from matplotlib.pyplot import scatter

# load the data
def loadDataSet(filename):
    fr = open(filename)
    datamat = []
    for line in fr.readlines():
        curline = line.strip().split('\t')
        frline = list(map(float, curline))  # convert the text fields to floats
        datamat.append(frline)
    return datamat
# Euclidean distance
def disEclud(vecA, vecB):
    return sqrt(sum(power(vecA - vecB, 2)))

# generate random centroids
def randcent(dataSet, k):
    n = shape(dataSet)[1]  # number of columns
    centroids = mat(zeros((k, n)))  # initialize the centroids

    for j in range(n):
        minJ = min(dataSet[:, j])
        maxJ = max(dataSet[:, j])
        rangeJ = float(maxJ - minJ)
        # keep each centroid coordinate within the min/max range of that column
        centroids[:, j] = minJ + rangeJ * random.rand(k, 1)

    return centroids
# the kmeans algorithm
def kmeans(dataSet, k, distMeas=disEclud, createCent=randcent):
    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m, 2)))  # per point: [cluster index, squared distance to its centroid]
    centroids = createCent(dataSet, k)
    clusterchanged = True
    while clusterchanged:
        clusterchanged = False
        for i in range(m):  # outer loop: visit every point
            index = -1
            mindis = inf
            for j in range(k):  # inner loop: visit every centroid
                dist = distMeas(dataSet[i], centroids[j])
                if mindis > dist:  # found a closer centroid
                    mindis = dist
                    index = j
            if clusterAssment[i, 0] != index:  # an assignment changed, so keep iterating
                clusterchanged = True
            clusterAssment[i, 0] = index
            clusterAssment[i, 1] = mindis ** 2  # squared distance to the nearest centroid
        print(centroids)
        # move each centroid to the mean of its points
        for cent in range(k):
            ptsInClust = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]
            centroids[cent, :] = mean(ptsInClust, axis=0)
    return centroids, clusterAssment
# bisecting k-means            brute force works nicely here
def bikmeans(dataSet, k, disMeas=disEclud):

    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m, 2)))
    # start with a single cluster containing all the points
    centroid0 = mean(dataSet, axis=0).tolist()[0]
    centlist = [centroid0]  # list of centroids
    for j in range(m):  # squared distance from every point to the initial centroid
        clusterAssment[j, 1] = disMeas(dataSet[j, :], mat(centroid0)) ** 2

    while len(centlist) < k:  # stop once we have k centroids
        lowestSSE = inf  # look for the split giving the smallest total SSE
        for i in range(len(centlist)):  # try splitting each cluster
            ptsInCurrCluster = dataSet[nonzero(clusterAssment[:, 0].A == i)[0], :]  # points currently in cluster i
            centroidMat, splitClustAss = kmeans(ptsInCurrCluster, 2)  # split the current cluster in two
            sseSplit = sum(splitClustAss[:, 1])  # SSE of the two new clusters
            sseNoSplit = sum(clusterAssment[nonzero(clusterAssment[:, 0].A != i)[0], 1])  # SSE of all other clusters

            # print("sseSplit, and sseNotSplit", sseSplit, sseNoSplit)
            if (sseNoSplit + sseSplit) < lowestSSE:  # keep the best split found so far
                lowestSSE = sseNoSplit + sseSplit
                bestCentTosplit = i  # index of the cluster to split
                bestNewCents = centroidMat  # centroids of the best split
                bestClusAss = splitClustAss.copy()  # assignments and squared distances of the best split

        # relabel the split: sub-cluster 1 gets a brand-new index (the current list length,
        # since list indices start at 0), sub-cluster 0 keeps the old index
        bestClusAss[nonzero(bestClusAss[:, 0].A == 1)[0], 0] = len(centlist)
        bestClusAss[nonzero(bestClusAss[:, 0].A == 0)[0], 0] = bestCentTosplit
        centlist[bestCentTosplit] = bestNewCents[0, :].tolist()[0]  # unlike the book, store plain lists rather than matrices
        centlist.append(bestNewCents[1, :].tolist()[0])
        clusterAssment[nonzero(clusterAssment[:, 0].A == bestCentTosplit)[0], :] = bestClusAss  # write the split back

    return mat(centlist), clusterAssment
       
def plot(dataSet, clusterAssment):  # scatter plot, one color per cluster
    colors = ["black", "red", "green", "blue"]
    for i in range(len(colors)):
        pts = dataSet[nonzero(clusterAssment[:, 0].A == i)[0]].A  # rows of cluster i, as a plain ndarray
        scatter(pts[:, 0], pts[:, 1], color=colors[i], marker='s', s=30)
    plt.show()

# bisecting k-means driver
dataSet = loadDataSet(r"C:\Users\QAQ\Desktop\testSet2.txt")
datMat = mat(dataSet)
a, b = bikmeans(datMat, 3)
print("centroids-----------", a)
plot(datMat, b)

      

Data

3.275154	2.957587
-3.344465	2.603513
0.355083	-3.376585
1.852435	3.547351
-2.078973	2.552013
-0.993756	-0.884433
2.682252	4.007573
-3.087776	2.878713
-1.565978	-1.256985
2.441611	0.444826
-0.659487	3.111284
-0.459601	-2.618005
2.177680	2.387793
-2.920969	2.917485
-0.028814	-4.168078
3.625746	2.119041
-3.912363	1.325108
-0.551694	-2.814223
2.855808	3.483301
-3.594448	2.856651
0.421993	-2.372646
1.650821	3.407572
-2.082902	3.384412
-0.718809	-2.492514
4.513623	3.841029
-4.822011	4.607049
-0.656297	-1.449872
1.919901	4.439368
-3.287749	3.918836
-1.576936	-2.977622
3.598143	1.975970
-3.977329	4.900932
-1.791080	-2.184517
3.914654	3.559303
-1.910108	4.166946
-1.226597	-3.317889
1.148946	3.345138
-2.113864	3.548172
0.845762	-3.589788
2.629062	3.535831
-1.640717	2.990517
-1.881012	-2.485405
4.606999	3.510312
-4.366462	4.023316
0.765015	-3.001270
3.121904	2.173988
-4.025139	4.652310
-0.559558	-3.840539
4.376754	4.863579
-1.874308	4.032237
-0.089337	-3.026809
3.997787	2.518662
-3.082978	2.884822
0.845235	-3.454465
1.327224	3.358778
-2.889949	3.596178
-0.966018	-2.839827
2.960769	3.079555
-3.275518	1.577068
0.639276	-3.412840

