自己动手实现一个K-Means算法

import matplotlib.pyplot as plt
import random
import numpy as np
import math

# 50 random sample points, each coordinate drawn uniformly from [-100, 100].
random_x = [random.randint(-100, 100) for _i in range(50)]
random_y = [random.randint(-100, 100) for _i in range(50)]
random_points = list(zip(random_x, random_y))
def generate_random_point(min_, max_):
    """Return a random (x, y) point with each coordinate in [min_, max_]."""
    x = random.randint(min_, max_)
    y = random.randint(min_, max_)
    return x, y

K = 4 # number of clusters
new_kernels = [generate_random_point(-100, 100) for _ in range(K)]  # initial cluster centers picked uniformly at random (could be improved, e.g. k-means++ style seeding)

def dis(p1, p2):
    """Return the squared Euclidean distance between 2-D points p1 and p2."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return dx * dx + dy * dy

# Iteratively refine the K cluster centers until no center moves.
while True:
    sqrt_distances = []  # how far each center moved this round
    previous_kernels = new_kernels

    # One bucket of member points per cluster center.
    groups = [[] for _ in range(K)]

    # Assignment step: each point joins the group of its nearest center.
    for p in random_points:
        distances = [dis(p, k) for k in previous_kernels]
        min_index = np.argmin(distances)
        groups[min_index].append(p)

    kernel_color = ['red', 'orange', 'cyan', 'blue']

    # Plot ALL K current centers.
    # (The original hard-coded only 3 scatter calls while K == 4, so the
    # fourth center was never drawn; the modulo guards against K > len(colors).)
    for ci, kern in enumerate(previous_kernels):
        plt.scatter(kern[0], kern[1], s=100, color=kernel_color[ci % len(kernel_color)])

    # Update step: move each center to the mean of its group.
    new_kernels = []
    for index, point in enumerate(groups):
        if point:
            p_x = [_x for _x, _y in point]
            p_y = [_y for _x, _y in point]
            n_k_x, n_k_y = np.mean(p_x), np.mean(p_y)
            plt.scatter(p_x, p_y, color=kernel_color[index % len(kernel_color)])
        else:
            # Empty cluster: keep the old center. np.mean([]) would return NaN
            # and poison every later distance computation.
            n_k_x, n_k_y = previous_kernels[index]
        new_kernels.append((n_k_x, n_k_y))

        # Draw the updated center, larger and semi-transparent.
        plt.scatter(n_k_x, n_k_y, color=kernel_color[index % len(kernel_color)], s=200, alpha=0.5)

        sqrt_distance = math.sqrt(dis((n_k_x, n_k_y), previous_kernels[index]))
        sqrt_distances.append(sqrt_distance)

        print("根据新的group获得的kernal和之前的kernel的距离是: {}".format(sqrt_distance))

    # Show BEFORE the convergence check so the final (converged) state is
    # displayed too — the original broke out of the loop without showing it.
    plt.show()
    # Converged when no center moved at all.
    if not any(sqrt_distances):
        break


工程化代码如下:

# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt 

class K_Means(object):
    """Plain K-Means clustering.

    k: number of clusters.
    tolerance: stop when every center's total absolute percent change per
        iteration falls within this bound.
    max_iter: hard cap on the number of assignment/update iterations.
    After fit(): self.centers_ maps cluster index -> center vector and
    self.clf_ maps cluster index -> list of member samples.
    """

    def __init__(self, k=2, tolerance=0.0001, max_iter=300):
        self.k_ = k
        self.tolerance_ = tolerance
        self.max_iter_ = max_iter

    def fit(self, data):
        """Cluster `data` (indexable sequence of numeric vectors) into self.k_ groups."""
        # Seed the centers with the first k samples.
        self.centers_ = {}
        for i in range(self.k_):
            self.centers_[i] = data[i]

        # NOTE: the original reused `i` for both the iteration counter and the
        # cluster index, shadowing the outer loop variable.
        for _ in range(self.max_iter_):
            self.clf_ = {i: [] for i in range(self.k_)}

            # Assignment step: each sample joins its nearest center (Euclidean).
            for feature in data:
                distances = [np.linalg.norm(feature - self.centers_[c])
                             for c in self.centers_]
                classification = distances.index(min(distances))
                self.clf_[classification].append(feature)

            prev_centers = dict(self.centers_)

            # Update step: move each center to the mean of its members.
            for c in self.clf_:
                if self.clf_[c]:
                    self.centers_[c] = np.average(self.clf_[c], axis=0)
                # An empty cluster keeps its previous center:
                # np.average([]) would yield NaN and break later iterations.

            # Convergence test: total absolute percent movement of every center.
            optimized = True
            for center in self.centers_:
                org_centers = prev_centers[center]
                cur_centers = self.centers_[center]
                # abs() is essential: the original summed *signed* terms, so
                # movements in opposite directions could cancel out and fake
                # convergence while the center was still moving.
                # NOTE(review): still divides by org_centers — a center with a
                # zero coordinate would produce inf/NaN here; confirm inputs.
                movement = np.sum(np.abs((cur_centers - org_centers) / org_centers * 100.0))
                if movement > self.tolerance_:
                    optimized = False
            if optimized:
                break

    def predict(self, p_data):
        """Return the index of the learned center nearest to sample p_data."""
        distances = [np.linalg.norm(p_data - self.centers_[center]) for center in self.centers_]
        index = distances.index(min(distances))
        return index


if __name__ == '__main__':
    x = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]])
    k_means = K_Means(k=2)
    k_means.fit(x)
    print(k_means.centers_)

    # Learned centers drawn as stars.
    for center in k_means.centers_:
        plt.scatter(k_means.centers_[center][0], k_means.centers_[center][1], marker='*', s=150)

    # Training points colored by their assigned cluster.
    for cat in k_means.clf_:
        for point in k_means.clf_[cat]:
            plt.scatter(point[0], point[1], c=('r' if cat == 0 else 'b'))

    predict = [[2, 1], [6, 9]]
    for feature in predict:
        # BUG FIX: classify the single point `feature`; the original passed the
        # whole `predict` list, so both markers got the same (wrong) cluster.
        cat = k_means.predict(feature)
        plt.scatter(feature[0], feature[1], c=('r' if cat == 0 else 'b'), marker='x')

    plt.show()
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值