Python Implementation of the k-Nearest Neighbor Method from 李航's Statistical Learning Methods

1. Based on Euclidean distance

import numpy as np


def createDataSet():
    # Four 2-D training points: the first two labelled 'A', the last two 'B'
    group = np.array([[1.5, 1.4], [1.6, 1.5], [0.1, 0.2], [0.0, 0.1]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels


def KNNClassify(newInput, dataSet, labels, k):
    numSamples = dataSet.shape[0]
    # Euclidean distance from newInput to every training sample
    diff = np.tile(newInput, (numSamples, 1)) - dataSet
    squareDiff = diff ** 2
    squareDist = np.sum(squareDiff, axis=1)  # np.sum, not the builtin sum(), which has no axis argument
    distance = squareDist ** 0.5

    # Indices of the training samples sorted by ascending distance
    sortedDistance = np.argsort(distance)

    # Majority vote among the k nearest neighbors
    classCount = {}
    for i in range(k):
        voteLabel = labels[sortedDistance[i]]
        classCount[voteLabel] = classCount.get(voteLabel, 0) + 1

    # Return the label that received the most votes
    maxCount = 0
    maxIndex = None
    for key, value in classCount.items():
        if value > maxCount:
            maxCount = value
            maxIndex = key
    return maxIndex


if __name__ == '__main__':
    dataSet, labels = createDataSet()
    testData = np.array([1.2, 1.0])
    k = 3
    predictlabel = KNNClassify(testData, dataSet, labels, k)
    print('Test data:', testData, 'predicted class:', predictlabel)
    testData = np.array([0.1, 0.3])
    predictlabel = KNNClassify(testData, dataSet, labels, k)
    print('Test data:', testData, 'predicted class:', predictlabel)
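
The voting loop above can be written more compactly with collections.Counter, and np.tile can be replaced by NumPy broadcasting. A minimal sketch of the same Euclidean-distance classifier; the function name knn_classify and the use of Counter.most_common are my own choices, not part of the original post:

import numpy as np
from collections import Counter


def knn_classify(new_input, data_set, labels, k):
    # Broadcasting subtracts new_input from every row of data_set,
    # so no explicit np.tile is needed.
    distances = np.sqrt(((data_set - new_input) ** 2).sum(axis=1))
    nearest = np.argsort(distances)[:k]          # indices of the k closest samples
    votes = Counter(labels[i] for i in nearest)  # count labels among those k
    return votes.most_common(1)[0][0]            # label with the most votes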

2. Using sklearn's KNeighborsClassifier

 

# -*- coding: utf-8 -*-

import numpy as np
from sklearn import neighbors, datasets

# Load the handwritten digits dataset and split it 70/30 into train/test
datas = datasets.load_digits()
totalNum = len(datas.data)
trainNum = int(0.7 * totalNum)
trainX = datas.data[0:trainNum]
trainY = datas.target[0:trainNum]
testX = datas.data[trainNum:]
testY = datas.target[trainNum:]

# Fit a k-NN classifier with uniform weights and k = 10
n_neighbors = 10
clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, weights='uniform', algorithm='auto')
clf.fit(trainX, trainY)

# Predict on the held-out 30% and report the error rate
answer = clf.predict(testX)
print("Error rate: %.2f%%" % ((1 - np.sum(answer == testY) / float(len(testY))) * 100))

 
