1. KNN based on Euclidean distance
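The classifier below measures similarity with the ordinary Euclidean distance, d(x, y) = sqrt(sum_i (x_i - y_i)^2), ranks the training samples by their distance to the query point, and predicts the majority class among the k nearest neighbours.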
import numpy as np

def createDataSet():
    # Four 2-D training samples: two of class 'A' and two of class 'B'
    group = np.array([[1.5, 1.4], [1.6, 1.5], [0.1, 0.2], [0.0, 0.1]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels

def KNNClassify(newInput, dataSet, labels, k):
    numSamples = dataSet.shape[0]
    # Tile the query point so it can be subtracted from every training sample
    diff = np.tile(newInput, (numSamples, 1)) - dataSet
    squareDiff = diff ** 2
    squareDist = np.sum(squareDiff, axis=1)  # sum of squared differences per sample
    distance = squareDist ** 0.5             # Euclidean distance to each sample
    # Indices of the training samples sorted by increasing distance
    sortedDistance = np.argsort(distance)
    # Count the votes of the k nearest neighbours
    classCount = {}
    for i in range(k):
        voteLabel = labels[sortedDistance[i]]
        classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
    # Return the label with the most votes
    maxCount = 0
    for key, value in classCount.items():
        if value > maxCount:
            maxCount = value
            maxIndex = key
    return maxIndex

if __name__ == '__main__':
    dataSet, labels = createDataSet()
    testData = np.array([1.2, 1.0])
    k = 3
    predictlabel = KNNClassify(testData, dataSet, labels, k)
    print('Test sample:', testData, 'predicted class:', predictlabel)
    testData = np.array([0.1, 0.3])
    predictlabel = KNNClassify(testData, dataSet, labels, k)
    print('Test sample:', testData, 'predicted class:', predictlabel)
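The np.tile step can also be written with NumPy broadcasting. The sketch below is a hypothetical variant (knn_classify is not part of the original listing) that computes the same distances with np.linalg.norm and takes the same majority vote:

import numpy as np

def knn_classify(new_input, data_set, labels, k):
    # Broadcasting subtracts new_input from every row of data_set at once,
    # so no explicit np.tile is needed.
    distances = np.linalg.norm(data_set - new_input, axis=1)
    nearest = np.argsort(distances)[:k]      # indices of the k closest samples
    votes = {}
    for idx in nearest:                      # majority vote among the k neighbours
        votes[labels[idx]] = votes.get(labels[idx], 0) + 1
    return max(votes, key=votes.get)

For the toy data above, knn_classify(np.array([1.2, 1.0]), dataSet, labels, 3) returns 'A', matching KNNClassify.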
2. KNN with sklearn's KNeighborsClassifier
# -*- coding: utf-8 -*-
import numpy as np
from sklearn import neighbors, datasets

# Load the 8x8 handwritten-digit dataset and use the first 70% for training
datas = datasets.load_digits()
totalNum = len(datas.data)
trainNum = int(0.7 * totalNum)
trainX = datas.data[0:trainNum]
trainY = datas.target[0:trainNum]
testX = datas.data[trainNum:]
testY = datas.target[trainNum:]

# Fit a 10-nearest-neighbour classifier with uniform vote weights
n_neighbors = 10
clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, weights='uniform', algorithm='auto')
clf.fit(trainX, trainY)
answer = clf.predict(testX)
print("Error rate: %.2f%%" % ((1 - np.sum(answer == testY) / float(len(testY))) * 100))