K-Nearest Neighbor
通俗的说,物以类聚,人以群分。即KNN分类算法(K近邻是有监督的分类算法,不是聚类算法)
算法原理
计算距离(常用欧几里得算法或马氏距离算法)、升序排列、取前K个、加权平均。
K值的选取:
k太大:导致分类模糊。
k太小:受个例的影响,波动较大。
实战应用
癌症数据检测
import csv
import random
# Load the labeled samples and hold out the first third as the test set.
with open('Prostate_Cancer.csv', 'r') as file:
    datas = list(csv.DictReader(file))

random.shuffle(datas)              # randomize row order before splitting
split = len(datas) // 3            # 1/3 test, 2/3 train
test_set, train_set = datas[:split], datas[split:]
# KNN
def distance(d1, d2, keys=("radius", "texture", "perimeter", "area",
                           "smoothness", "compactness", "symmetry",
                           "fractal_dimension")):
    """Euclidean distance between two samples over the given feature keys.

    Args:
        d1, d2: dict-like rows (e.g. csv.DictReader rows) whose values for
            *keys* are numbers or numeric strings.
        keys: feature names to compare; defaults to the prostate-cancer
            feature columns used by this script.

    Returns:
        float: square root of the sum of squared per-feature differences.
    """
    # Sum of squared differences, then square root (Euclidean norm).
    return sum((float(d1[k]) - float(d2[k])) ** 2 for k in keys) ** 0.5
K = 7  # number of nearest neighbors considered in the vote


def knn(data, candidates=None, k=None):
    """Classify *data* as 'B' (benign) or 'M' (malignant) by weighted KNN.

    Args:
        data: dict with the feature keys expected by distance().
        candidates: labeled training rows; each needs the feature keys plus
            a 'diagnosis_result' of 'B' or 'M'. Defaults to the module-level
            train_set.
        k: how many nearest neighbors to vote. Defaults to module-level K.

    Returns:
        str: 'B' or 'M' — the class with the larger distance-weighted score
        (ties go to 'M', matching the original behavior).
    """
    if candidates is None:
        candidates = train_set
    if k is None:
        k = K
    # Distance from the query to every training row, paired with its label.
    scored = [
        {"result": row["diagnosis_result"], "distance": distance(data, row)}
        for row in candidates
    ]
    # Ascending by distance; keep only the k nearest.
    nearest = sorted(scored, key=lambda item: item["distance"])[:k]
    # Weighted vote: closer neighbors contribute more (weight 1 - d/total).
    # NOTE: 'total' instead of 'sum' — the original shadowed the builtin.
    total = sum(item["distance"] for item in nearest)
    result = {"B": 0, "M": 0}
    for item in nearest:
        if total == 0:
            # All k neighbors coincide with the query; the original would
            # divide by zero here — fall back to an unweighted vote.
            result[item["result"]] += 1
        else:
            result[item["result"]] += 1 - item["distance"] / total
    if result["B"] > result["M"]:
        return "B"
    else:
        return "M"
# Evaluation: fraction of held-out rows whose predicted label matches the
# recorded diagnosis.
correct = sum(
    1 for sample in test_set
    if knn(sample) == sample['diagnosis_result']
)
print("准确率:{:.2f}%".format(100*correct/len(test_set)))