##### Kd树的实现
> K近邻(KNN)算法实现有很多种,比如全部遍历,球树,kd树等等,这里我们使用kd树来实现KNN
- 构造kd树
```
T = [[2,3],[5,4],[9,6],[4,7],[8,1],[7,2]]


class myNode:
    """A kd-tree node holding one point plus its two subtrees."""

    def __init__(self, point):
        self.left = None
        self.right = None
        self.point = point  # the splitting point stored at this node


def media(data):
    """Return the middle element of data together with its index."""
    middle = len(data) // 2
    return data[middle], middle


def build_kd_tree(data, d):
    """Build a kd-tree from data, alternating the split axis d (0 <-> 1)."""
    ordered = sorted(data, key=lambda row: row[d])  # sort on current feature
    pivot, middle = media(ordered)
    root = myNode(pivot)
    del ordered[middle]
    print(ordered, pivot)
    # points below the median go left, the rest go right
    if middle > 0:
        root.left = build_kd_tree(ordered[:middle], not d)
    if len(ordered) > 1:
        root.right = build_kd_tree(ordered[middle:], not d)
    return root


kd_tree = build_kd_tree(T, 0)
print(kd_tree)
```
首先这里创建了一个类,里面的构造方法记录节点以及左右分支。
media每次取中间的点。build_kd_tree构造kd树。首先对传入的数据按照特征值进行排序。然后取中间的节点构建一棵树。然后对两边剩余的节点进行判断,构造左右分支
- 对kd树进行查找
```
T = [[2, 3], [5, 4], [9, 6], [4, 7], [8, 1], [7, 2]]


# kd-tree node: stores a point plus links to its children and parent.
class node:
    def __init__(self, point):
        self.left = None
        self.right = None
        self.parent = None
        self.point = point

    def set_left(self, left):
        # Bug fix: the original `if left == None: pass` fell through and
        # would then crash on `None.parent`; return early instead.
        if left is None:
            return
        self.left = left
        left.parent = self

    def set_right(self, right):
        if right is None:
            return
        self.right = right
        right.parent = self


def media(lst):
    """Return (middle element, middle index) of lst."""
    m = len(lst) // 2
    return lst[m], m


def build_kdtree(data, d):
    """Recursively build a kd-tree, alternating the split axis d each level."""
    data = sorted(data, key=lambda x: x[d])
    p, m = media(data)
    tree = node(p)
    del data[m]
    if m > 0:
        tree.set_left(build_kdtree(data[:m], not d))
    if len(data) > 1:
        tree.set_right(build_kdtree(data[m:], not d))
    return tree


def get_distance(pointA, target):
    """Euclidean distance between two 2-D points."""
    return ((pointA[0] - target[0]) ** 2 + (pointA[1] - target[1]) ** 2) ** 0.5


def search_kdtree(tree, d, target):
    """Return the point in the kd-tree nearest to target (exact search).

    Bug fix: the original version never measured the leaf it descended to
    nor the root node itself, so it could return a non-nearest point.  This
    version scores every visited node and only descends the far subtree when
    the splitting plane is closer than the current best distance.
    """
    # best = [nearest point so far, its distance]; seeded with the root
    best = [tree.point, get_distance(tree.point, target)]

    def search(t, axis):
        if t is None:
            return
        dist = get_distance(t.point, target)
        if dist < best[1]:
            best[0] = t.point
            best[1] = dist
        # descend the side of the splitting plane containing the target first
        if target[axis] < t.point[axis]:
            near, far = t.left, t.right
        else:
            near, far = t.right, t.left
        search(near, not axis)
        # the far side can only contain a closer point if the plane itself
        # is nearer than the best distance found so far
        if abs(target[axis] - t.point[axis]) < best[1]:
            search(far, not axis)

    search(tree, d)
    return best[0]


kd_tree = build_kdtree(T, 0)
print(search_kdtree(kd_tree, 0, [2.1, 3.5]))
```
对kd树的搜索是建立在生成kd树之后的,首先要计算每个点到目标点距离,进行不断地更新。search_kdtree()方法开始先进行迭代,得到叶子节点后,进行回溯,计算每个点和目标点的距离,记录下最小距离的点。
##### 基于全搜索KNN算法的实现
> 这里实现KNN我先使用对所有点进行欧式距离的计算,而不使用kd树,因为kd相对复杂
- 处理数据
这里实现KNN算法首先需要读取数据,对数据进行划分,将数据集划分为训练集和测试集
```
import csv
import random
import pandas as pd
from sklearn.model_selection import train_test_split
def loadDataSet(filename, d, split=0.66, trainSet=None, testSet=None):
    """Read a CSV file and randomly split its rows into train/test sets.

    filename -- path of the CSV file
    d        -- number of numeric feature columns (converted to float in place)
    split    -- probability that a row is assigned to the training set
    trainSet/testSet -- output lists; fresh lists are created when omitted.
        (Bug fix: the original used mutable default arguments, so repeated
        calls without explicit lists would accumulate rows across calls.)

    Returns (trainSet, testSet) for convenience; the passed-in lists are
    also filled in place, as before.
    """
    if trainSet is None:
        trainSet = []
    if testSet is None:
        testSet = []
    with open(filename, "r") as csvfile:
        dataset = list(csv.reader(csvfile))
        for row in dataset:
            # convert feature columns to float; the label column stays str
            for y in range(d):
                row[y] = float(row[y])
            # randomly route the row to the training or the test set
            if random.random() < split:
                trainSet.append(row)
            else:
                testSet.append(row)
    return trainSet, testSet
```
这里读取数据的具体细节还不是很清楚,之后解决。但是这不是KNN的主要部分,就先不纠结了
- 计算相似度(距离)
```
import math
def get_distance(data1, data2, length):
    """Euclidean distance over the first `length` coordinates of two rows."""
    squared = sum((data1[i] - data2[i]) ** 2 for i in range(length))
    return math.sqrt(squared)
```
- 邻近相似度
这里的邻近相似度就是根据k的不同,获取到与目标点最近的k个点
```
import operator
def getNeighbors(trainingSet, testInstance, k):
    """Return the k training rows closest to testInstance.

    The last column of testInstance is treated as the class label and is
    excluded from the distance computation.
    """
    length = len(testInstance) - 1  # skip the trailing class label
    scored = [(row, get_distance(row, testInstance, length))
              for row in trainingSet]
    scored.sort(key=operator.itemgetter(1))  # ascending by distance
    return [scored[j][0] for j in range(k)]
```
- 进行预测
然后就是根据邻近的相似度,预测出目标点所属的分类
```
def getResponse(neighbors):
    """Return the majority class label among the neighbours.

    The class label is assumed to be the last column of each row.

    Bug fix: the original sorted `classVotes.items()` with key=None, i.e. by
    the label itself, so it returned the lexicographically largest label
    instead of the most frequent one.  Sort by the vote count instead.
    """
    classVotes = {}
    for row in neighbors:
        label = row[-1]
        classVotes[label] = classVotes.get(label, 0) + 1
    # rank labels by vote count, descending, and return the winner
    sortedVotes = sorted(classVotes.items(), key=lambda kv: kv[1], reverse=True)
    return sortedVotes[0][0]
```
- 对准确率进行计算
然后就是对准确率进行计算,这里只是一个简单的计算,我用命中的点/所有的点计算准确率
```
def getScore(testSet, predictions):
    """Return the fraction of test rows whose true label matches the prediction.

    The true label is the last column of each test row.
    Bug fix: returns 0.0 for an empty test set instead of raising
    ZeroDivisionError.
    """
    if not testSet:
        return 0.0
    correct = sum(1 for row, pred in zip(testSet, predictions)
                  if row[-1] == pred)
    return correct / len(testSet)
```
- 主函数
```
def main():
    """Run the brute-force KNN pipeline: load data, classify, report accuracy."""
    trainingSet = []
    testSet = []
    split = 0.67
    # split the data set into training and test parts
    loadDataSet(r"D:\Python Dataset\iris.csv", 4, split, trainingSet, testSet)
    print("Training Set: ", repr(len(trainingSet)))
    print("Test Set: ", repr(len(testSet)))
    k = 3
    predictions = []
    # classify each test row by majority vote over its k nearest neighbours
    for instance in testSet:
        neighbors = getNeighbors(trainingSet, instance, k)
        response = getResponse(neighbors)
        predictions.append(response)
    score = getScore(testSet, predictions)
    print("The score is: ", repr(score))


# Guard the entry point so importing this module does not trigger the run.
if __name__ == "__main__":
    main()
```
##### 基于kd树KNN算法的实现
> 这部分即根据kd树的思想,结合上一部分KNN算法的实现,根据kd树来实现KNN算法,但是可能程序合理性不高,所以在算法的准确率和时间均差于全搜索的KNN,准确率比全搜索差0.1-0.2,所以有时间后期还会修改
- [ ] KNN算法实现
- 构造kd树的基类
```
class myNode:
    """kd-tree node: a point, its two subtrees, and a back link to the parent."""

    def __init__(self, point):
        self.left = None
        self.right = None
        self.parent = None
        self.point = point

    def setLeft(self, left):
        """Attach a left child and record this node as its parent.

        Bug fix: the original `if left == None: pass` fell through and then
        crashed on `None.parent`; a None child is now ignored entirely.
        """
        if left is None:
            return
        self.left = left
        left.parent = self

    def setRight(self, right):
        """Attach a right child and record this node as its parent."""
        if right is None:
            return
        self.right = right
        right.parent = self
```
- 对kd树进行构造
```
def media(lst):
    """Return the middle element of lst and its index."""
    mid = len(lst) // 2
    return lst[mid], mid
def build_kd_tree(data, d):
    """Recursively build a kd-tree, alternating the split axis each level."""
    ordered = sorted(data, key=lambda row: row[d])
    pivot, mid = media(ordered)
    root = myNode(pivot)
    del ordered[mid]
    # points below the median become the left subtree, the rest the right
    if mid > 0:
        root.setLeft(build_kd_tree(ordered[:mid], not d))
    if len(ordered) > 1:
        root.setRight(build_kd_tree(ordered[mid:], not d))
    return root
```
- 对数据进行读取,并将其分割为训练集和测试集
```
def loadDataSet(filename, d, split=0.66, trainingSet=None, testSet=None):
    """Read a CSV file and randomly split its rows into train/test sets.

    filename -- path of the CSV file
    d        -- number of numeric feature columns (converted to float in place)
    split    -- probability that a row lands in the training set
    trainingSet/testSet -- output lists; fresh lists are created when omitted.
        (Bug fix: the original used mutable default arguments, so repeated
        calls without explicit lists would accumulate rows across calls.)

    Returns (trainingSet, testSet); the passed-in lists are also filled in
    place, as before.
    """
    if trainingSet is None:
        trainingSet = []
    if testSet is None:
        testSet = []
    with open(filename, "r") as csvfile:
        dataSet = list(csv.reader(csvfile))
        for row in dataSet:
            # convert feature columns to float; the label column stays str
            for y in range(d):
                row[y] = float(row[y])
            if random.random() < split:
                trainingSet.append(row)
            else:
                testSet.append(row)
    return trainingSet, testSet
```
- 计算欧式距离
```
def getDistance(data1, data2, length):
    """Euclidean distance over the first `length` coordinates of two rows."""
    total = sum((data1[i] - data2[i]) ** 2 for i in range(length))
    return math.sqrt(total)
```
- 对kd树进行搜索,返回距离目标点最近的k个点
```
def search_kdtree(tree, d, target, k=3):
    """Return the k points in the kd-tree nearest to target.

    tree   -- root node (must expose .point, .left, .right)
    d      -- axis the root splits on (0 or 1; alternates per level)
    target -- query row; its last column is a class label and is ignored
    k      -- number of neighbours to return

    Bug fixes vs. the original:
    * the k == 1 branch called initBestDis with three arguments -> TypeError;
    * tree.parent was dereferenced without a None check -> crash at the root;
    * points on the descent path (including the root) were never measured,
      so true nearest neighbours could be missed;
    * debug prints removed.
    This version performs a standard branch-and-bound traversal: every
    visited node is scored, and a far subtree is pruned when the splitting
    plane lies farther away than the current k-th best distance.
    """
    length = len(target) - 1  # skip the trailing class label

    def distance(point):
        return math.sqrt(sum((point[i] - target[i]) ** 2
                             for i in range(length)))

    best = []  # up to k [point, distance] pairs, kept sorted by distance

    def consider(node):
        # Insert node's point into `best` if it belongs among the k closest.
        dist = distance(node.point)
        if len(best) < k:
            best.append([node.point, dist])
            best.sort(key=lambda pair: pair[1])
        elif dist < best[-1][1]:
            best[-1] = [node.point, dist]
            best.sort(key=lambda pair: pair[1])

    def search(node, axis):
        if node is None:
            return
        consider(node)
        # descend the side of the splitting plane containing the target first
        if target[axis] < node.point[axis]:
            near, far = node.left, node.right
        else:
            near, far = node.right, node.left
        search(near, not axis)
        # the far side can only hold a closer point when the plane itself is
        # nearer than the current k-th best (or `best` is not yet full)
        if len(best) < k or abs(target[axis] - node.point[axis]) < best[-1][1]:
            search(far, not axis)

    search(tree, d)
    return [point for point, _ in best]
```
这里是这个算法最核心的部分,我来详细讲一下。首先对kd树进行递归的搜索,找到距离目标点最近的叶子节点。如果目标点的特征值小于某节点,进入左子树,反之右子树。然后里面有两个函数,分别是initBestDis()和updateBestDis()。initBestDis()函数是根据传入的k值对前K个距离的点进行初始化,updateBestDis()是对best(list)进行更新,如果新节点小于list中第k个距离,则进行替换。下面一个循环是对best数组进行初始化。最后一个循环则是计算不同节点到目标点的距离,然后进行替换。如果父节点的距离大于best里的最大值,则不用进行查找父节点了。
- 对最近的节点使用少数服从多数的思想进行预测
```
def getResponse(neighbors):
    """Return the majority class label (last column) among the neighbours.

    Bug fix: the original sorted `classDic.items()` with key=None, i.e. by
    the label itself, so it returned the lexicographically largest label
    rather than the most common one.  Sort by the vote count instead.
    """
    classDic = {}
    for row in neighbors:
        label = row[-1]
        classDic[label] = classDic.get(label, 0) + 1
    # rank labels by vote count, descending, and return the winner
    ranked = sorted(classDic.items(), key=lambda kv: kv[1], reverse=True)
    return ranked[0][0]
```
- 对准确率进行计算
```
def getScore(testSet, predictions):
    """Return the fraction of test rows whose true label matches the prediction.

    The true label is the last column of each test row.
    Bug fix: returns 0.0 for an empty test set instead of raising
    ZeroDivisionError; leftover debug prints removed.
    """
    if not testSet:
        return 0.0
    hits = sum(1 for row, pred in zip(testSet, predictions)
               if row[-1] == pred)
    return hits / len(testSet)
```
- 主函数
```
def main():
    """Run the kd-tree KNN pipeline end to end and report accuracy and time."""
    # Bug fix: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for elapsed-time measurement.
    start = time.perf_counter()
    split = 0.66
    trainingSet = []
    testSet = []
    loadDataSet(r"D:\Python Dataset\iris.csv", 4, split, trainingSet, testSet)
    print("The TrainingSet length : ", repr(len(trainingSet)))
    print("The TestSet length : ", repr(len(testSet)))
    kd_tree = build_kd_tree(trainingSet, 0)
    print("-------", repr(kd_tree))
    predictions = []
    k = 3
    # classify each test row via the kd-tree k-nearest-neighbour search
    for instance in testSet:
        neighbors = search_kdtree(kd_tree, 0, instance, k)
        result = getResponse(neighbors)
        predictions.append(result)
    print("----------->", repr(predictions))
    score = getScore(testSet, predictions)
    print("The Score is : ", repr(score))
    end = time.perf_counter()
    total_time = end - start
    print("-----The Time finished-----", str(total_time))


# Guard the entry point so importing this module does not trigger the run.
if __name__ == "__main__":
    main()
```