KNN Algorithm Implementation

The k-nearest neighbors algorithm classifies a data point by finding the k points closest to it among all training points and taking a majority vote on their labels.
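Throughout this post, "closest" is measured with the Euclidean distance (a standard definition, stated here for reference; the notation is mine, not the original post's):

$$d(a, b) = \sqrt{\sum_{i} (a_i - b_i)^2}$$

The predicted label of a new point is then the most common class among its k nearest training points.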

import numpy as np
import matplotlib.pyplot as plt

# toy training set: the first five points are class 0, the last five are class 1
raw_data_X = [[3.393533211, 2.331273381],
              [3.110073483, 1.781539638],
              [1.343808831, 3.368360954],
              [3.582294042, 4.679179110],
              [2.280362439, 2.866990263],
              [7.423436942, 4.696522875],
              [5.745051997, 3.533989803],
              [9.172168622, 2.511101045],
              [7.792783481, 3.424088941],
              [7.939820817, 0.791637231]]
raw_data_y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

X_train = np.array(raw_data_X)
y_train = np.array(raw_data_y)

plt.scatter(X_train[y_train==0,0], X_train[y_train==0,1], color='g')
plt.scatter(X_train[y_train==1,0], X_train[y_train==1,1], color='r')
plt.show()

Add a new point:

x = np.array([8.093607318, 3.365731514])

plt.scatter(X_train[y_train==0,0], X_train[y_train==0,1], color='g')
plt.scatter(X_train[y_train==1,0], X_train[y_train==1,1], color='r')
plt.scatter(x[0], x[1], color='b')
plt.show()

The kNN procedure:

from math import sqrt

distances = []
for x_train in X_train:
    d = sqrt(np.sum((x_train - x)**2))  # Euclidean distance from x to this training point
    distances.append(d)

The distances list can also be written as a comprehension:

distances = [sqrt(np.sum((x_train - x)**2))
             for x_train in X_train]

nearest = np.argsort(distances)

k = 6
topK_y = [y_train[neighbor] for neighbor in nearest[:k]]

Count the number of votes for each class:

from collections import Counter

votes = Counter(topK_y)
predict_y = votes.most_common(1)[0][0]

The result:

predict_y

which comes out as 1 here: five of the six nearest neighbors belong to class 1.

That completes a from-scratch kNN implementation on our own data; next we do the same using the methods and data from sklearn.

from sklearn.neighbors import KNeighborsClassifier

kNN_classifier = KNeighborsClassifier(n_neighbors=6)
kNN_classifier.fit(X_train, y_train)
y_predict = kNN_classifier.predict(x.reshape(1, -1))
y_predict[0]

Having walked through the sklearn workflow, let's now write our own kNN class modeled on sklearn's interface.

import numpy as np
from math import sqrt
from collections import Counter

class KNNClassifier:

    def __init__(self, k):
        assert k >= 1, "k must be valid"
        self.k = k
        self._X_train = None
        self._y_train = None

    def fit(self, X_train, y_train):
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        assert self.k <= X_train.shape[0], \
            "the size of X_train must be at least k"
        self._X_train = X_train
        self._y_train = y_train
        return self

    def predict(self, X_predict):
        assert self._X_train is not None and self._y_train is not None, \
            "must fit before predict"
        assert X_predict.shape[1] == self._X_train.shape[1], \
            "the feature number of X_predict must be equal to X_train"
        y_predict = [self._predict(x) for x in X_predict]
        return np.array(y_predict)

    def _predict(self, x):
        # Euclidean distance from x to every training sample
        distances = [sqrt(np.sum((x_train - x) ** 2)) for x_train in self._X_train]
        nearest = np.argsort(distances)
        topK_y = [self._y_train[i] for i in nearest[:self.k]]
        votes = Counter(topK_y)
        return votes.most_common(1)[0][0]

    def __repr__(self):
        return "KNN(k=%d)" % self.k

This gives us a complete encapsulation of the kNN algorithm; next we need to test it. The idea is to hold out part of the data for testing and train on the rest.

The split function below divides a dataset into training and test parts:

import numpy as np

def train_test_split(X, y, test_ratio=0.2, seed=None):
    """Split X and y into X_train, X_test, y_train, y_test by test_ratio."""
    assert X.shape[0] == y.shape[0], \
        "the size of X must be equal to the size of y"
    assert 0.0 <= test_ratio <= 1.0, \
        "test_ratio must be valid"

    if seed:
        np.random.seed(seed)

    # shuffle all indices, then take the first test_size of them for testing
    shuffled_indexes = np.random.permutation(len(X))
    test_size = int(len(X) * test_ratio)
    test_indexes = shuffled_indexes[:test_size]
    train_indexes = shuffled_indexes[test_size:]

    X_train = X[train_indexes]
    y_train = y[train_indexes]
    X_test = X[test_indexes]
    y_test = y[test_indexes]

    return X_train, X_test, y_train, y_test

Assuming the KNNClassifier class and the train_test_split function above are saved in a playML package, and that X and y hold a full dataset (e.g. the digits data loaded further below):

from playML.model_selection import train_test_split
from playML.kNN import KNNClassifier

X_train, X_test, y_train, y_test = train_test_split(X, y)

my_knn_clf = KNNClassifier(k=3)
my_knn_clf.fit(X_train, y_train)
y_predict = my_knn_clf.predict(X_test)

sum(y_predict == y_test)                 # number of correct predictions
sum(y_predict == y_test) / len(y_test)   # fraction of correct predictions

The last expression is the prediction accuracy.

sklearn, of course, provides its own functions for splitting the data and computing accuracy:

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)

from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier(n_neighbors=3)
knn_clf.fit(X_train, y_train)
y_predict = knn_clf.predict(X_test)

from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_predict)

We can also wrap our own accuracy function:

import numpy as np

def accuracy_score(y_true, y_predict):
    """Compute the accuracy between y_true and y_predict."""
    assert y_true.shape[0] == y_predict.shape[0], \
        "the size of y_true must be equal to the size of y_predict"
    return sum(y_true == y_predict) / len(y_true)
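With this function we could also give our KNNClassifier a sklearn-style score method. A minimal sketch to paste inside the class defined above (the score method is my addition, not part of the original class; it assumes the accuracy_score defined above is in scope):

    def score(self, X_test, y_test):
        """Accuracy of the classifier on a held-out test set."""
        y_predict = self.predict(X_test)
        return accuracy_score(y_test, y_predict)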

With accuracy in hand, how should the value of k be chosen? This is where hyperparameters come in.

The classic sklearn kNN workflow on a real dataset looks like this:

import numpy as np
from sklearn import datasets

digits = datasets.load_digits()
X = digits.data
y = digits.target

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)

from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier(n_neighbors=3)
knn_clf.fit(X_train, y_train)
knn_clf.score(X_test, y_test)

To find a better k, try every k from 1 to 10 and keep the one with the best score. Here the best k turns out to be 4; had the best k been 10, the boundary of the search range, we would have to keep searching among values greater than 10:

best_score = 0.0
best_k = -1
for k in range(1, 11):
    knn_clf = KNeighborsClassifier(n_neighbors=k)
    knn_clf.fit(X_train, y_train)
    score = knn_clf.score(X_test, y_test)
    if score > best_score:
        best_k = k
        best_score = score

print("best_k =", best_k)
print("best_score =", best_score)

So far we have only considered which class appears most often among the k neighbors. We should also weight the neighbors by distance: closer points, even if fewer, should count for more. In sklearn this is controlled by the weights parameter: 'uniform' ignores distance, while 'distance' takes it into account, as the small worked example below shows.
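A tiny sketch of the difference, with hypothetical neighbors and distances (sklearn's 'distance' option weights each neighbor by the inverse of its distance):

from collections import Counter

# hypothetical 5 nearest neighbors: (label, distance to the query point)
neighbors = [(0, 0.1), (0, 0.2), (1, 1.0), (1, 1.1), (1, 1.2)]

# plain majority vote ignores distance
plain_votes = Counter(label for label, _ in neighbors)
print(plain_votes.most_common(1))     # [(1, 3)] -> class 1 wins on count

# inverse-distance weighting lets the two very close class-0 points win
weighted_votes = Counter()
for label, d in neighbors:
    weighted_votes[label] += 1.0 / d
print(weighted_votes.most_common(1))  # [(0, 15.0)] -> class 0 wins on weight

With that intuition, search over both weights and k: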

best_score = 0.0
best_k = -1
best_method = ""
for method in ["uniform", "distance"]:
    for k in range(1, 11):
        knn_clf = KNeighborsClassifier(n_neighbors=k, weights=method)
        knn_clf.fit(X_train, y_train)
        score = knn_clf.score(X_test, y_test)
        if score > best_score:
            best_k = k
            best_score = score
            best_method = method

print("best_method =", best_method)
print("best_k =", best_k)
print("best_score =", best_score)

Output:

best_method = uniform
best_k = 4
best_score = 0.991666666667

These hyperparameters can also be set on the classifier directly, for example together with the distance exponent p discussed next:

sk_knn_clf = KNeighborsClassifier(n_neighbors=4, weights="distance", p=1)
sk_knn_clf.fit(X_train, y_train)
sk_knn_clf.score(X_test, y_test)

The distance we have been using is the Euclidean distance. There are other metrics as well, such as the Manhattan distance and the Minkowski distance; the Minkowski distance brings in yet another hyperparameter, the exponent p.
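For reference, the Minkowski distance between points a and b is (standard definition, not from the original post):

$$d_p(a, b) = \left( \sum_{i} |a_i - b_i|^p \right)^{1/p}$$

Here p = 1 gives the Manhattan distance and p = 2 the Euclidean distance. We can therefore search over p as well: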

best_score = 0.0
best_k = -1
best_p = -1
for k in range(1, 11):
    for p in range(1, 6):
        knn_clf = KNeighborsClassifier(n_neighbors=k, weights="distance", p=p)
        knn_clf.fit(X_train, y_train)
        score = knn_clf.score(X_test, y_test)
        if score > best_score:
            best_k = k
            best_p = p
            best_score = score

print("best_k =", best_k)
print("best_p =", best_p)
print("best_score =", best_score)

Output:

best_k = 3
best_p = 2
best_score = 0.988888888889
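As an aside, sklearn can automate this kind of search with GridSearchCV. A minimal sketch of the same search (note that GridSearchCV scores by cross-validation on the training set, so its best_score_ is not directly comparable to the test-set scores above):

from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

param_grid = [
    {"weights": ["uniform"], "n_neighbors": list(range(1, 11))},
    {"weights": ["distance"], "n_neighbors": list(range(1, 11)), "p": list(range(1, 6))},
]

grid_search = GridSearchCV(KNeighborsClassifier(), param_grid)
grid_search.fit(X_train, y_train)
print(grid_search.best_params_)
print(grid_search.best_score_)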
