机器学习KNN近邻算法
一、算法原理
K近邻学习是一种常用的监督学习方法。工作机制:给定测试样本,基于某种距离度量找出训练集中与其最靠近的K个训练样本,基于这K个“邻居”的信息进行预测。通常在分类任务中可使用“投票法”,即选择这K个样本中出现最多的类别标记作为预测结果;在回归任务中可使用“平均法”,即将这K个样本的实值输出标记的平均值作为预测结果;还可基于距离远近进行加权平均或加权投票,距离越近的样本权重越大。K近邻算法比较特殊,可以被认为是没有模型的算法,或可认为训练数据集就是模型本身。
二、手动实现
KNN分类器
import numpy as np
from math import sqrt
from collections import Counter
class KNNClassifier:
    """K-nearest-neighbors classifier implemented from scratch.

    KNN has no explicit training step: fit() simply stores the training
    arrays, and prediction does a brute-force Euclidean-distance scan
    followed by a majority vote among the k closest training samples.
    """

    def __init__(self, k):
        """Initialize the classifier with the number of neighbors ``k``.

        NOTE(review): ``assert`` is stripped under ``python -O``; raising
        ValueError would be more robust, but the assert is kept to match
        the original API.
        """
        assert k >= 1, "k must be valid"
        self.k = k
        self._X_train = None  # training feature matrix, set by fit()
        self._y_train = None  # training labels, set by fit()

    def fit(self, X_train, y_train):
        """Store the training set and return ``self`` (fluent style)."""
        assert 1 <= self.k <= X_train.shape[0], "k must be valid"
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must equal to the size of y_train"
        self._X_train = X_train
        self._y_train = y_train
        return self

    def predict(self, X_predict):
        """Predict a label for every row of ``X_predict``.

        Returns a numpy array of predicted labels, one per input row.
        """
        # Robustness fix: fail with a clear message if fit() was never called
        # instead of crashing inside _predict.
        assert self._X_train is not None and self._y_train is not None, \
            "must fit before predict!"
        y_predict = [self._predict(x) for x in X_predict]
        return np.array(y_predict)

    def _predict(self, x):
        """Predict the label of a single sample ``x`` by majority vote.

        BUG FIX: the original body referenced the bare names ``X_train``,
        ``y_train`` and ``k``, which are undefined inside this method
        (NameError at runtime); they must be the stored attributes
        ``self._X_train``, ``self._y_train`` and ``self.k``.
        """
        distances = [sqrt(np.sum((x_train - x) ** 2))
                     for x_train in self._X_train]
        nearest = np.argsort(distances)
        topK_y = [self._y_train[i] for i in nearest[:self.k]]
        votes = Counter(topK_y)
        # most_common(1) -> [(label, count)]; return the winning label.
        return votes.most_common(1)[0][0]

    def __repr__(self):
        return "KNN(k=%d)" % self.k
三、sklearn调用实现
sklearn 鸢尾花数据集为例
from sklearn import datasets
from collections import Counter # 为了做投票
from sklearn.model_selection import train_test_split
import numpy as np
# Load the classic iris dataset (150 samples, 4 numeric features, 3 classes).
iris = datasets.load_iris()
X = iris.data  # feature matrix
y = iris.target  # integer class labels
# Split into train/test subsets; random_state=20 makes the split reproducible.
# The default test_size is used — presumably 25% held out; verify if that matters.
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=20)
def euc_dis(instance1, instance2):
""&#