目录
引言
K-近邻算法(KNN)是最简单的机器学习算法之一。
该算法的思想是: 两个样本如果足够相似的话,那么它们很可能属于同一个类别。对于一个新的样本,只看和它最相似的一个样本是不靠谱的,这里看和它最相似的k个样本中属于哪个类别的样本数最多,则这个新样本最有可能属于该类别。
详细的算法介绍网上随便一搜就能找到很多,这里就不写了,直接上代码。
1. KNN的简单实现
首先对数据进行训练数据和测试数据的拆分,这里使用python,直接上代码。
def train_test_split(X, y, test_ratio=0.2, seed=None):
    """Split X and y into X_train, X_test, y_train, y_test.

    :param X: feature matrix to split, shape (n_samples, n_features)
    :param y: label vector to split, shape (n_samples,)
    :param test_ratio: fraction of samples held out for testing
                       (default 0.2, i.e. 20% test / 80% train)
    :param seed: random seed for a reproducible shuffle
    :return: X_train, X_test, y_train, y_test
    """
    # Data and labels must line up, and the ratio must be a valid fraction.
    assert X.shape[0] == y.shape[0], "the size of X must be equal to the size of y"
    assert 0.0 <= test_ratio <= 1.0, "test_ratio must be valid"
    # `is not None` (not truthiness) so that seed=0 is honored as well.
    if seed is not None:
        np.random.seed(seed)
    # Shuffle indexes rather than the arrays themselves so X and y stay aligned.
    shuffled_indexes = np.random.permutation(len(X))
    test_size = int(len(X) * test_ratio)
    # First `test_size` shuffled indexes become the test split, the rest train.
    test_indexes = shuffled_indexes[:test_size]
    train_indexes = shuffled_indexes[test_size:]
    X_train = X[train_indexes]
    y_train = y[train_indexes]
    X_test = X[test_indexes]
    y_test = y[test_indexes]
    return X_train, X_test, y_train, y_test
下面是knn的实现方法。
def kNN_classify(k, X_train, y_train, x):
    """Predict the label of a single sample x by majority vote of its
    k nearest neighbors in the known data set (X_train, y_train).

    :param k: number of neighbors to consider
    :param X_train: training feature matrix, shape (n_samples, n_features)
    :param y_train: training label vector, shape (n_samples,)
    :param x: single sample to classify, shape (n_features,)
    :return: the predicted label (most common among the k nearest)
    """
    assert 1 <= k <= X_train.shape[0], "k must be valid"
    assert X_train.shape[0] == y_train.shape[0], "the size of X_train must equal to the size of y_train"
    assert X_train.shape[1] == x.shape[0], "the feature number of x must be equal to X_train"
    # Euclidean distance from x to every training sample, computed as one
    # vectorized NumPy expression instead of a per-row Python loop.
    distances = np.sqrt(np.sum((X_train - x) ** 2, axis=1))
    # Indexes of the training samples sorted by ascending distance.
    nearest = np.argsort(distances)
    # Labels of the k nearest neighbors.
    topK_y = [y_train[i] for i in nearest[:k]]
    # Majority vote: the most frequent label wins.
    votes = Counter(topK_y)
    return votes.most_common(1)[0][0]
2. 封装KNN方法
模仿sklearn模块,将上面的方法封装成类。
class KNNClassifier:
    """k-nearest-neighbors classifier with an sklearn-like fit/predict/score API."""

    def __init__(self, k):
        """Initialize the classifier with the number of neighbors k."""
        assert k >= 1, "k must be valid"
        self.k = k
        self._X_train = None  # training features, set by fit()
        self._y_train = None  # training labels, set by fit()

    def fit(self, X_train, y_train):
        """Train (memorize) the classifier on X_train and y_train; returns self."""
        assert X_train.shape[0] == y_train.shape[0], "the size of X_train must be equal to the size of y_train"
        assert self.k <= X_train.shape[0], "the size of X_train must be at least k."
        self._X_train = X_train
        self._y_train = y_train
        return self

    def predict(self, X_predict):
        """Return the predicted label for every row of X_predict as an array."""
        assert self._X_train is not None and self._y_train is not None, "must fit before predict!"
        assert X_predict.shape[1] == self._X_train.shape[1], "the feature number of X_predict must be equal to X_train"
        y_predict = [self._predict(x) for x in X_predict]
        return np.array(y_predict)

    def _predict(self, x):
        """Predict the label of a single sample x by majority vote of the
        k nearest training samples (Euclidean distance)."""
        assert x.shape[0] == self._X_train.shape[1], "the feature number of x must be equal to X_train"
        # Vectorized Euclidean distances instead of a per-row Python loop.
        distances = np.sqrt(np.sum((self._X_train - x) ** 2, axis=1))
        # Indexes sorted by ascending distance; take the k nearest labels.
        nearest = np.argsort(distances)
        topK_y = [self._y_train[i] for i in nearest[:self.k]]
        # Majority vote among the k neighbors.
        votes = Counter(topK_y)
        return votes.most_common(1)[0][0]

    def score(self, X_test, y_test):
        """Return the accuracy of the model on the test set (X_test, y_test)."""
        y_predict = self.predict(X_test)
        # Fraction of correct predictions — same value as
        # sklearn.metrics.accuracy_score, without the external dependency.
        return np.mean(y_predict == y_test)

    def __repr__(self):
        return "KNN(k=%d)" % self.k
3. 测试
直接使用python中sklearn模块提供的数据进行测试。
3.1 使用鸢尾花数据测试
python中sklearn模块提供了鸢尾花的数据,下面直接上代码。
if __name__ == '__main__':
    # Grab the iris data set bundled with sklearn and unpack features/labels.
    iris = datasets.load_iris()
    X_iris, y_iris = iris.get("data"), iris.get("target")
    # Hold out 20% of the samples for testing.
    X_iris_train, X_iris_test, y_iris_train, y_iris_test = train_test_split(
        X_iris, y_iris, test_ratio=0.2)
    # Fit a 5-NN classifier on the training split and report its accuracy.
    knn_iris = KNNClassifier(5).fit(X_iris_train, y_iris_train)
    print("使用鸢尾花数据集,模型的精度:{}".format(knn_iris.score(X_iris_test, y_iris_test)))
输出结果:
使用鸢尾花数据集,模型的精度:0.9666666666666667
3.2 使用手写数字数据测试
sklearn模块提供了手写数字的数据,用同样的方式进行测试。
if __name__ == '__main__':
    # Load sklearn's bundled handwritten-digits data set and unpack it.
    digits = datasets.load_digits()
    X_digits, y_digits = digits.get("data"), digits.get("target")
    # Hold out 20% of the samples for testing.
    X_digits_train, X_digits_test, y_digits_train, y_digits_test = train_test_split(
        X_digits, y_digits, test_ratio=0.2)
    # Fit a 5-NN classifier and report its accuracy on the held-out split.
    knn_digits = KNNClassifier(5).fit(X_digits_train, y_digits_train)
    print("使用手写数字数据集,模型的精度:{}".format(knn_digits.score(X_digits_test, y_digits_test)))
输出结果:
使用手写数字数据集,模型的精度:0.9860724233983287