1.4 KNN Algorithm Learning: Implementation and Encapsulation of the KNN Algorithm

Encapsulated code implementation

```python
import numpy as np
from math import sqrt
from collections import Counter


def kNN_classify(k, X_train, y_train, x):

    assert 1 <= k <= X_train.shape[0], "k must be valid"
    assert X_train.shape[0] == y_train.shape[0], \
        "the size of X_train must equal the size of y_train"
    assert X_train.shape[1] == x.shape[0], \
        "the feature number of x must be equal to X_train"

    # Euclidean distance from x to every training sample
    distances = [sqrt(np.sum((x_train - x) ** 2)) for x_train in X_train]

    # Indices of the training samples sorted by distance (nearest first)
    nearest = np.argsort(distances)

    # Labels of the k nearest neighbours
    top_k = [y_train[i] for i in nearest[:k]]

    # Majority vote among those labels
    votes = Counter(top_k)

    # Return the winning label
    return votes.most_common(1)[0][0]
```
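As a quick check, the function can be called on a small hand-made dataset. The points and labels below are invented purely for illustration (a minimal sketch, not taken from the original post):

```python
# Hypothetical 2-D training data: label 0 clusters near the origin, label 1 near (3, 3)
X_train = np.array([[0.0, 0.1], [0.2, 0.3], [0.1, 0.0],
                    [3.0, 3.1], [2.9, 3.2], [3.1, 2.8]])
y_train = np.array([0, 0, 0, 1, 1, 1])

# A new point lying close to the label-1 cluster
x = np.array([2.8, 3.0])

print(kNN_classify(3, X_train, y_train, x))  # should print 1
```

Note that the distance loop could also be vectorized as `np.linalg.norm(X_train - x, axis=1)`, which computes the same Euclidean distances in a single call.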
Below is Python code that uses the KNN algorithm to filter email by classifying text:

```python
import re
import numpy as np
from collections import Counter


def clean_text(text):
    """Lowercase the text and replace non-letter characters with spaces."""
    text = text.lower()
    text = re.sub(r'[^a-z]', ' ', text)
    return text


def get_word_counts(text):
    """Count how many times each word occurs in the text."""
    words = text.split()
    return Counter(words)


def get_cosine_similarity(counts_a, counts_b):
    """Cosine similarity between two word-count dictionaries.

    The vectors are built over the union of both vocabularies so they
    always have the same dimension.
    """
    vocab = sorted(set(counts_a) | set(counts_b))
    a = np.array([counts_a.get(w, 0) for w in vocab], dtype=float)
    b = np.array([counts_b.get(w, 0) for w in vocab], dtype=float)
    norm_a = np.linalg.norm(a)
    norm_b = np.linalg.norm(b)
    if norm_a == 0 or norm_b == 0:
        return 0.0
    return np.dot(a, b) / (norm_a * norm_b)


class KNN:
    def __init__(self, k=5):
        self.k = k

    def fit(self, X, y):
        # X is a list of raw text documents; precompute their word counts once
        self.X_counts = [get_word_counts(clean_text(xi)) for xi in X]
        self.y = y

    def predict(self, X):
        y_pred = []
        for x in X:
            x_counts = get_word_counts(clean_text(x))
            # Cosine similarity between x and every training sample
            similarities = [
                (get_cosine_similarity(xi_counts, x_counts), yi)
                for xi_counts, yi in zip(self.X_counts, self.y)
            ]
            # Sort by similarity and keep the k most similar samples
            similarities.sort(key=lambda item: item[0], reverse=True)
            k_neighbors_labels = [label for _, label in similarities[:self.k]]
            # Majority vote among the k neighbours decides the prediction
            most_common = Counter(k_neighbors_labels).most_common(1)
            y_pred.append(most_common[0][0])
        return y_pred
```

Usage example:

```python
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Load the 20 Newsgroups dataset as raw text
newsgroups = fetch_20newsgroups(subset='all', remove=('headers', 'footers', 'quotes'))

# Split the raw documents and their labels into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
    newsgroups.data, newsgroups.target, test_size=0.2, random_state=42)

# Fit the KNN classifier on the raw text and predict the test set
# (this brute-force implementation is slow on the full dataset)
knn = KNN(k=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)

# Report accuracy
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy:', accuracy)
```

In this example we test on the 20 Newsgroups dataset: each document is cleaned, turned into a word-count vector, and classified by a majority vote among the k most similar training documents, which yields a model with an accuracy of roughly 0.54.
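Because running this pure-Python classifier over the full 20 Newsgroups corpus is slow, it can help to sanity-check it on a tiny hand-made corpus first. The messages and labels below are invented purely for illustration (a minimal sketch, not part of the original post), with 1 standing for spam and 0 for normal mail:

```python
# Hypothetical toy corpus; texts and labels are made up for illustration only
train_texts = [
    "win a free prize now",
    "limited offer win money",
    "meeting agenda for monday",
    "project report attached for review",
]
train_labels = [1, 1, 0, 0]  # 1 = spam, 0 = ham

knn = KNN(k=3)
knn.fit(train_texts, train_labels)

# The first message shares words with the spam examples, the second with the ham ones,
# so this should print [1, 0]
print(knn.predict(["free money offer", "monday project meeting"]))
```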