1、kNN算法原理
k近邻(k-Nearest Neighbor)是比较简单的机器学习算法,它通过计算向量(特征值)间的距离衡量相似度来进行分类。它的思想很简单:如果一个样本在特征空间中的k个最近邻(最相似)的样本中的大多数属于某一类别,则该样本也属于这个类别。说白了就是一句话:对某个样本,与他最相似的k个样本属于哪一类,它就属于哪一类。
2、kNN的python实现(进行文本分类)
a、准备数据
准备数据,保存在Nbayes_lib.py中(注意文件名要与下文的 from Nbayes_lib import * 一致)
import numpy as np
from numpy import *
def loadDataSet():
    """Return the toy forum-post corpus and its label vector.

    Each document is a tokenised post; labels are 0/1 (1 appears to mark
    the abusive posts in this classic example).
    """
    labelled_posts = [
        (['my', 'dog', 'has', 'flea', 'problems', 'help', 'help', 'please'], 0),
        (['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'], 1),
        (['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him', 'my'], 0),
        (['stop', 'posting', 'stupid', 'workless', 'garbage'], 1),
        (['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'], 0),
        (['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'], 1),
    ]
    postingList = [doc for doc, _ in labelled_posts]
    classVec = [label for _, label in labelled_posts]
    return postingList, classVec
class NBayes(object):
    """A small multinomial Naive Bayes text model.

    train_set() builds a vocabulary, per-document term-frequency vectors
    (self.tf) and a per-class normalised term matrix (self.tdm), which
    predict() combines with the class priors (self.Pcates).
    """

    def __init__(self):
        self.vocabulary = []   # distinct words; list index = feature column
        self.idf = 0           # 1 x vocablen document-frequency counts
        self.tf = 0            # doclength x vocablen term-count matrix
        self.tdm = 0           # n_classes x vocablen P(word|class) matrix
        self.Pcates = {}       # class label -> prior probability
        self.labels = []       # training labels, one per document
        self.doclength = 0     # number of training documents
        self.vocablen = 0      # vocabulary size
        self.testset = 0       # 1 x vocablen query vector built by map2vocab()

    def train_set(self, trainset, classVec):
        """Fit the model on a list of tokenised documents and their labels."""
        self.cate_prob(classVec)
        self.doclength = len(trainset)
        # Vocabulary = set of all distinct words across the corpus.
        tempset = set()
        for doc in trainset:
            tempset.update(doc)
        self.vocabulary = list(tempset)
        self.vocablen = len(self.vocabulary)
        self.calc_wordfreq(trainset)
        self.build_tdm()

    def cate_prob(self, classVec):
        """Compute the prior probability of each distinct class label."""
        self.labels = classVec
        total = float(len(self.labels))
        for labeltemp in set(self.labels):
            self.Pcates[labeltemp] = float(self.labels.count(labeltemp)) / total

    def calc_wordfreq(self, trainset):
        """Fill self.tf (raw term counts) and self.idf (document frequencies)."""
        self.idf = np.zeros([1, self.vocablen])
        self.tf = np.zeros([self.doclength, self.vocablen])
        for indx in range(self.doclength):
            for word in trainset[indx]:
                self.tf[indx, self.vocabulary.index(word)] += 1
            # idf counts each word once per document it appears in.
            for singleword in set(trainset[indx]):
                self.idf[0, self.vocabulary.index(singleword)] += 1

    def build_tdm(self):
        """Aggregate tf rows by class and normalise into P(word|class)."""
        # NOTE: labels are assumed to be integers 0..n_classes-1, usable
        # directly as row indices into self.tdm.
        self.tdm = np.zeros([len(self.Pcates), self.vocablen])
        sumlist = np.zeros([len(self.Pcates), 1])
        for indx in range(self.doclength):
            self.tdm[self.labels[indx]] += self.tf[indx]
            sumlist[self.labels[indx]] = np.sum(self.tdm[self.labels[indx]])
        self.tdm = self.tdm / sumlist

    def map2vocab(self, testdata):
        """Project a tokenised document onto the vocabulary as a count vector."""
        self.testset = np.zeros([1, self.vocablen])
        for word in testdata:
            self.testset[0, self.vocabulary.index(word)] += 1

    def predict(self, testset):
        """Return the class label with the highest posterior-like score."""
        if np.shape(testset)[1] != self.vocablen:
            # NOTE(review): exiting the process on bad input is harsh;
            # consider raising ValueError instead.
            print('输入错误')
            exit(0)
        predvalue = 0
        predclass = ""
        # BUGFIX: the original zipped self.tdm with the dict self.Pcates and
        # relied on the dict's iteration order matching the tdm row order.
        # Index tdm rows by the class label explicitly instead.
        for keyclass, prior in self.Pcates.items():
            temp = np.sum(testset * self.tdm[keyclass] * prior)
            if temp > predvalue:
                predvalue = temp
                predclass = keyclass
        return predclass

    def calc_tfidf(self, trainset):
        """Alternative to calc_wordfreq(): fill self.tf with tf-idf weights."""
        self.idf = np.zeros([1, self.vocablen])
        self.tf = np.zeros([self.doclength, self.vocablen])
        for indx in range(self.doclength):
            for word in trainset[indx]:
                self.tf[indx, self.vocabulary.index(word)] += 1
            # Normalise counts by document length before idf weighting.
            self.tf[indx] = self.tf[indx] / float(len(trainset[indx]))
            for singleword in set(trainset[indx]):
                self.idf[0, self.vocabulary.index(singleword)] += 1
        self.idf = np.log(float(self.doclength) / self.idf)
        self.tf = np.multiply(self.tf, self.idf)
b、进行分类
import sys
import os
from numpy import *
import numpy as np
import operator
from Nbayes_lib import *
import importlib
# NOTE: the original called importlib.reload(sys) here — a Python-2 idiom
# used to regain sys.setdefaultencoding(); it has no useful effect under
# Python 3 and has been removed.

# Number of nearest neighbours consulted by classify().
k = 3
def cosdist(v1, v2):
    """Cosine similarity of v1 and v2 (dot product over the norm product)."""
    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    return np.dot(v1, v2) / denom
def classify(testdata, traindata, listClass, k):
    """kNN by cosine similarity: vote among the k most similar training rows.

    Returns the majority label of the k nearest (most cosine-similar)
    rows of traindata; ties go to the label reached first in vote order.
    """
    # Cosine similarity of the query against every training row
    # (the cosdist helper inlined).
    query_norm = np.linalg.norm(testdata)
    sims = np.array([
        np.dot(testdata, row) / (query_norm * np.linalg.norm(row))
        for row in traindata
    ])
    # Most-similar first.
    ranked = np.argsort(-sims)
    votes = {}
    for idx in ranked[:k]:
        label = listClass[idx]
        votes[label] = votes.get(label, 0) + 1
    winner, _ = max(votes.items(), key=lambda item: item[1])
    return winner
# --- demo: classify one training document with kNN over the tf vectors ---
dataset, listClasses = loadDataSet()
print('数据集是')
print(dataset)
print('label是:')
print(listClasses)

# The Naive Bayes helper is trained purely to obtain the per-document
# term-frequency matrix nb.tf used as kNN feature vectors.
nb = NBayes()
nb.train_set(dataset, listClasses)
print('tf[3]是:')
print(nb.tf[3])
print('tf是')
print(nb.tf)

# Query: document 3's tf vector against all tf rows.
prediction = classify(nb.tf[3], nb.tf, listClasses, k)
print(prediction)
输出结果为1,与样本3的真实标签一致。注意:这里的测试样本取自训练集本身,"准确率100%"仅指这一个训练样本被正确分类,并不能说明模型的泛化能力。