机器学习实战——k近邻算法
算法流程:
(1)收集数据
(2)准备数据:最好使用结构化数据
(3)分析数据
(4)训练数据:此步骤不适合k近邻
(5)测试数据:计算其错误率
(6)使用数据
(2)准备,python导入数据
# Scientific computing package: the examples below use numpy names
# (array, tile, zeros) unqualified, so a star import is required.
# NOTE: the original `import numpy from *` is invalid Python syntax.
from numpy import *
# operator.itemgetter is used as the sort key in classify0
import operator
#创建数据集和标签
def createDataset():
    """Create a toy training set of four 2-D points with class labels.

    Returns:
        group: 4x2 numpy array of training samples.
        labels: list of class labels ('A' or 'B'), one per row of group.
    """
    group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels
########################################################
#k近邻算法
"""
实施knn分类算法的伪代码:
1.计算每一个点与样本点的距离
2.将距离按照从小到大排序
3.取前面k个距离最小的点
4.这k个点在标签中出现的概率
5.取这k个点出现概率最大的标签为当前点的分类
"""
def classify0(inX, dataSet, labels, k):
    """Classify inX by majority vote among its k nearest neighbors.

    Args:
        inX: feature vector of the point to classify.
        dataSet: (m, n) numpy array of training samples.
        labels: sequence of m class labels, aligned with dataSet rows.
        k: number of nearest neighbors to vote.

    Returns:
        The label that occurs most often among the k nearest neighbors.
    """
    dataSetSize = dataSet.shape[0]  # number of training samples
    # tile() repeats inX into a dataSetSize-row matrix so the subtraction
    # gives the per-sample difference vectors in one vectorized step
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)  # sum over features per row
    distances = sqDistances ** 0.5       # Euclidean distance to each sample
    sortedDistIndicies = distances.argsort()  # indices sorted by distance, ascending
    classCount = {}
    for i in range(k):
        # count how often each label appears among the k closest samples
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # sort (label, count) pairs by count, descending; winner is first
    sortedClassCount = sorted(classCount.items(),
                              key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
# 准备数据,利用python处理文本数据
def file2matrix(filename):
    """Parse a tab-separated data file into a feature matrix and label list.

    Each line is expected to hold three numeric feature columns followed by
    an integer class label (the dating data set of the book: flier miles,
    game-time percentage, ice cream liters).

    Args:
        filename: path to the tab-separated text file.

    Returns:
        returnMat: (numberOfLines, 3) numpy array of features.
        classLabelVector: list of int class labels, one per line.
    """
    # NOTE: the original version had an unrelated matplotlib plotting script
    # pasted into the middle of the loop body; it has been removed so the
    # function only parses the file.
    with open(filename) as fr:  # with-statement guarantees the file is closed
        arrayOLines = fr.readlines()
    numberOfLines = len(arrayOLines)
    returnMat = zeros((numberOfLines, 3))
    classLabelVector = []
    index = 0
    for line in arrayOLines:
        line = line.strip()  # remove surrounding whitespace/newline
        listFromLine = line.split('\t')
        # numpy coerces the string fields to float on assignment
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(int(listFromLine[-1]))  # last column is the label
        index += 1
    return returnMat, classLabelVector
#准备数据:归一化数据
def autoNorm(dataSet):
    """Scale every feature column of dataSet linearly into [0, 1].

    newValue = (oldValue - min) / (max - min), computed per column.

    Args:
        dataSet: (m, n) numpy array of raw feature values.

    Returns:
        normDataSet: (m, n) array of normalized values.
        ranges: per-column (max - min) used for scaling.
        minVals: per-column minima used for shifting.
    """
    minVals = dataSet.min(0)  # axis 0: column-wise minima
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    m = dataSet.shape[0]
    # broadcast min/range to every row via tile, then shift and scale
    normDataSet = dataSet - tile(minVals, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))
    # BUGFIX: original returned (normDataSet, minVals, ranges), but every
    # caller unpacks `normMat, ranges, minVals = autoNorm(...)` — the order
    # here now matches the callers.
    return normDataSet, ranges, minVals
(3)分析数据
"""
样本主要包含了一下内容
1、每年获得的飞行常客里程数
2、玩视频游戏所耗时间百分比
3、每周消费的冰激凌公升数
"""
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签
plt.rcParams['axes.unicode_minus']=False #用来正常显示负号
datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(datingDataMat[:,1],datingDataMat[:,2],15.0*array(datingLabels),15.0*array(datingLabels))
plt.xlabel(u"玩视频游戏所消耗的时间")
plt.ylabel(u"每周消费的冰淇凌公升数")
plt.show()
"""
样本主要包含了一下内容
1、每年获得的飞行常客里程数
2、玩视频游戏所耗时间百分比
3、每周消费的冰激凌公升数
"""
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签
plt.rcParams['axes.unicode_minus']=False #用来正常显示负号
datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(datingDataMat[:,0],datingDataMat[:,1],15.0*array(datingLabels),15.0*array(datingLabels))
plt.ylabel(u"玩视频游戏所消耗的时间")
plt.xlabel(u"每年获取飞行常客里程数")
plt.show()
对比上述两幅图,我们可以知道使用第一和第二列属性(图二)比使用第二和第三列属性(图一)来展示数据的效果更好,能更加清晰地表示不同样本的分类区域
(5)测试数据:计算其错误率
def datingClassTest():
    """Hold out the first 10% of the dating data and report the k-NN error rate.

    Reads 'datingTestSet2.txt', normalizes all features with autoNorm, then
    classifies each held-out sample against the remaining 90% with k=3 and
    prints the overall error rate.
    """
    hoRatio = 0.10  # fraction of samples held out as the test set
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        # train on rows [numTestVecs, m); classify one held-out row at a time
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :],
                                     datingLabels[numTestVecs:m], 3)
        print("the classifier came back with :%s,the real answer is %s"
              % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print("the total error rate is :%f" % (errorCount / float(numTestVecs)))
结果:
(6)使用数据
def classifyPerson():
    """Interactively classify a person from three typed-in dating features.

    Prompts for game-time percentage, flier miles and ice cream consumption,
    normalizes the query with the same min/range as the training data, and
    prints the predicted likability category.
    """
    resultList = ['not at all', 'in small doses', 'in large doses']
    percentTats = float(input('percentage of time spent playing video games?'))
    ffMiles = float(input('frequent flier miles earned per year?'))
    iceCream = float(input('liters of ice cream consumed per year?'))
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    # feature order must match the file's columns: miles, game %, ice cream
    inArr = array([ffMiles, percentTats, iceCream])
    # normalize the query point with the training min/range before classifying
    classifierResult = classify0((inArr - minVals) / ranges, normMat, datingLabels, 3)
    # labels are 1-based ints, so shift by one to index resultList
    print('you will probably like this person: ', resultList[classifierResult - 1])