Machine_Learning_in_Action - CH2 - k-Nearest Neighbors (Python 3.6)

The kNN.py source code:

'''
Created on Sep 16, 2010
kNN: k Nearest Neighbors

Input:      inX: vector to compare to existing dataset (1xN)
            dataSet: size m data set of known vectors (m x N)
            labels: data set labels (1 x m vector)
            k: number of neighbors to use for comparison (should be an odd number)
            
Output:     the most popular class label

@author: pbharrin1
'''
from numpy import *
import operator
from os import listdir

def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]
    # Euclidean distance from inX to every row of dataSet
    diffMat = tile(inX, (dataSetSize,1)) - dataSet
    sqDiffMat = diffMat**2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances**0.5
    sortedDistIndicies = distances.argsort()    # indices sorted by increasing distance
    classCount = {}
    # majority vote among the k nearest neighbors
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]    # label with the most votes

def createDataSet():
    group = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
    labels = ['A','A','B','B']
    return group, labels

def file2matrix(filename):
    fr = open(filename)
    arrayOLines = fr.readlines()
    numberOfLines = len(arrayOLines)            #get the number of lines in the file
    returnMat = zeros((numberOfLines,3))        #prepare matrix to return
    classLabelVector = []                       #prepare labels return
    index = 0
    for line in arrayOLines:
        line = line.strip()
        listFromLine = line.split('\t')
        returnMat[index,:] = listFromLine[0:3]              #first three columns are the features
        classLabelVector.append(int(listFromLine[-1]))      #last column is the class label
        index += 1
    fr.close()
    return returnMat,classLabelVector
    
def autoNorm(dataSet):
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    normDataSet = zeros(shape(dataSet))
    m = dataSet.shape[0]
    normDataSet = dataSet - tile(minVals, (m,1))
    normDataSet = normDataSet/tile(ranges, (m,1))   #element wise divide
    return normDataSet, ranges, minVals
   
def datingClassTest():
    hoRatio = 0.50      #hold out 50% of the data for testing
    datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')       #load data set from file
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m*hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
        if (classifierResult != datingLabels[i]): errorCount += 1.0
    print ("the total error rate is: %f" % (errorCount/float(numTestVecs)))
    print (errorCount)
    
def classifyPerson():
    resultList = ['not at all','in small doses', 'in large doses']
    percentTats = float(input("percentage of time spent playing video games?"))
    ffMiles = float(input("frequent flier miles earned per year?"))
    iceCream = float(input("liters of ice cream consumed per year?"))
    datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = array([ffMiles, percentTats, iceCream])
    classifierResult = classify0((inArr-minVals)/ranges,normMat,datingLabels,3)
    print("You will probably like this person: ",resultList[classifierResult - 1])

def img2vector(filename):
    returnVect = zeros((1,1024))
    fr = open(filename)
    for i in range(32):
        lineStr = fr.readline()
        for j in range(32):
            returnVect[0,32*i+j] = int(lineStr[j])
    return returnVect

def handwritingClassTest():
    hwLabels = []
    trainingFileList = listdir('trainingDigits')           #load the training set
    m = len(trainingFileList)
    trainingMat = zeros((m,1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]     #take off .txt
        classNumStr = int(fileStr.split('_')[0])
        hwLabels.append(classNumStr)
        trainingMat[i,:] = img2vector('trainingDigits/%s' % fileNameStr)
    testFileList = listdir('testDigits')        #iterate through the test set
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]     #take off .txt
        classNumStr = int(fileStr.split('_')[0])
        vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        print( "the classifier came back with: %d, the real answer is: %d" % (classifierResult, classNumStr))
        if (classifierResult != classNumStr): errorCount += 1.0
    print ("\nthe total number of errors is: %d" % errorCount)
    print( "\nthe total error rate is: %f" % (errorCount/float(mTest)))

Run Jupyter Notebook

Try out kNN.py

import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from numpy import *

# import the kNN.py module
import kNN

group,labels = kNN.createDataSet()
group
#array([[1. , 1.1],
#       [1. , 1. ],
#       [0. , 0. ],
#       [0. , 0.1]])

labels
#['A', 'A', 'B', 'B']

kNN.classify0([0,0],group,labels,3)
#'B'
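
As an extra sanity check (my addition, not from the book), a point next to the two 'A' samples should be voted into class 'A': the three nearest neighbors of [1.0, 1.2] are [1.0, 1.1], [1.0, 1.0], and [0, 0.1], i.e. two 'A's and one 'B'.

kNN.classify0([1.0,1.2],group,labels,3)
#'A'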

2.2 Dating-site data analysis

datingDataMat, datingLabels = kNN.file2matrix('datingTestSet2.txt')

datingDataMat
#array([[4.0920000e+04, 8.3269760e+00, 9.5395200e-01],
#       [1.4488000e+04, 7.1534690e+00, 1.6739040e+00],
#       [2.6052000e+04, 1.4418710e+00, 8.0512400e-01],
#       ...,
#       [2.6575000e+04, 1.0650102e+01, 8.6662700e-01],
#       [4.8111000e+04, 9.1345280e+00, 7.2804500e-01],
#       [4.3757000e+04, 7.8826010e+00, 1.3324460e+00]])
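
In this matrix, column 0 is the number of frequent flier miles earned per year, column 1 is the percentage of time spent playing video games, and column 2 is the liters of ice cream consumed per year (the same order used to build inArr in classifyPerson).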

datingLabels[:20]
#[3, 2, 1, 1, 1, 1, 3, 3, 1, 3, 1, 1, 2, 1, 1, 1, 1, 1, 2, 3]
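
Here 1, 2, and 3 stand for 'not at all', 'in small doses', and 'in large doses', the entries of resultList used later in classifyPerson.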

2.2.2 Analyze the data: plotting with Matplotlib

import matplotlib.pyplot as plt
import matplotlib
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(datingDataMat[:,1],datingDataMat[:,2],s=15.0*array(datingLabels),c=15.0*array(datingLabels))
plt.show()

[Figure: scatter plot of datingDataMat columns 1 and 2, with marker size and color set from datingLabels]
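
The call above encodes the class only through marker size and color. A slightly more readable variant (my own sketch, not from the book) plots each class separately so that a legend can be drawn; it assumes datingDataMat and datingLabels are still loaded:

fig = plt.figure()
ax = fig.add_subplot(111)
labelArr = array(datingLabels)
for classNum, name in [(1, 'not at all'), (2, 'in small doses'), (3, 'in large doses')]:
    idx = labelArr == classNum            # boolean mask selecting one class
    ax.scatter(datingDataMat[idx,1], datingDataMat[idx,2], s=20, label=name)
ax.set_xlabel('percentage of time spent playing video games')
ax.set_ylabel('liters of ice cream consumed per year')
ax.legend()
plt.show()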

2.2.3 Prepare the data: normalizing the numeric values

normMat, ranges, minVals = kNN.autoNorm(datingDataMat)
normMat
#array([[0.44832535, 0.39805139, 0.56233353],
#       [0.15873259, 0.34195467, 0.98724416],
#       [0.28542943, 0.06892523, 0.47449629],
#       ...,
#       [0.29115949, 0.50910294, 0.51079493],
#       [0.52711097, 0.43665451, 0.4290048 ],
#       [0.47940793, 0.3768091 , 0.78571804]])

ranges
#array([9.1273000e+04, 2.0919349e+01, 1.6943610e+00])

minVals
#array([0.      , 0.      , 0.001156])
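
Since autoNorm applies min-max scaling, (value - min) / (max - min), every column of normMat should now span exactly [0, 1]; a quick check (my addition) confirms this:

normMat.min(0)
#array([0., 0., 0.])
normMat.max(0)
#array([1., 1., 1.])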

2.2.4 Test the algorithm: verifying the classifier as a complete program

kNN.datingClassTest()
#the classifier came back with: 2, the real answer is: 2
#the classifier came back with: 1, the real answer is: 1
#the classifier came back with: 1, the real answer is: 1
#the classifier came back with: 2, the real answer is: 2
#the total error rate is: 0.066000
#33.0
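
With hoRatio = 0.50, the first 500 of the 1,000 samples are used as test vectors, so the 33 misclassifications give an error rate of 33/500 = 0.066.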

2.2.5 Use the algorithm: building a complete, usable system

kNN.classifyPerson()
#percentage of time spent playing video games?10
#frequent flier miles earned per year?10000
#liters of ice cream consumed per year?0.5
#You will probably like this person:  in small doses
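
Note that classifyPerson scales the user's input with the same minVals and ranges returned by autoNorm before handing it to classify0, so the query vector lives in the same normalized space as normMat.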

2.3 Example: a handwriting recognition system

2.3.1 Prepare the data: converting images into test vectors

testVector = kNN.img2vector('testDigits/0_13.txt')

testVector[0,0:31]
#array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1.,
#       1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])

testVector[0,32:63]
#array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1.,
#       1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
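
Each digit file is a 32x32 grid of '0' and '1' characters; img2vector flattens it row by row into a 1x1024 vector, so the two slices above show (most of) the first and second rows of 0_13.txt.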

2.3.2 Test the algorithm: recognizing handwritten digits with kNN

kNN.handwritingClassTest()
#the classifier came back with: 9, the real answer is: 9
#the classifier came back with: 9, the real answer is: 9
#the classifier came back with: 9, the real answer is: 9
#the total number of errors is: 10
#the total error rate is: 0.010571
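
The printed rate corresponds to 10 errors over the 946 files in the testDigits directory, since 10/946 ≈ 0.010571.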