邻近 算法 实例

约会

40920 8.326976 0.953952 3
14488 7.153469 1.673904 2
26052 1.441871 0.805124 1
75136 13.147394 0.428964 1
38344 1.669788 0.134296 1
72993 10.141740 1.032955 1
35948 6.830792 1.213192 3
42666 13.276369 0.543880 3
67497 8.631577 0.749278 1
35483 12.273169 1.508053 3
50242 3.723498 0.831917 1
63275 8.385879 1.669485 1
5569 4.875435 0.728658 2
51052 4.680098 0.625224 1
77372 15.299570 0.331351 1
43673 1.889461 0.191283 1
61364 7.516754 1.269164 1
69673 14.239195 0.261333 1
15669 0.000000 1.250185 2
28488 10.528555 1.304844 3
6487 3.540265 0.822483 2

手写0_0.txt

00000000000001111000000000000000
00000000000011111110000000000000
00000000001111111111000000000000
00000001111111111111100000000000
00000001111111011111100000000000
00000011111110000011110000000000
00000011111110000000111000000000
00000011111110000000111100000000
00000011111110000000011100000000
00000011111110000000011100000000
00000011111100000000011110000000
00000011111100000000001110000000
00000011111100000000001110000000
00000001111110000000000111000000
00000001111110000000000111000000
00000001111110000000000111000000
00000001111110000000000111000000
00000011111110000000001111000000
00000011110110000000001111000000
00000011110000000000011110000000
00000001111000000000001111000000
00000001111000000000011111000000
00000001111000000000111110000000
00000001111000000001111100000000
00000000111000000111111000000000
00000000111100011111110000000000
00000000111111111111110000000000
00000000011111111111110000000000
00000000011111111111100000000000
00000000001111111110000000000000
00000000000111110000000000000000
00000000000011000000000000000000

#-*- coding: utf-8 -*-    # encoding declaration; must appear at the top of the file

'''
Created on Sep 16, 2010
kNN: k Nearest Neighbors

Input:      inX: vector to compare to existing dataset (1xN)
            dataSet: size m data set of known vectors (NxM)
            labels: data set labels (1xM vector)
            k: number of neighbors to use for comparison (should be an odd number)

Output:     the most popular class label

@author: pbharrin
'''


from numpy import *
import operator
from os import listdir


 # args: query sample, training feature matrix, training labels, k value
def classify0(inX, dataSet, labels, k):
    """Return the majority class label among the k nearest neighbors of inX.

    inX     -- 1xN feature vector to classify
    dataSet -- m x N array of known training vectors
    labels  -- length-m sequence of training labels
    k       -- number of neighbors that vote (an odd k avoids ties)
    """
    dataSetSize = dataSet.shape[0]  # number of training samples

    # Step 1: Euclidean distance from inX to every training sample.
    # tile repeats inX dataSetSize times so the subtraction is element-wise.
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2           # squared difference per dimension
    sqDistances = sqDiffMat.sum(axis=1)  # sum across each row
    distances = sqDistances ** 0.5       # square root -> Euclidean distance

    # Step 2: indices of training samples sorted by ascending distance.
    sortedDistIndicies = distances.argsort()

    # Steps 3-4: tally the labels of the k closest samples.
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        # get() returns the running count, or 0 when the label is unseen.
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1

    # Step 5: the most frequent label wins.
    # .items() replaces the Python-2-only .iteritems(), so this runs on
    # both Python 2 and Python 3.
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

def createDataSet():
    """Return a tiny toy data set: four 2-D points and their class labels."""
    points = [[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]]
    group = array(points)
    labels = list('AABB')
    return group, labels

def file2matrix(filename):
    """Parse a tab-separated dating data file into features and labels.

    Each line holds three numeric features followed by an integer class
    label. Returns (m x 3 numpy array of features, list of int labels).
    """
    # Read the file once under a context manager so the handle is always
    # closed (the original opened the file twice and leaked both handles).
    with open(filename) as fr:
        lines = fr.readlines()
    numberOfLines = len(lines)
    returnMat = zeros((numberOfLines, 3))  # feature matrix
    classLabelVector = []  # class label for each row
    for index, line in enumerate(lines):
        # strip whitespace at both ends, then split on tabs
        listFromLine = line.strip().split('\t')
        returnMat[index, :] = listFromLine[0:3]  # first three fields are features
        # the last field is the integer class label
        classLabelVector.append(int(listFromLine[-1]))
    return returnMat, classLabelVector

def autoNorm(dataSet):
    """Scale each column of dataSet to [0, 1]: new = (old - min) / (max - min).

    Returns (normalized m x n array, per-column ranges, per-column minima);
    the ranges and minima let callers normalize new query vectors the same way.
    """
    minVals = dataSet.min(0)  # column-wise minima (row vector)
    maxVals = dataSet.max(0)  # column-wise maxima
    ranges = maxVals - minVals  # value span of each column
    m = dataSet.shape[0]  # number of samples
    # tile repeats the per-column stats over all m rows so the arithmetic
    # is element-wise.  (The original pre-filled normDataSet with zeros;
    # that value was immediately overwritten, so the dead init is dropped.)
    normDataSet = dataSet - tile(minVals, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))  # element-wise divide
    return normDataSet, ranges, minVals

def datingClassTest():
    """Hold-out accuracy test of classify0 on the dating data set.

    The first 10% of rows are classified against the remaining 90%
    (used as the training set); prints each prediction and the error rate.
    """
    hoRatio = 0.10  # fraction of the data held out for testing
    # NOTE(review): expects 'datingTestSet.txt' in the working directory.
    datingDataMat, datingLabels = file2matrix('datingTestSet.txt')
    # scale every feature into [0, 1] so no feature dominates the distance
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]            # total number of samples
    numTestVecs = int(m * hoRatio)  # number of test samples
    errorCount = 0.0                # misclassification counter
    for i in range(numTestVecs):
        # rows [0, numTestVecs) are tested against rows [numTestVecs, m)
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :],
                                     datingLabels[numTestVecs:m], 3)  # k=3
        # Parenthesized single-argument print is valid on both Python 2
        # and 3 (the original used Python-2-only print statements).
        print("the classifier came back with: %d, the real answer is: %d"
              % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print("the total error rate is: %f" % (errorCount / float(numTestVecs)))
    print(errorCount)



def classifyPerson():
    """Interactively classify a person from three typed-in feature values."""
    # result strings indexed by class label 1/2/3
    resultList = ['not at all', 'in small doses', 'in large doses']
    # raw_input exists only on Python 2; fall back to input on Python 3.
    try:
        read_line = raw_input
    except NameError:
        read_line = input
    # the three features: flier miles, gaming time percentage, ice cream
    ffMiles = float(read_line("frequent flier miles earned per year:"))
    percentTats = float(read_line("percentage of time spent playing video games:"))
    iceCream = float(read_line("liters of ice cream consumed per week:"))
    # Train on the full dating data set.
    # NOTE(review): expects 'datingTestSet.txt' in the working directory.
    datingDataMat, datingLabels = file2matrix('datingTestSet.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = array([ffMiles, percentTats, iceCream])
    # The query vector must be normalized with the same min/range as training.
    classifierResult = classify0((inArr - minVals) / ranges, normMat, datingLabels, 3)
    # class labels are 1-based, so shift down for list indexing
    print("You will probably like this person: %s" % resultList[classifierResult - 1])


def img2vector(filename):
    """Flatten a 32x32 text image of '0'/'1' characters into a 1x1024 vector."""
    returnVect = zeros((1, 1024))
    # Context manager guarantees the file is closed (the original leaked
    # the handle).
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()  # one row of 32 digit characters
            for j in range(32):
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect

def handwritingClassTest():
    """Train on trainingDigits/, classify testDigits/, print the error rate.

    File names encode the true class: e.g. '8_43.txt' is the 43rd image
    of the digit 8.  Expects both directories in the working directory.
    """
    hwLabels = []
    trainingFileList = listdir('trainingDigits')  # training sample files
    m = len(trainingFileList)  # number of training files
    trainingMat = zeros((m, 1024))
    # Load every training image as a 1x1024 row and record its label.
    for i in range(m):
        fileNameStr = trainingFileList[i]         # e.g. '8_43.txt'
        fileStr = fileNameStr.split('.')[0]       # drop the .txt extension
        classNumStr = int(fileStr.split('_')[0])  # leading digit is the class
        hwLabels.append(classNumStr)
        trainingMat[i, :] = img2vector('trainingDigits/%s' % fileNameStr)

    # Classify every test image against the full training set.
    testFileList = listdir('testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]       # take off .txt
        classNumStr = int(fileStr.split('_')[0])  # expected class
        vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        # Parenthesized single-argument print is valid on both Python 2
        # and 3 (the original used Python-2-only print statements).
        print("the classifier came back with: %d, the real answer is: %d"
              % (classifierResult, classNumStr))
        if classifierResult != classNumStr:
            errorCount += 1.0
    print("\nthe total number of errors is: %d" % errorCount)
    print("\nthe total error rate is: %f" % (errorCount / float(mTest)))

if __name__=='__main__':
    # Hold-out classification test on the dating data
    #datingClassTest()

    # Predict the class label of a new person from the dating data
    #classifyPerson()

    # Handwritten digit recognition
    #handwritingClassTest()

    # NOTE(review): expects 'datingTestSet.txt' in the working directory
    datingDataMat,datingLabels = file2matrix('datingTestSet.txt')

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值