机器学习实战(1)----K-近邻算法(基于python3.5)

ML新手日常学习记录
代码基于python3.5

'''
优点:精度高、对异常值不敏感、无数据输入假定。
缺点:计算复杂度高、空间复杂度高。
适用数据范围:数值型和标称型。
k-近邻算法是分类数据最简单最有效的算法,它是基于实例的学习,使用算法时必须有接近实际数据的训练样本数据
k-近邻算法必须保存全部数据集,若训练数据集很大,就必须使用大量的存储空间
k-近邻算法对每个数据计算距离值,计算比较耗时
k-近邻算法无法给出任何数据的基础结构信息
'''
import numpy as np
import operator   #运算符模块
from sklearn.preprocessing import LabelEncoder    #用于Label编码
#from sklearn.preprocessing import OneHotEncoder     #用于one-hot编码

def createDataSet():
    """Return a tiny toy data set: four 2-D points and their class labels."""
    samples = [[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]]
    tags = list('AABB')  # two samples of class 'A', two of class 'B'
    return np.array(samples), tags

def classify0(inX, dataSet, labels, k):
    """Classify *inX* with a k-nearest-neighbors majority vote.

    inX: input vector to classify (1-D array-like, length n_features)
    dataSet: training sample matrix, shape (m, n_features)
    labels: sequence of m class labels aligned with dataSet rows
    k: number of nearest neighbors that vote

    Returns the label receiving the most votes among the k training
    samples closest to inX by Euclidean distance. Ties resolve to the
    label that appears first among the k neighbors.
    """
    # NumPy broadcasting subtracts inX from every row directly; the original
    # np.tile() built an (m, n) copy of inX for no benefit.
    diffMat = dataSet - np.asarray(inX)
    distances = np.sqrt((diffMat ** 2).sum(axis=1))
    # Indices of the k nearest training samples.
    nearest = distances.argsort()[:k]
    classCount = {}
    for idx in nearest:
        voteIlabel = labels[idx]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # max() with a key is O(k) vs. the original full sort; for equal counts it
    # returns the first-inserted label, matching the stable reverse sort.
    return max(classCount, key=classCount.get)
# Convert a text data file into NumPy arrays.
def file2matrix(filename):
    """Parse a tab-separated data file into a feature matrix and label codes.

    filename: path to a text file where each line holds 3 numeric feature
        columns followed by a string class label, separated by tabs.

    Returns (returnMat, classLabelVector):
        returnMat -- float ndarray of shape (n_lines, 3) with the features
        classLabelVector -- int ndarray of label codes 0..n_classes-1,
            assigned in sorted order of the distinct label strings
    """
    # Context manager closes the file; the original leaked the handle.
    with open(filename) as fr:
        arrayOLines = fr.readlines()
    returnMat = np.zeros((len(arrayOLines), 3))
    rawLabels = []
    for index, line in enumerate(arrayOLines):
        listFromLine = line.strip().split('\t')
        returnMat[index, :] = listFromLine[0:3]  # first 3 columns -> features
        rawLabels.append(listFromLine[-1])       # last column -> label string
    # np.unique with return_inverse encodes labels as 0..n-1 by sorted order —
    # the same result as sklearn's LabelEncoder, without the dependency.
    _, classLabelVector = np.unique(np.array(rawLabels), return_inverse=True)
    return returnMat, classLabelVector

# Min-max normalization.
def autoNorm(dataSet):
    """Scale each column of *dataSet* linearly into [0, 1].

    dataSet: numeric ndarray of shape (m, n).

    Returns (normDataSet, ranges, minVals):
        normDataSet -- (dataSet - column_min) / column_range
        ranges -- per-column (max - min), as computed from the data
        minVals -- per-column minimum
    """
    minVals = dataSet.min(0)   # column-wise minimum
    maxVals = dataSet.max(0)   # column-wise maximum
    ranges = maxVals - minVals
    # Guard constant columns: a zero range would divide by zero and yield NaN;
    # substituting 1 maps such columns to all zeros instead. The returned
    # `ranges` stays unguarded so callers see the true spread.
    safeRanges = np.where(ranges == 0, 1, ranges)
    # Broadcasting handles the row-wise subtraction/division; the original
    # np.tile copies (and a dead np.zeros allocation) were unnecessary.
    normDataSet = (dataSet - minVals) / safeRanges
    return normDataSet, ranges, minVals

# Classifier evaluation on the dating data set.
def datingClassTest():
    """Hold out the first 10% of the dating data as a test set and print
    per-sample predictions plus the overall error rate.

    Relies on file2matrix/autoNorm/classify0 defined in this module and on
    the hard-coded data file path below.
    """
    hoRatio = 0.1  # fraction of samples held out for testing
    # Raw string: the original non-raw literal contained invalid escape
    # sequences (e.g. '\算'), which are deprecated and will become errors.
    datingDataMat, datingLabels = file2matrix(r'F:\算法学习\机器学习书籍\machinelearninginaction\Ch02\datingTestSet.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        # Train on the remaining 90%, classify held-out sample i with k=3.
        classifierResult = classify0(normMat[i,:], normMat[numTestVecs:m,:],datingLabels[numTestVecs:m], 3)
        print('the classifier came back with: %d, the real answer is: %d' %(classifierResult, datingLabels[i]))
        if (classifierResult != datingLabels[i]): errorCount += 1.0
    print('the total error rate is: %f' % (errorCount/float(numTestVecs)))

# Interactive prediction for a single person.
def classifyPerson():
    """Prompt for three features on stdin and print the predicted liking level."""
    percentTats = float(input('percentage of time spent playing video games?'))
    ffMiles = float(input('frequent flier miles earned per year?'))
    iceCream = float(input('liters of ice cream consumed per year?'))
    # Raw string: the original non-raw literal contained invalid escape
    # sequences (e.g. '\算'), which are deprecated and will become errors.
    datingDataMat, datingLabels = file2matrix(r'F:\算法学习\机器学习书籍\machinelearninginaction\Ch02\datingTestSet.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = np.array([ffMiles, percentTats, iceCream])
    classifierResult = classify0((inArr - minVals)/ranges, normMat, datingLabels, 3)
    # file2matrix encodes label strings 0..n-1 in *sorted* order, so the
    # book's 1-based `resultList[classifierResult - 1]` wraps class 0 to the
    # wrong end of the list. Map codes explicitly instead.
    # NOTE(review): assumes the data file's labels are exactly 'didntLike',
    # 'largeDoses', 'smallDoses' (alphabetical order 0/1/2) — TODO confirm.
    resultByCode = {0: 'not at all', 1: 'in large doses', 2: 'in small doses'}
    print('You will probably like this person:', resultByCode[classifierResult])
    
# Flatten a 32x32 text "image" into a 1x1024 vector.
def img2vector(filename):
    """Read a 32x32 character grid of digits from *filename* and return it
    flattened as a (1, 1024) float ndarray.

    Only the first 32 characters of each of the first 32 lines are used;
    each character must be a single digit ('0'/'1' in the book's data).
    """
    returnVect = np.zeros((1, 1024))
    # Context manager closes the file; the original leaked the handle.
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32*i + j] = int(lineStr[j])
    return returnVect

#手写数字识别系统测试
import os
def handwritingClassTest():
    """Train a kNN digit classifier on trainingDigits and evaluate it on
    testDigits, printing per-file predictions and the overall error rate.

    File names encode the true label before the underscore, e.g. '3_136.txt'
    is an image of the digit 3. Relies on img2vector/classify0 in this module
    and on the hard-coded directories below.
    """
    hwLabels = []
    trainingFileList = os.listdir(r'F:\算法学习\机器学习书籍\machinelearninginaction\Ch02\trainingDigits')
    m = len(trainingFileList)   # number of training images
    trainingMat = np.zeros((m, 1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]        # drop the '.txt' suffix
        classNumStr = int(fileStr.split('_')[0])   # label is before the '_'
        hwLabels.append(classNumStr)
        trainingMat[i,:] = img2vector(r'F:\算法学习\机器学习书籍\machinelearninginaction\Ch02\trainingDigits/%s' % fileNameStr)
    testFileList = os.listdir(r'F:\算法学习\机器学习书籍\machinelearninginaction\Ch02\testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]        # drop the '.txt' suffix
        classNumStr = int(fileStr.split('_')[0])   # label is before the '_'
        # BUG FIX: the original loaded test vectors from trainingDigits,
        # so the evaluation never touched the test set.
        vectorUnderTest = img2vector(r'F:\算法学习\机器学习书籍\machinelearninginaction\Ch02\testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        print('the classifier came back with: %d, the real answer is: %d' %(classifierResult, classNumStr))
        if (classifierResult != classNumStr): errorCount += 1.0
    print('\nthe total number of errors is: %d'  %  errorCount)
    # BUG FIX: '%d' truncated the fractional error rate to 0; also fixed the
    # 'totle ... in' typo in the message.
    print('\nthe total error rate is: %f' % (errorCount/float(mTest)))
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值