机器学习实战(六)利用AdaBoost元算法提高分类性能

一、优缺点

1、优点:泛化错误率低,易编码,可以应用在大部分分类器上,无参数调整
2、缺点:对离群点敏感
3、适用数据类型:数值型和标称型数据

二、代码实例

# -*- coding: utf-8 -*-
# @Time    : 19-4-12 下午2:38
# @Author  : MRB
# @File    : adaboost.py
# @Software: PyCharm Community Edition

from numpy import *
import matplotlib.pyplot as plt

def loadSimpData():
    """Return the small hand-made 2-D toy data set.

    :return: tuple of (5 x 2 numpy matrix of feature vectors,
             list of 5 class labels, each +1.0 or -1.0)
    """
    features = [
        [1.0, 2.1],
        [2.0, 1.1],
        [1.3, 1.0],
        [1.0, 1.0],
        [2.0, 1.0],
    ]
    labels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return matrix(features), labels

def show_data(dataMat, labelMat):
    """Scatter-plot a 2-D data set, coloring samples by class label.

    Samples labeled +1 are drawn as red squares, all other samples as
    blue circles.  Blocks until the plot window is closed.

    :param dataMat: (m x 2) matrix/array-like of feature vectors
    :param labelMat: sequence of m class labels (+1 / -1)
    """
    # NOTE: the redundant function-local `import matplotlib.pyplot as plt`
    # was removed; the module already imports plt at the top of the file.
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    # Partition sample indices by class so each class gets its own marker.
    pos = [i for i in range(n) if int(labelMat[i]) == 1]
    neg = [i for i in range(n) if int(labelMat[i]) != 1]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter([dataArr[i, 0] for i in pos], [dataArr[i, 1] for i in pos],
               s=30, c='red', marker='s')
    ax.scatter([dataArr[i, 0] for i in neg], [dataArr[i, 1] for i in neg],
               s=30, c='blue', marker='o')
    plt.xlabel("X1")
    plt.ylabel("X2")
    plt.show()


# 单层决策树生成函数


#通过阈值比较对数据进行分类
def stumpClassify(dataMatrix,dimen,threshVal,threshIneq):
    """Classify every sample with a single decision stump.

    Each sample starts at +1.0; samples falling on the chosen side of
    the threshold are flipped to -1.0.

    :param dataMatrix: (m x n) feature matrix
    :param dimen: index of the feature column to threshold on
    :param threshVal: threshold value to compare against
    :param threshIneq: 'lt' flips samples with feature <= threshold;
                       any other value flips samples with feature > threshold
    :return: (m x 1) array of predictions, each +1.0 or -1.0
    """
    m = shape(dataMatrix)[0]
    predictions = ones((m, 1))
    column = dataMatrix[:, dimen]
    if threshIneq == 'lt':
        flip = column <= threshVal
    else:
        flip = column > threshVal
    predictions[flip] = -1.0
    return predictions

#单层决策树生成函数
def buildStump(dataArr,classLabels,D):
    """Find the best decision stump for weighted training data.

    Scans every feature, a grid of thresholds spanning its range, and
    both inequality directions, keeping the stump with the lowest
    D-weighted classification error.

    :param dataArr: (m x n) feature matrix (list of lists or numpy matrix)
    :param classLabels: sequence of m labels, each +1.0 or -1.0
    :param D: (m x 1) column matrix of sample weights (should sum to 1)
    :return: tuple of (best stump dict with keys 'dim'/'thresh'/'ineq',
             minimum weighted error as a 1x1 matrix,
             (m x 1) predictions of the best stump)
    """
    X = mat(dataArr)
    y = mat(classLabels).T  # labels as a column vector
    m, n = shape(X)
    numSteps = 10.0  # number of threshold steps per feature range
    bestStump = {}
    bestClasEst = mat(zeros((m, 1)))
    minError = inf  # lowest weighted error found so far
    for dim in range(n):
        lo = X[:, dim].min()
        hi = X[:, dim].max()
        step = (hi - lo) / numSteps
        # j = -1 places one threshold just below the feature minimum.
        for j in range(-1, int(numSteps) + 1):
            thresh = lo + float(j) * step
            for ineq in ('lt', 'gt'):
                predicted = stumpClassify(X, dim, thresh, ineq)
                wrong = mat(ones((m, 1)))
                wrong[predicted == y] = 0  # zero out correct predictions
                weightedError = D.T * wrong  # weighted error rate
                if weightedError < minError:
                    minError = weightedError
                    bestClasEst = predicted.copy()
                    bestStump = {'dim': dim, 'thresh': thresh, 'ineq': ineq}
    return bestStump, minError, bestClasEst



def adaBoostTrainDS(dataArr,classLabels,numIt=40):
    """Train an AdaBoost ensemble of decision stumps (DS = decision stump).

    :param dataArr: training feature matrix (m samples)
    :param classLabels: sequence of m labels, each +1.0 or -1.0
    :param numIt: maximum number of boosting rounds
    :return: list of weak-classifier dicts, each a stump plus its 'alpha'
    """
    weakClassArr = []
    m = shape(dataArr)[0]
    D = mat(ones((m, 1)) / m)  # start from uniform sample weights
    aggClassEst = mat(zeros((m, 1)))  # running weighted vote per sample
    labelCol = mat(classLabels).T
    for _ in range(numIt):
        # Fit the best single stump under the current weight vector.
        stump, err, classEst = buildStump(dataArr, classLabels, D)
        print("D:",D.T)
        # Classifier weight; the 1e-16 floor avoids division by zero.
        alpha = float(0.5*log((1.0-err)/max(err,1e-16)))
        stump['alpha'] = alpha
        weakClassArr.append(stump)
        print('classEst: ',classEst)

        # Re-weight samples for the next round: correctly classified
        # samples shrink, misclassified samples grow; then renormalize.
        D = multiply(D, exp(multiply(-1 * alpha * labelCol, classEst)))
        D = D / D.sum()

        # Accumulate the ensemble vote and measure training error so far.
        aggClassEst += alpha * classEst
        print("aggClassEst: ",aggClassEst)
        missed = multiply(sign(aggClassEst) != labelCol, ones((m, 1)))
        errorRate = missed.sum() / m
        print("total error: ",errorRate)
        if errorRate == 0.0:
            break  # perfect fit on training data — stop early
    return weakClassArr

#测试算法:基于AdaBoost的分类
def adaClassify(dataToClass,classifierArr):
    '''
    :param dataToClass:  待分类样例
    :param classifierArr: 多个弱分类器数组
    :return:
    '''
    dataMatrix = mat(dataToClass)
    m = shape(dataMatrix)[0]
    aggClassEst = mat(zeros((m,1)))
    for i in range(len(classifierArr)):
        classEst = stumpClassify(dataMatrix,classifierArr[i]['dim'],classifierArr[i]['thresh'],classifierArr[i]['ineq'])
        aggClassEst += classifierArr[i]['alpha']*classEst
        print(aggClassEst)
    return sign(aggClassEst)


if __name__ == '__main__':
    # Load the toy data set and train an AdaBoost ensemble on it.
    dataMat, labelMat = loadSimpData()
    weakClassArr = adaBoostTrainDS(dataMat, labelMat)
    print('*'*50)
    # Classify two unseen points with the trained ensemble.
    result = adaClassify([[5,5],[0,0]], weakClassArr)
    print('*'*50)
    print(result)

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值