Machine Learning in Action Notes (3): Logistic Regression

# -*- coding: utf-8 -*-
"""
Created on Wed Apr 18 09:41:09 2018

@author: zhangsh
"""

'''
The general workflow of Logistic regression:
    1. Collect the data
    2. Prepare the data: numeric values
    3. Analyze the data
    4. Train the algorithm: find the optimal regression coefficients for classification
    5. Test the algorithm
    6. Use the algorithm
'''
from numpy import *
import matplotlib.pyplot as plt

def loadDataSet():
    dataMat = []
    labelMat = []
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()  # strip the trailing '\n', then split on whitespace into a list
        dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])  # prepend the bias term: (w0,w1,w2) * (1,x1,x2)^T
        labelMat.append(int(lineArr[2]))
    fr.close()
    return dataMat,labelMat
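
# testSet.txt (the book's sample data file, assumed to sit in the working
# directory) holds one sample per line, whitespace-separated: two features
# followed by a 0/1 label, e.g. a line such as
#     -0.017612   14.053064   0
# The 1.0 prepended above is the bias input x0, so z = w0 + w1*x1 + w2*x2
# becomes a single dot product (1, x1, x2) . (w0, w1, w2).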

# Sigmoid function
def sigmoid(inX):
    return 1.0/(1+exp(-inX))
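
# For strongly negative inX, exp(-inX) overflows float64 and numpy emits a
# RuntimeWarning (this can happen on the horse-colic data below). A minimal
# numerically safe variant, a sketch that is not part of the book's listing:
def sigmoidStable(inX):
    # clipping keeps exp() within float64 range; sigmoid saturates far
    # beyond +/-500 anyway, so the clipped result is effectively identical
    return 1.0/(1 + exp(-clip(inX, -500, 500)))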

# Batch gradient ascent: find the optimal weight vector w
def gradAscent(dataMatIn,classLabels):
    dataMatrix = mat(dataMatIn)  # convert the list to a 100x3 numpy matrix, rows (1.0, x1, x2)
    labelMat = mat(classLabels).transpose()  # convert to a 100x1 column vector
    m,n = shape(dataMatrix)  # m = number of rows (100), n = number of columns (3)
    alpha = 0.001  # the gradient gives the direction to move; alpha is the step size
    maxCycles = 500  # number of iterations
    weights = ones((n,1))  # weights is an n*1 = 3x1 column vector; z = w0 + w1*x1 + w2*x2
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)  # 100x1 column vector of predictions
        error = (labelMat - h)  # y - h
        weights = weights + alpha * dataMatrix.transpose() * error  # ascend: w := w + alpha * X^T * (y - h)
    return weights
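
# Why "alpha * X^T * error" is a gradient step: for the log-likelihood
# l(w) = sum_i [ y_i*log(h_i) + (1 - y_i)*log(1 - h_i) ] with h = sigmoid(X*w),
# the gradient works out to dl/dw = X^T * (y - h), so each iteration ascends
# by alpha * X^T * (y - h). A minimal usage sketch (assumes the book's
# testSet.txt is in the working directory):
#     dataArr, labelMat = loadDataSet()
#     weights = gradAscent(dataArr, labelMat)  # 3x1 numpy matrix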

# Plot the data and the decision boundary implied by the learned w
def plotBestFit(weights):
    weights = asarray(weights).flatten()  # accept either the matrix from gradAscent or a 1-D array
    dataMat,labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]  # number of rows in dataArr
    
    xcord1 = []
    ycord1 = []
    
    xcord2 = []
    ycord2 = []
    
    for i in range(n):
        if int(labelMat[i]) == 1:       # class 1
            xcord1.append(dataArr[i,1])
            ycord1.append(dataArr[i,2])
        else:                           # class 0
            xcord2.append(dataArr[i,1])
            ycord2.append(dataArr[i,2])
    
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1,ycord1,s=30,c='red',marker='s')
    ax.scatter(xcord2,ycord2,s=30,c='green')
    
    x = arange(-3.0,3.0,0.1)
    y = (-weights[0]-weights[1]*x)/weights[2]  # boundary where w0 + w1*x + w2*y = 0, i.e. sigmoid = 0.5
    ax.plot(x,y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()
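
# A minimal sketch of the batch pipeline end to end (assumes testSet.txt is
# present; plotBestFit flattens the weights itself, so both the matrix from
# gradAscent and the 1-D arrays from the stochastic variants can be passed):
#     dataArr, labelMat = loadDataSet()
#     plotBestFit(gradAscent(dataArr, labelMat))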

# Stochastic gradient ascent
'''
Batch gradient ascent: every weight update sweeps the entire data set
Stochastic gradient ascent: update the weights with one sample at a time
Improved stochastic gradient ascent: 1. alpha shrinks as iterations grow  2. samples are picked at random
'''
def stocGradAscent0(dataMatrix,classLabels):
    m,n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)  # 1-D weight array; dataMatrix must be a numpy ndarray here, not a matrix
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i]*weights))  # scalar prediction for a single sample
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights
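
# Usage sketch: the stochastic versions index rows as dataMatrix[i] and rely
# on elementwise multiplication, so pass a numpy array rather than the matrix
# that gradAscent builds, e.g.:
#     dataArr, labelMat = loadDataSet()
#     weights = stocGradAscent0(array(dataArr), labelMat)  # single pass over the data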

# Improved stochastic gradient ascent
# numpy.random.uniform(low, high, size) samples from the uniform distribution
# on the half-open interval [low, high); size is the number of samples (default 1)
def stocGradAscent1(dataMatrix,classLabels,numIter=150):
    m,n = shape(dataMatrix)
    weights = ones(n)  # initialize the weights
    for j in range(numIter):
        dataIndex = list(range(m))  # in Python 3.x, range returns a range object, hence list()
        for i in range(m):
            alpha = 4/(1.0 + j + i) + 0.01  # alpha shrinks as the iteration count grows
            randIndex = int(random.uniform(0,len(dataIndex)))  # pick one of the remaining samples at random
            sampleIndex = dataIndex[randIndex]  # map through the pool of unused samples (fixes the book's erratum of indexing dataMatrix with randIndex directly)
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del(dataIndex[randIndex])  # drop it so each sample is used once per pass
    return weights
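
# How the learning-rate schedule behaves, worked through the formula above:
#     at j=0, i=0:   alpha = 4/1   + 0.01 = 4.01
#     at j=0, i=99:  alpha = 4/100 + 0.01 = 0.05
# alpha keeps shrinking as j grows but never drops below the 0.01 floor,
# so late samples in late passes still contribute to the weights.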

# Classification function: a sigmoid output above 0.5 maps to class 1
def classifyVector(inX,weights):
    prob = sigmoid(sum(inX * weights))
    if prob>0.5:
        return 1.0
    else:
        return 0.0

def colicTest():
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    
    trainingSet = []  # training feature vectors
    trainingLabels = []  # training labels
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):  # columns 0-20 are the 21 features; column 21 is the label
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))
        
    trainWeights = stocGradAscent1(array(trainingSet),trainingLabels,500)
    
    errorCount = 0
    numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(array(lineArr),trainWeights)) != int(currLine[21]):
            errorCount += 1

    errorRate = errorCount/numTestVec
    print('The error rate of this test is: %f' % errorRate)
    return errorRate

# Run colicTest() several times and report the average error rate
def multiTest():
    numTests = 10
    errorSum = 0.0
    for k in range(numTests):
        errorSum += colicTest()
    print('After %d iterations the average error rate is: %f' %(numTests,errorSum/float(numTests)))
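
# A minimal entry point, an addition to the book's listing (the book drives
# these functions from an interactive shell instead):
if __name__ == '__main__':
    multiTest()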
        
