# Logistic regression for the horse-colic survival problem (Machine Learning in Action, ch. 5)

# Logistic regression, used for binary classification
import numpy as np
import math
import matplotlib.pyplot as plt

# The constant 1.0 column makes w0 act as the bias term: w0 * 1 = b
def loadData():
    """Load the 2-D toy data set 'testSet.txt'.

    Each text line holds "x1 x2 label"; every sample is stored as
    [1.0, x1, x2] (the leading 1.0 pairs with w0 as the bias term).

    Returns:
        (dataMat, labelMat): list of [1.0, x1, x2] feature rows and the
        matching list of int class labels.
    """
    dataMat, labelMat = [], []
    # 'with' guarantees the file is closed even on error
    # (the original leaked the handle).
    with open('testSet.txt') as fr:
        for line in fr:
            lineArr = line.strip().split()
            dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
            labelMat.append(int(lineArr[2]))
    return dataMat, labelMat

def sigmoid(inX):
    """Logistic function 1 / (1 + e^-x), elementwise for array input.

    The naive form overflows (RuntimeWarning, exp -> inf) for large
    negative inputs; clipping the exponent to +/-500 keeps exp() finite
    while leaving the result unchanged to within float precision.
    """
    return 1.0 / (1.0 + np.exp(-np.clip(inX, -500, 500)))

# Batch method: every sample participates in every update via one matrix product
def gradAscent(dataMatIn, classlabelIn):
    """Batch gradient ascent for logistic regression.

    Every iteration uses the full data set: h = sigmoid(X w),
    w += alpha * X^T (y - h).

    Rewritten with plain ndarrays and the @ operator; the original used
    np.mat / np.matrix, which NumPy has deprecated.

    Args:
        dataMatIn:    m x n list/array of feature rows (first column 1.0).
        classlabelIn: length-m sequence of 0/1 labels.

    Returns:
        (n, 1) ndarray of learned weights.
    """
    X = np.asarray(dataMatIn, dtype=float)                    # (m, n)
    y = np.asarray(classlabelIn, dtype=float).reshape(-1, 1)  # (m, 1)
    m, n = X.shape
    alpha = 0.001      # fixed step size
    maxiter = 500      # number of full-batch updates
    weights = np.ones((n, 1))
    for _ in range(maxiter):
        h = sigmoid(X @ weights)          # (m, 1) predicted probabilities
        error = y - h                     # ascent direction: label minus prediction
        weights += alpha * (X.T @ error)
    return weights

# Stochastic gradient ascent: update the weights with one sample at a time
def SGA1(dataMatIn, classlabelIn):
    """One pass of stochastic gradient ascent (one sample per update).

    Bug fixes versus the original:
      * the prediction must be sigmoid(x_i . w); the original computed
        sigmoid(dataMatIn[i]) and never used the weights at all.
      * the ascent update is w += alpha * (y - h) * x_i, matching
        gradAscent(); the original's (h - y) with '+=' stepped the
        weights AWAY from the maximum-likelihood solution.

    Args:
        dataMatIn:    m x n array-like of feature rows.
        classlabelIn: length-m sequence of 0/1 labels.

    Returns:
        length-n ndarray of weights after one sweep over the data.
    """
    dataMat = np.asarray(dataMatIn, dtype=float)
    m, n = dataMat.shape
    alpha = 0.001
    weights = np.ones(n)
    for i in range(m):
        h = sigmoid(np.dot(dataMat[i], weights))
        error = classlabelIn[i] - h
        weights += alpha * error * dataMat[i]
    return weights

# Improved stochastic gradient ascent: a decaying step size damps oscillation.
# Each epoch: draw samples at random without replacement (remove after use).
def SGA(dataMMatIn, classlabelIn, numIter=15):
    """Improved stochastic gradient ascent.

    Each epoch visits every sample exactly once in random order
    (sampling without replacement), with a step size that decays with
    progress but never drops below the 0.01 floor.

    Bug fixes versus the original:
      * randPos is a position INSIDE dataIndex, so the sample row is
        dataIndex[randPos]; the original indexed dataMat[randPos]
        directly, revisiting some rows and skipping others (known
        errata of the book's listing).
      * the ascent update uses (y - h), matching gradAscent(); the
        original's (h - y) moved the weights in the wrong direction.

    Args:
        dataMMatIn:   m x n array-like of feature rows.
        classlabelIn: length-m sequence of 0/1 labels.
        numIter:      number of epochs over the data (default 15).

    Returns:
        length-n ndarray of learned weights.
    """
    dataMat = np.asarray(dataMMatIn, dtype=float)
    m, n = dataMat.shape
    weights = np.ones(n)
    for j in range(numIter):
        # a list so used positions can be deleted as the epoch proceeds
        dataIndex = list(range(m))
        for i in range(m):
            # decaying-but-bounded step size
            alpha = 4.0 / (1 + i + j) + 0.01
            randPos = int(np.random.uniform(0, len(dataIndex)))
            sample = dataIndex[randPos]
            h = sigmoid(np.dot(dataMat[sample], weights))
            error = classlabelIn[sample] - h
            weights += alpha * error * dataMat[sample]
            del dataIndex[randPos]
    return weights


def classifyVector(inX, weights):
    """Predict the class (0 or 1) for feature vector inX.

    Elementwise '*' followed by sum() is the dot product inX . weights;
    the prediction is 1 exactly when the logistic probability exceeds 0.5.
    """
    prob = sigmoid(sum(inX * weights))
    return 1 if prob > 0.5 else 0

def colicTest():
    """Train on horseColicTraining.txt, evaluate on horseColicTest.txt.

    Each line holds 21 tab-separated features followed by the 0/1 label.
    Prints the test error rate and also returns it (the original only
    printed; returning is backward compatible since the caller ignores
    the result).
    """

    def parseLine(line, numFeatures=21):
        # One data line -> (feature list, float label).
        fields = line.strip().split('\t')
        features = [float(fields[i]) for i in range(numFeatures)]
        return features, float(fields[numFeatures])

    trainSet, trainLabels = [], []
    # 'with' closes the files even on a parse error (original leaked both).
    with open('horseColicTraining.txt') as frTrain:
        for line in frTrain:
            features, label = parseLine(line)
            trainSet.append(features)
            trainLabels.append(label)

    trainWeights = gradAscent(trainSet, trainLabels)
    # flatten the (n, 1) weight column into a plain list of floats
    weights = [w for row in trainWeights for w in row]

    errorCount, numTestVec = 0, 0
    with open('horseColicTest.txt') as frTest:
        for line in frTest:
            numTestVec += 1
            features, label = parseLine(line)
            if classifyVector(np.array(features), weights) != int(label):
                errorCount += 1

    errorrate = float(errorCount) / numTestVec
    print(errorrate)
    return errorrate


# Guarded entry point: importing this module must not trigger file I/O.
if __name__ == "__main__":
    colicTest()

 

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值