Based on the idea of logistic regression, we use gradient ascent to solve for the regression coefficients, then train on the horse colic data and predict whether a sick horse will survive.
The examples come from Machine Learning in Action by Peter Harrington.
Gradient Ascent
Loading the Dataset
The dataset contains 100 data points belonging to two classes.
""" 加载数据集 """
def loadDataSet():
dataMat = []; labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat,labelMat
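A minimal usage sketch (assuming, per the code above, that testSet.txt sits in the working directory with one whitespace-separated "x1 x2 label" triple per line):

dataMat, labelMat = loadDataSet()
print(len(dataMat))    # expect 100 points, per the description above
print(dataMat[0])      # [1.0, x1, x2] -- note the prepended intercept term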
The Sigmoid Function
Its shape is what lets us use it for classification: sigmoid(x) = 1/(1 + e^(-x)) maps any real input into (0, 1), so the output can be read as a probability and thresholded at 0.5.
""" sigmoid函数 """
def sigmoid(inX):
return 1.0/(1+exp(-inX))
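A quick illustrative check of those properties (this relies on the numpy import above, since exp must accept arrays as well as scalars):

print(sigmoid(0))                        # 0.5
print(sigmoid(array([-10., 0., 10.])))   # values near 0, 0.5 and 1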
Finding the Weight Vector by Gradient Ascent
""" 梯度上升 """
def gradAscent(dataMatIn, classLabels):
dataMatrix = mat(dataMatIn) #转换为 NumPy 矩阵
labelMat = mat(classLabels).transpose() #转换为 NumPy 矩阵,求转置 (行向量-->列向量)
m,n = shape(dataMatrix) #获取矩阵的大小
alpha = 0.001 #步长
maxCycles = 500 #迭代代数
weights = ones((n,1)) #权重向量
for k in range(maxCycles):
h = sigmoid(dataMatrix*weights)
error = (labelMat - h) # 惩罚度
weights = weights + alpha * dataMatrix.transpose()* error
return weights
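In vector form, the loop body implements w ← w + α·Xᵀ(y − σ(Xw)). Here Xᵀ(y − σ(Xw)) is exactly the gradient of the log-likelihood ℓ(w) = Σᵢ [ yᵢ·log σ(xᵢᵀw) + (1 − yᵢ)·log(1 − σ(xᵢᵀw)) ], so stepping along it (gradient ascent) drives the weights toward the maximum-likelihood fit.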
Testing
""" Test gradient ascent (the logRegres. prefix assumes the functions above are saved in a module named logRegres) """
def testGradAscent():
    dataArr, labelMat = logRegres.loadDataSet()
    weights = logRegres.gradAscent(dataArr, labelMat)
    print(weights)
Result
[[ 4.12414349]
[ 0.48007329]
[-0.6168482 ]]
Visualizing the Result
""" 绘制拟合后的直线 """
def plotBestFit(weights):
import matplotlib.pyplot as plt
dataMat,labelMat=loadDataSet()
dataArr = array(dataMat)
n = shape(dataArr)[0] # 数据点的个数
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
for i in range(n): # 根据数据点的类型进行分类
if int(labelMat[i])== 1:
xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
ax.scatter(xcord2, ycord2, s=30, c='green')
x = arange(-3.0, 3.0, 0.1)
y = (-weights[0]-weights[1]*x)/weights[2]
ax.plot(x, y)
plt.xlabel('X1'); plt.ylabel('X2');
plt.show()
Testing
Note that the NumPy matrix must first be converted to a plain Python array.
""" Test plotting the fitted line """
def testPlotBestFit():
    dataArr, labelMat = logRegres.loadDataSet()
    weights = logRegres.gradAscent(dataArr, labelMat)
    logRegres.plotBestFit(weights.getA())    # getA(): matrix --> array
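Why getA() is needed here: gradAscent returns a NumPy matrix, and indexing a matrix always yields another 2-D matrix, which breaks the line arithmetic inside plotBestFit. A small illustration:

w = mat([[1.0], [2.0]])
print(w[0])          # matrix([[1.]]) -- indexing keeps it a 2-D matrix
print(w.getA()[0])   # [1.]           -- a plain array row after getA()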
Result
Stochastic Gradient Ascent
The batch gradient computation above becomes expensive when the dataset is large, since every iteration touches every point. The stochastic gradient algorithm instead updates the weights using a single point per step.
Stochastic Gradient Ascent, Version 0
""" 随机梯度上升0 """
def stocGradAscent0(dataMatrix, classLabels):
m,n = shape(dataMatrix)
alpha = 0.01
weights = ones(n)
for i in range(m):
h = sigmoid(sum(dataMatrix[i]*weights)) # 每次只选取一个特征点进行训练
error = classLabels[i] - h
weights = weights + alpha * error * dataMatrix[i]
return weights
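For a sense of the savings: gradAscent above evaluates the sigmoid on all m = 100 points in each of its 500 iterations, about 50,000 point evaluations in total, whereas stocGradAscent0 makes a single pass over the data, i.e. just 100 single-point updates. The price is accuracy, as the next test shows.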
Testing
""" Test stochastic gradient ascent 0 """
def teststocGradAscent0():
    dataArr, labelMat = logRegres.loadDataSet()
    weights = logRegres.stocGradAscent0(array(dataArr), labelMat)
    logRegres.plotBestFit(weights)
Result
Because this version makes only a single pass over the data (100 updates in all), the resulting decision boundary is not very good.
Stochastic Gradient Ascent, Version 1
Why the previous version falls short:
1. The step size is fixed, so the weights oscillate periodically instead of settling as they approach convergence.
2. The training points are visited in a fixed order rather than at random, so the weights are disturbed by any periodic pattern in the data.
Two changes address these points (see the step-size arithmetic after the code below):
1. The step size alpha decreases as the iterations proceed.
2. The training point for each update is chosen at random.
""" 随机梯度上升1 """
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
m,n = shape(dataMatrix)
weights = ones(n)
for j in range(numIter):
dataIndex = list(range(m)) # rang 对象无法迭代
for i in range(m):
alpha = 4/(1.0+j+i)+0.0001 # 步长会随着迭代进行而减少,但不会为0。防止波动和停止不前
randIndex = int(random.uniform(0,len(dataIndex))) # 随机选取迭代值,防止周期波动
h = sigmoid(sum(dataMatrix[randIndex]*weights))
error = classLabels[randIndex] - h
weights = weights + alpha * error * dataMatrix[randIndex]
del(dataIndex[randIndex])
return weights
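A quick sanity check on the step-size schedule with the values above (m = 100, numIter = 150): the very first update (j = 0, i = 0) uses alpha = 4/1 + 0.0001 ≈ 4.0001, the last update of the first pass (j = 0, i = 99) uses 4/100 + 0.0001 ≈ 0.0401, and the final update (j = 149, i = 99) uses 4/249 + 0.0001 ≈ 0.0162. The added 0.0001 keeps alpha strictly positive, so the weights never stop updating entirely.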
Testing
""" Test stochastic gradient ascent 1 """
def teststocGradAscent1():
    dataArr, labelMat = logRegres.loadDataSet()
    weights = logRegres.stocGradAscent1(array(dataArr), labelMat)
    logRegres.plotBestFit(weights)
Result
As the plot shows, the split is very good this time.
Application
Using logistic regression to predict the mortality of horses suffering from colic.
Training
""" 利用回归系数和特征量计算类别"""
def classifyVector(inX, weights):
prob = sigmoid(sum(inX*weights))
if prob > 0.5: return 1.0
else: return 0.0
""" 加载数据 训练 测试"""
def colicTest():
# 训练回归系数
frTrain = open('horseColicTraining.txt'); frTest = open('horseColicTest.txt')
trainingSet = []; trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr =[]
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
# 测试分类效果
errorCount = 0; numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr =[]
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr), trainWeights))!= int(currLine[21]):
errorCount += 1
errorRate = (float(errorCount)/numTestVec)
print ("the error rate of this test is: %f" % errorRate)
return errorRate
Testing
""" Predict the mortality of sick horses """
def multiTest():
    numTests = 10; errorSum = 0.0
    for k in range(numTests):
        errorSum += logRegres.colicTest()
    print("after %d iterations the average error rate is: %f" % (numTests, errorSum/float(numTests)))
Results
the error rate of this test is: 0.432836
the error rate of this test is: 0.268657
the error rate of this test is: 0.417910
the error rate of this test is: 0.313433
the error rate of this test is: 0.298507
the error rate of this test is: 0.358209
the error rate of this test is: 0.298507
the error rate of this test is: 0.283582
the error rate of this test is: 0.388060
the error rate of this test is: 0.402985
after 10 iterations the average error rate is: 0.346269
The complete code for all of the above is available on GitHub.