A note to start: while working through Machine Learning in Action I found that much of the book's code ships without comments, which is a real hurdle for newcomers. Below is the code with my own annotations, offered for reference only; corrections are welcome.
1. Gradient ascent and stochastic gradient ascent
# coding: utf-8
from numpy import *

# Function: load the data set
# Input:    none
# Output:   data matrix, label vector
def loadDataSet():
    dataMat = []                        # data matrix
    labelMat = []                       # label vector
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()  # strip() removes surrounding whitespace, split() tokenizes
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])  # the leading 1.0 is x0, the intercept term
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat
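# Data note (my addition): each line of the book's testSet.txt holds two
# features and a 0/1 label, e.g. "-0.017612  14.053064  0", which becomes
# the dataMat row [1.0, -0.017612, 14.053064] with label 0.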
# Function: compute the sigmoid of x
# Input:    x
# Output:   sigmoid(x)
def sigmoid(inX):
    return 1.0 / (1 + exp(-inX))
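# A numerically safer variant (my addition, not from the book): for very
# negative inX, exp(-inX) overflows double precision and NumPy emits a
# RuntimeWarning. Clipping the argument avoids the warning, and sigmoid
# already saturates to 0 or 1 long before |x| reaches 500.
def sigmoidStable(inX):
    return 1.0 / (1 + exp(-clip(inX, -500, 500)))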
# Function: logistic regression gradient ascent optimization
# Input:    none
# Output:   optimized weight vector
def gradAscent():
    dataMatIn, classLabels = loadDataSet()   # data matrix, label vector
    dataMatrix = mat(dataMatIn)              # convert the list to an m*n matrix
    labelMat = mat(classLabels).transpose()  # convert the 1*m label vector to an m*1 matrix
    m, n = shape(dataMatrix)                 # rows and columns of dataMatrix
    alpha = 0.001                            # step size
    maxCycles = 500                          # maximum number of iterations
    weights = ones((n, 1))                   # n*1 weight matrix
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)
        error = (labelMat - h)
        # gradient of the log-likelihood is X^T * (y - h), so step along it
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights
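# Quick check (my addition; assumes the book's testSet.txt is in the working
# directory). gradAscent returns a 3*1 NumPy matrix; on the book's data the
# printed result is roughly:
#   >>> gradAscent()
#   matrix([[ 4.12414349], [ 0.48007329], [-0.6168482 ]])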
# Function: plot the decision boundary
# Input:    none
# Output:   none
def plotBestFit():
    import matplotlib.pyplot as plt
    dataMat, labelMat = loadDataSet()             # data matrix, label vector
    dataArr = array(dataMat)                      # convert dataMat to an array
    weights = stocGradAscent1(dataArr, labelMat)  # optimized weight vector (the original called stocGradAscent1() with no arguments, which fails)
    # weights = gradAscent().getA()               # getA() converts a matrix to an array; see help(numpy.matrix.getA)
    n = shape(dataArr)[0]                         # number of rows in dataArr
    xcord1 = []
    ycord1 = []
    xcord2 = []
    ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:                 # label 1
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:                                     # label 0
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')  # red squares
    ax.scatter(xcord2, ycord2, s=30, c='green')            # green dots
    x = arange(-3.0, 3.0, 0.1)  # sample [-3.0, 3.0) with step 0.1
    y = (-weights[0] - weights[1] * x) / weights[2]  # boundary line: weights[0] + weights[1]*x + weights[2]*y = 0
    ax.plot(x, y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()
# Function: stochastic gradient ascent
# Input:    none
# Output:   optimized weight vector
def stocGradAscent0():
    dataArr, classLabels = loadDataSet()  # data matrix, label vector
    dataMatrix = array(dataArr)           # convert the list to an m*n array
    m, n = shape(dataMatrix)              # rows and columns of dataMatrix
    alpha = 0.01                          # step size
    weights = ones(n)                     # weight vector of length n
    for i in range(m):                    # a single pass, updating on one sample at a time
        h = sigmoid(sum(dataMatrix[i] * weights))
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights
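The contrast with gradAscent: the batch version makes maxCycles full passes using matrix products, while stocGradAscent0 makes a single pass and updates on one sample at a time, so h and error are scalars rather than vectors. A quick side-by-side (a sketch; assumes testSet.txt is present):

w_batch = gradAscent()     # 500 full passes, returns a 3*1 matrix
w_sgd = stocGradAscent0()  # one pass over the m samples, returns a length-3 array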
2. Improved stochastic gradient ascent
# Function: improved stochastic gradient ascent
# Input:    data matrix, label vector, number of passes (default 150)
# Output:   optimized weight vector
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = shape(dataMatrix)  # rows and columns of dataMatrix
    weights = ones(n)         # weight vector of length n
    for j in range(numIter):  # numIter passes
        dataIndex = list(range(m))  # samples not yet used in this pass (list, so del works in Python 3)
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.01  # step size shrinks as iterations proceed but never reaches 0
            randIndex = int(random.uniform(0, len(dataIndex)))  # pick a random remaining sample
            sampleIndex = dataIndex[randIndex]  # map into the original rows (the book indexes dataMatrix with randIndex directly, which can reuse rows)
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del(dataIndex[randIndex])  # remove it so each sample is used once per pass
    return weights
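Because the improved version takes the data explicitly, a typical call looks like this (a sketch; assumes testSet.txt is present):

dataArr, labelMat = loadDataSet()
weights = stocGradAscent1(array(dataArr), labelMat)  # default numIter = 150 passes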
3. Predicting horse mortality from colic symptoms
# Function: predict the class label
# Input:    feature vector, regression weights
# Output:   predicted class label
def classifyVector(inX, weights):
    prob = sigmoid(sum(inX * weights))
    if prob > 0.5:
        return 1.0
    else:
        return 0.0
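# Worked example (my addition, hypothetical numbers for illustration only):
#   classifyVector(array([1.0, 2.0, -1.5]), array([0.3, -0.2, 0.1]))
# computes sigmoid(1.0*0.3 + 2.0*(-0.2) + (-1.5)*0.1) = sigmoid(-0.25) ~ 0.44,
# which is below 0.5, so the predicted label is 0.0.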
# Function: train on the horse colic training set, then measure the error rate on the test set
# Input:    none
# Output:   error rate of this run
def colicTest():
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')  # strip() removes surrounding whitespace, split() tokenizes on tabs
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))  # collect the 21 features
        trainingSet.append(lineArr)             # feature set
        trainingLabels.append(float(currLine[21]))  # label set
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
    errorCount = 0
    numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):  # prediction disagrees with the true label
            errorCount += 1
    errorRate = float(errorCount) / numTestVec
    print("the error rate of this test is: %f" % errorRate)
    return errorRate
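# Data note (from the book's preprocessing step): each tab-separated line of
# horseColicTraining.txt / horseColicTest.txt holds 21 features followed by a
# 0/1 label; missing values were replaced with 0 beforehand, which is safe
# here because a 0-valued feature leaves the weight update unchanged.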
# Function: run colicTest numTests times and report the average error rate
# Input:    none
# Output:   none
def multiTest():
    numTests = 10
    errorSum = 0.0
    for k in range(numTests):
        print("the %d test: " % k)  # print the run header before colicTest prints its error rate
        errorSum += colicTest()
    print("after %d iterations the average error rate is: %f" % (numTests, errorSum / float(numTests)))