The goal of logistic regression is to find the best-fit parameters of a nonlinear function, the sigmoid, and the fitting can be done with an optimization algorithm. Among optimization algorithms, the most commonly used is gradient ascent, which can in turn be simplified to stochastic gradient ascent.
Stochastic gradient ascent achieves results comparable to batch gradient ascent while consuming far fewer computational resources. In addition, stochastic gradient ascent is an online algorithm: it can update the parameters as each new sample arrives, without rereading the entire dataset for a batch computation.
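For reference, the batch gradient ascent step implemented in gradAscent below can be written as w := w + alpha * X^T * (y - sigmoid(X * w)), where X is the m-by-n sample matrix, y is the column vector of class labels, and alpha is the step size; the stochastic variants apply the same update rule one sample at a time.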
import numpy as np
def loadDataSet():  # load the dataset
    dataMat = []
    labelMat = []
    fr = open(r'F:\算法学习\机器学习书籍\机器学习实战(Python2.7)\Ch05\testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])  # a constant 1.0 for the intercept, then X1 and X2
        labelMat.append(int(lineArr[2]))  # the third column is the class label
    return dataMat, labelMat
def sigmoid(inX):  # the sigmoid function
    return 1.0/(1 + np.exp(-inX))
# Gradient ascent; the same idea as gradient descent, except the former seeks a maximum and the latter a minimum
def gradAscent(dataMatIn, classLabels):
    # inputs: the training samples and their corresponding class labels
    dataMatrix = np.mat(dataMatIn)  # convert the array of training samples to matrix form
    labelMat = np.mat(classLabels).T  # class-label vector, transposed to a column vector for convenience; if it is already a column vector, this line can be commented out
    m, n = np.shape(dataMatrix)
    alpha = 0.001  # step size
    maxCycles = 500  # maximum number of iterations
    weights = np.ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)  # column vector of predicted probabilities
        error = (labelMat - h)
        weights = weights + alpha * dataMatrix.T * error  # move the weights along the gradient of the log-likelihood
    return weights  # the fitted regression coefficients
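# A minimal usage sketch (assuming testSet.txt exists at the path hard-coded in loadDataSet):
#   dataArr, labelMat = loadDataSet()
#   weights = gradAscent(dataArr, labelMat)  # returns a 3x1 NumPy matrix of coefficients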
# Plot the dataset and the logistic-regression best-fit line
def plotBestFit(wei):
    import matplotlib.pyplot as plt
    weights = wei.getA()  # convert the weight matrix to an ndarray
    dataMat, labelMat = loadDataSet()
    dataArr = np.array(dataMat)
    n = np.shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):  # split the samples by class for plotting
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1]); ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1]); ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = np.arange(-3.0, 3.0, 0.1)
    y = (-weights[0] - weights[1]*x)/weights[2]  # the decision boundary: w0 + w1*x1 + w2*x2 = 0
    ax.plot(x, y)
    plt.xlabel('X1'); plt.ylabel('X2')
    plt.show()
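# plotBestFit expects a NumPy matrix (it calls .getA()), so gradAscent's output can be passed directly:
#   plotBestFit(gradAscent(dataArr, labelMat))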
# Stochastic gradient ascent: update the weights one sample at a time
def stocGradAscent0(dataMatrix, classLabels):
    m, n = np.shape(dataMatrix)
    alpha = 0.01
    weights = np.ones(n)
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i] * weights))
        error = classLabels[i] - h  # unlike batch gradient ascent, h and error are scalars here, so no matrix conversion is needed
        weights = weights + alpha * error * dataMatrix[i]
    return weights
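# Hypothetical call; the function indexes dataMatrix row by row, so pass a NumPy array:
#   weights = stocGradAscent0(np.array(dataArr), labelMat)
#   plotBestFit(np.mat(weights).T)  # wrap the 1-D weight vector so plotBestFit's .getA() call works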
# Improved stochastic gradient ascent
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = np.shape(dataMatrix)
    weights = np.ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))  # indices of the samples not yet used in this pass (list() so del works in Python 3)
        for i in range(m):
            alpha = 4/(1.0 + j + i) + 0.01  # let alpha decay with the iteration count, but never reach 0
            randIndex = int(np.random.uniform(0, len(dataIndex)))  # randomly pick one of the remaining samples to update with
            sampleIndex = dataIndex[randIndex]
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))
            error = classLabels[sampleIndex] - h  # as in stocGradAscent0, h and error are scalars, so no matrix conversion is needed
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del(dataIndex[randIndex])  # drop the used index so every sample is visited once per pass
    return weights
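# Example run (assuming the same testSet.txt data as above):
#   dataArr, labelMat = loadDataSet()
#   weights = stocGradAscent1(np.array(dataArr), labelMat, numIter=150)
#   plotBestFit(np.mat(weights).T)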