1. Gradient ascent

Batch gradient ascent recomputes the gradient over the entire dataset on every iteration, so each update is accurate but costly for large data. The listing below includes the NumPy import and the sigmoid helper it depends on:

from numpy import *

def sigmoid(x):
    # Logistic function: maps any real value into (0, 1).
    return 1.0 / (1.0 + exp(-x))

def gradAscent(dataMatIn, classLabels):
    dataMat = mat(dataMatIn)                    # m x n feature matrix
    labelMat = mat(classLabels).transpose()     # m x 1 column of labels
    m, n = shape(dataMat)
    alpha = 0.001       # learning rate
    maxCycles = 500     # number of full-batch iterations
    weights = ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMat * weights)          # m x 1 vector of predictions
        error = labelMat - h                    # how far off each prediction is
        weights = weights + alpha * dataMat.transpose() * error
    return weights
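
A minimal usage sketch; the toy dataset below is made up for illustration (the first column is a constant 1.0 acting as the bias term) and is not from the original:

dataArr = [[1.0, 2.1, 1.1], [1.0, 1.5, 0.4],
           [1.0, 0.3, 3.2], [1.0, 0.8, 2.7]]
labels = [1, 1, 0, 0]

weights = gradAscent(dataArr, labels)
print(weights)   # 3 x 1 matrix of fitted weights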
2. Stochastic gradient ascent (online learning)

Stochastic gradient ascent updates the weights one sample at a time, so it can also learn online as new samples arrive. It works on plain NumPy arrays so the products below are element-wise:

def stoGradAscent(dataMatIn, classLabels):
    dataMat = array(dataMatIn)
    m, n = shape(dataMat)
    alpha = 0.01
    weights = ones(n)      # length-n weight vector
    for i in range(m):     # a single pass over the data
        h = sigmoid(sum(dataMat[i] * weights))   # scalar prediction for sample i
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMat[i]
    return weights
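
Once trained, the weight vector classifies a new sample by thresholding the sigmoid output at 0.5. A small helper sketch (classifyVector is a name assumed here, not defined in the original):

def classifyVector(inX, weights):
    # Hypothetical helper: class 1 when the predicted probability exceeds 0.5, else class 0.
    prob = sigmoid(sum(inX * weights))
    return 1.0 if prob > 0.5 else 0.0

For example, classifyVector(array([1.0, 1.8, 0.9]), weights) returns the predicted class for a single sample.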
3. Improved stochastic gradient ascent

Two refinements over the plain stochastic version: the step size alpha decays as training proceeds (but never reaches zero), and within each pass the samples are visited in random order without replacement, which reduces periodic oscillation in the weights:

def stoGradAscent2(dataMatIn, classLabels, numIter=150):
    dataMat = array(dataMatIn)
    m, n = shape(dataMat)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))     # samples not yet used in this pass
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.01   # decaying, never-zero step size
            pick = int(random.uniform(0, len(dataIndex)))
            randIndex = dataIndex[pick]        # a random still-unused sample
            h = sigmoid(sum(dataMat[randIndex] * weights))
            error = classLabels[randIndex] - h
            weights = weights + alpha * error * dataMat[randIndex]
            del dataIndex[pick]                # use each sample once per pass
    return weights
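
For a quick comparison, all three trainers can run on the same toy data from the sketch in section 1 (hypothetical, as before); the improved version typically stabilizes in far fewer passes than the 500 full-batch iterations:

w0 = gradAscent(dataArr, labels)                   # 500 full-batch iterations
w1 = stoGradAscent(dataArr, labels)                # one pass, one sample at a time
w2 = stoGradAscent2(dataArr, labels, numIter=20)   # 20 randomized, decaying-alpha passes
print(w0.T)
print(w1)
print(w2)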