The main idea behind classification with logistic regression is to fit a regression formula for the decision boundary from the existing data, and then use that boundary to classify new samples.
Similar to the figure below, a regression formula is fitted to obtain the boundary line that separates the red samples from the green ones.
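To make the boundary concrete, here is a sketch of the setup assumed by the code below (two features per sample; this paraphrases the code and is not taken from the original figure):

z = w0*1.0 + w1*x1 + w2*x2   # the regression formula; the leading 1.0 is the constant term x0
p = sigmoid(z)               # probability that the sample belongs to class 1
# decision boundary: z = 0 (i.e. p = 0.5), which is the line x2 = (-w0 - w1*x1)/w2 drawn later by plotBestFit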
from numpy import *
import matplotlib.pyplot as plt
def loaddataset():
    dataMat = []
    labelMat = []
    fr = open('testSet.txt')            # open the testSet data file
    for line in fr.readlines():         # read the file line by line
        lineArr = line.strip().split()  # split each line into fields
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])  # columns 1 and 2 are the features; the leading 1.0 is the constant term x0
        labelMat.append(int(lineArr[2]))  # column 3 is the label
    return dataMat, labelMat

def sigmoid(inX):
    return 1.0/(1 + exp(-inX))
Gradient ascent: the inputs are the data set and the class labels.
def gradAscent(dataMatIn, classLabels):
    dataMatrix = mat(dataMatIn)              # (m, n) matrix
    labelMat = mat(classLabels).transpose()  # transpose to a (m, 1) column vector
    m, n = shape(dataMatrix)
    alpha = 0.001                            # step size
    maxCycles = 500                          # number of iterations
    weights = ones((n, 1))                   # initialize the regression coefficients, (n, 1)
    for k in range(maxCycles):
        h = sigmoid(dataMatrix*weights)      # (m, 1) vector of predictions
        error = labelMat - h                 # prediction error for every sample
        weights = weights + alpha*dataMatrix.transpose()*error  # gradient ascent update
    return weights
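For reference (a standard result, not spelled out in the original text), the update performed above is batch gradient ascent on the log-likelihood of the training set:

weights = weights + alpha * X.T * (labelMat - sigmoid(X * weights))

where X is the (m, n) data matrix, so every sample contributes to every update; this is the cost that the stochastic version further below avoids.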
Plotting the decision boundary:
def plotBestFit(wei):
    weights = wei.getA()                     # convert the numpy matrix of coefficients to an array
    dataMat, labelMat = loaddataset()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = arange(-3.0, 3.0, 0.1)
    y = (-weights[0]-weights[1]*x)/weights[2]  # the boundary line w0 + w1*x1 + w2*x2 = 0, solved for x2
    ax.plot(x, y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()
dataArr, labelMat = loaddataset()
weights = gradAscent(dataArr, labelMat)
print(weights)
plotBestFit(weights)
Improving the algorithm:
Stochastic gradient ascent: update the regression coefficients using only one sample at a time, which reduces the amount of computation.
def sgdAscent(dataMatrix, classLabels):
    m, n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i]*weights))  # h and error are now scalars, not vectors
        error = classLabels[i] - h
        weights = weights + alpha*error*dataMatrix[i]  # note: dataMatrix must be a numpy array here; arrays multiply element-wise, a numpy matrix would follow matrix-multiplication rules, and a plain Python list would not support this at all
    return weights
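A minimal usage sketch (assuming testSet.txt is available as above): since sgdAscent relies on element-wise numpy array arithmetic, the data set has to be converted with array() before the call.

dataArr, labelMat = loaddataset()
weights = sgdAscent(array(dataArr), labelMat)
print(weights)   # a length-3 numpy array of regression coefficients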
Improved stochastic gradient ascent:
def sgdAscent2(dataMatrix, classLabels, numIter=150):
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))           # indices of the samples not yet used in this pass (a list, so entries can be deleted)
        for i in range(m):
            alpha = 4/(1.0+j+i) + 0.01       # alpha shrinks as iterations proceed; the 0.01 floor ensures new data still has some influence after many iterations
            randIndex = int(random.uniform(0, len(dataIndex)))  # pick a sample at random to reduce periodic fluctuations
            h = sigmoid(sum(dataMatrix[dataIndex[randIndex]]*weights))
            error = classLabels[dataIndex[randIndex]] - h
            weights = weights + alpha*error*dataMatrix[dataIndex[randIndex]]
            del(dataIndex[randIndex])        # remove the used index so each sample is visited once per pass
    return weights
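A usage sketch (my own glue code, not from the original text): because plotBestFit calls getA() on its argument, the plain numpy array returned by sgdAscent2 has to be wrapped back into a column matrix before plotting.

dataArr, labelMat = loaddataset()
weights = sgdAscent2(array(dataArr), labelMat, numIter=150)
plotBestFit(mat(weights).transpose())   # wrap the array in a (3, 1) matrix so getA() works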
A test example:
def classifyVector(inX, weights):
    prob = sigmoid(sum(inX*weights))
    if prob > 0.5:
        return 1.0
    else:
        return 0.0
def colicTest():
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):                  # read the 21 feature values
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))  # the 22nd column is the label
    trainWeights = sgdAscent2(array(trainingSet), trainingLabels, 500)
    errorCount = 0
    numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
            errorCount += 1
    errorrate = float(errorCount)/numTestVec
    print('the error rate is {0}'.format(errorrate))
    return errorrate
def multiTest():                             # run colicTest several times (ten here) and average the error rate
    numTests = 10
    errorSum = 0.0
    for k in range(numTests):
        errorSum += colicTest()
    print("after {0} iterations the average error rate is {1}".format(numTests, errorSum/float(numTests)))