'''
Regression means fitting: the goal is to find the best-fit parameter set
(training a classifier is exactly this search for best-fit parameters, done
with an optimization algorithm).
Main idea of classification with logistic regression: fit a regression formula
to the classification boundary from the available data, then classify with it.
Pros: computationally cheap, easy to understand and implement.
Cons: prone to underfitting; classification accuracy may be low.
Works with: numeric and nominal data.
'''
'''
The idea behind gradient ascent: to find the maximum of a function, search
along the direction of its gradient. Writing the gradient operator as ∇, the
gradient of f(x,y) is ∇f(x,y) = (∂f/∂x, ∂f/∂y); f(x,y) must be defined and
differentiable at the point being evaluated.
After reaching each point the algorithm re-estimates the direction to move in
(the step along x, resp. y, is proportional to the partial derivative with
respect to x, resp. y).
'''
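'''
A minimal sketch of that idea on a one-variable function (an illustrative
helper, not part of the original code): f(x) = -x**2 + 4*x has gradient
f'(x) = -2x + 4, so repeating x <- x + alpha*f'(x) climbs toward the maximum
at x = 2.
'''
def _gradientAscentDemo(alpha=0.1, steps=100):
    x = 0.0                          # arbitrary starting point
    for _ in range(steps):
        x += alpha * (-2 * x + 4)    # one step along the gradient
    return x                         # approaches 2.0, the maximiser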
from numpy import *
def loadDataSet():
    # Read the sample data: each line of testSet.txt holds two feature
    # values followed by a class label.
    dataMat = []
    labelMat = []
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        # Prepend a constant 1.0 so weights[0] acts as the intercept term.
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat
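'''
Usage sketch (assumes testSet.txt sits next to this script, one sample per
line as "x1 x2 label", whitespace-separated; the shapes below follow from the
code above):
'''
# dataMat, labelMat = loadDataSet()
# dataMat[0]   -> [1.0, x1, x2] for the first sample
# labelMat[0]  -> its class label, 0 or 1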
def sigmoid(inX):
    # Numerically stable sigmoid: exp() only ever sees a non-positive
    # argument, so it cannot overflow. where() keeps it vectorised, which
    # matters because gradAscent passes in a whole column of values at once.
    inX = asarray(inX, dtype=float)
    safe = where(inX >= 0, -inX, inX)   # always <= 0
    e = exp(safe)
    return where(inX >= 0, 1.0 / (1.0 + e), e / (1.0 + e))
'''
Note on the sigmoid above: the naive form 1.0/(1+exp(-inX)) overflows when
inX is a large negative number, raising "RuntimeWarning: overflow encountered
in exp"; exponentiating only non-positive arguments avoids this.
'''
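'''
Quick sanity check of the stable form (values worked out from the definition):
'''
# sigmoid(0)        -> 0.5
# sigmoid(-1000.0)  -> 0.0 (no overflow warning; exp(-1000) underflows to 0)
# sigmoid(1000.0)   -> 1.0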
def gradAscent(dataMatIn, classLabels):
    # Batch gradient ascent: every update uses the entire dataset.
    dataMatrix = mat(dataMatIn)               # m x n feature matrix
    labelMat = mat(classLabels).transpose()   # m x 1 column of labels
    m, n = shape(dataMatrix)
    alpha = 0.001        # step size
    maxCycles = 500      # number of iterations
    weights = ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)     # m x 1 predictions
        error = (labelMat - h)                # error on every sample
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights
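'''
Why the update line works: the gradient of the logistic log-likelihood has
the closed form grad = X^T (y - sigmoid(Xw)), so
"weights + alpha * dataMatrix.transpose() * error" is exactly one gradient
ascent step. A minimal usage sketch (assumes testSet.txt is present):
'''
# dataArr, labelMat = loadDataSet()
# weights = gradAscent(dataArr, labelMat)   # returns an n x 1 numpy matrix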
'''
The tricky part:
Call the input of the sigmoid function z; then z = w^T x, where w^T is the
transpose of the weight vector w and x is the feature vector.
The sigmoid is a smooth approximation of a step function: multiply every
feature by its regression coefficient, add the products up, and feed the sum
into the sigmoid; the output is then thresholded to 0 or 1 (the sigmoid is
above 0.5 for z > 0 and below 0.5 for z < 0).
Determining the best coefficients: the weights that best fit the given
training data are found by gradient ascent, as in gradAscent above.
'''
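'''
A worked instance of z = w^T x (the numbers are made up for illustration):
with w = [1.0, 2.0, -1.0] and x = [1.0, 0.5, 3.0],
z = 1.0*1.0 + 2.0*0.5 + (-1.0)*3.0 = -1.0 and sigmoid(-1.0) ~ 0.269 < 0.5,
so this sample would be assigned to class 0.
'''
# z = sum(array([1.0, 2.0, -1.0]) * array([1.0, 0.5, 3.0]))   # -> -1.0
# sigmoid(z)                                                   # -> ~0.269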
def plotBestFit(weights):
    import matplotlib.pyplot as plt
    dataMat, labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green', marker='*')
    x = arange(-3.0, 3.0, 0.1)
    y = (-weights[0] - weights[1] * x) / weights[2]
    ax.plot(x, y)
    plt.xlabel('X1'); plt.ylabel('X2')
    plt.show()
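'''
Usage sketch: gradAscent returns an n x 1 matrix, so convert it to a plain
array with .getA() before plotting, otherwise the weights[0..2] arithmetic
produces shapes matplotlib rejects. The plotted boundary is the line where
w0 + w1*x1 + w2*x2 = 0, i.e. where the sigmoid is exactly 0.5.
'''
# dataArr, labelMat = loadDataSet()
# weights = gradAscent(dataArr, labelMat)
# plotBestFit(weights.getA())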
def stocGradAscent0(dataMatrix, classLabels):
    # Stochastic gradient ascent: a single pass over the data, updating the
    # weights after each individual sample instead of after the full batch.
    m, n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i] * weights))
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights
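'''
Usage sketch: unlike gradAscent this version indexes rows directly, so pass
a numpy array rather than the plain list returned by loadDataSet:
'''
# dataArr, labelMat = loadDataSet()
# weights = stocGradAscent0(array(dataArr), labelMat)
# plotBestFit(weights)   # already a 1-D array, no .getA() needed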
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    # Improved stochastic gradient ascent: the step size decays over time
    # and samples are visited in random order within each pass.
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))
        for i in range(m):
            # alpha shrinks as training proceeds but the +0.01 floor keeps
            # it from ever reaching 0, damping the oscillation of weights.
            alpha = 4 / (1.0 + j + i) + 0.01
            # Draw one of the not-yet-used samples of this pass at random.
            randIndex = int(random.uniform(0, len(dataIndex)))
            sampleIndex = dataIndex[randIndex]
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del(dataIndex[randIndex])
    return weights
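'''
The decay schedule in concrete numbers: within the first pass (j=0) alpha
runs 4/1+0.01=4.01, 4/2+0.01=2.01, 4/3+0.01~1.34, ..., and it keeps shrinking
on later passes while the 0.01 floor keeps new samples influential:
'''
# for j, i in [(0, 0), (0, 1), (1, 0), (10, 5)]:
#     print(4 / (1.0 + j + i) + 0.01)   # 4.01, 2.01, 2.01, 0.26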
def classifyVector(inX, weights):
    # Classify one feature vector: probability above 0.5 means class 1.
    prob = sigmoid(sum(inX * weights))
    if prob > 0.5:
        return 1.0
    else:
        return 0.0
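'''
Usage sketch (weights as trained by one of the functions above; the feature
values here are made up):
'''
# classifyVector(array([1.0, 0.5, -0.2]), weights)   # -> 1.0 or 0.0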
def colicTest():
    # Train on the horse-colic training file, then measure the error rate on
    # the test file; every line holds 21 features followed by the label.
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
    errorCount = 0
    numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
            errorCount += 1
    errorRate = float(errorCount) / numTestVec
    print("the error rate of this test is: %f" % errorRate)
    return errorRate
def multiTest():
    # Average colicTest() over several runs; the random sampling inside
    # stocGradAscent1 makes every run come out differently.
    numTests = 10
    errorSum = 0.0
    for k in range(numTests):
        errorSum += colicTest()
    print("after %d iterations the average error rate is: %f" % (numTests, errorSum / float(numTests)))
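'''
A minimal entry point (an added sketch; it assumes horseColicTraining.txt and
horseColicTest.txt sit next to this script):
'''
if __name__ == '__main__':
    multiTest()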