《机器学习实战》逻辑回归部分,用全部样本多次进行梯度上升的程序如下:
# coding=utf-8
__author__ = 'Administrator'
from numpy import *
# Load the dataset from 'testSet.txt': 100 points, each line "x1 x2 label".
def loadDataSet():
    """Load the 2-D training data from 'testSet.txt'.

    Each non-empty line holds two float coordinates followed by an int
    class label, whitespace-separated.

    Returns:
        dataMat: list of [1.0, x1, x2] rows — the leading 1.0 is the
            bias/intercept feature so the first weight acts as w0.
        labelMat: list of int class labels.
    """
    dataMat = []
    labelMat = []
    # 'with' guarantees the file handle is closed; the original leaked it.
    with open('testSet.txt') as fr:
        for line in fr:
            lineArr = line.strip().split()
            if not lineArr:  # skip blank lines (e.g. trailing newline)
                continue
            # Prepend the constant 1.0 to extend the feature vector.
            dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
            labelMat.append(int(lineArr[2]))
    return dataMat, labelMat
# Logistic (sigmoid) activation.
def sigmoid(inX):
    """Return the element-wise sigmoid 1 / (1 + e^-x) of inX."""
    negExp = exp(-inX)
    return 1.0 / (1.0 + negExp)
#梯度上升法(原注释"梯度下降法"有误,gradAscent 实现的是梯度上升)
def gradAscent(dataMatIn, classLabels):
dataMatrix = mat(dataMatIn) #convert to NumPy matrix
labelMat = mat(classLabels).transpose() #convert to NumPy matrix
m,n = shape(dataMatrix) #get the rows and cols of the data
alpha = 0.001 #