Based on the Python gradient descent program from 麦子学院 instructor 彭亮's machine learning course.
#coding=utf-8
import numpy as np
import random

# Gradient descent algorithm
def gradientDescent(x, y, theta, alpha, m, numIteration):
    # x: input examples, y: labels, theta: parameters to learn,
    # alpha: learning rate, m: number of examples, numIteration: number of iterations
    xTrans = x.transpose()  # transpose of the design matrix
    for i in range(0, numIteration):
        hypothesis = np.dot(x, theta)
        # print('hypothesis', hypothesis)
        loss = hypothesis - y
        cost = np.sum(loss ** 2) / (2 * m)  # squared-error cost
        print("Iteration %d / Cost:%f" % (i, cost))
        gradient = np.dot(xTrans, loss) / m
        theta = theta - alpha * gradient  # update rule
    return theta
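The update uses the analytic gradient of the cost, gradient = np.dot(xTrans, loss) / m. A quick way to verify it is a finite-difference check against the cost itself; the sketch below is an addition for verification, not part of the original course code, and numerical_gradient is a name chosen here.

def numerical_gradient(x, y, theta, m, eps=1e-6):
    # Central-difference approximation of d(cost)/d(theta), for comparison
    # with the analytic gradient np.dot(xTrans, loss) / m
    grad = np.zeros_like(theta)
    for j in range(len(theta)):
        t_plus = theta.copy()
        t_plus[j] += eps
        t_minus = theta.copy()
        t_minus[j] -= eps
        cost_plus = np.sum((np.dot(x, t_plus) - y) ** 2) / (2 * m)
        cost_minus = np.sum((np.dot(x, t_minus) - y) ** 2) / (2 * m)
        grad[j] = (cost_plus - cost_minus) / (2 * eps)
    return grad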
# Generate data for testing
def genData(numPoints, bias, variance):
    '''
    :param numPoints: number of samples
    :param bias: offset added to each label
    :param variance: scale of the uniform noise
    :return: feature matrix x and label vector y
    '''
    x = np.zeros(shape=(numPoints, 2))  # zero matrix: numPoints rows, 2 columns
    y = np.zeros(shape=numPoints)       # label vector of length numPoints
    for i in range(0, numPoints):
        x[i][0] = 1  # column 1 is always 1 (the intercept term)
        x[i][1] = i  # column 2 is the row index
        y[i] = (i + bias) + random.uniform(0, 1) * variance
    return x, y
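The loop in genData can also be written with NumPy broadcasting. The vectorized variant below is a sketch added for comparison (genDataVectorized is a hypothetical name); it produces data with the same structure, using np.random instead of the random module.

def genDataVectorized(numPoints, bias, variance):
    # Column 0 is the intercept term, column 1 is the index 0..numPoints-1
    x = np.ones((numPoints, 2))
    x[:, 1] = np.arange(numPoints)
    # y[i] = i + bias + uniform(0, 1) noise scaled by variance
    y = x[:, 1] + bias + np.random.uniform(0, 1, numPoints) * variance
    return x, y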
x, y = genData(100, 25, 10)
print("x:", x)
print("y:", y)
m, n = np.shape(x)
n_y = np.shape(y)
print "m: %s n: %s " %(str(m),str(n))
print "n_y: %s" %(str(n_y))
numIteration = 120000
alpha = 0.0005
theta = np.ones(n)  # initialize theta as a 1*n vector of ones
theta = gradientDescent(x, y, theta, alpha, m, numIteration)
print(theta)
Result:
[ 30.56233795 0.99501898]
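As a cross-check (an addition, not part of the original program), the learned theta can be compared with NumPy's closed-form least-squares solution, which should land near the same values:

theta_ls, residuals, rank, sv = np.linalg.lstsq(x, y, rcond=None)
print("least-squares theta:", theta_ls)  # expected to be close to the gradient-descent result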