#!/bin/python
#coding=utf-8
# Linear regression implementation
import numpy as np
import random
# Generate a synthetic dataset: numPoints rows, x is a 2-column design
# matrix (intercept + index feature), y is the label vector.
def genData(numPoints, bias, variance):
    """Generate a synthetic linear-regression dataset.

    Args:
        numPoints: number of samples (rows) to generate.
        bias: constant offset added to every label.
        variance: additional constant offset added to every label.
            NOTE(review): despite the name, this is applied as a fixed
            shift, not as the variance of the noise — confirm intent.

    Returns:
        (x, y) where x has shape (numPoints, 2) with x[i] == [1, i]
        (intercept column plus the sample index as the single feature)
        and y has shape (numPoints,) with
        y[i] = i + bias + variance + U(0, 1) noise.
    """
    x = np.zeros(shape=(numPoints, 2))
    y = np.zeros(shape=numPoints)
    for i in range(numPoints):
        x[i][0] = 1  # intercept term
        x[i][1] = i  # feature: the sample index
        # random.uniform(0, 1) draws noise in [0, 1)
        y[i] = (i + bias) + random.uniform(0, 1) + variance
    return x, y
# Batch gradient descent for linear regression.
def gradientDescent(x, y, theta, alpha, m, numIterations):
    """Fit linear-regression weights by batch gradient descent.

    Args:
        x: design matrix of shape (m, n).
        y: label vector of shape (m,).
        theta: initial weight vector of shape (n,).
        alpha: learning rate.
        m: number of samples (rows of x).
        numIterations: number of full-batch update steps.

    Returns:
        The weight vector after numIterations updates.

    Side effects:
        Prints the cost at every iteration.
    """
    # Transpose once outside the loop; it is reused for every gradient.
    xTran = np.transpose(x)
    for i in range(numIterations):
        hypothesis = np.dot(x, theta)
        loss = hypothesis - y
        # Mean squared error cost; the 1/2 factor simplifies the gradient.
        cost = np.sum(loss ** 2) / (2 * m)
        # Gradient of the cost w.r.t. theta: X^T (X theta - y) / m.
        gradient = np.dot(xTran, loss) / m
        theta = theta - alpha * gradient
        print("Iteration %d | cost :%f" % (i, cost))
    return theta
# Driver script: generate data, fit the model, and evaluate one sample.
x, y = genData(100, 25, 10)
print("x:")
print(x)
print("y:")
print(y)
# m = number of samples, n = number of features (including intercept).
m, n = np.shape(x)
n_y = np.shape(y)
#print("m:"+str(m)+" n:"+str(n)+" n_y:"+str(n_y))
numIterations = 100000
alpha = 0.0005
theta = np.ones(n)
theta = gradientDescent(x, y, theta, alpha, m, numIterations)
print(theta)
# Evaluate the learned model on one new sample: [intercept, feature].
X = [1, 1.3]
y = np.dot(X, np.transpose(theta))
print(y)
# Source note: adapted from the blog post "Machine Learning in Action:
# the linear regression algorithm" (last updated 2021-07-02).