Implementing Multivariate Linear Regression in Python from Scratch with Gradient Descent Optimization (with Code)

import matplotlib.pyplot as plt
import numpy as np

def hypothesis(theta, X, n):
    # Evaluate h(x) = theta^T x for every training example in X.
    h = np.ones((X.shape[0], 1))
    theta = theta.reshape(1, n + 1)
    for i in range(0, X.shape[0]):
        h[i] = float(np.matmul(theta, X[i]))
    h = h.reshape(X.shape[0])
    return h
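For reference, the loop above evaluates the standard linear hypothesis row by row; x_0 = 1 is the bias column that linear_regression prepends later:

$$
h_\theta(x) = \theta^{\mathsf{T}} x = \theta_0 x_0 + \theta_1 x_1 + \cdots + \theta_n x_n, \qquad x_0 = 1
$$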

def BGD(theta, alpha, num_iters, h, X, y, n):
    # Batch gradient descent: update every theta_j using all m examples per iteration.
    cost = np.ones(num_iters)
    for i in range(0, num_iters):
        theta[0] = theta[0] - (alpha / X.shape[0]) * sum(h - y)
        for j in range(1, n + 1):
            theta[j] = theta[j] - (alpha / X.shape[0]) * sum((h - y) * X.transpose()[j])
        h = hypothesis(theta, X, n)  # recompute hypotheses with the updated parameters
        cost[i] = (1 / X.shape[0]) * 0.5 * sum(np.square(h - y))  # halved mean squared error
    theta = theta.reshape(1, n + 1)
    return theta, cost
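The two update lines inside the loop implement the textbook batch gradient descent rule, and the tracked cost is the halved mean squared error; written out in LaTeX (with m = X.shape[0], and the theta[0] line being the j = 0 case since x_0^{(i)} = 1):

$$
\theta_j := \theta_j - \frac{\alpha}{m} \sum_{i=1}^{m} \bigl(h_\theta(x^{(i)}) - y^{(i)}\bigr)\, x_j^{(i)}, \qquad
J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} \bigl(h_\theta(x^{(i)}) - y^{(i)}\bigr)^2
$$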

def linear_regression(X, y, alpha, num_iters):
    n = X.shape[1]
    # prepend a column of ones so that theta[0] acts as the intercept (x_0 = 1)
    one_column = np.ones((X.shape[0], 1))
    X = np.concatenate((one_column, X), axis=1)
    # initialize the parameter vector
    theta = np.zeros(n + 1)
    # initial hypothesis calculation
    h = hypothesis(theta, X, n)
    # optimize the parameters with batch gradient descent
    theta, cost = BGD(theta, alpha, num_iters, h, X, y, n)
    return theta, cost
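The per-feature Python loop in BGD is easy to follow but slow. As a point of comparison (not part of the original post), here is a minimal vectorized sketch of the same update; BGD_vectorized is a hypothetical name, and it assumes X already carries the leading bias column:

import numpy as np

def BGD_vectorized(theta, alpha, num_iters, X, y):
    # theta: (n+1,), X: (m, n+1) with a leading bias column, y: (m,)
    m = X.shape[0]
    cost = np.ones(num_iters)
    for i in range(num_iters):
        h = X @ theta                                  # all m hypotheses at once
        theta = theta - (alpha / m) * (X.T @ (h - y))  # simultaneous update of all theta_j
        cost[i] = (0.5 / m) * np.sum((X @ theta - y) ** 2)
    return theta, cost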

data = np.loadtxt('data1.txt', delimiter=',')
X_train = data[:, :7]  # feature set
y_train = data[:, 8]   # label set

# z-score standardization: scale every feature to zero mean and unit variance
mean = np.ones(X_train.shape[1])
std = np.ones(X_train.shape[1])
for i in range(0, X_train.shape[1]):
    mean[i] = np.mean(X_train.transpose()[i])
    std[i] = np.std(X_train.transpose()[i])
    for j in range(0, X_train.shape[0]):
        X_train[j][i] = (X_train[j][i] - mean[i]) / std[i]
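The same standardization can be written with NumPy broadcasting; a drop-in replacement sketch for the loop above (run instead of it, not after it):

mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std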

theta, cost = linear_regression(X_train, y_train, 0.0001, 30000)

# plot the learning curve: the cost should decrease steadily as theta converges
n_iterations = list(range(1, 30001))
plt.plot(n_iterations, cost)
plt.xlabel('No. of iterations')
plt.ylabel('Cost')
plt.show()
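The post ends at the cost curve, but using the fitted model requires applying the same standardization to new inputs and prepending the bias term. A minimal sketch; x_new is a hypothetical raw input, here reusing the first training row as a stand-in:

x_new = data[0, :7]                          # hypothetical new input (7 raw features)
x_scaled = (x_new - mean) / std              # reuse the training-set mean/std
x_full = np.concatenate(([1.0], x_scaled))   # prepend the bias term x_0 = 1
y_pred = float(np.matmul(theta, x_full))     # theta has shape (1, n+1)
print('predicted label:', y_pred)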
