# Ordinary least squares (OLS) fitted by batch gradient descent

import numpy as np
import time
import matplotlib.pyplot as plt

# --- Synthetic data: y = x . [1, 5, 9]^T + N(0, 1) noise ----------------
rng = np.random.RandomState(1234)   # fixed seed for reproducibility
x = rng.rand(100, 3)                # 100 samples, 3 features in [0, 1)

y = x.dot([[1], [5], [9]])          # true coefficients: 1, 5, 9
err = rng.randn(100, 1)             # Gaussian observation noise
y = y + err

# Module-level sizes used by the functions below.
n_sample, n_feature = x.shape

print(n_sample, n_feature)
plt.plot(x, y, ".b")


#################  Model  ################
def model(X, theta):
    """Linear model prediction X @ theta, returned as an (n_samples, 1) column.

    WARNING: this reshapes ``theta`` IN PLACE to ``(n_features, 1)`` —
    the caller's array is mutated. OLS() below relies on this side effect
    when theta starts out as a flat 1-D array, so it is kept intact here.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    theta : ndarray with n_features elements (reshaped in place)

    Returns
    -------
    ndarray of shape (n_samples, 1)
    """
    # Use X's own column count instead of the module-level global
    # n_feature, so the function works on any conforming input.
    theta.shape = (X.shape[1], 1)
    return X.dot(theta)


################  Cost function  #########
def cost(X, y, theta):
    """Half mean squared error: (1 / 2n) * ||X theta - y||^2.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    y : ndarray of shape (n_samples, 1)
    theta : ndarray with n_features elements (reshaped in place by model)

    Returns
    -------
    ndarray of shape (1, 1) holding the scalar cost.
    """
    # Compute the residual once — the original called model() twice,
    # doing the matrix product (and theta mutation) redundantly.
    residual = model(X, theta) - y
    # X.shape[0] replaces the module-level global n_sample.
    return (1.0 / (2 * X.shape[0])) * residual.T.dot(residual)


###############  Gradients  ################
def gradient(X, y, theta):
    """Gradient of cost() w.r.t. theta: (1/n) * X^T (X theta - y).

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    y : ndarray of shape (n_samples, 1)
    theta : ndarray with n_features elements (reshaped in place by model)

    Returns
    -------
    ndarray of shape (n_features, 1)
    """
    # The original allocated np.zeros(...) only to overwrite it, and kept
    # a commented-out per-feature loop; both removed. X.shape[0] replaces
    # the module-level global n_sample.
    return (1.0 / X.shape[0]) * X.T.dot(model(X, theta) - y)


############# update_theta  ###############
def update_theta(theta, grad, sigma):
    """Take one gradient-descent step: theta <- theta - sigma * grad."""
    step = sigma * grad
    return theta - step


############ stop_strategy #################
def stop_strategy(cost, update_cost, threshold):
    """Return True when the cost improvement drops below ``threshold``.

    NOTE(review): the parameter name ``cost`` shadows the module-level
    cost() function inside this body; kept as-is to preserve the
    keyword-argument interface.
    """
    improvement = cost - update_cost
    return improvement < threshold


################# OLS ######################
def OLS(X, y, sigma, threshold):
    """Fit linear-regression coefficients by batch gradient descent.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    y : ndarray of shape (n_samples, 1)
    sigma : float
        Learning rate.
    threshold : float
        Stop when the per-step cost improvement falls below this value.

    Returns
    -------
    (theta, counter) : the fitted (n_features, 1) coefficient column and
        the number of completed update steps.
    """
    # Start from [1, 2, ..., n_features] as a float column vector — the
    # same starting point as the original hard-coded [1, 2, 3] for three
    # features, but generalized and no longer relying on model() to
    # reshape a flat int array in place.
    theta = np.arange(1, X.shape[1] + 1, dtype=float).reshape(-1, 1)
    counter = 0
    while True:
        J = cost(X, y, theta)
        grad = gradient(X, y, theta)   # debug print(grad) removed
        theta_new = update_theta(theta, grad, sigma)
        J_update = cost(X, y, theta_new)
        # Keep the original convergence semantics: return the theta from
        # BEFORE the final (sub-threshold) update.
        if stop_strategy(J, J_update, threshold):
            break
        theta = theta_new
        counter += 1
    return theta, counter


# Fit the model on the synthetic data and report the result.
theta, counter = OLS(x, y, 0.2, 0.0001)
print(theta)
print(counter)


# (CSDN web-page boilerplate — comment/payment-widget text — removed; it
# was scraper residue, not part of the article or the code.)