[TensorFlow] Linear Model in Practice

Ground-truth linear model: y = 1.477 * x + 0.089

 

1. Sampling the data

The noise eps is drawn from a Gaussian distribution with mean 0 and variance 0.01 (standard deviation 0.1); the inputs x are drawn from a uniform distribution over the interval [-10, 10]. We collect n = 100 samples:

import numpy as np

data = []
for i in range(100):
    x = np.random.uniform(-10., 10.)    # uniform distribution over [-10, 10]
    eps = np.random.normal(0., 0.1)     # Gaussian noise, mean 0, std 0.1
    y = 1.477 * x + 0.089 + eps
    data.append([x, y])                 # store the sample point
data = np.array(data)                   # convert to a 2D NumPy array
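The loop above can also be written in vectorized form. A minimal sketch producing an equivalent (100, 2) array (data_vec is an illustrative name, not from the original):

x = np.random.uniform(-10., 10., size=100)                  # 100 uniform samples at once
eps = np.random.normal(0., 0.1, size=100)                   # matching Gaussian noise
data_vec = np.stack([x, 1.477 * x + 0.089 + eps], axis=1)   # shape (100, 2)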

 

2. Computing the error

Loop over every point, accumulate the squared difference between the predicted value and the true value, then average to obtain the mean squared error (MSE) loss on the training set.

def mse(b, w, points):
    totalError = 0
    for i in range(0, len(points)):
        x = points[i, 0]    # input x of the i-th point
        y = points[i, 1]    # target y of the i-th point
        totalError += (y - (w * x + b)) ** 2
    return totalError / float(len(points))    # mean squared error
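For reference, the same loss can be computed without an explicit loop. A one-line vectorized sketch (mse_vec is an illustrative name):

def mse_vec(b, w, points):
    # mean of squared residuals over all points, in one vectorized step
    return np.mean((points[:, 1] - (w * points[:, 0] + b)) ** 2)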

 

3. Computing the gradient

With the loss L = (1/M) * sum_i ((w * x_i + b) - y_i)^2, the partial derivatives are dL/db = (2/M) * sum_i ((w * x_i + b) - y_i) and dL/dw = (2/M) * sum_i x_i * ((w * x_i + b) - y_i), which is exactly what the loop below accumulates:

def step_gradient(b_current, w_current, points, lr):
    b_gradient = 0
    w_gradient = 0
    M = float(len(points))    # total number of samples
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        b_gradient += (2 / M) * ((w_current * x + b_current) - y)        # dL/db
        w_gradient += (2 / M) * x * ((w_current * x + b_current) - y)    # dL/dw
    new_b = b_current - (lr * b_gradient)    # gradient-descent update for b
    new_w = w_current - (lr * w_gradient)    # gradient-descent update for w
    return (new_b, new_w)
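The same update can be expressed without the inner loop. A vectorized sketch (step_gradient_vec is an illustrative name); note that 2 * np.mean(err) equals (2/M) * sum(err):

def step_gradient_vec(b_current, w_current, points, lr):
    x, y = points[:, 0], points[:, 1]
    err = (w_current * x + b_current) - y    # residuals for all points at once
    b_gradient = 2 * np.mean(err)            # dL/db
    w_gradient = 2 * np.mean(x * err)        # dL/dw
    return b_current - lr * b_gradient, w_current - lr * w_gradient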

 

4. Gradient update

Update the weight w and bias b iteratively, using gradient descent to find the w and b that minimize the mean squared error:

def gradient_descent(points, starting_b, starting_w, lr, num_iterations):
    b = starting_b
    w = starting_w
    for step in range(num_iterations):
        b, w = step_gradient(b, w, np.array(points), lr)    # one update step
        loss = mse(b, w, points)                            # track current loss
        if step % 50 == 0:
            print(f"iteration: {step}, loss: {loss}, w: {w}, b: {b}")
    return [b, w]
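As a sanity check (not in the original walkthrough), the fitted parameters can be compared against the closed-form least-squares solution via np.polyfit, which returns coefficients from highest degree down:

w_ls, b_ls = np.polyfit(data[:, 0], data[:, 1], deg=1)    # degree-1 least-squares fit
print(f"closed-form: w = {w_ls:.4f}, b = {b_ls:.4f}")     # should be close to 1.477 and 0.089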

 

5. Main training routine

if __name__ == '__main__':
    lr = 0.01           # learning rate
    initial_b = 0       # initial bias
    initial_w = 0       # initial weight
    num_iterations = 1000
    [b, w] = gradient_descent(data, initial_b, initial_w, lr, num_iterations)
    loss = mse(b, w, data)
    print(f'Final loss: {loss}, w: {w}, b: {b}')

 

The complete code:

import numpy as np
import matplotlib.pyplot as plt

data = []
for i in range(100):
    x = np.random.uniform(-10., 10.)  # uniform distribution over [-10, 10]
    eps = np.random.normal(0., 0.1)   # Gaussian noise, mean 0, std 0.1
    y = 1.477 * x + 0.089 + eps
    data.append([x, y])               # store the sample point
data = np.array(data)                 # convert to a 2D NumPy array


def mse(b, w, points):
    totalError = 0
    for i in range(0, len(points)):
        x = points[i, 0]  # input x of the i-th point
        y = points[i, 1]  # target y of the i-th point
        totalError += (y - (w * x + b)) ** 2
    return totalError / float(len(points))  # mean squared error


def step_gradient(b_current, w_current, points, lr):
    b_gradient = 0
    w_gradient = 0
    M = float(len(points))  # total number of samples
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        b_gradient += (2 / M) * ((w_current * x + b_current) - y)      # dL/db
        w_gradient += (2 / M) * x * ((w_current * x + b_current) - y)  # dL/dw
    new_b = b_current - (lr * b_gradient)
    new_w = w_current - (lr * w_gradient)
    return (new_b, new_w)


def gradient_descent(points, starting_b, starting_w, lr, num_iterations):
    b = starting_b
    w = starting_w
    losses = []
    epochs = []
    for step in range(num_iterations):
        b, w = step_gradient(b, w, np.array(points), lr)
        loss = mse(b, w, points)
        if step % 50 == 0:
            print(f"iteration: {step}, loss: {loss}, w: {w}, b: {b}")
            losses.append(loss)
            epochs.append(step)
    # plot the loss curve once, after training finishes
    plt.plot(epochs, losses)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.title('MSE loss')
    plt.show()
    return [b, w]


if __name__ == '__main__':
    lr = 0.01
    initial_b = 0
    initial_w = 0
    num_iterations = 1000
    [b, w] = gradient_descent(data, initial_b, initial_w, lr, num_iterations)
    loss = mse(b, w, data)
    print(f'Final loss: {loss}, w: {w}, b: {b}')
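Since the title mentions TensorFlow but the walkthrough above uses plain NumPy, here is a minimal sketch of the same fit using automatic differentiation, assuming TensorFlow 2.x (this version is not part of the original code):

import tensorflow as tf

# reuse the (100, 2) data array built above
x = tf.constant(data[:, 0], dtype=tf.float32)
y = tf.constant(data[:, 1], dtype=tf.float32)

w = tf.Variable(0.0)
b = tf.Variable(0.0)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

for step in range(1000):
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(y - (w * x + b)))  # MSE loss
    grads = tape.gradient(loss, [w, b])                    # dL/dw, dL/db
    optimizer.apply_gradients(zip(grads, [w, b]))          # gradient-descent update
    if step % 50 == 0:
        print(f"step {step}: loss = {loss.numpy():.6f}, w = {w.numpy():.4f}, b = {b.numpy():.4f}")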

 
