Building a Convolutional Neural Network with PyTorch [Lesson 11: The Learning Mechanism and the Basic Training Architecture]

This post walks through the basic mechanics of training with PyTorch: the broadcasting mechanism, estimating partial derivatives for gradient descent, a basic training loop implemented first with plain Python functions and then with PyTorch, backpropagation, and parameter updates. Using a linear model on temperature data as the running example, it shows how to compute the loss, take derivatives, and update the weight and bias to arrive at a trained model.


Preface

Example code: https://github.com/2012Netsky/pytorch_cnn/blob/main/4_time_series_bikes.ipynb

1. The PyTorch broadcasting mechanism

2. Gradient descent: estimating the partial derivatives of w and b

3. Basic training loop (plain Python implementation)

4. Basic training loop (PyTorch implementation)

5. Backpropagation

6. Parameter updates


#!/usr/bin/env python
# coding: utf-8

# The learning mechanism
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import torch
torch.set_printoptions(edgeitems=2, linewidth=75)

# Training data: t_c are temperatures in °C, t_u are the corresponding readings in unknown units
t_c = [0.5,  14.0, 15.0, 28.0, 11.0,  8.0,  3.0, -4.0,  6.0, 13.0, 21.0]
t_u = [35.7, 55.9, 58.2, 81.9, 56.3, 48.9, 33.9, 21.8, 48.4, 60.4, 68.4]
t_c = torch.tensor(t_c)
t_u = torch.tensor(t_u)

# Linear model: t_p = w * t_u + b
def model(t_u, w, b):
    return w * t_u + b

# Loss definition: mean of squared differences
def loss_fn(t_p, t_c):
    squared_diffs = (t_p - t_c)**2
    return squared_diffs.mean()
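
# Written out, the loss is the mean squared error over the N samples:
#   L(w, b) = (1/N) * Σ_i (t_p_i - t_c_i)²,  with t_p_i = w * t_u_i + b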


# Forward pass
w = torch.ones(())   # 0-d (scalar) tensors for the weight and bias
b = torch.zeros(())
t_p = model(t_u, w, b)
print(t_p, w, b, w.shape, b.shape)

# w1 = torch.ones()   # raises an error: a size argument is required
# b1 = torch.zeros()
# print(w1, b1)

# Compute the loss
loss = loss_fn(t_p, t_c)
loss

# The PyTorch broadcasting mechanism
x = torch.ones(())
y = torch.ones(3,1)
z = torch.ones(1,3)
a = torch.ones(2, 1, 1)
print(f"shapes: x: {x.shape}, y: {y.shape}")
print(f"        z: {z.shape}, a: {a.shape}")
print("x * y:", (x * y).shape)
print("y * z:", (y * z).shape)
print("y * z * a:", (y * z * a).shape)


# Gradient descent: estimate the partial derivative of the loss with
# respect to w using a symmetric difference
delta = 0.1

loss_rate_of_change_w = (
    loss_fn(model(t_u, w + delta, b), t_c) -
    loss_fn(model(t_u, w - delta, b), t_c)) / (2.0 * delta)

# Update w
learning_rate = 1e-2

w = w - learning_rate * loss_rate_of_change_w

# Estimate the partial derivative with respect to b the same way
loss_rate_of_change_b = (
    loss_fn(model(t_u, w, b + delta), t_c) -
    loss_fn(model(t_u, w, b - delta), t_c)) / (2.0 * delta)

# Update b
b = b - learning_rate * loss_rate_of_change_b
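
# Both estimates above are central (symmetric) differences, which approximate
# the true partial derivatives for a small delta:
#   dL/dw ≈ (L(w+δ, b) - L(w-δ, b)) / (2δ)
#   dL/db ≈ (L(w, b+δ) - L(w, b-δ)) / (2δ)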


# Derivative of the loss with respect to the predictions t_p:
# d(mean((t_p - t_c)^2))/dt_p = 2 * (t_p - t_c) / N
def dloss_fn(t_p, t_c):
    dsq_diffs = 2 * (t_p - t_c) / t_p.size(0)  # <1>
    return dsq_diffs


# Partial derivative of the model with respect to w: d(w*t_u + b)/dw = t_u
def dmodel_dw(t_u, w, b):
    return t_u


# Partial derivative of the model with respect to b: d(w*t_u + b)/db = 1
def dmodel_db(t_u, w, b):
    return 1.0


# Gradient: stack dL/dw and dL/db into a single tensor
def grad_fn(t_u, t_c, t_p, w, b):
    dloss_dtp = dloss_fn(t_p, t_c)
    dloss_dw = dloss_dtp * dmodel_dw(t_u, w, b)
    dloss_db = dloss_dtp * dmodel_db(t_u, w, b)
    return torch.stack([dloss_dw.sum(), dloss_db.sum()])  # <1>
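
# grad_fn assembles the chain rule: the derivative of the loss with respect
# to each parameter is dL/dt_p times dt_p/d(parameter), summed over the N samples:
#   dL/dw = Σ_i 2*(t_p_i - t_c_i)/N * t_u_i
#   dL/db = Σ_i 2*(t_p_i - t_c_i)/N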


# 1. Basic training loop (implemented with plain Python functions)
def training_loop(n_epochs, learning_rate, params, t_u, t_c):
    for epoch in range(1, n_epochs + 1):
        w, b = params

        t_p = model(t_u, w, b)  # <1>
        loss = loss_fn(t_p, t_c)
        grad = grad_fn(t_u, t_c, t_p, w, b)  # <2>

        params = params - learning_rate * grad

        print('Epoch %d, Loss %f' % (epoch, float(loss))) # <3>
            
    return params

# 2. Basic training loop (PyTorch implementation, with logging)
def training_loop(n_epochs, learning_rate, params, t_u, t_c,
                  print_params=True):
    for epoch in range(1, n_epochs + 1):
        w, b = params

        # Forward pass ---------------------
        # Predict
        t_p = model(t_u, w, b)  # <1>

        # Compute the loss, used to monitor convergence
        loss = loss_fn(t_p, t_c)

        # Backward pass --------------------
        # Compute the gradient
        grad = grad_fn(t_u, t_c, t_p, w, b)  # <2>
        # Update the parameters
        params = params - learning_rate * grad

        if epoch in {1, 2, 3, 10, 11, 99, 100, 4000, 5000}:  # <3>
            print('Epoch %d, Loss %f' % (epoch, float(loss)))
            if print_params:
                print('    Params:', params)
                print('    Grad:  ', grad)
        if epoch in {4, 12, 101}:
            print('...')

        # Stop early if the loss diverges to inf/nan
        if not torch.isfinite(loss).all():
            break  # <3>

    return params


# Run the training (unnormalized input, small learning rate)
training_loop(
    n_epochs = 100, 
    learning_rate = 1e-4, 
    params = torch.tensor([1.0, 0.0]), 
    t_u = t_u, 
    t_c = t_c)
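
# Illustrative run, not in the original post: with the unnormalized t_u, a
# learning rate of 1e-2 overshoots, the loss grows without bound, and the
# loop should stop early at the isfinite() check.
training_loop(
    n_epochs = 100,
    learning_rate = 1e-2,
    params = torch.tensor([1.0, 0.0]),
    t_u = t_u,
    t_c = t_c)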


# Normalize the input: scaling t_u by 0.1 puts the gradients for w and b on
# a similar scale, so the larger learning rate below (1e-2) can converge
t_un = 0.1 * t_u

training_loop(
    n_epochs = 100, 
    learning_rate = 1e-2, 
    params = torch.tensor([1.0, 0.0]), 
    t_u = t_un, # <1>
    t_c = t_c)


params = training_loop(
    n_epochs = 5000, 
    learning_rate = 1e-2, 
    params = torch.tensor([1.0, 0.0]), 
    t_u = t_un, 
    t_c = t_c,
    print_params = False)

params
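
# For comparison, a minimal sketch (not in the original post) that lets
# PyTorch's autograd compute the same gradients instead of grad_fn:
# requires_grad=True records operations on params_ag, so loss.backward()
# fills params_ag.grad with dL/dw and dL/db.
params_ag = torch.tensor([1.0, 0.0], requires_grad=True)
learning_rate = 1e-2

for epoch in range(1, 5001):
    if params_ag.grad is not None:
        params_ag.grad.zero_()        # gradients accumulate, so clear them each step
    t_p = model(t_un, *params_ag)     # forward pass on the normalized input
    loss = loss_fn(t_p, t_c)
    loss.backward()                   # backward pass populates params_ag.grad
    with torch.no_grad():             # update the leaf tensor outside autograd
        params_ag -= learning_rate * params_ag.grad

params_ag                             # should be close to the hand-derived params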

# Plot the model's predictions together with the training data
get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib import pyplot as plt

t_p = model(t_un, *params)  # <1>

fig = plt.figure(dpi=600)
plt.xlabel("Temperature (°Fahrenheit)")
plt.ylabel("Temperature (°Celsius)")
plt.plot(t_u.numpy(), t_p.detach().numpy()) # <2>
plt.plot(t_u.numpy(), t_c.numpy(), 'o')
plt.savefig("temp_unknown_plot.png", format="png")  # bookskip


get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib import pyplot as plt

fig = plt.figure(dpi=600)
plt.xlabel("Measurement")
plt.ylabel("Temperature (°Celsius)")
plt.plot(t_u.numpy(), t_c.numpy(), 'o')

plt.savefig("temp_data_plot.png", format="png")

(Figures: the fitted line plotted over the training data, saved as temp_unknown_plot.png, and the raw measurements alone, saved as temp_data_plot.png.)

Summary

Starting from a linear model and a numerical estimate of the gradient, we derived the analytic gradients via the chain rule, built a basic training loop, and saw how normalizing the input lets a larger learning rate converge.
