Chapter 2: Regression Problems in PyTorch

1. Gradient Descent Algorithm

  • Used during backpropagation to find the optimal values of the weight matrix (see the sketch below).
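
A minimal sketch of a single update step (plain NumPy; the numbers are illustrative, not from the original post): each parameter moves against its gradient, scaled by the learning rate.

import numpy as np

# one gradient-descent step: w' = w - lr * dL/dw
w = np.array([0.5, -0.3])      # current weights (illustrative values)
grad = np.array([0.2, -0.1])   # gradient of the loss w.r.t. w (illustrative)
lr = 0.01                      # learning rate
w = w - lr * grad              # move against the gradient, i.e. downhill
print(w)                       # [ 0.498 -0.299]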

2. Linear Regression vs. Logistic Regression

  • The main difference is that logistic regression applies an activation function to confine the output to a bounded range (e.g. [0, 1] or [-1, 1]); see the sketch below.
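
A minimal sketch using PyTorch's built-in activations: sigmoid squashes any real input into (0, 1), and tanh into (-1, 1).

import torch

x = torch.linspace(-10, 10, steps=5)   # a few sample inputs across a wide range
print(torch.sigmoid(x))                # every value lies in (0, 1)
print(torch.tanh(x))                   # every value lies in (-1, 1)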

3. Linear Regression in Practice

# -*- coding: UTF-8 -*-
'''
@version: 1.0
@PackageName: pytorch_learning - regression_demo.py
@author: yonghao
@Description: Linear regression implementation
@since 2021/01/24 21:29
'''
import numpy as np


def compute_error_for_line_given_points(w, b, points) -> float:
    '''
    Compute the mean squared error over the sample points
    :param w: weight (slope)
    :param b: bias (intercept)
    :param points: sample points
    :return: mean loss over all points
    '''
    total_error = 0
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        total_error += (y - (w * x + b)) ** 2
    return total_error / float(len(points))


def step_gradient(b_current, w_current, points, learning_rate):
    '''
    Compute the gradients over the sample and take one descent step
    :param b_current: current intercept of the fitted line
    :param w_current: current slope of the fitted line
    :param points: sample points
    :param learning_rate: learning rate
    :return: parameters after one gradient-descent step
    '''
    b_gradient = 0
    w_gradient = 0
    N = float(len(points))
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        # dL/db = -(2/N) * sum(y - (w*x + b))
        b_gradient += -(2 / N) * (y - (w_current * x + b_current))
        # dL/dw = -(2/N) * sum(x * (y - (w*x + b)))
        w_gradient += -(2 / N) * (y - (w_current * x + b_current)) * x
    new_w = w_current - learning_rate * w_gradient
    new_b = b_current - learning_rate * b_gradient
    return new_w, new_b


def gradient_descent_runner(points, starting_b, starting_w, learning_rate, num_iterations):
    '''
    Gradient descent loop
    :param points: sample points
    :param starting_b: initial b
    :param starting_w: initial w
    :param learning_rate: learning rate
    :param num_iterations: number of iterations
    :return: the fitted weight and bias
    '''
    w = starting_w
    b = starting_b
    for i in range(num_iterations):
        w, b = step_gradient(b, w, points, learning_rate)
    return w, b


def run():
    # randomly generated (x, y) pairs; replace with real data as needed
    points = np.random.uniform(0, 5, (100, 2))
    learning_rate = 0.0001
    initial_b = 0
    initial_w = 0
    num_iterations = 1000
    print('Starting gradient descent at b = {}, w = {}, error = {}'
          .format(initial_b, initial_w,
                  compute_error_for_line_given_points(initial_w, initial_b, points)))
    w, b = gradient_descent_runner(points, initial_b, initial_w,
                                   learning_rate, num_iterations)
    print('After gradient descent at b = {}, w = {}, error = {}'
          .format(b, w,
                  compute_error_for_line_given_points(w, b, points)))


if __name__ == '__main__':
    run()
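
As a sanity check (my addition, not part of the original post), the gradient-descent result can be compared against NumPy's closed-form least-squares fit on the same points; with enough iterations the two should roughly agree.

import numpy as np

points = np.random.uniform(0, 5, (100, 2))
# a degree-1 polyfit returns the coefficients [slope, intercept], i.e. [w, b]
w_ls, b_ls = np.polyfit(points[:, 0], points[:, 1], deg=1)
print('least squares: w = {}, b = {}'.format(w_ls, b_ls))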

4. Classification

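In a classification problem, the continuous regression target is replaced by a discrete class label. A common recipe, and the one used in the code of the next section, is to encode each label as a one-hot vector for the loss and to recover the predicted class with argmax. A minimal sketch (torch.nn.functional.one_hot is standard PyTorch; the sample labels and the random "network outputs" are illustrative):

import torch
import torch.nn.functional as F

y = torch.tensor([3, 0, 7])                       # integer class labels
y_onehot = F.one_hot(y, num_classes=10).float()   # [3, 10] one-hot targets
logits = torch.randn(3, 10)                       # stand-in network outputs
pred = logits.argmax(dim=1)                       # hard class prediction per sample
print(y_onehot.shape, pred)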


5. Hands-on Handwritten Digit Recognition


# -*- coding: UTF-8 -*-
'''
@version: 1.0
@PackageName: pytorch_learning - classification.py
@author: yonghao
@Description: Hands-on handwritten digit classification
@since 2021/01/24 22:38
'''
import torch
from torch import nn
from torch.nn import functional as F
from torch import optim

import torchvision
from matplotlib import pyplot as plt
from realwork.work2_handwrite_classification.utils import plot_curve, plot_image, one_hot

batch_size = 512

# step 1: load the dataset
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size, shuffle=True
)
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size, shuffle=False)
# quick check of the dataset
# x, y = next(iter(train_loader))
# print(x.shape, y.shape)
# print(test_loader)
# plot_image(x, y, 'image sample')
print("train batches = {}".format(len(train_loader)))
print("test batches = {}".format(len(test_loader)))


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # each layer computes xw + b
        self.fc1 = nn.Linear(28 * 28, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 10)  # 10 classes, so the last layer must output 10

    def forward(self, x):
        # x: [b, 1, 28, 28]
        # h1 = relu(x @ w1 + b1)
        x = F.relu(self.fc1(x))
        # h2 = relu(h1 @ w2 + b2)
        x = F.relu(self.fc2(x))
        # h3 = h2 @ w3 + b3 (raw scores, no activation)
        x = self.fc3(x)
        return x


# create the network
net = Net()
# SGD optimizer over all parameters: [w1, b1, w2, b2, w3, b3]
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
# record the loss values during training
loss_values = []
for epoch in range(3):
    for batch_idx, (x, y) in enumerate(train_loader):
        # x: [b, 1, 28, 28], y: [b]
        # flatten x: [b, 1, 28, 28] -> [b, 784]
        x = x.view(x.size(0), x.size(2) * x.size(3))
        # => [b, 10]
        out = net(x)
        # [b, 10]
        y_onehot = one_hot(y)
        # loss = mse(out, y_onehot)
        loss = F.mse_loss(out, y_onehot)

        optimizer.zero_grad()
        loss.backward()
        # w' = w - lr * grad
        optimizer.step()
        loss_values.append(loss.item())
        # print the loss every 10 batches
        # if batch_idx % 10 == 0:
        #     print(epoch, batch_idx, loss.item())

# we now have the optimized [w1, b1, w2, b2, w3, b3]
# plot the loss curve
# plot_curve(loss_values)

# measure accuracy on the test set
total_correct = 0
for x, y in test_loader:
    x = x.view(x.size(0), 28 * 28)
    out = net(x)
    # out: [b, 10] => pred: [b]
    # argmax converts the soft scores into a hard class index,
    # matching the format of the ground-truth labels
    pred = out.argmax(dim=1)
    # count the correct predictions in this batch
    correct = pred.eq(y).sum().float().item()
    total_correct += correct

total_dataset = len(test_loader.dataset)
acc = total_correct / total_dataset
print("test acc: {}%".format(acc * 100))

# visualize predictions on one test batch
x, _ = next(iter(test_loader))
out = net(x.view(x.size(0), 28 * 28))
pred = out.argmax(dim=1)
plot_image(x, pred, "test")
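
The plot_curve, plot_image, and one_hot helpers are imported from the author's local utils module, which the post does not show. A minimal sketch of what one_hot presumably does, judging from how it is called above (my reconstruction, not the original file):

import torch

def one_hot(label, depth=10):
    # hypothetical reimplementation of utils.one_hot:
    # build a [b, depth] matrix of zeros and scatter a 1
    # into the column given by each integer label
    out = torch.zeros(label.size(0), depth)
    idx = label.view(-1, 1)
    out.scatter_(dim=1, index=idx, value=1)
    return out

As a design note, training with MSE on one-hot targets works here, but cross-entropy on the raw outputs (F.cross_entropy(out, y)) is the more common choice for classification.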