[PyTorch Tutorial] P23 Loss Functions


  • A loss function computes the gap between the output and the target, and it indicates the direction for optimization.

  • Pay attention to the input and target shapes.

  • The MSE loss function is also covered.

  • The MAE (L1Loss) results.

  • Gradient descent (a minimal one-step sketch follows this list).

  • Inspecting the gradients produced by backpropagation.
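To make the gradient-descent bullet concrete, here is a minimal one-parameter sketch (the starting value, data point, and learning rate are invented for illustration): backward() computes the gradient of the loss, and the parameter is nudged a small step against it.

import torch

# Hypothetical example: fit w so that w * x ≈ y, starting from w = 2.0.
w = torch.tensor([2.0], requires_grad=True)
x, y = torch.tensor([3.0]), torch.tensor([9.0])

loss = ((w * x - y) ** 2).sum()  # squared error, minimized at w = 3.0
loss.backward()                  # backpropagation fills w.grad (here: -18.0)
with torch.no_grad():
    w -= 0.01 * w.grad           # one gradient-descent step, against the gradient
    w.grad.zero_()               # clear the gradient before the next step
print(w)                         # tensor([2.1800], requires_grad=True): moved toward 3.0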

Runnable code 1

#!/usr/bin/env python3
# -*- coding:utf-8 -*-

"""
author: 24nemo
date:   2021-07-07
"""

import torch
from torch import nn
from torch.nn import L1Loss


inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)


inputs = torch.reshape(inputs, (1, 1, 1, 3))  # (batch_size, channels, rows, cols) = (1, 1, 1, 3)
targets = torch.reshape(targets, (1, 1, 1, 3))


loss = L1Loss(reduction='sum')  # sum the absolute errors instead of averaging them
result = loss(inputs, targets)


loss_mse = nn.MSELoss()  # mean squared error; default reduction='mean'
result_mse = loss_mse(inputs, targets)


print(result)
print(result_mse)
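# Checked by hand: the absolute errors are [0, 0, 2], so L1Loss(reduction='sum')
# prints tensor(2.); the squared errors are [0, 0, 4], so MSELoss (which averages
# by default) prints tensor(1.3333).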


x = torch.tensor([0.1, 0.2, 0.3])  # raw scores (logits) for 3 classes
y = torch.tensor([1])              # index of the true class
x = torch.reshape(x, (1, 3))       # CrossEntropyLoss expects (batch_size, num_classes)
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)
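# Sanity check: nn.CrossEntropyLoss combines log-softmax with negative
# log-likelihood, so for a single sample the loss is -x[y] + log(sum(exp(x))).
import math
manual = -0.2 + math.log(math.exp(0.1) + math.exp(0.2) + math.exp(0.3))
print(manual)  # ≈ 1.1019, matching result_cross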


'''
When using L1Loss, pay close attention to the shapes; the input and target sizes must match:
Input:  (N, *) where * means any number of additional dimensions
Target: (N, *), same shape as the input
'''

import torch
from torch import nn
from torch.nn import L1Loss


inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)


inputs = torch.reshape(inputs, (1, 1, 1, 3))  # (batch_size, channels, rows, cols) = (1, 1, 1, 3)
targets = torch.reshape(targets, (1, 1, 1, 3))


loss = L1Loss()  # default reduction='mean': here (0 + 0 + 2) / 3 ≈ 0.6667
result = loss(inputs, targets)
print("result:", result)


'''
MSELoss
'''


loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs, targets)

print("result_mse:", result_mse)

'''
Cross-entropy loss, used for classification problems.

The video covers this part in good detail.
'''
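Both L1Loss and MSELoss take a reduction argument: the first listing passes reduction='sum', the second relies on the default 'mean'. A quick sketch of all three modes, reusing the same inputs and targets:

import torch
from torch.nn import L1Loss

inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)

print(L1Loss(reduction='none')(inputs, targets))  # tensor([0., 0., 2.]): per-element errors
print(L1Loss(reduction='mean')(inputs, targets))  # tensor(0.6667): the default
print(L1Loss(reduction='sum')(inputs, targets))   # tensor(2.)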

Runnable code 2

#!/usr/bin/env python3
# -*- coding:utf-8 -*-

"""
author: 24nemo
date:   2021-07-07
"""

'''
# The same network written with nn.Sequential, kept commented out for reference:
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)

dataloader = DataLoader(dataset, batch_size=1)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss = nn.CrossEntropyLoss()
tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)
    result_loss.backward()
    print("ok")
'''
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Linear, Flatten
from torch.utils.data import DataLoader

'''
How to use a loss function in the network built in the earlier lessons
'''

dataset = torchvision.datasets.CIFAR10("../dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)

dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.conv1 = Conv2d(3, 32, 5, padding=2)
        self.maxpool1 = MaxPool2d(2)
        self.conv2 = Conv2d(32, 32, 5, padding=2)
        self.maxpool2 = MaxPool2d(2)
        self.conv3 = Conv2d(32, 64, 5, padding=2)
        self.maxpool3 = MaxPool2d(2)
        self.flatten = Flatten()
        self.linear1 = Linear(64 * 4 * 4, 64)  # three 2x2 poolings shrink 32x32 CIFAR images to 4x4, with 64 channels
        self.linear2 = Linear(64, 10)

    def forward(self, m):
        m = self.conv1(m)
        m = self.maxpool1(m)
        m = self.conv2(m)
        m = self.maxpool2(m)
        m = self.conv3(m)
        m = self.maxpool3(m)
        m = self.flatten(m)
        m = self.linear1(m)
        m = self.linear2(m)
        return m


loss = nn.CrossEntropyLoss()  # define the loss function
tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    print("outputs:", outputs)
    print("targets:", targets)
    result_loss = loss(outputs, targets)  # call the loss function
    result_loss.backward()  # backpropagation; note: backward() is called on result_loss (the computed loss value), not on the loss module defined above
    print("OK")  # in the debugger, grad only gets a value after backward() has run; see the 2023-07-18 addition below

Added 2023-07-18

  • This part uses the debugger to watch how the grad attribute changes before and after the loss.backward() call.
  • The grad attribute is not easy to find; it sits deep in the debugger's variable view. The debugging code is attached below.
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
import torchvision
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10(root='data', train=False, transform=torchvision.transforms.ToTensor(), download=True)

dataloader = DataLoader(dataset, batch_size=2)



class Nemo(nn.Module):
    def __init__(self):
        super().__init__()
        self.model1 = Sequential(
            Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            MaxPool2d(2),
            Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            MaxPool2d(2),
            Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x

loss = nn.CrossEntropyLoss()


nemo = Nemo()

for data in dataloader:
    imgs, targets = data
    outputs = nemo(imgs)
    print("输出:", outputs)
    print("标签:", targets)
    result_loss = loss(outputs, targets)
    result_loss.backward()

    print(result_loss)
    print("OK")

Full table of contents
