1.常见的损失函数
1.1 L1Loss
1.2 MSELoss
1.3 CrossEntropyLoss
1.4 代码实战
# --- Loss function demos: L1, MSE, and cross-entropy ---
from torch import nn
import torch

# Predictions and ground truth, reshaped into a 4-D (N, C, H, W) layout.
input_tensor = torch.tensor([1, 2, 3], dtype=torch.float)
target_tensor = torch.tensor([1, 2, 5], dtype=torch.float)
inputs = input_tensor.reshape(1, 1, 1, 3)
targets = target_tensor.reshape(1, 1, 1, 3)

# L1 loss with 'sum' reduction: |1-1| + |2-2| + |3-5| = 2.0
result = nn.L1Loss(reduction='sum')(inputs, targets)
print(result)

# Mean squared error (default 'mean' reduction): (0 + 0 + 4) / 3
result_MSE = nn.MSELoss()(inputs, targets)
print(result_MSE)

# Cross-entropy takes raw logits of shape (batch, classes) plus class indices.
x = torch.tensor([0.1, 0.2, 0.3]).reshape(1, 3)
y = torch.tensor([1])
result_cross = nn.CrossEntropyLoss()(x, y)
print(result_cross)
2.反向传播
2.1 梯度下降
2.2 代码实战
from torch import nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
class My_Module(nn.Module):
    """Small CNN for CIFAR-10-sized inputs.

    Maps a (N, 3, 32, 32) batch to (N, 10) raw class scores via three
    conv/pool stages and two fully connected layers.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Spatial size halves at each pool: 32 -> 16 -> 8 -> 4, so the
        # flattened feature vector is 64 channels * 4 * 4 = 1024 values.
        stages = [
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),
            nn.Linear(in_features=1024, out_features=64),
            nn.Linear(in_features=64, out_features=10),
        ]
        self.seq = nn.Sequential(*stages)

    def forward(self, x):
        """Run a batch through the layer stack and return the logits."""
        return self.seq(x)
# Demo: run CIFAR-10 images through the network, compute the
# cross-entropy loss, and backpropagate to populate parameter gradients.
my_module = My_Module()
loss = nn.CrossEntropyLoss()
# train=False selects the evaluation split, matching the variable name;
# the original omitted it and silently used the training split (the
# CIFAR10 constructor defaults to train=True).
test_dataset = datasets.CIFAR10(root="datasets", train=False,
                                transform=transforms.ToTensor(), download=True)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=True, drop_last=False)
for data in test_dataloader:
    imgs, targets = data
    outputs = my_module(imgs)
    result_loss = loss(outputs, targets)
    # backward() fills .grad on every parameter; with no optimizer step and
    # no zero_grad(), gradients accumulate across iterations (demo only).
    result_loss.backward()