1. Diagram
2. L1Loss and MSELoss
import torch
from torch import nn
from torch.nn import L1Loss

# A small "prediction" and "target" so the two losses have something to compare
input = torch.tensor([1, 2, 3], dtype=torch.float)
target = torch.tensor([1, 2, 5], dtype=torch.float)
input = torch.reshape(input, (1, 1, 1, 3))
target = torch.reshape(target, (1, 1, 1, 3))

loss = L1Loss(reduction="sum")    # sum of absolute differences instead of the default mean
loss2 = nn.MSELoss()              # mean squared error with the default "mean" reduction

result = loss(input, target)      # |1-1| + |2-2| + |3-5| = 2
resultmse = loss2(input, target)  # (0 + 0 + 4) / 3 ≈ 1.333

print(result)
print(resultmse)
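
As a sanity check, both numbers can be reproduced by hand with elementwise tensor ops. A minimal sketch, assuming the same input and target values as above:

import torch

input = torch.tensor([1, 2, 3], dtype=torch.float)
target = torch.tensor([1, 2, 5], dtype=torch.float)

# L1 with reduction="sum": |1-1| + |2-2| + |3-5| = 2
l1_sum = torch.sum(torch.abs(input - target))
# MSE with the default "mean" reduction: (0 + 0 + 4) / 3
mse_mean = torch.mean((input - target) ** 2)

print(l1_sum)    # tensor(2.)
print(mse_mean)  # tensor(1.3333)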
3. Cross-Entropy Loss
x = torch.tensor([0.1, 0.2, 0.3])  # raw scores (logits) for 3 classes
y = torch.tensor([1])              # the true class index
x = torch.reshape(x, (1, 3))       # CrossEntropyLoss expects shape (batch_size, num_classes)

loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)    # -x[1] + log(exp(0.1) + exp(0.2) + exp(0.3)) ≈ 1.1019
print(result_cross)
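
nn.CrossEntropyLoss combines log-softmax with negative log-likelihood, so the same value can be reproduced manually. A minimal sketch, assuming the same x and y as above:

import torch

x = torch.tensor([0.1, 0.2, 0.3]).reshape(1, 3)  # logits for 3 classes
y = torch.tensor([1])                            # true class index

# log-softmax: subtract log(sum(exp(x))) from each logit
log_probs = x - torch.logsumexp(x, dim=1, keepdim=True)
manual = -log_probs[0, y.item()]   # negative log-probability of the true class
print(manual)                      # ≈ 1.1019, matches result_cross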
4. Testing with the Network from the Previous Lesson
import torchvision
from torch import nn
from torch.utils.data import DataLoader

# CIFAR-10 test set as tensors; batch_size=1 so the loss is printed for each image
dataset = torchvision.datasets.CIFAR10("./data", train=False, transform=torchvision.transforms.ToTensor(), download=True)
data_loader = DataLoader(dataset=dataset, batch_size=1)

class Tudui(nn.Module):
    def __init__(self):
        super().__init__()
        # Same CIFAR-10 network as the previous lesson: 3x32x32 image -> 10 class scores
        self.model1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),          # 64 x 4 x 4 = 1024 features
            nn.Linear(1024, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x

loss = nn.CrossEntropyLoss()
tudui = Tudui()
for data in data_loader:
    imgs, targets = data
    outputs = tudui(imgs)            # shape (1, 10): raw scores for the 10 classes
    result = loss(outputs, targets)  # cross-entropy between the scores and the true label
    print(result)
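
The loss value by itself does not train anything; it becomes useful once backward() is called and gradients flow into the network's parameters (this anticipates the optimizer lesson). A minimal sketch, reusing the tudui, loss, and data_loader defined above:

# Call backward() on the loss so gradients are computed for every parameter
# of the network (an optimizer would then use them to update the weights).
for data in data_loader:
    imgs, targets = data
    outputs = tudui(imgs)
    result = loss(outputs, targets)
    result.backward()   # gradients are now stored in each parameter's .grad
    break               # one batch is enough for a demonstration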