pytorch学习中的基础笔记(二)

本文介绍了PyTorch中常用的损失函数,如L1Loss、MSELoss和CrossEntropyLoss,并展示了它们的使用。接着讨论了优化器的作用,以SGD为例展示了训练过程。此外,还探讨了预训练模型的使用和修改,以及模型的保存与加载。最后,给出了一个完整的模型训练流程,包括数据加载、模型定义、损失计算、优化更新和模型保存。文章还涉及GPU训练和模型验证的方法。
摘要由CSDN通过智能技术生成

一、损失函数与反向传播
loss function(越小越好):1.计算实际输出与目标输出之间的差距;2.为我们更新输出提供一定的依据(反向传播)

import torch
from torch import nn
from torch.nn import L1Loss

# Demo: L1 and MSE losses on a toy tensor pair, then cross-entropy on logits.
inputs = torch.tensor([1, 2, 3], dtype=float).reshape(1, 1, 1, 3)
targets = torch.tensor([1, 2, 5], dtype=float).reshape(1, 1, 1, 3)

# reduction='sum' adds up the absolute element-wise differences.
loss = L1Loss(reduction='sum')
result = loss(inputs, targets)  # calling the module implicitly runs forward()
print(result)

# MSELoss defaults to the mean of the squared differences.
loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs, targets)
print(result_mse)

# CrossEntropyLoss expects raw logits of shape (N, C) and class indices of shape (N,).
x = torch.tensor([0.1, 0.2, 0.3]).reshape(1, 3)  # (N=1, C=3)
y = torch.tensor([1])                            # target class index
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)
import torch
from torch import nn
from torch.nn import ReLU, Sigmoid, Linear
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# CIFAR-10 test split, converted to tensors (each image is 3x32x32).
dataset = torchvision.datasets.CIFAR10("./dataset",train=False,transform=torchvision.transforms.ToTensor(),download=True)
# drop_last=True: the Linear(196608, 10) model below only accepts a fully
# flattened batch of exactly 64 images (64*3*32*32 = 196608); a smaller
# final batch would flatten to fewer values and crash with a shape mismatch.
dataloader=DataLoader(dataset,batch_size=64,drop_last=True)
class Model(nn.Module):
    """Single fully connected layer mapping a flattened 64-image CIFAR-10
    batch (64*3*32*32 = 196608 values) to 10 outputs."""

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = Linear(196608, 10)

    def forward(self, input):
        return self.linear1(input)
# Drive the linear model over every batch from the dataloader above.
model = Model()
for data in dataloader:
    imgs,targets=data
    print(imgs.shape)
    #output = torch.reshape(input,(1,1,1,-1))
    # NOTE(review): torch.flatten with no start_dim collapses the batch
    # dimension too, so a batch smaller than 64 flattens to fewer than
    # 196608 values and Linear(196608, 10) raises a shape mismatch —
    # confirm the loader never yields a short final batch.
    output = torch.flatten(imgs)
    print(output.shape)
    output = model(output)
    print(output.shape)

二、优化器
优化器根据梯度调整参数,达到降低误差的目的。学习率一般刚开始选择比较大的,后面选择比较小的



import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader

# CIFAR-10 test split; batch_size=1 so each optimizer step sees one image.
dataset = torchvision.datasets.CIFAR10("./dataset",train=False,transform=torchvision.transforms.ToTensor(),download=True)
dataloader=DataLoader(dataset,batch_size=1)
class Model(nn.Module):
    """CIFAR-10 classifier: three conv+maxpool stages followed by two
    fully connected layers producing 10 class scores."""

    def __init__(self):
        super(Model, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, kernel_size=5, padding=2),
            MaxPool2d(kernel_size=2),
            Conv2d(32, 32, kernel_size=5, padding=2),
            MaxPool2d(kernel_size=2),
            Conv2d(32, 64, kernel_size=5, padding=2),
            MaxPool2d(kernel_size=2),
            Flatten(),
            Linear(1024, 64),  # 64 channels * 4 * 4 spatial = 1024
            Linear(64, 10),
        )

    def forward(self, x):
        return self.model1(x)
# Plain SGD training: forward -> loss -> zero grads -> backward -> step.
loss = nn.CrossEntropyLoss()
model = Model()
optim = torch.optim.SGD(model.parameters(), lr=0.01)
for epoch in range(20):
    running_loss = 0.0  # total loss over every image in this epoch
    for data in dataloader:
        imgs, targets = data
        outputs = model(imgs)
        result_loss = loss(outputs, targets)
        optim.zero_grad()       # clear gradients left from the previous step
        result_loss.backward()  # compute the gradient of every parameter
        optim.step()            # apply the parameter update
        # .item() extracts the plain Python number so we don't keep
        # accumulating tensors (and their memory) across the epoch.
        running_loss += result_loss.item()
    # Bug fix: the original printed result_loss (last batch only) even
    # though the intent was the epoch-wide total.
    print(running_loss)

三、pytorch现有网络模型的使用及修改

import torchvision

#train_data = torchvision.datasets.ImageNet("./dataset_imagenet",split='train',download=True,transform=torchvision.transforms.ToTensor())
from torch import nn

# vgg16 without / with ImageNet-pretrained weights.
# NOTE(review): `pretrained=` is deprecated in newer torchvision in favor
# of `weights=` — confirm against the installed version.
vgg16_false = torchvision.models.vgg16(pretrained=False)
vgg16_true = torchvision.models.vgg16(pretrained=True)
print(vgg16_true)
train_data = torchvision.datasets.CIFAR10("./dataset",train=True,download=True,transform=torchvision.transforms.ToTensor())
# Transfer learning: many networks use vgg16 as a feature-extraction
# backbone and append extra layers after it to implement a task, which is
# why knowing how to modify an existing model matters.
vgg16_true.add_module('add_linear',nn.Linear(1000,10))# appended after the classifier block, as its sibling
print(vgg16_true)
vgg16_true.classifier.add_module('add_linear',nn.Linear(1000,10))# appended inside the classifier block
vgg16_false.classifier[6] = nn.Linear(4096,10)# replaces an existing layer instead of adding one

四、网络模型的保存与读取

import torch
import torchvision
from torch import nn

vgg16 = torchvision.models.vgg16(pretrained=False)
# Save method 1: stores the full model object (structure + parameters).
torch.save(vgg16,"model_method1.pth")
# Save method 2 (officially recommended, smaller on disk): stores only the
# parameters as a state dict, not the network structure.
torch.save(vgg16.state_dict(),"vgg16_method2.pth")

# Loading a method-1 checkpoint.
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted
# files (see the weights_only option in newer PyTorch).
model = torch.load("model_method1.pth")
#print(model)
# Loading a method-2 checkpoint.
#model = torch.load("vgg16_method2.pth")# this way yields just a dict of parameters
# Rebuild the architecture first, then restore the parameters into it.
vgg16 = torchvision.models.vgg16(pretrained=False)
vgg16.load_state_dict(torch.load("vgg16_method2.pth"))
#print(vgg16)

# Pitfall of save method 1: loading the full model elsewhere requires this
# class definition to be importable/visible, otherwise torch.load fails
# with a missing-class error.
class Model(nn.Module):
    """Minimal one-conv-layer network used to demonstrate the pitfall."""

    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)

    def forward(self, x):
        return self.conv1(x)
# Save the full model object (method 1) for the pitfall demonstration above.
model = Model()
torch.save(model,"model1_method1.pth")

五、完整的模型训练套路

import torch.optim
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from model import *

train_data = torchvision.datasets.CIFAR10(root="./dataset",train=True,transform=torchvision.transforms.ToTensor(),download=True)
test_data = torchvision.datasets.CIFAR10(root="./dataset",train=False,transform=torchvision.transforms.ToTensor(),download=True)
# Report the dataset sizes.
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度为:{}".format(train_data_size))# str.format-style string formatting
print("测试数据集的长度为:{}".format(test_data_size))
# Wrap the datasets in batched loaders.
train_dataloader = DataLoader(train_data,batch_size=64)
test_dataloader = DataLoader(test_data,batch_size=64)

# Build the network (class comes from model.py via `from model import *`).
model = Model()
# Loss function.
loss_fn = nn.CrossEntropyLoss()
# Optimizer.
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(),lr=learning_rate)
# Bookkeeping for the training run.
total_train_step = 0 # number of optimizer steps taken
total_test_step = 0 # number of completed evaluation passes
epoch = 10 # number of training epochs
# TensorBoard writer.
writer = SummaryWriter("logs")

# One epoch = a full training pass followed by a full test-set evaluation.
for i in range(epoch):
    print("-------第{}轮训练-------".format(i+1))
    # --- training phase ---
    model.train()  # only affects layers like Dropout/BatchNorm
    for data in train_dataloader:
        imgs, targets = data
        output = model(imgs)
        loss = loss_fn(output, targets)
        # Zero stale gradients, backpropagate, then apply the update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step += 1
        if total_train_step % 100 == 0:  # throttle console/TensorBoard output
            # .item() extracts the plain Python number from the 0-dim tensor.
            print("训练次数{},Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # --- evaluation phase ---
    model.eval()  # only affects layers like Dropout/BatchNorm
    total_test_loss = 0  # per-batch losses summed over the whole test set
    total_accuracy = 0   # count of correctly classified test images
    with torch.no_grad():  # gradients are not needed for evaluation
        for data in test_dataloader:
            imgs, targets = data
            outputs = model(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss += loss.item()
            # argmax over dim 1 picks the predicted class for each image.
            # Fix: .item() converts the 0-dim tensor so total_accuracy stays
            # a plain int instead of accumulating tensors each batch.
            total_accuracy += (outputs.argmax(1) == targets).sum().item()

    print("整体测试集上的loss: {}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step += 1
    # Save a full-model checkpoint (save method 1) after every epoch.
    torch.save(model, "model_{}.pth".format(i))
    print("模型已保存")
writer.close()

model部分

#搭建神经网络,为了规范,会将这些代码单独放在一个文件里面
import torch
from torch import nn

class Model(nn.Module):
    """CIFAR-10 CNN: three 5x5 conv + 2x2 max-pool stages, then two fully
    connected layers mapping the features to 10 class scores."""

    def __init__(self):
        super(Model, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),  # 4x4 spatial left after three pools of 32x32
            nn.Linear(64, 10),
        )

    def forward(self, x):
        return self.model(x)
if __name__ == '__main__':
    # Smoke test: a fake batch of 64 CIFAR-sized images should produce
    # a (64, 10) score tensor.
    model = Model()
    dummy = torch.ones(64, 3, 32, 32)  # renamed from `input`, which shadowed the builtin
    output = model(dummy)
    print(output.shape)

六、利用gpu训练
gpu训练方式有两种,
1.模型,数据,损失函数调用cuda
2.首先定义训练的设备

# Alternative ways to choose the training device; only the last assignment
# takes effect. The conditional form falls back to CPU when CUDA is absent.
device = torch.device("cpu")
device = torch.device("cuda")
device = torch.device("cuda:0")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

然后采用网络模型调用device

model = model.to(device)# for nn.Module, .to(device) moves parameters in place, so reassignment is optional
loss_fn = loss_fn.to(device)# same: reassignment is optional for modules
imgs = imgs.to(device)
targets = targets.to(device)
# NOTE(review): for tensors (imgs/targets) .to(device) returns a new
# tensor, so the reassignment IS required — unlike for modules.

七、完整的模型验证(测试,demo)套路–利用已经训练好的模型,然后给它提供输入

import torch
import torchvision
from PIL import Image
from torch import nn
from torchvision.transforms import Resize, ToTensor
#加载图片
# Load a test image and convert it to the 3x32x32 tensor the model expects.
img_path = "./imgs1/dog.png"
image = Image.open(img_path)
print(image)
image = image.convert("RGB")# force 3 channels (drops alpha, expands grayscale)
transform = torchvision.transforms.Compose([Resize((32,32)),ToTensor()])
image = transform(image)
print(image.shape)
# The network class must be defined (or imported) here: torch.load of a
# full-model checkpoint needs the class to reconstruct the object.
class Model(nn.Module):
    """Same CIFAR-10 CNN architecture the checkpoint was trained with."""

    def __init__(self):
        super(Model, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        return self.model(x)

# Restore the epoch-0 checkpoint saved by the training script.
# NOTE(review): this unpickles the file — only load trusted checkpoints,
# and the Model class above must match the one used when saving.
moxing = torch.load("model_0.pth")
print(moxing)
# Add the batch dimension: (3,32,32) -> (1,3,32,32).
image = torch.reshape(image,(1,3,32,32))
moxing.eval()# switch layers like Dropout/BatchNorm to inference mode
with torch.no_grad():# gradients are not needed for inference
    output = moxing(image)
print(output)
print(output.argmax(1))# index of the highest score = predicted class
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值