pytorch入门-网络模型训练

损失函数和反向传播

import torch
from torch.nn import L1Loss # L1Loss 是 PyTorch 中的一个损失函数,用于衡量预测值与目标值之间的绝对差异。
from torch import nn


# Demo of three common loss functions on tiny hand-made tensors.

# Prediction and ground-truth vectors (float32 so the losses are differentiable).
inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)

# Give both tensors an explicit (N, C, H, W) shape. The loss modules only
# require inputs and targets to match in shape; this mirrors image batches.
inputs = torch.reshape(inputs, (1, 1, 1, 3))
targets = torch.reshape(targets, (1, 1, 1, 3))

# Mean absolute error: mean(|input - target|).
loss = L1Loss()
result = loss(inputs, targets)

# Mean squared error: mean((input - target)^2).
loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs, targets)

print(result)
print(result_mse)

# Cross-entropy for multi-class classification.
# Expected shapes: x is (batch_size, num_classes) raw scores,
# y is (batch_size,) holding the target class index of each sample.
x = torch.tensor([0.1, 0.2, 0.3])
y = torch.tensor([1])
x = torch.reshape(x, (1, 3))
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)
import torchvision
from torch import nn

# Conv2d 是用于定义二维卷积层的类。
# MaxPool2d 是用于定义二维最大池化层的类。
# Flatten 是用于将多维输入数据展平为一维的类。
# Linear 是用于定义线性层(全连接层)的类。
# Sequential 是一个容器类,用于按顺序组合各种层以构建神经网络模型。
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader

# CIFAR-10 test split (10-class 32x32 colour images) converted to tensors;
# downloaded to ../dataset on first run.
dataset = torchvision.datasets.CIFAR10("../dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)

# batch_size=1: one image per step, so each loss below is a per-sample loss.
dataloader = DataLoader(dataset, batch_size=1)

class Tudui(nn.Module):
    """CIFAR-10 classifier: three conv/pool stages followed by a two-layer head."""

    def __init__(self):
        super(Tudui, self).__init__()
        # Each 2x2 max-pool halves the spatial size: 32 -> 16 -> 8 -> 4,
        # so Flatten yields 64 * 4 * 4 = 1024 features for the classifier.
        self.modul1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) class scores."""
        return self.modul1(x)

# Cross-entropy over the 10 CIFAR classes; prints the loss of every image.
loss = nn.CrossEntropyLoss()

tudui = Tudui()
for imgs, targets in dataloader:
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)
    # result_loss.backward()  # would compute gradients; left off in this demo
    print(result_loss)

优化器

import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader

# CIFAR-10 test split as tensors; downloaded to ../dataset if missing.
dataset = torchvision.datasets.CIFAR10("../dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)

# One image per batch keeps the optimisation demo simple (and slow).
dataloader = DataLoader(dataset, batch_size=1)

class Tudui(nn.Module):
    """Small CNN for CIFAR-10: (5x5 conv + 2x2 max-pool) x 3, then a 2-layer MLP."""

    def __init__(self):
        super(Tudui, self).__init__()
        # Spatial size halves at every pool (32 -> 16 -> 8 -> 4); the flattened
        # feature vector therefore has 64 channels * 4 * 4 = 1024 entries.
        self.modul1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        """Return (N, 10) logits for a (N, 3, 32, 32) image batch."""
        return self.modul1(x)
# Model and loss function
loss = nn.CrossEntropyLoss()
tudui = Tudui()

# Plain stochastic gradient descent over all trainable parameters.
optim = torch.optim.SGD(tudui.parameters(), lr = 0.01)

for epoch in range(20):
    running_loss = 0.0  # total loss accumulated over this epoch
    for data in dataloader:
        imgs, targets = data
        outputs = tudui(imgs)                 # forward pass
        result_loss = loss(outputs, targets)  # per-batch loss
        # One optimisation step:
        optim.zero_grad()       # clear gradients left from the previous step
        result_loss.backward()  # backprop: compute fresh gradients
        optim.step()            # apply the parameter update
        # .item() extracts a plain float so we don't accumulate graph-attached
        # tensors across the whole epoch.
        running_loss = running_loss + result_loss.item()
    # BUG FIX: report the epoch total; the original printed only the final
    # batch's loss and never used running_loss at all.
    print(running_loss)

模型的保存和加载

import torch
import torchvision
from torch import nn

# Untrained VGG16 (pretrained=False: random weights, no download of weights).
# NOTE(review): newer torchvision versions replace pretrained= with weights= — verify.
vgg16 = torchvision.models.vgg16(pretrained=False)
# Save method 1: pickle the whole module (architecture + parameters).
torch.save(vgg16, "vgg16_method1.pth")

# Save method 2: parameters only via state_dict (officially recommended).
torch.save(vgg16.state_dict(), "vgg16_method2.pth")
import torch

# Method 1: load the pickled module object back.
import torchvision

# NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
model = torch.load("vgg16_method1.pth")
# print(model)

# Method 2: rebuild the architecture, then restore the saved parameters only.
vgg16 = torchvision.models.vgg16(pretrained=False)
vgg16.load_state_dict(torch.load("vgg16_method2.pth"))
# model = torch.load("vgg16_method2.pth")
print(vgg16)

完整的模型训练

model

import torch
from torch import nn


# 搭建神经网络
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64*4*4, 64),
            nn.Linear(64, 10)

        )

    def forward(self, x):
        x = self.model(x)
        return x

if __name__ == '__main__':
    # Smoke test: a dummy batch of 64 CIFAR-sized images must yield (64, 10) logits.
    net = Tudui()
    dummy = torch.ones((64, 3, 32, 32))
    print(net(dummy).shape)
import torch
import torchvision
from model import *
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Datasets: CIFAR-10 train/test splits as tensors (downloaded on first run)
train_data = torchvision.datasets.CIFAR10(root="../dataset", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                         download=True)

# Dataset sizes
train_data_size = len(train_data)
test_data_size = len(test_data)

print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))

# Batched loaders (64 images per step)
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Network model (Tudui comes from the star import of `model`)
tudui = Tudui()

# Loss: cross-entropy between predicted class scores and true labels
loss_fn = nn.CrossEntropyLoss()

# Optimizer: plain SGD
learning_rate = 0.01  # 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# Running count of training iterations
total_train_step = 0

# Running count of test evaluations
total_test_step = 0

# Number of epochs
epoch = 10

# TensorBoard logging
writer = SummaryWriter("../logs_train")

for i in range(epoch):
    print("------第{}轮训练开始------".format(i+1))

    # ---- training phase ----
    tudui.train()
    for data in train_dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        optimizer.zero_grad()  # clear gradients left from the previous step
        loss.backward()        # backprop: compute fresh gradients
        optimizer.step()       # apply the SGD parameter update

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("训练次数:{}, Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # ---- evaluation phase ----
    tudui.eval()
    total_test_loss = 0   # summed loss over all test batches
    total_accuracy = 0    # number of correctly classified test images

    # Gradients are unnecessary at test time; no_grad saves time and memory.
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            # argmax(1) picks the predicted class per image; .item() keeps the
            # counter a plain int instead of a tensor (bug fix).
            total_accuracy = total_accuracy + (outputs.argmax(1) == targets).sum().item()
    print("整体测试集上的Loss:{}".format(total_test_loss))
    print(("整体测试集上的正确率{}").format(total_accuracy/test_data_size))
    # BUG FIX: the tag was misspelled "tes_loss".
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    # Save the whole model each epoch; the commented line is the state_dict-only
    # alternative that PyTorch officially recommends.
    torch.save(tudui, "tudui_{}.pth".format(i))
    # torch.save(tudui.state_dict(), "tudui_{}.pth".format(i))
    print("模型已保存")

writer.close()

利用GPU训练(1)

import time

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Datasets: CIFAR-10 train/test splits as tensors (downloaded on first run)
train_data = torchvision.datasets.CIFAR10(root="../dataset", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                         download=True)

# Dataset sizes
train_data_size = len(train_data)
test_data_size = len(test_data)

print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))

# Batched loaders (64 images per step)
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Network model
class Tudui(nn.Module):
    """CIFAR-10 classifier: 3 conv/pool blocks, then Flatten and two Linear layers."""

    def __init__(self):
        super(Tudui, self).__init__()
        # 32x32 input shrinks to 4x4 after three 2x2 max-pools;
        # 64 channels * 4 * 4 = 1024 flattened features feed the head.
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) class logits."""
        return self.model(x)

tudui = Tudui()
if torch.cuda.is_available():
    tudui = tudui.cuda()  # move model parameters to the GPU

# Loss: cross-entropy between predicted class scores and true labels
loss_fn = nn.CrossEntropyLoss()
if torch.cuda.is_available():
    # BUG FIX: the original assigned to a misspelled name (loss_fu) while the
    # training loop keeps using loss_fn; rebind loss_fn itself.
    loss_fn = loss_fn.cuda()

# Optimizer: plain SGD
learning_rate = 0.01  # 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# Running count of training iterations
total_train_step = 0

# Running count of test evaluations
total_test_step = 0

# Number of epochs
epoch = 10

# TensorBoard logging
writer = SummaryWriter("../logs_train")
start_time = time.time()  # wall-clock reference for progress prints

for i in range(epoch):
    print("------第{}轮训练开始------".format(i+1))

    # ---- training phase ----
    tudui.train()
    for data in train_dataloader:
        imgs, targets = data
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            targets = targets.cuda()
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        optimizer.zero_grad()  # clear gradients left from the previous step
        loss.backward()        # backprop: compute fresh gradients
        optimizer.step()       # apply the SGD parameter update

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time - start_time)  # seconds elapsed since training began
            print("训练次数:{}, Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # ---- evaluation phase ----
    tudui.eval()
    total_test_loss = 0   # summed loss over all test batches
    total_accuracy = 0    # number of correctly classified test images

    # Gradients are unnecessary at test time; no_grad saves time and memory.
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                targets = targets.cuda()
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            # .item() keeps the correct-count a plain int instead of a tensor (bug fix)
            total_accuracy = total_accuracy + (outputs.argmax(1) == targets).sum().item()
    print("整体测试集上的Loss:{}".format(total_test_loss))
    print(("整体测试集上的正确率{}").format(total_accuracy/test_data_size))
    # BUG FIX: the tag was misspelled "tes_loss".
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    # Save the whole model each epoch (state_dict-only save commented below).
    torch.save(tudui, "tudui_{}.pth".format(i))
    # torch.save(tudui.state_dict(), "tudui_{}.pth".format(i))
    print("模型已保存")

writer.close()

利用GPU训练(2)

import time

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter


# BUG FIX: torch.device("cuda") makes every later .to(device) fail on a
# CPU-only machine; fall back to CPU when CUDA is unavailable (this was
# already sketched in the original's commented-out line).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Datasets: CIFAR-10 train/test splits as tensors (downloaded on first run)
train_data = torchvision.datasets.CIFAR10(root="../dataset", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                         download=True)

# Dataset sizes
train_data_size = len(train_data)
test_data_size = len(test_data)

print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))

# Batched loaders (64 images per step)
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Network model
class Tudui(nn.Module):
    """CIFAR-10 CNN used throughout this tutorial; outputs 10 class logits."""

    def __init__(self):
        super(Tudui, self).__init__()
        # Three conv(5x5, pad 2)/pool(2x2) stages: 32x32 -> 4x4 at 64 channels,
        # flattened to 64 * 4 * 4 features for the two-layer classifier.
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Forward a (N, 3, 32, 32) batch; returns (N, 10) logits."""
        return self.model(x)

tudui = Tudui()
tudui = tudui.to(device)  # move the model to the selected device

# Loss: cross-entropy between predicted class scores and true labels
loss_fn = nn.CrossEntropyLoss()
# BUG FIX: the original bound the moved module to a misspelled name (loss_fu)
# while the training loop keeps using loss_fn; rebind loss_fn itself.
loss_fn = loss_fn.to(device)

# Optimizer: plain SGD
learning_rate = 0.01  # 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# Running count of training iterations
total_train_step = 0

# Running count of test evaluations
total_test_step = 0

# Number of epochs
epoch = 10

# TensorBoard logging
writer = SummaryWriter("../logs_train")
start_time = time.time()  # wall-clock reference for progress prints

for i in range(epoch):
    print("------第{}轮训练开始------".format(i+1))

    # ---- training phase ----
    tudui.train()
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        optimizer.zero_grad()  # clear gradients left from the previous step
        loss.backward()        # backprop: compute fresh gradients
        optimizer.step()       # apply the SGD parameter update

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time - start_time)  # seconds elapsed since training began
            print("训练次数:{}, Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # ---- evaluation phase ----
    tudui.eval()
    total_test_loss = 0   # summed loss over all test batches
    total_accuracy = 0    # number of correctly classified test images

    # Gradients are unnecessary at test time; no_grad saves time and memory.
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            # .item() keeps the correct-count a plain int instead of a tensor (bug fix)
            total_accuracy = total_accuracy + (outputs.argmax(1) == targets).sum().item()
    print("整体测试集上的Loss:{}".format(total_test_loss))
    print(("整体测试集上的正确率{}").format(total_accuracy/test_data_size))
    # BUG FIX: the tag was misspelled "tes_loss".
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    # Save the whole model each epoch (state_dict-only save commented below).
    torch.save(tudui, "tudui_{}.pth".format(i))
    # torch.save(tudui.state_dict(), "tudui_{}.pth".format(i))
    print("模型已保存")

writer.close()

完整训练模型验证

import torch
import torchvision
from PIL import Image
from torch import nn

# Path of the image to classify (Windows raw-string path)
image_path = r"D:\PyCharm\learn_torch\image\dog.jpg"

image = Image.open(image_path)
# PNGs may carry an alpha channel; force 3-channel RGB to match the network input.
image = image.convert('RGB')
print(image)

# Resize to the 32x32 CIFAR input size and convert to a (3, 32, 32) float tensor.
transform = torchvision.transforms.Compose([torchvision.transforms.Resize((32, 32)),
                                            torchvision.transforms.ToTensor()])

image = transform(image)
print(image.shape)


class Tudui(nn.Module):
    """Same CNN architecture the checkpoints were trained with (needed so the
    pickled model below can be unpickled)."""

    def __init__(self):
        super(Tudui, self).__init__()
        # Three conv/pool stages reduce 32x32 to 4x4 with 64 channels;
        # Flatten then feeds 64 * 4 * 4 features to the classifier head.
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Return (N, 10) logits for a (N, 3, 32, 32) batch."""
        return self.model(x)


# Load the whole pickled model saved after epoch 10; map_location lets a
# GPU-trained checkpoint load on a CPU-only machine.
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
model = torch.load("tudui_9.pth", map_location=torch.device("cpu"))

print(model)
# The network expects a batch dimension: reshape (3, 32, 32) -> (1, 3, 32, 32).
image = torch.reshape(image, (1, 3, 32, 32))
model.eval()  # switch to inference mode
with torch.no_grad():  # gradients are unnecessary for a single prediction
    output = model(image)
print(output)

# Predicted class index (highest logit).
print(output.argmax(1))

完整的模型验证

import torch
import torchvision
from PIL import Image
from torch import nn

# Path of the image to classify (Windows raw-string path)
image_path = r"D:\PyCharm\learn_torch\image\dog.jpg"

image = Image.open(image_path)
# Force 3-channel RGB so images with an alpha channel still fit the network.
image = image.convert('RGB')
print(image)

# Resize to 32x32 (CIFAR input size) and convert to a (3, 32, 32) float tensor.
transform = torchvision.transforms.Compose([torchvision.transforms.Resize((32, 32)),
                                            torchvision.transforms.ToTensor()])

image = transform(image)
print(image.shape)


class Tudui(nn.Module):
    """Architecture matching the saved checkpoint so it can be unpickled."""

    def __init__(self):
        super(Tudui, self).__init__()
        # (Conv 5x5, stride 1, pad 2 -> MaxPool 2) three times: 32 -> 16 -> 8 -> 4,
        # then 64 * 4 * 4 flattened features drive a two-layer classifier.
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) class logits."""
        return self.model(x)


# Load the whole pickled model saved after epoch 9; map_location lets a
# GPU-trained checkpoint load on a CPU-only machine.
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
model = torch.load("tudui_8.pth", map_location=torch.device("cpu"))

print(model)
# The network expects a batch dimension: reshape (3, 32, 32) -> (1, 3, 32, 32).
image = torch.reshape(image, (1, 3, 32, 32))
model.eval()  # switch to inference mode
with torch.no_grad():  # gradients are unnecessary for a single prediction
    output = model(image)
print(output)

# Predicted class index (highest logit).
print(output.argmax(1))

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值