Getting Started with PyTorch [小土堆]

1 PyTorch prerequisites

1.1 Checking that the installation works

1. Install
Go to the PyTorch homepage and scroll down to the install selector: https://pytorch.org/

2. Run

import torch
torch.cuda.is_available() 

1.2 Two essential tools for learning

If learning PyTorch is like working with a toolbox:

dir()

it opens the box and lets you see the tools inside;
names shown with double underscores are built-in attributes that are not meant to be modified

help()

think of it as the instruction manual

A small demo

dir(torch)                      # lists the compartments of the toolbox, e.g. cuda, nn, optim, ...
dir(torch.cuda)                 # lists the tools inside one compartment, e.g. is_available
help(torch.cuda.is_available)   # the manual for one tool: where to put your hand and how to turn it
                                # pass the function itself, without parentheses

2 Loading data

Dataset: provides a way to get each piece of data together with its label, and tells you how many pieces of data there are in total.
DataLoader: packages (batches) the data so it is convenient to use later.
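A minimal sketch of how the two fit together (illustrative only; the real code is in 2.1 and 2.3 below):

sample, label = dataset[0]                   # Dataset: index -> (sample, label); len(dataset) -> total count
loader = DataLoader(dataset, batch_size=64)  # DataLoader: iterating over loader yields batches of 64 samples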

2.1 Using Dataset

import os

from PIL import Image
from torch.utils.data import Dataset


# A Dataset has two jobs: 1) load each sample and return it with its label; 2) report the dataset size via len()
class MyData(Dataset):
    def __init__(self, root_dir, label_dir):  # initialization: set up the attributes used throughout the class
        self.root_dir = root_dir
        self.label_dir = label_dir
        self.path = os.path.join(self.root_dir, self.label_dir)
        # os.path.join simply concatenates path components; it handles the different path separators on Windows and Linux
        self.img_path = os.listdir(self.path)  # os.listdir returns a list of file names

    def __getitem__(self, idx):  # return one sample and its label
        img_name = self.img_path[idx]  # img_path is the list built in __init__; pick the idx-th file name
        img_item_path = os.path.join(self.root_dir, self.label_dir, img_name)  # full path to one specific image
        img = Image.open(img_item_path)  # an image must be opened (read) with Image.open() before it can be shown or manipulated (PIL)
        label = self.label_dir  # special to this example: the label is simply the name of the parent directory
        return img, label  # img is the opened image (it can be shown, printed, queried for size, ...)
        # label is the image's tag; in this class it is just the folder name, because that is how we defined it

    def __len__(self):
        return len(self.img_path)  # img_path is a list, so len() simply counts its entries


if __name__ == '__main__':
    root_dir = "dataset/train"
    ants_label_dir = "ants"
    bees_label_dir = "bees"
    ants_dataset = MyData(root_dir, ants_label_dir)
    bees_dataset = MyData(root_dir, bees_label_dir)
    train_dataset = ants_dataset + bees_dataset
    
# runfile('E:/pythonProject/learn_pytorch/read_data.py')
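A quick check of the datasets built above (run in the same session, assuming the dataset/train/ants and dataset/train/bees folders from the tutorial exist):

img, label = ants_dataset[0]        # __getitem__ returns (PIL image, folder name)
print(label, len(train_dataset))    # e.g. "ants" and the total number of images
img.show()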

2.2 Using TensorBoard

2.2.1 Installation

  1. Install tensorboard:
pip install tensorboard
  2. Run:
activate pytorch
cd D:\pycharm-workplace\pythonProject
# switch to the directory that contains the logs folder
tensorboard --logdir=logs
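By default TensorBoard serves at http://localhost:6006; a different port can be chosen with --port, e.g. tensorboard --logdir=logs --port=6007.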

2.2.2 A few simple demos

demo 1: scalars (plotting a function)
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs")

# writer.add_image()
# writer.add_scalar()

for i in range(100):
    writer.add_scalar("y=2x",2*i,i)
writer.close()

TensorBoard then shows the y=2x curve under the Scalars tab.

demo 2: images
from torch.utils.tensorboard import SummaryWriter
from PIL import Image
from torchvision import transforms

writer = SummaryWriter("../logs")
img = Image.open('../images/pytorch.png')
# img.show()
print(img)

trans_tensor = transforms.ToTensor()
img_tensor = trans_tensor(img)
writer.add_image('ToTensor',img_tensor)


trans_norm = transforms.Normalize([2, 3, 5, 10], [0.5, 0.5, 0.5, 0.5])  # one mean/std per channel (4 values here, since this PNG has an alpha channel)
img_norm = trans_norm(img_tensor)
writer.add_image("Normalize", img_norm, 2)  # 2 is the global_step, i.e. step 2
writer.close()
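Normalize applies output[c] = (input[c] - mean[c]) / std[c] to each channel c, which is why it needs one mean and one std per channel.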

TensorBoard shows the ToTensor image and the normalized image under the Images tab.

2.3 Using DataLoader

torchvision.datasets.CIFAR10(root="../dataset", train=False, download=True, transform=dataset_transform): when downloading the dataset, set download=True; the console then prints the download URL. You can also take that URL, download the archive yourself, and put it into the specified folder, so it never needs to be downloaded again.

import torchvision
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader

dataset_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])

test_set = torchvision.datasets.CIFAR10(root="../dataset",train=False,download=True,transform=dataset_transform)
test_loader = DataLoader(dataset=test_set,batch_size=64,shuffle=False,num_workers=0,drop_last=True)

print(test_set[0])
img,target = test_set[0]
print(img.shape)
print(target)
print(test_set.classes[target])


writer = SummaryWriter("../dataloader")
for epoch in range(2):
    step = 0
    for data in test_loader:
        imgs,target =  data
        writer.add_images("Epoch:{}".format(epoch),imgs,step)
        step=step+1

writer.close()
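With batch_size=64 and drop_last=True, the final incomplete batch is dropped, so each pass over the 10000 test images yields 10000 // 64 = 156 full batches.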

Viewing the result in TensorBoard: each step of each Epoch tag shows a grid of 64 CIFAR10 images.

3 Using Transforms

Since I don't work on images, I'm skipping this section.

4 Building a neural network

4.1 nn.Module

import torch
from torch import nn

class Tudui(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self,input):
        output = input+1
        return output

tudui = Tudui()
x=torch.tensor(1.0)
output = tudui(x)
print(output)

4.2 Convolution layers

import torchvision
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch import nn
from torch.nn import Conv2d

test_set = torchvision.datasets.CIFAR10(root="../dataset",train=False,download=True,
                                        transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset=test_set,batch_size=64)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui,self).__init__()
        self.conv1 = Conv2d(3,6,3,stride=1,padding=0)


    def forward(self,x):
        x=self.conv1(x)
        return  x

tudui = Tudui()
print(tudui)

writer = SummaryWriter("./logs")
step = 0
for data in dataloader:
    imgs, target = data
    output = tudui(imgs)
    print(imgs.shape)    # e.g. torch.Size([64, 3, 32, 32])
    print(output.shape)  # e.g. torch.Size([64, 6, 30, 30])
    writer.add_images("input", imgs, step)
    # add_images expects 3 channels; fold the 6 output channels into extra batch entries just for visualization
    output = torch.reshape(output, (-1, 3, 30, 30))
    writer.add_images("output", output, step)
    step += 1
writer.close()
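The spatial output size of a convolution is H_out = (H_in + 2*padding - kernel_size) / stride + 1 = (32 + 0 - 3) / 1 + 1 = 30, so a (64, 3, 32, 32) batch comes out of Conv2d(3, 6, 3) with shape (64, 6, 30, 30).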

4.3 Linear and other layers

import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader
dataset = torchvision.datasets.CIFAR10(root="../dataset",train=False,download=True,
                                        transform=torchvision.transforms.ToTensor())

dataloader = DataLoader(dataset, batch_size=64, drop_last=True)  # drop the last incomplete batch, otherwise its size no longer matches Linear(196608, 10)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.linear1 = Linear(196608,10)
    def forward(self,input):
        output = self.linear1(input)
        return output

tudui = Tudui()
for data in dataloader:
    imgs,target = data
    print(imgs.shape)
    output = torch.reshape(imgs,(1,1,1,-1))
    print(output.shape)
    output = tudui(output)
    print(output.shape)
    output2 = torch.flatten(imgs)
    print(output2.shape)
    print("---------------")

4.4 Using Sequential

import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # self.conv1 = Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2, stride=1)
        # self.maxpool1 = MaxPool2d(2)
        # self.conv2 = Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2)
        # self.maxpool2 = MaxPool2d(2)
        # self.conv3 = Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2)
        # self.maxpool3 = MaxPool2d(2)
        # self.flatten = Flatten()  # 64 4*4
        # self.linear1 = Linear(1024, 64)
        # self.linear2 = Linear(64, 10)
        self.model = Sequential(
            Conv2d(3,32,5,padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024,64),
            Linear(64,10)
        )

    def forward(self, x):
        # x = self.conv1(x)
        # x = self.maxpool1(x)
        # x = self.conv2(x)
        # x = self.maxpool2(x)
        # x = self.conv3(x)
        # x = self.maxpool3(x)
        # x = self.flatten(x)
        # x = self.linear1(x)
        # x = self.linear2(x)
        x=self.model(x)
        return x


tudui = Tudui()
print(tudui)
input=torch.ones((64,3,32,32))
output = tudui(input)
print(output.shape)
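The Flatten input is 64 channels of 4 x 4 feature maps (32 -> 16 -> 8 -> 4 after the three MaxPool2d(2) layers), i.e. 64 * 4 * 4 = 1024 features, hence Linear(1024, 64); for a batch of 64 images the final output shape is (64, 10).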

# writer = SummaryWriter("logs_seq")
# writer.add_graph(tudui,input)
# writer.close()

test_set = torchvision.datasets.CIFAR10(root="./dataset",train=False,download=True,
                                        transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset=test_set,batch_size=1)
loss = nn.CrossEntropyLoss()


tudui1 = Tudui()
optim = torch.optim.SGD(tudui1.parameters(),lr=0.001)
for epoch in range(20):
    running_loss = 0
    for data in dataloader:
        imgs,target = data
        output = tudui1(imgs)
        result_loss=loss(output,target)
        # print(result_loss)
        optim.zero_grad()
        result_loss.backward()
        optim.step()  # the optimizer updates every parameter
        running_loss += result_loss.item()  # .item() takes the plain number, so no autograd graph is accumulated
    print(running_loss)
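nn.CrossEntropyLoss combines LogSoftmax and NLLLoss, so the raw (batch, 10) logits and the integer class targets are passed to it directly; no softmax layer is added to the model.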

5 Working with existing network models

5.1 Using and modifying an existing model

import torchvision.models
from torch import nn

vgg16_false  = torchvision.models.vgg16(pretrained=False)
vgg16_true = torchvision.models.vgg16(pretrained=True)

print(vgg16_true)
# add a layer (appended at the end of the classifier)
vgg16_true.classifier.add_module('add_linear',nn.Linear(1000,10))
print(vgg16_true)
print("-----------------")

print(vgg16_false)
# replace an existing layer
vgg16_false.classifier[6]=nn.Linear(4096,10)
print(vgg16_false)

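Note: in recent torchvision versions the pretrained flag is deprecated in favor of weights, e.g. torchvision.models.vgg16(weights=torchvision.models.VGG16_Weights.DEFAULT) for pretrained weights and weights=None for random initialization.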

5.2 Saving and loading models

model_save.py

import torch
import torchvision

vgg16 = torchvision.models.vgg16(pretrained=False)

# save method 1: model structure + parameters
torch.save(vgg16, "vgg16_method1.pth")

# save method 2: save only the parameters as a dict (the officially recommended way)
torch.save(vgg16.state_dict(), "vgg16_method2.pth")

model_load.py


import torch

# method 1 => load the model that was saved with save method 1
import torchvision

model = torch.load("vgg16_method1.pth")
# print(model)

# method 2 => load

model = torch.load('vgg16_method2.pth')  # this is only the parameter dict, not a model
# print(model)

# ---------------
vgg_16 = torchvision.models.vgg16(pretrained=False)
vgg_16.load_state_dict(torch.load('vgg16_method2.pth'))
print(vgg_16)

Output of method 1: the full VGG16 model (architecture plus parameters).
Method 2: printing the loaded object directly shows an ordered dict of parameters.
Method 2 (with load_state_dict): the VGG16 model with the saved parameters restored.
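Note: loading a model saved with method 1 relies on pickle, so the model's class definition must be importable in the loading script (for a custom class, import or redefine it before calling torch.load). Method 2 avoids this, but the architecture has to be rebuilt before calling load_state_dict. Recent PyTorch versions also default torch.load to weights_only=True, which suits method 2 style checkpoints; whole-model checkpoints then need weights_only=False.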

6 A complete training workflow (using the CIFAR10 dataset)

model.py

# build the neural network
from torch import nn


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x

train.py

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from xiaotudui.model import Tudui

# prepare the datasets
train_data = torchvision.datasets.CIFAR10(root="../data", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../dataset", train=False, download=True,
                                         transform=torchvision.transforms.ToTensor())


# dataset sizes
train_data_size = len(train_data)
test_data_size = len(test_data)
print("Training set size: {}".format(train_data_size))
print("Test set size: {}".format(test_data_size))

# load the datasets with DataLoader
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataLoader = DataLoader(test_data, batch_size=64)


# create the network model
tudui = Tudui()
# input = torch.ones((64, 3, 32, 32))  # batch_size=64, 3 channels, 32x32
# output = tudui(input)
# print(output.shape)

# loss function
loss_fn = nn.CrossEntropyLoss()

# optimizer
learning_rate = 1e-2
optimzer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

writer = SummaryWriter("../logs_train")

# some bookkeeping for training
# number of training steps so far
total_train_step = 0
# number of test rounds so far
total_test_step = 0
# number of epochs
epoch = 10


for i in range(epoch):
    print("------第{}轮训练开始------".format(i + 1))

    # training phase
    tudui.train()  # affects layers such as Dropout and BatchNorm
    for data in train_dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        # optimizer step
        optimzer.zero_grad()
        loss.backward()
        optimzer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            # item() pulls the value out of a 0-dim tensor as a plain Python number
            print("Training step: {}, loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # evaluation phase
    tudui.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataLoader:
            imgs, targets = data
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss += loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
    total_test_step += 1

    torch.save(tudui, "tudui_{}.pth".format(i))
    print("模型已保存")
writer.close()

7 Training on a GPU

7.1 Method 1

7.1.1 What to change

(1) Model creation

Call .cuda() on the model after creating it.

(2) Loss function

(3) imgs and targets in the training loop

(4) imgs and targets in the test loop
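A condensed sketch of the four changes (the same calls appear in the complete script in 7.1.2; on a machine without a GPU they can be guarded with if torch.cuda.is_available()):

tudui = Tudui()
tudui = tudui.cuda()                  # (1) model

loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.cuda()              # (2) loss function

imgs = imgs.cuda()                    # (3)(4) data, inside both the training loop and the test loop
targets = targets.cuda()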

7.1.2 Complete code

Here the model class is also moved into this single .py file.

import time

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader

# add TensorBoard
from torch.utils.tensorboard import SummaryWriter

train_data = torchvision.datasets.CIFAR10(root="../data", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../dataset", train=False, download=True,
                                         transform=torchvision.transforms.ToTensor())

train_dataloader = DataLoader(train_data, batch_size=64)
test_dataLoader = DataLoader(test_data, batch_size=64)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x


start_time = time.time()
train_data_size = len(train_data)
test_data_size = len(test_data)

print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))

tudui = Tudui()
tudui = tudui.cuda()
# input = torch.ones((64, 3, 32, 32))  # batch_size=64, 3 channels, 32x32
# output = tudui(input)
# print(output.shape)

# loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.cuda()
# optimizer
learning_rate = 1e-2
optimzer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

writer = SummaryWriter("../logs_train")

total_train_step = 0
total_test_step = 0
epoch = 10
for i in range(epoch):
    print("------第{}轮训练开始------".format(i + 1))

    # training phase
    tudui.train()  # affects layers such as Dropout and BatchNorm
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.cuda()
        targets = targets.cuda()
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        # optimizer step
        optimzer.zero_grad()
        loss.backward()
        optimzer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time - start_time)
            print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # evaluation phase
    tudui.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataLoader:
            imgs, targets = data
            imgs = imgs.cuda()
            targets = targets.cuda()
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss += loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
    total_test_step += 1

    torch.save(tudui, "tudui_{}.pth".format(i))
    print("模型已保存")
writer.close()

7.1.3 Results

Open the Colab website (a VPN is needed in some regions). For an introduction to Colab, see the article Colab使用教程(超级详细版)及Colab Pro/Pro+评测.

!nvidia-smi shows the NVIDIA GPU assigned to the session; depending on the time of day you may get one with 16 GB of memory. Check the output to confirm the GPU is actually being used. Required packages are downloaded while the notebook runs. Total training time: about 101 s.

7.2 Method 2 (recommended, more flexible)

7.2.1 What to change

(1) Training device

(2) Model creation

(3) Loss function

(4) imgs and targets in the training loop

(5) imgs and targets in the test loop
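A condensed sketch of the five changes (the same calls appear in the complete script in 7.2.2; the is_available() guard is a common variant and is not part of the original code):

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # (1) training device

tudui = Tudui()
tudui = tudui.to(device)              # (2) model

loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)          # (3) loss function

imgs = imgs.to(device)                # (4)(5) data, inside both the training loop and the test loop
targets = targets.to(device)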

7.2.2 Complete code

import time

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader

# add TensorBoard
from torch.utils.tensorboard import SummaryWriter

# define the training device
device = torch.device("cuda")

train_data = torchvision.datasets.CIFAR10(root="../data",train=True,transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../dataset",train=False,download=True,
                                        transform=torchvision.transforms.ToTensor())

train_dataloader = DataLoader(train_data,batch_size=64)
test_dataLoader = DataLoader(test_data,batch_size=64)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x=self.model(x)
        return x

start_time = time.time()
train_data_size = len(train_data)
test_data_size = len(test_data)

print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))




tudui = Tudui()
tudui = tudui.to(device)


# loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)
# optimizer
learning_rate = 1e-2
optimzer = torch.optim.SGD(tudui.parameters(),lr=learning_rate)

writer = SummaryWriter("../logs_train")

total_train_step = 0
total_test_step = 0
epoch = 10
for i in range(epoch):
    print("------第{}轮训练开始------".format(i+1))

    # training phase
    tudui.train()  # affects layers such as Dropout and BatchNorm
    for data in train_dataloader:
        imgs,targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = tudui(imgs)
        loss = loss_fn(outputs,targets)

        # optimizer step
        optimzer.zero_grad()
        loss.backward()
        optimzer.step()

        total_train_step = total_train_step+1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time-start_time)
            print("训练次数:{},Loss:{}".format(total_train_step,loss.item()))
            writer.add_scalar("train_loss",loss.item(),total_train_step)

    # evaluation phase
    tudui.eval()
    total_test_loss = 0
    total_accuracy=0
    with torch.no_grad():
        for data in test_dataLoader:
            imgs,targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = tudui(imgs)
            loss = loss_fn(outputs,targets)
            total_test_loss += loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss",total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy",total_accuracy/test_data_size,total_test_step)
    total_test_step +=1

    torch.save(tudui,"tudui_{}.pth".format(i))
    print("模型已保存")
writer.close()


7.2.3 Results


8 A complete model-validation workflow (to be continued)

9 A look at open-source projects (to be continued)
