pytorch基础学习笔记1

jupyter使用,代码运行

1、package:

        dir():打开,看见

        help():说明书

2、Jupyter创建项目和使用

        进入pytorch的环境:activate pytorch

        配置插件

conda install ipykernel
conda install nb_conda

        打开jupyter:jupyter notebook

        新建,选择环境:python [conda env pytorch]

        shift+回车:运行代码

3.代码运行方式

        (1)python文件:代码是以块为一个整体运行时,python文件的块是所有行的代码

        优点:通用,传播方便,适用于大型项目

        缺点:需要从头运行

        (2)python控制台:以每一行为块运行(可阅读性降低)

        优点:可以显示每个变量属性

        缺点:不利于代码的阅读及修改

        (3)jupyter:以任意行为块运行

        优点:利于代码的阅读及修改

        缺点:环境需要配置

4.读取数据的两个类

        Dataset:提供一种方式去获取数据及其label(jupyter中dataset??,查询功能)

                功能:如何获取数据及其label,告诉总共有多少的数据

        Dataloader:为后面的网络提供不同的数据形式

Tensorboard的使用

1.函数

add_scalar参数

tag:图表的标题

global_step:x轴

scalar_value:y轴

如何查看logs文件

在终端运行命令:tensorboard --logdir=logs(改变端口运行命令:tensorboard --logdir=logs --port=6007)

2.图像

add_image参数

tag

img_tensor

global_step

from torch.utils.tensorboard import SummaryWriter
import numpy as np
from PIL import Image

# Log one sample image plus a simple scalar curve (y = 2x) to TensorBoard.
writer = SummaryWriter("logs")

image_path = "datasets/hymenoptera_data/train/ants/0013035.jpg"
img_PIL = Image.open(image_path)
# add_image() cannot take a PIL image directly, so convert to a numpy array.
img_array = np.array(img_PIL)
print(type(img_array))   # confirm it is a numpy.ndarray
print(img_array.shape)   # height-width-channel layout for a jpeg

# The array is (H, W, C), hence dataformats='HWC'.
writer.add_image("test", img_array, 1, dataformats='HWC')

# Scalar curve y = 2x sampled at steps 0..99.
for step in range(100):
    writer.add_scalar("y=2x", 2 * step, step)

writer.close()

torchvision中的transforms

1.transforms的结构和用法

transforms.py工具箱

from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

# tensor data type: convert a PIL image into a tensor and show it in TensorBoard.
img_path = "datasets/hymenoptera_data/train/ants/0013035.jpg"
img = Image.open(img_path)

# ToTensor maps the PIL image to a CHW float tensor scaled into [0, 1].
to_tensor = transforms.ToTensor()
tensor_img = to_tensor(img)
print(tensor_img)

writer = SummaryWriter("logs")
writer.add_image("Tensor_img", tensor_img)
writer.close()

2.常见的transforms

ToTensor

输入

PIL        Image.open()

tensor        ToTensor()

ndarray        cv.imread()

输出

作用

Normalize(归一化)

output[channel] = (input[channel] - mean[channel]) / std[channel]

Resize

RandomCrop(裁剪)

from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

# Demonstrate the common torchvision transforms:
# ToTensor, Normalize, Resize, Compose and RandomCrop.
writer = SummaryWriter("logs")

img = Image.open("datasets/hymenoptera_data/train/ants/9715481_b3cb4114ff.jpg")
print(img)

# --- ToTensor: PIL image -> CHW float tensor in [0, 1] ---
trans_totensor = transforms.ToTensor()
img_tensor = trans_totensor(img)
writer.add_image("ToTensor", img_tensor)

# --- Normalize: output[c] = (input[c] - mean[c]) / std[c] ---
# e.g. with mean=std=0.5 this would map [0, 1] onto [-1, 1].
print(img_tensor[0][0][0])
trans_norm = transforms.Normalize([6, 3, 2], [9, 3, 5])
img_norm = trans_norm(img_tensor)
print(img_norm[0][0][0])
writer.add_image("Normalize", img_norm, 2)

# --- Resize: takes a PIL image, returns a PIL image; convert to tensor
#     afterwards so TensorBoard can display it ---
print(img.size)
trans_resize = transforms.Resize((512, 512))
img_resize = trans_totensor(trans_resize(img))
writer.add_image("Resize", img_resize, 0)
print(img_resize)

# --- Compose: Resize (shorter side to 512, aspect kept) then ToTensor ---
trans_compose = transforms.Compose([transforms.Resize(512), trans_totensor])
writer.add_image("Resize", trans_compose(img), 1)

# --- RandomCrop: ten random (256, 400) crops of the same image ---
trans_compose_2 = transforms.Compose([transforms.RandomCrop((256, 400)), trans_totensor])
for i in range(10):
    img_crop = trans_compose_2(img)
    writer.add_image("RandomCropHW", img_crop, i)

writer.close()

总结

关注输入和输出类型

多看官方文档

关注方法需要什么参数

不知道返回值的时候

*print

*print(type())

*debug

3.torchvision中的数据集使用

import torchvision
from torch.utils.tensorboard import SummaryWriter

# Download CIFAR10 (train and test splits) as tensors and log the first
# ten test images to TensorBoard.
dataset_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])

train_set = torchvision.datasets.CIFAR10(root="./datasets", train=True, transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10(root="./datasets", train=False, transform=dataset_transform, download=True)

writer = SummaryWriter("p13")
for idx in range(10):
    img, target = test_set[idx]
    writer.add_image("test_set", img, idx)

writer.close()

4.DataLoader的使用

import torchvision.datasets
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Test split of CIFAR10, served in shuffled batches of 64.
test_data = torchvision.datasets.CIFAR10("./datasets", train=False, transform=torchvision.transforms.ToTensor())

# shuffle=True reshuffles every epoch (the usual choice);
# drop_last=True discards the final batch if it has fewer than 64 images.
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=True, num_workers=0, drop_last=True)

# Peek at the first test sample and its label.
img, target = test_data[0]
print(img.shape)
print(target)

writer = SummaryWriter("dataloader")
for epoch in range(2):
    for step, batch in enumerate(test_loader):
        imgs, targets = batch
        writer.add_images("epoch:{}".format(epoch), imgs, step)

writer.close()

神经网络torch.nn

神经网络的基本骨架-nn.Module的使用

import torch
from torch import nn


class module1(nn.Module):
    """Minimal nn.Module example: forward() simply returns input + 1."""

    # Fixed: the original spelled this `__int__`, so nn.Module's real
    # __init__ never ran and calling the module raised at runtime.
    def __init__(self):
        super().__init__()

    def forward(self, input):
        # Pure elementwise computation; no learnable parameters.
        output = input + 1
        return output


module1 = module1()  # note: the instance shadows the class name
x = torch.tensor(1.0)
output = module1(x)
print(output)

Convolution Layers(卷积层)

import torch
import torch.nn.functional as F
import torchvision.datasets
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# input = torch.tensor([[1,2,0,3,1],
#                       [0,1,2,3,1],
#                       [1,2,1,0,0],
#                       [5,2,3,1,1],
#                       [2,1,0,1,1]])
#
# kernel = torch.tensor([[1,2,1],
#                        [0,1,0],
#                        [2,1,0]])
#
# input = torch.reshape(input,(1,1,5,5))
# kernel = torch.reshape(kernel,(1,1,3,3))
#
# print(input.shape)
# print(kernel.shape)
#
# #stride卷积步长
# output = F.conv2d(input,kernel,stride=1)
# print(output)
#
# output2 = F.conv2d(input,kernel,stride=2)
# print(output2)
#
# #padding=1,上下左右增加一行/列,默认值为0
# output3 = F.conv2d(input,kernel,stride=1,padding=1)
# print(output3)

dataset = torchvision.datasets.CIFAR10("datasets", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64)


class Module2(nn.Module):
    """Single 3x3 convolution: 3 input channels -> 6 output channels."""

    def __init__(self):
        super(Module2, self).__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        return self.conv1(x)


module2 = Module2()
print(module2)

writer = SummaryWriter("logs")
for step, batch in enumerate(dataloader):
    imgs, target = batch
    output = module2(imgs)
    print(imgs.shape)    # torch.Size([64, 3, 32, 32])
    print(output.shape)  # torch.Size([64, 6, 30, 30])
    writer.add_images("input", imgs, step)
    # add_images wants 3-channel images, so fold the 6-channel output
    # into twice as many 3-channel images: [64,6,30,30] -> [128,3,30,30].
    output = torch.reshape(output, (-1, 3, 30, 30))
    writer.add_images("output", output, step)

writer.close()

最大池化的使用

ceil_mode=True        允许有出界部分

ceil_mode=False        不允许(默认)

卷积的作用是提取特征,池化的作用是降低特征数量

import torch
import torchvision.datasets
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("./datasets", train=False, download=True, transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)


class Module3(nn.Module):
    """3x3 max pooling; ceil_mode=False drops windows that cross the border."""

    def __init__(self):
        super(Module3, self).__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=False)

    def forward(self, input):
        return self.maxpool1(input)


module3 = Module3()
writer = SummaryWriter("./logs_maxpool")
for step, batch in enumerate(dataloader):
    imgs, target = batch
    writer.add_images("input", imgs, step)
    writer.add_images("output", module3(imgs), step)

writer.close()

非线性激活

1.relu

2.sigmoid

import torch
import torchvision.datasets
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Small hand-made tensor, reshaped to (batch, channel, H, W).
input = torch.tensor([[1, -0.5],
                      [-1, 3]])
input = torch.reshape(input, (-1, 1, 2, 2))
print(input.shape)

dataset = torchvision.datasets.CIFAR10("./datasets", train=False, download=True, transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)


class Module4(nn.Module):
    """Non-linear activation demo; forward applies Sigmoid (ReLU kept as reference)."""

    def __init__(self):
        super(Module4, self).__init__()
        self.relu1 = ReLU()
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        return self.sigmoid1(input)


module4 = Module4()
writer = SummaryWriter("./logs_relu")
for step, batch in enumerate(dataloader):
    imgs, target = batch
    writer.add_images("input", imgs, global_step=step)
    writer.add_images("output", module4(imgs), step)

writer.close()

其他结构

1.线性层(linear)

import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("./datasets", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)


class Module5(nn.Module):
    """One fully-connected layer: 196608 (= 64*3*32*32) inputs -> 10 outputs."""

    def __init__(self):
        super(Module5, self).__init__()
        self.linear1 = Linear(196608, 10)

    def forward(self, input):
        return self.linear1(input)


module5 = Module5()

for batch in dataloader:
    imgs, target = batch
    print(imgs.shape)
    # Flatten the whole batch into one long vector before the linear layer
    # (drop_last=True above guarantees the length is exactly 196608).
    output = torch.flatten(imgs)
    print(output.shape)
    output = module5(output)
    print(output.shape)

2.sequential

import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter


# CIFAR10-style CNN assembled with nn.Sequential instead of
# one attribute (and one forward() line) per layer.
class Module6(nn.Module):
    """Three conv/pool stages, flatten, then two linear layers down to 10 classes."""

    def __init__(self):
        super(Module6, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        return self.model1(x)


modulde6 = Module6()
print(modulde6)

# Sanity check: a dummy batch of 64 RGB 32x32 images must map to (64, 10).
input = torch.ones((64, 3, 32, 32))
output = modulde6(input)
print(output.shape)

writer = SummaryWriter("./logs_seq")
writer.add_graph(modulde6, input)
writer.close()

Loss Functions(损失函数)和反向传播

1.损失函数

*计算实际输出和目标之间的差距

*为我们更新输出提供一定的依据(反向传播)

import torch
from torch import nn
from torch.nn import L1Loss

# Loss-function demos: L1 (sum reduction), MSE, and cross-entropy.
inputs = torch.reshape(torch.tensor([1, 2, 3], dtype=torch.float32), [1, 1, 1, 3])
targets = torch.reshape(torch.tensor([1, 2, 5], dtype=torch.float32), [1, 1, 1, 3])

# reduction='sum': |1-1| + |2-2| + |5-3| = 2
# (the default 'mean' would divide by the element count: 2/3)
loss = L1Loss(reduction='sum')
result = loss(inputs, targets)

# Mean squared error: ((1-1)^2 + (2-2)^2 + (5-3)^2) / 3
loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs, targets)
print(result)
print(result_mse)

# Cross-entropy over raw logits for a 3-class problem, true class index 1.
x = torch.reshape(torch.tensor([0.1, 0.2, 0.3]), (1, 3))
y = torch.tensor([1])
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)

2.反向传播(backward)

import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("./datasets", train=False, download=True, transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=1)


class Module6(nn.Module):
    """Same CIFAR10 CNN as before, used here to demonstrate backward()."""

    def __init__(self):
        super(Module6, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        return self.model1(x)


loss = nn.CrossEntropyLoss()
modulde6 = Module6()
for imgs, targets in dataloader:
    outputs = modulde6(imgs)
    result_loss = loss(outputs, targets)
    # backward() computes gradients; an optimizer would then use them
    # to update the parameters and drive the loss down.
    result_loss.backward()
    print("ok")

3.优化器

import torch.optim
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("./datasets", train=False, download=True, transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=1)


class Module6(nn.Module):
    """CIFAR10 CNN used to demonstrate a full SGD optimizer loop."""

    def __init__(self):
        super(Module6, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        return self.model1(x)


loss = nn.CrossEntropyLoss()
modulde6 = Module6()
# Create an optimizer over the model's parameters.
optim = torch.optim.SGD(modulde6.parameters(), lr=0.01)
for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs = modulde6(imgs)
        result_loss = loss(outputs, targets)
        # Clear stale gradients, backpropagate, then apply the update.
        optim.zero_grad()
        result_loss.backward()
        optim.step()
        # Accumulate the epoch loss as a Python float via .item().
        # Fixed: summing the loss *tensor* (as the original did) keeps every
        # iteration's autograd graph referenced and grows memory all epoch.
        running_loss = running_loss + result_loss.item()
    print(running_loss)

网络模型

1.VGG 16

import torchvision
from torch import nn

# VGG16 with and without pretrained ImageNet weights.
vgg16_false = torchvision.models.vgg16(pretrained=False)
vgg16_true = torchvision.models.vgg16(pretrained=True)
print(vgg16_true)

# Transfer learning: reuse VGG16 as the backbone for CIFAR10.
train_data = torchvision.datasets.CIFAR10("./datasets", train=True, transform=torchvision.transforms.ToTensor(), download=True)

# Option 1: append a linear layer mapping the 1000 ImageNet logits to 10 classes.
vgg16_true.classifier.add_module('add_linear', nn.Linear(1000, 10))
print(vgg16_true)

# Option 2: replace the final classifier layer outright (4096 -> 10).
print(vgg16_false)
vgg16_false.classifier[6] = nn.Linear(4096, 10)
print(vgg16_false)

2.模型的保存

import torch
import torchvision.models

vgg16 = torchvision.models.vgg16(pretrained=False)

# Save method 1: the whole model object (architecture + parameters).
torch.save(vgg16, "vgg16_method1.pth")

# Save method 2 (officially recommended): only the state_dict, i.e. the
# parameters as a Python dict.
torch.save(vgg16.state_dict(), "vgg16_method2.pth")

3.模型的加载

import torch

# Counterparts of the two save methods above.
# Method 1 -> restores the complete model object.
model = torch.load("vgg16_method1.pth")

# Method 2 -> yields a state_dict (parameters only), which would normally
# be fed into model.load_state_dict().
model = torch.load("vgg16_method2.pth")
print(model)

4.完整的模型训练套路-利用GPU训练(以cifar10数据集为例)

网络模型

数据(输入,标注)

损失函数

.cuda()

import torch.optim
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time

# from model import *

# Complete GPU training recipe for CIFAR10 (model/data/loss moved via .cuda()).
# Prepare the datasets (downloaded under ./datasets).
train_data = torchvision.datasets.CIFAR10("./datasets",train=True,transform=torchvision.transforms.ToTensor(),download=True)
test_data = torchvision.datasets.CIFAR10("./datasets",train=False,transform=torchvision.transforms.ToTensor(),download=True)

# Dataset lengths.
train_data_size = len(train_data)
test_data_size = len(test_data)
# Python format-string usage: if train_data_size = 10, this prints the length as 10.
print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))

# Serve both splits in batches of 64 via DataLoader.
train_dataloader = DataLoader(train_data,batch_size=64)
test_dataloader = DataLoader(test_data,batch_size=64)

# Build the network.
class Module7(nn.Module):
    """CIFAR10 CNN: three conv/pool stages, flatten, two linear layers -> 10 logits."""
    def __init__(self):
        super(Module7,self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3,32,5,1,2,),
            nn.MaxPool2d(2),
            nn.Conv2d(32,32,5,1,2),
            nn.MaxPool2d(2),
            nn.Conv2d(32,64,5,1,2,),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(1024,64),
            nn.Linear(64,10),
        )
    def forward(self,x):
        x = self.model(x)
        return x


# Create the network model.
module7 = Module7()
# Move the model to the GPU (requires a CUDA device).
module7 = module7.cuda()

# Create the loss function and move it to the GPU as well.
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.cuda()

# Optimizer: plain SGD.
learning_rate = 1e-2
optimizer = torch.optim.SGD(module7.parameters(),learning_rate)

# Training bookkeeping.
# Number of training iterations taken so far.
total_train_step = 0
# Number of evaluation passes completed so far.
total_test_step = 0
# Number of epochs to run.
epoch = 10

# TensorBoard writer for loss/accuracy curves.
writer = SummaryWriter("./logs_train")
# Record the wall-clock start time.
start_time = time.time()

for i in range(epoch):
    print("-------第{}轮训练开始-------".format(i+1))
    # Training phase: train mode affects layers like Dropout/BatchNorm.
    module7.train()
    for data in train_dataloader:
        imgs,targets = data
        imgs = imgs.cuda()
        targets = targets.cuda()
        outputs = module7(imgs)
        loss = loss_fn(outputs,targets)
        # Standard update: clear gradients, backpropagate, apply the step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time-start_time)
            print("训练次数:{},Loss:{}".format(total_train_step,loss.item()))
            writer.add_scalar("train_loss",loss.item(),total_train_step)

    # Evaluation phase over the whole test set, without gradients.
    module7.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs,targets = data
            imgs = imgs.cuda()
            targets = targets.cuda()
            outputs = module7(imgs)
            loss = loss_fn(outputs,targets)
            total_test_loss = total_test_loss + loss.item()
            # argmax(1) picks the predicted class per sample; count the hits.
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss",total_test_loss,total_test_step)
    writer.add_scalar("test_accuracy",total_accuracy/test_data_size,total_test_step)
    total_test_step = total_test_step + 1

    # Save a whole-model checkpoint every epoch (state_dict variant kept below).
    torch.save(module7,"module7_{}.pth".format(i))
    # torch.save(module7.state_dict(),"module7_{}.pth".format(i))
    print("模型已保存")

writer.close()

5.完整的模型验证(测试,demo)套路

利用已经训练好的模型,然后给它提供输入

*test.py

import torch
import torchvision.transforms
from PIL import Image
from torch import nn

# Inference demo: run one image through a trained checkpoint.
image_path = "./imgs/dog.png"
image = Image.open(image_path)
print(image)

# A png may carry an alpha channel; force 3-channel RGB to match training input.
image = image.convert('RGB')
transform = torchvision.transforms.Compose([torchvision.transforms.Resize((32,32)),
                                            torchvision.transforms.ToTensor()])

image = transform(image)
print(image.shape)

# The class definition must be present so torch.load can restore the
# whole-model checkpoint saved during training.
class Module7(nn.Module):
    """Same CIFAR10 CNN architecture as the training script."""
    def __init__(self):
        super(Module7,self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3,32,5,1,2,),
            nn.MaxPool2d(2),
            nn.Conv2d(32,32,5,1,2),
            nn.MaxPool2d(2),
            nn.Conv2d(32,64,5,1,2,),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(1024,64),
            nn.Linear(64,10),
        )
    def forward(self,x):
        x = self.model(x)
        return x

# NOTE(review): this checkpoint was presumably saved from a CUDA model;
# loading on a CPU-only machine would need map_location — confirm.
model = torch.load("module7_29.pth")
print(model)
# image = image.cuda()
# Add the batch dimension: (3, 32, 32) -> (1, 3, 32, 32).
image = torch.reshape(image,(1,3,32,32))
# Switch the model to evaluation mode.
model.eval()
# no_grad saves memory and compute during inference.
with torch.no_grad():
    output = model(image.cuda())
print(output)

# Index of the highest logit = predicted class.
print(output.argmax(1))

看一下GitHub

*读readme

  • 24
    点赞
  • 11
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值