#Pytorch
加载数据
加载图片
from PIL import Image

# Open one training picture from disk and show it with the system image viewer.
sample_path = "G:\\Pytorch_py\\Pytorch_learn\\hymenoptera_data\\hymenoptera_data\\train\\ants\\0013035.jpg"
sample_image = Image.open(sample_path)  # Image.open() takes the file path
sample_image.show()  # pop up the picture in the default viewer
实现的效果:图片会在系统默认的看图工具中弹出显示。
###实现路径加起来
使用的是os.path.join(a,b)
#路径加起来
import os  # BUG FIX: os.path.join was used below without importing os first

# Join a dataset root directory with a class sub-directory.
root_path = "Pytorch_learn/hymenoptera_data/hymenoptera_data/train"
lable_dir_path = "ants"
path = os.path.join(root_path, lable_dir_path)  # concatenate the two path parts
###实现图片路径读取
from torch.utils.data import Dataset
from PIL import Image
import os
# help(Dataset)
class Mydata(Dataset):
    """Image dataset where root_dir/label_dir holds all images of one class.

    Each sample is (PIL.Image, label); the label is simply the name of the
    class sub-directory (e.g. "ants").
    """

    def __init__(self, root_dir, label_dir):
        # BUG FIX: this method was misspelled __int__, so it was never called
        # and none of the attributes below were ever set.
        self.root_dir = root_dir
        self.label_dir = label_dir
        self.path = os.path.join(self.root_dir, self.label_dir)
        self.img_path = os.listdir(self.path)  # file names of every image in the class folder

    def __getitem__(self, idx):
        img_name = self.img_path[idx]
        img_item_path = os.path.join(self.root_dir, self.label_dir, img_name)
        img = Image.open(img_item_path)
        label = self.label_dir  # the folder name doubles as the class label
        return img, label

    def __len__(self):
        return len(self.img_path)
# Build one dataset per class folder and peek at the first ant sample.
root_dir = "hymenoptera_data\\hymenoptera_data\\train"
ants_label_dir = "\\ants"
bees_label_dir = "\\bees"
ants_dataset = Mydata(root_dir, ants_label_dir)
bees_dataset = Mydata(root_dir, bees_label_dir)
img1, lable = ants_dataset[0]  # first (image, label) pair of the ants set
img1.show()
print(lable)
##tensorboard使用方法
TensorBoard 是一个可视化工具,浏览器访问网址 http://localhost:6006/
里面有两个常用的函数,一是 writer.add_scalar("A", B, C)
# writer.add_image()
# writer.add_scalar("A",B,C)
#A是标题第一个要加引号,B是y的值,C是对应x的值。y可以是对应x的函数
"""
#这个是读取函数进行训练
# writer.add_scalar()
for i in range(100):
writer.add_scalar("y=x",i,i)
for i in range(100):
writer.add_scalar("y=2x",2*i,i)"""
from torch.utils.tensorboard import SummaryWriter
使用时在终端的 conda 环境中输入:
tensorboard --logdir=G:\\Pytorch_py\\Pytorch_learn\\logs --port=6007  # 默认是 6006 端口,加上 --port 可以修改端口
--logdir 后接的是存放 log 的位置地址,不加引号
writer.add_image()
#这个是读取图片进行训练
#参数是writer.add_image("A",B,C) A参数是标题,B的类型是张量或者是numpy数组,C参数的步长 注意B的类型不同 HWC或者WHC
from torch.utils.tensorboard import SummaryWriter
from PIL import Image
import numpy as np

# Log one picture to TensorBoard.  add_image() accepts a tensor or an
# ndarray; dataformats tells it the axis order (here height-width-channel).
writer = SummaryWriter("logs")  # event files are written into ./logs
image_path = "G:\\Pytorch_py\\Pytorch_learn\\练手数据集\\train\\ants_image\\0013035.jpg"
image_PIL = Image.open(image_path)
image_array = np.array(image_PIL)  # PIL image -> H×W×C ndarray
writer.add_image("test", image_array, 2, dataformats='HWC')  # logged at step 2
writer.close()
##transforms 的结构与用法
transforms 功能结构
#transforms 类似于一个工具箱:对一张图片选取一个工具函数(比如 ToTensor,将图片或者 ndarray 转换为一个张量),转换为所需要的结果
tensor张量
通常是通过将图片或者nparray转换为tensor
# Fragment: convert one picture to a tensor (Image / transforms imports as in
# the full example in this file).
image_path = "G:\\Pytorch_py\\Pytorch_learn\\练手数据集\\train\\ants_image" \
             "\\5650366_e22b7e1065.jpg"
image = Image.open(image_path)
tensor_trans = transforms.ToTensor()  # the ToTensor class performs the conversion
tensor_image = tensor_trans(image)
常见的 transforms
ToTensor、Normalize、Resize、Compose 的使用以及注意的点
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

# Demo of the common torchvision transforms — ToTensor, Normalize, Resize,
# Compose, RandomCrop — each result logged to TensorBoard.
# (Indentation of the trailing loop restored; the flattened snippet was not
# valid Python.)
writer = SummaryWriter("logs")
img = Image.open("G:\\Pytorch_py\\Pytorch_learn\\练手数据集\\train\\ants_image\\67270775_e9fdf77e9d.jpg")

# ToTensor: PIL image -> float tensor
trans_temsor = transforms.ToTensor()
img_tensor = trans_temsor(img)
writer.add_image("tensor_image", img_tensor, 2)

# Normalize: (x - mean) / std per channel; three means and three stds for a
# three-channel tensor.  Only accepts tensors, not PIL images.
print(img_tensor[0][0][0])
trans_norm = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
img_norm = trans_norm(img_tensor)
print(img_norm[0][0][0])
writer.add_image("img_norm", img_norm)

# Resize: only changes the spatial size; the result is still a PIL image,
# so convert it to a tensor before logging.
print(img.size)
trans_resize = transforms.Resize((512, 512))
img_resize = trans_resize(img)
img_resize = trans_temsor(img_resize)
writer.add_image("img_resize", img_resize)

# Compose: chain Resize then ToTensor and run them in one call.
trans_resize_2 = transforms.Resize(512)
trans_compose = transforms.Compose([trans_resize_2, trans_temsor])
img_resize_2 = trans_compose(img)
writer.add_image("img_comose", img_resize_2)

# RandomCrop: ten different random 213×213 crops of the same picture.
trans_randcrop = transforms.RandomCrop((213, 213))
trans_compose_2 = transforms.Compose([trans_randcrop, trans_temsor])
for i in range(10):
    img_randomcrop = trans_compose_2(img)
    writer.add_image("img_randomcrop", img_randomcrop)
torchvision中的datasets的使用
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

# Download CIFAR10 and log the first ten test images to TensorBoard.
# (Loop indentation restored; the flattened snippet was not valid Python.)
dataset_transform = transforms.Compose([transforms.ToTensor()])  # convert every PIL image to a tensor
train_set = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=dataset_transform, download=True)
print(test_set[0])  # (tensor image, class index)
writer = SummaryWriter("p10")
for i in range(10):
    img, target = test_set[i]
    writer.add_image('test-set', img, i)
writer.close()
##dataloader加载数据
batch_size每次输入的图片数量,shuffle为是否打乱输入的顺序 默认是False,num_workers是指线程来加载的数量默认是为0,利用主线程来进行加载。drop_last是否省略最后不能整除的图片默认是False
import torchvision.datasets
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# DataLoader demo: batch_size images per step; shuffle re-orders every epoch
# (default False); num_workers=0 loads in the main process; drop_last drops
# the final incomplete batch (default False).
# (Loop indentation restored; the flattened snippet was not valid Python.)
test_data = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=torchvision.transforms.ToTensor())
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=True, num_workers=0, drop_last=True)
# first image and target of the test set
img, target = test_data[0]
print(img.shape)
print(target)
writer = SummaryWriter("dataloaders")
for epoch in range(2):
    step = 0
    for data in test_loader:
        imgs, targets = data
        writer.add_images(f"epoch为:{epoch}", imgs, step)  # step counts the batches drawn
        step = step + 1
writer.close()
##神经网络的搭建
import torch
from torch import nn
import torch.nn.functional as F
class Zymodule(nn.Module):
    """Minimal custom network: forward() just adds 1 to its input.

    (Indentation restored; the flattened snippet was not valid Python.)
    """

    def __init__(self):
        super().__init__()

    def forward(self, input):
        output = input + 1
        return output


zymodule = Zymodule()
x = torch.tensor(1.0)
print(zymodule(x))  # tensor(2.)
自己写的模型都是继承nn.Module(),构建神经网络主要是输入input到神经网络中然后通过操作然后输出output,这个操作就是forward
官网连接:https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module
卷积层
###卷积层的操作
卷积层上的操作示意图如下:
卷积操作就是移动卷积核到输入图像上覆盖计算每次移动第一覆盖为1X1+2X2+0X1+0X0+1X1+0X2+2X1+1X2+0X0对应等于10,其中的参数设置有五个
input输入的张量,weight权值一般不变动,bias可选的形状偏置张量一般也不变动,卷积核stride移动的步长,padding是对输入张量的填充。#向外围填充一圈计算卷积的时候填充的无值默认为0乘相加上其余有值的
"""
卷积层
"""
import torch
import torch.nn.functional as F
input=torch.tensor([[1,2,0,3,1],
[0,1,2,3,1],
[1,2,1,0,0],
[5,2,3,1,1],
[2,1,0,1,1]
])
kernel=torch.tensor([[1,2,1],
[0,1,0],
[2,1,0]])
print(input.shape)
print(kernel.shape)
#输入进去的格式要
input=torch.reshape(input,(1,1,5,5))
kernel=torch.reshape(kernel,(1,1,3,3))
print(input.shape)
print(kernel.shape)
output=F.conv2d(input,kernel,stride=1)#这个步长是1上下移动计算都是1输出的是3*3的一个数组
print(output)
output1=F.conv2d(input,kernel,stride=2)#这个步长是2上下移动计算都是2输出的是一个2*2的数组
print(output1)
#padding是将输入的图片进行外围填充padding为1时就像外围填充一圈
output2=F.conv2d(input,kernel,stride=1,padding=1)#向外围填充一圈计算卷积的时候填充的无值默认为0乘相加上其余有值的
print(output2)
#输出结果
——————————————————————————————————————————————————————————————————————————————————————————————————————
torch.Size([5, 5])
torch.Size([3, 3])
torch.Size([1, 1, 5, 5])
torch.Size([1, 1, 3, 3])
tensor([[[[10, 12, 12],
[18, 16, 16],
[13, 9, 3]]]])
tensor([[[[10, 12],
[13, 3]]]])
tensor([[[[ 1, 3, 4, 10, 8],
[ 5, 10, 12, 12, 6],
[ 7, 18, 16, 16, 8],
[11, 13, 9, 3, 4],
[14, 13, 9, 7, 4]]]])
进程已结束,退出代码0
官网https://pytorch.org/docs/stable/generated/torch.nn.functional.conv2d.html#torch.nn.functional.conv2d
卷积层输入参数对应的含义
以上就是输入的参数,本小节主要介绍in_channels和out_channels,其余常用参数在卷积层的操作里,这里的in_channels是指输入的张量是几维的如果输入的是一个n*n的数组就只输入了一个一维张量,out_channels是指卷积核的数量,如果out_channels=1表明只有一个卷积层去卷积计算。如果out_channels=2的话是指用两个卷积核去就行卷积操作。俩个卷积核计算得到的数据重叠在一起进行输出结果。
卷积层读取图片进行卷积的操作
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# (Class/loop indentation restored; the flattened snippet was not valid Python.)
dataset = torchvision.datasets.CIFAR10("./data", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    """A single 3->6 channel 3x3 convolution."""

    def __init__(self):
        super(Tudui, self).__init__()
        # colour input has 3 channels; produce 6 feature maps with 3x3 kernels
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1,
                            padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x


tudui = Tudui()
writer = SummaryWriter("covn_1")
step = 0
for data in dataloader:
    imgs, targets = data
    output = tudui(imgs)
    print(imgs.shape)    # 64 images, 3 channels, 32x32
    print(output.shape)  # 64 images, 6 channels, 30x30
    writer.add_images("input", imgs, global_step=step)
    # add_images can only display 3-channel pictures, so fold the 6 channels
    # back into extra batch entries of 3 channels each
    output = torch.reshape(output, (-1, 3, 30, 30))
    writer.add_images("output", output, global_step=step)
    step = step + 1
writer.close()
输入的图像
卷积后的图像
###通道数的理解
##最大池化层的使用
最大池化导入的包 from torch.nn import MaxPool2d。池化的重要参数 ceil_mode:为 True 时输出尺寸向上取整,边缘不足核大小的窗口也保留参与池化;为 False(默认)时向下取整,不足的部分被舍弃。
import torch
from torch import nn
from torch.nn import MaxPool2d

# (Indentation restored; the flattened snippet was not valid Python.)
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]], dtype=torch.float32)
input = torch.reshape(input, (-1, 1, 5, 5))  # (batch, channel, H, W)
print(input.shape)
print(input)


class Tudui(nn.Module):
    """3x3 max pooling; ceil_mode=True keeps the incomplete edge windows."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        output = self.maxpool(input)
        return output


tudui = Tudui()
output = tudui(input)
print(output)  # [[2, 3], [5, 1]] for the input above
卷积与池化使用
import torch
import torchvision.datasets
from torch import nn
from torch.nn import MaxPool2d, Conv2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# (Class/loop indentation restored; the flattened snippet was not valid Python.)
dataset = torchvision.datasets.CIFAR10("./data", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    """Applies two successive 3x3 max-pools to the input."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool = MaxPool2d(kernel_size=3, ceil_mode=True)
        # NOTE(review): this conv layer is created but never called in forward
        self.conv1 = Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1,
                            padding=0)

    def forward(self, input):
        output = self.maxpool(input)
        output1 = self.maxpool(output)
        return output1


tudui = Tudui()
writer = SummaryWriter("covn_2_maxpool")
step = 0
for data in dataloader:
    imgs, targets = data
    output1 = tudui(imgs)
    writer.add_images("input", imgs, global_step=step)
    writer.add_images("output1", output1, global_step=step)
    step = step + 1
writer.close()
结果就是对图片连续做了两次 3×3 的最大池化(代码中定义了卷积层但 forward 中并未使用它)。
非线性激活层
非线性激活如 ReLU 函数,目的是大于 0 的保持不变,小于 0 的变为 0。
类似的是
类名:以及调用
input=torch.tensor([[2,-0.5],
[4,-9]])
ReLU(A,inplace=False)
#参数A输入的是张量,inplace=False等于FALSE时是不进行原有的数据的替换,True是改变原有的input
import torch
import torchvision.datasets
from torch import nn
from torch.nn import ReLU, Conv2d, MaxPool2d, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# ReLU(inplace=False) keeps the input tensor untouched; inplace=True would
# overwrite it in place.
# (Class/loop indentation restored; the flattened snippet was not valid Python.)
input = torch.tensor([[2, -0.5],
                      [4, -9]])
output = torch.reshape(input, (-1, 1, 2, 2))
dataset1 = torchvision.datasets.CIFAR10("./data", train=False, transform=torchvision.transforms.ToTensor(),
                                        download=True)
dataloader = DataLoader(dataset1, batch_size=64)


class Tudui(nn.Module):
    """Runs Sigmoid on the input (conv/pool/ReLU layers kept but unused)."""

    def __init__(self):
        super().__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)
        self.cov1 = Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1)
        self.relu1 = ReLU(inplace=False)
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        output2 = self.sigmoid1(input)
        return output2


tudi = Tudui()
writer = SummaryWriter("ReUL")
step = 0
for data in dataloader:
    imgs, tragets = data
    output3 = tudi(imgs)
    writer.add_images('reul1', imgs, global_step=step)
    writer.add_images('reul', output3, global_step=step)
    step = step + 1
writer.close()
##正则化层
线性层
线性层就是把多维的例如5x5的维度拉平拉成一维的25
import torch
import torchvision
from torch import nn
from torch.nn import ReLU
from torch.nn import Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Flatten demo: reshape each 64-image batch into one long row.
# (Loop indentation restored; the flattened snippet was not valid Python.)
dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)
for data in dataloader:
    imgs, targets = data
    print(imgs.shape)
    output = torch.reshape(imgs, (1, 1, 1, -1))  # 64*3*32*32 = 196608 values in one row
    print(output.shape)
以上就是拉开的图片
import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# (Class/loop indentation restored; the flattened snippet was not valid Python.)
dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)


class Tudui(nn.Module):
    """Single linear layer mapping 196608 flattened values down to 10."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.linear1 = Linear(196608, 10)  # 64*3*32*32 = 196608 inputs, 10 outputs

    def forward(self, input):
        output = self.linear1(input)
        return output


tudui = Tudui()
writer = SummaryWriter("logs")
step = 0
for data in dataloader:
    imgs, targets = data
    print(imgs.shape)
    writer.add_images("input", imgs, step)
    output1 = torch.flatten(imgs)                # way 2: flatten to 1-D
    output = torch.reshape(imgs, (1, 1, 1, -1))  # way 1: reshape to one row
    print(output.shape)
    output = tudui(output)
    print(output.shape)
    writer.add_images("output", output, step)
    step = step + 1
##神经网路搭建实战
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear
class Tudui(nn.Module):
def __init__(self):
super(Tudui, self).__init__()
#下面卷积以及池化的参数都是根据是上面的图来的
self.conv1 = Conv2d(3, 32, 5, padding=2)#in_channels, out_channels, kernel_size, stride,padding
#输入的通道是3输出的32, out_channels是指输出的通道数(也是卷积核的数量),由于根据上图输出的高宽么有变化还是32x32去更根据上面去计算的padding的值和stride的值
self.maxpool1 = MaxPool2d(2)#kernel_size, ceil_mode,是指池化的池化核的大小不会改变张量的维度
self.cov2 = Conv2d(32, 32, 5, padding=2)
self.maxpool2 = MaxPool2d(2)
self.conv3 = Conv2d(32, 64, 5, padding=2)
self.maxpool3 = MaxPool2d(2)
self.flatten = Flatten()
self.linear1 = Linear(1024, 64)
self.Linear2 = Linear(64, 10)
def forward(self, x):
x = self.conv1(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = self.maxpool2(x)
x = self.conv3(x)
x = self.maxpool3(x)
x = self.flatten(x)
x = self.linear1(x)
x = self.Linear2(x)
return x
tudui = Tudui()
print(tudui)
###神经网络输入数据
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear
class Tudui(nn.Module):
def __init__(self):
super(Tudui, self).__init__()
self.conv1 = Conv2d(3,32,5,padding=2)
self.maxpool1 = MaxPool2d(2)
self.conv2 = Conv2d(32,32,5,padding=2)
self.maxpool2 = MaxPool2d(2)
self.conv3 = Conv2d(32,64,5,padding=2)
self.maxpool3 = MaxPool2d(2)
self.flatten = Flatten()
self.linear1 = Linear(1024,64)
self.Linear2 = Linear(64,10)
def forward(self, x):
x = self.conv1(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = self.maxpool2(x)
x = self.conv3(x)
x = self.maxpool3(x)
x = self.flatten(x)
x = self.linear1(x)
x = self.Linear2(x)
return x
tudui = Tudui()
input = torch.ones((64,3,32,32))
output = tudui(input)
print(output.shape)
###Sequential神经网络
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
class Tudui(nn.Module):
def __init__(self):
super(Tudui, self).__init__()
self.model1 = Sequential(
Conv2d(3,32,5,padding=2),
MaxPool2d(2),
Conv2d(32,32,5,padding=2),
MaxPool2d(2),
Conv2d(32,64,5,padding=2),
MaxPool2d(2),
Flatten(),
Linear(1024,64),
Linear(64,10)
)
def forward(self, x):
x = self.model1(x)
return x
tudui = Tudui()
input = torch.ones((64,3,32,32))
output = tudui(input)
print(output.shape)
Tensorboard显示网络
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# (Indentation restored; the flattened snippet was not valid Python.
#  A redundant second `tudui = Tudui()` was also removed.)
dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)


class Tudui(nn.Module):
    """Sequential CIFAR10 network, visualised in TensorBoard via add_graph."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


tudui = Tudui()
writer = SummaryWriter("logs")
input = torch.ones((64, 3, 32, 32))
output = tudui(input)
print(output.shape)
writer.add_graph(tudui, input)  # draws the network graph in TensorBoard
writer.close()
##损失函数与反向传播
###L1Loss损失函数
损失函数是 torch.nn.L1Loss(reduction="mean" 或 "sum"),mean 是求平均,sum 是求和。
import torch
from torch.nn import L1Loss

# L1Loss demo: reduction="sum" totals |pred - target|, "mean" averages it.
input = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 8], dtype=torch.float32)
input = torch.reshape(input, (1, 1, 1, 3))
targets = torch.reshape(targets, (1, 1, 1, 3))
print(input.shape)
print(input)
print(targets.shape)
print(targets)
output = L1Loss(reduction="sum")
print(output(input, targets))   # |1-1| + |2-2| + |3-8| = 5
output = L1Loss(reduction="mean")
print(output(input, targets))   # 5 / 3
###MESLoss损失函数
import torch
from torch.nn import L1Loss, MSELoss

# MSELoss demo: squared errors, summed or averaged.
input = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 8], dtype=torch.float32)
output = MSELoss(reduction="sum")
print(output(input, targets))   # (3-8)^2 = 25
output = MSELoss(reduction="mean")
print(output(input, targets))   # 25 / 3
###交叉熵损失函数
import torch
from torch import nn
from torch.nn import L1Loss, MSELoss, CrossEntropyLoss

# CrossEntropyLoss demo: one sample, three class scores, true class index 1.
x = torch.tensor([0.1, 0.2, 0.3])
y = torch.tensor([1])
x = torch.reshape(x, (1, 3))  # batch_size 1, 3 classes
print(x)
loss_cross = CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)  # -x[1] + log(sum(exp(x))) ≈ 1.1019
###损失函数反向传播
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# (Class/loop indentation restored; the flattened snippet was not valid Python.)
dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)


class Tudui(nn.Module):
    """Sequential CIFAR10 network used to demonstrate loss.backward()."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss = nn.CrossEntropyLoss()  # cross entropy for 10-class classification
tudui = Tudui()
step = 0
for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)  # gap between prediction and target
    # backward() fills each parameter's .grad; those gradients only exist
    # after this call, and an optimizer would then use them to update weights.
    result_loss.backward()
    print("ok")
优化器
####1. 优化器
① 损失函数调用backward方法,就可以调用损失函数的反向传播方法,就可以求出我们需要调节的梯度,我们就可以利用我们的优化器就可以根据梯度对参数进行调整,达到整体误差降低的目的。
② 梯度要清零,如果梯度不清零会导致梯度累加。
####2. 神经网络优化一轮
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# (Class/loop indentation restored; the flattened snippet was not valid Python.)
dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)


class Tudui(nn.Module):
    """Sequential CIFAR10 network optimised with SGD for a single pass."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss = nn.CrossEntropyLoss()  # cross entropy
tudui = Tudui()
optim = torch.optim.SGD(tudui.parameters(), lr=0.01)  # stochastic gradient descent
for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)
    optim.zero_grad()        # clear gradients left over from the previous batch
    result_loss.backward()   # compute fresh gradients
    optim.step()             # update the parameters with those gradients
    print(result_loss)       # one pass over the data, so the loss barely drops
####3.神经网络优化多轮
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# (Class/loop indentation restored; the flattened snippet was not valid Python.)
dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)


class Tudui(nn.Module):
    """Sequential CIFAR10 network optimised with SGD over several epochs."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss = nn.CrossEntropyLoss()  # cross entropy
tudui = Tudui()
optim = torch.optim.SGD(tudui.parameters(), lr=0.01)  # stochastic gradient descent
for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        result_loss = loss(outputs, targets)
        optim.zero_grad()
        result_loss.backward()
        optim.step()
        # BUG FIX: accumulate the Python float (.item()), not the tensor;
        # summing loss tensors keeps autograd references alive and grows memory.
        running_loss = running_loss + result_loss.item()
    print(running_loss)  # total loss over this epoch
####4.神经网络学习率的优化
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# (Class/loop indentation restored; the flattened snippet was not valid Python.)
dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)


class Tudui(nn.Module):
    """Sequential CIFAR10 network trained with SGD plus StepLR decay."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss = nn.CrossEntropyLoss()  # cross entropy
tudui = Tudui()
optim = torch.optim.SGD(tudui.parameters(), lr=0.01)  # stochastic gradient descent
# every step_size scheduler steps the learning rate is multiplied by gamma (0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=5, gamma=0.1)
for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        result_loss = loss(outputs, targets)
        optim.zero_grad()
        result_loss.backward()
        optim.step()
        running_loss = running_loss + result_loss.item()
    # BUG FIX: scheduler.step() was called once per batch, which decays the
    # learning rate far faster than the intended once-per-epoch schedule.
    scheduler.step()
    print(running_loss)  # total loss over this epoch
网络模型使用及其修改
1下载模型并查看模型结构
import torchvision

# trauin_data = torchvision.datasets.ImageNet(...)  # ImageNet is no longer publicly downloadable
# pretrained=True downloads ImageNet-trained weights for every conv/pool layer;
# pretrained=False builds the same architecture with random weights.
vgg16_true = torchvision.models.vgg16(pretrained=True)
vgg16_false = torchvision.models.vgg16(pretrained=False)
print("ok")
print(vgg16_true)
VGG网址https://pytorch.org/vision/stable/models/generated/torchvision.models.vgg16.html#torchvision.models.vgg16
###2. 网络模型添加
import torchvision
from torch import nn

# Append a 1000->10 linear layer so the ImageNet-pretrained VGG16 can emit
# the 10 CIFAR10 classes.  'add_linear' is just a name we chose for the layer.
dataset = torchvision.datasets.CIFAR10("./dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
vgg16_true = torchvision.models.vgg16(pretrained=True)
vgg16_true.add_module('add_linear', nn.Linear(1000, 10))
print(vgg16_true)
3. 网络模型修改
import torchvision
from torch import nn

# Replace the last classifier layer so VGG16 outputs 10 classes (CIFAR10).
vgg16_false = torchvision.models.vgg16(pretrained=False)  # no pretrained weights
print(vgg16_false)
# BUG FIX: a stray "0" after this call made the original line a SyntaxError
vgg16_false.classifier[6] = nn.Linear(4096, 10)
print(vgg16_false)
原来的
修改后
###4.网络模型保存
import torchvision
import torch

vgg16 = torchvision.models.vgg16(pretrained=False)
# saving way 1: structure + parameters together in one file
torch.save(vgg16, "./model/vgg16_method1.pth")
print(vgg16)
####4.1 网络模型导入(方式一)
import torch

# loading counterpart of saving way 1: returns the full model object
model = torch.load("./model/vgg16_method1.pth")
print(model)
####4.2 网络模型导入(方式二)
import torchvision
import torch

vgg16 = torchvision.models.vgg16(pretrained=False)
# saving way 2 (officially recommended): parameters only, no structure
torch.save(vgg16.state_dict(), "./model/vgg16_method2.pth")
print(vgg16)
4. 3网络模型导入(方式二)
这个出来的只是参数没有网络结构
import torch
import torchvision

# loading way 2 directly only yields a state dict of parameters, not a model
model = torch.load("./model/vgg16_method2.pth")
print(model)
如果想要加载网络模型结构代码应该是
import torch
import torchvision

# to get a usable model, rebuild the architecture first and load the weights in
vgg16 = torchvision.models.vgg16(pretrained=False)
vgg16.load_state_dict(torch.load("G:\\Pytorch_py\\Pytorch_learn\\model\\vgg16_method2.pth"))
print(vgg16)
##完整的模型训练套路
####1. CIFAR 10 model 网络模型
下面用 CIFAR 10 model网络来完成分类问题,网络模型如下图所示
2. DataLoader加载数据集
import torchvision
from torch import nn
from torch.utils.data import DataLoader

# prepare the datasets
train_data = torchvision.datasets.CIFAR10("./dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
# dataset lengths
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度:{}".format(train_data_size))
print("测试数据集的长度:{}".format(test_data_size))
# wrap the datasets in DataLoaders.
# BUG FIX: DataLoader was given the integer lengths (train_data_size /
# test_data_size) instead of the datasets themselves, which fails at
# iteration time.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
####3.写网络模型并验证
import torch
from torch import nn


# build the network
class Tudui(nn.Module):
    """CIFAR10 classifier: (N, 3, 32, 32) -> (N, 10).

    (Indentation restored; the flattened snippet was not valid Python.)
    """

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),  # in 3 ch, out 32 ch, 5x5 kernel, stride 1, padding 2
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),               # 64 channels * 4 * 4 after three pools
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


if __name__ == '__main__':
    # quick sanity check that the output has the expected shape
    tudui = Tudui()
    input = torch.ones((64, 3, 32, 32))
    output = tudui(input)
    print(output.shape)
4.训练网络模型
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from Pytorch_learn.model import *

# (Loop indentation restored; the flattened snippet was not valid Python.
#  A duplicated `import torchvision` line was also removed.)
# prepare the datasets
train_data = torchvision.datasets.CIFAR10("./dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
# dataset lengths
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度:{}".format(train_data_size))
print("测试数据集的长度:{}".format(test_data_size))
# wrap the datasets in DataLoaders
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
tudui = Tudui()
# loss function: cross entropy for multi-class classification
loss_fn = nn.CrossEntropyLoss()
# optimizer: stochastic gradient descent, lr = 1e-2
learning = 0.01
optimizer = torch.optim.SGD(tudui.parameters(), learning)
# bookkeeping
total_train_step = 0  # number of optimisation steps so far
epoch = 10            # number of passes over the training set
for i in range(epoch):
    print("-----第 {} 轮训练开始-----".format(i + 1))
    # training phase
    for data in train_dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)  # gap between prediction and target
        optimizer.zero_grad()   # clear old gradients so they don't accumulate
        loss.backward()         # back-propagate
        optimizer.step()        # update the parameters
        total_train_step = total_train_step + 1
        print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))  # .item() unwraps the scalar tensor
5.测试步骤
# evaluation step (continues the script above: uses tudui, test_dataloader
# and loss_fn defined there).
# (Loop indentation restored; the flattened snippet was not valid Python.)
total_test_less = 0
with torch.no_grad():  # no gradients are needed during evaluation
    for data in test_dataloader:
        imgs, targets = data
        output = tudui(imgs)
        loss = loss_fn(output, targets)
        total_test_less = total_test_less + loss.item()
print(f"测试损失Loss{total_test_less}")
####6.总结
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from Pytorch_learn.model import *

# (Loop indentation restored; the flattened snippet was not valid Python.
#  A duplicated `import torchvision` line was also removed.)
# prepare the datasets
train_data = torchvision.datasets.CIFAR10("./dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度:{}".format(train_data_size))
print("测试数据集的长度:{}".format(test_data_size))
# wrap the datasets in DataLoaders
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
tudui = Tudui()
# loss function: cross entropy
loss_fn = nn.CrossEntropyLoss()
# optimizer: stochastic gradient descent, lr = 1e-2
learning = 0.01
optimizer = torch.optim.SGD(tudui.parameters(), learning)
# bookkeeping
total_train_step = 0  # optimisation steps taken
total_test_step = 0   # evaluation rounds finished
epoch = 2
# TensorBoard logging
writer = SummaryWriter("trainlog")
for i in range(epoch):
    print("-----第 {} 轮训练开始-----".format(i + 1))
    # training phase
    for data in train_dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:  # log every 100 steps
            print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar('train_loss', loss.item(), total_train_step)
    # evaluation phase
    total_test_less = 0
    with torch.no_grad():  # gradients are not needed for evaluation
        for data in test_dataloader:
            imgs, targets = data
            output = tudui(imgs)
            loss = loss_fn(output, targets)
            total_test_less = total_test_less + loss.item()
    print(f"测试损失Loss{total_test_less}")
    writer.add_scalar('test_loss', total_test_less, total_test_step)
    total_test_step = total_test_step + 1
    torch.save(tudui, f"tudui模型/tudui_{i}.pth")  # checkpoint after each epoch
####7.添加准确率
accary=(output.argmax(1)==targets).sum()#output.argmax(1)这个就是算出最大的预测值[0.1,0.3,0.6] 例如这个是0.6则输出的output.argmax(1)这个参数一是指横向的拿出最大的那个置为1然后拿出下标2与实际的targes进行对比,。sum()累计相同的加一求和
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from Pytorch_learn.model import *

# Same training suite as above, with test accuracy added.
# (Loop indentation restored; the flattened snippet was not valid Python.
#  A duplicated `import torchvision` line was also removed.)
# prepare the datasets
train_data = torchvision.datasets.CIFAR10("./dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度:{}".format(train_data_size))
print("测试数据集的长度:{}".format(test_data_size))
# wrap the datasets in DataLoaders
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
tudui = Tudui()
# loss function: cross entropy
loss_fn = nn.CrossEntropyLoss()
# optimizer: stochastic gradient descent, lr = 1e-2
learning = 0.01
optimizer = torch.optim.SGD(tudui.parameters(), learning)
# bookkeeping
total_train_step = 0  # optimisation steps taken
total_test_step = 0   # evaluation rounds finished
epoch = 2
# TensorBoard logging
writer = SummaryWriter("trainlog")
for i in range(epoch):
    print("-----第 {} 轮训练开始-----".format(i + 1))
    # training phase
    for data in train_dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:  # log every 100 steps
            print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar('train_loss', loss.item(), total_train_step)
    # evaluation phase
    total_test_less = 0
    total_test_accary = 0  # count of correctly classified test images
    with torch.no_grad():  # gradients are not needed for evaluation
        for data in test_dataloader:
            imgs, targets = data
            output = tudui(imgs)
            loss = loss_fn(output, targets)
            total_test_less = total_test_less + loss.item()
            # argmax(1) picks the most probable class per row; compare with
            # the targets and count the matches
            accary = (output.argmax(1) == targets).sum()
            total_test_accary = accary + total_test_accary
    print(f"测试损失Loss{total_test_less}")
    print(f"测试准确率accary{total_test_accary/test_data_size}")
    writer.add_scalar('test_loss', total_test_less, total_test_step)
    total_test_step = total_test_step + 1
    torch.save(tudui, f"tudui模型/tudui_{i}.pth")  # checkpoint after each epoch
1. 利用GPU训练(方式一)
GPU训练主要有三部分,网络模型、数据(输入、标注)、损失函数,这三部分放到GPU上
import torch
import torchvision
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# from Pytorch_learn.model import *
# Prepare the CIFAR10 datasets: downloaded under ./dataset, samples converted
# from PIL images to tensors. ToTensor is stateless, so one instance is shared.
to_tensor = torchvision.transforms.ToTensor()
train_data = torchvision.datasets.CIFAR10("./dataset", train=True,
                                          transform=to_tensor, download=True)
test_data = torchvision.datasets.CIFAR10("./dataset", train=False,
                                         transform=to_tensor, download=True)

# Record the split sizes; the test size is used as the accuracy denominator.
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度:{}".format(train_data_size))
print("测试数据集的长度:{}".format(test_data_size))

# Batch both splits (batch size 64, no shuffling).
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
class Tudui(nn.Module):
    """CNN for CIFAR10: three conv+max-pool stages, then two linear layers,
    mapping a (N, 3, 32, 32) batch to (N, 10) class scores."""

    def __init__(self):
        super(Tudui, self).__init__()
        # Attribute name `model1` is kept so saved checkpoints keep loading.
        self.model1 = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),   # 3->32 channels, 5x5 kernel, pad 2 keeps 32x32
            nn.MaxPool2d(2),             # 32x32 -> 16x16
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),             # 16x16 -> 8x8
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),             # 8x8 -> 4x4
            nn.Flatten(),                # -> 64*4*4 = 1024 features
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Run the stacked pipeline over the batch x."""
        return self.model1(x)
# GPU training script (method one): move model, loss, and every batch to the
# GPU with .cuda(). Requires an available CUDA device.
tudui = Tudui()
tudui = tudui.cuda()

loss_fn = nn.CrossEntropyLoss()  # cross-entropy for 10-way classification
loss_fn = loss_fn.cuda()

learning = 0.01  # learning rate (1e-2)
optimizer = torch.optim.SGD(tudui.parameters(), learning)  # plain SGD

total_train_step = 0  # optimizer steps taken so far
total_test_step = 0   # completed evaluation passes
epoch = 2             # training rounds

writer = SummaryWriter("trainlog")
for i in range(epoch):
    print("-----第 {} 轮训练开始-----".format(i + 1))
    tudui.train()  # fix: set train mode each round
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.cuda()        # batches must live on the same device as the model
        targets = targets.cuda()
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar('train_loss', loss.item(), total_train_step)

    # Evaluate on the test split after every epoch.
    tudui.eval()  # fix: eval mode during testing
    total_test_less = 0
    total_test_accary = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.cuda()
            targets = targets.cuda()
            output = tudui(imgs)
            loss = loss_fn(output, targets)
            total_test_less = total_test_less + loss.item()
            # Fix: .item() keeps the running total a plain int, not a tensor.
            accary = (output.argmax(1) == targets).sum().item()
            total_test_accary = accary + total_test_accary
    print(f"测试损失Loss{total_test_less}")
    print(f"测试准确率accary{total_test_accary/test_data_size}")
    writer.add_scalar('test_loss', total_test_less, total_test_step)
    total_test_step = total_test_step + 1
    torch.save(tudui, f"tudui模型/tudui_{i}.pth")
writer.close()  # fix: flush and close the TensorBoard writer
###2.GPU的训练时间
import torch
import torchvision
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time
# from Pytorch_learn.model import *
# Build the CIFAR10 train/test datasets (auto-download into ./dataset) with a
# ToTensor transform so each sample arrives as a (3, 32, 32) tensor.
train_data = torchvision.datasets.CIFAR10(
    "./dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(
    "./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)

# Split sizes — printed for sanity, and test size feeds the accuracy ratio.
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度:{}".format(train_data_size))
print("测试数据集的长度:{}".format(test_data_size))

# Wrap each split in a DataLoader that yields batches of 64.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
class Tudui(nn.Module):
    """CIFAR10 classifier: 3 x (5x5 conv, 2x2 max-pool), flatten, two linears.

    Input (N, 3, 32, 32) -> output (N, 10) raw class scores (logits).
    """

    def __init__(self):
        super(Tudui, self).__init__()
        layers = [
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),                # spatial 4x4 with 64 channels -> 1024
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        ]
        # The attribute must stay `model1` for checkpoint compatibility.
        self.model1 = nn.Sequential(*layers)

    def forward(self, x):
        # Single sequential pass through the whole pipeline.
        return self.model1(x)
# GPU training script with wall-clock timing: identical to the previous GPU
# script, plus elapsed-time prints every 100 training steps.
tudui = Tudui()
tudui = tudui.cuda()

loss_fn = nn.CrossEntropyLoss()  # cross-entropy loss for 10 classes
loss_fn = loss_fn.cuda()

learning = 0.01  # learning rate (1e-2)
optimizer = torch.optim.SGD(tudui.parameters(), learning)

total_train_step = 0  # optimizer steps taken so far
total_test_step = 0   # completed evaluation passes
epoch = 2             # training rounds

start_time = time.time()  # reference point for the timing prints
writer = SummaryWriter("trainlog")
for i in range(epoch):
    print("-----第 {} 轮训练开始-----".format(i + 1))
    tudui.train()  # fix: switch to train mode each round
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.cuda()
        targets = targets.cuda()
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            endtime = time.time()
            print(endtime - start_time)  # seconds elapsed since training began
            print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar('train_loss', loss.item(), total_train_step)

    # Evaluate on the test split after every epoch.
    tudui.eval()  # fix: eval mode during testing
    total_test_less = 0
    total_test_accary = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.cuda()
            targets = targets.cuda()
            output = tudui(imgs)
            loss = loss_fn(output, targets)
            total_test_less = total_test_less + loss.item()
            # Fix: .item() keeps the accuracy counter a plain int, not a tensor.
            accary = (output.argmax(1) == targets).sum().item()
            total_test_accary = accary + total_test_accary
    print(f"测试损失Loss{total_test_less}")
    print(f"测试准确率accary{total_test_accary/test_data_size}")
    writer.add_scalar('test_loss', total_test_less, total_test_step)
    total_test_step = total_test_step + 1
    torch.save(tudui, f"tudui模型/tudui_{i}.pth")
writer.close()  # fix: the original never closed the SummaryWriter
在本例中,GPU 训练大约比 CPU 快四倍。
可以用 print(torch.cuda.is_available()) 判断 CUDA 是否可用。
3. 利用GPU训练(方式二)
① 电脑上有两块显卡时,可以用 cuda:0、cuda:1 指定使用哪一块。
import torchvision
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time
# Choose the training device: prefer CUDA when available, otherwise CPU.
# Fixed alternatives would be torch.device("cpu"), torch.device("cuda"),
# or torch.device("cuda:0") to pin a specific card.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# The model is defined inline below instead of `from model import *`.
class Tudui(nn.Module):
    """Small CIFAR10 CNN.

    Three 5x5 convolutions (padding 2 preserves spatial size), each followed
    by 2x2 max-pooling, then a flatten and two fully connected layers that
    emit 10 class scores for a (N, 3, 32, 32) input batch.
    """

    def __init__(self):
        super(Tudui, self).__init__()
        # `model1` name retained so existing checkpoints still load.
        self.model1 = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),            # 32x32 -> 16x16
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),            # 16x16 -> 8x8
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),            # 8x8 -> 4x4
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        # Delegate entirely to the sequential stack.
        return self.model1(x)
# Dataset preparation: CIFAR10 train/test splits as tensors under ./dataset.
transform = torchvision.transforms.ToTensor()
train_data = torchvision.datasets.CIFAR10("./dataset", train=True,
                                          transform=transform, download=True)
test_data = torchvision.datasets.CIFAR10("./dataset", train=False,
                                         transform=transform, download=True)

# Print the split sizes; test_data_size is reused for the accuracy ratio.
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度:{}".format(train_data_size))
print("测试数据集的长度:{}".format(test_data_size))

# DataLoaders that yield 64-sample batches from each split.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
import os  # local import: used below to ensure the checkpoint directory exists

# Device-based training script: model, loss, and batches are all moved with
# .to(device), so the same code runs on CPU or GPU.
tudui = Tudui()
tudui = tudui.to(device)  # re-assignment optional for modules, kept for clarity

loss_fn = nn.CrossEntropyLoss()  # cross-entropy for 10-way classification
loss_fn = loss_fn.to(device)

learning = 0.01  # learning rate (1e-2)
optimizer = torch.optim.SGD(tudui.parameters(), learning)  # plain SGD

total_train_step = 0  # optimizer steps taken so far
total_test_step = 0   # completed evaluation passes
epoch = 10            # training rounds

writer = SummaryWriter("logs")
start_time = time.time()
for i in range(epoch):
    print("-----第 {} 轮训练开始-----".format(i + 1))
    tudui.train()  # train mode: enables dropout/BN behavior if present
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.to(device)      # batches must live on the model's device
        targets = targets.to(device)
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time - start_time)  # elapsed seconds every 100 steps
            print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Evaluate on the test split after every epoch.
    tudui.eval()  # eval mode: disables dropout/BN training behavior
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            # Fix: .item() keeps the counter a plain int instead of a 0-dim
            # tensor, so the TensorBoard scalar below receives a float.
            accuracy = (outputs.argmax(1) == targets).sum().item()
            total_accuracy = total_accuracy + accuracy
    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
    total_test_step = total_test_step + 1
    # Fix: torch.save raises if ./model does not exist yet.
    os.makedirs("./model", exist_ok=True)
    torch.save(tudui, "./model/tudui_{}.pth".format(i))
    # Alternative: torch.save(tudui.state_dict(), "tudui_{}.pth".format(i))
    print("模型已保存")
writer.close()
完整模型验证(测试)套路
1. 验证狗是否识别
① 完整的模型验证(测试,demo)套路,利用已经训练好的模型,然后给它提供输入。
import torchvision
from PIL import Image
from torch import nn
import torch
# Load the test picture and convert it into the network's input format.
image_path = "imgs/dog.png"
image = Image.open(image_path)      # PIL image
image = image.convert("RGB")        # drop alpha: 4-channel RGBA -> 3-channel RGB
print(image)
# Resize to CIFAR10's 32x32 and turn the PIL image into a tensor.
transform = torchvision.transforms.Compose([torchvision.transforms.Resize((32,32)),
                                            torchvision.transforms.ToTensor()])
image = transform(image)
print(image.shape)
class Tudui(nn.Module):
    """Network definition matching the trained checkpoint: must mirror the
    training-time architecture so torch.load can reconstruct the model."""

    def __init__(self):
        super(Tudui, self).__init__()
        # Same layer stack as training; `model1` name kept for compatibility.
        self.model1 = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        # (N, 3, 32, 32) in, (N, 10) scores out.
        return self.model1(x)
# Load the trained model; map_location maps GPU-trained weights onto the CPU.
# NOTE(review): on newer torch versions loading a full pickled module may also
# need weights_only=False — confirm against the installed torch version.
model = torch.load("model/tudui_29.pth",map_location=torch.device('cpu'))
print(model)
# Reshape to a 4-D batch (N, C, H, W) as the network expects.
image = torch.reshape(image,(1,3,32,32))
model.eval()  # inference mode
with torch.no_grad():  # skip gradient bookkeeping to save memory
    output = model(image)  # fix: the original ran the forward pass twice
print(output)
print(output.argmax(1))  # index of the highest score = predicted class
2. 验证飞机是否识别
import torchvision
from PIL import Image
from torch import nn
import torch
# Read the airplane picture and prepare it as network input.
image_path = "Pytorch_learn/images/ply.png"
image = Image.open(image_path)   # PIL image
image = image.convert("RGB")     # RGBA (4 channels) -> RGB (3 channels)
print(image)
# Shrink to 32x32 and convert to a tensor, matching CIFAR10 inputs.
transform = torchvision.transforms.Compose([torchvision.transforms.Resize((32, 32)),
                                            torchvision.transforms.ToTensor()])
image = transform(image)
print(image.shape)
class Tudui(nn.Module):
    """Same architecture as the trained model — required so the pickled
    checkpoint can be deserialized by torch.load."""

    def __init__(self):
        super(Tudui, self).__init__()
        blocks = [
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        ]
        # Keep the attribute name `model1` for checkpoint compatibility.
        self.model1 = nn.Sequential(*blocks)

    def forward(self, x):
        # Map a (N, 3, 32, 32) batch to (N, 10) class scores.
        return self.model1(x)
# Load the GPU-trained checkpoint onto the CPU via map_location.
# NOTE(review): newer torch versions may require weights_only=False when
# loading a full pickled module — confirm against the installed version.
model = torch.load("Pytorch_learn/tudui模型/tudui_29.pth", map_location=torch.device('cpu'))
print(model)
# Add the batch dimension: the network expects (N, C, H, W).
image = torch.reshape(image, (1, 3, 32, 32))
model.eval()  # inference mode
with torch.no_grad():  # no gradient tracking needed for inference
    output = model(image)  # fix: the original ran the forward pass twice
print(output)
print(output.argmax(1))  # predicted class index (highest score)