PyTorch | Study Notes 3

I. Basics

1.Dataset

A Dataset provides a way to obtain each data sample and its label, and tells us how many samples there are in total.

from torch.utils.data import Dataset
from PIL import Image
import os

class MyData(Dataset):
  def __init__(self,root_dir,label_dir):
    # root_dir is the dataset root; label_dir is the sub-folder whose name is also the label
    self.root_dir = root_dir
    self.label_dir = label_dir
    self.path = os.path.join(self.root_dir,self.label_dir)
    self.img_path = os.listdir(self.path)

  def __getitem__(self,idx):
    # return the idx-th image and its label
    img_name = self.img_path[idx]
    img_item_path = os.path.join(self.root_dir,self.label_dir,img_name)
    img = Image.open(img_item_path)
    label = self.label_dir
    return img,label

  def __len__(self):
    return len(self.img_path)

root_dir = "..."
ants_label_dir = "ants"
bees_label_dir = "bees"
ants_dataset = MyData(root_dir,ants_label_dir)
bees_dataset = MyData(root_dir,bees_label_dir)

train_dataset = ants_dataset + bees_dataset  # '+' concatenates the two datasets (ConcatDataset)

ants_dataset[0]
img,label = ants_dataset[0]
img,label = bees_dataset[1]
img,label = train_dataset[123]

img.show()

2.DataLoader

A DataLoader batches samples from a Dataset and feeds them to the network in the form it needs.

import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

test_data = torchvision.datasets.CIFAR10("./dataset",train=False,transform=torchvision.transforms.ToTensor())

# batch_size=64: samples per batch; shuffle=True: reshuffle every epoch; drop_last=False: keep the final incomplete batch
test_loader = DataLoader(dataset=test_data,batch_size=64,shuffle=True,num_workers=0,drop_last=False)

# The first image and its target in the test dataset
img,target = test_data[0]
print(img.shape)
print(target)

writer = SummaryWriter("dataloader")
for epoch in range(2):
  step = 0
  for data in test_loader:
    imgs,targets = data
    print(imgs.shape)
    print(targets)
    writer.add_images("test_data",imgs,step)
    step = step + 1

writer.close()

II. Transforms

transforms.py works like a toolbox.

Create a concrete tool: tool = transforms.ToTensor()

Apply it to get the output: result = tool(input)
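
The tools are callable objects: creating one instantiates a class, and applying it invokes its __call__ method. A minimal sketch of that pattern (MyTool is a made-up illustration, not a torchvision class):

class MyTool:
  def __call__(self, input):
    # applying the tool just runs this method
    return input + 1

tool = MyTool()    # create the tool
result = tool(2)   # apply it: calls __call__, so result == 3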

from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

'''Python usage → tensor data type
Use transforms.ToTensor to look at two questions:
1. How are transforms used?
2. Why do we need the tensor data type?'''

img_path = "..."
img = Image.open(img_path)

writer = SummaryWriter("logs")

# 1. How are transforms used?
tensor_trans = transforms.ToTensor()
tensor_img = tensor_trans(img)

writer.add_image("Tensor_img",tensor_img)

writer.close()
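
As for question 2: a tensor wraps the image data together with the attributes training needs (dtype, device, requires_grad, ...), and it is what nn layers and writer.add_image expect. A quick way to inspect this, assuming tensor_img from the code above:

print(tensor_img.shape)          # channels-first, e.g. torch.Size([3, H, W])
print(tensor_img.dtype)          # torch.float32, values scaled to [0, 1]
print(tensor_img.device)         # cpu by default
print(tensor_img.requires_grad)  # False until gradients are enabled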

Common transforms

from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms


writer = SummaryWriter("logs")
img = Image.open("...")
print(img)

# Using ToTensor
trans_totensor = transforms.ToTensor()
img_tensor = trans_totensor(img)
writer.add_image("ToTensor",img_tensor)

# Normalize: output[c] = (input[c] - mean[c]) / std[c]
# With mean=std=0.5 per channel, values in [0, 1] are mapped to [-1, 1]
print(img_tensor[0][0][0])
trans_norm = transforms.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])
img_norm = trans_norm(img_tensor)
print(img_norm[0][0][0])
writer.add_image("Normalize",img_norm)

# Resize to a fixed (512, 512) output
print(img.size)
trans_resize = transforms.Resize((512,512))
# img PIL → resize → img_resize PIL
img_resize = trans_resize(img)
# img_resize PIL → totensor → img_resize tensor
img_resize = trans_totensor(img_resize)
writer.add_image("resize",img_resize,0)
print(img_resize)

# Compose - resize - 2 (a single int keeps the aspect ratio: the shorter side becomes 512)
trans_resize_2 = transforms.Resize(512)
# PIL → PIL → tensor
trans_compose = transforms.Compose([trans_resize_2,trans_totensor])
img_resize_2 = trans_compose(img)
writer.add_image("resize",img_resize_2,1)

#RandomCrop
trans_random = transforms.RandomCrop(512)
trans_compose_2 = transforms.Compose([trans_random,trans_totensor])
for i in range(10):
  img_crop = trans_compose_2(img)
  writer.add_image("RandomCrop",img_crop,i)

writer.close()

III. Neural Networks

1. Using nn.Module

from torch import nn

class Tudui(nn.Module):
  def __init__(self):
    super().__init__()

  def forward(self,input):
    output = input + 1
    return output
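
A quick check of the module above; calling the instance invokes forward through nn.Module.__call__ (the input value is just an illustration):

import torch

tudui = Tudui()
x = torch.tensor(1.0)
output = tudui(x)   # runs forward(x), so output is tensor(2.)
print(output)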

2. Convolutional Layers

import torch
import torchvision
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("...",train=False,transform=torchvision.transforms.ToTensor(),download=True)
dataloader = DataLoader(dataset,batch_size=64)

class Tudui(nn.Module):
  def __init__(self):
    super(Tudui,self).__init__()
    self.conv1 = Conv2d(in_channels=3,out_channels=6,kernel_size=3,stride=1,padding=0)

  def forward(self,x):
    x = self.conv1(x)
    return x

tudui = Tudui()

writer = SummaryWriter("...")

step = 0
for data in dataloader:
  imgs,targets = data
  output = tudui(imgs)
  print(imgs.shape)
  print(output.shape)
  # torch.Size([64, 3, 32, 32])
  writer.add_images("input",imgs,step)
  # torch.Size([64, 6, 30, 30]) → reshape to [xxx, 3, 30, 30] so add_images can display it
  output = torch.reshape(output,(-1,3,30,30))
  writer.add_images("output",output,step)
  step = step + 1

writer.close()
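
The 30x30 output above follows from the convolution output-size formula: H_out = (H_in + 2*padding - kernel_size)/stride + 1 = (32 + 0 - 3)/1 + 1 = 30. The layer also raises the channel count from 3 to 6, which is why the reshape folds the output back to 3 channels for display.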

3. Pooling Layers

import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter


dataset = torchvision.datasets.CIFAR10("...",train=False,download=True,transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset,batch_size=64)

'''
input = torch.tensor([[1,2,0,3,1],
                      [0,1,2,3,1],
                      [1,2,1,0,0],
                      [5,2,3,1,1],
                      [2,1,0,1,1]],dtype=torch.float32)

input = torch.reshape(input,(-1,1,5,5))
print(input.shape)
'''

class Tudui(nn.Module):
  def __init__(self):
    super(Tudui,self).__init__()
    # ceil_mode=True keeps partial windows at the border instead of dropping them
    self.maxpool1 = MaxPool2d(kernel_size=3,ceil_mode=True)

  def forward(self,input):
    output = self.maxpool1(input)
    return output

tudui = Tudui()
# output = tudui(input)  # pairs with the 5x5 example commented out above

writer = SummaryWriter("../logs_maxpool")
step = 0

for data in dataloader:
  imgs,target = data
  writer.add_images("input",imgs,step)
  output = tudui(imgs)
  writer.add_images("output",output,step)
  step = step + 1

writer.close()
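
As a small check of ceil_mode, the commented-out 5x5 tensor from above can be pushed through the same module (a sketch, re-using the tudui instance defined in this section):

example = torch.tensor([[1,2,0,3,1],
                        [0,1,2,3,1],
                        [1,2,1,0,0],
                        [5,2,3,1,1],
                        [2,1,0,1,1]],dtype=torch.float32)
example = torch.reshape(example,(-1,1,5,5))
print(tudui(example))
# With kernel_size=3 the stride also defaults to 3, so the 5x5 input splits into one full
# window and three partial border windows; ceil_mode=True keeps the partial ones, giving
# [[2., 3.], [5., 1.]], whereas ceil_mode=False would give just [[2.]]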

4. Non-linear Activation Layers

import torch
import torchvision
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

input = torch.tensor([[1,-0.5],
                      [-1,3]])


input = torch.reshape(input,(-1,1,2,2))
print(input.shape)

dataset = torchvision.datasets.CIFAR10("...",train=False,download=True,transform=torchvision.transforms.ToTensor())

dataloader = DataLoader(dataset,batch_size=64)

class Tudui(nn.Module):
  def __init__(self):
    super(Tudui,self).__init__()
    self.relu1 = ReLU()
    self.sigmoid1 = Sigmoid()

  def forward(self,input):
    output = self.sigmoid1(input)
    return output

tudui = Tudui()
output = tudui(input)

writer = SummaryWriter("...")
step = 0
for data in dataloader:
  imgs,targets = data
  writer.add_images("input",imgs,global_step=step)
  output = tudui(imgs)
  writer.add_images("output",output,step)
  step += 1

writer.close()
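
For the 2x2 example tensor, ReLU would zero the negative entries, giving [[1, 0], [0, 3]], while the Sigmoid used in forward maps each value x to 1/(1 + e^(-x)), giving roughly [[0.73, 0.38], [0.27, 0.95]], so every output lands in (0, 1).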

5. Linear and Other Layers

import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("...",train=False,transform=torchvision.transforms.ToTensor(),download=True)

# drop_last=True so every batch flattens to the same number of features expected by the Linear layer
dataloader = DataLoader(dataset,batch_size=64,drop_last=True)

class Tudui(nn.Module):
  def __init__(self):
    super(Tudui,self).__init__()
    # 196608 = 64 * 3 * 32 * 32, the length of a flattened batch
    self.linear1 = Linear(196608,10)

  def forward(self,input):
    output = self.linear1(input)
    return output

tudui = Tudui()

for data in dataloader:
  imgs,targets = data
  print(imgs.shape)
  #output = torch.reshape(imgs,(1,1,1,-1))
  output = torch.flatten(imgs)
  print(output.shape)
  output = tudui(output)
  print(output.shape)
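
The shapes printed in the loop work out as follows: each batch is torch.Size([64, 3, 32, 32]); torch.flatten collapses it into torch.Size([196608]), since 64 * 3 * 32 * 32 = 196608; the Linear layer then maps that vector to torch.Size([10]). The commented-out reshape to (1, 1, 1, -1) would instead give torch.Size([1, 1, 1, 196608]).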

IV. Sequential

import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter

class Tudui(nn.Module):
  def __init__(self):
    super(Tudui,self).__init__()

    '''
    self.conv1 = Conv2d(3,32,5,padding=2)
    self.maxpool1 = MaxPool2d(2)
    self.conv2 = Conv2d(32,32,5,padding=2)
    self.maxpool2 = MaxPool2d(2)
    self.conv3 = Conv2d(32,64,5,padding=2)
    self.maxpool3 = MaxPool2d(2)
    self.flatten = Flatten()
    self.linear1 = Linear(1024,64)
    self.linear2 = Linear(64,10)'''

    self.model1 = Sequential(
      Conv2d(3,32,5,padding=2),
      MaxPool2d(2),
      Conv2d(32,32,5,padding=2),
      MaxPool2d(2),
      Conv2d(32,64,5,padding=2),
      MaxPool2d(2),
      Flatten(),
      Linear(1024,64),
      Linear(64,10)
    )

  def forward(self,x):
    '''
    x = self.conv1(x)
    x = self.maxpool1(x)
    x = self.conv2(x)
    x = self.maxpool2(x)
    x = self.conv3(x)
    x = self.maxpool3(x)
    x = self.flatten(x)
    x = self.linear1(x)
    x = self.linear2(x)'''

    x = self.model1(x)
    return x

tudui = Tudui()
print(tudui)
input = torch.ones((64,3,32,32))
output = tudui(input)
print(output.shape)

writer = SummaryWriter("...")
writer.add_graph(tudui,input)
writer.close()
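
Why Linear(1024, 64) works: each Conv2d uses kernel size 5 with padding 2, which preserves the 32x32 spatial size ((32 + 2*2 - 5)/1 + 1 = 32), and each MaxPool2d(2) halves it, so the feature map shrinks 32 → 16 → 8 → 4. The tensor entering Flatten is therefore [64, 64, 4, 4], Flatten yields 64 * 4 * 4 = 1024 features per sample, and the final print shows torch.Size([64, 10]).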

V. Loss Functions

import torch
from torch import nn
from torch.nn import L1Loss

inputs = torch.tensor([1,2,3],dtype=torch.float32)
targets = torch.tensor([1,2,5],dtype=torch.float32)

inputs = torch.reshape(inputs,(1,1,1,3))
targets = torch.reshape(targets,(1,1,1,3))

loss = L1Loss(reduction='sum')
result = loss(inputs,targets)

loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs,targets)

print(result)
print(result_mse)

x = torch.tensor([0.1,0.2,0.3])
y = torch.tensor([1])
x = torch.reshape(x,(1,3))
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x,y)
print(result_cross)
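
Checking by hand: with reduction='sum', the L1 loss is |1-1| + |2-2| + |5-3| = 2, and the (mean) MSE loss is (0 + 0 + 4) / 3 ≈ 1.333. CrossEntropyLoss computes -x[class] + log(sum_j exp(x[j])), so for x = [0.1, 0.2, 0.3] and target class 1 it is -0.2 + log(e^0.1 + e^0.2 + e^0.3) ≈ 1.10, which matches what result_cross prints.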

VI. Model Training

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from model import *

# Prepare the datasets
train_data = torchvision.datasets.CIFAR10(root="...",train=True,transform=torchvision.transforms.ToTensor(),download=True)
test_data = torchvision.datasets.CIFAR10(root="...",train=False,transform=torchvision.transforms.ToTensor(),download=True)

train_data_size = len(train_data)
test_data_size = len(test_data)
print("Length of the training dataset: {}".format(train_data_size))
print("Length of the test dataset: {}".format(test_data_size))

# Load the datasets with DataLoader
train_dataloader = DataLoader(train_data,batch_size=64)
test_dataloader = DataLoader(test_data,batch_size=64)

# Create the network model
tudui = Tudui()

# Loss function
loss_fn = nn.CrossEntropyLoss()

# Optimizer
# 1e-2 = 1 * 10^(-2) = 1/100 = 0.01
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(),lr=learning_rate)

# Some bookkeeping for training
# number of training steps
total_train_step = 0
# number of test evaluations
total_test_step = 0
# number of epochs
epoch = 10

# Add TensorBoard logging
writer = SummaryWriter("...")

for i in range(epoch):
  print("...epoch {} starts...".format(i+1))

  # Training loop
  for data in train_dataloader:
    imgs,targets = data
    outputs = tudui(imgs)
    loss = loss_fn(outputs,targets)
    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    total_train_step = total_train_step + 1
    if total_train_step % 100 == 0:
      print("training step: {}, loss: {}".format(total_train_step,loss.item()))
      writer.add_scalar("train_loss",loss.item(),total_train_step)
  
  # Evaluation loop
  total_test_loss = 0
  with torch.no_grad():
    for data in test_dataloader:
      imgs,targets = data
      outputs = tudui(imgs)
      loss = loss_fn(outputs,targets)
      total_test_loss = total_test_loss + loss.item()
  print("loss over the whole test set: {}".format(total_test_loss))
  writer.add_scalar("test_loss",total_test_loss,total_test_step)
  total_test_step = total_test_step + 1

  torch.save(tudui,"tudui_{}.pth".format(i))
  print("model saved")

writer.close()
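
A side note on the torch.save call above: saving the whole module means torch.load later needs the Tudui class definition available. A commonly used alternative (a sketch, keeping the same file name pattern) is to save only the parameters:

# Save just the weights (the state dict) ...
torch.save(tudui.state_dict(),"tudui_{}.pth".format(i))
# ... and later load them into a freshly constructed model
model = Tudui()
model.load_state_dict(torch.load("tudui_{}.pth".format(i)))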
    

model.py: code for building the network

import torch
from torch import nn

class Tudui(nn.Module):
  def __init__(self):
    super(Tudui,self).__init__()
    self.model = nn.Sequential(
      nn.Conv2d(3,32,5,1,2),
      nn.MaxPool2d(2),
      nn.Conv2d(32,32,5,1,2),
      nn.MaxPool2d(2),
      nn.Conv2d(32,64,5,1,2),
      nn.MaxPool2d(2),
      nn.Flatten(),
      nn.Linear(64*4*4,64),
      nn.Linear(64,10)
    )
    
  def forward(self,x):
    x = self.model(x)
    return x

if __name__ == '__main__':
  tudui = Tudui()
  input = torch.ones((64,3,32,32))
  output = tudui(input)
  print(output.shape)  # expected: torch.Size([64, 10])
