2023.4.8 PyTorch learning notes (pooling layers, non-linear activation layers, linear layers, building networks with Sequential and sanity-checking the model, loss functions and backpropagation, applying a loss function inside a network, using an optimizer)

Pooling layers

Non-linear activation layers

Linear layers

Building a network, how Sequential makes that convenient, and how to sanity-check the model

Loss functions and backpropagation

Applying a loss function inside a network

Optimizers

Using the VGG16 model and modifying its layers

import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d, ReLU, Sigmoid, Linear, Conv2d, Flatten, Sequential, L1Loss
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

'''
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]], dtype=torch.float32)
input = torch.reshape(input,(-1,1,5,5))

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        output = self.maxpool1(input)
        return output

tudui = Tudui()
output = tudui(input)
print(output)
'''
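# Added note (not in the original file): a quick check of the pooling arithmetic above. With
# MaxPool2d's stride defaulting to kernel_size, ceil_mode=True keeps the partial windows at the
# border (giving a 2x2 output) while ceil_mode=False drops them (giving a 1x1 output).
'''
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]], dtype=torch.float32)
input = torch.reshape(input, (-1, 1, 5, 5))
print(MaxPool2d(kernel_size=3, ceil_mode=True)(input))    # values: [[2., 3.], [5., 1.]]
print(MaxPool2d(kernel_size=3, ceil_mode=False)(input))   # values: [[2.]]
'''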

'''Pooling layer
dataset = torchvision.datasets.CIFAR10("dataset", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=False)

    def forward(self, input):
        output = self.maxpool1(input)
        return output

tudui = Tudui()

writer = SummaryWriter("logs_maxpool")
step = 0

for data in dataloader:     # iterate over every batch in the dataloader
    imgs, targets = data
    writer.add_images("input", imgs, step)
    output = tudui(imgs)
    writer.add_images("output", output, step)
    step = step + 1

writer.close()
'''
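# Added shape check (my own note): CIFAR10 images are 3x32x32, and with kernel_size=3, the default
# stride of 3 and ceil_mode=False, each 32x32 plane is pooled down to 10x10, which is why the
# "output" images in TensorBoard look like low-resolution versions of the inputs.
'''
print(MaxPool2d(kernel_size=3, ceil_mode=False)(torch.ones((64, 3, 32, 32))).shape)   # torch.Size([64, 3, 10, 10])
'''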

'''Non-linear activation layers
input = torch.tensor([[1, -0.5],
                      [-1, 3]])

input = torch.reshape(input, (-1, 1, 2, 2))
print(input.shape)

dataset = torchvision.datasets.CIFAR10("dataset", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())

dataloader = DataLoader(dataset, batch_size=64)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.relu1 = ReLU()
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        #output = self.relu1(input)
        output = self.sigmoid1(input)
        return output

tudui = Tudui()
output = tudui(input)
print(output)

writer = SummaryWriter("logs_relu")
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, global_step=step)
    output = tudui(imgs)
    writer.add_images("output", output, step)
    step += 1
    
writer.close()
'''
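# Added note (values rounded, computed by hand): on the small 2x2 input above, ReLU zeroes the
# negative entries while Sigmoid squashes every value into (0, 1).
'''
x = torch.tensor([[1, -0.5],
                  [-1, 3]])
print(ReLU()(x))      # values: [[1., 0.], [0., 3.]]
print(Sigmoid()(x))   # values, approximately: [[0.7311, 0.3775], [0.2689, 0.9526]]
'''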

'''Linear layer
dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=64)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.linear1 = Linear(196608, 10)

    def forward(self, input):
        output = self.linear1(input)
        return output

tudui = Tudui()

for data in dataloader:
    imgs, targets = data
    print(imgs.shape)
    output = torch.flatten(imgs)   # torch.flatten stretches the whole batch into a single 1-D vector
    print(output.shape)
    output = tudui(output)
    print(output.shape)
'''
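# Added note: where 196608 comes from. Flattening one complete batch of 64 CIFAR10 images gives
# 64 * 3 * 32 * 32 = 196608 values, so Linear(196608, 10) only fits a full batch; that is why
# drop_last=True is set on the DataLoader above.
'''
print(64 * 3 * 32 * 32)                                   # 196608
print(torch.flatten(torch.ones((64, 3, 32, 32))).shape)   # torch.Size([196608])
'''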

'''
# Building a network, how Sequential makes that convenient, and how to sanity-check the model
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x

tudui = Tudui()
print(tudui)
input = torch.ones((64, 3, 32, 32))   # pass an all-ones dummy batch through the network to check that the layer shapes line up
output = tudui(input)
print(output.shape)

writer = SummaryWriter("logs_seq")   #用SummaryWriter输出模型结构验证搭建的模型好坏
writer.add_graph(tudui, input)

writer.close()
'''
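# Added shape trace (assumes 3x32x32 CIFAR10 inputs and the Tudui class above uncommented): each
# Conv2d uses a 5x5 kernel with padding=2, so it keeps the spatial size, while each MaxPool2d(2)
# halves it, 32 -> 16 -> 8 -> 4. Flattening 64 channels of 4x4 gives 64 * 4 * 4 = 1024, which is
# why the first Linear layer is Linear(1024, 64).
'''
x = torch.ones((1, 3, 32, 32))
for layer in Tudui().model1:
    x = layer(x)
    print(layer.__class__.__name__, x.shape)   # the Flatten line should show torch.Size([1, 1024])
'''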


'''
# Loss functions and backpropagation
inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)

inputs = torch.reshape(inputs, (1, 1, 1, 3))
targets = torch.reshape(targets, (1, 1, 1, 3))

loss = L1Loss(reduction='sum')    # L1 loss, summing the absolute errors instead of averaging them
result = loss(inputs, targets)

loss_mse = nn.MSELoss()   # mean squared error loss
result_mse = loss_mse(inputs, targets)

print(result)
print(result_mse)


x = torch.tensor([0.1, 0.2, 0.3])
y = torch.tensor([1])
x = torch.reshape(x, (1, 3))
loss_cross = nn.CrossEntropyLoss()   # cross-entropy loss
result_cross = loss_cross(x, y)
print(result_cross)
'''
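# Added note, expected values for the three losses above (computed by hand, rounded):
#   L1Loss(reduction='sum'):  |1-1| + |2-2| + |5-3| = 2.0
#   MSELoss (mean):           (0 + 0 + 2**2) / 3 ≈ 1.3333
#   CrossEntropyLoss:         -x[1] + log(exp(0.1) + exp(0.2) + exp(0.3)) ≈ 1.1019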

'''
# Applying a loss function inside a network
dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=1)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x

loss = nn.CrossEntropyLoss()
tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)
    print("ok")
'''
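# Added follow-up sketch (my addition, meant to run inside or right after the loop above): calling
# backward() on the loss is what fills the .grad attribute of every parameter; the optimizer in the
# next section then uses those gradients to update the weights.
'''
result_loss.backward()
print(tudui.model1[0].weight.grad.shape)   # gradients have the same shape as the first conv layer's weights
'''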

'''
# Optimizer
dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=1)   # load the dataset with a DataLoader

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss = nn.CrossEntropyLoss()
tudui = Tudui()
optim = torch.optim.SGD(tudui.parameters(), lr=0.01)   # stochastic gradient descent optimizer
scheduler = StepLR(optim, step_size=5, gamma=0.1)
# StepLR is a PyTorch learning-rate scheduler that adjusts the learning rate as training progresses:
# every step_size calls to scheduler.step() (typically one call per epoch) it multiplies the current
# learning rate by gamma. Here the rate starts at 0.01 and is multiplied by 0.1 every 5 epochs, so
# later epochs take smaller, more careful optimization steps.

for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:   # one full pass over the dataset per epoch
        imgs, targets = data
        outputs = tudui(imgs)
        result_loss = loss(outputs, targets)
        optim.zero_grad()            # reset the gradients left over from the previous step
        result_loss.backward()       # backpropagate to compute fresh gradients
        optim.step()                 # update the parameters using those gradients
        running_loss = running_loss + result_loss.item()   # .item() keeps a plain float instead of a graph-tracking tensor
    scheduler.step()   # decay the learning rate once per epoch, not once per batch
    print(running_loss)
'''
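# Added note: to watch the StepLR schedule in action, the current learning rate can be read back
# after each scheduler.step(); get_last_lr() is available on recent PyTorch versions.
'''
print(optim.param_groups[0]["lr"])   # learning rate currently used by SGD
print(scheduler.get_last_lr())       # e.g. [0.001] after the first decay (0.01 * gamma)
'''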

'''
# Using the VGG16 model and modifying its layers
vgg16_false = torchvision.models.vgg16(pretrained=False)   # randomly initialized weights, so nothing needs to be downloaded
#vgg16_true = torchvision.models.vgg16(pretrained=True)

print(vgg16_false)

train_data = torchvision.datasets.CIFAR10('dataset', train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)

vgg16_false.classifier.add_module('add_linear', nn.Linear(1000, 10))    # append an extra linear layer to the classifier so the final output has 10 classes
print(vgg16_false)


vgg16_false.classifier[6] = nn.Linear(4096, 10)     # alternatively, replace the classifier's last linear layer (index 6) so it outputs 10 classes directly
print(vgg16_false)
'''
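# Added note (standard torchvision VGG16 layout): vgg16.classifier is a Sequential of 7 modules,
# indexed 0 through 6, and its last entry (index 6) is the original Linear(4096, 1000) output layer,
# which is why classifier[6] is the one replaced above to get 10 output classes.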
