神经网络的搭建

模块Module

为所有神经网络提供基本类

继承nn.Module类

1.初始化函数

2.前向传播函数(x先进行一个卷积1,再进行relu激活函数非线性处理。再进行一个卷积一个非线性处理)

搭建神经网络

#神经网络的搭建
import torch
from torch import nn

class Tudui(nn.Module):
    """Toy module: forward simply adds 1 to its input tensor."""

    def __init__(self):
        # Always call the parent constructor before doing anything else.
        super(Tudui, self).__init__()

    def forward(self, input):
        # Minimal example of a forward pass: output = input + 1.
        output = input + 1
        return output

tudui = Tudui()
x = torch.tensor(1.0)  # fixed: was `torch,tensor(1.0)` — a comma typo that built a tuple and raised NameError
output = tudui(x)      # calling the module invokes forward()
print(output)          # tensor(2.)
#卷积层convolution layers
#nn.conv2d   二维的(图片)
import torch
import torch.nn.functional as F

# Hand-worked 2-D convolution demo: a 5x5 input convolved with a 3x3 kernel.
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])

kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])

# F.conv2d requires 4-D tensors of shape (batch_size, channels, height, width),
# so lift both 2-D tensors to (1, 1, H, W) before convolving.
input = torch.reshape(input, (1, 1, 5, 5))
kernel = torch.reshape(kernel, (1, 1, 3, 3))

print(input.shape)   # torch.Size([1, 1, 5, 5])
print(kernel.shape)  # torch.Size([1, 1, 3, 3])

# Stride 1: the kernel slides one cell at a time, giving a 3x3 output.
output = F.conv2d(input, kernel, stride=1)
print(output)

输出:

torch.Size([1, 1, 5, 5])
torch.Size([1, 1, 3, 3])
tensor([[[[10, 12, 12],
          [18, 16, 16],
          [13,  9,  3]]]])

 改变步长为2

# Same convolution with stride 2: the window jumps two cells per step,
# so the 5x5 input yields a 2x2 output.
output2 = F.conv2d(input, kernel, stride=2)
print(output2)
输出:
tensor([[[[10, 12],
          [13,  3]]]])

padding填充上下左右

# padding=1 zero-pads one ring around the input on all four sides,
# so a 3x3 kernel at stride 1 produces a full-size 5x5 output.
output3 = F.conv2d(input, kernel, stride=1, padding=1)
print(output3)

 输出:

tensor([[[[ 1,  3,  4, 10,  8],
          [ 5, 10, 12, 12,  6],
          [ 7, 18, 16, 16,  8],
          [11, 13,  9,  3,  4],
          [14, 13,  9,  7,  4]]]])

建立一个简单的神经网络模型

卷积层:

import torch
import torchvision
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter

# CIFAR-10 test split read from a local directory; download=False assumes the
# archive already exists on disk — TODO confirm the path on this machine.
dataset = torchvision.datasets.CIFAR10(root="E:\\notebookpytorch\\pyTorch学习\\NN\\data",
                                       train = False,transform = torchvision.transforms.ToTensor(),download = False)

dataloader = DataLoader(dataset,batch_size = 64)  # 64 images per batch

class Tudui(nn.Module):
    """Minimal one-layer CNN: a single 3x3 convolution, 3 -> 6 channels."""

    def __init__(self):
        super(Tudui, self).__init__()
        # padding=0 and stride=1, so a 32x32 image comes out 30x30.
        self.conv1 = Conv2d(in_channels=3, out_channels=6,
                            kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        return self.conv1(x)

    
tudui = Tudui()
# print(tudui) would show:
# Tudui(
#   (conv1): Conv2d(3, 6, kernel_size=(3, 3), stride=(1, 1))
# )

writer = SummaryWriter("logs")

step = 0
for data in dataloader:    # iterate over every batch in the dataloader
    imgs, targets = data
    output = tudui(imgs)
    print(imgs.shape)      # torch.Size([64, 3, 32, 32]): batch_size=64, 3 channels, 32x32
    print(output.shape)    # torch.Size([64, 6, 30, 30]) after the conv layer

    writer.add_images("input", imgs, step)

    # add_images renders 3-channel images, so fold the 6 output channels into
    # extra batch entries: [64, 6, 30, 30] -> [128, 3, 30, 30] (-1 infers the batch dim).
    output = torch.reshape(output, (-1, 3, 30, 30))
    writer.add_images("output", output, step)
    step = step + 1

writer.close()  # was missing — without it the event file may stay unflushed (cf. the pooling example)
    

在黑框框中找网址查看卷积对比的结果

conda activate pytorch
tensorboard --logdir=E:\notebookpytorch\pyTorch学习\NN\logs

 在浏览器中复制网址查看结果

 注意:若已在电脑中下载数据集,使用时直接将数据存在的路径放在root下。并且download=False不让它重新下载即可。

若未下载数据集,则可在root下写入想要存放数据集的位置。download=True让它下载即可。

train=False代表使用测试集而不是训练集。 

# Repeated for reference: load the local CIFAR-10 test split without re-downloading.
dataset = torchvision.datasets.CIFAR10(root="E:\\notebookpytorch\\pyTorch学习\\NN\\data",
                                       train = False,transform = torchvision.transforms.ToTensor(),download = False)

池化层

#池化层:保留特征但是将数据量减小
#dilation空洞卷积
#ceil为True即会使用ceil模式而不是floor模式(上取整和下取整)
#Input和Output必须是4维的。(N, C, Hin, Win)(N, C, Hout, Wout) 
#batch_size=-1表示根据其他设置,自动计算。channel,高,宽

1.简单的input

import torch
from torch import nn
from torch.nn import MaxPool2d

# 5x5 toy input; MaxPool2d rejects integer tensors, hence dtype=torch.float32.
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]], dtype=torch.float32)

# print(input.shape)  # torch.Size([5, 5])

# Pooling layers want 4-D input (N, C, H, W); -1 lets torch infer the batch dim.
input = torch.reshape(input, (-1, 1, 5, 5))
# print(input.shape)  # torch.Size([1, 1, 5, 5])

class Tudui(nn.Module):
    """Wraps a single 3x3 max-pool; ceil_mode=True keeps partial edge windows."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        return self.maxpool1(input)

tudui = Tudui()
output = tudui(input)
print(output)   # NOTE: integer input raises here — the dtype=torch.float32 on the input above is required.

输出:

tensor([[[[2., 3.],
          [5., 1.]]]])

数据集

import torch
import torchvision  # was missing: datasets.CIFAR10 below would raise NameError without it
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter

# CIFAR-10 test split from a local path (download=False: data already on disk).
dataset = torchvision.datasets.CIFAR10(root="E:\\notebookpytorch\\pyTorch学习\\NN\\data",
                                       train = False,transform = torchvision.transforms.ToTensor(),download = False)

dataloader = DataLoader(dataset,batch_size=64)


class Tudui(nn.Module):
    """Single max-pooling layer: 3x3 window, ceil mode."""

    def __init__(self):
        super(Tudui, self).__init__()
        # ceil_mode=True: a window hanging off the edge still produces an output.
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        output = self.maxpool1(input)
        return output

tudui = Tudui()

# Log input/output image grids to TensorBoard.
writer = SummaryWriter("logs_maxpool")
step = 0

for data in dataloader:
    imgs,targets = data
    writer.add_images("input",imgs,step)
    output = tudui(imgs)    # pooling keeps the channel count, so add_images can render the result directly
    writer.add_images("output",output,step)
    step = step + 1
    
writer.close()


池化结果:

非线性激活函数Relu

#非线性激活:激活函数常用 RELU,Sigmoid 

import torch
from torch import nn
from torch.nn import ReLU

# 2x2 example holding both positive and negative values.
input = torch.tensor([[1, -0.5],
                      [-1, 3]])

# nn layers expect batched 4-D input (N, C, H, W); -1 infers the batch dim.
input = torch.reshape(input, (-1, 1, 2, 2))
print(input.shape)

class Tudui(nn.Module):
    """Applies ReLU element-wise: negatives become 0, positives pass through."""

    def __init__(self):
        super(Tudui, self).__init__()
        # inplace=False (the default) leaves the input tensor untouched.
        self.relu1 = ReLU()

    def forward(self, input):
        return self.relu1(input)
    
tudui = Tudui()
output = tudui(input)
print(output)  # expected: tensor([[[[1., 0.], [0., 3.]]]]) — negatives clipped to 0

#输出:relu激活函数将大于0的保留,小小于0的数变为0;
#tensor([[[[1., 0.],
#         [0., 3.]]]])

输出:

torch.Size([1, 1, 2, 2])
tensor([[[[1., 0.],
          [0., 3.]]]])

激活函数Sigmoid

#Relu函数不是很明显,换成sigmoid激活函数
import torch
import torchvision
from torch import nn
from torch.nn import Sigmoid
from torch.utils.data import DataLoader 
from tensorboardX import SummaryWriter

# CIFAR-10 test images as tensors, loaded from the local path (no download).
dataset = torchvision.datasets.CIFAR10(root="E:\\notebookpytorch\\pyTorch学习\\NN\\data",
                                       train = False,transform = torchvision.transforms.ToTensor(),download = False)
 
dataloader = DataLoader(dataset,batch_size=64)


class Tudui(nn.Module):
    """Applies the logistic sigmoid 1 / (1 + e^-x) element-wise."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.sigmoid1 = Sigmoid()  # default inplace=False: input is not modified

    def forward(self, input):
        output = self.sigmoid1(input)
        return output
    
tudui = Tudui()
writer = SummaryWriter("logs_relu")  # NOTE(review): directory name says "relu" but the model applies Sigmoid
step = 0

for data in dataloader:
    imgs,targets = data
    writer.add_images("input",imgs,global_step = step)
    output = tudui(imgs)
    writer.add_images("output",output,step)
    step += 1
    
writer.close()
    

结果:

Linear layer

import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

# CIFAR-10 test split from the local path (download=False: data already on disk).
dataset = torchvision.datasets.CIFAR10("E:\\notebookpytorch\\pyTorch学习\\NN\\data",train=False,transform=torchvision.transforms.ToTensor(),download=False)

# drop_last=True: the final, smaller batch (10000 % 64 = 16 images) would not
# flatten to the 196608 features Linear expects and would crash the loop below.
dataloader = DataLoader(dataset,batch_size = 64, drop_last=True)

class Tudui(nn.Module):
    """One fully-connected layer: 196608 inputs (64*3*32*32) down to 10 outputs."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.linear1 = Linear(196608, 10)  # (in_features, out_features)

    def forward(self, input):
        return self.linear1(input)

tudui = Tudui()

for data in dataloader:
    imgs,targets = data
    print(imgs.shape)     # [64, 3, 32, 32] for a full batch
    #output = torch.reshape(imgs,(1, 1, 1, -1))   # alternative: keep 4 dims -> [1, 1, 1, 196608]
    
    output = torch.flatten(imgs)  # flatten the whole batch to 1-D: 64*3*32*32 = 196608 values
    
    print(output.shape)   # [1, 1, 1, 196608] with reshape ---> [196608] with flatten
    output = tudui(output)
    print(output.shape)   # [1, 1, 1, 10] ---> [10]
    # NOTE(review): a final batch smaller than 64 flattens to fewer than 196608
    # features and Linear will raise — confirm drop_last=True on the DataLoader.
    

结果: 

 

  • 3
    点赞
  • 7
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值