PyTorch Basic Functions

This article covers the nn.Sequential module in PyTorch, including how to build network layers in order and how to name modules with an OrderedDict. nn.ModuleList is a list that stores modules and automatically registers their parameters with the network. It also covers torch.chunk for splitting tensors, the different padding modes of torch.nn.functional.pad, and torch.meshgrid for generating coordinate grids.

Contents

nn.Sequential

nn.ModuleList

torch.chunk(tensor, chunks, dim=)

torch.nn.functional.pad(input, pad, mode=, value=)

torch.meshgrid(Tensor1, Tensor2)

torch.stack(tensors, dim=)

torch.nonzero(input, *, out=None, as_tuple=False)


nn.Sequential

The modules inside are arranged in order, so you must make sure that each module's output size matches the next module's input size.

Alternatively, use an OrderedDict to give each module a name.

# Example of using Sequential
import torch
import torch.nn as nn
from collections import OrderedDict

model = nn.Sequential(
    nn.Conv2d(1, 20, 5),
    nn.ReLU(),
    nn.Conv2d(20, 64, 5),
    nn.ReLU()
)

# Example of using Sequential with OrderedDict
model = nn.Sequential(OrderedDict([
    ('conv1', nn.Conv2d(1, 20, 5)),
    ('ReLU1', nn.ReLU()),
    ('conv2', nn.Conv2d(20, 64, 5)),
    ('ReLU2', nn.ReLU())
]))
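
With the OrderedDict form, each named submodule can then be accessed as an attribute (nn.Module registers submodules under the names you give):

print(model.conv1)
# Conv2d(1, 20, kernel_size=(5, 5), stride=(1, 1))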

nn.Sequential implements forward internally, so you do not have to write a forward function yourself.

# Example 1: from the official documentation
seq = nn.Sequential(
    nn.Conv2d(1, 20, 5),
    nn.ReLU(),
    nn.Conv2d(20, 64, 5),
    nn.ReLU()
)
print(seq)
# Sequential(
#   (0): Conv2d(1, 20, kernel_size=(5, 5), stride=(1, 1))
#   (1): ReLU()
#   (2): Conv2d(20, 64, kernel_size=(5, 5), stride=(1, 1))
#   (3): ReLU()
# )

# Feed an input through seq
input = torch.randn(16, 1, 20, 20)
print(seq(input).shape)
# torch.Size([16, 64, 12, 12])

But if you subclass nn.Module, you still have to write a forward function.

# Example 2: when subclassing nn.Module, you must write the forward function yourself
class net1(nn.Module):
    def __init__(self):
        super(net1, self).__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(1, 20, 5),
            nn.ReLU(),
            nn.Conv2d(20, 64, 5),
            nn.ReLU()
        )

    def forward(self, x):
        return self.seq(x)

    # Note: a for loop over the submodules gives the same result
    # def forward(self, x):
    #     for s in self.seq:
    #         x = s(x)
    #     return x

# Feed an input through net1
input = torch.randn(16, 1, 20, 20)
net = net1()
print(net(input).shape)
# torch.Size([16, 64, 12, 12])

Because the structure and order of the layers inside nn.Sequential are fixed, nn.Sequential is well suited for building network blocks.
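
For instance, a reusable convolution block can be packaged as an nn.Sequential and instantiated several times inside a larger model. The conv_block helper and BlockNet class below are a hypothetical sketch of this pattern, not code from the original article:

def conv_block(in_ch, out_ch):
    # A small reusable block: conv -> ReLU, built once as a Sequential
    return nn.Sequential(
        nn.Conv2d(in_ch, out_ch, 3, padding=1),
        nn.ReLU()
    )

class BlockNet(nn.Module):
    def __init__(self):
        super(BlockNet, self).__init__()
        # The same block pattern is reused with different channel sizes
        self.block1 = conv_block(1, 20)
        self.block2 = conv_block(20, 64)

    def forward(self, x):
        return self.block2(self.block1(x))

print(BlockNet()(torch.randn(2, 1, 8, 8)).shape)
# torch.Size([2, 64, 8, 8])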

nn.ModuleList

nn.ModuleList is a list that stores different modules and automatically registers each module's parameters with the network.

class net_modlist(nn.Module):
    def __init__(self):
        super(net_modlist, self).__init__()
        self.modlist = nn.ModuleList([
            nn.Conv2d(1, 20, 5),
            nn.ReLU(),
            nn.Conv2d(20, 64, 5),
            nn.ReLU()
        ])

    def forward(self, x):
        for m in self.modlist:
            x = m(x)
        return x

net_modlist = net_modlist()
print(net_modlist)
#net_modlist(
#  (modlist): ModuleList(
#    (0): Conv2d(1, 20, kernel_size=(5, 5), stride=(1, 1))
#    (1): ReLU()
#    (2): Conv2d(20, 64, kernel_size=(5, 5), stride=(1, 1))
#    (3): ReLU()
#  )
#)

for param in net_modlist.parameters():
    print(type(param.data), param.size())
#<class 'torch.Tensor'> torch.Size([20, 1, 5, 5])
#<class 'torch.Tensor'> torch.Size([20])
#<class 'torch.Tensor'> torch.Size([64, 20, 5, 5])
#<class 'torch.Tensor'> torch.Size([64])

Parameters such as the network weights and biases are added to the network automatically.

class net_modlist(nn.Module):
    def __init__(self):
        super(net_modlist, self).__init__()
        self.modlist = [
            nn.Conv2d(1, 20, 5),
            nn.ReLU(),
            nn.Conv2d(20, 64, 5),
            nn.ReLU()
        ]

    def forward(self, x):
        for m in self.modlist:
            x = m(x)
        return x

net_modlist = net_modlist()
print(net_modlist)
# net_modlist()
for param in net_modlist.parameters():
    print(type(param.data), param.size())
# (prints nothing: no parameters were registered)

With a plain Python list, the added layers and their parameters are never registered with the network, so they cannot be trained!

Building a network with a for loop plus ModuleList:

class net4(nn.Module):
    def __init__(self):
        super(net4, self).__init__()
        layers = [nn.Linear(10, 10) for i in range(5)]  # five linear layers
        self.linears = nn.ModuleList(layers)
        # convert the Python list into a ModuleList

    def forward(self, x):
        for layer in self.linears:
            x = layer(x)
        return x

net = net4()
print(net)
# net4(
#   (linears): ModuleList(
#     (0): Linear(in_features=10, out_features=10, bias=True)
#     (1): Linear(in_features=10, out_features=10, bias=True)
#     (2): Linear(in_features=10, out_features=10, bias=True)
#     (3): Linear(in_features=10, out_features=10, bias=True)
#     (4): Linear(in_features=10, out_features=10, bias=True)
#   )
# )
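
Unlike nn.Sequential, an nn.ModuleList has no forward of its own, so calling self.linears(x) directly would raise an error; in exchange, you control the execution order, which makes patterns like skip connections easy. The SkipNet class below is a hypothetical sketch of that flexibility:

class SkipNet(nn.Module):
    def __init__(self):
        super(SkipNet, self).__init__()
        self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(5)])

    def forward(self, x):
        for layer in self.linears:
            # residual-style connection: add each layer's input to its output
            x = x + layer(x)
        return x

print(SkipNet()(torch.randn(4, 10)).shape)
# torch.Size([4, 10])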

torch.chunk(tensor, chunks, dim=)

Splits a tensor into chunks along a dimension and returns a tuple of tensors.

import torch

data = torch.tensor([[0.6742, 0.5700, 0.3519, 0.4603, 0.9590],
                     [0.9705, 0.8673, 0.8854, 0.9029, 0.5473],
                     [0.0199, 0.4729, 0.4001, 0.7581, 0.5045]], dtype=torch.float64)

for i, data_i in enumerate(data.chunk(5, 1)):  # split into 5 chunks along dim 1
    print(str(data_i))
>>
tensor([[0.6742],
        [0.9705],
        [0.0199]], dtype=torch.float64)
tensor([[0.5700],
        [0.8673],
        [0.4729]], dtype=torch.float64)
tensor([[0.3519],
        [0.8854],
        [0.4001]], dtype=torch.float64)
tensor([[0.4603],
        [0.9029],
        [0.7581]], dtype=torch.float64)
tensor([[0.9590],
        [0.5473],
        [0.5045]], dtype=torch.float64)  

for i, data_i in enumerate(data.chunk(3, 0)):  # split into 3 chunks along dim 0
    print(str(data_i))
>>
tensor([[0.6742, 0.5700, 0.3519, 0.4603, 0.9590]], dtype=torch.float64)
tensor([[0.9705, 0.8673, 0.8854, 0.9029, 0.5473]], dtype=torch.float64)
tensor([[0.0199, 0.4729, 0.4001, 0.7581, 0.5045]], dtype=torch.float64)
   
for i, data_i in enumerate(data.chunk(3, 1)):  # 3 chunks along dim 1; 5 does not divide evenly
    print(str(data_i))
>>
tensor([[0.6742, 0.5700],
        [0.9705, 0.8673],
        [0.0199, 0.4729]], dtype=torch.float64)
tensor([[0.3519, 0.4603],
        [0.8854, 0.9029],
        [0.4001, 0.7581]], dtype=torch.float64)
tensor([[0.9590],
        [0.5473],
        [0.5045]], dtype=torch.float64)
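
Chunking is lossless: concatenating the chunks back along the same dimension with torch.cat reproduces the original tensor. A quick check:

chunks = data.chunk(3, 1)
print(torch.equal(torch.cat(chunks, dim=1), data))
# True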

torch.nn.functional.pad(input, pad, mode=, value=)

pad: the padding amounts along the chosen dimensions

mode: one of 'constant', 'reflect', or 'replicate', meaning constant fill, reflection, and edge replication respectively

value: the fill value; it only takes effect when mode='constant'

import torch
import torch.nn.functional as F

t4d = torch.empty(1, 3, 5, 3)
# t4d.shape = torch.Size([1, 3, 5, 3])

p2d = (1, 2, 3, 4)  # pads the last two dims: (left, right, top, bottom)
t2 = F.pad(t4d, p2d, 'constant', 2)
# t2.shape = torch.Size([1, 3, 12, 6])   note: len(pad) == 4 pads over two dims

Padding over three dimensions:

p3d = (1, 2, 3, 4, 5, 6)
t3 = F.pad(t4d, p3d, 'constant', 3)

# t3.shape = torch.Size([1, 14, 12, 6])

The pad tuple is read as: (left, right, top, bottom, front, back).
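
The non-constant modes ignore value and instead mirror or repeat the existing border values. A minimal sketch on a 1-D signal (reshaped to 3-D, since 'reflect' and 'replicate' expect a batched input):

t = torch.arange(1., 5.).reshape(1, 1, 4)  # values [1, 2, 3, 4]
print(F.pad(t, (2, 2), mode='reflect'))
# tensor([[[3., 2., 1., 2., 3., 4., 3., 2.]]])
print(F.pad(t, (2, 2), mode='replicate'))
# tensor([[[1., 1., 1., 2., 3., 4., 4., 4.]]])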

torch.meshgrid(Tensor1, Tensor2)

Generates a grid, which can be used to produce coordinates.

The input is two 1-D tensors and the output is two tensors of the same shape: the number of rows is len(Tensor1) and the number of columns is len(Tensor2).

The first output tensor is filled with elements of the first input, with identical elements within each row; the second output tensor is filled with elements of the second input, with identical elements within each column.

In the example below, this amounts to generating a 14 (x) × 3 (y) unit coordinate grid with 42 (14×3) points, so the corresponding positions of rvs and cvs give the point coordinates: rvs holds the x coordinates and cvs the y coordinates.

(0,0) (0,1) (0,2) (1,0) (1,1) (1,2) ... (13,0) (13,1) (13,2)

rvs, cvs = torch.meshgrid(torch.arange(0, 14), torch.arange(0, 3), indexing='ij')  # 'ij' indexing matches the output below (the default in older PyTorch versions)
>>> rvs
tensor([[ 0,  0,  0],
        [ 1,  1,  1],
        [ 2,  2,  2],
        [ 3,  3,  3],
        [ 4,  4,  4],
        [ 5,  5,  5],
        [ 6,  6,  6],
        [ 7,  7,  7],
        [ 8,  8,  8],
        [ 9,  9,  9],
        [10, 10, 10],
        [11, 11, 11],
        [12, 12, 12],
        [13, 13, 13]])
>>> cvs
tensor([[0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2],
        [0, 1, 2]])
>>> rvs.shape == cvs.shape
True
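
A common follow-up is to stack the two grids into explicit (x, y) coordinate pairs, which previews torch.stack below:

coords = torch.stack((rvs, cvs), dim=-1)
print(coords.shape)
# torch.Size([14, 3, 2])
print(coords[13, 2])
# tensor([13, 2])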

torch.stack(tensors, dim=)

Concatenates a sequence of tensors along a new dimension. All tensors in the sequence must have the same shape.

T1 = torch.tensor([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
# T1.shape = (3, 3)
T2 = torch.tensor([[10, 20, 30],
                   [40, 50, 60],
                   [70, 80, 90]])
# T2.shape = (3, 3)

print(torch.stack((T1, T2), dim=0).shape)
# torch.Size([2, 3, 3])  the new dimension becomes dim 0
print(torch.stack((T1, T2), dim=1).shape)
# torch.Size([3, 2, 3])  the new dimension becomes dim 1
print(torch.stack((T1, T2), dim=2).shape)
# torch.Size([3, 3, 2])  the new dimension becomes dim 2
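
By contrast, torch.cat joins tensors along an existing dimension instead of creating a new one:

print(torch.cat((T1, T2), dim=0).shape)
# torch.Size([6, 3])  no new dimension; rows are appended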

torch.nonzero(input, *, out=None, as_tuple=False)

Returns the positions of the non-zero elements. With as_tuple=True the positions are returned as a tuple of index tensors; with as_tuple=False they are returned as a 2-D matrix.

torch.where(condition) is equivalent to torch.nonzero(condition, as_tuple=True).

torch.nonzero(torch.tensor([[0.6, 2.0, 0.0, 0.0],
                            [0.0, 0.4, 0.0, 0.0],
                            [0.0, 0.0, 1.2, 0.0],
                            [0.0, 0.0, 3.0, -0.4]]))
# tensor([[0, 0],
#         [0, 1],
#         [1, 1],
#         [2, 2],
#         [3, 2],
#         [3, 3]])

# with as_tuple=True:
# output: (tensor([0, 0, 1, 2, 3, 3]), tensor([0, 1, 1, 2, 2, 3]))
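
The as_tuple=True form can be used directly for advanced indexing, which extracts the non-zero values themselves. A small sketch:

x = torch.tensor([[0.6, 2.0, 0.0],
                  [0.0, 0.4, 0.0]])
idx = torch.nonzero(x, as_tuple=True)
print(x[idx])
# tensor([0.6000, 2.0000, 0.4000])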
