nn.Conv2d:可以看出卷积核参数量随 groups 取值划分的变化:
from torch import nn

# Kernel parameter count as a function of `groups` (in=out=10, k=3, s=2, p=0):
#   groups=10 -> depthwise: each channel has its own 1-channel kernel, [10, 1, 3, 3]
#   groups=2  -> two groups of 5 channels each, [10, 5, 3, 3]
#   groups=1  -> ordinary convolution, [10, 10, 3, 3]
for num_groups in (10, 2, 1):
    t = nn.Conv2d(10, 10, 3, 2, 0, bias=False, groups=num_groups)
    t.weight.shape  # inspect in a REPL; produces no output when run as a script
下面对比 torch 卷积结果与我们手工计算的结果:两者是一样的。
import numpy as np
import torch
from torch import nn

# Depthwise convolution (groups == in_channels): every input channel is
# convolved with its own single-channel 3x3 kernel, so output channel idx
# depends only on input channel idx.
input_feature_size = 5
t = nn.Conv2d(input_feature_size, input_feature_size, 3, 2, 0,
              bias=False, groups=input_feature_size)
print(t.weight.shape)  # torch.Size([5, 1, 3, 3])

# A 3x3 input with a 3x3 kernel, stride 2, no padding yields a single 1x1
# output per channel: the full elementwise-product sum over the feature map.
# NOTE(review): this passes an unbatched (C, H, W) tensor, which recent
# PyTorch versions accept for Conv2d — confirm against the project's
# pinned torch version.
feature_map = torch.rand([input_feature_size, 3, 3])
ret = t(feature_map)
print("torch 卷积结果:", ret)

# Manual re-computation: channel idx is paired with feature_map[idx] only.
# (Removed the unused local `w0` from the original.)
my_ret = []
for idx, one_w in enumerate(t.weight):
    my_ret.append(torch.sum(feature_map[idx] * one_w[0]).detach().numpy())
print("手工计算 卷积结果:", my_ret)
print('end!')
下面这个是普通卷积操作,groups=1:
import numpy as np
import torch
from torch import nn

# Ordinary convolution (groups=1): every output channel mixes ALL input
# channels, so the weight is [out_channels, in_channels, 3, 3].
input_feature_size = 5
t = nn.Conv2d(input_feature_size, input_feature_size, 3, 2, 0,
              bias=False, groups=1)
print(t.weight.shape)  # torch.Size([5, 5, 3, 3])

# All-ones 3x3 input, 3x3 kernel, stride 2, no padding -> one scalar per
# output channel, equal to the sum of that channel's entire kernel.
feature_map = torch.ones([input_feature_size, 3, 3])
ret = t(feature_map)
print("torch 卷积结果:", ret)

# Manual re-computation: for each output channel, pair input channel j with
# kernel slice j and accumulate the elementwise-product sums.
# BUG FIX 1: the original multiplied each [3,3] feature channel against the
# whole [5,3,3] kernel (broadcasting over ALL kernel channels), which yields
# 5x the true convolution result; channels must be paired with zip().
my_ret = []
for one_w in t.weight:
    sum_ = 0
    for one_fm, one_ch_w in zip(feature_map, one_w):
        sum_ += torch.sum(one_fm * one_ch_w)
    my_ret.append(sum_)
# BUG FIX 2: the original printed `ret` here instead of `my_ret`, which
# masked the broadcasting bug above.
print("手工计算 卷积结果:", my_ret)
print('end!')