import torch
import torch.nn as nn
from torchsummary import summary
import time
# Prefer the GPU when one is available, but fall back to CPU so the
# script still runs on CPU-only hosts (the original hard-coded 'cuda'
# and crashed without a GPU).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Group_Conv(nn.Module):
    """Thin wrapper around a single 3x3 grouped convolution.

    Args:
        in_chs: number of input channels (must be divisible by num_groups).
        out_chs: number of output channels (must be divisible by num_groups).
        num_groups: number of channel groups passed to ``nn.Conv2d``.

    The convolution uses stride 1 and padding 1, so spatial dimensions
    are preserved; ``bias=False`` keeps the layer purely linear.
    """

    def __init__(self, in_chs, out_chs, num_groups):
        super().__init__()
        self.conv = nn.Conv2d(
            in_chs,
            out_chs,
            kernel_size=(3, 3),
            stride=(1, 1),
            padding=(1, 1),
            groups=num_groups,
            bias=False,
        )

    def forward(self, x):
        """Apply the grouped convolution and return its output."""
        return self.conv(x)
# Build the same 6->12 channel 3x3 conv with 1, 2, and 3 groups, and time
# construction + summary for each.  The parameter count shrinks as
# num_groups grows, since each group only sees in_chs/num_groups input
# channels (6 and 12 are both divisible by 1, 2, and 3).
s1 = time.perf_counter()
gc1 = Group_Conv(in_chs=6, out_chs=12, num_groups=1).to(device)
print(summary(gc1, input_size=(6, 64, 64)))
e1 = time.perf_counter()
print(e1 - s1)

s2 = time.perf_counter()
gc2 = Group_Conv(in_chs=6, out_chs=12, num_groups=2).to(device)
print(summary(gc2, input_size=(6, 64, 64)))
e2 = time.perf_counter()
print(e2 - s2)

s3 = time.perf_counter()
gc3 = Group_Conv(in_chs=6, out_chs=12, num_groups=3).to(device)
# BUG FIX: the original summarized gc2 again here, so the groups=3
# model was never reported; summarize the freshly built gc3 instead.
print(summary(gc3, input_size=(6, 64, 64)))
e3 = time.perf_counter()
print(e3 - s3)
# Implementing grouped convolution in PyTorch
# (source article published 2024-03-06 14:40:41)