import torch
conv = torch.nn.Conv2d(1, 8, (2, 3))
# PyTorch Conv2d expects NCHW input: (batch, channels, height, width).
# (The original comment said "width,height", which is the wrong order.)
x = torch.rand(1, 1, 224, 224)
output = conv(x)
# With a (2, 3) kernel and no padding: H_out = 224-2+1 = 223, W_out = 224-3+1 = 222.
print(output.shape)

bn = torch.nn.BatchNorm2d(8)
modules = [conv, bn]
for module in modules:
    class_name = module.__class__.__name__
    # Only Conv layers contribute here; BatchNorm's FLOPs are negligible.
    if "Conv" in class_name and hasattr(module, "weight"):
        # FLOPs of a conv layer = (number of kernel parameters) x (output
        # feature-map resolution), i.e.
        # in_channels * out_channels * kernel_h * kernel_w * out_h * out_w.
        flops = (
            torch.prod(torch.LongTensor(list(module.weight.data.size())))
            * torch.prod(torch.LongTensor(list(output.size())[2:]))
        ).item()
        print(list(module.weight.data.size()))
        print(list(output.size()))
        print(flops)
Conv2d层的计算量FLOPs为:输入通道数×输出通道数×卷积核宽×卷积核高×输出特征图宽×输出特征图高。故此卷积层的计算量为1×8×2×3×223×222=2376288
而BN一般计算量比较小,不算FLOPs