"""Building blocks for a YOLOv3-style detection head (PyTorch)."""

import math
from collections import OrderedDict

import numpy as np
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter


def conv2d(filter_in, filter_out, kernel_size):
    """Return a Conv2d -> BatchNorm2d -> LeakyReLU(0.1) block.

    Args:
        filter_in: number of input channels.
        filter_out: number of output channels.
        kernel_size: convolution kernel size; padding is chosen as
            (kernel_size - 1) // 2 so spatial size is preserved
            (0 when kernel_size is 0/falsy).

    Returns:
        nn.Sequential wrapping the three layers. The convolution has
        bias=False because BatchNorm supplies the affine shift.
    """
    pad = (kernel_size - 1) // 2 if kernel_size else 0
    return nn.Sequential(
        nn.Conv2d(filter_in, filter_out, kernel_size=kernel_size,
                  stride=1, padding=pad, bias=False),
        nn.BatchNorm2d(filter_out),
        nn.LeakyReLU(0.1),
    )


def make_last_layers(filter_list, in_filters, out_filter):
    """Build the final 7-layer YOLO head for one detection scale.

    The head alternates 1x1 "squeeze" and 3x3 "expand" conv blocks
    five times (channels in -> filter_list[0] -> filter_list[1] -> ...),
    then a 3x3 expand, and finishes with a plain 1x1 Conv2d (with bias,
    no BN/activation) projecting to `out_filter` prediction channels.

    Args:
        filter_list: two-element sequence [squeeze_channels, expand_channels].
        in_filters: channels of the incoming feature map.
        out_filter: channels of the raw prediction tensor
            (typically num_anchors * (5 + num_classes)).

    Returns:
        nn.ModuleList of the 7 layers, in forward order.
    """
    # Layers 0-5: conv2d blocks 1x1/3x3 alternating; layer 6: bare 1x1 conv.
    m = nn.ModuleList([
        conv2d(in_filters, filter_list[0], 1),
        conv2d(filter_list[0], filter_list[1], 3),
        conv2d(filter_list[1], filter_list[0], 1),
        conv2d(filter_list[0], filter_list[1], 3),
        conv2d(filter_list[1], filter_list[0], 1),
        conv2d(filter_list[0], filter_list[1], 3),
        nn.Conv2d(filter_list[1], out_filter, kernel_size=1,
                  stride=1, padding=0, bias=True),
    ])
    return m