Materials: https://pan.baidu.com/s/1LUJkF__vlqAlbiTX1KE2aA
Extraction code: 3061
README
train.py
config.py
Models
bank_model.py
import torch
from models.base_model import BaseModel
from architecture.generator_bank_net import GeneratorBankNet
from architecture.discriminator import Discriminator
# from arcitecture.transformer_net import TransformerNetBank
from torch.optim import Adam
import src.utils as utils
import os
base_model.py
from tqdm import tqdm
import torch
from torchvision import transforms
import torch.nn as nn
import os
import torchvision
import matplotlib.pyplot as plt
import src.utils as utils
gui — graphical user interface
architecture — model structure definitions
main_net.py DCGAN
import torch.nn as nn
class MainNet(nn.Module):
    """DCGAN generator.

    Maps a latent vector of shape (N, z_size, 1, 1) through a stack of
    strided transposed convolutions to an image of shape
    (N, out_size, 64, 64) with values in [-1, 1] (Tanh output).

    Args:
        z_size: dimensionality of the latent input vector.
        out_size: number of output image channels.
        ngf: base number of generator feature maps; deeper layers use
            multiples of it (ngf * 8 down to ngf).
    """

    def __init__(self, z_size=100, out_size=3, ngf=128):
        super(MainNet, self).__init__()
        self.z_size = z_size
        self.ngf = ngf
        self.out_size = out_size
        self.layer0 = nn.Sequential(
            # input size is z_size x 1 x 1
            nn.ConvTranspose2d(self.z_size, self.ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(self.ngf * 8),
            nn.ReLU(inplace=True)
            # state size: (ngf * 8) x 4 x 4
        )
        self.layer1 = nn.Sequential(
            # state size: (ngf * 8) x 4 x 4
            nn.ConvTranspose2d(self.ngf * 8, self.ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf * 4),
            nn.ReLU(inplace=True),
            # state size: (ngf * 4) x 8 x 8
        )
        self.layer2 = nn.Sequential(
            # state size: (ngf * 4) x 8 x 8
            nn.ConvTranspose2d(self.ngf * 4, self.ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf * 2),
            nn.ReLU(inplace=True),
            # state size: (ngf * 2) x 16 x 16
        )
        self.layer3 = nn.Sequential(
            # state size: (ngf * 2) x 16 x 16
            nn.ConvTranspose2d(self.ngf * 2, self.ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf),
            nn.ReLU(inplace=True),
            # state size: ngf x 32 x 32
        )
        self.layer4 = nn.Sequential(
            # state size: ngf x 32 x 32
            nn.ConvTranspose2d(self.ngf, self.out_size, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size: out_size x 64 x 64
        )
        # DCGAN weight init (Radford et al., 2016): conv weights ~ N(0, 0.02),
        # BatchNorm scale ~ N(1, 0.02) with zero shift.  The BatchNorm branch
        # was missing in the original loop, which only touched the conv layers.
        for m in self.modules():
            if isinstance(m, nn.ConvTranspose2d):
                nn.init.normal_(m.weight, 0.0, 0.02)
                if m.bias is not None:  # convs are bias=False; kept for safety
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.normal_(m.weight, 1.0, 0.02)
                nn.init.zeros_(m.bias)

    def forward(self, input):
        """Generate images from latent codes.

        Also caches the intermediate activations (l1out..l4out) on self so
        downstream code (e.g. a feedback loop) can read them after the call.
        """
        self.l1out = output = self.layer0(input)
        self.l2out = output = self.layer1(output)
        self.l3out = output = self.layer2(output)
        self.l4out = output = self.layer3(output)
        output = self.layer4(output)
        return output
discriminator.py
import torch.nn as nn
class Discriminator(nn.Module):
    """DCGAN discriminator.

    Classifies an in_size x 64 x 64 image as real/fake via a stack of
    strided convolutions, ending in a Sigmoid so the output of shape
    (N, 1, 1, 1) lies in [0, 1].

    Args:
        in_size: number of input image channels.
        ndf: base number of discriminator feature maps; deeper layers use
            multiples of it (ndf up to ndf * 8).
    """

    def __init__(self, in_size=3, ndf=128):
        super(Discriminator, self).__init__()
        self.in_size = in_size
        self.ndf = ndf
        self.l1 = nn.Sequential(# input size is in_size x 64 x 64
            nn.Conv2d(self.in_size, self.ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True))
        self.l2 = nn.Sequential(# state size: ndf x 32 x 32
            nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),)
        self.l3 = nn.Sequential(# state size: (ndf * 2) x 16 x 16
            nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),)
        self.l4 = nn.Sequential(# state size: (ndf * 4) x 8 x 8
            nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),)
        self.l5 = nn.Sequential(# state size: (ndf * 8) x 4 x 4
            nn.Conv2d(self.ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
            # state size: 1 x 1 x 1
        )
        # DCGAN weight init (Radford et al., 2016): conv weights ~ N(0, 0.02),
        # BatchNorm scale ~ N(1, 0.02) with zero shift.  The BatchNorm branch
        # was missing in the original loop, which only touched the conv layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, 0.0, 0.02)
                if m.bias is not None:  # convs are bias=False; kept for safety
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.normal_(m.weight, 1.0, 0.02)
                nn.init.zeros_(m.bias)

    def forward(self, input):
        """Score a batch of images; caches per-layer activations on self."""
        self.l1out = self.l1(input)
        self.l2out = self.l2(self.l1out)
        self.l3out = self.l3(self.l2out)
        self.l4out = self.l4(self.l3out)
        return self.l5(self.l4out)

    ## USED FOR FEEDBACK LOOP
    def getLayersOutDet(self):
        """Return the four cached activations, detached from the graph.

        Must be called after forward(); the detach makes them safe to feed
        back into the generator without propagating discriminator gradients.
        """
        return [self.l1out.detach(), self.l2out.detach(), self.l3out.detach(), self.l4out.detach()]
FeedbackModel.py
generator_bank_net.py