MC-GAN: Multi-Content GAN for Few-Shot Font Style Transfer

MC-GAN is an end-to-end model for generating stylistically consistent fonts from only a few example glyphs. This article introduces the multi-content GAN architecture, including the conditional GAN setup, the Glyph Network, and the Ornamentation Network, and explains how style is transferred from the observed letters. Experiments on a dataset of 10,000 fonts demonstrate the model's effectiveness.
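At a high level, generation proceeds in two stages: GlyphNet predicts the black-and-white shapes of all 26 capital letters from the few observed ones, and OrnaNet then transfers color and texture onto them. A minimal sketch of that flow (function names, variable names, and shapes here are illustrative assumptions, not the repo's API):

# Illustrative two-stage flow; names and shapes are assumptions.
def stylize_font(observed_glyphs, glyph_net, orna_net):
    # Stage 1 (GlyphNet): few observed letters -> all 26 glyph shapes,
    # stacked channel-wise, e.g. a (B, 26, 64, 64) tensor.
    all_glyphs = glyph_net(observed_glyphs)
    # Stage 2 (OrnaNet): gray-scale glyphs -> colored, ornamented letters.
    return orna_net(all_glyphs)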


Paper: https://arxiv.org/pdf/1712.00516.pdf

Code: https://github.com/azadis/MC-GAN

I. Source Code

1. Dataset
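(This section is empty in the excerpt. Per the paper, training uses a collection of 10,000 gray-scale fonts, and GlyphNet consumes all 26 capital letters of a font stacked along the channel dimension, which is why the generator factories below default to groups=26. A hedged illustration of that layout, with sizes assumed from the paper:)

# Hedged illustration (sizes assumed): one font = 26 glyphs of 64x64,
# stacked channel-wise into a single tensor.
import torch
font_stack = torch.randn(1, 26, 64, 64)   # batch of 1 font, 26-channel glyph stack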

2. networks.py

import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import functools
from torch import index_select, LongTensor



def weights_init(m):
    # DCGAN-style initialization: N(0, 0.02) for conv weights,
    # N(1, 0.02) for batch-norm scale, zeros for biases.
    classname = m.__class__.__name__
    print("classname", classname)
    if classname.find('Conv') != -1:
        print("in random conv")
        m.weight.data.normal_(0.0, 0.02)
        if hasattr(m.bias, 'data'):
            m.bias.data.fill_(0)
    elif classname.find('BatchNorm2d') != -1:
        print("in random batchnorm")
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


def get_norm_layer(norm_type):
    # Map a norm name to a layer constructor with its affine flag preset.
    if norm_type == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
    elif norm_type == 'instance':
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
    else:
        norm_layer = None
        print('normalization layer [%s] is not found' % norm_type)
    return norm_layer
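The functools.partial turns the class into a ready-to-call constructor with the affine flag baked in; a short usage sketch:

# Usage sketch: the returned partial is later called with a channel count.
norm_layer = get_norm_layer('batch')   # functools.partial(nn.BatchNorm2d, affine=True)
bn64 = norm_layer(64)                  # same as nn.BatchNorm2d(64, affine=True)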

def conv_norm_relu_module(norm_type, norm_layer, input_nc, ngf, kernel_size, padding, stride=1, relu='relu'):
    # Conv -> (optional) norm -> ReLU/LeakyReLU, returned as a layer list.
    # Note: norm_type is unused here; only norm_layer matters.
    model = [nn.Conv2d(input_nc, ngf, kernel_size=kernel_size, padding=padding, stride=stride)]
    if norm_layer:
        model += [norm_layer(ngf)]

    if relu == 'relu':
        model += [nn.ReLU(True)]
    elif relu == 'Lrelu':
        model += [nn.LeakyReLU(0.2, True)]

    return model



def convTranspose_norm_relu_module(norm_type, norm_layer, input_nc, ngf, kernel_size, padding, stride=1, output_padding=0):
    # Upsampling counterpart: ConvTranspose -> norm -> ReLU.
    # Only 'batch' and 'instance' are supported; anything else would leave
    # `model` undefined, so fail loudly instead.
    if norm_type == 'batch' or norm_type == 'instance':
        model = [nn.ConvTranspose2d(input_nc, ngf,
                    kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding),
                    norm_layer(int(ngf)),
                    nn.ReLU(True)]
    else:
        raise ValueError('convTranspose module requires batch or instance norm, got [%s]' % norm_type)

    return model
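Both helpers return plain Python lists of layers, so downsampling and upsampling blocks concatenate directly into one nn.Sequential; a small sketch (channel sizes are illustrative):

# Sketch: list-returning helpers compose by concatenation.
norm_layer = get_norm_layer('batch')
down = conv_norm_relu_module('batch', norm_layer, 26, 64, kernel_size=7, padding=3)
up = convTranspose_norm_relu_module('batch', norm_layer, 64, 26,
                                    kernel_size=3, padding=1, stride=2, output_padding=1)
stem = nn.Sequential(*(down + up))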


def define_G_3d(input_nc, output_nc, norm='batch', groups=26, ksize=3, padding=1, gpu_ids=[]):
    # Factory for the grouped "3D" ResNet generator (one group per glyph).
    netG_3d = None
    use_gpu = len(gpu_ids) > 0

    if use_gpu:
        assert(torch.cuda.is_available())
    netG_3d = ResnetGenerator_3d_conv(input_nc, output_nc, norm_type=norm, groups=groups, ksize=ksize, padding=padding, gpu_ids=gpu_ids)

    if len(gpu_ids) > 0:
        # `device_id=` was removed in newer PyTorch; pass the index positionally.
        netG_3d.cuda(gpu_ids[0])

    netG_3d.apply(weights_init)
    return netG_3d
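A usage sketch (this assumes ResnetGenerator_3d_conv is defined later in networks.py, outside this excerpt): with groups=26, each of the 26 glyph channels gets its own convolution group.

# 26-in / 26-out grouped generator, one group per letter (CPU here;
# pass gpu_ids=[0] for CUDA).
netG_3d = define_G_3d(26, 26, norm='batch', groups=26, ksize=3, padding=1)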



def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, gpu_ids=[]):
    # Factory for the 2D generator backbone (ResNet or U-Net variants).
    netG = None
    use_gpu = len(gpu_ids) > 0

    norm_layer = get_norm_layer(norm_type=norm)

    if use_gpu:
        assert(torch.cuda.is_available())
    print(which_model_netG)
    if which_model_netG == 'resnet_9blocks':
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, norm_type=norm, gpu_ids=gpu_ids)
    elif which_model_netG == 'resnet_6blocks':
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, norm_type=norm, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_128':
        netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_256':
        netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    else:
        print('Generator model name [%s] is not recognized' % which_model_netG)

    if len(gpu_ids) > 0:
        netG.cuda(gpu_ids[0])

    netG.apply(weights_init)
    return netG
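A usage sketch (assumes ResnetGenerator is defined later in networks.py; the 26-channel in/out sizes follow the glyph-stack convention above and are illustrative, not prescribed by this function):

# Instantiate a 6-block ResNet generator over the 26-channel glyph stack.
netG = define_G(26, 26, ngf=64, which_model_netG='resnet_6blocks',
                norm='batch', use_dropout=True)
out = netG(torch.randn(1, 26, 64, 64))   # predicts all 26 glyphs at once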


def define_Enc(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, gpu_ids=[]):
    # Factory for the encoder half of the generator (same variants as define_G).
    netG = None
    use_gpu = len(gpu_ids) > 0

    norm_layer = get_norm_layer(norm_type=norm)

    if use_gpu:
        assert(torch.cuda.is_available())
    print(which_model_netG)
    if which_model_netG == 'resnet_9blocks':
        netG = ResnetEncoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, norm_type=norm, gpu_ids=gpu_ids)
    elif which_model_netG == 'resnet_6blocks':
        netG = ResnetEncoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, norm_type=norm, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_128':
        netG = UnetEncoder(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_256':
        netG = UnetEncoder(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    else:
        print('Encoder model name [%s] is not recognized' % which_model_netG)

    if len(gpu_ids) > 0:
        netG.cuda(gpu_ids[0])

    netG.apply(weights_init)
    return netG



def define_Dec(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, gpu_ids=[]):
    # Factory for the decoder half of the generator (same variants as define_G).
    netG = None
    use_gpu = len(gpu_ids) > 0

    norm_layer = get_norm_layer(norm_type=norm)

    if use_gpu:
        assert(torch.cuda.is_available())
    print(which_model_netG)
    if which_model_netG == 'resnet_9blocks':
        netG = ResnetDecoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, norm_type=norm, gpu_ids=gpu_ids)
    elif which_model_netG == 'resnet_6blocks':
        netG = ResnetDecoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, norm_type=norm, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_128':
        netG = UnetDecoder(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_256':
        netG = UnetDecoder(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    else:
        print('Decoder model name [%s] is not recognized' % which_model_netG)

    if len(gpu_ids) > 0:
        netG.cuda(gpu_ids[0])

    netG.apply(weights_init)
    return netG



The excerpt ends partway through the discriminator factory, define_D(input_nc, ndf, which_model_netD, ...), which follows the same pattern as the generator factories above.
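For orientation only, a hedged sketch of how such a factory typically looks in this codebase's style. It assumes a pix2pix-style NLayerDiscriminator class defined elsewhere in networks.py; that name, the extra parameters, and the 'basic'/'n_layers' options are assumptions, not the repo's verified code.

# Hedged sketch of define_D, NOT the repo's actual implementation.
def define_D(input_nc, ndf, which_model_netD, n_layers_D=3,
             norm='batch', use_sigmoid=False, gpu_ids=[]):
    netD = None
    norm_layer = get_norm_layer(norm_type=norm)
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
    if which_model_netD == 'basic':
        # PatchGAN discriminator with a fixed depth of 3 conv layers.
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3,
                                   norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D,
                                   norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        print('Discriminator model name [%s] is not recognized' % which_model_netD)
    if len(gpu_ids) > 0:
        netD.cuda(gpu_ids[0])
    netD.apply(weights_init)
    return netD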