Generating Cartoon Avatars with a DCGAN in TensorFlow 2.0

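This post walks through a DCGAN (deep convolutional GAN) built with TensorFlow 2.0 and tf.keras. A generator maps a 100-dimensional latent vector to a 64x64 RGB cartoon face through a stack of transposed convolutions, while a discriminator learns to tell generated faces from real ones; the two networks are trained adversarially on a folder of anime face crops. The full script is below.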

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from glob import glob
from data import make_anime_dataset
import numpy as np
import os
from PIL import Image  # Pillow is used for saving images (scipy.misc.toimage no longer exists)

z_dim = 100            # length of the latent vector z
epochs = 30            # number of outer training iterations
batch_size = 64        # batch size
learning_rate = 0.0002
is_training = True

# Collect the face images and build the training dataset of 64x64 crops,
# then repeat it so the iterator does not run out during training.
img_paths = glob('F:\\GAN\\DCGAN\\faces\\*.jpg')
dataset, img_shape, _ = make_anime_dataset(img_paths, batch_size=batch_size, resize=64)
dataset = dataset.repeat(100)
db_iter = iter(dataset)
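
# The `data` module imported above is not included in this post. For reference,
# here is a minimal sketch of what make_anime_dataset might look like; this is an
# assumption about its behaviour, not the original implementation, and it would
# normally live in data.py: decode the JPEGs, resize them to `resize` x `resize`,
# scale pixels to [-1, 1] to match the generator's tanh output, and batch them.
def make_anime_dataset(img_paths, batch_size, resize=64):
    def load_img(path):
        img = tf.io.read_file(path)
        img = tf.image.decode_jpeg(img, channels=3)
        img = tf.image.resize(img, [resize, resize]) / 127.5 - 1.0  # scale to [-1, 1]
        return img
    ds = tf.data.Dataset.from_tensor_slices(img_paths)
    ds = ds.shuffle(1000).map(load_img).batch(batch_size, drop_remainder=True)
    img_shape = (resize, resize, 3)
    return ds, img_shape, len(img_paths)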

class Generator(keras.Model):
    # Generator: maps a latent vector z to a 64x64x3 image via transposed convolutions.
    def __init__(self):
        super(Generator, self).__init__()
        filter = 64
        # 1x1 -> 4x4
        self.conv1 = layers.Conv2DTranspose(filters=filter*8, kernel_size=4, strides=1, padding='valid', use_bias=False)
        self.bn1 = layers.BatchNormalization()
        # 4x4 -> 8x8
        self.conv2 = layers.Conv2DTranspose(filters=filter*4, kernel_size=4, strides=2, padding='same', use_bias=False)
        self.bn2 = layers.BatchNormalization()
        # 8x8 -> 16x16
        self.conv3 = layers.Conv2DTranspose(filters=filter*2, kernel_size=4, strides=2, padding='same', use_bias=False)
        self.bn3 = layers.BatchNormalization()
        # 16x16 -> 32x32
        self.conv4 = layers.Conv2DTranspose(filters=filter*1, kernel_size=4, strides=2, padding='same', use_bias=False)
        self.bn4 = layers.BatchNormalization()
        # 32x32 -> 64x64, 3 output channels (RGB)
        self.conv5 = layers.Conv2DTranspose(filters=3, kernel_size=4, strides=2, padding='same', use_bias=False)

    def call(self, inputs, training=None):
        # Reshape the latent vector [b, z_dim] into a [b, 1, 1, z_dim] feature map.
        x = tf.reshape(inputs, (inputs.shape[0], 1, 1, inputs.shape[1]))
        x = tf.nn.relu(x)
        x = tf.nn.relu(self.bn1(self.conv1(x), training=training))
        x = tf.nn.relu(self.bn2(self.conv2(x), training=training))
        x = tf.nn.relu(self.bn3(self.conv3(x), training=training))
        x = tf.nn.relu(self.bn4(self.conv4(x), training=training))
        # tanh keeps the output pixel values in [-1, 1].
        x = tf.tanh(self.conv5(x))
        return x
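
# Optional sanity check (illustrative, not part of the original script): a batch
# of latent vectors should come out as 64x64 RGB images with values in [-1, 1].
# g = Generator()
# print(g(tf.random.normal([4, z_dim]), training=False).shape)  # (4, 64, 64, 3)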
class Discriminator(keras.Model):
    # Discriminator: maps a 64x64x3 image to a single real/fake logit.
    def __init__(self):
        super(Discriminator, self).__init__()
        filter = 64
        # 64x64 -> 31x31
        self.conv1 = layers.Conv2D(filters=filter, kernel_size=4, strides=2, padding='valid', use_bias=False)
        self.bn1 = layers.BatchNormalization()
        # 31x31 -> 14x14
        self.conv2 = layers.Conv2D(filters=filter*2, kernel_size=4, strides=2, padding='valid', use_bias=False)
        self.bn2 = layers.BatchNormalization()
        # 14x14 -> 6x6
        self.conv3 = layers.Conv2D(filters=filter*4, kernel_size=4, strides=2, padding='valid', use_bias=False)
        self.bn3 = layers.BatchNormalization()
        # 6x6 -> 4x4
        self.conv4 = layers.Conv2D(filters=filter*8, kernel_size=3, strides=1, padding='valid', use_bias=False)
        self.bn4 = layers.BatchNormalization()
        # 4x4 -> 2x2
        self.conv5 = layers.Conv2D(filters=filter*16, kernel_size=3, strides=1, padding='valid', use_bias=False)
        self.bn5 = layers.BatchNormalization()
        # [b, 2, 2, 1024] -> [b, 1024]
        self.pool = layers.GlobalAveragePooling2D()
        self.flatten = layers.Flatten()
        self.fc = layers.Dense(1)

    def call(self, inputs, training=None):
        x = tf.nn.leaky_relu(self.bn1(self.conv1(inputs), training=training))
        x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=training))
        x = tf.nn.leaky_relu(self.bn3(self.conv3(x), training=training))
        x = tf.nn.leaky_relu(self.bn4(self.conv4(x), training=training))
        x = tf.nn.leaky_relu(self.bn5(self.conv5(x), training=training))
        x = self.pool(x)
        x = self.flatten(x)
        logits = self.fc(x)
        return logits
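
# Optional sanity check (illustrative, not part of the original script): the
# discriminator should map a batch of 64x64x3 images to one logit per image.
# d = Discriminator()
# print(d(tf.random.normal([4, 64, 64, 3]), training=False).shape)  # (4, 1)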



def celoss_ones(logits):
    y = tf.ones_like(logits)
    loss = keras.losses.binary_crossentropy(y,logits,from_logits=True)
    return tf.reduce_mean(loss)

def celoss_zeros(logits):
    y = tf.zeros_like(logits)
    loss = keras.losses.binary_crossentropy(y,logits,from_logits=True)
    return tf.reduce_mean(loss)


def d_loss_fn(generator, discriminator, batch_z, batch_x, is_training):
    # Discriminator loss: real images should be classified as 1, generated images as 0.
    fake_image = generator(batch_z, training=is_training)
    d_fake_logits = discriminator(fake_image, training=is_training)
    d_real_logits = discriminator(batch_x, training=is_training)
    d_loss_real = celoss_ones(d_real_logits)
    d_loss_fake = celoss_zeros(d_fake_logits)
    loss = d_loss_fake + d_loss_real
    return loss

def g_loss_fn(generator, discriminator, batch_z, is_training):
    # Generator loss: generated images should be classified as 1 by the discriminator.
    fake_image = generator(batch_z, training=is_training)
    d_fake_logits = discriminator(fake_image, training=is_training)
    loss = celoss_ones(d_fake_logits)
    return loss
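
# How the losses fit together: the discriminator is pushed to emit high logits
# for real images (celoss_ones on d_real_logits) and low logits for generated
# ones (celoss_zeros on d_fake_logits), while the generator is pushed to make
# the discriminator emit high logits for its fakes (celoss_ones on d_fake_logits).
# from_logits=True is needed because the final Dense layer outputs raw logits
# without a sigmoid.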





generator = Generator()
#generator.build(input_shape=(4,z_dim))
discriminator = Discriminator()
#discriminator.build(input_shape=(4,64,64,3))
g_optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
d_optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
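
# Note: the original DCGAN paper recommends Adam with beta_1 = 0.5 for stabler
# GAN training; a possible tweak (not part of the original post) would be:
# g_optimizer = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
# d_optimizer = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)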

def save_result(val_out, val_block_size, image_path, color_mode):
    def preprocess(img):
        # Map generator outputs from [-1, 1] back to [0, 255] uint8 pixels.
        img = ((img + 1.0) * 127.5).astype(np.uint8)
        return img

    preprocessed = preprocess(val_out)
    final_image = np.array([])
    single_row = np.array([])
    for b in range(val_out.shape[0]):
        # concat image into a row
        if single_row.size == 0:
            single_row = preprocessed[b, :, :, :]
        else:
            single_row = np.concatenate((single_row, preprocessed[b, :, :, :]), axis=1)

        # concat image row to final_image
        if (b+1) % val_block_size == 0:
            if final_image.size == 0:
                final_image = single_row
            else:
                final_image = np.concatenate((final_image, single_row), axis=0)

            # reset single row
            single_row = np.array([])

    if final_image.shape[2] == 1:
        final_image = np.squeeze(final_image, axis=2)
    # Save the tiled grid with Pillow.
    Image.fromarray(final_image).save(image_path)
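
# Usage note: save_result rescales a batch of generated images from [-1, 1]
# back to [0, 255], tiles them into a grid with val_block_size images per row,
# and writes the grid to image_path. With 100 samples and val_block_size=10
# (as in the training loop below) this yields a 10x10 preview grid.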


# Training loop: each iteration updates the discriminator 5 times, then the generator once.
for epoch in range(epochs):
    for _ in range(5):
        batch_z = tf.random.normal([batch_size, z_dim])
        batch_x = next(db_iter)
        with tf.GradientTape() as tape:
            d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x, is_training)
        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))

    batch_z = tf.random.normal([batch_size, z_dim])
    batch_x = next(db_iter)
    with tf.GradientTape() as tape:
        g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
    grads = tape.gradient(g_loss, generator.trainable_variables)
    g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

    if epoch % 10 == 0:  # periodically report the losses and save a grid of samples
        print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss))
        # Sample 100 latent vectors and save a 10x10 grid of generated faces.
        z = tf.random.normal([100, z_dim])
        fake_image = generator(z, training=False)
        img_path = 'F:\\GAN\\DCGAN\\images\\gan-%d.png' % epoch
        save_result(fake_image.numpy(), 10, img_path, color_mode='P')
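
# Optional follow-up (not part of the original post): persist the generator
# weights so new faces can be sampled later without retraining. The checkpoint
# path below is only an example.
# generator.save_weights('F:\\GAN\\DCGAN\\generator.ckpt')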

 
