生成对抗网络——dcgan

参考链接:

https://blog.51cto.com/gloomyfish/2348169?source=drh

https://blog.csdn.net/hiudawn/article/details/80775740

https://blog.csdn.net/DLW__/article/details/99942598

https://blog.csdn.net/u013250416/article/details/78254444

数据集:代码实现1使用 MNIST 手写数字数据集;代码实现2使用自定义图片数据集(下载链接见代码中的注释)。

代码实现1:dcgan--mnist(手写数字生成)

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import os
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt


img_height = 28   # MNIST input image height
img_width = 28    # MNIST input image width
batch_size = 100
out_height = 28   # generator output height
out_width = 28    # generator output width
c_dim = 1         # image channels (grayscale)
y_dim = 10        # number of label classes (digits 0-9)
df_dim = 64       # base filter count for discriminator conv layers
dfc_dim = 1024    # discriminator fully-connected layer width
gf_dim = 64       # base filter count for generator deconv layers
gfc_dim = 1024    # generator fully-connected layer width
max_epoch = 300
z_dim = 100 # dimensionality of the noise vector fed to the generator
save_path = './out/'  # directory where sample image grids are written


def lrelu(x, leak=0.2):
    """Leaky ReLU activation (see "Rectifier Nonlinearities Improve Neural
    Network Acoustic Models"); the result has the same shape as `x`."""
    leaked = leak * x
    return tf.maximum(leaked, x)

def conv2d(input_,output_dim,name,k_h=5,k_w=5,s_h=2,s_w=2,stddev=0.02):
    '''Plain strided convolution layer (5x5 kernel, stride 2, SAME padding by default).'''
    with tf.name_scope(name):
        # Kernel shape: [k_h, k_w, in_channels, out_channels]. The name_scope
        # prefix on the variables is what the d_/g_ startswith() filtering
        # over trainable_variables relies on later in the script.
        w = tf.Variable(tf.truncated_normal(stddev=stddev,shape=[k_h, k_w, input_.shape.as_list()[-1], output_dim]))
        conv = tf.nn.conv2d(input_,w,strides=[1,s_h,s_w,1],padding='SAME')
        b = tf.Variable(tf.zeros([output_dim]))
        # The reshape to conv.shape is effectively a no-op; it just pins the static shape.
        return tf.reshape(tf.nn.bias_add(conv,b),conv.shape)

def conv_cond_concat(xb,yb):
    '''Attach the one-hot label condition to a feature map (conditional DCGAN):
    broadcasts `yb` over the spatial dims of `xb` and concatenates on channels.'''
    # The first dimension of the input is assumed to be batch_size.
    xb_shape = xb.shape.as_list()
    yb_shape = yb.shape.as_list()
    yb = tf.reshape(yb,[yb_shape[0],1,1,yb_shape[-1]])
    return tf.concat([xb,yb*tf.ones([xb_shape[0],xb_shape[1],xb_shape[2],yb_shape[-1]])],3) # concat along the last (channel) axis

def batch_norm(x,name,train = True, epsilon=1e-5, momentum=0.9):
    '''Batch normalization, thin wrapper over tf.contrib.layers.batch_norm.'''
    # This layer also creates trainable variables (scale/offset), scoped under `name`.
    return tf.contrib.layers.batch_norm(x, decay=momentum,updates_collections=None,epsilon=epsilon,scale=True,is_training=train,scope = name)

def linear(input_, output_dim,name,stddev=0.02):
    '''Fully-connected layer: matrix multiply plus bias.'''
    with tf.name_scope(name):  # scopes the created ops/variables under `name`
        matrix = tf.Variable(tf.random_normal(shape=[input_.shape.as_list()[-1],output_dim],stddev=stddev,dtype=tf.float32))
        bias = tf.Variable(tf.zeros([output_dim]))
        return tf.matmul(input_, matrix) + bias

def deconvolution(input_,output_dim,name,k_h=5,k_w=5,s_h=2,s_w=2,stddev=0.02):
    '''Transposed convolution (upsampling). `output_dim` is the full output
    shape [batch, height, width, channels].'''
    with tf.name_scope(name):
        # Kernel shape for conv2d_transpose: [k_h, k_w, out_channels, in_channels].
        w = tf.Variable(tf.truncated_normal(shape=[k_h,k_w,output_dim[-1],input_.shape.as_list()[-1]],stddev=stddev))
        deconv = tf.nn.conv2d_transpose(input_,w,output_shape=output_dim,strides=[1,s_h,s_w,1])
        b = tf.Variable(tf.zeros([output_dim[-1]]))
        # The reshape is effectively a no-op; it just pins the static output shape.
        return tf.reshape(tf.nn.bias_add(deconv,b),deconv.shape)

def get_z(shape):
    """Sample a float32 noise array uniformly from [-1, 1) as generator input."""
    noise = np.random.uniform(low=-1., high=1., size=shape)
    return noise.astype(np.float32)

def discriminator(x,x_generated,y):
    '''Conditional discriminator: scores a batch of real images `x` and a batch
    of generated images `x_generated`, both conditioned on one-hot labels `y`.
    Returns (p_real, p_fake), each of shape [batch_size, 1].'''
    # Pitfall: calling a discriminator-builder twice would create two separate
    # sets of tf.Variable weights (tf.get_variable would be needed to share
    # them), so real and fake batches are concatenated and scored in one pass.
    x = tf.concat([x,x_generated],0) 
    # y is doubled accordingly to match the concatenated batch.
    y = tf.concat([y,y],0)
    # Attach the label condition to the input as extra channels.
    x = conv_cond_concat(x,y)

    h0 = lrelu(conv2d(x,c_dim+y_dim,name='d_c'))
    h0 = conv_cond_concat(h0,y)

    h1 = lrelu(batch_norm(conv2d(h0,df_dim+y_dim,name='d_c'),name='d_cb1'))
    h1 = tf.reshape(h1,[batch_size+batch_size,-1])  # flatten; 2*batch_size rows (real + fake)
    h1 = tf.concat([h1,y],1)

    h2 = lrelu(batch_norm(linear(h1,dfc_dim,name='d_c'),name='d_cb2'))
    h2 = tf.concat([h2,y],1)

    h3 = linear(h2,1,name='d_fc')

    # Split the stacked scores back into the real half and the generated half.
    y_data = tf.nn.sigmoid(tf.slice(h3, [0, 0], [batch_size, -1], name=None))  
    y_generated = tf.nn.sigmoid(tf.slice(h3, [batch_size, 0], [-1, -1], name=None)) 

    return y_data,y_generated

def generator(z,y):
    '''Conditional generator: maps noise `z` and one-hot labels `y` to a batch
    of out_height x out_width x c_dim images with sigmoid outputs in (0, 1).'''
    s_h,s_w = out_height,out_width
    s_h2,s_w2 = int(s_h/2),int(s_w/2)  # intermediate spatial size (half)
    s_h4,s_w4 = int(s_h/4),int(s_w/4)  # initial spatial size (quarter)

    # The noise is also conditioned on the labels.
    z = tf.concat([z,y],1)

    h0 = tf.nn.relu(batch_norm(linear(z,gfc_dim,name='g_fc'),name='g_fcb1'))
    h0 = tf.concat([h0,y],1)

    h1 = tf.nn.relu(batch_norm(linear(h0,gf_dim*2*s_h4*s_w4,name='g_fc'),name='g_fcb2'))
    h1 = tf.reshape(h1,[batch_size,s_h4,s_w4,gf_dim*2])
    h1 = conv_cond_concat(h1,y)

    h2 = tf.nn.relu(batch_norm(deconvolution(h1,[batch_size,s_h2,s_w2,gf_dim*2],name='g_dc'),name='g_dcb'))
    h2 = conv_cond_concat(h2,y)
    # The DCGAN paper uses tanh here; sigmoid is more convenient when the
    # outputs are saved directly as images with pixels in [0, 1].
    return tf.nn.sigmoid(deconvolution(h2,[batch_size,s_h,s_w,c_dim],name='g_dc'))

def save(samples, index,shape):
    '''Save a grid of generated samples as one PNG; unrelated to training.

    samples: iterable of [H, W, 1] arrays; index: file number (zero-padded to
    3 digits); shape: (rows, cols) of the grid, matching len(samples).'''
    x,y=shape  # grid width/height in cells (one generated digit per cell)
    fig = plt.figure(figsize=(x,y))
    gs = gridspec.GridSpec(x,y)
    gs.update(wspace=0.05,hspace=0.05)

    for i,sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample[:,:,0],cmap='Greys_r')
    plt.savefig(save_path+'{}.png'.format(str(index).zfill(3)))
    plt.close(fig)

mnist = input_data.read_data_sets('mnist/', one_hot=True)  # load the MNIST dataset

# Placeholders: noise input, real images, and one-hot labels.
z = tf.placeholder(tf.float32,shape=[None,z_dim])
x = tf.placeholder(tf.float32,shape=[batch_size,img_height,img_width,c_dim])
y = tf.placeholder(tf.float32,shape=[batch_size,y_dim])

x_generated = generator(z,y)  # generated (fake) images
d_real,d_fake = discriminator(x,x_generated,y)  # probabilities for the real / fake batches

d_loss = -tf.reduce_mean(tf.log(d_real+1e-30) + tf.log(1.-d_fake+1e-30))  # the 1e-30 guards against log(0)
g_loss = -tf.reduce_mean(tf.log(d_fake+1e-30))  # tf's sigmoid_cross_entropy_with_logits would also avoid log(0), not used here

# Key step: collect all trainable variables and split them by name prefix
# (the d_/g_ prefixes were chosen deliberately when building the layers).
t_vars = tf.trainable_variables()  # list of every trainable variable
d_vars = [var for var in t_vars if var.name.startswith('d_')]
g_vars = [var for var in t_vars if var.name.startswith('g_')]

d_optimizer = tf.train.AdamOptimizer(0.0002,beta1=0.5)  # beta1 acts as momentum
g_optimizer = tf.train.AdamOptimizer(0.0002,beta1=0.5)

# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# with tf.control_dependencies(update_ops):
d_solver = d_optimizer.minimize(d_loss,var_list = d_vars)
g_solver = g_optimizer.minimize(g_loss,var_list = g_vars)



# Create the session and initialize every variable in the graph.
sess = tf.Session()
sess.run(tf.global_variables_initializer())

if not os.path.exists(save_path):
    os.makedirs(save_path)  # directory for the saved sample image grids

iteration = int(50000/batch_size)  # MNIST training-set size / batch size
for epoch in range(max_epoch):

    # --- visualization only: save a 10x10 grid of generated digits ---
    # Row-major labels: ten 0s, ten 1s, ... so each grid row shows one digit.
    labels = [i for i in range(10) for _ in range(10)]
    # Build the one-hot labels in numpy instead of sess.run(tf.one_hot(...)):
    # the original created new graph ops on every epoch, steadily growing the
    # graph (a memory leak over a 300-epoch run). feed_dict needs np arrays anyway.
    cond_y = np.eye(y_dim, dtype=np.float32)[labels]
    samples = sess.run(x_generated, feed_dict={z: get_z([100, z_dim]), y: cond_y})
    save(samples, epoch, [10, 10])  # grid shape matches the label layout above

    # --- main training steps ---
    for idx in range(iteration):
        # Fetch and reshape one batch of data.
        x_mb, y_mb = mnist.train.next_batch(batch_size)
        z_mb = get_z([batch_size, z_dim])
        x_mb = np.reshape(x_mb, [batch_size, out_height, out_width, 1])
        # One discriminator step, then one generator step, on the same batch.
        _, d_loss_ = sess.run([d_solver, d_loss],
                              feed_dict={x: x_mb, z: z_mb, y: y_mb.astype(np.float32)})
        _, g_loss_ = sess.run([g_solver, g_loss],
                              feed_dict={x: x_mb, z: z_mb, y: y_mb.astype(np.float32)})
        # The original guarded this with `if idx % 1 == 0`, which is always
        # true; log unconditionally (raise the modulus to log less often).
        print('epoch:', epoch, 'd_loss: ', d_loss_, 'g_loss:', g_loss_)

代码实现2:dcgan--生成自定义图片

test1.py

# import msvcrt
import os
import time

import tensorflow as tf
from keras import backend as K
import keras.backend.tensorflow_backend as KTF
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
import keras
from keras import layers, Sequential, models
import numpy as np

# Manual GPU memory configuration
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU memory on demand instead of grabbing it all
# config.gpu_options.per_process_gpu_memory_fraction = 0.5  # cap at 50% of GPU memory
sess = tf.Session(config=config)  # session handed to Keras below
KTF.set_session(sess)

# I/O parameters
latent_dim = 100         # dimensionality of the generator's noise input
img_shape = (96, 96, 3)  # height, width, channels of the training images


# ************************** 生成器
def build_generator():
    '''Build the DCGAN generator: noise (latent_dim,) -> 96x96x3 image in [-1, 1].
    Four UpSampling2D+Conv2D stages: 6x6 -> 12 -> 24 -> 48 -> 96.'''
    model = Sequential()
    model.add(layers.Dense(512 * 6 * 6, activation='relu', input_dim=latent_dim))  # project the 100-d noise to a 6*6*512 volume
    model.add(layers.Reshape((6, 6, 512)))
    model.add(layers.UpSampling2D())  # upsample 6x6 -> 12x12 (original comment said 14*14*128, which was wrong)
    model.add(layers.Conv2D(256, kernel_size=5, padding='same'))
    model.add(layers.BatchNormalization(momentum=0.8))
    model.add(layers.Activation("relu"))
    model.add(layers.UpSampling2D())  # 12x12 -> 24x24
    model.add(layers.Conv2D(128, kernel_size=5, padding="same"))
    model.add(layers.BatchNormalization(momentum=0.8))
    model.add(layers.Activation("relu"))
    model.add(layers.UpSampling2D())  # 24x24 -> 48x48
    model.add(layers.Conv2D(64, kernel_size=5, padding="same"))
    model.add(layers.BatchNormalization(momentum=0.8))
    model.add(layers.Activation("relu"))
    model.add(layers.UpSampling2D())  # 48x48 -> 96x96
    model.add(layers.Conv2D(img_shape[-1], kernel_size=5, padding="same"))
    model.add(layers.Activation("tanh"))  # output in [-1, 1], matching the data scaling
    model.summary()  # print layer/parameter summary
    noise = models.Input(shape=(latent_dim,))
    img = model(noise)
    return models.Model(noise, img)  # model mapping one noise input to one image output


# ************************** 判别器
def build_discriminator():
    '''Build the DCGAN discriminator: 96x96x3 image -> sigmoid probability of
    the image being real. Strided convolutions replace pooling (DCGAN style).'''
    model = Sequential()
    dropout = 0.4  # dropout rate applied after every conv stage
    model.add(layers.Conv2D(64, kernel_size=5, strides=2, input_shape=img_shape, padding="same"))
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.Dropout(dropout))
    model.add(layers.Conv2D(128, kernel_size=5, strides=2, padding="same"))
    model.add(layers.ZeroPadding2D(padding=((0, 1), (0, 1))))  # pad bottom/right by one pixel
    model.add(layers.BatchNormalization(momentum=0.8))
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.Dropout(dropout))
    model.add(layers.Conv2D(256, kernel_size=5, strides=2, padding="same"))
    model.add(layers.BatchNormalization(momentum=0.8))
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.Dropout(dropout))
    model.add(layers.Conv2D(512, kernel_size=5, strides=1, padding="same"))
    model.add(layers.BatchNormalization(momentum=0.8))
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.Dropout(dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(1, activation='sigmoid'))  # single real/fake probability
    model.summary()
    img = models.Input(shape=img_shape)
    validity = model(img)
    return models.Model(img, validity)


def load_dir_img(sorcedir):
    """Load every image in `sorcedir` into one float array scaled to [-1, 1].

    All images must share the dimensions of the first file (the array shape is
    taken from it). Returns an array of shape (num_files,) + image_shape.
    """
    print('正在读取图片...')
    # Sort for a deterministic dataset ordering; os.listdir order is arbitrary.
    files = sorted(os.listdir(sorcedir))
    first = image.img_to_array(image.load_img(os.path.join(sorcedir, files[0])))
    data = np.zeros((len(files),) + first.shape)
    data[0] = first  # reuse the already-loaded first image instead of reloading it
    for i, fname in enumerate(files[1:], start=1):
        data[i] = image.img_to_array(image.load_img(os.path.join(sorcedir, fname)))
    # Map pixel values from [0, 255] to [-1, 1] to match the generator's tanh output.
    return data / 127.5 - 1


# ************************** Build models
optimizer = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
# Build and compile the discriminator (trained directly on real/fake batches).
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Build the generator (trained only through the combined model below).
generator = build_generator()
# Build the adversarial (stacked) model.
# Freeze the discriminator so the combined model trains only the generator.
discriminator.trainable = False
input_noise = models.Input(shape=(latent_dim,))
combined = models.Model(input_noise, discriminator(generator(input_noise)))
combined.compile(loss='binary_crossentropy', optimizer=optimizer)

# ************************** Load Data
# Dataset source: https://drive.google.com/drive/folders/1mCsY5LEsgCnc0Txv0rpAUhKVPWVkbw5I?usp=sharing
x = load_dir_img(r'dataset/')


# ************************** 训练
"""
    gdrate:额外的生成器训练比率(判别器50%额外训练0次,100%额外训练gdrate次)
    save_interval:保存间隔(steap)
"""
def run(epochs=500, batch_size=2, save_interval=10, gdrate=3, save_dir='save/', history=None):
    last_time = time.clock()
    start_epoch = 0
    if history is None:
        history = []
    else:
        start_epoch = history[-1][0]
    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))
    for epoch in range(epochs):
        for step in range(x.shape[0] // batch_size):
            # # 按q终止
            # while msvcrt.kbhit():
            #     char = ord(msvcrt.getch())
            #     if char == 113:
            #         return history
            g_loss = -1
            # 训练判别器
            imgs = x[step * batch_size:step * batch_size + batch_size]
            print(imgs.shape)
            noise = np.random.normal(0, 1, (batch_size, latent_dim))
            gen_imgs = generator.predict(noise)
            d_loss_real = discriminator.train_on_batch(imgs, valid)
            d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            # 训练生成器(动态训练比例)
            for i in range(1 + int(gdrate * np.maximum(d_loss[1] - .5, 0) * 2)):
                noise = np.random.normal(0, 1, (batch_size, latent_dim))
                g_loss = combined.train_on_batch(noise, valid)
            # Log
            if step % save_interval == 0:
                print(
                    "%d:%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch+start_epoch, step, d_loss[0], 100 * d_loss[1], g_loss))
                history.append((epoch+start_epoch, step, d_loss[0], 100 * d_loss[1], g_loss))
                combined.save('gan.h5')
                # 保存生成的图像
                img = image.array_to_img(gen_imgs[0] * 127 + 127., scale=False)
                img.save(os.path.join(save_dir, 'train_' + str(epoch+start_epoch) + '_' + str(step) + '.png'))
                # 保存真实图像,以便进行比较
                # img = image.array_to_img(imgs[0] * 127 + 127., scale=False)
                # img.save(os.path.join(save_dir, 'real_' + str(epoch+start_epoch) + '_' + str(step) + '.png'))
        # 计时
        print('epoch run %d s, total run %d s' % (time.clock() - last_time, time.clock()))
        last_time = time.clock()
    combined.save('model/gan.h5')
    return history


# ************************** 生成
def generate(generator, num=10, save_dir=r'gan_image'):
    """Sample `num` images from `generator` and write them to `save_dir` as
    generated_<i>.png, rescaling the tanh output back to pixel values."""
    z_size = K.int_shape(generator.layers[0].input)[1]
    samples = generator.predict(np.random.normal(0, 1, (num, z_size)))
    for idx, sample in enumerate(samples):
        out = image.array_to_img(sample * 127 + 127., scale=False)
        out.save(os.path.join(save_dir, 'generated_' + str(idx) + '.png'))


# ************************** Run
history = run()  # train from scratch; keeps the returned loss history


test2.py

# import msvcrt
import os
import time

import tensorflow as tf
from keras import backend as K
import keras.backend.tensorflow_backend as KTF
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
import keras
from keras import layers, Sequential, models
import numpy as np
import matplotlib.pyplot as plt

# Manual GPU memory configuration
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU memory on demand instead of grabbing it all
# config.gpu_options.per_process_gpu_memory_fraction = 0.5  # cap at 50% of GPU memory
sess = tf.Session(config=config)  # session handed to Keras below
KTF.set_session(sess)

# I/O parameters
latent_dim = 100         # dimensionality of the generator's noise input
img_shape = (96, 96, 3)  # height, width, channels of the training images

# Shared hyperparameters
optimizer = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
norm_momentum = 0.8  # BatchNormalization momentum used throughout
lrelu_alpha = 0.2    # LeakyReLU negative slope used throughout

# ************************** 生成器
def build_generator():
    '''Build the DCGAN generator: noise (latent_dim,) -> 96x96x3 image in [-1, 1].
    Four stride-2 transposed convolutions upsample 6x6 -> 12 -> 24 -> 48 -> 96.'''
    model = Sequential()
    model.add(layers.Dense(512 * 6 * 6, activation='relu', input_dim=latent_dim))  # project the 100-d noise to a 6*6*512 volume
    model.add(layers.Reshape((6, 6, 512)))
    model.add(layers.Conv2DTranspose(256, 5, strides=2, padding='same'))  # 6x6 -> 12x12
    model.add(layers.BatchNormalization(momentum=norm_momentum))
    model.add(layers.Activation("relu"))
    model.add(layers.Conv2DTranspose(128, 5, strides=2, padding='same'))  # 12x12 -> 24x24
    model.add(layers.BatchNormalization(momentum=norm_momentum))
    model.add(layers.Activation("relu"))
    model.add(layers.Conv2DTranspose(64, 5, strides=2, padding='same'))   # 24x24 -> 48x48
    model.add(layers.BatchNormalization(momentum=norm_momentum))
    model.add(layers.Activation("relu"))
    model.add(layers.Conv2DTranspose(img_shape[-1], 5, strides=2, padding='same'))  # 48x48 -> 96x96x3
    model.add(layers.Activation("tanh"))  # output in [-1, 1], matching the data scaling
    model.summary()  # print layer/parameter summary
    noise = models.Input(shape=(latent_dim,))
    img = model(noise)
    return models.Model(noise, img)  # model mapping one noise input to one image output


# ************************** 判别器
def build_discriminator():
    '''Build the DCGAN discriminator: 96x96x3 image -> sigmoid probability of
    the image being real. Strided convolutions replace pooling (DCGAN style).'''
    dropout = 0.4  # dropout rate applied after every conv stage
    model = Sequential()
    model.add(layers.Conv2D(64, kernel_size=5, strides=2, input_shape=img_shape, padding="same"))
    model.add(layers.LeakyReLU(alpha=lrelu_alpha))
    model.add(layers.Dropout(dropout))
    model.add(layers.Conv2D(128, kernel_size=5, strides=2, padding="same"))
    model.add(layers.BatchNormalization(momentum=norm_momentum))
    model.add(layers.LeakyReLU(alpha=lrelu_alpha))
    model.add(layers.Dropout(dropout))
    model.add(layers.Conv2D(256, kernel_size=5, strides=2, padding="same"))
    model.add(layers.BatchNormalization(momentum=norm_momentum))
    model.add(layers.LeakyReLU(alpha=lrelu_alpha))
    model.add(layers.Dropout(dropout))
    model.add(layers.Conv2D(512, kernel_size=5, strides=1, padding="same"))
    model.add(layers.BatchNormalization(momentum=norm_momentum))
    model.add(layers.LeakyReLU(alpha=lrelu_alpha))
    model.add(layers.Dropout(dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(1, activation='sigmoid'))  # single real/fake probability
    model.summary()
    img = models.Input(shape=img_shape)
    validity = model(img)
    return models.Model(img, validity)


# 从文件夹加载图片数据
def load_dir_img(sorcedir):
    """Load every image in `sorcedir` into one float array scaled to [-1, 1].

    All images must share the dimensions of the first file (the array shape is
    taken from it). Returns an array of shape (num_files,) + image_shape.
    """
    print('正在读取图片...')
    # Sort for a deterministic dataset ordering; os.listdir order is arbitrary.
    files = sorted(os.listdir(sorcedir))
    first_shape = image.img_to_array(image.load_img(os.path.join(sorcedir, files[0]))).shape
    data = np.zeros((len(files),) + first_shape)
    for i, fname in enumerate(files):
        # Map pixel values from [0, 255] to [-1, 1] (the generator uses tanh).
        data[i] = image.img_to_array(image.load_img(os.path.join(sorcedir, fname))) / 127.5 - 1
    return data


# ************************** 训练
"""
    gdrate:额外的生成器训练比率(判别器50%额外训练0次,100%额外训练gdrate次)
    save_interval:保存间隔(steap)
"""
def run(epochs=100, batch_size=128, save_interval=100, gdrate=3, save_dir='save/', history=None):
    last_time = time.clock()
    start_epoch = 0
    if history is None:
        history = []
    else:
        start_epoch = int(history[-1][0])
    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))
    for epoch in range(epochs):
        for step in range(x.shape[0] // batch_size):
            # # 按q终止
            # while msvcrt.kbhit():
            #     char = ord(msvcrt.getch())
            #     if char == 113:
            #         return history
            g_loss = -1
            # 训练判别器
            imgs = x[step * batch_size:step * batch_size + batch_size]
            noise = np.random.normal(0, 1, (batch_size, latent_dim))
            gen_imgs = generator.predict(noise)
            d_loss_real = discriminator.train_on_batch(imgs, valid)
            d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            # 训练生成器(动态训练比例)
            for i in range(1 + int(gdrate * np.maximum(d_loss[1] - .5, 0) * 2)):
                noise = np.random.normal(0, 1, (batch_size, latent_dim))
                g_loss = combined.train_on_batch(noise, valid)
            # Log
            if step % save_interval == 0:
                print(
                    "%d:%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch+start_epoch, step, d_loss[0], 100 * d_loss[1], g_loss))
                history.append([epoch+start_epoch, step, d_loss[0], 100 * d_loss[1], g_loss])
                save('model/')
                # 保存生成的图像
                img = image.array_to_img(gen_imgs[0] * 127 + 127., scale=False)
                img.save(os.path.join(save_dir, 'train_' + str(epoch+start_epoch) + '_' + str(step) + '.png'))
                # 保存真实图像,以便进行比较
                # img = image.array_to_img(imgs[0] * 127 + 127., scale=False)
                # img.save(os.path.join(save_dir, 'real_' + str(epoch+start_epoch) + '_' + str(step) + '.png'))
        # 计时
        print('epoch run %d s, total run %d s' % (time.clock() - last_time, time.clock()))
        last_time = time.clock()
    save('model/')
    return history


# ************************** 生成
def generate(generator, save_dir=r'gan_image', num=100):
    """Sample `num` images from `generator` and write them to `save_dir` as
    generated_<i>.png, rescaling the tanh output back to pixel values."""
    z_size = K.int_shape(generator.layers[0].input)[1]
    samples = generator.predict(np.random.normal(0, 1, (num, z_size)))
    for idx, sample in enumerate(samples):
        out = image.array_to_img(sample * 127 + 127., scale=False)
        out.save(os.path.join(save_dir, 'generated_' + str(idx) + '.png'))

def plot_history():
    """Scatter-plot column 3 (discriminator accuracy %) against column 0
    (epoch) of the global `history` records."""
    records = np.array(history)
    epochs_col, acc_col = records[:, 0], records[:, 3]
    plt.scatter(epochs_col, acc_col)
    plt.show()


# ************************** 中途保存
def save(folder):
    """Write the combined, generator and discriminator models into `folder`."""
    checkpoints = ((combined, 'gan.h5'),
                   (generator, 'gan_g.h5'),
                   (discriminator, 'gan_d.h5'))
    for model_obj, fname in checkpoints:
        model_obj.save(os.path.join(folder, fname))

def load(folder):
    '''Restore a training checkpoint: the history list plus the three models.
    Rebuilds and recompiles the combined model, mirroring the module-level
    construction, so training can resume via run(history=...).'''
    history = np.load(os.path.join(folder, 'history.npy')).tolist()
    generator = models.load_model(os.path.join(folder, 'gan_g.h5'))
    discriminator = models.load_model(os.path.join(folder, 'gan_d.h5'))
    # Freeze the discriminator before compiling the stacked model so that
    # combined.train_on_batch only updates the generator's weights.
    discriminator.trainable = False
    input_noise = models.Input(shape=(latent_dim,))
    combined = models.Model(input_noise, discriminator(generator(input_noise)))
    combined.compile(loss='binary_crossentropy', optimizer=optimizer)
    return history, generator, discriminator, combined


# ************************** Load Data
# Dataset source: https://drive.google.com/drive/folders/1mCsY5LEsgCnc0Txv0rpAUhKVPWVkbw5I?usp=sharing
# x = load_dir_img(r'C:\dataset\faces3m96')
print('正在加载数据')
# x = np.load(r'C:\dataset\faces5m96.npy')
x = load_dir_img(r'dataset/')

# ************************** Build models
# Build and compile the discriminator (trained directly on real/fake batches).
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Build the generator (trained only through the combined model below).
generator = build_generator()
# Build the adversarial (stacked) model.
# Freeze the discriminator so the combined model trains only the generator.
discriminator.trainable = False
input_noise = models.Input(shape=(latent_dim,))
combined = models.Model(input_noise, discriminator(generator(input_noise)))
combined.compile(loss='binary_crossentropy', optimizer=optimizer)


# ************************** Run
history = run()  # train from scratch

# To resume training from a saved checkpoint instead:
# history, generator, discriminator, combined=load(r'C:\temp\DCGAN')
# history = run(history=history)

  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 2
    评论
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值