# -*- coding: utf-8 -*-
"""
GAN on MNIST.

A simple fully-connected generator/discriminator pair trained
adversarially on the MNIST digit images.
"""
# Step 1: import the required packages (install any missing ones first).
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from keras.layers import Input
from keras.models import Model, Sequential
from keras.layers.core import Reshape, Dense, Dropout, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.datasets import mnist
from keras.optimizers import Adam
from keras import initializers

np.random.seed(0)  # fix the RNG seed for reproducibility
randomDim = 100    # dimensionality of the generator's noise input

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = (X_train.astype(np.float32) - 127.5)/127.5  # scale pixels to [-1, 1]
X_train = X_train.reshape(X_train.shape[0], -1)  # flatten each 28x28 image to a 784 vector
adam = Adam(lr=0.0002, beta_1=0.5)  # shared optimizer for all three models
# Generator: maps a 100-d noise vector to a 784-d fake image in [-1, 1].
generator = Sequential()
generator.add(Dense(256, input_dim=randomDim,
                    kernel_initializer=initializers.RandomNormal(stddev=0.02)))
generator.add(LeakyReLU(0.2))
generator.add(Dense(512))
generator.add(LeakyReLU(0.2))
generator.add(Dense(1024))
generator.add(LeakyReLU(0.2))
# tanh output matches the [-1, 1] normalization applied to the real images.
generator.add(Dense(784, activation='tanh'))
generator.compile(loss='binary_crossentropy', optimizer=adam)
# Discriminator: maps a 784-d image to a real/fake probability.
discriminator = Sequential()
discriminator.add(Dense(1024, input_dim=784,
                        kernel_initializer=initializers.RandomNormal(stddev=0.02)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
discriminator.add(Dense(512))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
discriminator.add(Dense(256))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
discriminator.add(Dense(1, activation='sigmoid'))
discriminator.compile(loss='binary_crossentropy', optimizer=adam)
# Freeze the discriminator's weights for the combined GAN model compiled below;
# discriminator.train_on_batch still updates it, since it was compiled above
# while trainable was True.
discriminator.trainable = False
# Combined model: noise -> generator -> (frozen) discriminator -> validity score.
# Training this model updates only the generator.
ganInput = Input(shape=(randomDim,))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)

# Loss histories recorded once per epoch, for plotting.
dLosses = []
gLosses = []
def plotLoss(epoch):
    """Plot the recorded discriminator/generator losses and save the figure.

    Writes images/gan_loss_epoch_<epoch>.png; the images/ directory must exist.
    """
    plt.figure(figsize=(10, 8))
    plt.plot(dLosses, label='Discriminitive loss')
    plt.plot(gLosses, label='Generative loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig('images/gan_loss_epoch_%d.png' % epoch)
def plotGeneratedImages(epoch, examples=100, dim=(10, 10), figsize=(10, 10)):
    """Sample `examples` noise vectors, generate digits, and save a grid image.

    Writes images/gan_generated_image_epoch_<epoch>.png; the images/
    directory must exist. `dim` is the (rows, cols) layout of the grid.
    """
    noise = np.random.normal(0, 1, size=[examples, randomDim])
    generatedImages = generator.predict(noise)
    generatedImages = generatedImages.reshape(examples, 28, 28)
    plt.figure(figsize=figsize)
    for i in range(generatedImages.shape[0]):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(generatedImages[i], interpolation='nearest', cmap='gray_r')
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('images/gan_generated_image_epoch_%d.png' % epoch)
def train(epochs=1, batchSize=128):
    """Adversarially train the discriminator and generator.

    Each epoch runs `batchCount` alternating updates: first the
    discriminator on a half-real / half-generated batch, then the
    generator through the combined `gan` model. Sample grids are saved
    at epoch 1 and every 20th epoch; the loss curve is saved at the end.
    """
    batchCount = X_train.shape[0] // batchSize
    print('Epochs:', epochs)
    print('Batch size:', batchSize)
    print('Batches per epoch:', batchCount)
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batchCount)):
            # Sample noise and a random batch of real images.
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            imageBatch = X_train[np.random.randint(0, X_train.shape[0], size=batchSize)]
            generatedImages = generator.predict(noise)  # generator makes fakes
            # Real images first, generated images second.
            X = np.concatenate([imageBatch, generatedImages])
            # Labels: 0.9 for real (one-sided label smoothing), 0 for fake.
            yDis = np.zeros(2 * batchSize)
            yDis[:batchSize] = 0.9
            discriminator.trainable = True  # train the discriminator
            dloss = discriminator.train_on_batch(X, yDis)
            # Train the generator: fresh noise, labelled as real, with the
            # discriminator frozen inside the combined model.
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            yGen = np.ones(batchSize)
            discriminator.trainable = False
            gloss = gan.train_on_batch(noise, yGen)
        # Record the losses of the last batch of this epoch.
        dLosses.append(dloss)
        gLosses.append(gloss)
        if e == 1 or e % 20 == 0:
            plotGeneratedImages(e)
    plotLoss(e)
train(60, 128)  # 60 epochs, batch size 128; sample-image grids are saved at epoch 1 and every 20th epoch