"""
GAN (Generative Adversarial Network) demo.

Trains a vanilla GAN on the MNIST handwritten-digit dataset.
"""

from __future__ import print_function
from keras.datasets import mnist
from keras.layers import Input,Dense,Reshape,Flatten
from keras.layers import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential,Model
from keras.optimizers import Adam

import matplotlib.pyplot as plt
import numpy as np

class GAN():
    """Vanilla GAN: a generator learns to produce MNIST-like digit images
    while a discriminator learns to tell real images from generated ones.
    The two are trained adversarially in :meth:`train`.
    """

    def __init__(self):
        # MNIST image geometry: 28x28, single (grayscale) channel.
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        # Dimensionality of the random noise vector fed to the generator.
        self.latent_dim = 100

        optimizer = Adam(0.0002)

        # Build and compile the discriminator (trained standalone on
        # real-vs-generated batches).
        # NOTE: original called self.build_discrimintor(), a method that
        # does not exist -> AttributeError on construction; fixed here.
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build the generator (compiled only as part of `combined`).
        self.generator = self.build_generator()

        # Combined model: noise -> generated image -> discriminator verdict.
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)

        # Freeze the discriminator inside the combined model so that only
        # the generator's weights are updated when `combined` is trained.
        self.discriminator.trainable = False
        # Original referenced `validity` without ever computing it
        # (NameError); the discriminator call was missing.
        validity = self.discriminator(img)

        self.combined = Model(z, validity)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    def build_discriminator(self):
        """Return a Model mapping an image to a real/fake probability in [0, 1]."""
        model = Sequential()
        model.add(Flatten(input_shape=self.img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(1, activation='sigmoid'))

        img = Input(shape=self.img_shape)
        validity = model(img)
        return Model(img, validity)

    def build_generator(self):
        """Return a Model mapping a latent noise vector to an image in [-1, 1]."""
        model = Sequential()
        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        # tanh output matches the [-1, 1] pixel scaling applied in train().
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        # Keras `shape` must be a tuple; original passed the bare int.
        noise = Input(shape=(self.latent_dim,))
        img = model(noise)
        return Model(noise, img)

    def train(self, iter, batch_size=128, sample_interval=50):
        """Run the adversarial training loop.

        :param iter: number of training iterations (one batch each).
                     (Name kept for interface compatibility although it
                     shadows the builtin.)
        :param batch_size: images per discriminator/generator batch.
        :param sample_interval: save a sample image grid every N iterations.
        """
        # Only the training images are needed; labels/test set are discarded.
        (x_train, _), (_, _) = mnist.load_data()
        # Scale pixels from [0, 255] to [-1, 1] to match the tanh output.
        # (Original printed via '...'.x_train.shape -> AttributeError.)
        x_train = x_train / 127.5 - 1.
        x_train = np.expand_dims(x_train, axis=3)
        print('x_train shape:', x_train.shape)

        # Target labels: 1 for real images, 0 for generated ones.
        # np.ones/zeros take a single shape tuple; original passed (n, 1)
        # as two positional args, making `1` the dtype.
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for i in range(iter):
            # ---- Train the discriminator on one batch ----
            idx = np.random.randint(0, x_train.shape[0], batch_size)
            imgs = x_train[idx]

            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            gen_imgs = self.generator.predict(noise)

            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---- Train the generator via the combined model ----
            # Fresh noise, labelled "real" so the generator is pushed to
            # fool the frozen discriminator. (Original left this as
            # `noise = np.random`, training on the module object.)
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            g_loss = self.combined.train_on_batch(noise, valid)

            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]"
                  % (i, d_loss[0], 100 * d_loss[1], g_loss))

            # Periodically save a grid of generated samples.
            if i % sample_interval == 0:
                self.sample_images(i)

    def sample_images(self, iter):
        """Save a 5x5 grid of generated digits to images/<iter>.png."""
        import os  # local import; only needed for the output directory

        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        gen_imgs = self.generator.predict(noise)

        # Undo the [-1, 1] training scaling back to [0, 1] for display.
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1

        # Original crashed with FileNotFoundError if images/ did not exist.
        os.makedirs('images', exist_ok=True)
        fig.savefig("images/%d.png" % iter)
        plt.close()


if __name__=="__main__":
    # Build the generator/discriminator/combined models.
    # NOTE(review): training is never started here — presumably a
    # gan.train(...) call was intended; confirm before relying on this script.
    gan = GAN()


  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
GAN(Generative Adversarial Networks)是一种生成模型,由两个神经网络组成:生成器和判别器。生成器用于生成新的数据样本,而判别器则用于判定生成生成的样本是否真实。两个神经网络不断进行对抗训练,使得生成器逐渐生成更加逼真的样本。 以下是一个简单的GAN代码实现: ```python import tensorflow as tf from tensorflow.keras.layers import Dense, Input, LeakyReLU from tensorflow.keras import Model # 定义生成器 def build_generator(input_shape, output_shape): inputs = Input(shape=input_shape) x = Dense(64, activation=LeakyReLU(alpha=0.2))(inputs) x = Dense(128, activation=LeakyReLU(alpha=0.2))(x) x = Dense(256, activation=LeakyReLU(alpha=0.2))(x) outputs = Dense(output_shape, activation='sigmoid')(x) generator = Model(inputs=inputs, outputs=outputs) return generator # 定义判别器 def build_discriminator(input_shape): inputs = Input(shape=input_shape) x = Dense(256, activation=LeakyReLU(alpha=0.2))(inputs) x = Dense(128, activation=LeakyReLU(alpha=0.2))(x) x = Dense(64, activation=LeakyReLU(alpha=0.2))(x) outputs = Dense(1, activation='sigmoid')(x) discriminator = Model(inputs=inputs, outputs=outputs) return discriminator # 定义GAN模型 def build_gan(generator, discriminator): discriminator.trainable = False inputs = Input(shape=generator.input_shape[1:]) generated_data = generator(inputs) outputs = discriminator(generated_data) gan = Model(inputs=inputs, outputs=outputs) return gan # 定义损失函数和优化器 cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True) generator_optimizer = tf.keras.optimizers.Adam(1e-4) discriminator_optimizer = tf.keras.optimizers.Adam(1e-4) # 定义训练步骤 @tf.function def train_step(real_data, generator, discriminator): # 生成生成假数据 noise = tf.random.normal([real_data.shape[0], 100]) generated_data = generator(noise) # 训练判别器 with tf.GradientTape() as tape: real_output = discriminator(real_data) fake_output = discriminator(generated_data) real_loss = cross_entropy(tf.ones_like(real_output), real_output) fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output) discriminator_loss = real_loss + fake_loss grads = tape.gradient(discriminator_loss, 
discriminator.trainable_variables) discriminator_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables)) # 训练生成器 with tf.GradientTape() as tape: fake_output = discriminator(generated_data) generator_loss = cross_entropy(tf.ones_like(fake_output), fake_output) grads = tape.gradient(generator_loss, generator.trainable_variables) generator_optimizer.apply_gradients(zip(grads, generator.trainable_variables)) # 加载数据集 (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data() train_images = train_images.reshape(train_images.shape[0], 784).astype('float32') train_images = (train_images - 127.5) / 127.5 # 将像素值归一化到[-1, 1]之间 BUFFER_SIZE = 60000 BATCH_SIZE = 256 train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) # 创建生成器和判别器 generator = build_generator((100,), 784) discriminator = build_discriminator((784,)) # 创建GAN模型 gan = build_gan(generator, discriminator) # 训练GAN模型 EPOCHS = 100 for epoch in range(EPOCHS): for real_data in train_dataset: train_step(real_data, generator, discriminator) if epoch % 10 == 0: print('Epoch {}: done.'.format(epoch)) ``` 上述代码中,我们通过 `build_generator` 和 `build_discriminator` 分别定义了生成器和判别器。然后,我们通过 `build_gan` 将生成器和判别器组合成一个GAN模型。 在训练过程中,我们需要定义 `train_step` 函数,并在其中完成生成器和判别器的训练。具体来说,我们首先使用生成生成假数据,然后训练判别器来区分真实数据和假数据。接着,我们使用生成器的输出来训练生成器,使得生成器逐渐生成更加逼真的假数据。 最后,我们使用 `train_dataset` 中的真实数据来训练GAN模型。在每个epoch结束后,我们打印一条日志。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值