Generating Specified Data with a CGAN

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import os
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)

def xavier_init(shape):
    # random-normal initializer with stddev = sqrt(2 / in_dim)
    in_dim = shape[0]
    stddev = 1./tf.sqrt(in_dim/2.)
    return tf.random_normal(shape=shape,stddev=stddev)

def Gnet(rand_x,y):
    z_cond = tf.concat([rand_x,y],axis=1)  # the noise input also gets the label concatenated on, i.e. "which digit I want to forge"
    w1 = tf.Variable(xavier_init([128+10,128]))
    b1 = tf.Variable(tf.zeros([128]),dtype=tf.float32)
    y1 = tf.nn.relu(tf.matmul(z_cond,w1)+b1)
    w2= tf.Variable(xavier_init([128,784]))
    b2 = tf.Variable(tf.zeros([784]),dtype=tf.float32)
    y2 = tf.nn.sigmoid(tf.matmul(y1,w2)+b2)
    # the trainable parameters are returned along with the output
    params = [w1,b1,w2,b2]
    return y2, params
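# Gnet maps the concatenated (noise, label) vector of width 128+10 through a
# 128-unit ReLU hidden layer to a 784-dim sigmoid output, i.e. a flattened
# 28x28 image with pixel values in [0, 1].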

def Dnet(real_x,fack_x,y):
    realx_cond = tf.concat([real_x,y],axis=1)  # real sample concatenated with its label
    fackx_cond = tf.concat([fack_x,y],axis=1)  # generated sample concatenated with the label it was conditioned on
    w1 = tf.Variable(xavier_init([784+10,128]))
    b1 = tf.Variable(tf.zeros([128]),dtype=tf.float32)
    real_y1 = tf.nn.dropout(tf.nn.relu(tf.matmul(realx_cond,w1)+b1),0.5)  # without dropout, training blows up after enough iterations
    fack_y1 = tf.nn.dropout(tf.nn.relu(tf.matmul(fackx_cond,w1)+b1),0.5)
    w2 = tf.Variable(xavier_init([128,1]))
    b2 = tf.Variable(tf.zeros([1]),dtype=tf.float32)
    real_y2 = tf.nn.sigmoid(tf.matmul(real_y1,w2)+b2)
    fack_y2 = tf.nn.sigmoid(tf.matmul(fack_y1,w2)+b2)

    params = [w1,b1,w2,b2]
    return real_y2,fack_y2,params
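# Note that Dnet builds a single set of weights and pushes both the real batch
# and the generated batch (each concatenated with the same labels) through it,
# so real_y2 and fack_y2 are scores from one shared discriminator.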

def save(samples, index,shape):
    '''Only used to save images to disk; unrelated to training'''
    os.makedirs('./d_img', exist_ok=True)  # make sure the output directory exists
    x,y=shape  # grid dimensions of the saved figure (one generated digit per cell)
    fig = plt.figure(figsize=(x,y))
    gs = gridspec.GridSpec(x,y)
    gs.update(wspace=0.05,hspace=0.05)

    for i,sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(28,28),cmap='Greys_r')
    plt.savefig('./d_img/'+'{}.png'.format(str(index).zfill(3)))
    plt.close(fig)

real_x = tf.placeholder(tf.float32,shape=[None,784])
lable = tf.placeholder(tf.float32,shape=[None,10])
rand_x = tf.placeholder(tf.float32,shape=[None,128])
fack_g_out,g_params = Gnet(rand_x,lable)  # generate fake samples
real_d_out,g_d_out,d_params = Dnet(real_x,fack_g_out,lable)  # feed the real and fake samples in together and get a probability for each
d_loss = -tf.reduce_mean(tf.log(real_d_out+1e-30) + tf.log(1.-g_d_out+1e-30))  # the 1e-30 guards against log(0)
g_loss = -tf.reduce_mean(tf.log(g_d_out+1e-30))
g_opt = tf.train.AdamOptimizer(0.001).minimize(g_loss,var_list=g_params)
d_opt = tf.train.AdamOptimizer(0.001).minimize(d_loss,var_list=d_params)
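# The two losses above are the standard conditional GAN objective, with the
# label y concatenated into both networks:
#   D maximizes  E[log D(x|y)] + E[log(1 - D(G(z|y)|y))]
#   G maximizes  E[log D(G(z|y)|y)]   (the non-saturating generator loss)
# Each optimizer only updates its own var_list, so a D step never touches the
# generator weights and vice versa.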
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(100000):
    x_random = np.random.uniform(-1.,1.,size=[100,128]).astype(np.float32)
    if i % 1000 == 0:  # this block only saves sample images; it has nothing to do with training
        labels = [d for d in range(10) for _ in range(10)]  # digits to generate: each row is one digit, rows running from 0 to 9
        cond_y = np.eye(10, dtype=np.float32)[labels]  # one-hot labels as a NumPy array (feed_dict can't take a tensor, and this avoids adding new ops to the graph each time)
        samples = sess.run(fack_g_out, feed_dict = {rand_x:x_random,lable:cond_y})
        index = int(i/1000)  # used as the filename of the saved image
        shape = [10,10]  # grid shape matching the 10x10 layout of labels
        save(samples, index, shape)  # 保存图片
    x,y = mnist.train.next_batch(100)
    D_loss,D_opt = sess.run([d_loss,d_opt],feed_dict={real_x:x,rand_x:x_random,lable:y.astype(np.float32)})
    G_loss,G_opt = sess.run([g_loss,g_opt],feed_dict={rand_x:x_random,lable:y.astype(np.float32)})
    if i % 1000 == 0:
        print('iter: %d, d_loss: %.3f, g_loss: %.3f\n' % (i,D_loss,G_loss))
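
As a quick follow-up sketch (assuming the session and the placeholders defined above are still available after training), the generator can be conditioned on a single digit instead of the full 0-9 grid:

# Hedged sketch: generate 100 images of one chosen digit (here 7).
digit = 7  # pick any class 0-9
z = np.random.uniform(-1., 1., size=[100, 128]).astype(np.float32)
cond = np.zeros([100, 10], dtype=np.float32)
cond[:, digit] = 1.  # the same one-hot condition for every noise vector
imgs = sess.run(fack_g_out, feed_dict={rand_x: z, lable: cond})
save(imgs, index=999, shape=[10, 10])  # reuses the save() helper; writes ./d_img/999.png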

 
