Learn Algorithms with Me: Generative Adversarial Networks

A generative adversarial network (GAN) trains two opposing functions against each other; by driving each one toward its own optimum, the parameters obtained at the end are a good set of parameters for both networks.

The two adversarial loss functions:

Loss one is the discriminator loss. Minimizing it pushes the discriminator's accuracy up, i.e. self.D1 (the score on real data) toward 1 and self.D2 (the score on generated data) toward 0:

self.loss_d = tf.reduce_mean(-tf.log(self.D1) - tf.log(1-self.D2))

Loss two is the generator (faking) loss: the closer self.D2 gets to 1 the better, i.e. the higher the probability that a fake sample is judged real, the better:

self.loss_g = tf.reduce_mean(-tf.log(self.D2))
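
For reference, these two code-level losses are the standard GAN objective split per network. In the original GAN formulation the two players solve the minimax game

\[
\min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{\mathrm{data}}}[\log D(x)] + \mathbb{E}_{z \sim p_z}[\log(1 - D(G(z)))]
\]

with D1 = D(x) and D2 = D(G(z)). loss_d above is just the discriminator's objective negated so it can be minimized; loss_g uses the non-saturating form -E[log D(G(z))] rather than E[log(1 - D(G(z)))], a common choice because it gives the generator stronger gradients early in training.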

The program below also uses a pre-discriminator: the discriminator is first pretrained on the known data density before adversarial training starts.

The code is walked through in order below.
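
The original post does not show its import block. Judging from the calls used below (argparse, np.reshape, tf.placeholder, norm.pdf, plt.plot), the snippets presumably assume something like the following TensorFlow 1.x imports:

import argparse

import numpy as np
import tensorflow as tf
from scipy.stats import norm
import matplotlib.pyplot as plt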

# Set up the command-line options
def parse_args():
    parser = argparse.ArgumentParser()
    # Arguments can be passed directly on the command line; default values are set via default
    parser.add_argument('--num-steps', type=int, default=1200,
                        help='the number of training steps to take')
    parser.add_argument('--batch-size', type=int, default=12,
                        help='the batch size')
    parser.add_argument('--log-every', type=int, default=10,
                        help='print loss after this many steps')

    return parser.parse_args()
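
Assuming the whole script is saved as, say, gan_1d.py (a hypothetical filename), it would be launched like this; all three flags are optional thanks to their defaults:

python gan_1d.py --num-steps 1200 --batch-size 12 --log-every 10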

The program entry point

if __name__ == '__main__':
    main(parse_args())

Define the main function

def main(args):
    # Create the GAN instance
    model = GAN(
        # Produces the real data (see the sketch after this block)
        DataDistribution(),
        # Produces the generator's input values, i.e. the fake-data side (see the sketch after this block)
        GeneratorDistribution(range=8),
        args.num_steps,
        args.batch_size,
        args.log_every,
    )
    model.train()
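
DataDistribution and GeneratorDistribution are instantiated above but not defined anywhere in this post. Based on how they are used (sample(N), data.mu, data.sigma, gen.range), a minimal sketch could look like the code below; the specific mu/sigma values and the evenly spaced, slightly jittered noise are assumptions borrowed from the usual 1-D GAN demo, not necessarily the author's exact settings:

class DataDistribution(object):
    # The "real" data: samples drawn from a 1-D Gaussian (mu and sigma are assumed values)
    def __init__(self):
        self.mu = 4
        self.sigma = 0.5

    def sample(self, N):
        samples = np.random.normal(self.mu, self.sigma, N)
        samples.sort()
        return samples


class GeneratorDistribution(object):
    # The generator's input noise: evenly spaced points over [-range, range] plus small jitter
    def __init__(self, range):
        self.range = range

    def sample(self, N):
        return np.linspace(-self.range, self.range, N) + np.random.random(N) * 0.01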

Writing the GAN class

class GAN(object):
    # Initialize member variables
    def __init__(self, data, gen, num_steps, batch_size, log_every):
        self.data = data
        self.gen = gen
        self.num_steps = num_steps
        self.batch_size = batch_size
        self.log_every = log_every
        self.mip_hidden_size = 4
        self.learning_rate = 0.03

        self._create_model()

    # Build the model
    def _create_model(self):
        # Build the pre-discriminator model
        with tf.variable_scope('D_pre'):
            self.pre_input = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.pre_labels = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            # Get the pre-discriminator's prediction
            D_pre = discriminator(self.pre_input, self.mip_hidden_size)
            # Squared difference between the prediction and the target density
            self.pre_loss = tf.reduce_mean(tf.square(D_pre - self.pre_labels))
            # Train to shrink the gap between prediction and target
            self.pre_opt = optimizer(self.pre_loss, None, self.learning_rate)

        # Build the faking (generator) model
        with tf.variable_scope('Gen'):
            # Noise input and the generated fake data
            self.z = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.G = generator(self.z, self.mip_hidden_size)

        # Build the discriminator model
        with tf.variable_scope('Disc') as scope:
            # Score the real data; D1 is the probability assigned to real samples
            self.x = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.D1 = discriminator(self.x, self.mip_hidden_size)
            # Reuse the same discriminator variables
            scope.reuse_variables()
            # Score the fake data; D2 is the probability assigned to generated samples
            self.D2 = discriminator(self.G, self.mip_hidden_size)

        # First adversarial loss (discriminator)
        self.loss_d = tf.reduce_mean(-tf.log(self.D1) - tf.log(1 - self.D2))
        # Second adversarial loss (generator)
        self.loss_g = tf.reduce_mean(-tf.log(self.D2))

        # Collect the trainable variables of each scope
        self.d_pre_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='D_pre')
        self.d_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Disc')
        self.g_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Gen')

        # Training ops for the two adversarial losses
        self.opt_d = optimizer(self.loss_d, self.d_params, self.learning_rate)
        self.opt_g = optimizer(self.loss_g, self.g_params, self.learning_rate)

    # Run training
    def train(self):
        with tf.Session() as session:
            # Initialize variables
            tf.global_variables_initializer().run()

            # Pretraining of the discriminator
            num_pretrain_steps = 1000
            for step in range(num_pretrain_steps):
                # Generate random inputs
                d = (np.random.random(self.batch_size) - 0.5) * 10.0
                # Labels for the random inputs: the true data density at those points
                labels = norm.pdf(d, loc=self.data.mu, scale=self.data.sigma)
                pretrain_loss, _ = session.run([self.pre_loss, self.pre_opt], {
                    self.pre_input: np.reshape(d, (self.batch_size, 1)),
                    self.pre_labels: np.reshape(labels, (self.batch_size, 1))
                })
            # Fetch the pretrained parameters
            self.weightsD = session.run(self.d_pre_params)
            # Copy the d_pre_params weights into d_params
            for i, v in enumerate(self.d_params):
                session.run(v.assign(self.weightsD[i]))

            # Adversarial training of the two losses
            for step in range(self.num_steps):
                # Train the first adversarial loss (discriminator)
                x = self.data.sample(self.batch_size)
                z = self.gen.sample(self.batch_size)
                loss_d, _ = session.run([self.loss_d, self.opt_d], {
                    self.x: np.reshape(x, (self.batch_size, 1)),
                    self.z: np.reshape(z, (self.batch_size, 1))
                })

                # Train the second adversarial loss (generator)
                z = self.gen.sample(self.batch_size)
                loss_g, _ = session.run([self.loss_g, self.opt_g], {
                    self.z: np.reshape(z, (self.batch_size, 1))
                })

                # Log progress
                if step % self.log_every == 0:
                    print('{}:{}\t{}'.format(step, loss_d, loss_g))
                # Plot every 100 steps and on the final step
                if step % 100 == 0 or step == self.num_steps - 1:
                    self._plot_distributions(session)

    def _samples(self, session, num_points=10000, num_bins=100):
        xs = np.linspace(-self.gen.range, self.gen.range, num_points)
        bins = np.linspace(-self.gen.range, self.gen.range, num_bins)

        # Real data distribution
        d = self.data.sample(num_points)
        pd, _ = np.histogram(d, bins=bins, density=True)

        # Generated (fake) samples
        zs = np.linspace(-self.gen.range, self.gen.range, num_points)
        g = np.zeros((num_points, 1))
        for i in range(num_points // self.batch_size):
            g[self.batch_size * i:self.batch_size * (i + 1)] = session.run(self.G, {
                self.z: np.reshape(
                    zs[self.batch_size * i:self.batch_size * (i + 1)],
                    (self.batch_size, 1)
                )
            })
        # Histogram of the generated samples
        pg, _ = np.histogram(g, bins=bins, density=True)
        return pd, pg

    # Plot the two distributions
    def _plot_distributions(self, session):
        pd, pg = self._samples(session)
        p_x = np.linspace(-self.gen.range, self.gen.range, len(pd))
        f, ax = plt.subplots(1)
        ax.set_ylim(0, 1)
        plt.plot(p_x, pd, label='real data')
        plt.plot(p_x, pg, label='generated data')
        plt.title('1D Generative Adversarial Network')
        plt.xlabel('Data values')
        plt.ylabel('Probability density')
        plt.legend()
        plt.show()
Define the discriminator function. D judges whether its input looks real, using a four-layer network: three tanh hidden layers and a sigmoid output.

def discriminator(input, h_dim):
    # Three tanh hidden layers
    h0 = tf.tanh(linear(input, h_dim * 2, 'd0'))
    h1 = tf.tanh(linear(h0, h_dim * 2, 'd1'))
    h2 = tf.tanh(linear(h1, h_dim * 2, scope='d2'))

    # Sigmoid output: the probability that the input is real
    h3 = tf.sigmoid(linear(h2, 1, scope='d3'))

    return h3

Define the optimizer function, which builds the training op that updates the parameters.

def optimizer(loss, var_list, initial_learning_rate):
    # Learning-rate decay factor
    decay = 0.95
    num_decay_steps = 150
    batch = tf.Variable(0)
    # Exponentially decay the learning rate
    learning_rate = tf.train.exponential_decay(
        initial_learning_rate,
        batch,
        num_decay_steps,
        decay,
        staircase=True
    )
    # Gradient-descent step that minimizes the loss over var_list
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss,
        global_step=batch,
        var_list=var_list,
    )
    return optimizer

Define the generator, which produces the fake data; it uses a two-layer network.

def generator(input, h_dim):
    # Softplus hidden layer followed by a linear output layer
    h0 = tf.nn.softplus(linear(input, h_dim, 'g0'))
    h1 = linear(h0, 1, 'g1')
    return h1

Define linear, which implements a fully connected (affine) layer, i.e. a weight-matrix multiply plus a bias, used by both networks above.

def linear(input, output_dim, scope=None, stddev=1.0):
    # Weight initializer (random normal) and bias initializer (zeros)
    norm = tf.random_normal_initializer(stddev=stddev)
    const = tf.constant_initializer(0.0)
    with tf.variable_scope(scope or 'linear'):
        w = tf.get_variable('w', [input.get_shape()[1], output_dim], initializer=norm)
        b = tf.get_variable('b', [output_dim], initializer=const)
    return tf.matmul(input, w) + b

 

Reprinted from: https://www.cnblogs.com/my-love-is-python/p/9615119.html
