Python neural network training loss goes NaN: a GAN for one-dimensional data (Python)

## The data generated by the code below always comes out as NaN. If anyone knows why, please help!

References: https://github.com/starhou/One-dimensional-GAN/commit/797e15044e8359626ee6b6667227ff8c5b394bf8 (a GitHub project that generates one-dimensional data)

https://www.bilibili.com/video/BV1f7411E7wU?p=4 (the code below is based on this video)

https://tensorflow.google.cn/tutorials/generative/dcgan (the official TensorFlow DCGAN tutorial)

Saving this here for now.

```python
from __future__ import absolute_import, division, print_function, unicode_literals

import glob
import os
import time

import imageio
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from tensorflow.keras import layers
from IPython import display
```

### Load the data

```python
data = pd.read_excel('D://complete.xlsx')
train_data = data.iloc[:, 0:117]
train_labels = data.iloc[:, 117]
train_data = train_data.astype('float32')

# normalize the data to [0, 1]
Zmax, Zmin = train_data.max(axis=0), train_data.min(axis=0)
Z = (train_data - Zmin) / (Zmax - Zmin)

# note: Z (the normalized data) is computed but never used below;
# train_images is built from the raw, unnormalized values
train_images = train_data.values.reshape(252, 13, 9, 1).astype('float32')

BUFFER_SIZE = 252
BATCH_SIZE = 63
print(train_images.shape)

# batch and shuffle the data
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
print(train_dataset)
```
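Two things in this block are worth checking before suspecting the network. First, if any column of `train_data` is constant, `Zmax - Zmin` is 0 for it and the division produces NaN. Second, `Z` is never used, so training runs on raw values whose scale does not match the generator's tanh output. A minimal diagnostic sketch, reusing the names above:

```python
# check for constant columns (division by zero -> NaN) and raw NaNs
span = train_data.max(axis=0) - train_data.min(axis=0)
print("constant columns:", list(span[span == 0].index))
print("NaNs in raw data:", int(train_data.isna().sum().sum()))

# if the data is clean, one possible fix is to actually train on the
# normalized values, rescaled to [-1, 1] to match the generator's tanh
train_images = (Z * 2.0 - 1.0).values.reshape(252, 13, 9, 1).astype('float32')
```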

### Generator

```python
def make_generator_model():
    model = tf.keras.Sequential()
    # the generator takes a 100-dimensional noise vector as input
    model.add(layers.Dense(128, input_shape=(100,), use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Dense(160, use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Dense(13 * 9 * 1, use_bias=False, activation='tanh'))
    # note: this BatchNormalization runs after the tanh, so the final
    # output is no longer bounded to [-1, 1]
    model.add(layers.BatchNormalization())

    # Reshape expects the target shape as a tuple
    model.add(layers.Reshape((13, 9, 1)))
    return model

generator = make_generator_model()
```
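Worth probing before training: because the last BatchNormalization comes after the tanh, the generator's output is not actually confined to [-1, 1]. A quick check, using the model just built:

```python
probe = generator(tf.random.normal([8, 100]), training=False)
print(float(tf.reduce_min(probe)), float(tf.reduce_max(probe)))
# if these fall well outside [-1, 1], consider making tanh the final
# operation, i.e. dropping the BatchNormalization after the last Dense
```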

### Discriminator

```python
def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(layers.Flatten())

    model.add(layers.Dense(160, use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Dense(128, use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # a single logit: real vs. fake
    model.add(layers.Dense(1))
    return model

discriminator = make_discriminator_model()
```

### Loss functions

```python
# from_logits=True because the discriminator ends in a plain Dense(1)
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

# discriminator loss: real samples should be classified 1, fakes 0
def discriminator_loss(real_out, fake_out):
    real_loss = cross_entropy(tf.ones_like(real_out), real_out)
    fake_loss = cross_entropy(tf.zeros_like(fake_out), fake_out)
    return real_loss + fake_loss

# generator loss: the generator wants fakes to be classified 1
def generator_loss(fake_out):
    return cross_entropy(tf.ones_like(fake_out), fake_out)
```
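The losses themselves can be sanity-checked in isolation; with from_logits=True, a confident correct prediction at logit 5 costs log(1 + e^-5) ≈ 0.0067, so both values below should be near zero and certainly not NaN:

```python
# a confident, correct discriminator: logit +5 for real, -5 for fake
print(float(discriminator_loss(tf.constant([5.0]), tf.constant([-5.0]))))  # ~0.0134
print(float(generator_loss(tf.constant([5.0]))))                           # ~0.0067
```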

### Optimizers and training setup

```python
generator_opt = tf.keras.optimizers.Adam(0.001)
discriminator_opt = tf.keras.optimizers.Adam(0.001)

EPOCHS = 100
noise_dim = 100
number_exp_to_generate = 16

# fixed noise, reused every epoch so progress is visible on the same seeds
seed = tf.random.normal([number_exp_to_generate, noise_dim])
```
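The learning rate is another usual suspect: 0.001 is high for GAN training, and diverging logits eventually overflow. The DCGAN tutorial linked above uses 1e-4, and the DCGAN paper additionally recommends beta_1 = 0.5; a possible alternative:

```python
generator_opt = tf.keras.optimizers.Adam(1e-4, beta_1=0.5)
discriminator_opt = tf.keras.optimizers.Adam(1e-4, beta_1=0.5)
```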

### Training step: optimize generator and discriminator

```python
@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        real_out = discriminator(images, training=True)
        gen_image = generator(noise, training=True)
        fake_out = discriminator(gen_image, training=True)
        gen_loss = generator_loss(fake_out)
        disc_loss = discriminator_loss(real_out, fake_out)

    gradients_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_disc = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_opt.apply_gradients(zip(gradients_gen, generator.trainable_variables))
    discriminator_opt.apply_gradients(zip(gradients_disc, discriminator.trainable_variables))
```
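Since train_step never returns the losses, a NaN only shows up indirectly when the plots go blank. A debugging variant may help pin it down (a sketch with the same names as above; debug_train_step is a new helper, and @tf.function is left off so tf.debugging.check_numerics fails eagerly at the exact op):

```python
def debug_train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        real_out = discriminator(images, training=True)
        gen_image = generator(noise, training=True)
        fake_out = discriminator(gen_image, training=True)
        gen_loss = generator_loss(fake_out)
        disc_loss = discriminator_loss(real_out, fake_out)

    # raise immediately if either loss has gone nan/inf
    tf.debugging.check_numerics(gen_loss, "gen_loss")
    tf.debugging.check_numerics(disc_loss, "disc_loss")

    gradients_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_disc = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_opt.apply_gradients(zip(gradients_gen, generator.trainable_variables))
    discriminator_opt.apply_gradients(zip(gradients_disc, discriminator.trainable_variables))
    return gen_loss, disc_loss
```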

### Plotting

```python
def generator_plot_images(gen_model, test_noise):
    pre_images = gen_model(test_noise, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(pre_images.shape[0]):
        plt.subplot(4, 4, i + 1)
        # map tanh output from [-1, 1] back to [0, 1] for display
        plt.imshow((pre_images[i, :, :, 0] + 1) / 2, cmap='gray')
    plt.show()
```

### Train by batch

```python
def train(dataset, epochs):
    for epoch in range(epochs):
        for image_batch in dataset:
            train_step(image_batch)
        # plot samples from the fixed seed after every epoch
        generator_plot_images(generator, seed)

train(train_dataset, EPOCHS)
```
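With the instrumented step above, a loop that prints the losses each epoch shows exactly when (or whether) they blow up; a sketch reusing the names defined earlier:

```python
for epoch in range(EPOCHS):
    for image_batch in train_dataset:
        gen_loss, disc_loss = debug_train_step(image_batch)
    print(f"epoch {epoch}: gen_loss={float(gen_loss):.4f}, "
          f"disc_loss={float(disc_loss):.4f}")
```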
