import matplotlib.pyplot as plt
import glob  # glob searches directories for files matching a pattern and returns the matches as a list
import imageio  # reads image files (RGB) and converts between image formats
from tensorflow.keras import Model,layers
from tensorflow.keras.layers import BatchNormalization,Dense,Conv2D,Dropout,Flatten,Activation,Reshape,Conv2DTranspose,LeakyReLU
import tensorflow as tf
import os
import numpy as np
import PIL
import pathlib
from IPython import display
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 10  # note: with TensorFlow 2.6 the dataset size should be evenly divisible by the batch size
DISCRIMINATOR_FACTOR = 2
ALPHA_WEIGHT = 200
OUTPUT_CHANNELS = 3  # three RGB channels
# BUFFER_SIZE = 4000  # number of images
daccuracies = []
gaccuracies = []  # these two lists record critic/generator loss values during training for later inspection
PATH = "/content/drive/MyDrive/Colab Notebooks/项目2/ResnetGAN/faces/"
data_root = pathlib.Path(PATH + '/woman/')
# data_root = pathlib.Path('/root/data/woman/')  # path when running on the Zhixingyun server
all_image_paths = list(data_root.iterdir())
all_image_paths = [str(path) for path in all_image_paths]
image_count = len(all_image_paths)
###### image-loading helpers ######
def preprocess_image(image):  # decode and normalise a raw JPEG string
    image = tf.image.decode_jpeg(image, channels=3)  # decode the JPEG bytes into a tensor
    image = tf.image.resize(image, [128, 128])  # resize to the generator's output resolution
    # image /= 255.0  # scale to [0, 1] (unused; [-1, 1] scaling is applied instead)
    image = (image - 127.5) / 127.5  # scale to [-1, 1] to match the generator's tanh output
    return image

def load_and_preprocess_image(path):  # read an image file from disk and preprocess it
    image = tf.io.read_file(path)
    return preprocess_image(image)
##################
# Build the tf.data dataset. GAN training only needs images, so there is no label component.
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
# Shuffling and batching:
# use a shuffle buffer as large as the dataset so the images are fully shuffled.
ds = image_ds.shuffle(buffer_size=image_count)
# ds = ds.repeat()  # an unbounded repeat() would make the per-epoch loop in train() never terminate
# drop_remainder keeps every batch at exactly BATCH_SIZE images, which the gradient-penalty term relies on.
ds = ds.batch(BATCH_SIZE, drop_remainder=True)
# `prefetch` lets the pipeline fetch the next batches in the background while the model trains.
train_dataset = ds.prefetch(buffer_size=AUTOTUNE)
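# Quick sanity check on the input pipeline (a small added sketch, not part of the
# original training flow): one batch should have shape (BATCH_SIZE, 128, 128, 3)
# and pixel values in [-1, 1] after the (x - 127.5) / 127.5 normalisation.
sample_batch = next(iter(train_dataset))
print(sample_batch.shape)  # expected: (10, 128, 128, 3)
print(float(tf.reduce_min(sample_batch)), float(tf.reduce_max(sample_batch)))  # expected: within [-1, 1]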
# Helper that builds one transposed-convolution (upsampling) block: Conv2DTranspose + BN (+ optional Dropout) + LeakyReLU.
def upsample(filters, size, apply_dropout=False):
    initializer = tf.random_normal_initializer(0., 0.02)
    result = tf.keras.Sequential()
    result.add(
        tf.keras.layers.Conv2DTranspose(filters, size, strides=2, padding='same',
                                        kernel_initializer=initializer, use_bias=False))
    result.add(tf.keras.layers.BatchNormalization())
    if apply_dropout:
        result.add(tf.keras.layers.Dropout(0.2))
    result.add(tf.keras.layers.LeakyReLU())
    return result
def Generator():
    initializer = tf.random_normal_initializer(0., 0.02)  # needed by the final Conv2DTranspose below
    model = tf.keras.Sequential([
        Dense(1*1*512, use_bias=False, input_shape=(100,)),
        BatchNormalization(),
        LeakyReLU(),
        Reshape((1, 1, 512)),
        upsample(512, 4, apply_dropout=True),  # (batch_size, 2, 2, 512)
        upsample(512, 4, apply_dropout=True),  # (batch_size, 4, 4, 512)
        upsample(512, 4, apply_dropout=True),  # (batch_size, 8, 8, 512)
        upsample(512, 4),                      # (batch_size, 16, 16, 512)
        upsample(256, 4),                      # (batch_size, 32, 32, 256)
        upsample(128, 4),                      # (batch_size, 64, 64, 128)
        upsample(64, 4),                       # (batch_size, 128, 128, 64)
        Conv2DTranspose(OUTPUT_CHANNELS, 1, strides=1, padding='same',
                        kernel_initializer=initializer, activation='tanh')
        # (batch_size, 128, 128, 3)
    ])
    return model
generator=Generator()
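# Shape check for the generator (added sketch; variable names are only illustrative):
# a single 100-dimensional noise vector should produce one 128x128 RGB image, with
# values in [-1, 1] because of the final tanh activation.
sample_noise = tf.random.normal([1, 100])
sample_image = generator(sample_noise, training=False)
print(sample_image.shape)  # expected: (1, 128, 128, 3)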
class ResnetBlock(Model):
    def __init__(self, filters, strides=1, residual_path=False):
        super(ResnetBlock, self).__init__()
        self.filters = filters
        self.strides = strides
        self.residual_path = residual_path
        self.c1 = Conv2D(filters, (3, 3), strides=strides, padding='same', use_bias=False)
        self.b1 = BatchNormalization()
        self.a1 = LeakyReLU()  # Activation('relu')
        self.c2 = Conv2D(filters, (3, 3), strides=1, padding='same', use_bias=False)
        self.b2 = BatchNormalization()
        # When residual_path is True, downsample the input with a 1x1 convolution so that
        # x and F(x) have matching shapes and can be added.
        if residual_path:
            self.down_c1 = Conv2D(filters, (1, 1), strides=strides, padding='same', use_bias=False)
            self.down_b1 = BatchNormalization()
        self.a2 = LeakyReLU()  # Activation('relu')

    def call(self, inputs):
        residual = inputs  # the skip connection carries the input x unchanged
        # Pass the input through conv / BN / activation layers to compute F(x).
        x = self.c1(inputs)
        x = self.b1(x)
        x = self.a1(x)
        x = self.c2(x)
        y = self.b2(x)
        if self.residual_path:
            residual = self.down_c1(inputs)
            residual = self.down_b1(residual)
        out = self.a2(y + residual)  # the output is F(x) + x (or F(x) + Wx), followed by the activation
        return out
class Discriminator(Model):
    def __init__(self, block_list, initial_filters=64):  # block_list gives the number of residual blocks per stage
        super(Discriminator, self).__init__()
        self.num_blocks = len(block_list)  # number of stages
        self.block_list = block_list
        self.out_filters = initial_filters
        self.c1 = Conv2D(self.out_filters, (3, 3), strides=1, padding='same', use_bias=False)
        self.b1 = BatchNormalization()
        self.a1 = LeakyReLU()  # Activation('relu')
        self.blocks = tf.keras.models.Sequential()
        # Build the ResNet body.
        for block_id in range(len(block_list)):  # which stage
            for layer_id in range(block_list[block_id]):  # which block within the stage
                if block_id != 0 and layer_id == 0:  # downsample at the start of every stage except the first
                    block = ResnetBlock(self.out_filters, strides=2, residual_path=True)
                else:
                    block = ResnetBlock(self.out_filters, residual_path=False)
                self.blocks.add(block)  # append the block to the ResNet body
            self.out_filters *= 2  # the next stage uses twice as many filters
        self.p1 = tf.keras.layers.GlobalAveragePooling2D()  # global average pooling instead of a flatten + dense stack
        self.f1 = tf.keras.layers.Dense(1)  # activation='sigmoid', kernel_regularizer=... omitted: the Wasserstein loss needs a raw score

    def call(self, inputs):
        x = self.c1(inputs)
        x = self.b1(x)
        x = self.a1(x)
        x = self.blocks(x)
        x = self.p1(x)
        y = self.f1(x)
        return y
discriminator=Discriminator([2,2,2,2])
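# Shape check for the critic (added sketch): a batch of two random 128x128 RGB images
# should map to one unbounded score per image, i.e. shape (2, 1); no sigmoid is applied
# because the Wasserstein-style losses below expect raw scores.
print(discriminator(tf.random.normal([2, 128, 128, 3]), training=False).shape)  # expected: (2, 1)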
discriminator_optimizer=tf.keras.optimizers.Adam(learning_rate=3e-4, beta_1=0.5, beta_2=0.9)#3e-4
generator_optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4, beta_1=0.5, beta_2=0.9)
# Wasserstein critic loss: maximise D(real) - D(fake), i.e. minimise D(fake) - D(real).
def discriminator_loss(real_output, fake_output):
    real_loss = tf.reduce_mean(real_output)
    fake_loss = tf.reduce_mean(fake_output)
    total_loss = fake_loss - real_loss
    daccuracies.append(-total_loss.numpy())  # record D(real) - D(fake) for plotting (eager mode only)
    return total_loss

# Generator loss: maximise D(fake), i.e. minimise -D(fake).
def generator_loss(fake_output):
    x = -tf.reduce_mean(fake_output)
    t = x.numpy()
    for i in range(DISCRIMINATOR_FACTOR):
        gaccuracies.append(-t)  # record the mean critic score on fakes
    return x
def gradient_penalty(real_images, fake_images, batch_size, alpha_weight):
    # 1. Build images interpolated between real and fake samples.
    alpha = tf.random.uniform([batch_size, 1, 1, 1], 0.0, 1.0)
    interpolated = alpha * real_images + (1 - alpha) * fake_images
    # 2. Compute the critic's gradient with respect to the interpolated images.
    with tf.GradientTape() as tape:
        tape.watch(interpolated)
        pred = discriminator(interpolated, training=True)
    grads = tape.gradient(pred, [interpolated])[0]
    # 3. Penalise the squared per-sample gradient norm (a zero-centred penalty).
    norms = tf.reduce_sum(tf.square(grads), axis=[1, 2, 3])
    gp = alpha_weight * tf.reduce_mean(norms)
    return gp
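# Note (added for reference, not used by train_step): the penalty above pushes the
# gradient norm towards 0, whereas the original WGAN-GP formulation uses a
# one-centred penalty, E[(||grad D(x_hat)||_2 - 1)^2]. A minimal sketch of that
# variant, under the same interpolation scheme (the function name is illustrative):
def gradient_penalty_one_centred(real_images, fake_images, batch_size, alpha_weight):
    alpha = tf.random.uniform([batch_size, 1, 1, 1], 0.0, 1.0)
    interpolated = alpha * real_images + (1 - alpha) * fake_images
    with tf.GradientTape() as tape:
        tape.watch(interpolated)
        pred = discriminator(interpolated, training=True)
    grads = tape.gradient(pred, [interpolated])[0]
    norms = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return alpha_weight * tf.reduce_mean((norms - 1.0) ** 2)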
checkpoint_dir = "./checkpoint"
checkpoint_save_path = "./checkpoint/ResNetGAN.ckpt"
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)
latest_ckpt = tf.train.latest_checkpoint(checkpoint_dir)
if latest_ckpt is not None:  # only restore when a checkpoint actually exists
    print('-------------load the model-----------------')
    checkpoint.restore(latest_ckpt)
epochs=2
noise_dim=100
num_examples_to_generate=4
seed = tf.random.normal([num_examples_to_generate, noise_dim])  # fixed normal noise, reused every epoch so the preview images are comparable
def train_step(image, batch_size, discriminator_factor, alpha_weight):
    noise = tf.random.normal([batch_size, noise_dim])
    # Generator update.
    with tf.GradientTape() as gen_tape:
        generated_images = generator(noise, training=True)
        fake_output = discriminator(generated_images, training=True)
        gen_loss = generator_loss(fake_output)
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    # Critic update with gradient penalty.
    # for i in range(discriminator_factor):  # would run several critic updates per generator update
    with tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)
        real_output = discriminator(image, training=True)
        fake_output = discriminator(generated_images, training=True)
        # print(tf.shape(image), tf.shape(generated_images))  # debug: both should be (batch_size, 128, 128, 3)
        gp = gradient_penalty(image, generated_images, batch_size, alpha_weight)
        disc_loss = discriminator_loss(real_output, fake_output)
        d_cost = disc_loss + gp
    gradients_of_discriminator = disc_tape.gradient(d_cost, discriminator.trainable_variables)
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
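# Optional single-step smoke test (added sketch, commented out so it does not alter
# training): running one update on a single batch is a cheap way to catch shape or
# dtype problems before the full loop below.
# sample_batch = next(iter(train_dataset))
# train_step(sample_batch, BATCH_SIZE, DISCRIMINATOR_FACTOR, ALPHA_WEIGHT)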
def train(dataset, epochs):
    for epoch in range(epochs):
        for image_batch in dataset:
            train_step(image_batch, BATCH_SIZE, DISCRIMINATOR_FACTOR, ALPHA_WEIGHT)
        display.clear_output(wait=True)
        generate_and_save_images(generator, epoch + 1, seed)
        print('epoch:', epoch)
        # the checkpoint could also be saved here every n epochs
    display.clear_output(wait=True)
    generate_and_save_images(generator, epochs, seed)
# Generate preview images from the fixed seed and save them to disk.
def generate_and_save_images(model, epoch, test_input):
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(2, 2))
    for i in range(predictions.shape[0]):
        plt.subplot(2, 2, i + 1)
        plt.imshow(predictions[i] * 0.5 + 0.5)  # map the tanh output from [-1, 1] back to [0, 1]
        plt.axis('off')
    os.makedirs('./picture(face)', exist_ok=True)  # make sure the output folder exists
    plt.savefig('./picture(face)/image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()
train(train_dataset,epochs)
checkpoint.save(file_prefix=checkpoint_save_path)
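# Optional, added for inspection: plot the values recorded during training.
# daccuracies holds D(real) - D(fake) (one entry per critic update) and gaccuracies
# holds the mean critic score on fakes (appended DISCRIMINATOR_FACTOR times per
# generator update). Uses only objects defined above.
plt.figure()
plt.plot(daccuracies, label='critic: D(real) - D(fake)')
plt.plot(gaccuracies, label='generator: mean D(fake)')
plt.xlabel('update step')
plt.legend()
plt.show()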