Anime Avatar Synthesis with Generative Adversarial Networks


# This program was written against TensorFlow 2.1.0 (the version Anaconda
# installed for me out of the box). You need at least TensorFlow 2.0.0 to run
# it, otherwise you will hit a pile of errors.

# GAN architecture notes:
# The network largely follows the DCGAN design, with a few changes to
# accommodate limited compute and a small dataset.

# Imports
import tensorflow  # used below via tensorflow.keras.optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, BatchNormalization, Activation
from tensorflow.keras.layers import Conv2DTranspose, Conv2D, Reshape, LeakyReLU
from tensorflow.keras.optimizers import SGD, RMSprop
import matplotlib.pyplot as plt
from PIL import Image
import os
import math
import numpy as np
# All input images are normalized to 64x64.
# Images of other sizes can be batch-resized with a separate script
# (a sketch of such a helper follows).
image_size = 64
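
# A minimal sketch of the batch-resize helper mentioned above (the function
# name and arguments are illustrative, not the original script):
def batch_resize(src_dir, dst_dir, size=(64, 64)):
    # resize every readable image in src_dir and write it to dst_dir
    os.makedirs(dst_dir, exist_ok=True)
    for name in os.listdir(src_dir):
        try:
            im = Image.open(os.path.join(src_dir, name)).convert('RGB')
            im.resize(size, Image.ANTIALIAS).save(os.path.join(dst_dir, name))
        except Exception:
            pass  # skip non-image files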

# Sanity checks:
# make sure we can open and display one image from the dataset
# view one image
im = Image.open('C:/Users/faces/1.png')
im = im.resize((image_size, image_size), Image.ANTIALIAS)
plt.imshow(im)
plt.show()
# read one image as the first row of the training array
# (note: the loading loop below re-reads 1.png, so it appears twice; harmless)
X_train = Image.open('C:/Users/faces/1.png')
X_train = X_train.resize((image_size, image_size), Image.ANTIALIAS)
X_train = np.asanyarray(X_train)
X_train = np.expand_dims(X_train, axis=0)  # expand (64, 64, 3) to (1, 64, 64, 3)
print(X_train.shape)
# end of sanity checks

# Path configuration:
model_path = 'C:/Users/model/'
generated_image_path = 'C:/Users/generate/'
result_path = 'C:/Users/generate/results/'
dataset_path = 'C:/Users/faces/'
# Latent dimension
character = 300  # kept small because my machine limits how much data I can train on


# Stack the dataset images into one training array
for dirname, _, filenames in os.walk(dataset_path):
    for filename in filenames:
        if X_train.shape[0] > 3000:  # my machine grinds to a halt past ~3000 images, so cap the dataset around there
            break
        try:
            im = Image.open(os.path.join(dirname, filename))
            im = im.convert('RGB')  # drop alpha channels so every array is (64, 64, 3)
            im = im.resize((image_size, image_size), Image.ANTIALIAS)
            image_array = np.asanyarray(im)
            image_array = np.expand_dims(image_array, axis=0)
            X_train = np.concatenate((X_train, image_array), axis=0)
        except Exception:
            pass  # skip files that are not readable images
print('training images:', X_train.shape[0])
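
# Performance note: np.concatenate above copies the whole array on every
# iteration, which is quadratic overall. A faster equivalent (a sketch, with
# the hypothetical name image_paths) collects arrays in a list and stacks once:
# arrays = [np.asanyarray(Image.open(p).convert('RGB')
#                         .resize((image_size, image_size), Image.ANTIALIAS))
#           for p in image_paths]
# X_train = np.stack(arrays, axis=0)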


# Build the generator
# Design notes:
# 1. BatchNormalization after most layers, which stabilizes training
# 2. strided transposed convolutions do the upsampling (no pooling layers), as in DCGAN
# 3. ReLU activations everywhere except the tanh on the output layer
def generator_model():
    model = Sequential()
    model.add(Dense((image_size//8)*(image_size//8)*256, input_shape=(character,)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Reshape((image_size//8, image_size//8, 256)))  # output: (None, 8, 8, 256)

    model.add(Conv2DTranspose(128, 5, strides=2, padding='same'))  # output: (None, 16, 16, 128)
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Conv2DTranspose(64, 5, strides=2, padding='same'))  # output: (None, 32, 32, 64)
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Conv2DTranspose(3, 5, strides=2, padding='same'))  # output: (None, 64, 64, 3)
    model.add(Activation('tanh'))  # pixels land in [-1, 1], matching the input normalization

    return model
    
# Show the generator architecture
g = generator_model()
g.summary()
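
# Quick shape check (a sketch, not part of the original script): one random
# latent vector should map to a single 64x64 RGB image.
z = np.random.uniform(-1, 1, size=(1, character))
print(g.predict(z, verbose=0).shape)  # expected: (1, 64, 64, 3)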

# Build the discriminator
# Design notes:
# 1. BatchNormalization after most layers
# 2. strided convolutions do the downsampling (no pooling layers), followed by a dense head
# 3. LeakyReLU everywhere except the tanh before the head and the sigmoid output;
#    per the DCGAN paper, LeakyReLU works well in the discriminator, especially at higher resolutions
def discriminator_model():
    model = Sequential()
    model.add(Conv2D(64, kernel_size=5, strides=2, padding='same', input_shape=(image_size, image_size, 3)))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Conv2D(128, kernel_size=5, strides=2, padding='same'))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Conv2D(256, kernel_size=5, strides=2, padding='same'))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))  # probability that the input image is real
    return model

# Show the discriminator architecture
d = discriminator_model()
d.summary()
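
# Quick check (a sketch, not part of the original script): the sigmoid head
# maps any image batch to per-image scores in (0, 1).
fake_batch = np.random.uniform(-1, 1, size=(2, image_size, image_size, 3))
print(d.predict(fake_batch, verbose=0).shape)  # expected: (2, 1)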



# Stack the generator and the (frozen) discriminator into one model;
# training this stack updates only the generator.
def combine(g, d):
    model = Sequential()
    model.add(g)
    d.trainable = False  # freeze d so generator updates cannot touch its weights
    model.add(d)
    return model
g = generator_model()
d = discriminator_model()
g_d = combine(g,d)
g_d.summary()
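
# Sanity check (a sketch, not part of the original script): with d frozen
# inside the stack, g_d should expose only the generator's weights as trainable.
print(len(g_d.trainable_weights) == len(g.trainable_weights))  # expected: True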


# Tile a batch of images into one grid image (handy for eyeballing progress
# during training: many samples are easier to judge than one)
def combine_images(images):
    num = images.shape[0]
    width = int(math.sqrt(num))
    height = int(math.ceil(float(num) / width))
    shape = images.shape[1:3]
    image = np.zeros((height*shape[0], width*shape[1], 3),
                    dtype = images.dtype)
    for index,img in enumerate(images):
        i = int(index / width)
        j = index % width
        image[i * shape[0]:(i+1) * shape[0], j * shape[1]:(j+1) * shape[1], 0:3] = img[:,:,:]
    return image
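
# Example (a sketch, not part of the original script): 36 images of shape
# (64, 64, 3) tile into a 6x6 grid, i.e. one (384, 384, 3) array.
demo = combine_images(np.zeros((36, 64, 64, 3)))
print(demo.shape)  # expected: (384, 384, 3)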


# Output one synthesized image using the latest saved generator weights
os.makedirs(result_path, exist_ok=True)
    
def generated(name):
    g = generator_model()
    try:
        g.load_weights(model_path+"generatorA")
        print("generator weights loaded")
    except Exception:
        print("no saved weights")
    noise_need = np.random.uniform(-1, 1, size=(1, character))  # one latent vector in (-1, 1)
    generated_image_need = g.predict(noise_need, verbose=0)
    image = combine_images(generated_image_need)
    image = image * 127.5 + 127.5  # map tanh output [-1, 1] back to [0, 255]
    Image.fromarray(image.astype(np.uint8)).save(
        result_path+name+".png")
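
# Example usage: after training has written weights to model_path,
# generated('sample') would save results/sample.png from one random latent
# vector ('sample' here is just a placeholder name).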

os.makedirs(model_path, exist_ok=True)
os.makedirs(generated_image_path, exist_ok=True)

def train(BATCH_SIZE, Xtrain):
    # number of samples tiled into each progress grid
    generated_image_size = 36
    # normalize pixels from [0, 255] to [-1, 1] to match the generator's tanh output
    Xtrain = ((Xtrain.astype(np.float32)) - 127.5) / 127.5

    # Models and optimizers.
    # Because the generator has many parameters and optimizes slowly, I compiled
    # it with the simplest optimizer, SGD; the discriminator has fewer parameters,
    # so it gets the better-performing RMSprop. Note that the generator is actually
    # trained through the stacked model g_d, so its effective optimizer is the
    # RMSprop used there; the SGD compile on the standalone g is never exercised.
    d = discriminator_model()
    g = generator_model()
    g_d = combine(g, d)  # d is frozen inside g_d before g_d is compiled
    g.compile(loss='binary_crossentropy', optimizer=SGD())  # generator (standalone)
    g_d.compile(loss='binary_crossentropy', optimizer=RMSprop())  # stacked model
    d.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=RMSprop())  # discriminator
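
    # For reference, a sketch only (not this script's setup): the DCGAN paper
    # itself trains both networks with Adam at learning rate 0.0002, beta_1=0.5:
    # d.compile(loss='binary_crossentropy',
    #           optimizer=tensorflow.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5))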


    # Load weights from the previous run, if any
    try:
        d.load_weights(model_path+"discriminatorA")
        print("discriminator weights loaded")
        g.load_weights(model_path+"generatorA")
        print("generator weights loaded")
    except Exception:
        print("no saved weights")
    
    for epoch in range(500):
        print('Epoch is', epoch)
        # save a progress grid of generated samples once per epoch
        noise_need = np.random.uniform(-1, 1, size=(generated_image_size, character))
        generated_image_need = g.predict(noise_need, verbose=0)
        image = combine_images(generated_image_need)
        image = image * 127.5 + 127.5
        Image.fromarray(image.astype(np.uint8)).save(
            generated_image_path+str(epoch)+".png")
        for index in range(Xtrain.shape[0]//BATCH_SIZE):
            # latent vectors drawn uniformly from (-1, 1), shape (BATCH_SIZE, character)
            noise = np.random.uniform(-1, 1, size=(BATCH_SIZE, character))
            train_batch = Xtrain[index*BATCH_SIZE:(index+1)*BATCH_SIZE]
            generated_image = g.predict(noise, verbose=0)

            # discriminator step: real images get smoothed labels in [0.9, 1.0),
            # generated images get label 0
            X = np.concatenate((train_batch, generated_image))
            Y = np.concatenate((np.random.rand(BATCH_SIZE)*0.1 + 0.9, np.zeros(BATCH_SIZE)))
            d_loss = d.train_on_batch(X, Y)

            # generator step: train through the stack against smoothed "real" labels
            # (the trainable toggles are redundant given the compile order above, but harmless)
            noise = np.random.uniform(-1, 1, size=(BATCH_SIZE, character))
            d.trainable = False
            g_loss = g_d.train_on_batch(noise, np.random.rand(BATCH_SIZE)*0.1 + 0.9)
            d.trainable = True
            # every 10 batches: report losses and checkpoint both networks
            if index % 10 == 0:
                print('batch: %d, g_loss: %f, d_loss: %f' % (index, g_loss, d_loss))
                g.save_weights(model_path+'generatorA', True)
                print('Successfully saved generatorA')
                d.save_weights(model_path+'discriminatorA', True)
                print('Successfully saved discriminatorA')
train(BATCH_SIZE=128, Xtrain=X_train)  # the DCGAN paper reports a mini-batch size of 128 works well
#generated('result_cartoon_face')  # after training, uncomment to output one synthesized anime face
