size: filter size
norm_type: Normalization type; either ‘batchnorm’ or ‘instancenorm’.
apply_dropout: If True, adds the dropout layer
Returns:
Upsample Sequential Model
“”"
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(
tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
padding=‘same’,
kernel_initializer=initializer,
use_bias=False))
if norm_type.lower() == ‘batchnorm’:
result.add(tf.keras.layers.BatchNormalization())
elif norm_type.lower() == ‘instancenorm’:
result.add(InstanceNormalization())
if apply_dropout:
result.add(tf.keras.layers.Dropout(0.5))
result.add(tf.keras.layers.ReLU())
return result
接下来构建生成器和鉴别器,其中生成器基于 U-Net:
def unet_generator(output_channels, norm_type=‘batchnorm’):
“”"
Args:
output_channels: Output channels
norm_type: Type of normalization. Either ‘batchnorm’ or ‘instancenorm’.
Returns:
Generator model
“”"
down_stack = [
downsample(64, 4, norm_type, apply_norm=False), # (bs, 128, 128, 64)
downsample(128, 4, norm_type), # (bs, 64, 64, 128)
downsample(256, 4, norm_type), # (bs, 32, 32, 256)
downsample(512, 4, norm_type), # (bs, 16, 16, 512)
downsample(512, 4, norm_type), # (bs, 8, 8, 512)
downsample(512, 4, norm_type), # (bs, 4, 4, 512)
downsample(512, 4, norm_type), # (bs, 2, 2, 512)
downsample(512, 4, norm_type), # (bs, 1, 1, 512)
]
up_stack = [
upsample(512, 4, norm_type, apply_dropout=True), # (bs, 2, 2, 1024)
upsample(512, 4, norm_type, apply_dropout=True), # (bs, 4, 4, 1024)
upsample(512, 4, norm_type, apply_dropout=True), # (bs, 8, 8, 1024)
upsample(512, 4, norm_type), # (bs, 16, 16, 1024)
upsample(256, 4, norm_type), # (bs, 32, 32, 512)
upsample(128, 4, norm_type), # (bs, 64, 64, 256)
upsample(64, 4, norm_type), # (bs, 128, 128, 128)
]
initializer =