# Max pooling
# Bug fix: the original line was the bare identifier `最大池化` (not a
# comment), which raises NameError the moment the module is executed.
"""
池化处理 layers.MaxPool2D
上采样处理 layers.UpSampling2D
relu处理 layers.ReLU
"""
import tensorflow as tf
from tensorflow.keras import layers
# Enable on-demand GPU memory growth so TensorFlow does not reserve all
# VRAM up front; harmless no-op when no GPU is visible.
gpu_lst = tf.config.experimental.list_physical_devices("GPU")
print("GPU:{}个".format(len(gpu_lst)))
for gpu in gpu_lst:
    # Bug fix: this call was not indented under the for-loop in the
    # original, which is a SyntaxError (expected an indented block).
    tf.config.experimental.set_memory_growth(gpu, True)
print("--" * 30)
# Max-pooling demos on a random feature map of shape (1, 14, 14, 4).
x = tf.random.normal([1, 14, 14, 4])
print("原始数据:\n", x.shape)

# 2x2 window, stride 2 -> spatial dims are halved (14 -> 7).
max_pool = layers.MaxPool2D(2, strides=2)
print("最大池化(卷积核:2 步长=2):\n", max_pool(x).shape)

# 3x3 window, stride 1, 'same' padding -> spatial dims unchanged.
max_pool = layers.MaxPool2D(3, strides=1, padding="same")
print("最大池化(卷积核:3 步长=1 padding=same):\n", max_pool(x).shape)

# 3x3 window, stride 2, 'valid' padding (no padding).
max_pool = layers.MaxPool2D(3, strides=2, padding="valid")
print("最大池化(卷积核:3 步长=2):\n", max_pool(x).shape)

# Same as the first pooling above, via the functional API.
out = tf.nn.max_pool2d(x, 2, strides=2, padding="VALID")
print("tf.nn.max_pool2d 函数实现:\n", out.shape)
# Upsampling
# Bug fix: the original line was the bare identifier `上采样` (not a
# comment), which raises NameError at runtime.
"""
上采样
"""
print("上采样" + "--" * 30)
# Nearest-neighbour upsampling of a random (1, 7, 7, 4) feature map.
x = tf.random.normal([1, 7, 7, 4])
# size=3 -> each spatial dim is tripled (7 -> 21).
layer = layers.UpSampling2D(size=3)
out = layer(x)
print("上采样 倍数=3\n", out.shape)
# size=2 -> each spatial dim is doubled (7 -> 14).
layer = layers.UpSampling2D(size=2)
out = layer(x)
print("上采样 倍数=2\n", out.shape)
# ReLU
# Bug fix: the original line was the bare identifier `Relu` (not a
# comment), which raises NameError at runtime.
"""
ReLU
"""
print("Relu", "--" * 30)
x = tf.random.normal([2, 3])
print("原始数据\n", x)
# Layer-class form and functional form of ReLU produce the same result:
# negatives clamped to 0, non-negatives passed through.
layer = layers.ReLU()
out = layer(x)
print("ReLU 类的方式\n", out)
out = tf.nn.relu(x)
print("tf.nn.relu 函数方式\n", out)