Keras in TF 2.0: custom loss functions and custom layers


Custom loss functions / custom layers

1. Custom loss function:

To use a custom loss function, simply pass it as the loss argument of model.compile().

A custom loss function can call TensorFlow's math APIs directly to perform the computation.

# Custom loss function: mean squared error implemented with TF ops
def customized_mse(y_true, y_pred):
    return tf.reduce_mean(tf.square(y_pred - y_true))
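
A quick sanity check (not from the original post): on a dummy batch the custom loss should agree with Keras's built-in MSE.

# Dummy tensors just to verify customized_mse matches the built-in MSE
y_true = tf.constant([1., 2., 3.])
y_pred = tf.constant([1.5, 2., 2.])
print(customized_mse(y_true, y_pred).numpy())                   # 0.41666667
print(keras.losses.mean_squared_error(y_true, y_pred).numpy())  # 0.41666667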

# Build the model
model = keras.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=x_train_all.shape[1:]),
    keras.layers.Dense(1),
])
# Inspect the model architecture
model.summary()

# Compile the model with the custom loss
# (this is a regression task, so mean_squared_error is the meaningful metric)
model.compile(loss=customized_mse, optimizer="adam", metrics=["mean_squared_error"])

# Define the callbacks: stop early once validation loss stops improving
callbacks = [keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3)]
# Train (fit) the model
history = model.fit(x_train_scaled, y_train,
                    validation_data=(x_valid_scaled, y_valid),
                    epochs=100,
                    callbacks=callbacks)
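
After training, the same custom loss is what model.evaluate reports; a minimal sketch, assuming a held-out test split named x_test_scaled / y_test was prepared the same way as the training data:

# Evaluate on the (assumed) test split; the loss value returned is customized_mse
model.evaluate(x_test_scaled, y_test, verbose=0)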

2. Custom layer:

Calling a layer functionally

layer = tf.keras.layers.Dense(100, activation="relu", input_shape=(None, 5))
layer(tf.zeros([10, 5]))
<tf.Tensor: shape=(10, 100), dtype=float32, numpy=
array([[0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.],
       ...,
       [0., 0., 0., ..., 0., 0., 0.]], dtype=float32)>

Commonly used layer attributes:

layer.variables
# 1. kernel: the weight matrix
layer.variables[0]
# 2. bias: the bias vector
layer.variables[1]

[<tf.Variable 'dense_6/kernel:0' shape=(5, 100) dtype=float32, numpy=
 array([...], dtype=float32)>,
 <tf.Variable 'dense_6/bias:0' shape=(100,) dtype=float32, numpy=
 array([...], dtype=float32)>]

# Get only the trainable parameters
layer.trainable_variables
# Print the layer's documentation
help(layer)
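
As a small illustration (not in the original), iterating over layer.trainable_variables makes the kernel and bias shapes explicit:

# Print the name and shape of every trainable variable in the layer
for v in layer.trainable_variables:
    print(v.name, v.shape)
# e.g. dense_6/kernel:0 (5, 100)
#      dense_6/bias:0 (100,)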

Subclassing keras.layers.Layer to define a custom layer

# Customized dense layer.
class CustomizedDenseLayer(keras.layers.Layer):
    def __init__(self, units, activation=None, **kwargs):
        # Call the parent constructor first, then set our own attributes
        # (Keras layers must run super().__init__ before attributes are assigned)
        super(CustomizedDenseLayer, self).__init__(**kwargs)
        self.units = units
        self.activation = keras.layers.Activation(activation)

    def build(self, input_shape):
        """Create the parameters this layer needs."""
        # x * w + b. input_shape: [None, a]  w: [a, b]  output_shape: [None, b]
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.units),
                                      initializer='uniform',
                                      trainable=True)
        # Note: shape must be a tuple, hence (self.units,) rather than (self.units)
        self.bias = self.add_weight(name='bias',
                                    shape=(self.units,),
                                    initializer='zeros',
                                    trainable=True)
        super(CustomizedDenseLayer, self).build(input_shape)

    def call(self, x):
        """The full forward computation."""
        return self.activation(x @ self.kernel + self.bias)
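
Before wiring it into a model, the subclassed layer can be smoke-tested on a dummy batch; a minimal sketch (the 4x8 input shape is just an illustrative assumption):

# Call the custom layer on a dummy batch: 4 samples, 8 features each
test_layer = CustomizedDenseLayer(16, activation="relu")
output = test_layer(tf.zeros([4, 8]))
print(output.shape)                          # (4, 16)
print(len(test_layer.trainable_variables))   # 2 -> kernel and bias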

model = keras.Sequential([
    CustomizedDenseLayer(30, activation="relu", input_shape=x_train_all.shape[1:]),
    CustomizedDenseLayer(1),
    customized_softplus,  # Lambda layer defined in the next section
    # Equivalent alternatives:
    # keras.layers.Dense(1, activation="softplus"),
    # keras.layers.Dense(1), keras.layers.Activation('softplus'),
])
model.summary()
model.compile(loss="mse", optimizer="adam", metrics=["mean_squared_error"])
callbacks = [keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3)]

super() calls a method of the parent class without naming that class explicitly; it is also commonly used with multiple inheritance, where Python's method resolution order (MRO) decides whose method runs, as the sketch below shows.
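
A tiny standalone illustration (not from the original post):

class A:
    def hello(self):
        print("A.hello")

class B(A):
    def hello(self):
        print("B.hello")
        super().hello()   # resolved by the MRO, not hard-coded to A

class C(A):
    def hello(self):
        print("C.hello")
        super().hello()

class D(B, C):
    pass

D().hello()  # prints B.hello -> C.hello -> A.hello, following D's MRO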

Defining a custom layer with a Lambda function

# tf.nn.softplus: log(1 + e^x)
customized_softplus = keras.layers.Lambda(lambda x: tf.nn.softplus(x))
print(customized_softplus([-10., -5., 0., 5., 10.]))
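
Since softplus(x) = log(1 + e^x), the printed values should be approximately [4.5e-05, 6.7e-03, 0.693, 5.007, 10.0]; a quick NumPy cross-check (not in the original):

import numpy as np
x = np.array([-10., -5., 0., 5., 10.], dtype="float32")
print(np.log(1. + np.exp(x)))  # ≈ [4.54e-05 6.72e-03 6.93e-01 5.01e+00 1.00e+01]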