tensorflow2 kernel_regularizer是计算什么

kernel_regularizer 计算的是该层 kernel(权重矩阵)的正则化惩罚项:l2 为系数乘以权重平方和,l1 为系数乘以权重绝对值之和;这些标量会被自动收集到 layer.losses 中。例如:
class Outlayer(layers.Layer):
    """Two stacked Dense layers whose kernels carry weight regularization.

    ``dense`` (32 units) uses an L2 penalty and ``dense1`` (10 units) an L1
    penalty, both with coefficient 1e-2. Calling the layer makes Keras record
    the two penalty scalars in ``self.losses``.
    """

    def __init__(self):
        super().__init__()
        # Hidden projection: L2 penalty (coef * sum of squared weights).
        self.dense = layers.Dense(32, kernel_regularizer=keras.regularizers.l2(1e-2))
        # Output projection: L1 penalty (coef * sum of absolute weights).
        self.dense1 = layers.Dense(10, kernel_regularizer=keras.regularizers.l1(1e-2))

    def call(self, inputs):
        """Forward pass: inputs -> dense(32) -> dense1(10)."""
        hidden = self.dense(inputs)
        return self.dense1(hidden)

# Build the layer and run one forward pass on a dummy (2, 2) batch;
# this creates the kernels and records the regularization penalties.
my_layer = Outlayer()
y = my_layer(tf.ones([2,2]))
print(y)
# One scalar per regularized kernel, in creation order:
# [L2 penalty of dense, L1 penalty of dense1].
print(my_layer.losses)
# Recompute the two penalties by hand to show what kernel_regularizer measures.
# L2: coefficient * squared Frobenius norm (sum of squared weights).
print(tf.square(tf.norm(my_layer.dense.kernel)) * 1e-2)
# L1: coefficient * sum of absolute weights (tf.norm flattens when axis=None,
# so ord=1 here is the vector 1-norm, not the induced matrix norm).
print(tf.norm(my_layer.dense1.kernel, ord=1) * 1e-2)
y:<tf.Tensor: id=233, shape=(2, 10), dtype=float32, numpy=
array([[ 0.36865774,  0.5883919 ,  0.2711479 ,  0.3792193 , -0.3248419 ,
         0.5872762 , -0.02513709, -0.1538085 ,  0.02563459,  1.1253573 ],
       [ 0.36865774,  0.5883919 ,  0.2711479 ,  0.3792193 , -0.3248419 ,
         0.5872762 , -0.02513709, -0.1538085 ,  0.02563459,  1.1253573 ]],
      dtype=float32)>

my_layer.losses:
[<tf.Tensor: id=241, shape=(), dtype=float32, numpy=0.030209353>,
 <tf.Tensor: id=249, shape=(), dtype=float32, numpy=0.6088533>]

tf.square(tf.norm(my_layer.dense.kernel)) * 1e-2:
<tf.Tensor: id=309, shape=(), dtype=float32, numpy=0.030209355>
tf.norm(my_layer.dense1.kernel, ord=1) * 1e-2:
<tf.Tensor: id=334, shape=(), dtype=float32, numpy=0.6088533>

my_layer.losses 以列表形式返回每个正则项的标量值。训练时若要把正则化加入损失函数,只需把各层收集到的 regularization loss 求和后累加到总 loss 上即可。
如有问题,请各位看官指正。

  • 1
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
def MEAN_Spot(opt):
    """Build and compile the three-branch CNN for spot regression.

    Each branch takes a 42x42 single-channel map through a regularized 5x5
    convolution, batch norm, 3x3 max-pooling and dropout; the branches are
    concatenated, convolved once more, flattened, and reduced to a single
    linear output named 'spot'.

    Args:
        opt: Keras optimizer instance used at compile time.

    Returns:
        A compiled keras.models.Model (MSE loss, MAE metric on 'spot').
    """
    def _branch(tensor_in, n_filters):
        # Per-channel feature extractor: conv -> BN -> pool -> dropout.
        feat = layers.Conv2D(n_filters, (5, 5), padding='same', activation='relu',
                             kernel_regularizer=l2(0.001))(tensor_in)
        feat = layers.BatchNormalization()(feat)
        feat = layers.MaxPooling2D(pool_size=(3, 3), padding='same',
                                   strides=(3, 3))(feat)
        return layers.Dropout(0.3)(feat)

    # Three parallel input branches (channels 1-3).
    inputs1 = layers.Input(shape=(42, 42, 1))
    do1 = _branch(inputs1, 3)
    inputs2 = layers.Input(shape=(42, 42, 1))
    do2 = _branch(inputs2, 3)
    inputs3 = layers.Input(shape=(42, 42, 1))
    do3 = _branch(inputs3, 8)

    # Merge the branches and interpret the joint representation.
    merged = layers.Concatenate()([do1, do2, do3])
    merged_conv = layers.Conv2D(8, (5, 5), padding='same', activation='relu',
                                kernel_regularizer=l2(0.1))(merged)
    merged_pool = layers.MaxPooling2D(pool_size=(2, 2), padding='same',
                                      strides=(2, 2))(merged_conv)
    flat = layers.Flatten()(merged_pool)
    flat_do = layers.Dropout(0.2)(flat)

    # Single linear regression head; takes input u, v, os.
    outputs = layers.Dense(1, activation='linear', name='spot')(flat_do)

    model = keras.models.Model(inputs=[inputs1, inputs2, inputs3],
                               outputs=[outputs])
    model.compile(
        loss={'spot': 'mse'},
        optimizer=opt,
        metrics={'spot': tf.keras.metrics.MeanAbsoluteError()},
    )
    return model
# Question from the post: how can channel attention and spatial attention
# be added to this model?
最新发布
05-30
要添加通道注意力和空间注意力,可以按照以下步骤进行: 1. 导入相应的库: ``` from tensorflow.keras import layers from tensorflow.keras import backend as K ``` 2. 定义通道注意力函数: ``` def channel_attention(inputs, reduction_ratio=8): # 获取输入张量的尺寸 input_shape = K.int_shape(inputs) # 获取通道数 channels = input_shape[-1] # 计算压缩后的通道数 units = channels // reduction_ratio # 定义共享的全连接层 shared_layer_one = layers.Dense(units, activation='relu', kernel_initializer='he_normal', use_bias=True, bias_initializer='zeros') shared_layer_two = layers.Dense(channels, kernel_initializer='he_normal', use_bias=True, bias_initializer='zeros') # 计算均值 avg_pool = layers.GlobalAveragePooling2D()(inputs) # 通过全连接层获得激活的均值 avg_pool = shared_layer_one(avg_pool) # 通过全连接层获得激活的权重 avg_pool = shared_layer_two(avg_pool) # 将均值和权重相乘,并将结果送入 sigmoid 函数 channel_attention = layers.multiply([inputs, layers.Activation('sigmoid')(avg_pool)]) return channel_attention ``` 3. 定义空间注意力函数: ``` def spatial_attention(inputs): # 计算均值和最大值 avg_pool = layers.Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(inputs) max_pool = layers.Lambda(lambda x: K.max(x, axis=3, keepdims=True))(inputs) # 将均值和最大值拼接在一起 concat = layers.Concatenate(axis=3)([avg_pool, max_pool]) # 通过卷积层获取激活的权重 spatial_attention = layers.Conv2D(filters=1, kernel_size=7, activation='sigmoid', padding='same', kernel_initializer='he_normal', use_bias=False)(concat) # 将权重和输入张量相乘 return layers.multiply([inputs, spatial_attention]) ``` 4. 
在模型中添加注意力: ``` # channel 1 inputs1 = layers.Input(shape=(42,42,1)) conv1 = layers.Conv2D(3, (5,5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs1) ca1 = channel_attention(conv1) sa1 = spatial_attention(ca1) bn1 = layers.BatchNormalization()(sa1) pool1 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3,3))(bn1) do1 = layers.Dropout(0.3)(pool1) # channel 2 inputs2 = layers.Input(shape=(42,42,1)) conv2 = layers.Conv2D(3, (5,5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs2) ca2 = channel_attention(conv2) sa2 = spatial_attention(ca2) bn2 = layers.BatchNormalization()(sa2) pool2 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3,3))(bn2) do2 = layers.Dropout(0.3)(pool2) # channel 3 inputs3 = layers.Input(shape=(42,42,1)) conv3 = layers.Conv2D(8, (5,5), padding='same', activation='relu', kernel_regularizer=l2(0.001))(inputs3) ca3 = channel_attention(conv3) sa3 = spatial_attention(ca3) bn3 = layers.BatchNormalization()(sa3) pool3 = layers.MaxPooling2D(pool_size=(3, 3), padding='same', strides=(3,3))(bn3) do3 = layers.Dropout(0.3)(pool3) # merge 1 merged = layers.Concatenate()([do1, do2, do3]) # interpretation 1 merged_conv = layers.Conv2D(8, (5,5), padding='same', activation='relu', kernel_regularizer=l2(0.1))(merged) merged_pool = layers.MaxPooling2D(pool_size=(2, 2), padding='same', strides=(2,2))(merged_conv) flat = layers.Flatten()(merged_pool) flat_do = layers.Dropout(0.2)(flat) # outputs outputs = layers.Dense(1, activation='linear', name='spot')(flat_do) #Takes input u, v, os model = keras.models.Model(inputs=[inputs1, inputs2, inputs3], outputs=[outputs]) model.compile( loss={'spot':'mse'}, optimizer=opt, metrics={'spot':tf.keras.metrics.MeanAbsoluteError()}, ) return model ``` 在上面的代码中,我们将 channel_attention() 和 spatial_attention() 函数分别应用于每个通道,然后将它们的输出与原始输入进行相乘,以获得加强的特征表示。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值