Getting the parameters of each layer of a Keras model

1. Build a basic neural network

import numpy as np

from keras import Sequential, Model
from keras.layers import (Dense, ZeroPadding2D, Conv2D, MaxPooling2D,
                          BatchNormalization, Dropout, AveragePooling2D, Flatten)
from keras.preprocessing import image

def net(label_size):
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
    model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(AveragePooling2D(pool_size=(5, 5)))
    model.add(Flatten())
    model.add(Dense(label_size, activation='softmax'))
    return model
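
Once the model is built, it can be useful to check how many parameters each layer holds. count_params() is a standard Layer/Model method in Keras; the sketch below simply prints the totals for the network above:

if __name__ =='__main__':
    model = net(5)
    print(model.count_params())          # total number of weights in the model
    for layer in model.layers:
        # per-layer counts, e.g. conv2d_1 holds 3*3*1*32 + 32 = 320 parameters
        print(layer.name, layer.count_params())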

2. Get the layer names

if __name__ =='__main__':
    label_size = 5
    model = net(label_size)
    #model.summary()
    names = [layer.name for layer in model.layers]
    print(names, len(names))

# The output is as follows:
['zero_padding2d_1', 'conv2d_1', 'batch_normalization_1', 'max_pooling2d_1', 'dropout_1', 'conv2d_2', 'batch_normalization_2', 'max_pooling2d_2', 'dropout_2', 'conv2d_3', 'batch_normalization_3', 'max_pooling2d_3', 'dropout_3', 'average_pooling2d_1', 'flatten_1', 'dense_1'] 16
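
If you later need to map a layer name back to its index (for example to slice the model), a plain dict built from this list is enough. This is just a convenience sketch reusing the model above, not a Keras API:

name_to_index = {layer.name: i for i, layer in enumerate(model.layers)}
print(name_to_index['conv2d_2'])   # 5 for the model above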

if __name__ =='__main__':
    label_size = 5
    model = net(label_size)
    for index in range(len(model.layers)):
        print(model.get_layer(index=index).name, model.get_layer(index=index).output_shape)
# The output is as follows:
zero_padding2d_1 (None, 40, 40, 1)
conv2d_1 (None, 40, 40, 32)
batch_normalization_1 (None, 40, 40, 32)
max_pooling2d_1 (None, 20, 20, 32)
dropout_1 (None, 20, 20, 32)
conv2d_2 (None, 20, 20, 64)
batch_normalization_2 (None, 20, 20, 64)
max_pooling2d_2 (None, 10, 10, 64)
dropout_2 (None, 10, 10, 64)
conv2d_3 (None, 10, 10, 128)
batch_normalization_3 (None, 10, 10, 128)
max_pooling2d_3 (None, 5, 5, 128)
dropout_3 (None, 5, 5, 128)
average_pooling2d_1 (None, 1, 1, 128)
flatten_1 (None, 128)
dense_1 (None, 5)
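
The same loop also makes it easy to pick out layers of a given type, for example all convolution layers. A small sketch using isinstance (the shapes in the comments are taken from the listing above):

conv_layers = [layer for layer in model.layers if isinstance(layer, Conv2D)]
for layer in conv_layers:
    print(layer.name, layer.output_shape)
# conv2d_1 (None, 40, 40, 32)
# conv2d_2 (None, 20, 20, 64)
# conv2d_3 (None, 10, 10, 128)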

3. Get the weight information of each layer

if __name__ =='__main__':
    label_size = 5
    model = net(label_size)
    model.summary()
    # plot_model(model, to_file='test.png', show_shapes=True)
    # names = [layer.name for layer in model.layers]
    for layer in model.layers:
        for weight in layer.weights:
            print(weight.name, weight.shape)

# The code above can equivalently be written as follows:
if __name__ =='__main__':
    label_size = 5
    model = net(label_size)
    model.summary()
    # plot_model(model, to_file='test.png', show_shapes=True)
    # names = [layer.name for layer in model.layers]
    names = [weight.name for layer in model.layers for weight in layer.weights]
    weights = model.get_weights()
    for name, weight in zip(names, weights):
        print(name, weight.shape)

# The output is as follows:
conv2d_1/kernel:0 (3, 3, 1, 32)
conv2d_1/bias:0 (32,)
batch_normalization_1/gamma:0 (32,)
batch_normalization_1/beta:0 (32,)
batch_normalization_1/moving_mean:0 (32,)
batch_normalization_1/moving_variance:0 (32,)
conv2d_2/kernel:0 (3, 3, 32, 64)
conv2d_2/bias:0 (64,)
batch_normalization_2/gamma:0 (64,)
batch_normalization_2/beta:0 (64,)
batch_normalization_2/moving_mean:0 (64,)
batch_normalization_2/moving_variance:0 (64,)
conv2d_3/kernel:0 (3, 3, 64, 128)
conv2d_3/bias:0 (128,)
batch_normalization_3/gamma:0 (128,)
batch_normalization_3/beta:0 (128,)
batch_normalization_3/moving_mean:0 (128,)
batch_normalization_3/moving_variance:0 (128,)
dense_1/kernel:0 (128, 5)
dense_1/bias:0 (5,)
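
Note that layer.weights contains both trainable parameters and non-trainable ones such as the BatchNormalization moving_mean/moving_variance. They can be listed separately through the standard trainable_weights and non_trainable_weights attributes, as in this short sketch:

for layer in model.layers:
    for weight in layer.trainable_weights:
        print('trainable    ', weight.name, weight.shape)
    for weight in layer.non_trainable_weights:
        print('non-trainable', weight.name, weight.shape)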

4. Get the information of a specified layer

if __name__ =='__main__':
    label_size = 5
    model = net(label_size)
    #model.summary()
    # plot_model(model, to_file='test.png', show_shapes=True)
    
    # The following two ways are equivalent
    # dense_layer = model.get_layer(name='dense_1')
    dense_layer = model.get_layer(index=-1)
    print(dense_layer.name)
    for weight in dense_layer.weights:
        print(weight.name, weight.shape)
# The output is as follows:
dense_1
dense_1/kernel:0 (128, 5)
dense_1/bias:0 (5,)
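
Besides its weights, a layer's hyperparameters can be read with get_config(), which every Keras layer implements. For the Dense layer above the config includes the unit count and activation:

config = dense_layer.get_config()
print(config['name'], config['units'], config['activation'])
# dense_1 5 softmax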

5. Get and modify a layer's weights

if __name__ =='__main__':
    label_size = 5
    model = net(label_size)
    #model.summary()
    # plot_model(model, to_file='test.png', show_shapes=True)

    # dense_layer = model.get_layer(name='dense_1')
    dense_layer = model.get_layer(index=-1)
    print(dense_layer.name)
    weights = []
    for weight in dense_layer.weights:
        print(weight.name, weight.shape)
        # build a random replacement array with the same shape as each variable
        # (kernel and bias); tuple() keeps the shape argument numpy-friendly
        weights.append(np.random.randint(1, 10, size=tuple(weight.shape)))
        if 'bias' in weight.name:
            # the bias is still at its zero initialization before set_weights
            print(dense_layer.get_weights()[-1])

    dense_layer.set_weights(weights)
    print('after set weights...')
    print(dense_layer.get_weights()[-1])

# The output is as follows:
dense_1
dense_1/kernel:0 (128, 5)
dense_1/bias:0 (5,)
[0. 0. 0. 0. 0.]
after set weights...
[9. 4. 4. 6. 2.]

Note: because a layer's weights include the bias as well as the kernel, when setting new weights with set_weights() we must pass both the kernel and the bias together, in the same order as layer.weights.
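
The same get_weights()/set_weights() pair is also the usual way to copy parameters between two layers of identical shape, for example to transfer a trained classifier head into a freshly built model. A minimal sketch (source_model and target_model are hypothetical names for two models built with the same net()):

source_model = net(5)   # e.g. a model whose weights were loaded from disk
target_model = net(5)
target_model.get_layer(index=-1).set_weights(
    source_model.get_layer(index=-1).get_weights())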

6. Get a layer's feature output

if __name__ =='__main__':
    label_size = 5
    model = net(label_size)
    #model.summary()

    img_path = '02.jpg'
    img = image.load_img(img_path, grayscale=True, target_size=(38, 38))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    print(x.shape)

    # By default, predict() returns the output of the last layer
    predict = model.predict(x)
    print(predict, predict.shape)

    # You can also get the output of a specified layer
    index = -1
    layer_model = Model(inputs=model.input, outputs=model.layers[index].output)
    predict = layer_model.predict(x)
    print(predict, predict.shape)

# The output is as follows:
(1, 38, 38, 1)
[[2.6020217e-07 9.9917740e-01 5.2408193e-04 1.6608767e-05 2.8174208e-04]] (1, 5)
[[2.6020217e-07 9.9917740e-01 5.2408193e-04 1.6608767e-05 2.8174208e-04]] (1, 5)
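
To grab the features of several layers in a single forward pass, build a functional Model whose outputs is a list of layer outputs. The sketch below reuses model and x from the snippet above and assumes the auto-generated layer names printed in section 2 (indexing model.layers directly avoids relying on those names):

feature_model = Model(inputs=model.input,
                      outputs=[model.get_layer(name='conv2d_3').output,
                               model.get_layer(name='flatten_1').output])
conv3_feat, flat_feat = feature_model.predict(x)
print(conv3_feat.shape)   # (1, 10, 10, 128)
print(flat_feat.shape)    # (1, 128)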

 
