Experiments with the trainable attribute in Keras

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time   : 2021/1/28 17:20
# @Author : Jin Echo
"""
结论:
1.对已经编译后的模型设置不可训练trainable,设置之后还需要再编译才会生效,否则会有一个warning询问是否要进行编译
2.对组合网络,例如C = G+D,D编译后设置False,再C编译,则训练C时D参数不变;训练D时参数改变(D未编译)
此外,也对拼接的网络进行了实验
"""

from keras.models import Model
from keras.layers import Input, Dense, Subtract
import numpy as np

x = Input(shape=(10, ))
y = Dense(10)(x)
G = Model(inputs=x, outputs=y)                  # 110 parameters
# print('G.summary:')
# G.summary()
G.compile(optimizer='rmsprop',
          loss='binary_crossentropy',
          metrics=['accuracy'])
# print('G.summary:')
# G.summary()

x = Input(shape=(10, ))
x_gen = Input(shape=(10, ))
output = Subtract()([x, x_gen])
output = Dense(1, activation='sigmoid')(output)
D = Model(inputs=[x, x_gen], outputs=output)        # 11 parameters
D.compile(optimizer='rmsprop',
          loss='binary_crossentropy',
          metrics=['accuracy'])
# print('D.summary:')
# D.summary()

x = Input(shape=(10, ))
x_gen = G(x)
pre = D([x, x_gen])
C = Model(inputs=x, outputs=pre)            # 121 parameters
C.compile(optimizer='rmsprop',
          loss='binary_crossentropy',
          metrics=['accuracy'])
# print('C.summary:')
# C.summary()
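
# A small helper, not part of the original experiments (the name weights_changed
# is just a hypothetical convenience), to make the before/after comparisons below
# explicit instead of eyeballing the printed weight arrays:
def weights_changed(before, after):
    """Return True if any weight array differs between two get_weights() snapshots."""
    return any(not np.array_equal(b, a) for b, a in zip(before, after))

# Intended usage inside the experiments, e.g.:
#   w_before = D.get_weights()
#   D.fit(...)
#   print('D weights changed:', weights_changed(w_before, D.get_weights()))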


"""
# Experiment 1: D.fit
# D.trainable is set to False, but D is NOT recompiled: D still trains.
# (Both the decreasing per-epoch loss and the changed weights before/after
# training confirm this.)
# C stays in sync with G and D. Note that D's and C's summary() output looks
# odd at this point, and a warning is printed.
D.trainable = False
G.summary()
D.summary()
C.summary()
print(G.get_weights())
print(D.get_weights())
print(C.get_weights())
print('--------------------------------------------------------------------------')
D.fit(x=[np.random.uniform(size=(2, 10)), np.random.uniform(size=(2, 10))],
      y=np.ones(shape=(2, 1)), epochs=3, verbose=2)
G.summary()
D.summary()
C.summary()
print(G.get_weights())
print(D.get_weights())
print(C.get_weights())
"""


"""
# Experiment 2: D.fit
# D.trainable is set to False AND D is then recompiled: only then does D stop
# training.
# (Both the constant per-epoch loss and the unchanged weights before/after
# training confirm this.)
# C stays in sync with G and D. Note that C's summary() output looks odd at
# this point, and a warning is printed.
D.trainable = False
D.compile(optimizer='rmsprop',
          loss='binary_crossentropy',
          metrics=['accuracy'])
G.summary()
D.summary()
C.summary()
print(G.get_weights())
print(D.get_weights())
print(C.get_weights())
print('--------------------------------------------------------------------------')
D.fit(x=[np.random.uniform(size=(2, 10)), np.random.uniform(size=(2, 10))],
      y=np.ones(shape=(2, 1)), epochs=3, verbose=2)
G.summary()
D.summary()
C.summary()
print(G.get_weights())
print(D.get_weights())
print(C.get_weights())
"""


"""
# Experiment 3: D.fit
# D.trainable is set to False, C is compiled but D is not: D still trains.
# (Both the changing per-epoch loss and D's changed weights before/after
# training confirm this.)
# C stays in sync with G and D. Note that D's summary() output looks odd at
# this point, and a warning is printed.
D.trainable = False
C.compile(optimizer='rmsprop',
          loss='binary_crossentropy',
          metrics=['accuracy'])
G.summary()
D.summary()
C.summary()
print(G.get_weights())
print(D.get_weights())
print(C.get_weights())
print('--------------------------------------------------------------------------')
D.fit(x=[np.random.uniform(size=(2, 10)), np.random.uniform(size=(2, 10))],
      y=np.ones(shape=(2, 1)), epochs=3, verbose=2)
G.summary()
D.summary()
C.summary()
print(G.get_weights())
print(D.get_weights())
print(C.get_weights())
"""


"""
# Experiment 4: C.fit
# a. D.trainable = False; D compiled, C not compiled.
#    Result: the loss decreases, and both G's and D's weights change. Since
#    C = G + D, C's weights track G and D. C.summary() looks odd and a warning
#    is printed (effectively, the False flag never took effect for C).
# b. D.trainable = False; C compiled, D not compiled.
#    Result: the loss decreases; G's weights change while D's stay fixed, and
#    C's weights track G and D. D.summary() looks odd and a warning is printed.
# c. D.trainable = False; D compiled, then C compiled.
#    Result: same weight behaviour as b, but with no odd summaries or warnings.
# d. D.trainable = False; C compiled, then D compiled.
#    Result: same as c.
D.trainable = False
C.compile(optimizer='rmsprop',
          loss='binary_crossentropy',
          metrics=['accuracy'])
D.compile(optimizer='rmsprop',
          loss='binary_crossentropy',
          metrics=['accuracy'])
G.summary()
D.summary()
C.summary()
print(G.get_weights())
print(D.get_weights())
print(C.get_weights())
print('-------------------------------------------------------------------------------------------------------')
C.fit(x=np.random.uniform(size=(2, 10)),
      y=np.ones(shape=(2, 1)), epochs=3, verbose=2)
G.summary()
D.summary()
C.summary()
print(G.get_weights())
print(D.get_weights())
print(C.get_weights())
"""



