tensorflow: Keras high-level API

Commonly used keras submodules: datasets, layers, losses, metrics, optimizers.
1 keras high-level API: datasets

#mnist
import tensorflow as tf
from tensorflow import keras

(x, y), (test_x, test_y) = keras.datasets.mnist.load_data()
#splitting the dataset into train/validation
train_x, val_x = tf.split(x, num_or_size_splits=[50000, 10000])
train_y, val_y = tf.split(y, num_or_size_splits=[50000, 10000])
y_onehot = tf.one_hot(y, depth=10)
db = tf.data.Dataset.from_tensor_slices((test_x, test_y))
next(iter(db))[0].shape
#shuffle
db = db.shuffle(1000)
#.map: data preprocessing
def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)   # cast the labels, not the images
    y = tf.one_hot(y, depth=10)
    return x, y
db2 = db.map(preprocess)
#.batch
db3 = db2.batch(128)
#.repeat(): data can be drawn indefinitely without raising StopIteration
db4 = db3.repeat(2)   # two passes over the data
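As a quick check of the pipeline above (a minimal sketch): with repeat(2), iterating the dataset passes over the 10k test samples twice before stopping.

for step, (x_batch, y_batch) in enumerate(db4):
    pass
print(step + 1)   # about 2 * ceil(10000 / 128) = 158 batches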

#For example
def prepare_mnist_features_labels(x, y):
    x = tf.cast(x, tf.float32) / 255.0
    y = tf.cast(y, tf.int32)
    return x, y

def mnist_dataset():
    (x, y), (x_val, y_val) = keras.datasets.fashion_mnist.load_data()
    y = tf.one_hot(y, depth=10)
    y_val = tf.one_hot(y_val, depth=10)
    db = tf.data.Dataset.from_tensor_slices((x, y))
    db = db.map(prepare_mnist_features_labels)
    db = db.shuffle(10000).batch(128)
    db_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
    db_val = db_val.map(prepare_mnist_features_labels)
    db_val = db_val.shuffle(10000).batch(128)
    return db, db_val
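A quick sanity check of mnist_dataset() (a minimal sketch, relying on the return added above):

db, db_val = mnist_dataset()
x_b, y_b = next(iter(db))
print(x_b.shape, y_b.shape)   # (128, 28, 28) and (128, 10)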

#Cross-validation: re-split train/val randomly at every epoch
for epoch in range(500):
    idx = tf.range(60000)
    idx = tf.random.shuffle(idx)
    x_train, y_train = tf.gather(x, idx[:50000]), tf.gather(y, idx[:50000])
    x_val, y_val = tf.gather(x, idx[-10000:]), tf.gather(y, idx[-10000:])
    db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    db_train = db_train.map(preprocess).shuffle(10000).batch(128)
    db_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
    db_val = db_val.map(preprocess).shuffle(10000).batch(128)
#keras also exposes this directly through fit's validation arguments
network.fit(db_train_val, epochs=4, validation_split=0.1, validation_freq=2)
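Note that validation_split only applies to NumPy/tensor inputs, not to a tf.data.Dataset, so the call above is best made with arrays; a minimal sketch (assuming x, y_onehot are the preprocessed training arrays):

network.fit(x, y_onehot, batch_size=128, epochs=4,
            validation_split=0.1, validation_freq=2)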

2 keras high-level API: metrics

#Workflow: create a Metric -> update_state -> result().numpy() -> reset_states
from tensorflow.keras import metrics
#Step 1: create a meter
acc_meter = metrics.Accuracy()
loss_meter = metrics.Mean()   # running mean
#Step 2: feed in new values (they are accumulated in an internal buffer)
acc_meter.update_state(y, pred)
loss_meter.update_state(loss)
#Step 3: read out the result
loss_meter.result().numpy()
acc_meter.result().numpy()
#Step 4: clear the buffer; required before starting a fresh measurement
loss_meter.reset_states()
acc_meter.reset_states()
#At the end of each epoch the reported accuracy covers the whole dataset; reset the meters afterwards
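Putting the four steps together in a training loop (a minimal sketch, assuming network, optimizer, and a db_train yielding integer labels are defined as in the earlier snippets):

for epoch in range(10):
    for x, y in db_train:
        with tf.GradientTape() as tape:
            logits = network(x)
            loss = tf.reduce_mean(
                tf.losses.sparse_categorical_crossentropy(y, logits, from_logits=True))
        grads = tape.gradient(loss, network.trainable_variables)
        optimizer.apply_gradients(zip(grads, network.trainable_variables))
        loss_meter.update_state(loss)
        acc_meter.update_state(y, tf.argmax(logits, axis=1))
    # report over the whole epoch, then clear for the next one
    print(epoch, loss_meter.result().numpy(), acc_meter.result().numpy())
    loss_meter.reset_states()
    acc_meter.reset_states()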

3 keras.Sequential()

from tensorflow.keras import layers

network = keras.Sequential([layers.Dense(256, activation='relu'),
                            layers.Dense(10)])
network.build(input_shape=[None, 28*28])
network.summary()   # prints the layers and parameter counts
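The parameter counts that summary() prints can be verified by hand for this network:

# Dense(256): (784 + 1) * 256 = 200,960 parameters (weights + bias)
# Dense(10):  (256 + 1) * 10  =   2,570 parameters
# total trainable parameters: 203,530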

4 compile and fit

#bundle the optimizer, loss function, and metrics in one place
from tensorflow.keras import optimizers
network.compile(optimizer=optimizers.Adam(learning_rate=0.01),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
network.fit(train_db, epochs=10, validation_data=val_db, validation_freq=2)   # evaluate every 2 epochs
network.evaluate(test_db)   # test on the held-out test set after training
pred = network.predict(x)   # run inference on a batch and return the outputs
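Because the network outputs raw logits (from_logits=True), class labels have to be recovered explicitly; a minimal sketch:

logits = network.predict(x)          # shape [b, 10], raw logits
labels = tf.argmax(logits, axis=1)   # predicted class index per sample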

5 Custom layers and networks

#keras.layers.Layer provides __init__ and call; keras.Model adds compile/fit/evaluate on top
#custom linear (dense) layer
class MyDense(layers.Layer):
    def __init__(self, in_dim, out_dim):
        super(MyDense, self).__init__()
        # add_weight (successor of the deprecated add_variable) registers trainable variables
        self.kernel = self.add_weight('w', [in_dim, out_dim])
        self.bias = self.add_weight('b', [out_dim])

    def call(self, inputs, training=None):
        out = inputs @ self.kernel + self.bias
        return out
#custom network
class MyModel(keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.fc1 = MyDense(28 * 28, 16)
        self.fc2 = MyDense(16, 10)

    def call(self, inputs, training=None):
        x = self.fc1(inputs)
        x = tf.nn.relu(x)
        x = self.fc2(x)
        return x
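Since MyModel subclasses keras.Model, it inherits compile/fit/evaluate; a minimal usage sketch (assuming db_train and db_val yield flattened [b, 784] inputs with one-hot labels):

model = MyModel()
model.compile(optimizer=optimizers.Adam(learning_rate=0.01),
              loss=tf.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(db_train, epochs=5, validation_data=db_val)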

6 Saving and loading models

#save_weights: saves only the weights
checkpoint_path = './checkpoint'
model.save_weights(checkpoint_path)
#restore the weights
model = create_model()  # must have the same architecture as the saved model
model.load_weights(checkpoint_path)

#save: saves the entire model
model.save('model.h5')
#restore
model = tf.keras.models.load_model('model.h5')
model.evaluate(x_val, y_val)   # the restored model can be used for evaluation/prediction right away

#tf.saved_model.save: exports the SavedModel format, consumable from other languages/runtimes
tf.saved_model.save(model, './tmp/saved_model/')
#restore
imported = tf.saved_model.load('./tmp/saved_model/')
f = imported.signatures["serving_default"]
print(f(x=tf.constant([[1.]])))
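Weights can also be checkpointed automatically during training via a callback; a minimal sketch using tf.keras.callbacks.ModelCheckpoint:

ckpt_cb = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path, save_weights_only=True, save_best_only=True)
network.fit(train_db, epochs=10, validation_data=val_db, callbacks=[ckpt_cb])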

7 Miscellaneous

#regularization via the layer's kernel_regularizer argument
network = keras.Sequential([
    layers.Dense(128, activation='relu',
                 kernel_regularizer=keras.regularizers.l2(0.01)),
    layers.Dense(10)])
#a more flexible approach: add the regularization term to the loss by hand
for step, (x, y) in enumerate(train_db):
    with tf.GradientTape() as tape:
        out = network(x)   # forward pass
        y_onehot = tf.one_hot(y, depth=10)   # assuming integer labels from train_db
        loss = tf.reduce_mean(
            tf.losses.categorical_crossentropy(y_onehot, out, from_logits=True))
        loss_regularization = []
        for p in network.trainable_variables:   # a property, not a method call
            loss_regularization.append(tf.nn.l2_loss(p))
        loss_regularization = tf.reduce_sum(tf.stack(loss_regularization))
        loss = loss + weight_decay * loss_regularization   # weight_decay: regularization strength
    grads = tape.gradient(loss, network.trainable_variables)
    optimizer.apply_gradients(zip(grads, network.trainable_variables))
#dropout
layers.Dropout(0.5)
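Dropout behaves differently in training and inference, so pass the training flag when calling the network manually; a minimal sketch:

out = network(x, training=True)    # dropout active: units are randomly dropped
out = network(x, training=False)   # dropout disabled for evaluation/prediction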

#momentum (assuming: from tensorflow.keras.optimizers import SGD, RMSprop, Adam)
optimizer = SGD(learning_rate=0.01, momentum=0.9)
optimizer = RMSprop(learning_rate=0.01, momentum=0.9)
optimizer = Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.99)

#learning-rate adjustment
optimizer = SGD(learning_rate=0.01, momentum=0.9)
for epoch in range(500):
    optimizer.learning_rate = 0.01 * (500 - epoch) / 500   # linear decay; choose your own schedule
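Alternatively, Keras provides built-in decay schedules that can be handed directly to an optimizer; a minimal sketch with ExponentialDecay:

lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.01, decay_steps=1000, decay_rate=0.9)
optimizer = SGD(learning_rate=lr_schedule, momentum=0.9)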