模型保存与加载 (Model saving and loading)
Keras版本模型保存与加载
保存模型与加载模型
import numpy as np
import tensorflow as tf
def _make_split(n):
    """Return (features, labels) for n samples: 32-dim floats in [0, 1),
    integer class labels drawn uniformly from [0, 10)."""
    return np.random.random((n, 32)), np.random.randint(10, size=(n,))

# Synthetic train / validation / test splits for the toy classifier.
x_train, y_train = _make_split(1000)
x_val, y_val = _make_split(200)
x_test, y_test = _make_split(200)
def get_uncompiled_model():
    """Build a small functional-API classifier: 32 features -> 64 -> 64 -> 10 logits.

    Layer names ('digits', 'dense_1', 'dense_2', 'predictions') are part of the
    checkpoint/serving namespace and must not be changed.
    """
    inputs = tf.keras.Input(shape=(32,), name='digits')
    hidden = tf.keras.layers.Dense(64, activation='relu', name='dense_1')(inputs)
    hidden = tf.keras.layers.Dense(64, activation='relu', name='dense_2')(hidden)
    outputs = tf.keras.layers.Dense(10, name='predictions')(hidden)
    return tf.keras.Model(inputs=inputs, outputs=outputs)
def get_compiled_model():
    """Return the classifier compiled for training.

    Uses RMSprop, sparse categorical cross-entropy on raw logits (the final
    Dense layer has no softmax, hence from_logits=True), and accuracy.

    Fix: the original source contained the mangled literal `1e - 3`, which is
    not a valid Python number; the intended learning rate is 1e-3.
    """
    model = get_uncompiled_model()
    model.compile(
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['sparse_categorical_accuracy'],
    )
    return model
# Build the compiled model, train for 5 epochs on the synthetic data
# (validating each epoch), then print the layer summary.
model = get_compiled_model()
model.fit(x_train, y_train, batch_size=32, epochs=5,
          validation_data=(x_val, y_val))
model.summary()
方法一（Method 1: save/load weights only）
# Method 1: weight-only save/load, in both HDF5 and TF-checkpoint formats.
# BUG FIX: the original called model.load_weights("adasd.h5") without ever
# having saved that file, which raises if it does not exist on disk.
# Save the weights first, then load them back (cf. the subclassed-model
# section below, which already does save-then-load).
model.save_weights("adasd.h5")
model.load_weights("adasd.h5")
model.predict(x_test)
# Same round-trip using the TensorFlow checkpoint format (a path prefix,
# not a single file).
model.save_weights('./checkpoints/mannul_checkpoint')
model.load_weights('./checkpoints/mannul_checkpoint')
model.predict(x_test)
方法二
# Method 2: whole-model save in the TensorFlow SavedModel format
# (architecture + weights + optimizer state); reload as a Keras model.
model.save('keras_model_tf_version', save_format='tf')
new_model = tf.keras.models.load_model('keras_model_tf_version')
new_model.predict(x_test)
方法三
# Method 3: whole-model save to a single HDF5 file. This works here because
# the model was built with the functional API (HDF5 needs a serializable
# graph of layers).
model.save('keras_model_hdf5_version.h5')
new_model = tf.keras.models.load_model('keras_model_hdf5_version.h5')
new_model.predict(x_test)
方法四
# Method 4: low-level tf.saved_model API. The restored object is not a Keras
# model; inference goes through the exported 'serving_default' signature,
# keyed by the input tensor name 'digits'.
tf.saved_model.save(model, 'tf_saved_model_version')
restored_saved_model = tf.saved_model.load('tf_saved_model_version')
f = restored_saved_model.signatures["serving_default"]
f(digits=tf.constant(x_test.tolist()))
!saved_model_cli show --dir tf_saved_model_version --all
自定义版本模型保存与加载
import tensorflow as tf
class MyModel(tf.keras.Model):
    """Subclassed two-layer classifier (32 -> 32 -> num_classes logits).

    NOTE(review): the attribute names `dense_1`/`dense_2` are part of the
    checkpoint variable namespace — renaming them would invalidate saved
    weights, so they are kept as-is.
    """

    def __init__(self, num_classes=10):
        super(MyModel, self).__init__(name='my_model')
        self.num_classes = num_classes  # width of the output logit layer
        self.dense_1 = tf.keras.layers.Dense(32, activation='relu')
        self.dense_2 = tf.keras.layers.Dense(num_classes)

    # The fixed input signature lets tf.saved_model.save export a serving
    # signature accepting a (None, 32) float32 tensor named 'digits'.
    @tf.function(input_signature=[tf.TensorSpec([None, 32], tf.float32, name='digits')])
    def call(self, inputs):
        hidden = self.dense_1(inputs)
        return self.dense_2(hidden)
import numpy as np
def _rand_pair(n):
    """Return (features, targets) for n samples: 32-dim float inputs and
    10-dim float targets (soft labels for CategoricalCrossentropy)."""
    return np.random.random((n, 32)), np.random.random((n, 10))

# Fresh synthetic splits for the custom-training-loop example.
x_train, y_train = _rand_pair(1000)
x_val, y_val = _rand_pair(200)
x_test, y_test = _rand_pair(200)
# Optimizer, loss, and metrics for the hand-written training loop.
# Fix: the original contained the mangled literal `1e - 3` (invalid Python);
# the intended learning rate is 1e-3.
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
# Targets are dense 10-dim vectors and the model emits raw logits.
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
train_acc_metric = tf.keras.metrics.CategoricalAccuracy()
val_acc_metric = tf.keras.metrics.CategoricalAccuracy()

batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)

model = MyModel(num_classes=10)
epochs = 3
# Custom training loop: forward + loss under GradientTape, manual gradient
# application, running accuracy metric, and a validation pass per epoch.
for epoch in range(epochs):
    print('Start of epoch %d' % (epoch,))
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            logits = model(x_batch_train)
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        # Accumulate training accuracy on this batch.
        train_acc_metric(y_batch_train, logits)
        if step % 200 == 0:
            print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
            print('Seen so far: %s samples' % ((step + 1) * 64))
    train_acc = train_acc_metric.result()
    print('Training acc over epoch: %s' % (float(train_acc),))
    train_acc_metric.reset_states()
    # Validation pass (no gradient computation).
    for x_batch_val, y_batch_val in val_dataset:
        val_logits = model(x_batch_val)
        val_acc_metric(y_batch_val, val_logits)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_states()
    print('Validation acc: %s' % (float(val_acc),))
模型保存方法一
# Method 1 (subclassed model): weight-only round-trip, first as an HDF5 file,
# then using the TF checkpoint format (a path prefix, not a single file).
model.save_weights("adasd.h5")
model.load_weights("adasd.h5")
model.predict(x_test)

model.save_weights('./checkpoints/mannul_checkpoint')
model.load_weights('./checkpoints/mannul_checkpoint')
model.predict(x_test)
模型保存方法二
（注：子类化（subclassed）模型无法保存为单个 HDF5 整体模型文件，因此此处未给出对应示例。）
模型保存方法三
# Method 3 (subclassed model): whole-model save in the SavedModel format —
# the only whole-model format available for subclassed models.
model.save('path_to_my_model', save_format='tf')
new_model = tf.keras.models.load_model('path_to_my_model')
new_model.predict(x_test)
模型保存方法四
# Method 4 (subclassed model): low-level tf.saved_model API; inference runs
# through the exported 'serving_default' signature (input name 'digits' comes
# from the TensorSpec on MyModel.call).
tf.saved_model.save(model, 'my_saved_model')
restored_saved_model = tf.saved_model.load('my_saved_model')
f = restored_saved_model.signatures["serving_default"]
f(digits=tf.constant(x_test.tolist()))
!saved_model_cli show --dir my_saved_model --all
# Probe the serving signature with a single hand-built row: take one test
# sample, truncate every feature to int (all become 0 since features lie in
# [0, 1)), and feed it through the signature.
# NOTE(review): the signature declares float32 input; passing an int list
# relies on tf.constant's dtype handling — confirm this call succeeds.
a = x_test.tolist()[0]
len(a)
aa = [int(v) for v in a]
aa
f(digits=tf.constant([aa]))