TensorFlow 的 Sequential 模型默认不能改名字:源代码中 name 仅为只读属性,可改为可读写后通过 layer = model.layers、layer.name 访问。Sequential 可以灵活地添加模型和层;但同一个模型因名字相同,重复添加时只保留第一次添加的那份;尝试 copy.deepcopy(模型) 也只能得到同名的 model,因此不可行。可以直接添加 Dropout 层。另外,第二份代码使用了 TensorBoard。训练配置通过 compile 实现,训练数据通过 fit 传入,损失函数则依据问题类型选择。
# Load the MNIST handwritten-digit dataset (downloaded on first use).
import tensorflow as tf

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
y_train  # notebook echo of the integer label array
array([5, 0, 4, ..., 5, 6, 8], dtype=uint8)
# Visualize the first training image.
import matplotlib.pyplot as plt

plt.imshow(x_train[0])
<matplotlib.image.AxesImage at 0x1b4dab17f88>
# Scale pixel values along axis 1 so the inputs are better conditioned.
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)

# Fully connected classifier: flatten the 28x28 image, two hidden ReLU
# layers, then a 10-way softmax over the digit classes.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(130, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))

# Labels are raw integers, hence the sparse categorical cross-entropy loss.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
Epoch 1/5
1875/1875 [==============================] - 11s 5ms/step - loss: 0.2621 - accuracy: 0.9227
Epoch 2/5
1875/1875 [==============================] - 9s 5ms/step - loss: 0.1040 - accuracy: 0.9686
Epoch 3/5
1875/1875 [==============================] - 9s 5ms/step - loss: 0.0718 - accuracy: 0.9775
Epoch 4/5
1875/1875 [==============================] - 11s 6ms/step - loss: 0.0516 - accuracy: 0.9839
Epoch 5/5
1875/1875 [==============================] - 9s 5ms/step - loss: 0.0407 - accuracy: 0.9865
<tensorflow.python.keras.callbacks.History at 0x1b4d877b888>
# Measure loss/accuracy on the held-out test split.
val_loss, val_acc = model.evaluate(x_test, y_test)
313/313 [==============================] - 2s 4ms/step - loss: 0.0903 - accuracy: 0.9750
# Pass the array slice directly. Wrapping it in a list — predict([x]) — makes
# Keras receive a tuple of inputs, which triggers the "Layers in a Sequential
# model should only have a single input tensor" warning seen in the transcript.
predictions = model.predict(x_test[5:8])
print(predictions)
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'tuple'> input: (<tf.Tensor 'IteratorGetNext:0' shape=(None, 28, 28) dtype=float32>,)
Consider rewriting this model with the Functional API.
[[3.14021698e-10 9.99962211e-01 2.31917916e-08 5.51868453e-08
1.11546058e-06 1.32857991e-09 2.55177746e-09 3.54971853e-05
6.47773959e-07 4.19447389e-07]
[7.13356707e-09 1.39042722e-05 3.48455131e-08 3.54035663e-08
9.99842525e-01 7.90912527e-05 3.22929310e-07 6.94255186e-06
1.20427085e-05 4.49937252e-05]
[5.06938420e-08 3.42604551e-07 2.70455502e-07 1.93817541e-04
6.74861003e-05 1.81618889e-05 1.80747389e-10 1.53591927e-07
7.90399952e-07 9.99718845e-01]]
# Display test sample 6 (the middle element of the predicted slice 5:8).
plt.imshow(x_test[6])
---------------------------------------------------------------------------------------
# Second experiment: same MNIST pipeline, this time with Dropout and TensorBoard.
import tensorflow as tf
import os
import copy

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
y_train  # notebook echo of the label array
import matplotlib.pyplot as plt

plt.imshow(x_train[0])
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
# Base classifier with a Dropout layer (rate 0.2) for regularization.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(130, activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))

# A whole Sequential model can itself be added as a layer of another
# Sequential — model2 wraps model as its single component.
model2 = tf.keras.models.Sequential()
model2.add(model)
# Integer labels -> sparse categorical cross-entropy.
model2.compile(optimizer='adam',
               loss='sparse_categorical_crossentropy',
               metrics=['accuracy'])

# Ensure the TensorBoard log directory exists. makedirs(exist_ok=True)
# replaces the racy exists()/mkdir() pair and also handles nested paths.
log_dir = os.path.join("logs")
os.makedirs(log_dir, exist_ok=True)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model2.fit(x_train, y_train, epochs=5,
           validation_data=(x_test, y_test),
           callbacks=[tensorboard_callback])
# Summaries plus a peek at the inner model's weights: model2's only "layer"
# is the wrapped model itself.
model2.summary()
layer = model2.layers
layer[0].weights  # notebook echo of the nested model's weight tensors
model.summary()
# Third run: a repeat of the Dropout + TensorBoard experiment.
import tensorflow as tf
import os
import copy

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
y_train  # notebook echo of the label array
import matplotlib.pyplot as plt

plt.imshow(x_train[0])
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
# Base classifier with Dropout, wrapped inside a second Sequential.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(130, activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))

model2 = tf.keras.models.Sequential()
model2.add(model)  # a Sequential model can be nested as a layer
# Integer labels -> sparse categorical cross-entropy.
model2.compile(optimizer='adam',
               loss='sparse_categorical_crossentropy',
               metrics=['accuracy'])

# Create the log directory idempotently; makedirs(exist_ok=True) avoids the
# race between the exists() check and mkdir(), and supports nested paths.
log_dir = os.path.join("logs")
os.makedirs(log_dir, exist_ok=True)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model2.fit(x_train, y_train, epochs=5,
           validation_data=(x_test, y_test),
           callbacks=[tensorboard_callback])
# Inspect the nested structure: model2 holds the inner model as its one layer.
layer = model2.layers
layer[0].weights  # notebook echo of the inner model's weight tensors
model2.summary()
model.summary()
# Evaluate the wrapper model on the held-out split.
val_loss, val_acc = model2.evaluate(x_test, y_test)

# Pass the array slice directly: predict([x]) hands Keras a tuple input and
# triggers the "single input tensor" Sequential warning seen in the first run.
predictions = model.predict(x_test[5:8])
print(predictions)
plt.imshow(x_test[6])