你在训练的时候看到下面这样的图片是不是感觉不太直观
当迭代次数多的时候,前面的loss和accuracy与后面的交织在一起,变化变得不那么直观,看起来会比较麻烦
那咱把这东西整成图不就好了,loss变成啥样,accuracy咋变的,一目了然,开整!
# history = model.fit(train_data, train_labels, batch_size=..., epochs=...,
#                     validation_split=fraction of training data held out for validation,
#                     validation_data=validation set, validation_freq=how often to validate)
# history.history keys:
#   training loss:        loss
#   validation loss:      val_loss
#   training accuracy:    sparse_categorical_accuracy
#   validation accuracy:  val_sparse_categorical_accuracy
import tensorflow as tf
import os
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
# Print full arrays (no "..." truncation) when the weights are dumped later.
np.set_printoptions(threshold=np.inf)

# Load MNIST and rescale pixel intensities from [0, 255] down to [0, 1].
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Plain feed-forward classifier: flatten the 28x28 image, one hidden ReLU
# layer, then a softmax over the 10 digit classes.
model = keras.models.Sequential([
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax'),
])

# The output layer already applies softmax, hence from_logits=False.
model.compile(
    optimizer='adam',
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['sparse_categorical_accuracy'],
)
# Saving a checkpoint also produces a companion .index file; its presence
# tells us a previous run left weights we can resume from.
checkpoint_save_path = './临时文件/mnist.ckpt'
index_file = checkpoint_save_path + '.index'
if os.path.exists(index_file):
    print('-' * 20, 'load the model', '-' * 20)
    model.load_weights(checkpoint_save_path)

# Persist weights only (not the whole model), and only when the monitored
# metric improves.
cp_callback = keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_save_path,
    save_weights_only=True,
    save_best_only=True,
)

history = model.fit(
    x_train, y_train,
    epochs=5,
    batch_size=32,
    validation_data=(x_test, y_test),
    validation_freq=1,
    callbacks=[cp_callback],
)
model.summary()

# Dump every trainable variable -- name, shape, and full values -- to a text
# file for manual inspection.
with open('./临时文件/weight.txt', 'w') as f:
    for var in model.trainable_variables:
        for piece in (var.name, var.shape, var.numpy()):
            f.write(str(piece) + '\n')
# history.history holds one value per epoch for every tracked metric.
print(history.history.keys())
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One row, two columns: accuracy curves on the left.
plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')  # fixed typo: was "Accuract"
plt.legend()

# Loss curves on the right.
plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()