# Build the MNIST training data reader, batched 16 samples at a time.
train_set = paddle.dataset.mnist.train()
train_reader = paddle.batch(train_set,batch_size=16)
# Build the MNIST test data reader, batched 32 samples at a time.
test_set = paddle.dataset.mnist.test()
test_reader = paddle.batch(test_set,batch_size=32)
# Enter the PaddlePaddle dynamic-graph (dygraph) context for model creation.
with fluid.dygraph.guard():
    # Instantiate the model: pick exactly one of the three classifiers below.
    # Softmax (linear) classifier:
    # model = SoftmaxRegression('mnist')
    # Multilayer perceptron classifier:
    # model = MultilayerPerceptron('mnist')
    # Convolutional neural network classifier (the one actually used):
    model = ConvolutionalNeuralNetwork('mnist')
    # Switch the model to training mode.
    model.train()
    # Adam optimizer over the model's parameters, learning rate 0.001.
    opt = fluid.optimizer.Adam(learning_rate=0.001, parameter_list=model.parameters())
    # Number of training epochs.
    # NOTE(review): the original comment claimed 5 epochs, but the code uses 10.
    EPOCH_NUM = 10
    # Bookkeeping lists filled by the evaluation block further below.
    test_loss = []
    test_acc = []
    idx = []
# Dygraph context for the training loop.
with fluid.dygraph.guard():
    # Outer loop: one pass over the training set per epoch.
    # (pass_num deliberately keeps its name — it is read again after this
    # loop by the evaluation block below.)
    for pass_num in range(EPOCH_NUM):
        # Inner loop: one optimizer step per mini-batch.
        for batch_id, data in enumerate(train_reader()):
            # Reshape each flat image to (1, 28, 28) and stack into a batch;
            # labels become an int64 column vector.
            img_batch = np.array([sample[0].reshape(1, 28, 28) for sample in data], np.float32)
            lbl_batch = np.array([sample[1] for sample in data]).astype('int64').reshape(-1, 1)
            # Wrap the numpy batches as dygraph variables.
            image = fluid.dygraph.to_variable(img_batch)
            label = fluid.dygraph.to_variable(lbl_batch)
            # Forward pass, mean cross-entropy loss, and batch accuracy.
            predict = model(image)
            avg_loss = fluid.layers.mean(fluid.layers.cross_entropy(predict, label))
            acc = fluid.layers.accuracy(predict, label)
            # Periodic progress report.
            if batch_id % 500 == 0:
                print("pass:{},batch_id:{},train_loss:{},train_acc:{}".
                      format(pass_num, batch_id, avg_loss.numpy(), acc.numpy()))
            # Backward pass, parameter update, then clear accumulated grads.
            avg_loss.backward()
            opt.minimize(avg_loss)
            model.clear_gradients()
    # Persist the trained parameters under the 'mnist' prefix.
    fluid.save_dygraph(model.state_dict(), 'mnist')
# Dygraph context: load the saved parameters and evaluate on the test set.
with fluid.dygraph.guard():
    # Load parameters saved under the 'mnist' prefix; the second return
    # value (optimizer state) is unused here.
    model_dict, _ = fluid.load_dygraph('mnist')
    # Copy the loaded parameters into the model.
    model.load_dict(model_dict)
    # Switch to evaluation mode.
    model.eval()
    avg_cost = 0.0  # running sum of per-batch mean losses
    avg_acc= 0.0  # running sum of per-batch accuracies
    num = 0  # batch counter, used to average the sums below
    for batch_id,data in enumerate(test_reader()):
        num = num + 1
        # Reshape each flat image to (1, 28, 28) so it fits the model.
        images = np.array([x[0].reshape(1, 28, 28) for x in data],np.float32)
        labels = np.array([x[1] for x in data]).astype('int64').reshape(-1,1)
        # Wrap the numpy batches as dygraph variables.
        image = fluid.dygraph.to_variable(images)
        label = fluid.dygraph.to_variable(labels)
        # Forward pass.
        predict = model(image)
        # Per-batch mean cross-entropy loss, accumulated for averaging.
        loss = fluid.layers.cross_entropy(predict,label)
        avg_loss = fluid.layers.mean(loss)
        avg_cost = avg_cost + avg_loss
        # Per-batch accuracy, accumulated for averaging.
        acc = fluid.layers.accuracy(predict,label)
        avg_acc = avg_acc + acc
        if batch_id % 50 == 0:
            # NOTE(review): pass_num is stale here — it is the final epoch
            # index left over from the training loop; this eval runs once.
            print("pass:{},batch_id:{},test_loss:{},test_acc:{}".
            format(pass_num,batch_id,avg_loss.numpy(),acc.numpy()))
    # Record one averaged (loss, acc) point, tagged with the stale pass_num.
    idx.append(pass_num)
    test_loss.append(float(avg_cost.numpy())/num)
    test_acc.append(float(avg_acc.numpy())/num)
    # NOTE(review): draw_line is defined later in this file; run as a flat
    # script this raises NameError — it only works when the cell defining
    # draw_line was executed first (notebook cell ordering).
    draw_line(test_loss,test_acc,idx)
    print(test_loss)
    print(test_acc)
from IPython import display
import matplotlib.pyplot as plt
def draw_line(costs, acc, idx):
    """Plot loss and accuracy curves on twin y-axes.

    :param costs: list of loss values recorded during training/evaluation
    :param acc: list of accuracy values, same length as ``costs``
    :param idx: x-axis positions (e.g. epoch or evaluation indices)
    """
    fig, axis_loss = plt.subplots()
    # Second y-axis sharing the same x-axis, for the accuracy curve.
    axis_acc = axis_loss.twinx()
    # Loss in green on the left axis, accuracy in red on the right axis.
    # (The returned line handles were previously bound to unused locals;
    # they are not needed here.)
    axis_loss.plot(idx, costs, c="g", label="loss")
    axis_acc.plot(idx, acc, c="r", label="acc")
    plt.title("Learning rate = 0.001")
    axis_loss.set_xlabel('iterations')
    # Color-code each y-axis to match its curve.
    axis_loss.set_ylabel('loss', color='g')
    axis_loss.tick_params(axis='y', colors='g')
    axis_acc.set_ylabel('acc', color='r')
    axis_acc.tick_params(axis='y', colors='r')
    #plt.legend(loc="best")
    # NOTE(review): redundant — xlabel is already set on axis_loss above;
    # kept to preserve the original figure state exactly.
    plt.xlabel('iterations')
    #plt.pause(0.05)
    #display.clear_output(wait=True)
# Quick visual smoke test of draw_line with dummy data: thirteen loss
# values plotted against x positions 1..13 (bb doubles as both the
# "accuracy" series and the x index here).
aa = [0.2, 0.1, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 0.8, 0.10, 0.12]
bb = list(range(1, 14))
idx = bb
draw_line(aa, bb, idx)