n_epochs = 5  # number of full passes over the training set

for epoch in range(n_epochs):
    running_loss = 0.0   # accumulated training loss for this epoch
    running_correct = 0  # count of correctly classified training samples
    print("Epoch {}/{}".format(epoch, n_epochs))
    print("-" * 20)

    for data in data_loader_train:  # iterate over the training set batch by batch
        X_train, y_train = data
        # Wrap tensors in Variable so autograd can track them
        # (a no-op on modern PyTorch, kept for compatibility with the rest of the file)
        X_train, y_train = Variable(X_train), Variable(y_train)
        outputs = model(X_train)               # forward pass
        _, pred = torch.max(outputs.data, 1)   # predicted class index per sample
        optimizer.zero_grad()                  # clear gradients accumulated from the previous step
        loss = cost(outputs, y_train)          # loss between predictions and targets (cross-entropy)
        loss.backward()                        # backward pass
        optimizer.step()                       # parameter update
        running_loss += loss.data.item()       # .item() extracts a Python float from the 0-dim tensor
        # .item() here avoids accumulating an integer *tensor*, whose later
        # division by len(data_train) would truncate on older PyTorch versions
        running_correct += torch.sum(pred == y_train.data).item()

    testing_correct = 0  # count of correctly classified test samples
    for data in data_loader_test:  # iterate over the test set batch by batch
        X_test, y_test = data
        X_test, y_test = Variable(X_test), Variable(y_test)
        outputs = model(X_test)
        _, pred = torch.max(outputs.data, 1)
        testing_correct += torch.sum(pred == y_test.data).item()

    # len(data_train) / len(data_test) are the dataset sizes (number of samples),
    # so the ratios below are per-sample loss and percentage accuracies.
    # Added the missing '%' on the test-accuracy field for consistency with the train field.
    print("Loss is : {:.4f} , Train Accuracy is : {:.4f}% , Test Accuracy is :{:.4f}%".format(
        running_loss / len(data_train),
        100 * running_correct / len(data_train),
        100 * testing_correct / len(data_test)))
6、最终结果
Epoch 0/5--------------------
Loss is :0.0021, Train Accuracy is :95.0000%, Test Accuracy is :98.0000
Epoch 1/5--------------------
Loss is :0.0007, Train Accuracy is :98.0000%, Test Accuracy is :99.0000
Epoch 2/5--------------------
Loss is :0.0004, Train Accuracy is :99.0000%, Test Accuracy is :99.0000
Epoch 3/5--------------------
Loss is :0.0003, Train Accuracy is :99.0000%, Test Accuracy is :99.0000
Epoch 4/5--------------------
Loss is :0.0002, Train Accuracy is :99.0000%, Test Accuracy is :99.0000