网络搭建:搭建一个包含两个隐含层的神经网络,采用 ReLU 激活函数。
import torch
from torch import nn, optim
class Batch_Net(nn.Module):
    """Two-hidden-layer MLP: each hidden layer is Linear -> BatchNorm1d -> ReLU.

    The final layer emits raw logits (no softmax); pair with
    nn.CrossEntropyLoss, which applies log-softmax internally.
    """

    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Batch_Net, self).__init__()
        # Hidden layer 1: affine map, batch normalization, in-place ReLU.
        self.layer1 = nn.Sequential(
            nn.Linear(in_dim, n_hidden_1),
            nn.BatchNorm1d(n_hidden_1),
            nn.ReLU(inplace=True),
        )
        # Hidden layer 2: same structure as layer 1.
        self.layer2 = nn.Sequential(
            nn.Linear(n_hidden_1, n_hidden_2),
            nn.BatchNorm1d(n_hidden_2),
            nn.ReLU(inplace=True),
        )
        # Output layer: plain linear projection to class logits.
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    def forward(self, x):
        """Map a flattened batch (N, in_dim) to logits (N, out_dim)."""
        for stage in (self.layer1, self.layer2, self.layer3):
            x = stage(x)
        return x
训练和测试:
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
#from network import Batch_Net
import network
# --- Hyperparameters ---
batch_size = 64
learning_rate = 1e-2
num_epoches = 20

# Scale single-channel MNIST pixels from [0, 1] to roughly [-1, 1].
data_tf = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])

# Download (if needed) and load the MNIST train/test splits.
train_dataset = datasets.MNIST(
    root='./data', train=True, transform=data_tf, download=True)
test_dataset = datasets.MNIST(root='./data', train=False, transform=data_tf)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# 28x28 images flattened to 784 inputs; hidden sizes 300 and 100; 10 classes.
model = network.Batch_Net(28 * 28, 300, 100, 10)

# --- Loss and optimizer ---
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
# --- Train model ---
train_loss_all = []  # per-epoch average loss, for plotting
train_acc_all = []   # per-epoch accuracy, for plotting
print('training...')
# Explicit train mode so BatchNorm uses batch statistics (safe if the
# script is re-run after an eval() call elsewhere).
model.train()
for epoch in range(num_epoches):
    loss_sum = 0.0
    acc_sum = 0
    for img, label in train_loader:
        # Flatten (N, 1, 28, 28) images into (N, 784) vectors.
        # Variable wrappers are unnecessary since PyTorch 0.4 and removed here.
        img = img.view(img.size(0), -1)
        # forward
        out = model(img)
        loss = criterion(out, label)
        # criterion returns the batch MEAN loss; weight by batch size so the
        # epoch-level division by len(train_dataset) yields the exact average.
        loss_sum += loss.item() * label.size(0)
        _, pred = torch.max(out, 1)
        acc_sum += (pred == label).sum().item()
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    train_loss = loss_sum / len(train_dataset)
    train_acc = acc_sum / len(train_dataset)
    train_loss_all.append(train_loss)
    train_acc_all.append(train_acc)
    print('epoch: {}, train Loss: {:.6f}, Acc: {:.6f}'.format(
        epoch + 1, train_loss, train_acc))
print('train done!')

# --- Plot training curves ---
plt.figure()
index = np.arange(len(train_loss_all))
plt.plot(index, train_loss_all, 'r')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()

plt.figure()
plt.plot(index, train_acc_all, 'b')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
# --- Test model ---
model.eval()  # BatchNorm switches to running statistics
eval_loss = 0.0
eval_acc = 0
print('testing...')
# torch.no_grad() replaces the deprecated volatile=True Variables: on
# modern PyTorch volatile is a no-op, so the original still built the
# autograd graph during evaluation, wasting memory.
with torch.no_grad():
    for img, label in test_loader:
        img = img.view(img.size(0), -1)
        out = model(img)
        loss = criterion(out, label)
        # Undo the batch-mean so dividing by the dataset size is exact.
        eval_loss += loss.item() * label.size(0)
        _, pred = torch.max(out, 1)
        eval_acc += (pred == label).sum().item()
print('Test Loss: {:.6f}, Acc: {}'.format(
    eval_loss / len(test_dataset),
    eval_acc / len(test_dataset)))
print('test done!')
训练损失下降曲线:
训练准确率曲线: