Imports
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Hyper-parameters
input_size = 784        # input layer size: 28*28 pixels per flattened image
num_classes = 10        # number of classes: digits 0-9
num_epochs = 5          # number of passes over the training set
batch_size_train = 100  # samples per training batch
batch_size_test = 1000  # samples per test batch
learning_rate = 0.01    # learning rate
Prepare the data
# MNIST dataset (images and labels); the normalization constants are the
# conventional MNIST mean and std
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

# Training set
train_dataset = torchvision.datasets.MNIST(root='./data/',
                                           train=True,
                                           transform=transform,
                                           download=True)
# Test set
test_dataset = torchvision.datasets.MNIST(root='./data/',
                                          train=False,
                                          transform=transform,
                                          download=True)

# Data loaders (input pipeline); the test set does not need shuffling
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size_train,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size_test,
                                          shuffle=False)
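As an optional sanity check on the pipeline, we can pull one batch and inspect its shapes; the sizes below assume the hyper-parameters set above (batch_size_train = 100).

# Optional sanity check: one batch from the training loader
images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([100, 1, 28, 28])
print(labels.shape)   # torch.Size([100])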
Network
# Logistic regression (softmax regression) model: a single linear layer
model = nn.Linear(input_size, num_classes)

# Loss and optimizer
# nn.CrossEntropyLoss() computes softmax internally, so the model outputs raw logits
criterion = nn.CrossEntropyLoss()  # multi-class cross-entropy loss
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.5)
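Since nn.CrossEntropyLoss applies log-softmax internally, it is equivalent to nn.NLLLoss on log-softmax outputs. A minimal sketch with fabricated logits and labels (illustration only, not part of the training code):

import torch.nn.functional as F

logits = torch.randn(4, num_classes)           # fabricated logits, illustration only
targets = torch.randint(0, num_classes, (4,))  # fabricated labels
ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, nll))                 # True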
Training
# Train the model
total_step = len(train_loader)
total_losses = []
# x-axis for the loss plot: number of training examples seen after each batch
steps = [(i + 1) * batch_size_train for i in range(num_epochs * total_step)]
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Flatten images to (batch_size, input_size)
        images = images.reshape(-1, 28 * 28)

        # Forward pass
        outputs = model(images)  # shape: (batch_size, num_classes)
        loss = criterion(outputs, labels)
        total_losses.append(loss.item())

        # Backward pass and optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
# Visualization
import matplotlib.pyplot as plt

fig = plt.figure()
plt.plot(steps, total_losses, color='blue')
plt.legend(['Train Loss'], loc='upper right')
plt.xlabel("number of training examples seen")
plt.ylabel("negative log likelihood loss")
plt.show()
Test the model
# Test the model
# In the test phase we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28 * 28)
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
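Because the softmax lives inside the loss function during training, the model itself outputs raw logits. If class probabilities are wanted at inference time, apply softmax explicitly; a small optional sketch on one test batch:

# Turn logits into class probabilities for one test batch
with torch.no_grad():
    images, labels = next(iter(test_loader))
    probs = torch.softmax(model(images.reshape(-1, 28 * 28)), dim=1)
    print(probs[0])        # predicted probability for each digit 0-9
    print(probs[0].sum())  # sums to ~1.0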
#Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
When loading the model back, the network has to be constructed first and the parameters loaded into it; since only the state_dict was saved, model = torch.load('model.ckpt') on its own does not give back a working model, and trying to use it as one will fail.
model = nn.Linear(784, 10)                       # rebuild the network first
model.load_state_dict(torch.load('model.ckpt'))  # then load the saved parameters
print("load success")