本篇使用Pytorch实现一个简单的深度神经网络,使用的数据集是MNIST。
1.配置库和配置参数
# 1. Libraries and hyper-parameter configuration for a simple MNIST MLP.
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable

# Hyper-parameters
torch.manual_seed(1)  # fix the RNG seed so results are reproducible
input_size = 784      # 28*28 flattened MNIST image
hidden_size = 500     # hidden-layer width
num_classes = 10      # digits 0-9
# BUG FIX: renamed `num_epoched` -> `num_epoches`; the training loop
# iterates over `range(num_epoches)`, so the original raised NameError.
num_epoches = 5
batch_size = 100
learning_rate = 0.001
2.加载MNIST数据
# 2. Load the MNIST dataset.
train_dataset = dsets.MNIST(
    root='./data',                    # where the data is stored on disk
    train=True,                       # training split
    transform=transforms.ToTensor(),  # PIL.Image in [0, 255] -> FloatTensor in [0, 1.0]
    download=True                     # download if not already present
)
# BUG FIX: the test set must use train=False. The original passed
# train=True here, so "test" accuracy was measured on the training data.
test_dataset = dsets.MNIST(
    root='./data',
    train=False,                      # test split
    transform=transforms.ToTensor()
)
3.数据的批处理
# 3. Batch the data with DataLoader.
# BUG FIX: the DataLoader keyword argument is `dataset`, not `datasets`;
# the original raised TypeError.
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=batch_size,
    shuffle=True    # reshuffle the training data every epoch
)
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset,
    batch_size=batch_size,
    shuffle=False   # keep evaluation order deterministic
)
4.定义神经网络模型
class Net(nn.Module):
    """Simple two-layer fully-connected network: fc1 -> ReLU -> fc2.

    Args:
        input_size: size of the flattened input vector (784 for MNIST).
        hidden_size: width of the hidden layer.
        num_classes: number of output classes (raw logits, no softmax —
            nn.CrossEntropyLoss applies log-softmax itself).
    """
    # NOTE: the original source had lost all indentation and was
    # syntactically invalid; structure restored here.

    def __init__(self, input_size, hidden_size, num_classes):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # x: (batch, input_size) flattened images; returns class logits.
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out
net = Net(input_size, hidden_size, num_classes)
5.训练数据
# 5. Train the model.
# Define the loss and the optimizer.
criterion = nn.CrossEntropyLoss()  # expects raw logits; applies log-softmax internally
# BUG FIX: `lr = learning rate` was a syntax error (space in the name);
# use the configured variable `learning_rate`.
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

for epoch in range(num_epoches):
    # BUG FIX: `emurate` -> `enumerate`, and the loop variable was named
    # `image` while the body read `images`.
    for i, (images, labels) in enumerate(train_loader):
        # Flatten each 28x28 image into a 784-vector and wrap in Variable.
        images = Variable(images.view(-1, 28 * 28))
        labels = Variable(labels)

        optimizer.zero_grad()              # clear gradients so batches don't accumulate
        outputs = net(images)              # forward pass
        loss = criterion(outputs, labels)  # compute the loss
        loss.backward()                    # backward pass: compute gradients
        optimizer.step()                   # update the parameters
6.在测试集测试识别率
# 6. Evaluate classification accuracy on the test set.
correct = 0
total = 0
for images, labels in test_loader:
    # BUG FIXES: the original line was `images = = Variable(image.view(...))`
    # (doubled `=` is a syntax error, and `image` is undefined here), and the
    # loop variable was spelled `lables` while the body read `labels`.
    images = Variable(images.view(-1, 28 * 28))
    outputs = net(images)
    _, predicted = torch.max(outputs.data, 1)  # index of the max logit = predicted class
    total += labels.size(0)                    # number of samples evaluated
    correct += (predicted == labels).sum()     # running count of correct predictions

# Report the result — the original computed correct/total but never used them.
print('Test accuracy: %.2f %%' % (100.0 * float(correct) / total))
参考资料:PyTorch机器学习从入门到实战