1. Method 1: an nn.Module network trained with MSE on one-hot labels
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
import torchvision
batch_size = 512

# Standard MNIST normalization (mean 0.1307, std 0.3081)
transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.1307,), (0.3081,)),
])

train_datasets = torchvision.datasets.MNIST('mnist', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
test_datasets = torchvision.datasets.MNIST('mnist', train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_datasets, batch_size=batch_size, shuffle=False)  # evaluation does not need shuffling

# Peek at one batch to confirm shapes and the normalized value range
x, y = next(iter(train_loader))
print(x.shape, y.shape, x.min(), x.max())
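With this normalization the printed line should look roughly like torch.Size([512, 1, 28, 28]) torch.Size([512]) tensor(-0.4242) tensor(2.8215): the pixel range [0, 1] is mapped to [(0 - 0.1307)/0.3081, (1 - 0.1307)/0.3081].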
class Net(nn.Module):
    """Fully connected network: 784 -> 128 -> 64 -> 10."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(784, 128, bias=True)
        self.fc2 = nn.Linear(128, 64, bias=True)
        self.fc3 = nn.Linear(64, 10, bias=True)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # no activation: the raw output is compared to one-hot targets with MSE
        return x
net = Net()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
def one_hot(label, depth=10):
    """Convert a batch of integer labels to a [N, depth] one-hot float tensor."""
    out = torch.zeros(label.size(0), depth)
    idx = label.long().view(-1, 1)  # torch.LongTensor(label) on an existing tensor is deprecated
    out.scatter_(dim=1, index=idx, value=1)
    return out
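On recent PyTorch releases the same conversion is built in, so the helper above can be replaced with a one-liner (F.one_hot returns int64, hence the cast needed for mse_loss):

    y_onehot = F.one_hot(y, num_classes=10).float()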
for epoch in range(10):
    net.train()
    for batch_idx, (x, y) in enumerate(train_loader):
        x = x.view(x.size(0), -1)  # flatten [B, 1, 28, 28] -> [B, 784]
        out = net(x)
        y_onehot = one_hot(y)
        optimizer.zero_grad()
        loss = F.mse_loss(out, y_onehot)
        loss.backward()
        optimizer.step()
    # Evaluate after each epoch; no_grad() avoids building the autograd graph
    net.eval()
    corrects = 0
    with torch.no_grad():
        for x, y in test_loader:
            x = x.view(x.size(0), -1)
            out = net(x)
            pred = out.argmax(dim=1)
            corrects += pred.eq(y).sum().float().item()
    acc = corrects / len(test_loader.dataset)
    print('epoch {}: test acc {:.4f}'.format(epoch, acc))
2. Method 2: hand-built parameters trained with CrossEntropyLoss
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
batch_size = 200
learning_rate = 0.01
epochs = 10
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=batch_size, shuffle=False)  # evaluation does not need shuffling
# Parameters of a 784 -> 200 -> 200 -> 10 MLP, created by hand.
# Weights are stored as [out_features, in_features], matching nn.Linear.
w1, b1 = torch.randn(200, 784, requires_grad=True), \
         torch.zeros(200, requires_grad=True)
w2, b2 = torch.randn(200, 200, requires_grad=True), \
         torch.zeros(200, requires_grad=True)
w3, b3 = torch.randn(10, 200, requires_grad=True), \
         torch.zeros(10, requires_grad=True)

# Kaiming initialization overwrites the randn values in place; raw randn
# weights are far too large at this width and tend to stall training
torch.nn.init.kaiming_normal_(w1)
torch.nn.init.kaiming_normal_(w2)
torch.nn.init.kaiming_normal_(w3)
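By default kaiming_normal_ uses fan_in mode, so each weight is drawn from a normal distribution with std = sqrt(2 / fan_in). A quick sanity check (illustrative, not part of the original script):

    print(w1.std())  # should be close to sqrt(2 / 784) ≈ 0.0505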
def forward(x):
    x = F.relu(x @ w1.t() + b1)
    x = F.relu(x @ w2.t() + b2)
    # No ReLU on the output layer: CrossEntropyLoss expects raw logits,
    # and clamping them to be non-negative throws information away
    x = x @ w3.t() + b3
    return x
optimizer = optim.SGD([w1, b1, w2, b2, w3, b3], lr=learning_rate)
criteon = nn.CrossEntropyLoss()
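CrossEntropyLoss fuses log_softmax with the negative log-likelihood loss, which is why forward() returns raw logits. A standalone sanity check of that equivalence (the *_demo names are illustrative):

    logits_demo = torch.randn(4, 10)
    target_demo = torch.randint(0, 10, (4,))
    a = criteon(logits_demo, target_demo)
    b = F.nll_loss(F.log_softmax(logits_demo, dim=1), target_demo)
    assert torch.allclose(a, b)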
for epoch in range(epochs):
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28*28)

        logits = forward(data)
        loss = criteon(logits, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    # Per-epoch evaluation
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data = data.view(-1, 28 * 28)
            logits = forward(data)
            test_loss += criteon(logits, target).item()
            pred = logits.argmax(dim=1)  # replaces the deprecated logits.data.max(1)[1]
            correct += pred.eq(target).sum().item()

    # criteon returns a per-batch mean, so average over batches, not samples
    test_loss /= len(test_loader)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
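For comparison, the same 784-200-200-10 network can be written as a module that owns its parameters, letting nn.Linear perform the x @ w.t() + b computation and handle initialization; a minimal sketch (the model name is illustrative):

    model = nn.Sequential(
        nn.Linear(784, 200), nn.ReLU(),
        nn.Linear(200, 200), nn.ReLU(),
        nn.Linear(200, 10),  # raw logits for CrossEntropyLoss
    )
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)

With this variant, logits = model(data) replaces forward(data) in the loops above.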