1. CNN Image Classification
PyTorch Version: 1.0.0
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
print("PyTorch Version: ",torch.__version__)
(1) First, define a simple ConvNet-based neural network.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Input is a 1-channel 28x28 MNIST image
        self.conv1 = nn.Conv2d(1, 20, 5, 1)   # 28x28 -> 24x24, 20 channels
        self.conv2 = nn.Conv2d(20, 50, 5, 1)  # 12x12 -> 8x8, 50 channels
        self.fc1 = nn.Linear(4*4*50, 500)     # 4x4x50 features after the second pooling
        self.fc2 = nn.Linear(500, 10)         # 10 digit classes

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)             # 24x24 -> 12x12
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)             # 8x8 -> 4x4
        x = x.view(-1, 4*4*50)                # flatten
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)        # log-probabilities for NLL loss
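The 4*4*50 input size of fc1 follows from the shrinking feature maps: 28x28 becomes 24x24 after conv1, 12x12 after the first pooling, 8x8 after conv2, and 4x4 after the second pooling, with 50 channels. A quick sanity check with a dummy batch (a minimal sketch; the batch size of 8 is arbitrary):

# Sketch: verify the output shape with a dummy MNIST-sized batch.
net = Net()
dummy = torch.randn(8, 1, 28, 28)   # (batch, channels, height, width)
out = net(dummy)
print(out.shape)                    # torch.Size([8, 10]): one log-probability per class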
Definition of the NLL loss:
$$\ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \quad l_n = -\, w_{y_n}\, x_{n, y_n}, \quad w_c = \mathrm{weight}[c] \cdot \mathbb{1}\{c \neq \mathrm{ignore\_index}\}$$
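Here x holds the log-probabilities produced by log_softmax and y the target class indices, so pairing log_softmax with F.nll_loss is equivalent to F.cross_entropy on the raw logits. A minimal check with random scores (not part of the tutorial data):

# Sketch: log_softmax + nll_loss matches cross_entropy on the same logits.
logits = torch.randn(4, 10)
target = torch.tensor([3, 0, 9, 1])
loss_a = F.nll_loss(F.log_softmax(logits, dim=1), target)
loss_b = F.cross_entropy(logits, target)
print(torch.allclose(loss_a, loss_b))   # True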
def train(model, device, train_loader, optimizer, epoch, log_interval=100):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()              # clear gradients from the previous step
        output = model(data)
        loss = F.nll_loss(output, target)  # negative log-likelihood on log-probabilities
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print("Train Epoch: {} [{}/{} ({:0f}%)]\tLoss: {:.6f}".format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()
            ))
def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
torch.manual_seed(53113)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
batch_size = test_batch_size = 32
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./mnist_data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./mnist_data', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=test_batch_size, shuffle=True, **kwargs)
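Before training, it can help to pull one batch and confirm the shapes and the normalized value range (a quick sketch; the variable names are arbitrary):

# Sketch: inspect a single batch from the training loader.
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)                # torch.Size([32, 1, 28, 28]) torch.Size([32])
print(images.min().item(), images.max().item())  # roughly -0.42 to 2.82 after normalization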
lr = 0.01
momentum = 0.5
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
epochs = 2
for epoch in range(1, epochs + 1):
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)

save_model = True
if save_model:
    torch.save(model.state_dict(), "mnist_cnn.pt")
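To reuse the saved weights later, a minimal sketch of restoring the state_dict into a fresh Net for inference:

# Sketch: reload the saved weights for evaluation.
model = Net().to(device)
model.load_state_dict(torch.load("mnist_cnn.pt", map_location=device))
model.eval()   # switch to evaluation mode before running predictions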
Results:
Train Epoch: 1 [0/60000 (0.000000%)] Loss: 2.297938
Train Epoch: 1 [3200/60000 (5.333333%)] Loss: 0.567845
Train Epoch: 1 [6400/60000 (10.666667%)] Loss: 0.206370
Train Epoch: 1 [9600/60000 (16.000000%)] Loss: 0.094653
Train Epoch: 1 [12800/60000 (21.333333%)] Loss: 0.180530
Train Epoch: 1 [16000/60000 (26.666667%)] Loss: 0.041645
Train Epoch: 1 [19200/60000 (32.000000%)] Loss: 0.135092
Train Epoch: 1 [22400/60000 (37.333333%)] Loss: 0.054001
Train Epoch: 1 [25600/60000 (42.666667%)] Loss: 0.111863
Train Epoch: 1 [28800/60000 (48.000000%)] Loss: 0.059039
Train Epoch: 1 [32000/60000 (53.333333%)] Loss: 0.089227
Train Epoch: 1 [35200/60000 (58.666667%)] Loss: 0.186015
Train Epoch: 1 [38400/60000 (64.000000%)] Loss: 0.093208
Train Epoch: 1 [41600/60000 (69.333333%)] Loss: 0.077090
Train Epoch: 1 [44800/60000 (74.666667%)] Loss: 0.038075
Train Epoch: 1 [48000/60000 (80.000000%)] Loss: 0.036247
Train Epoch: 1 [51200/60000 (85.333333%)] Loss: 0.052358
Train Epoch: 1 [54400/60000 (90.666667%)] Loss: 0.013201
Train Epoch: 1 [57600/60000 (96.000000%)] Loss: 0.036660
Test set: Average loss: 0.0644, Accuracy: 9802/10000 (98%)
Train Epoch: 2 [0/60000 (0.000000%)] Loss: 0.054402
Train Epoch: 2 [3200/60000 (5.333333%)] Loss: 0.032239
Train Epoch: 2 [6400/60000 (10.666667%)] Loss: 0.092350
Train Epoch: 2 [9600/60000 (16.000000%)] Loss: 0.058544
Train Epoch: 2 [12800/60000 (21.333333%)] Loss: 0.029762
Train Epoch: 2 [16000/60000 (26.666667%)] Loss: 0.012521
Train Epoch: 2 [19200/60000 (32.000000%)] Loss: 0.101891
Train Epoch: 2 [22400/60000 (37.333333%)] Loss: 0.127773
Train Epoch: 2 [25600/