# Preview the first four training images with their class labels.
dataiter = iter(trainloader)
# .next() was removed from Python iterators; use the builtin next().
images, labels = next(dataiter)
for i in range(4):
    p = plt.subplot()
    p.set_title("label: %5s" % classes[labels[i]])
    imshow(images[i])

# Build the network.
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Show one batch of test images and print their ground-truth labels.
dataiter = iter(testloader)
# .next() was removed from Python iterators; use the builtin next().
images, labels = next(dataiter)
imshow(torchvision.utils.make_grid(images))
print('GroundTruth:', ' '.join(classes[labels[j]] for j in range(4)))
# Run the batch through the network and report the predicted classes.
outputs = net(Variable(images.cuda()))
# torch.max over dim 1 returns (max values, argmax indices); we want the indices.
_, predicted = torch.max(outputs.data, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))
# Measure overall accuracy over the whole test set.
correct = 0
total = 0
for data in testloader:
    images, labels = data
    outputs = net(Variable(images.cuda()))
    _, predicted = torch.max(outputs.data, 1)
    # Labels must be moved to the GPU to compare with the GPU predictions.
    correct += (predicted == labels.cuda()).sum()
    total += labels.size(0)
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
# Tally correct predictions per class. Counters must start at ZERO --
# the original initialized them with torch.ones, which inflates both
# the numerator and the denominator and skews every class's accuracy.
class_correct = torch.zeros(10).cuda()
class_total = torch.zeros(10).cuda()
for data in testloader:
    images, labels = data
    outputs = net(Variable(images.cuda()))
    _, predicted = torch.max(outputs.data, 1)
    # Per-sample correctness mask for this batch.
    c = (predicted == labels.cuda()).squeeze()
    for i in range(4):  # assumes the test loader batch size is 4 -- TODO confirm
        label = labels[i]
        class_correct[label] += c[i]
        class_total[label] += 1
# Report the accuracy of each of the 10 classes.
for i in range(10):
    print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
# -*- coding:utf-8 -*-
from __future__ import print_function , division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets , models , transforms
import matplotlib.pyplot as plt
import time
import os
import pylab
# Build ImageFolder datasets and loaders for the train/val splits.
# NOTE(review): data_transforms is referenced but not defined in this
# chunk -- it must be declared earlier in the file; verify.
data_dir = 'hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
                  for x in ['train', 'val']}
dataloders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                             shuffle=True, num_workers=4)
              for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
print(class_names)
use_gpu = torch.cuda.is_available()


# Show several images.
def imshow(inp, title=None):
    """Display a (C, H, W) image tensor, undoing the dataset normalization."""
    inp = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean  # invert Normalize(mean, std)
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    pylab.show()
    plt.pause(0.001)  # brief pause so the plot window can refresh
# Preview one batch of training data.
inputs, classes = next(iter(dataloders['train']))
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])


# Train the model, keeping the weights of the best-performing epoch.
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Run the train/val loop and return the model with the best val-accuracy weights.

    NOTE(review): the body of this function arrived as garbled HTML in the
    source; only the setup, epoch/phase loops, scheduler step, and the
    train/eval mode toggles were visible. The inner batch loop and the
    best-weights bookkeeping below are reconstructed -- confirm against the
    original training script.
    """
    since = time.time()
    # state_dict() returns a dictionary containing a whole state of the module.
    best_model_wts = model.state_dict()
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training phase and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                scheduler.step()  # advance the learning-rate schedule
                model.train(True)   # training mode
            else:
                model.train(False)  # evaluate mode
            running_loss = 0.0
            running_corrects = 0
            for data in dataloders[phase]:
                inputs, labels = data
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)
                optimizer.zero_grad()
                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)
                # Backpropagate and update weights only in the training phase.
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                running_loss += loss.data[0]
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # Snapshot the weights whenever validation accuracy improves.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = model.state_dict()
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    model.load_state_dict(best_model_wts)
    return model
# Load a pretrained ResNet-18 and freeze all of its parameters, so only
# the newly added classifier head is trained (feature extraction).
model_conv = torchvision.models.resnet18(pretrained=True)
for param in model_conv.parameters():
    param.requires_grad = False

# Parameters of newly constructed modules have requires_grad=True by default.
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)  # replace the head with a 2-class classifier

if use_gpu:
    model_conv = model_conv.cuda()

criterion = nn.CrossEntropyLoss()

# Observe that only parameters of the final layer are being optimized,
# as opposed to fine-tuning the whole network.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)

# Decay LR by a factor of 0.1 every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)

model_conv = train_model(model_conv, criterion, optimizer_conv,
                         exp_lr_scheduler, num_epochs=25)