Transfer Learning with CNN Models
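This section fine-tunes a pretrained ResNet-18 on the hymenoptera (ants vs. bees) dataset. With feature_extract = True the pretrained backbone is frozen and only the newly added classification layer is trained.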
import numpy as np
import torch
import torch.nn as nn
import torchvision
from torchvision import datasets, transforms, models
import time
import os
import copy
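# Configuration: dataset location, backbone choice, and training hyperparameters.
# feature_extract = True means the pretrained backbone is frozen (feature extraction)
# rather than fine-tuned end to end.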
data_dir = './data/hymenoptera_data'
model_name = 'resnet'
num_classes = 2
batch_size = 32
num_epochs = 15
feature_extract = True
input_size = 224
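# A plain ImageFolder/DataLoader over the training split without normalization,
# handy for inspecting raw augmented batches; the normalized pipelines used for
# training and validation are defined below.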
all_imgs = datasets.ImageFolder(os.path.join(data_dir, "train"), transforms.Compose([
    transforms.RandomResizedCrop(input_size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
]))
loader = torch.utils.data.DataLoader(all_imgs, batch_size=batch_size, shuffle=True, num_workers=4)
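# Augmentation for training, deterministic resize/center-crop for validation; both are
# normalized with the ImageNet channel means and standard deviations that the
# pretrained weights expect.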
data_transforms = {
    "train": transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    "val": transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}
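# ImageFolder datasets and DataLoaders for the train and val splits.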
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ["train", "val"]}
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,
                                                   shuffle=True, num_workers=4)
                    for x in ["train", "val"]}
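# Train on the GPU when one is available.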
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def set_parameter_requires_grad(model, feature_extract):
    # When feature extracting, freeze every pretrained parameter so that only
    # layers added afterwards (the new classifier head) receive gradients.
    if feature_extract:
        for param in model.parameters():
            param.requires_grad = False
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    if model_name == "resnet":
        # Load an ImageNet-pretrained ResNet-18 (newer torchvision releases prefer the
        # weights= argument, e.g. weights=models.ResNet18_Weights.DEFAULT).
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Replace the final fully connected layer with a fresh head for num_classes outputs;
        # this new layer is the only part left trainable when feature_extract is True.
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    else:
        print("model not implemented")
        return None, None
    return model_ft, input_size
model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
def train_model(model, dataloaders, loss_fn, optimizer, num_epochs=5):
    # Train/validate loop that keeps a copy of the weights with the best validation accuracy.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.
    val_acc_history = []
    for epoch in range(num_epochs):
        print("Epoch: ", epoch)
        for phase in ["train", "val"]:
            running_loss = 0.
            running_corrects = 0.
            if phase == "train":
                model.train()
            else:
                model.eval()
            for inputs, labels in dataloaders[phase]:
                inputs, labels = inputs.to(device), labels.to(device)
                # Build the autograd graph only during the training phase.
                with torch.autograd.set_grad_enabled(phase == "train"):
                    outputs = model(inputs)
                    loss = loss_fn(outputs, labels)
                preds = outputs.argmax(dim=1)
                if phase == "train":
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                # Accumulate loss and correct predictions over the whole epoch.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds.view(-1) == labels.view(-1)).item()
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects / len(dataloaders[phase].dataset)
            print("Phase {} loss: {}, acc: {}".format(phase, epoch_loss, epoch_acc))
            if phase == "val" and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == "val":
                val_acc_history.append(epoch_acc)
    # Restore the best validation weights before returning.
    model.load_state_dict(best_model_wts)
    return model, val_acc_history
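# Move the model to the chosen device and optimize only the parameters that still
# require gradients (with feature_extract = True that is just the new fc layer).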
model_ft = model_ft.to(device)
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), lr=0.001, momentum=0.9)
loss_fn = nn.CrossEntropyLoss()
_, hist = train_model(model_ft, dataloaders_dict, loss_fn, optimizer, num_epochs=num_epochs)
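As a follow-up, here is a minimal sketch of how the returned validation-accuracy history and the fine-tuned weights might be used; matplotlib and the output file name resnet18_hymenoptera.pth are illustrative assumptions, not part of the original script.
import matplotlib.pyplot as plt  # assumption: matplotlib is available

# hist holds one validation accuracy (a Python float) per epoch.
plt.plot(range(1, num_epochs + 1), hist)
plt.xlabel("epoch")
plt.ylabel("validation accuracy")
plt.title("ResNet-18 feature extraction on hymenoptera_data")
plt.show()

# train_model restored the best weights into model_ft before returning,
# so saving its state_dict keeps the best validation checkpoint.
torch.save(model_ft.state_dict(), "resnet18_hymenoptera.pth")  # file name is an arbitrary choice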