# Transfer learning: fine-tune an ImageNet-pretrained DenseNet-121 on the
# Cat/Dog dataset — the feature extractor is frozen and only a new
# classifier head is trained (see the model setup below).
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import helper
from collections import OrderedDict
data_dir = 'Cat_Dog_data'

# Training data: random augmentation (rotation, crop, flip) so the model sees
# varied views of each image; normalize each channel to roughly [-1, 1].
train_transforms = transforms.Compose([
    transforms.RandomRotation(30),
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# BUG FIX: the evaluation pipeline must be deterministic. The original used
# the same random rotation/crop/flip as training, which makes test metrics
# noisy and biased. Use a fixed resize + center crop with the same
# normalization as training instead.
test_transforms = transforms.Compose([
    transforms.Resize(255),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# ImageFolder derives class labels from the subdirectory names under
# train/ and test/.
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)

# Shuffle only the training set; evaluation order is irrelevant.
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64)
# Training hyper-parameters and bookkeeping.
epoches = 1      # number of passes over the training set
every = 70       # run a test-set evaluation every `every` training batches
steps = 0        # global batch counter
tr_losses = []   # per-batch training losses (plotted at the end)
te_losses = []   # per-batch validation losses (plotted at the end)
test_accu = 0
train_loss = 0

# Prefer the GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load DenseNet-121 pre-trained on ImageNet and freeze every parameter of
# the feature extractor so backprop only updates the classifier head that
# replaces model.classifier below.
model = models.densenet121(pretrained=True)
for frozen in model.parameters():
    frozen.requires_grad_(False)
from collections import OrderedDict

# New classification head: DenseNet-121 produces 1024 features; map them
# through one hidden layer down to the two classes (cat / dog) and emit
# log-probabilities.
head_layers = OrderedDict()
head_layers['fc1'] = nn.Linear(1024, 500)
head_layers['relu'] = nn.ReLU()
head_layers['fc2'] = nn.Linear(500, 2)
head_layers['output'] = nn.LogSoftmax(dim=1)
classifier = nn.Sequential(head_layers)
model.classifier = classifier

# NLLLoss is the correct pairing for a LogSoftmax output.
criterion = nn.NLLLoss()
# Optimize only the head's parameters — the backbone is frozen above.
opti = optim.Adam(model.classifier.parameters(), lr=0.001)
model.to(device)
# Train for `epoches` passes; every `every` batches, evaluate on the test set.
for e in range(epoches):
    print("trainloader.len=", len(trainloader))
    for inputs, labels in trainloader:
        steps += 1
        inputs, labels = inputs.to(device), labels.to(device)

        opti.zero_grad()
        ps = model(inputs)            # log-probabilities from the LogSoftmax head
        loss = criterion(ps, labels)
        loss.backward()
        opti.step()

        # BUG FIX: accumulate plain floats via .item(). The original stored
        # the loss tensor itself, which retains its entire autograd graph
        # every batch and steadily leaks (GPU) memory.
        train_loss += loss.item()
        tr_losses.append(loss.item())

        if steps % every == 0:
            test_loss = 0
            test_accu = 0
            model.eval()              # disable dropout / use running BN stats
            with torch.no_grad():     # no gradients needed for evaluation
                for imgs2, lb2 in testloader:
                    imgs2, lb2 = imgs2.to(device), lb2.to(device)
                    ps2 = model(imgs2)
                    loss2 = criterion(ps2, lb2)
                    te_losses.append(loss2.item())
                    test_loss += loss2.item()
                    # exp() turns log-probabilities back into probabilities
                    # so topk picks the most likely class.
                    probs = torch.exp(ps2)
                    t_p, t_c = probs.topk(1, dim=1)
                    equals = t_c == lb2.view(t_c.shape)
                    # BUG FIX: accumulate accuracy across ALL test batches.
                    # The original overwrote test_accu each batch, so the
                    # printed figure was only the last batch's accuracy.
                    test_accu += torch.mean(equals.type(torch.FloatTensor)).item()
            model.train()
            print("accur={}".format(test_accu / len(testloader)),
                  "train_loss={}".format(train_loss / every),
                  "test_loss={}".format(test_loss / len(testloader)))
            train_loss = 0
# Plot training vs. validation loss per batch — their divergence marks where
# overfitting begins. (Original comment, translated: "observe the training
# and validation loss curves to locate the overfitting point".)
print("######################################观察训练与校验损失函数走向便于确定过拟合位置")
# BUG FIX: the recorded losses may be CUDA tensors that still require grad;
# matplotlib cannot convert those to numpy. float() detaches and moves each
# value to the host, and is a no-op if plain floats were stored instead.
plt.plot([float(l) for l in tr_losses], label='Training loss')
plt.plot([float(l) for l in te_losses], label='Validation loss')
plt.legend(frameon=False)