import os
import numpy as np
from PIL import Image
import torch.nn.functional as F
import torch.optim as optim
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.utils.data import Dataset, DataLoader
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
# Data augmentation / preprocessing pipeline applied to every training image.
# NOTE(review): Resize(100) -> RandomCrop(50) -> RandomResizedCrop(150)
# upsamples a 50-pixel crop back to 150x150, discarding most of the image;
# confirm this ordering is intentional.
transform = transforms.Compose([
transforms.Resize(100),
transforms.RandomVerticalFlip(),
transforms.RandomCrop(50),
transforms.RandomResizedCrop(150),
transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
transforms.ToTensor(),
# Normalise each RGB channel to roughly [-1, 1].
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
class MyDataSet(Dataset):
    """Cats-vs-dogs dataset reading images from a flat directory.

    File names are expected to look like ``dog.123.jpg`` / ``cat.42.jpg``;
    the prefix before the first ``.`` is the class (dog -> 1, cat -> 0).

    Args:
        path_dir: directory containing the image files.
        transform: optional torchvision transform applied to each image.
        train: when ``test`` is False, True selects the first 70% of the
            shuffled file list, False the remaining 30%.
        test: when True, use every file (no train/valid split).
    """

    def __init__(self, path_dir, transform=None, train=True, test=False):
        super(MyDataSet, self).__init__()
        self.path = path_dir
        self.transform = transform
        # All files in the directory, shuffled reproducibly.
        images = os.listdir(self.path)
        np.random.seed(10000)
        # BUG FIX: the original called np.random.permutation(images) and
        # discarded the result (a no-op); the permutation must be kept.
        images = np.random.permutation(images).tolist()
        len_imgs = len(images)
        self.test = test
        if self.test:
            self.images = images
        elif train:
            self.images = images[: int(0.7 * len_imgs)]
        else:
            self.images = images[int(0.7 * len_imgs):]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image_index = self.images[index]
        img_path = os.path.join(self.path, image_index)
        img = Image.open(img_path).convert('RGB')
        # os.path.basename is separator-agnostic (the original split on '/',
        # which breaks on Windows paths).
        name = os.path.basename(img_path).split('.')[0]
        # BUG FIX: the original `1 if 'dog' else 0` always evaluated to 1
        # because the non-empty string 'dog' is truthy. Dog = 1, cat = 0.
        label = 1 if name == 'dog' else 0
        if self.transform is not None:
            img = self.transform(img)
        return img, label
# Hyper-parameters
BATCH_SIZE = 20
EPOCHS = 10
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Train and validation datasets read the same directory; MyDataSet splits
# the shuffled file list 70/30 internally.
dataset_train = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=True)
dataset_valid = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=False)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=BATCH_SIZE, shuffle=True)
class ConvNet(nn.Module):
    """Small CNN for binary (cat/dog) classification.

    Six 3x3 conv layers interleaved with four 2x2 max-pools, then two
    fully-connected layers and a sigmoid, so the output is a probability
    of shape (batch, 1). The fc1 input size 4608 = 128 * 6 * 6 matches a
    150x150 RGB input (as produced by RandomResizedCrop(150)).
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.max_pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.max_pool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(64, 64, 3)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.max_pool3 = nn.MaxPool2d(2)
        self.conv5 = nn.Conv2d(64, 128, 3)
        self.conv6 = nn.Conv2d(128, 128, 3)
        self.max_pool4 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(4608, 512)  # 128 * 6 * 6
        self.fc2 = nn.Linear(512, 1)

    def forward(self, x):
        """Map a (batch, 3, 150, 150) tensor to (batch, 1) probabilities."""
        batch = x.size(0)
        x = self.max_pool1(F.relu(self.conv1(x)))
        x = self.max_pool2(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))
        x = self.max_pool3(F.relu(self.conv4(x)))
        x = F.relu(self.conv5(x))
        x = self.max_pool4(F.relu(self.conv6(x)))
        x = x.view(batch, -1)  # flatten
        return torch.sigmoid(self.fc2(F.relu(self.fc1(x))))
# Initial learning rate; adjust_learning_rate() decays it during training.
modellr = 1e-4
model = ConvNet().to(DEVICE)
optimizer = optim.Adam(model.parameters(), lr=modellr)
def adjust_learning_rate(optimizer, epoch, base_lr=None):
    """Decay the learning rate by a factor of 10 every 5 epochs.

    (The original docstring claimed "every 30 epochs"; the code divides by 5.)

    Args:
        optimizer: optimizer whose param groups are updated in place.
        epoch: current (1-based) epoch number.
        base_lr: starting learning rate; defaults to the module-level
            ``modellr`` so existing call sites behave unchanged.
    """
    if base_lr is None:
        base_lr = modellr
    # 0.1 ** (epoch // 5): unchanged for epochs 0-4, /10 for 5-9, etc.
    modellrnew = base_lr * (0.1 ** (epoch // 5))
    print("lr:", modellrnew)
    for param_group in optimizer.param_groups:
        param_group['lr'] = modellrnew
# 定义训练过程
def train(model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device).float().unsqueeze(1)
# print(data.shape)
optimizer.zero_grad()
output = model(data)
loss = F.binary_cross_entropy(output, target)
loss.backward()
optimizer.step()
if (batch_idx + 1) % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
100. * (batch_idx + 1) / len(train_loader), loss.item()))
# Validation procedure
def val(model, device, test_loader):
    """Evaluate `model` on `test_loader` and return accuracy in percent.

    BUG FIX: the original summed per-batch mean losses but printed the sum
    as "Average loss"; the sum is now divided by the number of batches.
    Returning the accuracy is backward-compatible (callers ignored the
    previous ``None``).
    """
    model.eval()
    test_loss = 0.0
    correct = 0
    n_batches = 0
    with torch.no_grad():
        for data, target in test_loader:
            data = data.to(device)
            target = target.to(device).float().unsqueeze(1)
            output = model(data)
            test_loss += F.binary_cross_entropy(output, target, reduction='mean').item()
            n_batches += 1
            # Threshold the sigmoid output at 0.5 (vectorised; the original
            # built a Python list per batch).
            pred = (output >= 0.5).long()
            correct += pred.eq(target.long()).sum().item()
    avg_loss = test_loss / max(n_batches, 1)
    accuracy = 100. * correct / len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        avg_loss, correct, len(test_loader.dataset), accuracy))
    return accuracy
# Main loop: decay the LR, train one epoch, validate, checkpoint the model.
for epoch in range(1, EPOCHS + 1):
    adjust_learning_rate(optimizer, epoch)
    train(model, DEVICE, train_loader, optimizer, epoch)
    val(model, DEVICE, valid_loader)
    # Saves the entire pickled module (not just the state_dict).
    torch.save(model, "model_" + str(epoch) + ".pth")
# class MyDataSet(Dataset):
# def __init__(self, path_dir, transform=None):
# self.path = path_dir
# self.transform = transform
# # 路径下的所有文件放在一个列表里
# self.images = os.listdir(self.path)
#
# def __len__(self):
# return len(self.images)
#
# def __getitem__(self, index):
# image_index = self.images[index]
# img_path = os.path.join(self.path, image_index)
# img = Image.open(img_path).convert('RGB')
# label = img_path.split('/')[-1].split('.')[0]
# # 狗为1 猫为0
# label = 1 if 'dog' else 0
# if self.transform is not None:
# img = self.transform(img)
# return img, label
ResNet-18 transfer learning:
import os
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import Dataset
from torchvision import transforms, models
from torch.autograd import Variable
from PIL import Image
import matplotlib.pyplot as plt
# Data augmentation / preprocessing pipeline (224x224 output for ResNet-18).
# NOTE(review): Resize(100) -> RandomCrop(50) -> RandomResizedCrop(224)
# upsamples a 50-pixel crop to 224x224; confirm this ordering is intentional.
transform = transforms.Compose([
transforms.Resize(100),
transforms.RandomVerticalFlip(),
transforms.RandomCrop(50),
transforms.RandomResizedCrop(224),
transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
transforms.ToTensor(),
# Normalise each RGB channel to roughly [-1, 1].
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
class MyDataSet(Dataset):
    """Cats-vs-dogs dataset reading images from a flat directory.

    File names are expected to look like ``dog.123.jpg`` / ``cat.42.jpg``;
    the prefix before the first ``.`` is the class (dog -> 1, cat -> 0).

    Args:
        path_dir: directory containing the image files.
        transform: optional torchvision transform applied to each image.
        train: when ``test`` is False, True selects the first 80% of the
            shuffled file list, False the remaining 20%.
        test: when True, use every file (no train/valid split).
    """

    def __init__(self, path_dir, transform=None, train=True, test=False):
        super(MyDataSet, self).__init__()
        self.path = path_dir
        self.transform = transform
        # All files in the directory, shuffled reproducibly.
        images = os.listdir(self.path)
        np.random.seed(10000)
        # BUG FIX: the original called np.random.permutation(images) and
        # discarded the result (a no-op); the permutation must be kept.
        images = np.random.permutation(images).tolist()
        len_imgs = len(images)
        self.test = test
        if self.test:
            self.images = images
        elif train:
            self.images = images[: int(0.8 * len_imgs)]
        else:
            self.images = images[int(0.8 * len_imgs):]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image_index = self.images[index]
        img_path = os.path.join(self.path, image_index)
        img = Image.open(img_path).convert('RGB')
        # os.path.basename is separator-agnostic (the original split on '/',
        # which breaks on Windows paths).
        name = os.path.basename(img_path).split('.')[0]
        # Dog = 1, cat = 0.
        label = 1 if name == 'dog' else 0
        if self.transform is not None:
            img = self.transform(img)
        return img, label
# Hyper-parameters
BATCH_SIZE = 64
epochs = 10
# NOTE(review): DEVICE is computed but nothing below moves the model or the
# data to it — training runs on CPU as written.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Train/validation share one directory; MyDataSet splits 80/20 internally.
dataset_train = MyDataSet('/root/data/kaggle/train', transform, train=True)
dataset_valid = MyDataSet('/root/data/kaggle/train', transform, train=False)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=BATCH_SIZE, shuffle=True)
# ImageNet-pretrained ResNet-18 with the final FC layer replaced by a
# 2-way (cat/dog) classifier.
net = models.resnet18(pretrained=True)
num_ftrs = net.fc.in_features
net.fc = nn.Linear(num_ftrs, 2)
# print(net)
# Loss and optimizer.
# NOTE(review): 'cirterion' is a typo for 'criterion'; kept because the
# training loop below references this name.
cirterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)
# ---- Training ----
# BUG FIX: accuracy histories are created once, OUTSIDE the epoch loop. The
# original re-initialised train_acc/valid_acc every epoch, so the final
# summary printed only the last epoch's numbers.
train_acc = []
valid_acc = []
for epoch in range(epochs):
    # BUG FIX: re-enter train mode every epoch. The original called
    # net.train() once before the loop, and net.eval() during validation,
    # so all epochs after the first trained in eval mode.
    net.train()
    running_loss = 0.0
    train_correct = 0
    train_total = 0
    train_loss = []
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        _, train_predicted = torch.max(outputs.data, 1)
        train_correct += (train_predicted == labels).sum().item()
        loss = cirterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        print("epoch: ", epoch, "batch_size: ", i, "/", len(train_loader), " loss: ", loss.item())
        train_loss.append(loss.item())
        train_total += labels.size(0)
    print('train %d epoch loss: %.3f acc: %.3f ' % (epoch + 1, running_loss / train_total * BATCH_SIZE, 100 * train_correct / train_total))
    train_acc.append(100 * train_correct / train_total)
    # Per-epoch curve of batch losses.
    plt.figure()
    plt.plot(range(len(train_loss)), train_loss)
    plt.savefig(str(epoch) + '_train_loss.jpg')
    train_loss.clear()
    # ---- Validation ----
    valid_loss = []
    correct = 0
    test_loss = 0.0
    test_total = 0
    net.eval()
    with torch.no_grad():  # gradients are not needed during evaluation
        for data in valid_loader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            loss = cirterion(outputs, labels)
            test_loss += loss.item()
            test_total += labels.size(0)
            correct += (predicted == labels).sum().item()
    valid_loss.append(float(test_loss / test_total))
    plt.figure()
    plt.plot(range(len(valid_loss)), valid_loss)
    plt.savefig(str(epoch) + '_valid_loss.jpg')
    valid_loss.clear()
    print('valid %d epoch loss: %.3f acc: %.3f ' % (epoch + 1, test_loss / test_total, 100 * correct / test_total))
    valid_acc.append(100 * correct / test_total)
    # NOTE(review): saves the whole pickled module; a state_dict would be
    # more portable.
    torch.save(net, "/home/fancy/PythonProgram/exercise/My_resnet18_" + str(epoch) + ".pkl")
print('train_acc: ', train_acc)
print('valid_acc: ', valid_acc)
AlexNet transfer learning:
# -*-coding:utf-8-*-
import os
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import Dataset
from torchvision import transforms, models
from torch.autograd import Variable
from PIL import Image
import matplotlib.pyplot as plt
# Data augmentation / preprocessing pipeline (224x224 output for AlexNet).
# NOTE(review): Resize(100) -> RandomCrop(50) -> RandomResizedCrop(224)
# upsamples a 50-pixel crop to 224x224; confirm this ordering is intentional.
transform = transforms.Compose([
transforms.Resize(100),
transforms.RandomVerticalFlip(),
transforms.RandomCrop(50),
transforms.RandomResizedCrop(224),
transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
transforms.ToTensor(),
# Normalise each RGB channel to roughly [-1, 1].
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
class MyDataSet(Dataset):
    """Cats-vs-dogs dataset reading images from a flat directory.

    File names are expected to look like ``dog.123.jpg`` / ``cat.42.jpg``;
    the prefix before the first ``.`` is the class (dog -> 1, cat -> 0).

    Args:
        path_dir: directory containing the image files.
        transform: optional torchvision transform applied to each image.
        train: when ``test`` is False, True selects the first 80% of the
            shuffled file list, False the remaining 20%.
        test: when True, use every file (no train/valid split).
    """

    def __init__(self, path_dir, transform=None, train=True, test=False):
        super(MyDataSet, self).__init__()
        self.path = path_dir
        self.transform = transform
        # All files in the directory, shuffled reproducibly.
        images = os.listdir(self.path)
        np.random.seed(10000)
        # BUG FIX: the original called np.random.permutation(images) and
        # discarded the result (a no-op); the permutation must be kept.
        images = np.random.permutation(images).tolist()
        len_imgs = len(images)
        self.test = test
        if self.test:
            self.images = images
        elif train:
            self.images = images[: int(0.8 * len_imgs)]
        else:
            self.images = images[int(0.8 * len_imgs):]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image_index = self.images[index]
        img_path = os.path.join(self.path, image_index)
        img = Image.open(img_path).convert('RGB')
        # os.path.basename is separator-agnostic (the original split on '/',
        # which breaks on Windows paths).
        name = os.path.basename(img_path).split('.')[0]
        # BUG FIX: the original `1 if 'dog' else 0` always evaluated to 1
        # because the non-empty string 'dog' is truthy. Dog = 1, cat = 0.
        label = 1 if name == 'dog' else 0
        if self.transform is not None:
            img = self.transform(img)
        return img, label
# Hyper-parameters
BATCH_SIZE = 32
epochs = 10
# NOTE(review): DEVICE is computed but nothing below moves the model or the
# data to it — training runs on CPU as written.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Train/validation share one directory; MyDataSet splits 80/20 internally.
dataset_train = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=True)
dataset_valid = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=False)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=BATCH_SIZE, shuffle=True)
# NOTE(review): pretrained=False means AlexNet starts from random weights —
# despite the "transfer" heading, no pretrained features are reused here.
net = models.alexnet(pretrained=False)
# Replace the last classifier layer with a 2-way (cat/dog) output.
net.classifier[6] = nn.Linear(4096, 2)
# num_ftrs = net.fc.in_features
# 更新resnet18模型的fc模型输出,
# net.fc = nn.Linear(num_ftrs, 2)
# print(net)
# Loss and optimizer.
# NOTE(review): 'cirterion' is a typo for 'criterion'; kept because the
# training loop below references this name.
cirterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)
# ---- Training ----
# BUG FIX: accuracy histories are created once, OUTSIDE the epoch loop. The
# original re-initialised train_acc/valid_acc every epoch, so the final
# summary printed only the last epoch's numbers.
train_acc = []
valid_acc = []
for epoch in range(epochs):
    # BUG FIX: re-enter train mode every epoch. The original called
    # net.train() once before the loop, and net.eval() during validation,
    # so all epochs after the first trained in eval mode.
    net.train()
    running_loss = 0.0
    train_correct = 0
    train_total = 0
    train_loss = []
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        _, train_predicted = torch.max(outputs.data, 1)
        train_correct += (train_predicted == labels).sum().item()
        loss = cirterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        print("epoch: ", epoch, "batch_size: ", i, "/", len(train_loader), " loss: ", loss.item())
        train_loss.append(loss.item())
        train_total += labels.size(0)
    print('train %d epoch loss: %.3f acc: %.3f ' % (epoch + 1, running_loss / train_total * BATCH_SIZE, 100 * train_correct / train_total))
    train_acc.append(100 * train_correct / train_total)
    # Per-epoch curve of batch losses.
    plt.figure()
    plt.plot(range(len(train_loss)), train_loss)
    plt.savefig(str(epoch) + '_train_loss.jpg')
    train_loss.clear()
    # ---- Validation ----
    valid_loss = []
    correct = 0
    test_loss = 0.0
    test_total = 0
    net.eval()
    with torch.no_grad():  # gradients are not needed during evaluation
        for data in valid_loader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            loss = cirterion(outputs, labels)
            test_loss += loss.item()
            test_total += labels.size(0)
            correct += (predicted == labels).sum().item()
    valid_loss.append(float(test_loss / test_total))
    plt.figure()
    plt.plot(range(len(valid_loss)), valid_loss)
    plt.savefig(str(epoch) + '_valid_loss.jpg')
    valid_loss.clear()
    print('valid %d epoch loss: %.3f acc: %.3f ' % (epoch + 1, test_loss / test_total, 100 * correct / test_total))
    valid_acc.append(100 * correct / test_total)
    # NOTE(review): saves the whole pickled module; a state_dict would be
    # more portable.
    torch.save(net, "/home/fancy/PythonProgram/exercise/My_alexnet18_" + str(epoch) + ".pth")
print('train_acc: ', train_acc)
print('valid_acc: ', valid_acc)
VGG-16 transfer learning:
import os
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import Dataset
from torchvision import transforms, models
from torch.autograd import Variable
from PIL import Image
# Data augmentation / preprocessing pipeline (224x224 output for VGG-16).
# NOTE(review): Resize(100) -> RandomCrop(50) -> RandomResizedCrop(224)
# upsamples a 50-pixel crop to 224x224; confirm this ordering is intentional.
transform = transforms.Compose([
transforms.Resize(100),
transforms.RandomVerticalFlip(),
transforms.RandomCrop(50),
transforms.RandomResizedCrop(224),
transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
transforms.ToTensor(),
# Normalise each RGB channel to roughly [-1, 1].
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
class MyDataSet(Dataset):
    """Cats-vs-dogs dataset reading images from a flat directory.

    File names are expected to look like ``dog.123.jpg`` / ``cat.42.jpg``;
    the prefix before the first ``.`` is the class (dog -> 1, cat -> 0).

    Args:
        path_dir: directory containing the image files.
        transform: optional torchvision transform applied to each image.
        train: when ``test`` is False, True selects the first 70% of the
            shuffled file list, False the remaining 30%.
        test: when True, use every file (no train/valid split).
    """

    def __init__(self, path_dir, transform=None, train=True, test=False):
        super(MyDataSet, self).__init__()
        self.path = path_dir
        self.transform = transform
        # All files in the directory, shuffled reproducibly.
        images = os.listdir(self.path)
        np.random.seed(10000)
        # BUG FIX: the original called np.random.permutation(images) and
        # discarded the result (a no-op); the permutation must be kept.
        images = np.random.permutation(images).tolist()
        len_imgs = len(images)
        self.test = test
        if self.test:
            self.images = images
        elif train:
            self.images = images[: int(0.7 * len_imgs)]
        else:
            self.images = images[int(0.7 * len_imgs):]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image_index = self.images[index]
        img_path = os.path.join(self.path, image_index)
        img = Image.open(img_path).convert('RGB')
        # os.path.basename is separator-agnostic (the original split on '/',
        # which breaks on Windows paths).
        name = os.path.basename(img_path).split('.')[0]
        # BUG FIX: the original `1 if 'dog' else 0` always evaluated to 1
        # because the non-empty string 'dog' is truthy. Dog = 1, cat = 0.
        label = 1 if name == 'dog' else 0
        if self.transform is not None:
            img = self.transform(img)
        return img, label
# Hyper-parameters
BATCH_SIZE = 32
epochs = 10
# NOTE(review): DEVICE is computed but nothing below moves the model or the
# data to it — training runs on CPU as written.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Train/validation share one directory; MyDataSet splits 70/30 internally.
dataset_train = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=True)
dataset_valid = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=False)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=BATCH_SIZE, shuffle=True)
# ImageNet-pretrained VGG-16 with the last classifier layer replaced by a
# 2-way (cat/dog) output.
net = models.vgg16(pretrained=True)
net.classifier[6] = nn.Linear(4096, 2)
# print(net)
# num_ftrs = net.fc.in_features
# net.fc = nn.Linear(num_ftrs, 2)
# exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# Loss and optimizer.
# NOTE(review): 'cirterion' is a typo for 'criterion'; kept because the
# training loop below references this name.
cirterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)
# ---- Training ----
for epoch in range(epochs):
    # BUG FIX: re-enter train mode every epoch. The original called
    # net.train() once before the loop, and net.eval() during validation,
    # so all epochs after the first trained in eval mode.
    net.train()
    running_loss = 0.0
    train_correct = 0
    train_total = 0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        _, train_predicted = torch.max(outputs.data, 1)
        train_correct += (train_predicted == labels).sum().item()
        loss = cirterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        print("epoch: ", epoch, "batch_size: ", i, "/", len(train_loader), " loss: ", loss.item())
        train_total += labels.size(0)
    print('train %d epoch loss: %.3f acc: %.3f ' % (
        epoch + 1, running_loss / train_total * BATCH_SIZE, 100 * train_correct / train_total))
    # ---- Validation ----
    correct = 0
    test_loss = 0.0
    test_total = 0  # (the original assigned this twice)
    net.eval()
    with torch.no_grad():  # gradients are not needed during evaluation
        for data in valid_loader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            loss = cirterion(outputs, labels)
            test_loss += loss.item()
            test_total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('valid %d epoch loss: %.3f acc: %.3f ' % (epoch + 1, test_loss / test_total, 100 * correct / test_total))
    # NOTE(review): saves the whole pickled module; a state_dict would be
    # more portable.
    torch.save(net, "My_VGG16_" + str(epoch) + ".pth")