kaggle的猫狗分类

import os
import numpy as np
from PIL import Image
import torch.nn.functional as F
import torch.optim as optim
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.utils.data import Dataset, DataLoader
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets

# Training-time augmentation / preprocessing pipeline.
transform = transforms.Compose([
    transforms.Resize(100),  # shorter side -> 100 px
    transforms.RandomVerticalFlip(),
    transforms.RandomCrop(50),  # NOTE(review): cropping to 50 and then RandomResizedCrop(150) upsamples 3x — confirm this chain is intended
    transforms.RandomResizedCrop(150),  # final spatial size fed to ConvNet: 150x150
    transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  # map each RGB channel to roughly [-1, 1]
])

class MyDataSet(Dataset):
    """Cats-vs-dogs dataset reading `dog.*.jpg` / `cat.*.jpg` files from one directory.

    The file list is shuffled with a fixed seed and split 70/30 into
    train/validation subsets; ``test=True`` keeps every file.
    """

    def __init__(self, path_dir, transform=None, train=True, test=False):
        super(MyDataSet, self).__init__()
        self.path = path_dir
        self.transform = transform
        # All files found in the directory.
        images = os.listdir(self.path)
        # Shuffle deterministically so the train/valid splits are disjoint and
        # reproducible across runs.
        # BUG FIX: np.random.permutation returns a shuffled *copy* — the
        # original code discarded the result, so nothing was ever shuffled.
        np.random.seed(10000)
        images = [str(name) for name in np.random.permutation(images)]

        len_imgs = len(images)
        self.test = test

        if self.test:
            self.images = images                          # test mode: use everything
        elif train:
            self.images = images[: int(0.7 * len_imgs)]   # first 70% -> train
        else:
            self.images = images[int(0.7 * len_imgs):]    # last 30% -> valid

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image_index = self.images[index]
        img_path = os.path.join(self.path, image_index)
        img = Image.open(img_path).convert('RGB')
        # Label from the filename prefix: dog -> 1, cat -> 0.
        # BUG FIX: the original `label = 1 if 'dog' else 0` always produced 1,
        # because a non-empty string literal is truthy.
        label = 1 if image_index.split('.')[0] == 'dog' else 0
        if self.transform is not None:
            img = self.transform(img)
        return img, label

# Hyperparameters.
BATCH_SIZE = 20
EPOCHS = 10
# Use the GPU when one is available.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Train and validation sets read the same directory; MyDataSet splits it 70/30.
dataset_train = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=True)
dataset_valid = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=False)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=BATCH_SIZE, shuffle=True)

class ConvNet(nn.Module):
    """Small CNN for binary cat/dog classification on 150x150 RGB inputs.

    Six 3x3 convolutions interleaved with four 2x2 max-pools, then two
    fully-connected layers ending in a sigmoid probability (dog = 1).
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.max_pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.max_pool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(64, 64, 3)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.max_pool3 = nn.MaxPool2d(2)
        self.conv5 = nn.Conv2d(64, 128, 3)
        self.conv6 = nn.Conv2d(128, 128, 3)
        self.max_pool4 = nn.MaxPool2d(2)
        # Feature map after the conv stack is 128 x 6 x 6 = 4608 for a
        # 150x150 input.
        self.fc1 = nn.Linear(4608, 512)
        self.fc2 = nn.Linear(512, 1)

    def forward(self, x):
        batch = x.size(0)
        # Convolutional feature extractor: every conv is followed by ReLU,
        # with max-pooling halving the spatial dimensions four times.
        x = self.max_pool1(F.relu(self.conv1(x)))
        x = self.max_pool2(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))
        x = self.max_pool3(F.relu(self.conv4(x)))
        x = F.relu(self.conv5(x))
        x = self.max_pool4(F.relu(self.conv6(x)))
        # Flatten to (batch, 4608) and classify.
        x = F.relu(self.fc1(x.view(batch, -1)))
        return torch.sigmoid(self.fc2(x))

# Initial learning rate for Adam; decayed over time by adjust_learning_rate.
modellr = 1e-4
model = ConvNet().to(DEVICE)
optimizer = optim.Adam(model.parameters(), lr=modellr)

def adjust_learning_rate(optimizer, epoch):
    """Set the LR to the initial `modellr` decayed by a factor of 10 every 5 epochs.

    (The original docstring claimed "every 30 epochs", which contradicted the
    `epoch // 5` below.)
    """
    modellrnew = modellr * (0.1 ** (epoch // 5))
    print("lr:", modellrnew)
    for param_group in optimizer.param_groups:
        param_group['lr'] = modellrnew

# 定义训练过程
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch with binary cross-entropy loss."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device)
        # BCE expects float targets with the same shape as the output: (N, 1).
        target = target.to(device).float().unsqueeze(1)
        optimizer.zero_grad()
        loss = F.binary_cross_entropy(model(data), target)
        loss.backward()
        optimizer.step()
        # Progress report every 10 batches.
        if (batch_idx + 1) % 10 == 0:
            seen = (batch_idx + 1) * len(data)
            total = len(train_loader.dataset)
            pct = 100. * (batch_idx + 1) / len(train_loader)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, seen, total, pct, loss.item()))

# 定义测试过程
def val(model, device, test_loader):
    """Evaluate `model` on `test_loader` with binary cross-entropy.

    Prints the average loss and accuracy, and returns them as a
    ``(avg_loss, accuracy_percent)`` tuple (new, backward-compatible:
    the original returned None and the caller ignores the result).
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device).float().unsqueeze(1)
            output = model(data)
            test_loss += F.binary_cross_entropy(output, target, reduction='mean').item()
            # Threshold the sigmoid output at 0.5: >= 0.5 -> dog (1).
            # Replaces the per-element Python list comprehension with one tensor op.
            pred = (output >= 0.5).long()
            correct += pred.eq(target.long()).sum().item()

    # BUG FIX: the original printed the *sum* of per-batch mean losses as the
    # "Average loss"; divide by the number of batches for a true average.
    avg_loss = test_loss / max(len(test_loader), 1)
    accuracy = 100. * correct / len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        avg_loss, correct, len(test_loader.dataset), accuracy))
    return avg_loss, accuracy

# Main training loop: decay the LR, train one epoch, validate, checkpoint.
for epoch in range(1, EPOCHS + 1):
    adjust_learning_rate(optimizer, epoch)  # step-decay LR (÷10 every 5 epochs)
    train(model, DEVICE, train_loader, optimizer, epoch)
    val(model, DEVICE, valid_loader)
    # Saves the whole model object (not just state_dict) once per epoch.
    torch.save(model, "model_" + str(epoch) + ".pth")


# class MyDataSet(Dataset):
#     def __init__(self, path_dir, transform=None):
#         self.path = path_dir
#         self.transform = transform
#         # 路径下的所有文件放在一个列表里
#         self.images = os.listdir(self.path)
#
#     def __len__(self):
#         return len(self.images)
#
#     def __getitem__(self, index):
#         image_index = self.images[index]
#         img_path = os.path.join(self.path, image_index)
#         img = Image.open(img_path).convert('RGB')
#         label = img_path.split('/')[-1].split('.')[0]
#         # 狗为1 猫为0
#         label = 1 if 'dog' else 0
#         if self.transform is not None:
#             img = self.transform(img)
#         return img, label

resnet18迁移学习:

import os
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import Dataset
from torchvision import transforms, models
from torch.autograd import Variable
from PIL import Image
import matplotlib.pyplot as plt

# Augmentation / preprocessing pipeline for the ResNet18 experiment.
transform = transforms.Compose([
    transforms.Resize(100),  # shorter side -> 100 px
    transforms.RandomVerticalFlip(),
    transforms.RandomCrop(50),  # NOTE(review): 50-px crop then 224-px resized crop upsamples heavily — confirm intended
    transforms.RandomResizedCrop(224),  # ResNet18's expected input size
    transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  # map each RGB channel to roughly [-1, 1]
])

class MyDataSet(Dataset):
    """Cats-vs-dogs dataset reading `dog.*.jpg` / `cat.*.jpg` files from one directory.

    The file list is shuffled with a fixed seed and split 80/20 into
    train/validation subsets; ``test=True`` keeps every file.
    """

    def __init__(self, path_dir, transform=None, train=True, test=False):
        super(MyDataSet, self).__init__()
        self.path = path_dir
        self.transform = transform
        # All files found in the directory.
        images = os.listdir(self.path)
        # BUG FIX: np.random.permutation returns a shuffled *copy* — the
        # original code discarded the result, so nothing was ever shuffled.
        np.random.seed(10000)
        images = [str(name) for name in np.random.permutation(images)]

        len_imgs = len(images)
        self.test = test

        if self.test:
            self.images = images                          # test mode: use everything
        elif train:
            self.images = images[: int(0.8 * len_imgs)]   # first 80% -> train
        else:
            self.images = images[int(0.8 * len_imgs):]    # last 20% -> valid

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image_index = self.images[index]
        img_path = os.path.join(self.path, image_index)
        img = Image.open(img_path).convert('RGB')
        # Label from the filename prefix: dog -> 1, cat -> 0.
        # Use the filename directly instead of re-splitting the joined path,
        # which broke on non-'/' path separators.
        label = 1 if image_index.split('.')[0] == 'dog' else 0

        if self.transform is not None:
            img = self.transform(img)
        return img, label

# Hyperparameters.
BATCH_SIZE = 64
epochs = 10
# Use the GPU when one is available.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Train and validation sets read the same directory; MyDataSet splits it 80/20.
dataset_train = MyDataSet('/root/data/kaggle/train', transform, train=True)

dataset_valid = MyDataSet('/root/data/kaggle/train', transform, train=False)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=BATCH_SIZE, shuffle=True)

# ImageNet-pretrained ResNet18 backbone for transfer learning.
net = models.resnet18(pretrained=True)
num_ftrs = net.fc.in_features
# Replace the final fully-connected layer with a 2-way (cat/dog) classifier.
net.fc = nn.Linear(num_ftrs, 2)
# print(net)

# Loss and optimizer.  NOTE(review): `cirterion` is a typo for `criterion`,
# kept because the training loop below references this name.
cirterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)

# Training loop for the ResNet18 transfer-learning experiment.
net = net.to(DEVICE)  # BUG FIX: the model was never moved to DEVICE, so CUDA was unused
# BUG FIX: these history lists were re-created inside every epoch, so the
# final summary prints only ever showed the last epoch's values.
train_acc = []
valid_acc = []
for epoch in range(epochs):
    # BUG FIX: restore train mode each epoch — net.eval() at the end of the
    # previous epoch otherwise left batch-norm/dropout frozen during training.
    net.train()
    running_loss = 0.0
    train_correct = 0
    train_total = 0
    train_loss = []  # per-batch losses for this epoch's curve
    for i, data in enumerate(train_loader, 0):
        inputs, train_labels = data
        # torch.autograd.Variable has been a deprecated no-op since PyTorch 0.4.
        inputs, labels = inputs.to(DEVICE), train_labels.to(DEVICE)
        optimizer.zero_grad()
        outputs = net(inputs)
        _, train_predicted = torch.max(outputs.data, 1)
        train_correct += (train_predicted == labels.data).sum()
        loss = cirterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        print("epoch: ",  epoch, "batch_size: ", i, "/", len(train_loader), " loss: ", loss.item())
        train_loss.append(loss.item())
        train_total += train_labels.size(0)
    print('train %d epoch loss: %.3f  acc: %.3f ' % (epoch + 1, running_loss / train_total * BATCH_SIZE, 100 * train_correct / train_total))
    train_acc.append(100 * train_correct / train_total)
    # Save this epoch's training-loss curve.
    plt.figure()
    plt.plot([index for index in range(len(train_loss))], train_loss)
    plt.savefig(str(epoch) + '_train_loss.jpg')

    # Validation.
    valid_loss = []  # running-average loss after each batch
    correct = 0
    test_loss = 0.0
    test_total = 0  # BUG FIX: was assigned twice in the original
    net.eval()
    with torch.no_grad():  # no gradients needed during evaluation
        for data in valid_loader:
            images, labels = data
            images, labels = images.to(DEVICE), labels.to(DEVICE)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            loss = cirterion(outputs, labels)
            test_loss += loss.item()
            test_total += labels.size(0)
            correct += (predicted == labels).sum()
            valid_loss.append(float(test_loss / test_total))
    plt.figure()
    plt.plot([index for index in range(len(valid_loss))], valid_loss)
    plt.savefig(str(epoch) + '_valid_loss.jpg')
    print('valid  %d epoch loss: %.3f  acc: %.3f ' % (epoch + 1, test_loss / test_total, 100 * correct / test_total))
    valid_acc.append(100 * correct / test_total)
    # Checkpoint the whole model object once per epoch.
    torch.save(net, "/home/fancy/PythonProgram/exercise/My_resnet18_" + str(epoch) + ".pkl")

print('train_acc: ', train_acc)
print('valid_acc: ', valid_acc)

AlexNet迁移:

# -*-coding:utf-8-*-
import os
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import Dataset
from torchvision import transforms, models
from torch.autograd import Variable
from PIL import Image
import matplotlib.pyplot as plt

# Augmentation / preprocessing pipeline for the AlexNet experiment.
transform = transforms.Compose([
    transforms.Resize(100),  # shorter side -> 100 px
    transforms.RandomVerticalFlip(),
    transforms.RandomCrop(50),  # NOTE(review): 50-px crop then 224-px resized crop upsamples heavily — confirm intended
    transforms.RandomResizedCrop(224),  # AlexNet's expected input size
    transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  # map each RGB channel to roughly [-1, 1]
])

class MyDataSet(Dataset):
    """Cats-vs-dogs dataset reading `dog.*.jpg` / `cat.*.jpg` files from one directory.

    The file list is shuffled with a fixed seed and split 80/20 into
    train/validation subsets; ``test=True`` keeps every file.
    """

    def __init__(self, path_dir, transform=None, train=True, test=False):
        super(MyDataSet, self).__init__()
        self.path = path_dir
        self.transform = transform
        # All files found in the directory.
        images = os.listdir(self.path)
        # BUG FIX: np.random.permutation returns a shuffled *copy* — the
        # original code discarded the result, so nothing was ever shuffled.
        np.random.seed(10000)
        images = [str(name) for name in np.random.permutation(images)]

        len_imgs = len(images)
        self.test = test

        if self.test:
            self.images = images                          # test mode: use everything
        elif train:
            self.images = images[: int(0.8 * len_imgs)]   # first 80% -> train
        else:
            self.images = images[int(0.8 * len_imgs):]    # last 20% -> valid

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image_index = self.images[index]
        img_path = os.path.join(self.path, image_index)
        img = Image.open(img_path).convert('RGB')
        # Label from the filename prefix: dog -> 1, cat -> 0.
        # BUG FIX: the original `label = 1 if 'dog' else 0` always produced 1,
        # because a non-empty string literal is truthy.
        label = 1 if image_index.split('.')[0] == 'dog' else 0
        if self.transform is not None:
            img = self.transform(img)
        return img, label

# Hyperparameters.
BATCH_SIZE = 32
epochs = 10
# Use the GPU when one is available.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Train and validation sets read the same directory; MyDataSet splits it 80/20.
dataset_train = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=True)
dataset_valid = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=False)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=BATCH_SIZE, shuffle=True)

# AlexNet backbone; pretrained=False means it trains from random init here
# (so this is the architecture only, not actual transfer learning).
net = models.alexnet(pretrained=False)
# Replace the last classifier layer (4096 -> 1000) with a 2-way cat/dog head.
net.classifier[6] = nn.Linear(4096, 2)
# num_ftrs = net.fc.in_features
# (leftover from the ResNet18 version: update its fc output layer)
# net.fc = nn.Linear(num_ftrs, 2)
# print(net)

# Loss and optimizer.  NOTE(review): `cirterion` is a typo for `criterion`,
# kept because the training loop below references this name.
cirterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)

# Training loop for the AlexNet experiment.
net = net.to(DEVICE)  # BUG FIX: the model was never moved to DEVICE, so CUDA was unused
# BUG FIX: these history lists were re-created inside every epoch, so the
# final summary prints only ever showed the last epoch's values.
train_acc = []
valid_acc = []
for epoch in range(epochs):
    # BUG FIX: restore train mode each epoch — net.eval() at the end of the
    # previous epoch otherwise left dropout disabled during training.
    net.train()
    running_loss = 0.0
    train_correct = 0
    train_total = 0
    train_loss = []  # per-batch losses for this epoch's curve
    for i, data in enumerate(train_loader, 0):
        inputs, train_labels = data
        # torch.autograd.Variable has been a deprecated no-op since PyTorch 0.4.
        inputs, labels = inputs.to(DEVICE), train_labels.to(DEVICE)
        optimizer.zero_grad()
        outputs = net(inputs)
        _, train_predicted = torch.max(outputs.data, 1)
        train_correct += (train_predicted == labels.data).sum()
        loss = cirterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        print("epoch: ",  epoch, "batch_size: ", i, "/", len(train_loader), " loss: ", loss.item())
        train_loss.append(loss.item())
        train_total += train_labels.size(0)
    print('train %d epoch loss: %.3f  acc: %.3f ' % (epoch + 1, running_loss / train_total * BATCH_SIZE, 100 * train_correct / train_total))
    train_acc.append(100 * train_correct / train_total)
    # Save this epoch's training-loss curve.
    plt.figure()
    plt.plot([index for index in range(len(train_loss))], train_loss)
    plt.savefig(str(epoch) + '_train_loss.jpg')

    # Validation.
    valid_loss = []  # running-average loss after each batch
    correct = 0
    test_loss = 0.0
    test_total = 0  # BUG FIX: was assigned twice in the original
    net.eval()
    with torch.no_grad():  # no gradients needed during evaluation
        for data in valid_loader:
            images, labels = data
            images, labels = images.to(DEVICE), labels.to(DEVICE)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            loss = cirterion(outputs, labels)
            test_loss += loss.item()
            test_total += labels.size(0)
            correct += (predicted == labels).sum()
            valid_loss.append(float(test_loss / test_total))
    plt.figure()
    plt.plot([index for index in range(len(valid_loss))], valid_loss)
    plt.savefig(str(epoch) + '_valid_loss.jpg')
    print('valid  %d epoch loss: %.3f  acc: %.3f ' % (epoch + 1, test_loss / test_total, 100 * correct / test_total))
    valid_acc.append(100 * correct / test_total)
    # Checkpoint the whole model object once per epoch.
    torch.save(net, "/home/fancy/PythonProgram/exercise/My_alexnet18_" + str(epoch) + ".pth")

print('train_acc: ', train_acc)
print('valid_acc: ', valid_acc)

VGG-16迁移:

import os
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import Dataset
from torchvision import transforms, models
from torch.autograd import Variable
from PIL import Image

# Augmentation / preprocessing pipeline for the VGG16 experiment.
transform = transforms.Compose([
    transforms.Resize(100),  # shorter side -> 100 px
    transforms.RandomVerticalFlip(),
    transforms.RandomCrop(50),  # NOTE(review): 50-px crop then 224-px resized crop upsamples heavily — confirm intended
    transforms.RandomResizedCrop(224),  # VGG16's expected input size
    transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  # map each RGB channel to roughly [-1, 1]
])

class MyDataSet(Dataset):
    """Cats-vs-dogs dataset reading `dog.*.jpg` / `cat.*.jpg` files from one directory.

    The file list is shuffled with a fixed seed and split 70/30 into
    train/validation subsets; ``test=True`` keeps every file.
    """

    def __init__(self, path_dir, transform=None, train=True, test=False):
        super(MyDataSet, self).__init__()
        self.path = path_dir
        self.transform = transform
        # All files found in the directory.
        images = os.listdir(self.path)
        # BUG FIX: np.random.permutation returns a shuffled *copy* — the
        # original code discarded the result, so nothing was ever shuffled.
        np.random.seed(10000)
        images = [str(name) for name in np.random.permutation(images)]

        len_imgs = len(images)
        self.test = test

        if self.test:
            self.images = images                          # test mode: use everything
        elif train:
            self.images = images[: int(0.7 * len_imgs)]   # first 70% -> train
        else:
            self.images = images[int(0.7 * len_imgs):]    # last 30% -> valid

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image_index = self.images[index]
        img_path = os.path.join(self.path, image_index)
        img = Image.open(img_path).convert('RGB')
        # Label from the filename prefix: dog -> 1, cat -> 0.
        # BUG FIX: the original `label = 1 if 'dog' else 0` always produced 1,
        # because a non-empty string literal is truthy.
        label = 1 if image_index.split('.')[0] == 'dog' else 0
        if self.transform is not None:
            img = self.transform(img)
        return img, label

# Hyperparameters.
BATCH_SIZE = 32
epochs = 10
# Use the GPU when one is available.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Train and validation sets read the same directory; MyDataSet splits it 70/30.
dataset_train = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=True)
dataset_valid = MyDataSet('/media/fancy/本地磁盘/DL_data/data/kaggle_cat_dog/train1', transform, train=False)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=BATCH_SIZE, shuffle=True)

# ImageNet-pretrained VGG16 backbone for transfer learning.
net = models.vgg16(pretrained=True)
# Replace the last classifier layer (4096 -> 1000) with a 2-way cat/dog head.
net.classifier[6] = nn.Linear(4096, 2)
# print(net)
# num_ftrs = net.fc.in_features
# net.fc = nn.Linear(num_ftrs, 2)
# exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

# Loss and optimizer.  NOTE(review): `cirterion` is a typo for `criterion`,
# kept because the training loop below references this name.
cirterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)

# Training loop for the VGG16 transfer-learning experiment.
net = net.to(DEVICE)  # BUG FIX: the model was never moved to DEVICE, so CUDA was unused
for epoch in range(epochs):
    # BUG FIX: restore train mode each epoch — net.eval() at the end of the
    # previous epoch otherwise left dropout disabled during training.
    net.train()
    running_loss = 0.0
    train_correct = 0
    train_total = 0
    for i, data in enumerate(train_loader, 0):
        inputs, train_labels = data
        # torch.autograd.Variable has been a deprecated no-op since PyTorch 0.4.
        inputs, labels = inputs.to(DEVICE), train_labels.to(DEVICE)
        optimizer.zero_grad()
        outputs = net(inputs)
        _, train_predicted = torch.max(outputs.data, 1)
        train_correct += (train_predicted == labels.data).sum()
        loss = cirterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        print("epoch: ",  epoch, "batch_size: ", i, "/", len(train_loader), " loss: ", loss.item())
        train_total += train_labels.size(0)

    print('train %d epoch loss: %.3f  acc: %.3f ' % (
    epoch + 1, running_loss / train_total * BATCH_SIZE, 100 * train_correct / train_total))

    # Validation.
    correct = 0
    test_loss = 0.0
    test_total = 0  # BUG FIX: was assigned twice in the original
    net.eval()
    with torch.no_grad():  # no gradients needed during evaluation
        for data in valid_loader:
            images, labels = data
            images, labels = images.to(DEVICE), labels.to(DEVICE)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            loss = cirterion(outputs, labels)
            test_loss += loss.item()
            test_total += labels.size(0)
            correct += (predicted == labels.data).sum()
    print('valid  %d epoch loss: %.3f  acc: %.3f ' % (epoch + 1, test_loss / test_total, 100 * correct / test_total))
    # Checkpoint the whole model object once per epoch.
    torch.save(net, "My_VGG16_" + str(epoch) + ".pth")

好的,你想了解关于 Kaggle 分类问题的 PyTorch 实现,是吗? 首先,你需要下载 Kaggle分类数据集。你可以从这个链接中下载:https://www.kaggle.com/c/dogs-vs-cats/data 接下来,你需要使用 PyTorch 搭建一个卷积神经网络模型。以下是一个简单的示例代码: ```python import torch.nn as nn import torch.optim as optim import torchvision.models as models class CatDogClassifier(nn.Module): def __init__(self): super(CatDogClassifier, self).__init__() self.resnet = models.resnet18(pretrained=True) self.fc = nn.Linear(1000, 2) def forward(self, x): x = self.resnet(x) x = self.fc(x) return x ``` 在这个示例中,我们使用了 ResNet18 模型,并在其之上添加了一个全连接层作为分类器。 接下来,你需要准备数据集。你可以使用 PyTorch 中的 `torchvision.datasets.ImageFolder` 类来读取数据集。以下是一个读取数据集的示例代码: ```python import torchvision.transforms as transforms from torchvision.datasets import ImageFolder data_transforms = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), ]) train_dataset = ImageFolder('data/train', transform=data_transforms) val_dataset = ImageFolder('data/val', transform=data_transforms) ``` 在这个示例中,我们使用了 `Resize` 和 `ToTensor` 转换来对数据进行预处理。 最后,你需要定义损失函数和优化器,并使用 PyTorch 中的`DataLoader`类来加载数据集。以下是一个训练模型的示例代码: ```python from torch.utils.data import DataLoader model = CatDogClassifier() criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.001) train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False) for epoch in range(10): for inputs, labels in train_loader: optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # 在验证集上进行评估 total = 0 correct = 0 with torch.no_grad(): for inputs, labels in val_loader: outputs = model(inputs) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(f"Epoch {epoch}: Accuracy = {correct / total}") ``` 在这个示例中,我们使用了交叉熵损失函数和 Adam 优化器,并进行了10个 epoch 的训练。 希望这个示例对你有所帮助!
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

fancyNSEU

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值