Deep Learning in Practice: Cats vs. Dogs (PyTorch Implementation)

Data Preparation

Cats vs. Dogs dataset download link

The Microsoft dataset is already organized by class, so it can be used directly.

Data Split

We move the cat and dog images into a training set and a validation set: 90% of the images are used for training and 10% for validation, and shutil.move() does the actual file moves.

Create two folders, train and test, put the dataset into train, then use the code below to move 10% of the data into test.

File moving code

import os
import shutil

source_path = r"E:\猫狗大战数据集\PetImages"
train_dir = os.path.join(source_path, "train")
test_dir = os.path.join(source_path, "test")

train_dir_list = os.listdir(train_dir)
for category in train_dir_list:
    category_dir_path = os.path.join(train_dir, category)
    image_file_list = os.listdir(category_dir_path)
    num = int(0.1 * len(image_file_list))
    # Make sure the destination category folder (test/Dog, test/Cat) exists
    os.makedirs(os.path.join(test_dir, category), exist_ok=True)
    # Move 10% of the files of this category into the test directory
    for i in range(num):
        shutil.move(os.path.join(category_dir_path, image_file_list[i]),
                    os.path.join(test_dir, category, image_file_list[i]))

After moving the files:
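To double-check the split, a quick file count per folder can be run. This is only a minimal sketch that assumes the directory layout used above:

import os

source_path = r"E:\猫狗大战数据集\PetImages"
# Count the images in each split/category after the move
for split in ("train", "test"):
    for category in ("Dog", "Cat"):
        folder = os.path.join(source_path, split, category)
        print(split, category, len(os.listdir(folder)), "files")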

Data Visualization

import os
import matplotlib.pyplot as plt
from PIL import Image  # module for reading images

source_path = r"E:\猫狗大战数据集\PetImages"
# Pick 10 images each from the Dog and Cat folders to display
train_Dog_dir = os.path.join(source_path, "train", "Dog")
train_Cat_dir = os.path.join(source_path, "train", "Cat")
Dog_image_list = os.listdir(train_Dog_dir)
Cat_image_list = os.listdir(train_Cat_dir)

show_image = [os.path.join(train_Dog_dir, Dog_image_list[i]) for i in range(10)]
show_image.extend([os.path.join(train_Cat_dir, Cat_image_list[i]) for i in range(10)])
for path in show_image:
    print(path)

# Show the 20 sampled images in a 4x5 grid
plt.figure()
for i in range(1, 21):
    plt.subplot(4, 5, i)
    img = Image.open(show_image[i - 1])
    plt.imshow(img)
plt.show()

Result:

As the figure shows, the images come in different sizes, so they need to be resized during data preprocessing.
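A quick check of the raw image sizes confirms this. The sketch below simply reuses the show_image list from the visualization code above:

from PIL import Image

# Print (width, height) for each sampled image; the sizes vary from file to file
for path in show_image:
    with Image.open(path) as img:
        print(path, img.size)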

Training with a Pretrained Model (ResNet)

import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torchvision.models as models
from torchvision import datasets, transforms
from visdom import Visdom

if __name__ == '__main__':
    # Data preprocessing: resize, center-crop to 128x128, convert to tensor, normalize
    data_transform = transforms.Compose([
        transforms.Resize(128),
        transforms.CenterCrop(128),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    train_dataset = datasets.ImageFolder(root=r'E:/猫狗大战数据集/PetImages/train/', transform=data_transform)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=4)
    test_dataset = datasets.ImageFolder(root=r'E:/猫狗大战数据集/PetImages/test/', transform=data_transform)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=4, shuffle=True, num_workers=4)

    # Loss function
    criteon = nn.CrossEntropyLoss()

    # Load the pretrained model and replace the final fully connected layer with a 2-class head
    transfer_model = models.resnet18(pretrained=True)
    dim_in = transfer_model.fc.in_features
    transfer_model.fc = nn.Linear(dim_in, 2)

    # Adam optimizer
    optimizer = optim.Adam(transfer_model.parameters(), lr=0.01)

    # Move the model to the GPU
    transfer_model = transfer_model.cuda()

    # Visdom windows for the training and test curves
    viz = Visdom()
    viz.line([[0.0, 0.0]], [0.], win='train', opts=dict(title="train_loss&&acc", legend=['loss', 'acc']))
    viz.line([[0.0, 0.0]], [0.], win='test', opts=dict(title="test loss&&acc.", legend=['loss', 'acc']))
    global_step = 0

    # Training loop
    transfer_model.train()
    for epoch in range(10):
        train_acc_num = 0
        test_acc_num = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.cuda(), target.cuda()
            # Forward pass: feed the batch through the model
            logits = transfer_model(data)
            _, pred = torch.max(logits.data, 1)
            loss = criteon(logits, target)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Running training accuracy over the batches seen so far
            train_acc_num += pred.eq(target).float().sum().item()
            train_acc = train_acc_num / ((batch_idx + 1) * len(data))

            global_step += 1
            viz.line([[loss.item(), train_acc]], [global_step], win='train', update='append')
            if batch_idx % 200 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f},acc:{}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item(), train_acc))

        # Evaluate on the test set after each epoch
        test_loss = 0
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            logits = transfer_model(data)
            test_loss += criteon(logits, target).item()
            _, pred = torch.max(logits.data, 1)
            # Accumulate the number of correct test predictions
            test_acc_num += pred.eq(target).float().sum().item()

        test_acc = test_acc_num / len(test_loader.dataset)
        viz.line([[test_loss / len(test_loader.dataset), test_acc]],
                 [global_step], win='test', update='append')
        # Show the last test batch and its predictions in visdom
        viz.images(data.view(-1, 3, 128, 128), win='x')
        viz.text(str(pred.detach().cpu().numpy()), win='pred', opts=dict(title='pred'))

        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, int(test_acc_num), len(test_loader.dataset),
            100. * test_acc))

Train Epoch: 0 [0/22498 (0%)]    Loss: 1.061759,acc:0.25

Train Epoch: 0 [800/22498 (4%)]    Loss: 0.708053,acc:0.5174129353233831

Train Epoch: 0 [1600/22498 (7%)]    Loss: 0.403057,acc:0.5155860349127181

Train Epoch: 0 [2400/22498 (11%)]    Loss: 0.721054,acc:0.5033277870216306

Train Epoch: 0 [3200/22498 (14%)]    Loss: 0.629318,acc:0.5037453183520599
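After training, the fine-tuned weights can be saved with torch.save(transfer_model.state_dict(), 'cat_dog_resnet18.pth') and reused for single-image prediction. The following is only a minimal sketch and not part of the original post: the checkpoint name cat_dog_resnet18.pth and the example image path are assumptions, and the class indices follow ImageFolder, which sorts folder names alphabetically (Cat -> 0, Dog -> 1).

import torch
import torch.nn as nn
import torchvision.models as models
from torchvision import transforms
from PIL import Image

# Rebuild the same architecture and load the fine-tuned weights
# (assumes the training script saved them as 'cat_dog_resnet18.pth')
model = models.resnet18(pretrained=False)
model.fc = nn.Linear(model.fc.in_features, 2)
model.load_state_dict(torch.load('cat_dog_resnet18.pth', map_location='cpu'))
model.eval()

# Same preprocessing as used during training
data_transform = transforms.Compose([
    transforms.Resize(128),
    transforms.CenterCrop(128),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

# Classify a single image (the path here is just a placeholder)
img = Image.open(r'E:/猫狗大战数据集/PetImages/test/Dog/0.jpg').convert('RGB')
x = data_transform(img).unsqueeze(0)  # add the batch dimension
with torch.no_grad():
    pred = model(x).argmax(dim=1).item()
# ImageFolder assigns labels alphabetically: Cat -> 0, Dog -> 1
print('Dog' if pred == 1 else 'Cat')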

In the training script above, visdom is the module used to visualize the loss and accuracy curves.
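Note that the script assumes a Visdom server is already running; it can be started in a separate terminal with the command below, after which the dashboard is available at http://localhost:8097 by default:

python -m visdom.server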
