365 Days of Deep Learning, P5: Sneaker Recognition - Notes and Takeaways

This post records my work on the P5 sneaker-recognition exercise from the 365-day deep-learning practice series. It uses the PyTorch framework, implements model training and tuning in Python, and discusses the challenges met in this image-classification task and how they were solved.
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision
import torch.nn.functional as F
from torchvision import transforms,datasets
import os,PIL,pathlib,random
from PIL import Image
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)#use the GPU if one is available, otherwise fall back to the CPU
dizhi='D://BaiduNetdiskDownload//深度学习p5'#root directory of the downloaded dataset (contains the train/ and test/ folders)
data_dir=dizhi
data_dir=pathlib.Path(data_dir)
data_paths=list(data_dir.glob('*'))
print(data_paths)#[WindowsPath('D:/BaiduNetdiskDownload/深度学习p5/test'), WindowsPath('D:/BaiduNetdiskDownload/深度学习p5/train')]
classeNames=[str(path).split('\\')[-1] for path in data_paths]
print(classeNames)#keep only the last path component of each entry: ['test', 'train']
train_transforms=transforms.Compose([
    transforms.Resize([224,224]),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485,0.456,0.406],
        std=[0.229,0.224,0.225]
    )
])#I made a mistake here at first: transforms.Compose() takes a single list, so the individual transforms such as transforms.ToTensor() must be wrapped in [], which I had left out
test_transforms=transforms.Compose([
    transforms.Resize([224,224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485,0.456,0.406],
        std=[0.229,0.224,0.225]
    )
])
train_dataset=datasets.ImageFolder('D://BaiduNetdiskDownload//深度学习p5//train//',transform=train_transforms)
test_dataset=datasets.ImageFolder('D://BaiduNetdiskDownload//深度学习p5//test//',transform=test_transforms)
#ImageFolder reads the images from each class sub-folder and applies the transform pipelines defined above
print(train_dataset.class_to_idx)#class_to_idx maps each folder name to a label: {'adidas': 0, 'nike': 1}, i.e. two classes
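#A small check of my own (not part of the original exercise): invert class_to_idx so that a predicted
#index can be mapped back to a folder name, and confirm how many images each split contains.
idx_to_class={v:k for k,v in train_dataset.class_to_idx.items()}
print(idx_to_class)#expected: {0: 'adidas', 1: 'nike'}
print(len(train_dataset),len(test_dataset))#number of training / test images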

batch_size=32
train_dl=torch.utils.data.DataLoader(train_dataset,
                                     batch_size=batch_size,
                                     shuffle=True,
                                     num_workers=0)#batch the data (32 samples per batch here) and shuffle the training set every epoch
test_dl=torch.utils.data.DataLoader(test_dataset,
                                    batch_size=batch_size,
                                    shuffle=False,
                                    num_workers=0)
for X,y in test_dl:
    print('SHAPE OF X[N,C,H,W]:',X.shape)#e.g. torch.Size([32, 3, 224, 224]); the four dims (N, C, H, W) are (batch size, channels, height, width)
    print('SHAPE OF y:',y.shape,y.dtype)#e.g. torch.Size([32]) torch.int64, one integer class label per image
    break
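#Quick sanity check (my own addition): how many batches each DataLoader yields at batch_size=32.
print('train batches:',len(train_dl),'test batches:',len(test_dl))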

class Model(nn.Module):

    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 12, kernel_size=5, padding=0),
            nn.BatchNorm2d(12),
            nn.ReLU()
        )#each conv block applies a convolution, then batch normalization, then a ReLU activation
        self.conv2 = nn.Sequential(
            nn.Conv2d(12, 12, kernel_size=5, padding=0),
            nn.BatchNorm2d(12),
            nn.ReLU()
        )
        self.pool3 = nn.Sequential(
            nn.MaxPool2d(2)
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(12, 24, kernel_size=5, padding=0),
            nn.BatchNorm2d(24),
            nn.ReLU()
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(24, 24, kernel_size=5, padding=0),
            nn.BatchNorm2d(24),
            nn.ReLU()
        )
        self.pool6 = nn.Sequential(
            nn.MaxPool2d(2)
        )
        self.dropout = nn.Sequential(
            nn.Dropout(0.2)
        )#dropout randomly zeroes 20% of the features to reduce overfitting
        self.fc = nn.Sequential(
            nn.Linear(24 * 50 * 50, len(classeNames))#len(classeNames) is 2 here (the 'test'/'train' folder names), which happens to equal the number of classes (adidas/nike)
        )

    def forward(self, x):
        batch_size = x.size(0)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.pool6(x)
        x = self.dropout(x)
        x = x.view(batch_size, -1)  # flatten to (batch_size, 24*50*50)
        x = self.fc(x)
        return x
model=Model().to(device)
print(model)
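#A minimal sketch (my own verification, assuming the 224x224 inputs above): trace the spatial size
#through the network to see where the 24*50*50 in the fully connected layer comes from.
#Each 5x5 convolution with padding=0 shrinks height/width by 4 and each MaxPool2d(2) halves them:
#224 -> 220 -> 216 -> 108 -> 104 -> 100 -> 50.
with torch.no_grad():
    dummy=torch.zeros(1,3,224,224).to(device)
    feat=model.pool6(model.conv5(model.conv4(model.pool3(model.conv2(model.conv1(dummy))))))
    print(feat.shape)#expected: torch.Size([1, 24, 50, 50]), i.e. 24*50*50=60000 flattened features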

def train(dataloader,model,loss_fn,optimizer):
    size=len(dataloader.dataset)#total number of training images
    num_batches=len(dataloader)#number of batches per epoch
    train_acc,train_loss=0,0
    for X,y in dataloader:
        X,y=X.to(device),y.to(device)

        pred=model(X)#forward pass
        loss=loss_fn(pred,y)#loss against the true labels

        optimizer.zero_grad()#clear old gradients
        loss.backward()#backpropagate
        optimizer.step()#update the weights

        train_acc+=(pred.argmax(1)==y).type(torch.float).sum().item()
        train_loss+=loss.item()
    train_acc/=size#average accuracy over all samples
    train_loss/=num_batches#average loss over all batches
    return train_acc,train_loss

def test(dataloader,model,loss_fn):
    size=len(dataloader.dataset)
    num_batches=len(dataloader)
    test_acc,test_loss=0,0
    with torch.no_grad():#no gradients are needed during evaluation
        for imgs,target in dataloader:
            imgs,target=imgs.to(device),target.to(device)
            pred=model(imgs)
            loss=loss_fn(pred,target)
            test_acc+=(pred.argmax(1)==target).type(torch.float).sum().item()
            test_loss+=loss.item()#accumulate; the original used '=' here, which kept only the last batch's loss
    test_acc/=size
    test_loss/=num_batches
    return test_acc,test_loss


learn_rate=1e-4
lambda1=lambda epoch:(0.92**(epoch//2))
optimizer=torch.optim.Adam(model.parameters(),lr=learn_rate)
scheduler=torch.optim.lr_scheduler.LambdaLR(optimizer,lr_lambda=lambda1)
#learning-rate decay: the base rate is multiplied by 0.92 every two epochs, so the model learns quickly at first and the updates become smaller as training goes on
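#Illustration of the decay rule above (my own check): the factor 0.92**(epoch//2) multiplies the
#base learning rate, so the effective rate drops by 8% every two epochs.
for e in range(0,20,4):
    print('epoch',e,'lr =',learn_rate*(0.92**(e//2)))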
loss_fn=nn.CrossEntropyLoss()
epochs=20
train_acc=[]
train_loss=[]
test_acc=[]
test_loss=[]

for epoch in range(epochs):

    model.train()
    epoch_train_acc,epoch_train_loss=train(train_dl,model,loss_fn,optimizer)
    scheduler.step()
    model.eval()
    epoch_test_acc,epoch_test_loss=test(test_dl,model,loss_fn)
    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    lr=optimizer.state_dict()['param_groups'][0]['lr']
    template=('Epoch:{:2d},train_acc:{:.1f}%,train_loss:{:.3f},test_acc:{:.1f}%,test_loss:{:.3f},Lr:{:.2E}')
    print(template.format(epoch+1,epoch_train_acc*100,epoch_train_loss,epoch_test_acc*100,epoch_test_loss,lr))
print('Done')

plt.rcParams['font.sans-serif']=['SimHei']#use a font that can display Chinese characters
plt.rcParams['axes.unicode_minus']=False#make the minus sign render correctly with this font
plt.rcParams['figure.dpi']=100
epoch_range=range(epochs)
plt.figure(figsize=[20,10])

plt.subplot(1,2,1)
plt.plot(epoch_range,train_acc,label='Training Accuracy')
plt.plot(epoch_range,test_acc,label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Test Accuracy')

plt.subplot(1,2,2)
plt.plot(epoch_range,train_loss,label='Training Loss')
plt.plot(epoch_range,test_loss,label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Test Loss')

plt.show()
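#Optional summary (my own addition): report the best test accuracy reached and the epoch it occurred in.
best_epoch=test_acc.index(max(test_acc))
print(f'best test accuracy: {max(test_acc)*100:.1f}% at epoch {best_epoch+1}')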

classes=list(train_dataset.class_to_idx)#['adidas', 'nike']
def predict_one_image(image_path,model,transform,classes):
    test_img=Image.open(image_path).convert('RGB')#force three RGB channels so the transform pipeline gets the expected input
    plt.imshow(test_img)
    test_img=transform(test_img)
    img=test_img.to(device).unsqueeze(0)
    model.eval()
    output=model(img)
    _,pred=torch.max(output,1)#index of the largest logit, i.e. the predicted class
    pred_class=classes[pred]
    print(f"Predicted class: {pred_class}")

predict_one_image(image_path='D://BaiduNetdiskDownload//深度学习p5//test//adidas//1.jpg',
                  model=model,
                  transform=train_transforms,#note: test_transforms (no random flip) would be the deterministic choice for inference
                  classes=classes)
#PATH='./model.pth'
#torch.save(model.state_dict(),PATH)#save the model parameters to PATH
#model.load_state_dict(torch.load(PATH,map_location=device))#load the saved parameters back into the model
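#A concrete version of the commented lines above (my own sketch): save the trained weights and
#reload them into a fresh model instance.
PATH='./model.pth'
torch.save(model.state_dict(),PATH)#write only the parameters to disk
new_model=Model().to(device)#a fresh, untrained instance
new_model.load_state_dict(torch.load(PATH,map_location=device))#restore the trained parameters
new_model.eval()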

 
