AI Summer Camp (Session 2) - CV Practice Competition - Datawhale (Optimization)

Second check-in

Practice task: Farmer Identity Recognition Challenge

https://challenge.xfyun.cn/topic/info?type=peasant-identity&ch=ymfk4uU

The previous check-in covered the pipeline in detail, so this one focuses on optimization.

# 0. Extract the dataset
import os
!unzip -qo -O utf-8 data/data233283/农民身份识别挑战赛公开数据.zip
# The extracted directory name is GBK-encoded; the octal escapes below spell out
# that byte sequence so the shell can rename it to an ASCII-safe path.
!mv $'\305\251\303\361\311\355\267\335\312\266\261\360\314\364\325\275\310\374\271\253\277\252\312\375\276\335' mydata
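The octal escape string above hardcodes the GBK bytes of the extracted directory name, which is brittle. A sketch of a more robust rename from Python, assuming the archive produces exactly one non-ASCII directory in the working directory:

# Sketch: rename whatever non-ASCII directory the archive produced (assumes one match)
import os, glob
dirs = [d for d in glob.glob('*') if os.path.isdir(d) and not d.isascii()]
if len(dirs) == 1 and not os.path.exists('mydata'):
    os.rename(dirs[0], 'mydata')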
import os, sys, glob, argparse
import pandas as pd
import numpy as np
from tqdm import tqdm

import cv2, time
from PIL import Image
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold

import paddle
paddle.seed(0)

import paddle.vision.models as models
import paddle.vision.transforms as transforms
import paddle.vision.datasets as datasets
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.optimizer as optim
from paddle.autograd import PyLayer
from paddle.io import Dataset
import warnings
warnings.filterwarnings('ignore')

# Check if GPU is available
use_gpu = paddle.is_compiled_with_cuda()
if use_gpu:
    paddle.set_device("gpu")
else:
    paddle.set_device("cpu")


!pip install albumentations

import albumentations as A
# Read the dataset
train_path = glob.glob('./mydata/train/*')
test_path = glob.glob('./mydata/test/*')

train_path.sort()
test_path.sort()

train_df = pd.read_csv('mydata/train.csv')
train_df = train_df.sort_values(by='name')
train_label = train_df['label'].values
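
Because train_path and train_df are sorted independently, it is worth confirming that image paths and labels actually line up before training. A minimal sanity check, assuming the CSV's name column stores bare file names:

# Sanity check (assumption: train_df['name'] holds file names matching train_path)
assert len(train_path) == len(train_df)
assert all(os.path.basename(p) == n for p, n in zip(train_path, train_df['name']))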

# Custom dataset
# with an in-memory image cache to avoid re-reading files from disk
DATA_CACHE = {}
class XunFeiDataset(Dataset):
    def __init__(self, img_path, img_label, transform=None):
        self.img_path = img_path
        self.img_label = img_label
        self.transform = transform
    def __getitem__(self, index):
        # Serve the decoded image from the cache when possible
        if self.img_path[index] in DATA_CACHE:
            img = DATA_CACHE[self.img_path[index]]
        else:
            img = cv2.imread(self.img_path[index])
            DATA_CACHE[self.img_path[index]] = img
        if self.transform is not None:
            img = self.transform(image=img)['image']
        # HWC -> CHW, as expected by paddle's conv layers
        img = img.transpose([2, 0, 1])
        return img, paddle.to_tensor(np.array(self.img_label[index]))
    def __len__(self):
        return len(self.img_path)
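
DATA_CACHE grows without bound and will eventually hold every decoded training image in RAM. A sketch of a bounded alternative, where the maxsize value is an assumption to tune against available memory:

from functools import lru_cache

@lru_cache(maxsize=2048)  # assumption: ~2k decoded images fit in RAM
def load_image(path):
    return cv2.imread(path)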
        
# Training set: last 1000 images are held out for validation
train_loader = paddle.io.DataLoader(
    XunFeiDataset(train_path[:-1000], train_label[:-1000],
            A.Compose([
            A.RandomRotate90(),
            A.Resize(512, 512),
            A.RandomCrop(224, 224),
            A.HorizontalFlip(p=0.5),
            A.RandomBrightnessContrast(brightness_limit=0, p=0.5),  # equivalent of the deprecated A.RandomContrast
            A.RandomBrightnessContrast(p=0.5),
            A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
        ])
    ), batch_size=30, shuffle=True, num_workers=1
)

# Validation set (note: A.CenterCrop(224, 224) would make evaluation deterministic)
val_loader = paddle.io.DataLoader(
    XunFeiDataset(train_path[-1000:], train_label[-1000:],
            A.Compose([
            A.Resize(512, 512),
            A.RandomCrop(224, 224),
            A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
        ])
    ), batch_size=30, shuffle=False, num_workers=1
)

# Test set: random flip/contrast are kept deliberately so that repeated
# predictions can be averaged (test-time augmentation, see below)
test_loader = paddle.io.DataLoader(
    XunFeiDataset(test_path, [0] * len(test_path),
            A.Compose([
            A.Resize(512, 512),
            A.RandomCrop(224, 224),
            A.HorizontalFlip(p=0.5),
            A.RandomBrightnessContrast(brightness_limit=0, p=0.5),  # equivalent of the deprecated A.RandomContrast
            A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
        ])
    ), batch_size=2, shuffle=False, num_workers=1
)
class XunFeiNet(paddle.nn.Layer):
    def __init__(self):
        super(XunFeiNet, self).__init__()
        # ResNet-18 backbone with ImageNet pretrained weights
        model = models.resnet18(pretrained=True)
        model.avgpool = paddle.nn.AdaptiveAvgPool2D(1)
        # Replace the classification head: 25 farmer-identity classes
        model.fc = nn.Linear(512, 25)
        self.resnet = model
    def forward(self, img):
        out = self.resnet(img)
        return out

model = XunFeiNet()
criterion = nn.CrossEntropyLoss()
optimizer = paddle.optimizer.AdamW(parameters=model.parameters(), learning_rate=0.001)
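
A further optimization worth trying (an assumption, not part of this run) is decaying the learning rate across the 50 epochs; paddle's schedulers plug into the optimizer directly:

# Sketch: cosine learning-rate decay over 50 epochs (values are assumptions to tune)
scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.001, T_max=50)
optimizer = paddle.optimizer.AdamW(parameters=model.parameters(), learning_rate=scheduler)
# then call scheduler.step() once per epoch inside the training loop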
# Model training
def train(train_loader, model, criterion, optimizer):
    model.train()
    train_loss = 0.0
    for i, data in enumerate(train_loader()):
        input, target = data
        input = paddle.to_tensor(input)
        target = paddle.to_tensor(target)
        output = model(input)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        optimizer.clear_grad()

        if i % 100 == 0:
            print('Train loss', float(loss))
            
        train_loss += float(loss)
    return train_loss/len(train_loader)
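
Since this check-in is about optimization: on GPU, automatic mixed precision can shorten each epoch noticeably. A minimal sketch of how a single training step would change, assuming paddle's AMP utilities (not used in the original run):

# Sketch: mixed-precision variant of a single training step (GPU assumed)
scaler = paddle.amp.GradScaler()

def train_step_amp(input, target, model, criterion, optimizer, scaler):
    with paddle.amp.auto_cast():      # forward pass in float16 where safe
        output = model(input)
        loss = criterion(output, target)
    scaler.scale(loss).backward()     # scale the loss to avoid float16 underflow
    scaler.step(optimizer)            # unscales gradients, then steps the optimizer
    scaler.update()
    optimizer.clear_grad()
    return float(loss)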

# Model validation
def validate(val_loader, model, criterion):
    model.eval()
    val_acc = 0.0
    
    for i, data in enumerate(val_loader()):
        input, target = data
        input = paddle.to_tensor(input)
        target = paddle.to_tensor(target)
        output = model(input)
        loss = criterion(output, target)
            
        val_acc += float((output.argmax(1) == target).sum())
            
    return val_acc / len(val_loader.dataset)
    
for epoch in range(50):
    train_loss = train(train_loader, model, criterion, optimizer)
    val_acc = validate(val_loader, model, criterion)
    train_acc = validate(train_loader, model, criterion)
    print(f'Epoch {epoch}: loss={train_loss:.4f} train_acc={train_acc:.4f} val_acc={val_acc:.4f}')
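
With 50 epochs the final weights are not necessarily the best ones; a small addition worth considering is to checkpoint on the best validation accuracy (the file name is an assumption):

# Sketch: keep the checkpoint with the best validation accuracy
best_acc = 0.0
for epoch in range(50):
    train_loss = train(train_loader, model, criterion, optimizer)
    val_acc = validate(val_loader, model, criterion)
    if val_acc > best_acc:
        best_acc = val_acc
        paddle.save(model.state_dict(), 'best_model.pdparams')  # path is an assumption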
# Model prediction
def predict(test_loader, model, criterion):
    model.eval()
    
    test_pred = []
    for i, data in enumerate(test_loader()):
        input, target = data
        input = paddle.to_tensor(input)
        target = paddle.to_tensor(target)
        output = model(input)
        test_pred.append(output.numpy())
            
    return np.vstack(test_pred)
    
# Predict on the test set several times and sum the outputs (test-time augmentation)
pred = None
for _ in range(3):
    if pred is None:
        pred = predict(test_loader, model, criterion)
    else:
        pred += predict(test_loader, model, criterion)
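
Summing raw logits works, but averaging softmax probabilities is the more common TTA formulation and is robust to logit scale; a sketch using the same predict function and the F alias imported above:

# Sketch: average class probabilities across TTA rounds instead of summing logits
probs = None
for _ in range(3):
    logits = paddle.to_tensor(predict(test_loader, model, criterion))
    p = F.softmax(logits, axis=1).numpy()
    probs = p if probs is None else probs + p
# probs.argmax(1) then yields the TTA-averaged prediction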
submit = pd.DataFrame(
    {
        'name': [x.split('/')[-1] for x in test_path],
        'label': pred.argmax(1)
    })

# Generate the submission file
submit = submit.sort_values(by='name')
submit.to_csv('submit.csv', index=None)

This round of optimization mainly changed the number of training epochs and the image sizes.
