目标检测学习笔记——kaggle上学习代码

参考链接:Detecting Impact With MMdetection Training

MMDetection training cascade rcnn wheat

Simple YoloX Dataset Generator (COCO-JSON)
Install MMdetection from scratch

Version info.

  1. MMdetection 2.6.0
  2. mmcv-full 1.2.0, torch 1.6, cu102

Because this competition is a Notebook competition, we need to run inference on the .mp4 videos without internet access.

So I made *.whl files to install MMdetection and mmcv-full without internet. You can use these files from the mmdetection-v2.6.0 dataset.

Note: I think we don't need to train without internet, so it's better to set up a local environment for training if you want; this step is only needed for inference using the pretrained weights.

! pip install ../input/mmdetectionv260/addict-2.4.0-py3-none-any.whl
! pip install ../input/mmdetectionv260/mmcv_full-latesttorch1.6.0cu102-cp37-cp37m-manylinux1_x86_64.whl
! pip install ../input/mmdetectionv260/mmpycocotools-12.0.3-cp37-cp37m-linux_x86_64.whl
! pip install ../input/mmdetectionv260/mmdet-2.6.0-py3-none-any.whl

Set up environment
import copy
import json
import os.path as osp
from glob import glob
from tqdm import tqdm

Check Pytorch installation

# Check the PyTorch installation: version string and CUDA availability.
import torch, torchvision
# `torch.version` is a submodule (its repr would be printed); the version
# string lives in `torch.__version__` (the original dunders were stripped by
# the blog's markdown rendering).
print(torch.__version__, torch.cuda.is_available())

from sklearn.model_selection import train_test_split

Check MMDetection installation

# Check the MMDetection installation by printing its version string.
import mmdet
# `mmdet.version` is a module; the installed version string is
# `mmdet.__version__` (dunders lost in the blog's markdown rendering).
print(mmdet.__version__)

Check mmcv installation

# Check the mmcv-full installation: print the CUDA version it was compiled
# against and the compiler used, to confirm they match the local torch/CUDA
# setup (mmcv-full ships compiled CUDA ops, so a mismatch breaks at runtime).
import mmcv
from mmcv.ops import get_compiling_cuda_version, get_compiler_version
from mmcv import Config
print(get_compiling_cuda_version())
print(get_compiler_version())

from mmdet.datasets import build_dataset, CocoDataset
from mmdet.models import build_detector
from mmdet.datasets.builder import DATASETS
from mmdet.datasets.custom import CustomDataset
from mmdet.apis import train_detector, set_random_seed, init_detector, inference_detector

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

import nflimpact

在这里插入图片描述

Data preparation

I referred to 2Class Object Detection Training by @tito

I set ±4 frames around each impact as the impact class

there are two classes

impact : 1 --> impact
impact : 2 --> helmet

# Load the per-frame helmet boxes and widen each labelled impact into an
# "impact window": boxes for the same video/label within +-4 frames of an
# impact frame are also flagged as class 1 (impact).
video_labels = pd.read_csv('../input/nfl-impact-detection/train_labels.csv')
video_labels_with_impact = video_labels[video_labels['impact'] > 0]
for row in tqdm(video_labels_with_impact[['video','frame','label']].values):
    frames = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + row[1]
    video_labels.loc[(video_labels['video'] == row[0])
                                 & (video_labels['frame'].isin(frames))
                                 & (video_labels['label'] == row[2]), 'impact'] = 1
# Build the extracted-frame image name, e.g. "<video>_<frame>.jpg".
# regex=False: replace the literal ".mp4" — the default regex interpretation
# would let "." match any character.
video_labels['image_name'] = video_labels['video'].str.replace('.mp4', '', regex=False) + '_' + video_labels['frame'].astype(str) + '.jpg'
# Keep only frames that contain at least one impact box.
video_labels = video_labels[video_labels.groupby('image_name')['impact'].transform("sum") > 0].reset_index(drop=True)
# Remaining boxes (impact was NaN) become class 2 (helmet).
video_labels.fillna({'impact': 2}, inplace=True)

在这里插入图片描述

# Work on a copy so the prepared video_labels frame stays untouched.
train_labels = video_labels.copy()
train_labels.head()  # notebook cell output: preview the first rows

在这里插入图片描述

unique_image_lists = list(train_labels['image_name'].unique())

Split train validation dataset

# Hold out 5% of the images for validation; the fixed random_state keeps the
# split reproducible across runs.
train_images, valid_images = train_test_split(
    unique_image_lists, test_size=0.05, random_state=42)

# Boolean masks selecting every box belonging to each image subset.
train_isin_filter = train_labels['image_name'].isin(train_images)
valid_isin_filter = train_labels['image_name'].isin(valid_images)

train_df = train_labels[train_isin_filter].reset_index(drop=True)
valid_df = train_labels[valid_isin_filter].reset_index(drop=True)
print(f'train labels: {len(train_df)}, valid labels {len(valid_df)}')

在这里插入图片描述

Generate COCO Format Json

To use MMdetection tool box, we need to set-up COCO Format Json for our dataset.

def gen_classes(CLASSES):
    """Build the COCO `categories` list for the given class names.

    Args:
        CLASSES: iterable of class-name strings.

    Returns:
        A list of dicts of the form ``{'id': <1-based index>, 'name': <name>}``.
    """
    # COCO category ids are 1-based.
    return [{'id': i + 1, 'name': name} for i, name in enumerate(CLASSES)]
def gen_objs(df, debug=False):
    """Build the COCO `images` and `annotations` lists from a label frame.

    Args:
        df: DataFrame with columns image_name, impact, left, top, width,
            height. The index must be 0..len(df)-1 (reset beforehand), since
            rows are accessed positionally below.
        debug: if True, use only the first 5% of rows (quick smoke test).

    Returns:
        (imgs, objs): lists of COCO image dicts and annotation dicts.
    """
    if debug:
        df = df[:int(len(df) * 0.05)]

    img_lists = list(df['image_name'].unique())

    # All extracted frames are 720x1280; if that ever changes, read the real
    # size instead, e.g. cv2.imread(os.path.join(data_path, name)).shape.
    imgs = [
        {'file_name': name, 'height': 720, 'width': 1280, 'id': i + 1}
        for i, name in enumerate(tqdm(img_lists))
    ]

    # Map image name -> 1-based id once, instead of calling list.index() per
    # annotation row (which made the loop below O(n^2)).
    img_ids = {name: i + 1 for i, name in enumerate(img_lists)}

    objs = []
    for j in tqdm(range(len(df))):
        objs.append({
            'id': j + 1,
            'image_id': img_ids[df['image_name'][j]],
            # Customize here if you add more 'impact' classes.
            'category_id': int(df['impact'][j]),
            'area': float(df['width'][j] * df['height'][j]),
            # COCO bbox format: [x, y, width, height].
            'bbox': [int(df['left'][j]), int(df['top'][j]),
                     int(df['width'][j]), int(df['height'][j])],
            'iscrowd': 0,
        })

    print(f'images: {len(imgs)}, objs: {len(objs)}')

    return imgs, objs
def gen_coco(outpath, classes, objs, imgs, train=True):
    """Dump a COCO-format json file.

    Args:
        outpath: destination path of the json file.
        classes: COCO `categories` list.
        objs: COCO `annotations` list (ignored when ``train`` is False).
        imgs: COCO `images` list.
        train: include annotations (True) or emit an image-only json (False).
    """
    if train:
        data_dict = {
            'images': list(imgs),
            'annotations': list(objs),
            'categories': list(classes),
        }
    else:
        # Test-style json: images and categories only, no ground truth.
        data_dict = {
            'images': list(imgs),
            'categories': list(classes),
        }

    with open(outpath, 'w') as f_out:
        json.dump(data_dict, f_out)

# Two detection classes: category id 1 = impact, 2 = helmet (the same values
# written into the 'impact' column during data preparation).
CLASSES = ['impact', 'helmet']
classes = gen_classes(CLASSES)
classes  # notebook cell output: inspect the generated category dicts

在这里插入图片描述

# Build COCO image/annotation lists for the train and validation splits.
train_imgs, train_objs = gen_objs(train_df)
valid_imgs, valid_objs = gen_objs(valid_df)

在这里插入图片描述

# valid.json is written with train=True as well, so it carries annotations
# and can be used for evaluation during training.
gen_coco('train.json', classes, train_objs, train_imgs, train=True)
gen_coco('valid.json', classes, valid_objs, valid_imgs, train=True)

Set Dataset

This class is from MMdetection COCO.py

I just set CLASSES only impact, helmet, So you can customize if you want.

@DATASETS.register_module()
class ImpactDataset(CocoDataset):
    """CocoDataset restricted to the two competition classes.

    Only the inherited CLASSES attribute is overridden. CocoDataset maps
    category ids to training labels by the ORDER of this sequence, so it must
    be an ordered tuple -- the original ``set(CLASSES)`` has no guaranteed
    iteration order and could silently swap the class/label mapping.
    """
    CLASSES = tuple(CLASSES)

Model build

In this notebook, I used cascade_rcnn_r50_fpn as the baseline. I trained for 9 epochs (the public LB score value was left blank in the original post). config_file is from the nfl_baseline_cascade_rcnn dataset which I made for this notebook.

I saved 9epochs pretrained model with configs

# Load the cascade_rcnn_r50_fpn baseline config and point it at the local
# frame images and the COCO json files generated above.
config_file = '../input/nflbaselinecascadercnn/cascade_rcnn_r50_fpn.py'
cfg = Config.fromfile(config_file)
cfg.total_epochs = 1
cfg.work_dir = './'
cfg.seed = 0
set_random_seed(0, deterministic=False)
cfg.gpu_ids = range(1)  # single-GPU training
cfg.data_root = '../input/nfl-frame/'

cfg.data.train.ann_file = './train.json'
cfg.data.train.img_prefix = cfg.data_root

# NOTE: the original lines ended with a stray comma, which made ann_file a
# 1-tuple ('./valid.json',) instead of a path string.
cfg.data.val.ann_file = './valid.json'
cfg.data.val.img_prefix = cfg.data_root

# Validation data is reused as the test set; test-set inference is not the
# goal of this notebook.
cfg.data.test.ann_file = './valid.json'
cfg.data.test.img_prefix = cfg.data_root

You can check config

print(f'Config:\n{cfg.pretty_text}')

在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述

Train model

# Build dataset
datasets = [build_dataset(cfg.data.train)]

# Build the detector
model = build_detector(
    cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
# Add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES

# Create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
train_detector(model, datasets, cfg, distributed=False, validate=True)

在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述

Test video

Let’s test video and visualize
# Visualize detections on one test video as a matplotlib animation.
fig = plt.figure()

# Path to the pretrained checkpoint saved alongside the config.
checkpoint_file = '../input/nflbaselinecascadercnn/epoch_9.pth' # NOTE(review): file says epoch 9, original comment said "10 epochs" -- presumably counted inclusively; confirm

# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')

# test a video and show the results
video = mmcv.VideoReader('../input/nfl-impact-detection/test/57906_000718_Endzone.mp4')

ims = []  # one rendered frame per animation step

# Run per-frame inference, draw the boxes, and collect the rendered frames.
for frame in video:
    result = inference_detector(model, frame)
    single_img = model.show_result(frame, result, wait_time=1)
    im = plt.imshow(single_img, animated=True)
    ims.append([im])

ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
                                repeat_delay=1000)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值