Pedestrian Detection: Caltech Pedestrian Dataset Download and Format Conversion

Caltech Pedestrian Dataset

The dataset is provided by the California Institute of Technology (Caltech).
It mainly includes:

  1. Training and test sets: data in .seq format;
  2. Pedestrian annotations: data in .vbb (video bounding box) format, which holds the pedestrian bounding boxes for the videos in item 1.
    Since training mainly needs image-format data, the .seq and .vbb files have to be converted into images and VOC xml annotations; the target layout is sketched right after this list.
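
The conversion scripts below assume a VOC-style output layout roughly like the following (the rootdir prefix is wherever the dataset was unpacked; the _filtered folders are produced by the filtering script at the end):

CaltechPedestrian/
├── set00 ... set10/            # unpacked .seq videos
├── annotations/                # unpacked .vbb files, one subfolder per set
└── caltech_voc/
    ├── JPEGImages/             # frames extracted from .seq (seq2jpg.py)
    ├── Annotations/            # VOC xml converted from .vbb (vbb2voc.py)
    ├── JPEGImages_filtered/    # images kept after the size filter
    └── Annotations_filtered/   # xml files kept after the size filter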

Dataset Download

Google Drive: Caltech Pedestrian Dataset

Dataset download script

#!/bin/bash

# Get the annotation archive from Google Drive
annolist=(https://drive.google.com/file/d/1EsAL5Q9FfOQls28qYmr2sO6rha1d4YVz/view?usp=sharing)
for dir in ${annolist[@]};do
    echo ${dir}
    echo ${dir:32:33}
    URL="https://drive.google.com/u/0/uc?export=download&id=${dir:32:33}"
    wget --load-cookies /tmp/cookies.txt "https://drive.google.com/u/0/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate $URL -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=${dir:32:33}" -O anno.zip && rm -rf /tmp/cookies.txt
    unzip anno.zip
done
rm -rf anno.zip

# USA set00-set10
setlist=(https://drive.google.com/file/d/1tPeaQr1cVmSABNCJQsd8OekOZIjpJivj/view?usp=sharing
https://drive.google.com/file/d/1apo5VxoZA5m-Ou4GoGR_voUgLN0KKc4g/view?usp=sharing
https://drive.google.com/file/d/1yvfjtQV6EnKez6TShMZQq_nkGyY9XA4q/view?usp=sharing
https://drive.google.com/file/d/1jvF71hw4ztorvz0FWurtyCBs0Dy_Fh0A/view?usp=sharing
https://drive.google.com/file/d/11Q7uZcfjHLdwpLKwDQmr5gT8LoGF82xY/view?usp=sharing
https://drive.google.com/file/d/1Q0pnxM5cnO8MJJdqzMGIEryZaEKk_Un_/view?usp=sharing
https://drive.google.com/file/d/1ft6clVXKdaxFGeihpth_jdBQxOIirSk7/view?usp=sharing
https://drive.google.com/file/d/1-E_B3iAPQKTvkZ8XyuLcE2Lytog3AofW/view?usp=sharing
https://drive.google.com/file/d/1oXCaTPOV0UYuxJJrxVtY9_7byhOLTT8G/view?usp=sharing
https://drive.google.com/file/d/1f0mpL2C2aRoF8bVex8sqWaD8O3f9ZgfR/view?usp=sharing
https://drive.google.com/file/d/18TvsJ5TKQYZRlj7AmcIvilVapqAss97X/view?usp=sharing
)

for setdir in ${setlist[@]};do
    echo ${setdir}
    echo ${setdir:32:33}
    URL="https://drive.google.com/u/0/uc?export=download&id=${setdir:32:33}"
    wget --load-cookies /tmp/cookies.txt "https://drive.google.com/u/0/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate $URL -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=${setdir:32:33}" -O set.tar && rm -rf /tmp/cookies.txt
    tar -xvf set.tar
done
rm -rf set.tar
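
If the cookie/confirm trick above stops working (Google Drive changes it from time to time), the same files can be fetched with the third-party gdown package instead. A minimal sketch, assuming gdown is installed (pip install gdown) and is recent enough to support fuzzy share-URL parsing:

# alternative_download.py - assumes `pip install gdown`
import gdown

# share URLs taken from the bash script above; add the remaining set URLs as needed
urls = [
    "https://drive.google.com/file/d/1EsAL5Q9FfOQls28qYmr2sO6rha1d4YVz/view?usp=sharing",  # annotations
    "https://drive.google.com/file/d/1tPeaQr1cVmSABNCJQsd8OekOZIjpJivj/view?usp=sharing",  # set00
]

for url in urls:
    # fuzzy=True lets gdown pull the file id out of a share URL by itself
    gdown.download(url, quiet=False, fuzzy=True)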


Data Format Conversion (to VOC)

seq to jpg

Script: seq2jpg.py

#!/usr/bin/env python
# encoding: utf-8
'''
@Author  : pentiumCM
@Email   : 842679178@qq.com
@Software: PyCharm
@File    : seq_process.py
@Time    : 2020/9/7 21:44
@desc    : split Caltech .seq video files into jpg images
'''

# Deal with .seq format for video sequence
# The .seq file is combined with images,
# so I split the file into several images with the image prefix
# "\xFF\xD8\xFF\xE0\x00\x10\x4A\x46\x49\x46".

import os.path
import fnmatch
import shutil


def open_save(file, savepath):
    """
    read .seq file, and save the images into the savepath

    :param file: path to the .seq file
    :param savepath: prefix used for the saved image files
    :return:
    """

    # read one .seq file and split it into images saved under savepath
    f = open(file, 'rb+')
    # decode the raw bytes of the .seq file into a str
    string = f.read().decode('latin-1')

    # splitstring is the JPEG header prefix; a .seq file can be seen as several
    # jpg images concatenated together, each one starting with this prefix
    splitstring = "\xFF\xD8\xFF\xE0\x00\x10\x4A\x46\x49\x46"

    # split() leaves an empty first element (the .seq header that precedes the
    # first JPEG prefix), so that element is skipped below; for example:
    """
    >>> a = ".12121.3223.4343"
    >>> a.split('.')
    ['', '12121', '3223', '4343']
    """
    # split .seq file into segment with the image prefix
    strlist = string.split(splitstring)
    f.close()
    count = 0
    # delete the image folder path if it exists
    # if os.path.exists(savepath):
    #     shutil.rmtree(savepath)
    # create the image folder path
    # if not os.path.exists(savepath):
    #     os.makedirs(savepath)
    # deal with file segment, every segment is an image except the first one
    for img in strlist:
        filename = str(count) + '.jpg'
        filenamewithpath = savepath + '_' + filename  # savepath is a file-name prefix, not a folder
        # abandon the first one, which is filled with .seq header
        if count > 0:
            i = open(filenamewithpath, 'wb+')
            i.write(splitstring.encode('latin-1'))
            i.write(img.encode('latin-1'))
            i.close()
        count += 1


if __name__ == "__main__":
    rootdir = "/workspace/dataset/zfjuan/data/CaltechPedestrian/"
    saveroot = "/workspace/dataset/zfjuan/data/CaltechPedestrian/caltech_voc/JPEGImages"

    # create the output folder first, since open_save only writes files into it
    if not os.path.exists(saveroot):
        os.makedirs(saveroot)

    # walk in the rootdir, take down the .seq filename and filepath
    for parent, dirnames, filenames in os.walk(rootdir):
        for filename in filenames:
            # check .seq file with suffix
            # fnmatch ("filename match") tests whether a filename matches a pattern
            if fnmatch.fnmatch(filename, '*.seq'):
                # take down the filename with path of .seq file
                thefilename = os.path.join(parent, filename)
                # create the image folder by combining .seq file path with .seq filename
                parent_path = parent
                parent_path = parent_path.replace('\\', '/')
                thesavepath = saveroot + '/' + parent_path.split('/')[-1] + '_' + filename.split('.')[0]
                print("Filename=" + thefilename)
                print("Savepath=" + thesavepath)
                open_save(thefilename, thesavepath)
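
Once the script has run, JPEGImages holds frames named like set00_V000_1.jpg. A quick sanity check, sketched here with the same saveroot path as above, counts how many frames were extracted per video:

# count_frames.py - count extracted frames per <set>_<video> prefix
import glob
import os
from collections import Counter

saveroot = "/workspace/dataset/zfjuan/data/CaltechPedestrian/caltech_voc/JPEGImages"

counts = Counter()
for jpg in glob.glob(os.path.join(saveroot, "*.jpg")):
    # file names look like set00_V000_123.jpg, so the prefix is set00_V000
    prefix = "_".join(os.path.basename(jpg).split("_")[:2])
    counts[prefix] += 1

for prefix, num in sorted(counts.items()):
    print(prefix, num)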

vbb to VOC xml

Script: vbb2voc.py

# -*- coding:utf-8 -*-
import os, glob
from scipy.io import loadmat
from collections import defaultdict
import numpy as np
from lxml import etree, objectify


def vbb_anno2dict(vbb_file, cam_id):
    filename = os.path.splitext(os.path.basename(vbb_file))[0]

    # dict mapping frame name -> annotation info
    annos = defaultdict(dict)
    vbb = loadmat(vbb_file)
    # object info in each frame: id, pos, occlusion, lock, posv
    objLists = vbb['A'][0][0][1][0]
    objLbl = [str(v[0]) for v in vbb['A'][0][0][4][0]]     # all object labels in this vbb file
    # indices of objects whose label is "person"
    person_index_list = np.where(np.array(objLbl) == "person")[0]   # only keep 'person' objects
    for frame_id, obj in enumerate(objLists):
        if len(obj) > 0:
            frame_name = str(cam_id) + "_" + str(filename) + "_" + str(frame_id+1) + ".jpg"
            annos[frame_name] = defaultdict(list)
            annos[frame_name]["id"] = frame_name
            annos[frame_name]["label"] = "person"
            for id, pos, occl in zip(obj['id'][0], obj['pos'][0], obj['occl'][0]):
                id = int(id[0][0]) - 1  # for matlab start from 1 not 0
                if not id in person_index_list:  # only use bbox whose label is person
                    continue
                pos = pos[0].tolist()
                occl = int(occl[0][0])
                annos[frame_name]["occlusion"].append(occl)
                annos[frame_name]["bbox"].append(pos)
            if not annos[frame_name]["bbox"]:
                del annos[frame_name]
    print(annos)
    return annos


def instance2xml_base(anno, bbox_type='xywh'):
    """bbox_type: format of the stored bbox. 'xywh' (the Caltech default: xmin, ymin, width, height) is converted to corner coordinates; 'xyxy' means the bbox already holds (xmin, ymin, xmax, ymax)."""
    assert bbox_type in ['xyxy', 'xywh']
    E = objectify.ElementMaker(annotate=False)
    anno_tree = E.annotation(
        E.folder('VOC2014_instance/person'),
        E.filename(anno['id']),
        E.source(
            E.database('Caltech pedestrian'),
            E.annotation('Caltech pedestrian'),
            E.image('Caltech pedestrian'),
            E.url('None')
        ),
        E.size(
            E.width(640),
            E.height(480),
            E.depth(3)
        ),
        E.segmented(0),
    )
    for index, bbox in enumerate(anno['bbox']):
        bbox = [float(x) for x in bbox]
        if bbox_type == 'xywh':
            # Caltech pos boxes are (xmin, ymin, width, height); convert to corners
            xmin, ymin, w, h = bbox
            xmax = xmin + w
            ymax = ymin + h
        else:
            xmin, ymin, xmax, ymax = bbox
        anno_tree.append(
            E.object(
            E.name(anno['label']),
            E.bndbox(
                E.xmin(int(xmin)),
                E.ymin(int(ymin)),
                E.xmax(int(xmax)),
                E.ymax(int(ymax))
            ),
            E.difficult(0),
            E.occlusion(anno["occlusion"][index])
            )
        )
    return anno_tree


def parse_anno_file(vbb_inputdir, vbb_outputdir):
    # walk the annotation sub-directories of the input directory
    assert os.path.exists(vbb_inputdir)
    sub_dirs = os.listdir(vbb_inputdir)     # set00, set01, ...
    for sub_dir in sub_dirs:
        print("Parsing annotations of camera: ", sub_dir)
        cam_id = sub_dir
        # all vbb files under this set
        vbb_files = glob.glob(os.path.join(vbb_inputdir, sub_dir, "*.vbb"))
        for vbb_file in vbb_files:
            # annotations for every frame in this vbb file
            annos = vbb_anno2dict(vbb_file, cam_id)
            
            if annos:
                # folder where the generated xml files are stored
                vbb_outdir = vbb_outputdir

                # create vbb_outdir if it does not exist
                if not os.path.exists(vbb_outdir):
                    os.makedirs(vbb_outdir)

                for filename, anno in sorted(annos.items(), key=lambda x: x[0]):                  
                    if "bbox" in anno:
                        anno_tree = instance2xml_base(anno)
                        outfile = os.path.join(vbb_outdir, os.path.splitext(filename)[0]+".xml")
                        print("Generating annotation xml file of picture: ", filename)
                        # write the final xml file, one per image
                        etree.ElementTree(anno_tree).write(outfile, pretty_print=True)


def visualize_bbox(xml_file, img_file):
    import cv2
    tree = etree.parse(xml_file)
    # load image
    image = cv2.imread(img_file)
    origin = cv2.imread(img_file)
    # iterate over all bboxes in the xml
    for bbox in tree.xpath('//bndbox'):
        coord = []
        for corner in bbox.getchildren():
            coord.append(int(float(corner.text)))
        print(coord)
        cv2.rectangle(image, (coord[0], coord[1]), (coord[2], coord[3]), (0, 0, 255), 2)
    # visualize image
    cv2.imshow("test", image)
    cv2.imshow('origin', origin)
    cv2.waitKey(0)


def main():
    vbb_inputdir = "data/CaltechPedestrian/annotations"
    vbb_outputdir = "data/CaltechPedestrian/caltech_voc/Annotations"
    parse_anno_file(vbb_inputdir, vbb_outputdir)


if __name__ == "__main__":
    main()
    print("Success!")

Data Filtering

Many of the person annotations in the xml files are so small that they are extremely unclear, so those boxes should be removed; at the same time, images that contain no person, or whose person boxes are all smaller than a preset area, are dropped.

import xml.etree.ElementTree as ET
import os
import shutil
space = 40 * 70  # minimum bbox area (width x height) to keep
xml_path = "data/CaltechPedestrian/caltech_voc/Annotations"
jpg_path = "data/CaltechPedestrian/caltech_voc/JPEGImages"
save_xml_path = "data/CaltechPedestrian/caltech_voc/Annotations_filtered"
save_jpg_path = "data/CaltechPedestrian/caltech_voc/JPEGImages_filtered"

def filter_xml():
    # make sure the output folders exist before saving anything into them
    for save_dir in (save_xml_path, save_jpg_path):
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

    for file_name in os.listdir(xml_path):
        file_path = os.path.join(xml_path, file_name)
        print(file_path)
        tree = ET.parse(file_path)  # ElementTree parses the xml annotation file
        root = tree.getroot()  # root <annotation> node

        write_flag = False

        for obj in root.findall('object'):  # all 'object' nodes under the root
            bbox = obj.find('bndbox')
            x1 = int(bbox.find('xmin').text)
            y1 = int(bbox.find('ymin').text)
            x2 = int(bbox.find('xmax').text)
            y2 = int(bbox.find('ymax').text)

            if (x2 - x1) * (y2 - y1) >= space:
                write_flag = True
            else:
                # drop person boxes that are too small to keep
                root.remove(obj)
        if write_flag:
            tree.write(os.path.join(save_xml_path, file_name))
            print("save xml file: {}".format(os.path.join(save_xml_path, file_name)))
            jpg_name = file_name.replace('xml', 'jpg')
            old_path = os.path.join(jpg_path, jpg_name)
            new_path = os.path.join(save_jpg_path, jpg_name)
            shutil.copyfile(old_path, new_path)
            print("save jpg file: {}".format(new_path))


if __name__ == '__main__':
    filter_xml()
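
A VOC-style dataset usually also carries ImageSets/Main list files with the image ids used for training and validation. These are not produced by the scripts above; a minimal sketch for building them from the filtered images (the 9:1 split ratio and the ImageSets path are assumptions):

# make_imagesets.py - sketch: build train/val lists from the filtered images
import os
import random

jpg_dir = "data/CaltechPedestrian/caltech_voc/JPEGImages_filtered"
sets_dir = "data/CaltechPedestrian/caltech_voc/ImageSets/Main"  # assumed output path
os.makedirs(sets_dir, exist_ok=True)

ids = sorted(os.path.splitext(f)[0] for f in os.listdir(jpg_dir) if f.endswith(".jpg"))
random.seed(0)
random.shuffle(ids)

split = int(len(ids) * 0.9)  # assumed 9:1 train/val split
with open(os.path.join(sets_dir, "train.txt"), "w") as f:
    f.write("\n".join(ids[:split]) + "\n")
with open(os.path.join(sets_dir, "val.txt"), "w") as f:
    f.write("\n".join(ids[split:]) + "\n")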