Script 1 (extract COCO keypoints and save to HDF5):
# Extract COCO person keypoints and save them into an HDF5 file. Date: 2018.11.22
from pycocotools.coco import COCO
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import pylab
import os
import h5py
from PIL import Image
from PIL import ImageDraw
# Default figure size for any visualization done with matplotlib/pylab.
pylab.rcParams['figure.figsize'] = (8.0, 10.0)
# initialize COCO api for person keypoints annotations
# NOTE(review): dataDir/dataType are hard-coded for one local setup;
# 'new.json' is presumably a custom keypoint annotation file in COCO format.
dataDir = '/home/myubuntu/Desktop/human-pose-estimation.pytorch-master/data/coco'
dataType = 'new' #'person_keypoints_val2017'
annFile = '{}/annotations/{}.json'.format(dataDir,dataType)
coco_kps=COCO(annFile)
# display COCO categories and supercategories
cats = coco_kps.loadCats(coco_kps.getCatIds())
nms=[cat['name'] for cat in cats]
print('COCO categories: \n{}\n'.format(' '.join(nms)))
nms = set([cat['supercategory'] for cat in cats])
print('COCO supercategories: \n{}'.format(' '.join(nms)))
# get all images containing given categories, select one at random
# catIds/imgIds are read by getBndboxKeypointsGT() below.
catIds = coco_kps.getCatIds(catNms=['person'])
imgIds = coco_kps.getImgIds(catIds=catIds )
print ('there are %d images containing human'%len(imgIds))
#print (imgIds)
def getBndboxKeypointsGT():
    """Extract per-person bounding boxes and keypoints from the loaded COCO
    annotations and write them to an HDF5 file ('.../h5/new.h5').

    Reads the module-level ``coco_kps``, ``imgIds`` and ``catIds``.

    Datasets written (one row per person annotation):
      - 'imgname': the image file name '<12-digit image id>.jpg' encoded as
        an array of character codes stored as float64, so that it fits in a
        numeric HDF5 dataset.
      - 'bndbox': bounding box as [[x1, y1, x2, y2]] (converted from the
        COCO [x, y, w, h] format, truncated to int).
      - 'part': 17 [x, y] keypoint coordinates; the COCO visibility flag
        of each triple is dropped.
    """
    h5_imgname = []
    h5_bndbox = []
    h5_keypoints = []
    for img_id in imgIds:
        img = coco_kps.loadImgs(img_id)[0]
        annIds = coco_kps.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
        anns = coco_kps.loadAnns(annIds)
        for ann in anns:
            # COCO file names are the image id left-padded to 12 digits.
            name = str(ann['image_id']).zfill(12) + '.jpg'
            # Store the name as float64 character codes (HDF5-friendly).
            h5_imgname.append(np.array([ord(c) for c in name], dtype=np.float64))

            # Convert [x, y, w, h] to integer [x1, y1, x2, y2]; the extra
            # nesting preserves the original (N, 1, 4) dataset shape.
            b = ann['bbox']
            x1, y1 = int(b[0]), int(b[1])
            x2, y2 = int(b[0] + b[2]), int(b[1] + b[3])
            h5_bndbox.append([[x1, y1, x2, y2]])

            # Keypoints arrive flat as [x1, y1, v1, x2, y2, v2, ...];
            # keep each (x, y) pair and drop the visibility flag v.
            kps = ann['keypoints']
            h5_keypoints.append([[kps[k], kps[k + 1]] for k in range(0, len(kps), 3)])

    h5_imgname = np.array(h5_imgname)
    h5_bndbox = np.array(h5_bndbox)
    h5_keypoints = np.array(h5_keypoints)
    # 'with' guarantees the file is flushed and closed even on error
    # (the original leaked the handle if create_dataset raised).
    with h5py.File('.../h5/new.h5', 'w') as h5file:
        h5file.create_dataset('imgname', data=h5_imgname)
        h5file.create_dataset('bndbox', data=h5_bndbox)
        h5file.create_dataset('part', data=h5_keypoints)
if __name__ == "__main__":
    # Entry point: extract bounding boxes/keypoints and write the h5 file.
    # (Fixed: the original message had a stray '"' embedded in the string.)
    print('Writing bndbox and keypoints to h5 files...')
    getBndboxKeypointsGT()
Script 2 (merge two HDF5 annotation files):
# Merge two HDF5 annotation files. Date: 2018.12.05
import h5py
import numpy as np


def _load_annotations(path):
    """Read and return (bndbox, imgname, part) arrays from one h5 file.

    Prints the dataset names it finds, matching the original script's
    diagnostic output. Uses ds[()] to read: the Dataset.value attribute
    was removed in h5py 3.0. The 'with' block closes the file (the
    original left both input files open).
    """
    with h5py.File(path, 'r') as f:
        for k in f.keys():
            print(k)
        return f['bndbox'][()], f['imgname'][()], f['part'][()]


bnds1, imgs1, parts1 = _load_annotations('.../h5/new.h5')
bnds2, imgs2, parts2 = _load_annotations('.../h5/val2017.h5')

# Merge the imgname datasets: np.append flattens, so restore the
# (N, 16) shape — every name is 16 character codes ('xxxxxxxxxxxx.jpg').
h5_imgs = np.append(imgs1, imgs2).reshape(-1, 16)
#print(h5_imgs)

# Merge the bounding boxes: one [x1, y1, x2, y2] row per person, kept
# under a singleton axis to preserve the original (N, 1, 4) shape.
h5_bnds = np.append(bnds1, bnds2).reshape(-1, 1, 4)
#print(h5_bnds)

# Merge the keypoints: 17 (x, y) pairs per person, cast to int
# (np.int was removed in NumPy 1.24; builtin int is the replacement).
h5_parts = np.append(parts1, parts2).astype(int).reshape(-1, 17, 2)
#print(h5_parts)

# Write the merged annotations; 'with' flushes and closes the output
# file (the original never closed it).
with h5py.File('.../h5/new_val2017.h5', 'w') as h5file:
    h5file.create_dataset('imgname', data=h5_imgs)
    h5file.create_dataset('bndbox', data=h5_bnds)
    h5file.create_dataset('part', data=h5_parts)
Script 3 (split an HDF5 file into subsets of any size):
# Split an h5 file: take the first 10000 entries as a training subset.
import h5py

with h5py.File('.../h5/coco.h5', 'r') as annot:
    # train split: slicing a Dataset reads only the requested rows.
    imgname_coco_train = annot['imgname'][:10000]
    bndbox_coco_train = annot['bndbox'][:10000]
    part_coco_train = annot['part'][:10000]

# Write the split; 'with' flushes and closes the output file
# (the original opened it and never closed it).
with h5py.File('.../h5/train2017.h5', 'w') as h5file:
    h5file.create_dataset('imgname', data=imgname_coco_train)
    h5file.create_dataset('bndbox', data=bndbox_coco_train)
    h5file.create_dataset('part', data=part_coco_train)
Script 4 (inspect an HDF5 file's datasets and their shapes):
# Inspect an h5 file: list its dataset names and print their shapes.
import h5py

# 'with' closes the file when done (the original leaked the handle).
with h5py.File('.../new.h5', 'r') as f:
    for key in f.keys():
        print(key)
    # .shape reads metadata only; no dataset contents are loaded.
    print(f['bndbox'].shape)
    print(f['imgname'].shape)
    print(f['part'].shape)