def _gpu_free_memory_mb(gpu_id):
    """Return the free memory (in MiB) of GPU *gpu_id* as reported by nvidia-smi.

    Shells out to ``nvidia-smi -q -d Memory`` and greps the "Free" lines,
    one per GPU, into a temporary file which is always removed afterwards.
    NOTE(review): assumes nvidia-smi is on PATH and a POSIX shell with grep
    is available — confirm for the deployment environment.
    """
    os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
    try:
        # Each matching line looks like "        Free : 1234 MiB"; field 2 is the number.
        with open('tmp', 'r') as tmp_file:
            free_per_gpu = [int(line.split()[2]) for line in tmp_file]
    finally:
        # os.remove instead of shelling out to `rm`; guaranteed by finally.
        os.remove('tmp')
    return free_per_gpu[gpu_id]


def predict_image(detector):
    """Run *detector* over the image(s) named by FLAGS.image_file.

    In benchmark mode (FLAGS.run_benchmark) a single warmup/repeat timing run
    is performed and nothing is saved. Otherwise every image is predicted,
    per-image class names and boxes are appended as one JSON object per line
    to ``<FLAGS.output_dir>/result.txt``, visualizations are written to
    FLAGS.output_dir, and GPU memory consumption plus average per-image
    latency are printed.

    Args:
        detector: project detection object exposing ``predict(img, threshold)``
            (returns a dict with a ``'boxes'`` array of
            ``[clsid, score, x1, y1, x2, y2]`` rows) and ``config.labels``.
    """
    if FLAGS.run_benchmark:
        detector.predict(
            FLAGS.image_file,
            FLAGS.threshold,
            warmup=100,
            repeats=100,
            run_benchmark=True)
        return

    imgs_lists = get_image_list(FLAGS.image_file)
    gpu_free_before = _gpu_free_memory_mb(FLAGS.GPU_id)
    gpu_free_after = 0
    result_save_path = os.path.join(FLAGS.output_dir, 'result.txt')
    with open(result_save_path, 'w') as f:
        time_start = time.time()
        for i, img in enumerate(imgs_lists):
            results = detector.predict(img, FLAGS.threshold)
            # Sample free memory again on the last image, while the model is
            # still resident, to estimate peak GPU consumption.
            if i == len(imgs_lists) - 1:
                gpu_free_after = _gpu_free_memory_mb(FLAGS.GPU_id)
            # Renamed from `dict` — shadowing the builtin hides dict() below.
            record = {}
            record['image_name'] = img.split('.jpg')[0].split('/')[-1] + '.jpg'
            classes = []
            bboxes = []
            for dt in results['boxes']:
                clsid, bbox = int(dt[0]), list(dt[2:])
                classes.append(detector.config.labels[clsid])
                bboxes.append(bbox)
            record['classes'] = classes
            record['bboxes'] = bboxes
            f.write(json.dumps(record, cls=NpEncoder) + '\n')
            visualize(
                img,
                results,
                detector.config.labels,
                mask_resolution=detector.config.mask_resolution,
                output_dir=FLAGS.output_dir,
                threshold=FLAGS.threshold)
        time_fn = time.time()
    print('-----------------------------------------------------------------')
    print('GPU:%d - Consumption: %s M' %
          (FLAGS.GPU_id, int(gpu_free_before) - int(gpu_free_after)))
    # max(..., 1) guards against ZeroDivisionError on an empty image list.
    ms = (time_fn - time_start) * 1000.0 / max(len(imgs_lists), 1)
    print("Inference: {} ms per batch image".format(ms))
# NOTE(review): the lines below are scrape residue (blog post dates / view
# counts), not code; commented out so the module remains valid Python.
# 08-26  1162
# 09-19  885
# 06-02  577