这一部分主要完成对硬件设备的检测和调试工作。
脚本测试:运行这段程序可以验证平台能够正确运行 Python 脚本。
import argparse
def arg_test(argv=None):
    """Smoke-test that argparse works on this platform.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse falls back to sys.argv[1:], preserving the
            original script behavior. Passing an explicit list makes the
            function testable without touching sys.argv.

    Returns:
        The parsed value of ``--foo`` (also printed), so callers/tests can
        verify the parse without capturing stdout.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--foo', help='foo help')
    args = parser.parse_args(argv)
    print(args.foo)
    return args.foo
def list_test():
    """Smoke-test sequence unpacking: unpack a 3-element list and print it."""
    letters = ['a', 'b', 'c']
    first, second, third = letters
    print(first, second, third)
# Script entry point: run the list-unpacking smoke test by default.
if __name__ == "__main__":
    # arg_test()  # uncomment to exercise the argparse test instead
    list_test()
camera 测试:用于检测摄像头硬件是否正常工作,并调用对应的图像检测模块。
# Add the grandparent directory to sys.path so sibling project packages
# (camera, paddle_jetson) resolve when this file is run as a script.
import os, sys
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)
from camera import Camera
import cv2, time
import numpy as np
from paddle_jetson import MotHuman, YoloeInfer, HummanAtrr, OCRReco, LaneInfer
# Entry point for the camera/vision bring-up tests: exactly one of the
# test functions below should be enabled at a time.
if __name__ == "__main__":
    yoloe_test()
    # hum_mot_test()
    # attr_test()
    # infer_test()
    # cam_test()
    # OCR_test()
    # cam_infer_test()
yoloe_test:采用 YOLOE 检测架构,测试目标检测模块是否正常运行。
def yoloe_test():
    """Run YOLOE object detection on the live camera stream.

    Opens camera index 2, runs the "task_model3" model on every frame,
    draws each detection's box and label, shows the annotated frame, and
    prints the per-frame FPS. Press 'q' in the window to quit.

    Fixes vs. the original: bbox coordinates are cast to int before the
    OpenCV drawing calls (detector outputs may be floats, but cv2 drawing
    functions require integer pixel coordinates), the FPS division is
    guarded against a zero time delta, and camera/window cleanup runs in
    a ``finally`` block so resources are released even if inference raises.
    """
    cap = Camera(2)
    # Model variants tried during bring-up (kept for reference):
    #   YoloeInfer("front_model", "trt_fp32") / ("front_model", "trt_fp16")
    #   YoloeInfer("front_model2", "trt_fp16") / ("front_model2", "trt_fp32")
    #   YoloeInfer("ppyoloe_crn_s_400e_coco_wbt")
    #   YoloeInfer("mot_ppyoloe_s_36e_pipeline")
    #   YoloeInfer("ppyoloe_plus_crn_t_auxhead_448_300e_coco")
    infer = YoloeInfer("task_model3")
    print("start ok")
    time_l = time.time()
    try:
        while True:
            img = cap.read()
            response = infer.predict(img)
            print(response)
            for ret in response:
                label = ret.label_name
                # Cast to int: cv2.rectangle/putText reject float coords.
                x1, y1, x2, y2 = (int(v) for v in ret.bbox[:4])
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(img, label, (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            # Guard against a zero elapsed time (ZeroDivisionError).
            elapsed = time.time() - time_l
            fps = int(1 / elapsed) if elapsed > 0 else 0
            time_l = time.time()
            print("fps:", fps)
            cv2.imshow("img", img)
            if cv2.waitKey(10) == ord('q'):
                break
    finally:
        cap.close()
        cv2.destroyAllWindows()
hum_mot_test:这个函数使用 MotHuman 类进行多目标行人跟踪并可视化跟踪结果,每帧显示跟踪结果并计算帧率。
def hum_mot_test():
    """Run multi-object human tracking (MotHuman) on the live camera feed.

    Reads frames from camera index 2 via a background capture thread,
    tracks humans with visualization enabled, prints per-frame FPS and the
    raw tracking result, and shows each frame until 'q' is pressed.
    """
    cap = Camera(2)
    cap.start_back_thread()
    tracker = MotHuman()
    while True:
        frame_start = time.time()
        frame = cap.read()
        result = tracker.predict(frame, visualize=True)
        fps = int(1 / (time.time() - frame_start))
        print("fps:", fps)
        print(result)
        cv2.imshow("img", frame)
        if cv2.waitKey(10) == ord('q'):
            break
    cv2.destroyAllWindows()
    cap.close()
OCR_test:这个函数使用 OCRReco 类对图像进行光学字符识别(OCR),并输出识别结果。
def OCR_test():
    """Run OCR on the sample image "12.jpg" and print the recognition result.

    Box-drawing visualization of the detected text regions was disabled
    during bring-up; only the raw OCR output is printed.
    """
    frame = cv2.imread("12.jpg")
    reader = OCRReco()
    result = reader.predict(frame)
    print(result)
attr_test:这个函数使用 HummanAtrr 类进行人体属性预测,并输出预测结果。
def attr_test():
    """Predict human attributes on the sample image "h5.jpg" and print them."""
    frame = cv2.imread("h5.jpg")
    attr_model = HummanAtrr()
    result = attr_model(frame)
    print(result)
cam_test:这个函数从摄像头读取图像并显示,同时计算帧率。
def cam_test():
    """Display the raw camera stream and compute the frame rate.

    Reads from camera index 2 using a background capture thread and shows
    each frame until 'q' is pressed.

    Fixes vs. the original: the display window is destroyed on exit
    (consistent with the other camera tests, which call
    cv2.destroyAllWindows()), the FPS division is guarded against a zero
    time delta, and cleanup runs in a ``finally`` block so the camera is
    released even if reading raises.
    """
    cap = Camera(2)
    cap.start_back_thread()
    fps = 0
    start_time = time.time()
    try:
        while True:
            img = cap.read()
            elapsed = time.time() - start_time
            fps = int(1 / elapsed) if elapsed > 0 else 0
            start_time = time.time()
            cv2.imshow("img", img)
            if cv2.waitKey(10) == ord('q'):
                break
    finally:
        cap.close()
        cv2.destroyAllWindows()
infer_test:这个函数使用 LaneInfer 类对图像进行车道线检测,并输出检测结果。
def infer_test():
    """Run lane-line detection on the sample image "1.png" and print the result."""
    frame = cv2.imread("1.png")
    lane_model = LaneInfer()
    result = lane_model(frame)
    print(result)
cam_infer_test:这个函数从摄像头读取图像并使用 LaneInfer 类进行车道线检测,输出检测结果和帧率。
def cam_infer_test():
    """Run lane-line detection on a live cv2.VideoCapture stream.

    Reads frames from camera 0, runs LaneInfer on each successfully read
    frame, and prints the result and FPS. Press 'q' to quit.

    Fixes vs. the original: the capture device is released and all windows
    destroyed on exit (the original leaked both), and the FPS division is
    guarded against a zero time delta.
    """
    cap = cv2.VideoCapture(0)
    infer = LaneInfer()
    time_start = time.time()
    try:
        while True:
            ret, img = cap.read()
            if ret:
                response = infer(img)
                print(response)
                elapsed = time.time() - time_start
                fps = 1 / elapsed if elapsed > 0 else 0.0
                print("fps:", fps)
                time_start = time.time()
            if cv2.waitKey(10) == ord('q'):
                break
    finally:
        cap.release()
        cv2.destroyAllWindows()
至此完成了脚本运行测试和图像硬件检测,并设计了各类视觉识别模块的测试接口,为后续的模型测试做好了准备。