from threading import Lock
from time import sleep

import numpy as np

import cv_viewer.tracking_viewer as cv_viewer
import ogl_viewer.viewer as gl
# Shared state between the main (ZED capture) thread and the torch thread.
# `lock` guards `image_net` / `detections`; the two boolean flags are simple
# handshake signals (set/read without the lock — relies on CPython's GIL).
lock = Lock()
run_signal = False   # main thread sets True when a new image is ready for inference
exit_signal = False  # set True to ask torch_thread to terminate
def xywh2abcd(xywh, im_shape):
    """Convert a center/width/height bounding box to its 4 corner points.

    Parameters
    ----------
    xywh : sequence of 4 numbers (center_x, center_y, width, height),
        in pixel coordinates.
    im_shape : image shape tuple. Currently unused — kept for the
        commented-out normalized-coordinate scaling below and for
        interface compatibility.

    Returns
    -------
    numpy.ndarray of shape (4, 2): corners in A, B, C, D order
    (see the diagram below), as expected by the ZED SDK custom-box input.
    """
    output = np.zeros((4, 2))

    # Center / Width / Height -> BBox corners coordinates
    # (fixed: the original had markdown-escaped operators `\*` / `\_`,
    # which are syntax errors in Python)
    x_min = (xywh[0] - 0.5 * xywh[2])  # * im_shape[1]
    x_max = (xywh[0] + 0.5 * xywh[2])  # * im_shape[1]
    y_min = (xywh[1] - 0.5 * xywh[3])  # * im_shape[0]
    y_max = (xywh[1] + 0.5 * xywh[3])  # * im_shape[0]

    # A ------ B
    # | Object |
    # D ------ C
    output[0][0] = x_min  # A
    output[0][1] = y_min
    output[1][0] = x_max  # B
    output[1][1] = y_min
    output[2][0] = x_max  # C
    output[2][1] = y_max
    output[3][0] = x_min  # D
    output[3][1] = y_max
    return output
def detections_to_custom_box(detections, im0):
    """Convert YOLO detections into ZED SDK CustomBoxObjectData objects.

    Parameters
    ----------
    detections : iterable of YOLO box results; each element exposes
        `.xywh` (boxes), `.cls` (class id) and `.conf` (confidence).
    im0 : source image (only its `.shape` is used, for xywh2abcd).

    Returns
    -------
    list of sl.CustomBoxObjectData ready for ZED object-tracking ingestion.
    """
    output = []
    # Fixed: the original used `enumerate` but never read the index.
    for det in detections:
        xywh = det.xywh[0]

        # Creating ingestable objects for the ZED SDK
        obj = sl.CustomBoxObjectData()
        obj.bounding_box_2d = xywh2abcd(xywh, im0.shape)
        obj.label = det.cls
        obj.probability = det.conf
        # False: let the ZED SDK estimate the object's 3D position freely
        obj.is_grounded = False
        output.append(obj)
    return output
def torch_thread(weights, img_size, conf_thres=0.2, iou_thres=0.45):
    """Inference worker: run YOLO on `image_net` whenever `run_signal` is set.

    Runs until the main thread sets `exit_signal`. Communicates through the
    module globals: reads `image_net`, writes `detections`, and clears
    `run_signal` once results are published.

    Parameters
    ----------
    weights : path to the YOLO weights file.
    img_size : inference image size passed to `model.predict`.
    conf_thres : confidence threshold (default 0.2).
    iou_thres : NMS IoU threshold (default 0.45).
    """
    global image_net, exit_signal, run_signal, detections

    print("Initializing Network...")  # fixed typo: "Intializing"
    model = YOLO(weights)

    while not exit_signal:
        if run_signal:
            # `with` guarantees the lock is released even if predict() raises
            # (the original acquire/release pair could dead-lock the capture
            # thread on an exception).
            with lock:
                # ZED frames are BGRA; YOLO expects RGB.
                img = cv2.cvtColor(image_net, cv2.COLOR_BGRA2RGB)
                # https://docs.ultralytics.com/modes/predict/#video-suffixes
                det = model.predict(img, save=False, imgsz=img_size,
                                    conf=conf_thres, iou=iou_thres)[0].cpu().numpy().boxes

                # ZED CustomBox format (with inverse letterboxing tf applied)
                detections = detections_to_custom_box(det, image_net)
            # Signal the main thread that results are ready (cleared after
            # releasing the lock, matching the original ordering).
            run_signal = False
        sleep(0.01)
def main():
global image_net, exit_signal, run_signal, detectio