侧视图
H:摄像头高度
Dmin: 图像底边距摄像头实际距离
Dmax: 图像顶边距摄像头实际距离
α: 摄像头视角下边缘与竖直夹角
θ: 摄像头视角上边缘与下边缘夹角
δ: 目标点与摄像头视角下边缘夹角
俯视图
β: 摄像头视角水平夹角
h: 图像像素高度
w: 图像像素宽度
x0: 目标点的像素x坐标
y0: 目标点的像素y坐标
x1: 目标点实际映射x坐标
y1: 目标点实际映射y坐标
B0: 摄像头视角最远处宽度/2
B1: 目标点所在位置x轴方向如图所示距离
推理过程
使用
需要提前测得的量:
H, Dmin, Dmax, B0, h, w
输入:
x0, y0
输出:
x1, y1
python实现
# Pixel -> real-world coordinate mapping
def get_location(x0, y0, H=20, Dmin=20, Dmax=50, B0=100, h=640, w=640):
    """Project a pixel (x0, y0) onto the ground plane seen by the camera.

    Geometry (see the derivation notes above):
      H    -- camera height above the ground
      Dmin -- ground distance from the camera to the image's bottom edge
      Dmax -- ground distance from the camera to the image's top edge
      B0   -- half of the real-world width at the farthest visible line
      h, w -- image height / width in pixels

    Parameters
    ----------
    x0, y0 : pixel coordinates of the target point (image origin top-left,
        so y0 = h is the bottom edge of the frame).
    H, Dmin, Dmax, B0, h, w : calibration values, measured in advance;
        defaults keep the original hard-coded setup.

    Returns
    -------
    [x1, y1] : real-world coordinates of the target; y1 is the ground
        distance from the camera along its axis, x1 the lateral offset
        from the axis (negative = left of centre).
    """
    # Horizontal field of view, from the half-width at the farthest line:
    # tan(beta / 2) = B0 / Dmax.
    beta = 2 * math.atan(B0 / Dmax)
    # Angle between the lower view edge and the vertical.
    alpha = math.atan(Dmin / H)
    # Vertical field of view (upper edge minus lower edge).
    theta = math.atan(Dmax / H) - alpha
    # Angle of the target above the lower view edge; y0 = h (bottom of the
    # image) gives delta = 0, y0 = 0 (top) gives delta = theta.
    delta = (h - y0) * theta / h
    y1 = H * math.tan(alpha + delta)
    # Half-width of the visible ground strip at the target's distance.
    # NOTE(review): y1 already includes Dmin (y1 == Dmin at the bottom
    # edge), so (y1 + Dmin) looks like it double-counts Dmin — preserved
    # as in the original; TODO confirm against the derivation.
    B1 = (y1 + Dmin) * math.tan(beta / 2)
    # Scale the pixel offset from the image centre line to real units.
    x1 = 2 * B1 * (x0 - w / 2) / w
    return [x1, y1]
# frame = number of frames between the two samples used for the speed
# fps   = frames per second of the video
# bias  = detection-box jitter tolerance, in mapped distance units
# peak  = speed cap; anything above it is treated as a tracking glitch
def get_speed(id_dic, id, fps, bias=1, peak=120):
    """Estimate the speed of track `id` from its position history.

    `id_dic` maps a track id to a list of [x, y] mapped positions, one
    entry per processed frame, newest last (as built by get_location).

    Returns the speed in distance-units per second, rounded to an int.
    Returns 0 when there is not enough history, when the movement is
    within the jitter tolerance `bias`, or when the result exceeds
    `peak` (an impossibly high value is a tracking glitch, not motion).
    """
    # Compare positions ~1/4 second apart — but never less than one
    # frame: round(fps / 4) is 0 for fps <= 2 (banker's rounding), which
    # would compare a point with itself and report 0 forever.
    frame = max(1, round(fps / 4))
    if len(id_dic[id]) <= frame:
        # Not enough history yet to look `frame` samples back.
        return 0
    x1, y1 = id_dic[id][-1]
    x2, y2 = id_dic[id][-1 - frame]
    # Ignore sub-`bias` movement: detection-box jitter, not real motion.
    if abs(x1 - x2) <= bias and abs(y1 - y2) <= bias:
        return 0
    # Euclidean displacement over the elapsed time (frame / fps seconds).
    speed = math.hypot(x1 - x2, y1 - y2) / (frame / fps)
    if speed > peak:
        return 0
    return round(speed)
# prediction: map each tracked detection to ground coordinates and
# estimate its speed. `outputs` rows look like
# [x1, y1, x2, y2, track_id, class]; `confs` are the matching
# confidences (unused in this span — presumably used further down;
# TODO confirm).
if len(outputs) > 0:
    for j, (output, conf) in enumerate(zip(outputs, confs)):
        bboxes = output[0:4]
        id = output[4]
        cls = output[5]
        # Reference point: horizontal centre of the box at its bottom
        # edge — where the object touches the ground.
        x = (bboxes[0] + bboxes[2]) / 2
        y = bboxes[3]
        # NOTE(review): loop-invariant, could be hoisted out of the loop.
        fps = vid_cap.get(cv2.CAP_PROP_FPS)
        # Append the mapped position to this track's history.
        if id_dic.__contains__(id):
            id_dic[id].append(get_location(x, y))
        else:
            id_dic[id] = [get_location(x, y)]
        speed = get_speed(id_dic, id, fps)
# TODO: to be verified