在B站上看到可以对行人方向做统计:
感觉很有意思,之前在github上发现有个类似的项目:项目在这里
思路是:
- 准备一个背景板,上面画出2个区域,分为上行标识区、下行标识区。
- 当人经过上行标识区、下行标识区的时候,就相应地计数
效果:
当行人经过标识区时,其中心以蓝色实心点显示出来,并进行相应计数
添加行人行走方向计数的main.cpp
/*!
@Description : https://github.com/shaoshengsong/
@Author : shaoshengsong
@Date : 2022-09-23 02:52:22
*/
#include <fstream>
#include <sstream>
//#include <iostream>
//#include <cstring>
#include <opencv2/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include "YOLOv5Detector.h"
//#include "FeatureTensor.h"
#include "BYTETracker.h" //bytetrack
#include "tracker.h"//deepsort
//Deep SORT parameter
// https://cloud.tencent.com/developer/article/2099504
/**
 * Run one ByteTrack update step on this frame's detections and draw the
 * resulting tracks (bounding box + "class--id" label) onto the frame.
 *
 * @param frame   BGR frame to draw on (modified in place).
 * @param results all detections for this frame; only person (classId == 0)
 *                detections are fed to the tracker.
 * @param tracker ByteTrack tracker instance, updated in place.
 * @param classes class-id -> name lookup used for the label text.
 * @return the tracker's active tracks for this frame.
 */
std::vector<STrack> test_bytetrack(cv::Mat& frame, std::vector<detect_result>& results,
                                   BYTETracker& tracker, std::vector<std::string> & classes)
{
    // Keep only person detections (COCO class id 0) for tracking.
    std::vector<detect_result> objects;
    for (const detect_result& dr : results)
    {
        if (dr.classId == 0) // person
        {
            objects.push_back(dr);
        }
    }

    // ByteTrack main update: associates detections with existing tracks.
    std::vector<STrack> output_stracks = tracker.update(objects);

    // Draw every track. NOTE(review): the original code had a disabled
    // size/aspect-ratio filter here (if(1)); it was pointless because the
    // tracker has already assigned IDs by this point — filtering would only
    // hide the drawing — so it has been removed entirely.
    for (const STrack& track : output_stracks)
    {
        const std::vector<float>& tlwh = track.tlwh; // [left, top, width, height]
        cv::Scalar color = tracker.get_color(track.track_id);
        cv::putText(frame,
                    cv::format("%s--%d", classes[track.class_index].c_str(), track.track_id),
                    cv::Point(tlwh[0], tlwh[1] - 5),
                    0, 0.6, cv::Scalar(0, 0, 255), 2, cv::LINE_AA);
        cv::rectangle(frame, cv::Rect(tlwh[0], tlwh[1], tlwh[2], tlwh[3]), color, 2);
    }
    return output_stracks;
}
int main(int argc, char *argv[])
{
// 加载类别名称
std::vector<std::string> classes;
std::string file="/home/jason/work/my-deploy/01-bytetrack-deepsort/coco_80_labels_list.txt";
std::ifstream ifs(file);
if (!ifs.is_open())
CV_Error(cv::Error::StsError, "File " + file + " not found");
std::string line;
while (std::getline(ifs, line))
{
classes.push_back(line);
}
// 上行 下行 记数
int total_num=0;
std::vector<int> up_ID;
std::vector<int> down_ID;
cv::Mat background= cv::Mat::zeros(1080, 1920, CV_8U); // 高、宽
std::vector<cv::Point> down ={cv::Point(0,500), cv::Point(1920, 500),
cv::Point(1920, 540),cv::Point(0,540)};
std::vector<cv::Point> up ={cv::Point(0,550), cv::Point(1920, 550),
cv::Point(1920, 580),cv::Point(0,580)};
cv::fillPoly(background, down, cv::Scalar(60));
cv::fillPoly(background, up, cv::Scalar(100));
cv::imshow("back", background);
// 检测器
std::cout<<"classes:"<<classes.size();
std::shared_ptr<YOLOv5Detector> detector(new YOLOv5Detector());
detector->init(k_detect_model_path);
//bytetrack设置
int fps=20;
BYTETracker bytetracker(fps, 30); // 后面的30是30帧没有发现,
// 读取视频
std::cout<<"begin read video"<<std::endl;
cv::VideoCapture capture("/home/jason/work/my-deploy/01-bytetrack-deepsort/1.mp4");
if (!capture.isOpened()) {
printf("could not read this video file...\n");
return -1;
}
std::cout<<"end read video"<<std::endl;
// 结果
std::vector<detect_result> results;
// 输出另存
cv::VideoWriter video("out.avi",cv::VideoWriter::fourcc('M','J','P','G'),10, cv::Size(1920,1080));
int num_frames = 0;
cv::Mat frame;
while (true)
{
if (!capture.read(frame)) // if not success, break loop
{
std::cout<<"\n Cannot read the video file. please check your video.\n";
break;
}
num_frames ++;
//Second/Millisecond/Microsecond 秒s/毫秒ms/微秒us
auto start = std::chrono::system_clock::now();
// 获得检测结果
detector->detect(frame, results);
// 计算检测耗时
auto end = std::chrono::system_clock::now();
auto detect_time =std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();//ms
// std::cout<<classes.size()<<":"<<results.size()<<":"<<num_frames<<std::endl;
printf("视频尺寸:%d宽 * %d高\n", frame.cols, frame.rows );
printf("帧数:%d 检测器耗时:%dms ",num_frames, (int)detect_time);
// 进行跟踪
std::vector<STrack> temp_tracks;
auto start2 = std::chrono::system_clock::now();
temp_tracks = test_bytetrack(frame, results,bytetracker,classes);
auto end2 = std::chrono::system_clock::now();
// 计算跟踪器耗时
auto detect_time2 =std::chrono::duration_cast<std::chrono::milliseconds>(end2 - start2).count();//ms
printf("跟踪器耗时:%dms \n", (int)detect_time2);
//上下行走方向记数
for (size_t i=0; i<temp_tracks.size(); i++)
{
// 目标id
if (temp_tracks[i].track_id > total_num)
{
total_num = temp_tracks[i].track_id;
}
int left= temp_tracks[i].tlwh[0];
int top = temp_tracks[i].tlwh[1];
int width = temp_tracks[i].tlwh[2];
int height = temp_tracks[i].tlwh[3];
int bbox_ID = temp_tracks[i].track_id;
cv::Point center = cv::Point(left+width/2, top +height/2);
int down = 60;// 下行标识区颜色
int up = 100; // 上行标识去颜色
int move =background.at<uchar>(center.y,center.x);
int buffer = 0; // background非标识区颜色为0
if (move != buffer)
{
if (move == down)
{
if (!std::count(up_ID.begin(), up_ID.end(), bbox_ID)) // 判断 bbox_ID 是否在up_ID列表中
{
cv::circle(frame, center,10, cv::Scalar(255, 0,0),-1); // 撞线的目标标识蓝色实心点
if (!std::count(down_ID.begin(), down_ID.end(), bbox_ID)) // 判断 bbox_ID 是否在down_ID列表中
{
down_ID.push_back(bbox_ID);
}
}
}
else if (move == up)
{
if (!std::count(down_ID.begin(), down_ID.end(), bbox_ID))
{
cv::circle(frame, center,10, cv::Scalar(255, 0,0),-1); // 撞线的目标标识蓝色实心点
if (!std::count(up_ID.begin(), up_ID.end(), bbox_ID))
{
up_ID.push_back(bbox_ID);
}
}
}
}
}
// 显示人流量
cv::putText(frame, cv::format("total:%d up:%d down:%d ", total_num, (int)up_ID.size(), (int)down_ID.size()),
cv::Point(30, 30),
0,1,cv::Scalar(0, 0, 255), 2, 8);
cv::line(frame, cv::Point(0, 500), cv::Point(1920, 500), cv::Scalar(0, 100, 0), 2);
cv::line(frame, cv::Point(0, 540), cv::Point(1920, 540), cv::Scalar(0, 150, 0), 2);
cv::line(frame, cv::Point(0, 580), cv::Point(1920, 580), cv::Scalar(0, 200, 0), 2);
cv::imshow("YOLOv5-6.x", frame);
//------------------------------------------------------------------
video.write(frame);
if(cv::waitKey(30) == 27) // Wait for 'esc' key press to exit
{
break;
}
results.clear();
}
capture.release();
video.release();
cv::destroyAllWindows();
}
yolov5+bytetrack原项目地址:https://github.com/shaoshengsong/DeepSORT
作为一个计算机视觉领域刚刚入门的新人,很崇拜大佬能把这么庞大的Python项目转为C++,必须star!