Qt C++: Registering Hikvision SDK Callbacks Inside a Class

1. Program entry
main.cpp

#include "mainwindow.h"
#include <QApplication>
using namespace std;

int main(int argc, char *argv[])
{
    QApplication a(argc, argv);

    MainWindow w;

    w.show();
    return a.exec();
}

2. Shared data class
CameraGlobalData header

#ifndef CAMERAGLOBALDATA_H
#define CAMERAGLOBALDATA_H
#include <iostream>
#include <vector>
using namespace std;

struct CameraStruct{
    string ipaddress;
    string username;
    string password;
    int port;

};
class CameraGlobalData
{
public:
    CameraGlobalData();
    vector<CameraStruct> cameras;
    string rtsp[8];
};

#endif // CAMERAGLOBALDATA_H

CameraGlobalData source

#include "cameraglobaldata.h"

CameraGlobalData::CameraGlobalData()
{
    // All eight cameras share the same credentials and port; only the IP differs.
    const string ips[8] = {
        "192.168.2.174", "192.168.2.175", "192.168.2.176", "192.168.2.177",
        "192.168.2.178", "192.168.2.179", "192.168.2.181", "192.168.2.184"
    };

    for (int i = 0; i < 8; i++) {
        CameraStruct temp;
        temp.ipaddress = ips[i];
        temp.username  = "admin";
        temp.password  = "a12131415";
        temp.port      = 8000;
        cameras.push_back(temp);

        // Sub-stream RTSP URL for each camera
        rtsp[i] = "rtsp://" + cameras[i].username + ":" + cameras[i].password + "@"
                + cameras[i].ipaddress + ":554/h264/ch1/sub/av_stream";
    }
}
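A mistyped credential or IP in this list only shows up later as a capture thread that silently fails to open its stream, so it can help to probe the generated URLs once at startup. The helper below is purely an illustrative sketch (it is not part of the project) and assumes OpenCV was built with an RTSP-capable backend such as FFmpeg or GStreamer.

// Hypothetical startup check: try to open each generated RTSP URL once.
#include <iostream>
#include <opencv2/opencv.hpp>
#include "cameraglobaldata.h"

static void probeRtspUrls()
{
    CameraGlobalData cgd;
    for (int i = 0; i < 8; i++) {
        cv::VideoCapture cap(cgd.rtsp[i], cv::CAP_ANY);
        std::cout << "camera " << i << ": "
                  << (cap.isOpened() ? "stream opened" : "FAILED to open")
                  << "  (" << cgd.rtsp[i] << ")" << std::endl;
    }
}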

3. Thread class
CameraThread header

#ifndef CAMERATHREAD_H
#define CAMERATHREAD_H

#include <QThread>
#include <QImage>
#include <QLabel>
#include <QTime>
#include <QElapsedTimer>
#include <opencv2/opencv.hpp>
#include "Windows.h"
#include "HCNetSDK.h"
#include "cameraglobaldata.h"


using namespace std;
using namespace cv;


class CameraThread : public QThread
{
    Q_OBJECT

public:
    CameraThread(QLabel *displayLabel, int cameraIndex, QObject *parent = nullptr);
    void gpu_process(UMat frame);
    void cpu_process(Mat frame);
    int gpu_CheckPoint(int spec_x, int spec_y, cv::UMat frame);
    int cpu_CheckPoint(int spec_x, int spec_y, cv::Mat frame);


    ~CameraThread();


private:
     static void CALLBACK cbMessageCallback(LONG lCommand, NET_DVR_ALARMER *pAlarmer, char *pAlarmInfo, DWORD dwBufLen, void* pUser);
     void cbMessageCallbackInstance(LONG lCommand, NET_DVR_ALARMER *pAlarmer, char *pAlarmInfo, DWORD dwBufLen);
     int registerHikonSDKCallback(char* ipaddress,char* username,char* password,int port,int channelNO);
protected:
    void run() override;

signals:
    void frameReady(const QImage &frame, int index);
    void frameReady2(int index,int count);

private:
    CameraGlobalData cgd;

    int count_1;
    int count_2;
    int count_3;
    int iNum;



    int framecount;
    double usedTimecnt;
    QLabel *displayLabel;
    int cameraIndex;

    cv::Mat currentFrame;


    cv::Mat prevFrame_cpu;  // previous frame (CPU)
    cv::UMat prevFrame_gpu; // previous frame (GPU)


    int ret=0;
    int noContourFrameCount=0; // number of consecutive frames with no contour
    QTime beginTime;           // start time
    cv::Mat elm_1;
    cv::Mat elm_2;
    int roiWidth = 40;  // detection-region width
    int roiHeight = 20; // detection-region height
    int count;          // traffic count
    // car-entry flags
    bool carIn=false;
    bool carIn_isTimed=false;
    QTime carIn_datetime;

    // car-exit flags
    bool carOut=false;
    bool carOut_isTimed=false;
    QTime carOut_datetime;
    QTime lastCountTime; // time of the last count

    // parameters for the old contour-detection method
    cv::Mat curFrameImg; // current frame
    cv::Mat preFrameImg_cpu;
    cv::Mat img_roi1,gray_1,gray_2,diff_img,thres_img,erode_img,median_img,dilate_img,img_color;
    int sl;
    int frameCount;
    double minarea;
    QString checked_Car_Tag;
    int pusleDataStatus;

    // parameters for the new contour-detection method
    cv::Mat grayFrame,subRegion_cpu,prevGrayFrame,frameDiff,thresholdedDiff;
    bool carin2=false;   // car has entered
    bool carout2=true;   // car has left
    bool nocar2=false;   // no car present
    bool isCount2=false; // whether this car has been counted
    //QTime countTime;   // count timer
    QElapsedTimer countTime;
    int carCount; // car count
    float speed;

    cv::UMat elm_1_gpu;
    cv::UMat elm_2_gpu;
    // UMat counterparts of the old contour-detection parameters
    cv::UMat curFrameImg_gpu; // current frame
    cv::UMat img_roi1_gpu,gray_1_gpu,gray_2_gpu,diff_img_gpu,thres_img_gpu,erode_img_gpu,median_img_gpu,dilate_img_gpu,img_color_gpu;
    cv::UMat grayFrame_gpu,subRegion_gpu,prevGrayFrame_gpu,frameDiff_gpu,thresholdedDiff_gpu;


};

#endif // CAMERATHREAD_H
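One detail of this header is easy to miss: HCNetSDK is a plain C API, so the alarm callback must be a static function, and the void* pUser parameter is used to pass the this pointer back so the static cbMessageCallback can forward to the member function cbMessageCallbackInstance (see the source below). The stand-alone sketch that follows shows the same trampoline pattern with made-up names, independent of the SDK.

// Stand-alone illustration of the static-callback "trampoline" used above.
// fake_sdk_set_callback stands in for NET_DVR_SetDVRMessageCallBack_V50.
#include <iostream>

using Callback = void (*)(int code, void *pUser);

static Callback g_cb   = nullptr;
static void    *g_user = nullptr;

void fake_sdk_set_callback(Callback cb, void *pUser) { g_cb = cb; g_user = pUser; }
void fake_sdk_fire(int code) { if (g_cb) g_cb(code, g_user); }

class Receiver
{
public:
    void registerWithSdk() { fake_sdk_set_callback(&Receiver::trampoline, this); }
private:
    static void trampoline(int code, void *pUser)
    {
        static_cast<Receiver *>(pUser)->onEvent(code); // recover 'this' from pUser
    }
    void onEvent(int code) { std::cout << "event " << code << std::endl; }
};

int main()
{
    Receiver r;
    r.registerWithSdk();
    fake_sdk_fire(42); // prints "event 42"
    return 0;
}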

CameraThread source

#include "camerathread.h"
#include <QDebug>
#include <QThread>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/types_c.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/time.h>
#include <iconv.h>
#include <sstream>
#include <string>
#include <string.h>
#include <QImage>
#include <QTime>
#include <QDateTime>
#include <QFont>
#include <QPen>
#include <QPainter>
#include <QPointF>
#include <opencv2/core/ocl.hpp>
#define GPU 0

CameraThread::CameraThread(QLabel *displayLabel, int cameraIndex, QObject *parent)
    : QThread(parent), displayLabel(displayLabel), cameraIndex(cameraIndex)
{
    // Camera and capture initialization code omitted here...
//    rtsp[0]="rtspsrc location=rtsp://admin:a12131415@192.168.2.184:554/h264/ch1/sub/av_stream latency=100 ! rtph264depay ! h264parse  config-interval=1 ! mppvideodec ! videoconvert ! appsink max-buffers=3 drop=false sync=false";
//    rtsp[1]="rtspsrc location=rtsp://admin:a12131415@192.168.2.174:554/h264/ch1/main/av_stream latency=100 ! rtph264depay ! h264parse  config-interval=1 ! mppvideodec ! videoconvert ! appsink max-buffers=3 drop=false sync=false";
//    rtsp[2]="rtspsrc location=rtsp://admin:a12131415@192.168.2.176:554/h264/ch1/main/av_stream latency=100 ! rtph264depay ! h264parse  config-interval=1 ! mppvideodec ! videoconvert ! appsink max-buffers=3 drop=false sync=false";
//    rtsp[3]="rtspsrc location=rtsp://admin:a12131415@192.168.2.177:554/h264/ch1/main/av_stream latency=100 ! rtph264depay ! h264parse  config-interval=1 ! mppvideodec ! videoconvert ! appsink max-buffers=3 drop=false sync=false";
//    rtsp[4]="rtspsrc location=rtsp://admin:a12131415@192.168.2.178:554/h264/ch1/main/av_stream latency=100 ! rtph264depay ! h264parse  config-interval=1 ! mppvideodec ! videoconvert ! appsink max-buffers=3 drop=false sync=false";
//    rtsp[5]="rtspsrc location=rtsp://admin:a12131415@192.168.2.179:554/h264/ch1/main/av_stream latency=100 ! rtph264depay ! h264parse  config-interval=1 ! mppvideodec ! videoconvert ! appsink max-buffers=3 drop=false sync=false";
//    rtsp[6]="rtspsrc location=rtsp://admin:a12131415@192.168.2.175:554/h264/ch1/main/av_stream latency=100 ! rtph264depay ! h264parse  config-interval=1 ! mppvideodec ! videoconvert ! appsink max-buffers=3 drop=false sync=false";
//    rtsp[7]="rtspsrc location=rtsp://admin:a12131415@192.168.2.174:554/h264/ch1/main/av_stream latency=100 ! rtph264depay ! h264parse  config-interval=1 ! mppvideodec ! videoconvert ! appsink max-buffers=3 drop=false sync=false";


      if(cameraIndex==7)
         registerHikonSDKCallback((char *)cgd.cameras.at(cameraIndex).ipaddress.c_str(),(char *)cgd.cameras.at(cameraIndex).username.c_str(),(char *)cgd.cameras.at(cameraIndex).password.c_str(),cgd.cameras.at(cameraIndex).port,1);



}

CameraThread::~CameraThread()
{
    // Note: displayLabel is owned by the main window's layout, so it must not be deleted here.
    // Camera and capture cleanup code omitted here...
}

void CALLBACK CameraThread::cbMessageCallback(LONG lCommand, NET_DVR_ALARMER *pAlarmer, char *pAlarmInfo, DWORD dwBufLen, void* pUser)
{
    if (pUser != nullptr)
       {
           CameraThread* pThis = static_cast<CameraThread*>(pUser);
           pThis->cbMessageCallbackInstance(lCommand, pAlarmer, pAlarmInfo, dwBufLen);
       }
}

void CameraThread::cbMessageCallbackInstance(LONG lCommand, NET_DVR_ALARMER *pAlarmer, char *pAlarmInfo, DWORD dwBufLen)
{
    int i=0;
    char filename[100];
    FILE *fSnapPic=NULL;
    FILE *fSnapPicPlate=NULL;

    // The code below is for reference only. In a real application, do not save files
    // directly inside this callback; hand the data off instead (e.g. via PostMessage
    // or a queued Qt signal) and process it in the handler.

    switch (lCommand)
    {
    case COMM_UPLOAD_PLATE_RESULT:
    {
        NET_DVR_PLATE_RESULT struPlateResult={0};
        memcpy(&struPlateResult, pAlarmInfo, sizeof(struPlateResult));
        printf("%s %s\n",QString("车牌号:").toLocal8Bit().toStdString().c_str(), struPlateResult.struPlateInfo.sLicense);//车牌号

        switch(struPlateResult.struPlateInfo.byColor)//车牌颜色
        {
        case VCA_BLUE_PLATE:
            printf("车辆颜色: 蓝色\n");
            break;
        case VCA_YELLOW_PLATE:
            printf("车辆颜色: 黄色\n");
            break;
        case VCA_WHITE_PLATE:
            printf("车辆颜色: 白色\n");
            break;
        case VCA_BLACK_PLATE:
            printf("车辆颜色: 黑色\n");
            break;
        default:
            break;
        }

        // Scene picture
        if (struPlateResult.dwPicLen != 0 && struPlateResult.byResultType == 1 )
        {
            sprintf(filename,"testpic_%d.jpg",iNum);
            fSnapPic=fopen(filename,"wb");
            fwrite(struPlateResult.pBuffer1,struPlateResult.dwPicLen,1,fSnapPic);
            iNum++;
            fclose(fSnapPic);
        }
        // Plate close-up picture (use pBuffer2/dwPicPlateLen, not the scene buffer)
        if (struPlateResult.dwPicPlateLen != 0 && struPlateResult.byResultType == 1)
        {
            sprintf(filename,"testPicPlate_%d.jpg",iNum);
            fSnapPicPlate=fopen(filename,"wb");
            fwrite(struPlateResult.pBuffer2,struPlateResult.dwPicPlateLen,1,fSnapPicPlate);
            iNum++;
            fclose(fSnapPicPlate);
        }

        // Handle any other fields here...
        break;
    }
    case COMM_ITS_PLATE_RESULT:
    {
        // COMM_ALARM_TPS_REAL_TIME  0x3081  // real-time TPS passing-vehicle data upload

        NET_ITS_PLATE_RESULT struITSPlateResult={0};
        memcpy(&struITSPlateResult, pAlarmInfo, sizeof(struITSPlateResult));
        // Convert the capture's absolute-time string to a QString
        QString absTimeStr = QString::fromLatin1((char *)struITSPlateResult.struPicInfo[0].byAbsTime);
        printf("Plate number: %s\n", struITSPlateResult.struPlateInfo.sLicense); // license plate
        qDebug()<<"current time:"<<absTimeStr;

        for (i = 0; i < (int)struITSPlateResult.dwPicNum - 1; i++) // cast first: dwPicNum is unsigned, so dwPicNum-1 underflows when it is 0
        {
            // printf("Plate number: %s\n", struITSPlateResult.struPlateInfo.sLicense);
            // byDriveChan: lane number - east-left: 1, east-straight-1: 2, east-straight-2: 3
            if(struITSPlateResult.byDriveChan==1){
                count_1++;
                std::cout<<"count:"<<count_1<<std::endl;
                // Build the capture (car-in) timestamp from the SDK time structure.
                WORD w_year   = struITSPlateResult.struSnapFirstPicTime.wYear;
                byte b_month  = struITSPlateResult.struSnapFirstPicTime.byMonth;
                byte b_day    = struITSPlateResult.struSnapFirstPicTime.byDay;
                byte b_hour   = struITSPlateResult.struSnapFirstPicTime.byHour;
                byte b_minute = struITSPlateResult.struSnapFirstPicTime.byMinute;
                byte b_second = struITSPlateResult.struSnapFirstPicTime.bySecond;
                WORD w_milliSec = struITSPlateResult.struSnapFirstPicTime.wMilliSec; // WORD, not byte: milliseconds can exceed 255

                QString car_inTime = QString("%1-%2-%3 %4:%5:%6.%7")
                        .arg(int(w_year),     4, 10, QChar('0'))
                        .arg(int(b_month),    2, 10, QChar('0'))
                        .arg(int(b_day),      2, 10, QChar('0'))
                        .arg(int(b_hour),     2, 10, QChar('0'))
                        .arg(int(b_minute),   2, 10, QChar('0'))
                        .arg(int(b_second),   2, 10, QChar('0'))
                        .arg(int(w_milliSec), 3, 10, QChar('0'));
                QString car_outTime=QDateTime::currentDateTime().toString("yyyy-MM-dd HH:mm:ss.zzz");



                qDebug()<<"car_inTime:"<<car_inTime;
                qDebug()<<"car_outTime:"<<car_outTime;

                WORD speed=struITSPlateResult.struVehicleInfo.wSpeed;
                std::cout<<"speed:"<<speed<<std::endl;


            }

        }
        break;
    }
    default:
        break;
    }

    return;
}
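The comment at the top of cbMessageCallbackInstance warns against saving files inside the callback, since it runs on an SDK thread and blocking I/O there can delay alarm delivery. One way to act on that advice in Qt, sketched below, is to deep-copy the picture buffer into a QByteArray and emit it through a signal. The signal name plateSnapshotReady and the objects cameraThread/receiver are assumptions for illustration; they do not exist in the code above, and the receiving side needs #include <QFile>.

// Hypothetical addition to the signals: section of CameraThread:
//     void plateSnapshotReady(QByteArray jpeg, QString fileName);

// Inside the callback, replace the fopen/fwrite block with a deep copy plus emit.
// The SDK buffer is only valid for the duration of the callback, hence the copy.
QByteArray jpeg(reinterpret_cast<const char*>(struPlateResult.pBuffer1),
                static_cast<int>(struPlateResult.dwPicLen));
emit plateSnapshotReady(jpeg, QString("testpic_%1.jpg").arg(iNum++));

// Receiving side (queued connection, so the write happens off the SDK thread):
QObject::connect(cameraThread, &CameraThread::plateSnapshotReady, receiver,
                 [](const QByteArray &jpeg, const QString &fileName) {
                     QFile f(fileName);
                     if (f.open(QIODevice::WriteOnly))
                         f.write(jpeg);
                 });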

int CameraThread::registerHikonSDKCallback(char* ipaddress,char* username,char* password,int port,int channelNO)
{
    //---------------------------------------
    // Initialize the SDK
    NET_DVR_Init();

    // Set connection and reconnection times
    NET_DVR_SetConnectTime(2000, 1);
    NET_DVR_SetReconnect(10000, true);

    //---------------------------------------
    // Register the device

    // Login parameters: device address, user name, password, etc.
    LONG lUserID = -1; // initialize lUserID
    NET_DVR_USER_LOGIN_INFO struLoginInfo = {0};     // login structure, zero-initialized
    struLoginInfo.bUseAsynLogin = 0;                 // synchronous login
    strcpy(struLoginInfo.sDeviceAddress, ipaddress); // device IP address (from parameter)
    struLoginInfo.wPort = port;                      // service port (from parameter)
    strcpy(struLoginInfo.sUserName, username);       // user name
    strcpy(struLoginInfo.sPassword, password);       // password

    // Device information (output parameter)
    NET_DVR_DEVICEINFO_V40 struDeviceInfoV40 = {0};  // V40 device-info structure

    lUserID = NET_DVR_Login_V40(&struLoginInfo, &struDeviceInfoV40); // log in; device info is returned in struDeviceInfoV40
    if (lUserID < 0)
    {
        // Print the error code when login fails
        qDebug("Login failed, error code: %d\n", NET_DVR_GetLastError());
        NET_DVR_Cleanup(); // release SDK resources
        return 0;
    }

    //---------------------------------------
    // Arm the device for alarm upload

    // Set the alarm callback function
    //NET_DVR_SetDVRMessageCallBack_V31(MSesGCallback, NULL);

    /* Note: even with multiple devices the callback only needs to be set once;
       events from different devices cannot be delivered to different callbacks. */
    NET_DVR_SetDVRMessageCallBack_V50(0, cbMessageCallback, this); // pass NULL when calling from the main thread; pass this when calling inside a class



    // Enable arming
    NET_DVR_SETUPALARM_PARAM struSetupParam={0};            // arming structure, zero-initialized
    struSetupParam.dwSize=sizeof(NET_DVR_SETUPALARM_PARAM); // size of NET_DVR_SETUPALARM_PARAM
    struSetupParam.byLevel = 1;                             // arming priority: 0 - high, 1 - medium
    struSetupParam.byAlarmInfoType = 1;                     // alarm info type: 0 - old (NET_DVR_PLATE_RESULT), 1 - new (NET_ITS_PLATE_RESULT)

    LONG lHandle = NET_DVR_SetupAlarmChan_V41(lUserID,&struSetupParam); // start arming with the callback set above
    // If lHandle < 0 arming failed: log out and clean up
    if (lHandle < 0)
    {
        qDebug("NET_DVR_SetupAlarmChan_V41 failed, error code: %d\n", NET_DVR_GetLastError());
        NET_DVR_Logout(lUserID);
        NET_DVR_Cleanup();
        return 0;
    }
    qDebug("Arming succeeded!\n");

    //---------------------------------------
    // Network-triggered capture

    NET_DVR_SNAPCFG struSnapCfg;
    memset(&struSnapCfg, 0, sizeof(NET_DVR_SNAPCFG));

    // Structure size
    struSnapCfg.dwSize = sizeof(NET_DVR_SNAPCFG);

    // Loop-triggered capture count: 0 - no capture, non-zero - number of continuous shots (max 5)
    struSnapCfg.bySnapTimes  = 0;

    // Capture wait time in ms, range [0, 60000]
    struSnapCfg.wSnapWaitTime   = 1;//1000

    // Interval between continuous shots in ms, range [67, 60000]
    struSnapCfg.wIntervalTime[0]  = 67;//1000
    struSnapCfg.wIntervalTime[1]  = 67;//1000

    // Lane number associated with the trigger IO, range [0, 9]
    struSnapCfg.byRelatedDriveWay = channelNO;

    // Trigger continuous capture over the network
    if (!NET_DVR_ContinuousShoot(lUserID, &struSnapCfg))
    {
        qDebug("NET_DVR_ContinuousShoot failed, error code: %d\n", NET_DVR_GetLastError());
        return 0;
    }
    qDebug("Network-triggered continuous capture started!\n");

    //    Sleep(20000); // wait to receive data

    //    //---------------------------------------
    //    // Tear down

    //    // Close the alarm upload channel
    //    if (!NET_DVR_CloseAlarmChan_V30(lHandle))
    //    {
    //        qDebug("NET_DVR_CloseAlarmChan_V30 failed, error code: %d\n", NET_DVR_GetLastError());
    //        NET_DVR_Logout(lUserID);
    //        NET_DVR_Cleanup();
    //        return 0;
    //    }

    //    // Log the user out
    //    NET_DVR_Logout(lUserID);

    //    // Release SDK resources
    //    NET_DVR_Cleanup();

    return 0;
}
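Note that registerHikonSDKCallback returns 0 on every path and discards lUserID and lHandle, so the arming channel and login session can never be released; the teardown calls only exist in the commented block above. A possible refactor, sketched here under the assumption that two members such as LONG m_lUserID and LONG m_lAlarmHandle are added to CameraThread and assigned inside this function, would run the same sequence on shutdown. Keep in mind that NET_DVR_Init()/NET_DVR_Cleanup() are process-wide, so with several cameras they are better called once at application startup and shutdown rather than per thread.

// Sketch only: assumes LONG m_lUserID = -1; and LONG m_lAlarmHandle = -1; members
// were added to CameraThread and set from registerHikonSDKCallback.
void CameraThread::releaseHikSDK() // hypothetical helper, e.g. called from ~CameraThread()
{
    if (m_lAlarmHandle >= 0)
        NET_DVR_CloseAlarmChan_V30(m_lAlarmHandle); // stop the alarm upload channel
    if (m_lUserID >= 0)
        NET_DVR_Logout(m_lUserID);                  // log the user out
    NET_DVR_Cleanup();                              // release SDK resources (process-wide)
}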

int CameraThread::gpu_CheckPoint(int spec_x, int spec_y, cv::UMat frame)
{
    ret=0;
    if(spec_x>frame.cols||spec_y>frame.rows||frame.empty())
    {
        return  0;
    }
    frameCount++; // do not move this above the checks: only valid frames are counted
    // Adjust the detection region to the frame resolution. The defaults (w:50px, h:20px)
    // assume a 352x288 frame; a larger detection region is slower and misses more
    // vehicles, so 352x288 is the recommended resolution for detection.
    roiWidth=50*frame.cols/352;
    if(frame.size().width==1920){
        roiHeight=20*1080/288;
    }else if(frame.size().width==1280){
        roiHeight=20*720/288;
    }else if(frame.size().width==704){
        roiHeight=20*576/288;
    }else if(frame.size().width==640){
        roiHeight=20*480/288;
    }else if(frame.size().width==352){
        roiHeight=20*288/288;
    }
    // Detection rectangle (the "virtual line")
    Rect virtualLine(spec_x, spec_y, roiWidth, roiHeight); // default 50x20

    // 1. Crop the detection strip
    frame(virtualLine).copyTo(subRegion_gpu);

    cv::cvtColor(subRegion_gpu, grayFrame_gpu,cv::COLOR_BGR2GRAY);

    // Increase brightness (the scale factor is set here)
    grayFrame_gpu.convertTo(grayFrame_gpu, -1, 1.5, 0);

    // If a previous frame exists
    if (!prevFrame_gpu.empty())
    {
        // Difference between the current and previous frame
        absdiff(prevFrame_gpu, grayFrame_gpu, frameDiff_gpu);
        // Remove noise. This median blur is essential against camera shake;
        // removing it causes spurious counts, and the kernel must be at least 7.
        medianBlur(frameDiff_gpu,median_img_gpu,7);
        // Remove shadows
        cv::threshold(median_img_gpu, thres_img_gpu, 127, 255, cv::THRESH_BINARY);

        // Morphological opening to remove noise and fill holes (a 4x4 kernel was found ideal)
        cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(2, 2));
        cv::morphologyEx(thres_img_gpu, thres_img_gpu, cv::MORPH_OPEN, kernel);

        // The difference image must be binarized before contours can be found
        threshold(thres_img_gpu, thresholdedDiff_gpu, 30, 255, THRESH_BINARY);

        // Contour detection
        vector<vector<Point>> contours;
        vector<Vec4i> hierarchy;
        findContours(thresholdedDiff_gpu, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);


        //         double t = 0.0;
        //         double t1 = 0.0;
        //         t = (double)cv::getTickCount();
        //         t1 = (double)cv::getTickCount();
        //         // Difference between the current and previous frame
        //         absdiff(prevFrame_gpu, grayFrame_gpu, frameDiff_gpu);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "absdiff Time Cost:" << t1 << std::endl;
        //         t1 = (double)cv::getTickCount();
        //         // Remove noise (median blur; kernel must be at least 7 to suppress camera shake)
        //         medianBlur(frameDiff_gpu,median_img_gpu,7);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "medianBlur Time Cost:" << t1 << "s" << std::endl;
        //         // Remove shadows
        //         t1 = (double)cv::getTickCount();
        //         cv::threshold(median_img_gpu, thres_img_gpu, 127, 255, cv::THRESH_BINARY);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "threshold Time Cost:" << t1 << "s" << std::endl;
        //         // Morphological opening to remove noise and fill holes
        //         t1 = (double)cv::getTickCount();
        //         cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(2, 2));
        //         cv::morphologyEx(thres_img_gpu, thres_img_gpu, cv::MORPH_OPEN, kernel);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "morphologyEx Time Cost:" << t1 << "s" << std::endl;

        //         // Binarize the difference image so contours can be found
        //         t1 = (double)cv::getTickCount();
        //         threshold(thres_img_gpu, thresholdedDiff_gpu, 30, 255, THRESH_BINARY);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "threshold Time Cost:" << t1 << "s" << std::endl;

        //         // Contour detection
        //         t1 = (double)cv::getTickCount();
        //         vector<vector<Point>> contours;
        //         vector<Vec4i> hierarchy;
        //         findContours(thresholdedDiff_gpu, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "find Time Cost:" << t1 << "s" << std::endl;

        //         t = ((double)cv::getTickCount() - t) / cv::getTickFrequency();
        //         std::cout << "gpu Time Cost:" << t <<std::endl;
        //         std::cout<<"gpu access ended!"<<std::endl;

        //         framecount++;
        //         if(framecount>10){
        //             usedTimecnt+=t;
        //             double avg=usedTimecnt/(framecount-10);

        //             std::cout <<"framecount:"<<framecount<< " gpu Time avg:" << avg << std::endl;
        //         }

        // Counting logic:
        //when the number of contours is not empty
        if(!contours.empty()){
            //come in area
            noContourFrameCount=0;//if have contours then the value=0
            if(!carin2){
                cout<<"already come in"<<endl;
                carin2=true;                         //come in detection area
                carout2=false;                       //not leave detection area
                nocar2=true;                         //have car
                //               countTime=QTime::currentTime();
                countTime.restart();
                isCount2=false;                      //count flag
                carIn_datetime=QTime::currentTime();
                beginTime=QTime::currentTime();
                if(pusleDataStatus==2){
                    pusleDataStatus =1; // mark that a vehicle has entered the detection zone
                    checked_Car_Tag=QDateTime::currentDateTime().toString("yyyyMMddHHmmss_zzz");
                }

            }
        }
        //when the number of contours is empty
        else{
            noContourFrameCount++;
            //when not counted
            if(noContourFrameCount>5){
                if(!isCount2){
                    cout<<"already leave "<<endl;
                    carin2=false;          //car not come in detection area
                    carout2=true;          //car already leave detection area
                    nocar2=false;          //no have car
                    if(countTime.elapsed()>850){ // guards against double counting (e.g. long buses); originally 450
                        carCount++;
                        isCount2=true; //already counted
                        carOut_datetime=QTime::currentTime();
                        if(pusleDataStatus==1){
                            pusleDataStatus =0; // mark that the vehicle has left the detection zone
                        }
                        ret=1;
                        cout<<"already count"<<endl;
                    }
                }
            }

        }
    }
    // Save the current grayscale image as the previous frame
    prevFrame_gpu = grayFrame_gpu.clone();

    return ret;
}
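A quick worked example of the region scaling above, since the order of the integer arithmetic matters (multiplying before dividing keeps precision): for the 1280x720 sub-stream the detection strip becomes 181x50 px, and for 1920x1080 it becomes 272x75 px.

// Worked example of the detection-region scaling (integer arithmetic):
int w1280 = 50 * 1280 / 352;   // = 181 px  (compare 1280/352*50 = 150 if divided first)
int h720  = 20 * 720  / 288;   // = 50 px
int w1920 = 50 * 1920 / 352;   // = 272 px
int h1080 = 20 * 1080 / 288;   // = 75 px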

int CameraThread::cpu_CheckPoint(int spec_x, int spec_y, cv::Mat frame)
{
    ret=0;
    if(spec_x>frame.cols||spec_y>frame.rows||frame.empty())
    {
        return  0;
    }
    frameCount++; // do not move this above the checks: only valid frames are counted
    // Adjust the detection region to the frame resolution. The defaults (w:50px, h:20px)
    // assume a 352x288 frame; a larger detection region is slower and misses more
    // vehicles, so 352x288 is the recommended resolution for detection.
    roiWidth=50*frame.cols/352;
    if(frame.size().width==1920){
        roiHeight=20*1080/288;
    }else if(frame.size().width==1280){
        roiHeight=20*720/288;
    }else if(frame.size().width==704){
        roiHeight=20*576/288;
    }else if(frame.size().width==640){
        roiHeight=20*480/288;
    }else if(frame.size().width==352){
        roiHeight=20*288/288;
    }
    // Detection rectangle (the "virtual line")
    Rect virtualLine(spec_x, spec_y, roiWidth, roiHeight); // default 50x20

    // 1. Crop the detection strip
    frame(virtualLine).copyTo(subRegion_cpu);

    cv::cvtColor(subRegion_cpu, grayFrame,cv::COLOR_BGR2GRAY);

    // Increase brightness (the scale factor is set here)
    grayFrame.convertTo(grayFrame, -1, 1.5, 0);

    // If a previous frame exists
    if (!prevFrame_cpu.empty())
    {
        // Difference between the current and previous frame
        absdiff(prevFrame_cpu, grayFrame, frameDiff);
        // Remove noise. This median blur is essential against camera shake;
        // removing it causes spurious counts, and the kernel must be at least 7.
        medianBlur(frameDiff,median_img,7);
        // Remove shadows
        cv::threshold(median_img, thres_img, 127, 255, cv::THRESH_BINARY);

        // Morphological opening to remove noise and fill holes (a 4x4 kernel was found ideal)
        cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(2, 2));
        cv::morphologyEx(thres_img, thres_img, cv::MORPH_OPEN, kernel);

        // The difference image must be binarized before contours can be found
        threshold(thres_img, thresholdedDiff, 30, 255, THRESH_BINARY);

        // Contour detection
        vector<vector<Point>> contours;
        vector<Vec4i> hierarchy;
        findContours(thresholdedDiff, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

        //         double t = 0.0;
        //         double t1 = 0.0;
        //         t = (double)cv::getTickCount();
        //         t1 = (double)cv::getTickCount();
        //         // Difference between the current and previous frame
        //         absdiff(prevFrame_cpu, grayFrame, frameDiff);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "absdiff Time Cost:" << t1 << "s" << std::endl;
        //         t1 = (double)cv::getTickCount();
        //         // Remove noise (median blur; kernel must be at least 7 to suppress camera shake)
        //         medianBlur(frameDiff,median_img,7);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "medianBlur Time Cost:" << t1 << "s" << std::endl;
        //         // Remove shadows
        //         t1 = (double)cv::getTickCount();
        //         cv::threshold(median_img, thres_img, 127, 255, cv::THRESH_BINARY);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "threshold Time Cost:" << t1 << "s" << std::endl;
        //         // Morphological opening to remove noise and fill holes
        //         t1 = (double)cv::getTickCount();
        //         cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(2, 2));
        //         cv::morphologyEx(thres_img, thres_img, cv::MORPH_OPEN, kernel);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "morphologyEx Time Cost:" << t1 << "s" << std::endl;

        //         // Binarize the difference image so contours can be found
        //         t1 = (double)cv::getTickCount();
        //         threshold(thres_img, thresholdedDiff, 30, 255, THRESH_BINARY);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "threshold Time Cost:" << t1 << "s" << std::endl;

        //         // Contour detection
        //         t1 = (double)cv::getTickCount();
        //         vector<vector<Point>> contours;
        //         vector<Vec4i> hierarchy;
        //         findContours(thresholdedDiff, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
        //         t1 = ((double)cv::getTickCount() - t1) / cv::getTickFrequency();
        //         std::cout << "find Time Cost:" << t1 << "s" << std::endl;

        //         t = ((double)cv::getTickCount() - t) / cv::getTickFrequency();

        //         std::cout << "cpu Time Cost:" << t << std::endl;
        //         std::cout<<"cpu access ended!"<<std::endl;
        //         framecount++;
        //         if(framecount>10){
        //             usedTimecnt+=t;
        //             double avg=usedTimecnt/(framecount-10);
        //             std::cout <<"framecount:"<<framecount<< " cpu Time avage:" << avg << std::endl;
        //         }


        // Counting logic:
        //when the number of contours is not empty
        if(!contours.empty()){
             noContourFrameCount=0;//if have contours then the value=0
            //come in area
            if(!carin2){
                cout<<"already come in"<<endl;
                carin2=true;                         //come in detection area
                carout2=false;                       //not leave detection area
                nocar2=true;                         //have car
                //               countTime=QTime::currentTime();
                countTime.restart();
                isCount2=false;                      //count flag
                carIn_datetime=QTime::currentTime();
                beginTime=QTime::currentTime();
                if(pusleDataStatus==2){
                    pusleDataStatus =1; // mark that a vehicle has entered the detection zone
                    checked_Car_Tag=QDateTime::currentDateTime().toString("yyyyMMddHHmmss_zzz");
                }

            }
        }
        //when the number of contours is empty
        else{
            noContourFrameCount++;
            //when not counted
            if(noContourFrameCount>10){
                //when not counted
                if(!isCount2){
                    cout<<"already leave "<<endl;
                    carin2=false;          //car not come in detection area
                    carout2=true;          //car already leave detection area
                    nocar2=false;          //no have car
                    if(countTime.elapsed()>650){ // guards against double counting (e.g. long buses); originally 450
                        carCount++;
                        isCount2=true; //already counted
                        carOut_datetime=QTime::currentTime();
                        if(pusleDataStatus==1){
                            pusleDataStatus =0; // mark that the vehicle has left the detection zone
                        }
                        ret=1;
                        cout<<"already count"<<endl;
                    }
                }
            }

        }
    }
    // Save the current grayscale image as the previous frame
    prevFrame_cpu = grayFrame.clone();

    return ret;
}
void CameraThread::gpu_process(UMat frame){
    std::vector<cv::ocl::PlatformInfo> plats;
    cv::ocl::getPlatfomsInfo(plats);
    const cv::ocl::PlatformInfo *platform = &plats[0];
    //cout << "Platform Name:" << platform->name().c_str() << endl;
    cv::ocl::Device dev;
    platform->getDevice(dev,0);
    //cout << "Device name:" << dev.name().c_str() << endl;

    cv::ocl::setUseOpenCL(true);
    //cout << "Use the OpenCL Deivice?" << cv::ocl::useOpenCL() << endl;
    double t = 0.0;
    UMat  grayFrame, edges;
    t = (double)cv::getTickCount();
    cv::cvtColor(frame,grayFrame,cv::COLOR_BGR2GRAY);
    // Edge detection
    cv::Canny(grayFrame, edges, 50, 150);
    t = ((double)cv::getTickCount() - t) / cv::getTickFrequency();
    std::cout << "GPU Time Cost:" << t << "s" << std::endl;
    putText(edges, "Platform : " + platform->name(), Point(5, 30), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2);
    putText(edges, "Device : " + dev.name(), Point(5, 60), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2);
    putText(edges, "Time : " + std::to_string(t) + " s", Point(5, 90), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2);

}
void CameraThread::cpu_process(Mat frame){
    std::vector<cv::ocl::PlatformInfo> plats;
    cv::ocl::getPlatfomsInfo(plats);
    const cv::ocl::PlatformInfo *platform = &plats[0];
    //cout << "Platform Name:" << platform->name().c_str() << endl;
    cv::ocl::Device dev;
    platform->getDevice(dev,0);
    // cout << "Device name:" << dev.name().c_str() << endl;

    cv::ocl::setUseOpenCL(false);
    //cout << "Use the OpenCL Deivice?" << cv::ocl::useOpenCL() << endl;
    double t = 0.0;
    Mat grayFrame, edges;
    t = (double)cv::getTickCount();
    cv::cvtColor(frame,grayFrame,cv::COLOR_RGB2GRAY);
    // Edge detection
    cv::Canny(grayFrame, edges, 50, 150);
    t = ((double)cv::getTickCount() - t) / cv::getTickFrequency();
    // std::cout << "CPU Time Cost:" << t << "s" << std::endl;
    putText(edges, "Time : " + std::to_string(t) + " s", Point(5, 30), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2);
}
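The #define GPU 0 near the top of this file is not referenced anywhere in the listing; it presumably selects between the UMat (OpenCL) and Mat code paths. A minimal way it could be wired up inside run(), shown only as a sketch, is:

// Sketch: selecting the processing path in run() with the GPU macro.
#if GPU
        count += gpu_CheckPoint(x, y, frame);    // frame is already a cv::UMat
#else
        cv::Mat cpuFrame;
        frame.copyTo(cpuFrame);                  // download the UMat into a cv::Mat
        count += cpu_CheckPoint(x, y, cpuFrame);
#endif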
void CameraThread::run()
{

    cv::VideoCapture cap(cgd.rtsp[cameraIndex],CAP_ANY);
    if (!cap.isOpened()) {
        qWarning() << "Failed to open camera" << cameraIndex;
        return;
    }
    int ms=0;
    int count=0;
    while (!isInterruptionRequested()) {
        cv::UMat frame;
        cap >> frame;

        if (frame.empty())
            continue;

        // Convert the OpenCV frame to a QImage
        Mat temp;
        frame.copyTo(temp);
        QImage qImage = QImage((uchar*)(temp.data), temp.cols, temp.rows, temp.step, QImage::Format_RGB888).copy();
        qImage = qImage.rgbSwapped(); // swap BGR to RGB
        // Per-camera detection-region origin, defined for a 1280-wide frame;
        // y is 330 for every camera and only x differs per camera index.
        static const int baseX[8] = {360, 430, 100, 460, 300, 560, 360, 460};
        int x = 0, y = 0;
        if (qImage.width() == 1280) {
            x = baseX[cameraIndex];
            y = 330;
        } else if (qImage.width() == 1920) {
            double xs = 1920.0 / 1280.0; // scale the 1280-wide coordinates up to 1920
            x = baseX[cameraIndex] * xs;
            y = 330 * xs;
        }

        //count+=gpu_CheckPoint(x,y,frame);//1080X720
        QFont font("Arial",36,QFont::Bold);
        QPen pen(Qt::yellow);
        pen.setWidth(1);
        QPainter painter(&qImage);
        painter.setFont(font);
        painter.setPen(pen);

        painter.drawText(QPointF(x,y),"Cnt:"+QString::number(count));
        pen.setWidth(2);
        painter.drawRect(x, y, 50*qImage.width()/352, 20*qImage.height()/288); // draw the detection region at the same size cpu/gpu_CheckPoint uses

        painter.end();

        // Emit signals so the main window updates its QLabel
        emit frameReady(qImage, cameraIndex);
        emit frameReady2(cameraIndex,count_1);


        //        QTime startTime=QTime::currentTime();
        //gpu_process(frame);

        //        count++;
        //        ms+=startTime.msecsTo(QTime::currentTime());
        //        std::cout<<"cpu:"<<ms<<" count:"<<count<<std::endl;
    }
    cap.release();
}

4. Main window
Header:

#ifndef MAINWINDOW_H
#define MAINWINDOW_H

#include <QMainWindow>
#include <QLabel>
#include <QVector>
#include <QThread>
#include "ui_mainwindow.h"
using namespace  std;


class MainWindow : public QWidget
{
    Q_OBJECT

public:
    explicit MainWindow(QWidget *parent = nullptr);
    ~MainWindow();



private:
   // Ui::MainWindow *ui;
    QVector<QLabel*> labelList;
    QVector<QThread*> threadList;


};

#endif // MAINWINDOW_H


Source:

#include "mainwindow.h"
#include "camerathread.h"
#include <QHBoxLayout>
#include <QPen>
#include <QPainter>
#include <QPointF>

MainWindow::MainWindow(QWidget *parent)
    : QWidget(parent)
{
    QVBoxLayout *verticalLayout=new QVBoxLayout();
    QHBoxLayout *horizontaiLayout_1=new QHBoxLayout();
    QHBoxLayout *horizontaiLayout_2=new QHBoxLayout();

    // Create eight QLabels (two rows of four) and add them to the main window
    for (int k = 0; k < 4; ++k) {
        QLabel *label = new QLabel();
        label->setStyleSheet("border:1px solid gray");
        label->setFixedSize(352,288);
        label->setScaledContents(true);
        horizontaiLayout_1->addWidget(label);
        labelList.append(label);
    }
    for (int j = 0; j < 4; ++j) {
        QLabel *label2 = new QLabel();
        label2->setStyleSheet("border:1px solid gray");
        label2->setFixedSize(352,288);
        label2->setScaledContents(true);
        horizontaiLayout_2->addWidget(label2);
        labelList.append(label2);
    }
    verticalLayout->addLayout(horizontaiLayout_1);
    verticalLayout->addLayout(horizontaiLayout_2);
    setLayout(verticalLayout);
    setFixedSize(1600,800);

    std::cout<<"labelList length:"<<labelList.length()<<std::endl;



    for (int i = 0; i < 8; ++i) {
        CameraThread *cameraThread = new CameraThread(labelList[i],i,this);
        QThread *thread = new QThread(this);
        cameraThread->moveToThread(thread);

        connect(thread, &QThread::started, this, [cameraThread]() {
            cameraThread->start();
        });
        connect(cameraThread, &CameraThread::frameReady, this, [this](const QImage &frame, int index) {
            //std::cout<<"index:"<<index<<std::endl;
            labelList[index]->setPixmap(QPixmap::fromImage(frame));
        });
//        connect(cameraThread, &CameraThread::frameReady2, this, [this](int index,int count) {
//            std::cout<<"index:"<<index<<std::endl;
//            labelList[index]->setText(QString::number(count));
//        });
        connect(cameraThread, &QThread::finished, thread, &QThread::deleteLater);
        connect(thread, &QThread::finished, cameraThread, &CameraThread::deleteLater);

        threadList.append(thread);
        thread->start();
        QThread::msleep(30);
    }
}
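One structural note on the loop above: CameraThread already subclasses QThread and does its capture work in run(), so moving it to a second QThread and starting that wrapper is redundant; cameraThread->start() spawns its own thread either way, and quitting the wrapper thread in ~MainWindow() does not interrupt the capture loop (run() checks isInterruptionRequested() on the CameraThread itself). A simpler wiring, sketched under the assumption that the member becomes a QVector<CameraThread*> named cameraThreads, could look like this:

    // Sketch: start the QThread subclass directly, no wrapper thread.
    for (int i = 0; i < 8; ++i) {
        CameraThread *cameraThread = new CameraThread(labelList[i], i, this);
        connect(cameraThread, &CameraThread::frameReady, this,
                [this](const QImage &frame, int index) {
                    labelList[index]->setPixmap(QPixmap::fromImage(frame));
                });
        cameraThreads.append(cameraThread);  // hypothetical QVector<CameraThread*> member
        cameraThread->start();
    }

    // and in ~MainWindow():
    // for (CameraThread *t : cameraThreads) { t->requestInterruption(); t->wait(); }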

MainWindow::~MainWindow()
{

    for (QThread *thread : threadList) {
        thread->quit();
        thread->wait();
    }


}

5. Project file (.pro):

QT       += core gui

greaterThan(QT_MAJOR_VERSION, 4): QT += widgets

CONFIG += c++11

# You can make your code fail to compile if it uses deprecated APIs.
# In order to do so, uncomment the following line.
#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000    # disables all the APIs deprecated before Qt 6.0.0
win32{
    INCLUDEPATH += D:/opencv4.1.2/build/install/include/
    LIBS += D:/opencv4.1.2/build/install/x86/mingw/bin/libopencv_*.dll

}

SOURCES += \
    cameraglobaldata.cpp \
    camerathread.cpp \
    main.cpp \
    mainwindow.cpp

HEADERS += \
    cameraglobaldata.h \
    camerathread.h \
    mainwindow.h

FORMS += \
    mainwindow.ui

# Default rules for deployment.
qnx: target.path = /tmp/$${TARGET}/bin
else: unix:!android: target.path = /opt/$${TARGET}/bin
!isEmpty(target.path): INSTALLS += target

unix:!macx: LIBS +=/usr/lib/aarch64-linux-gnu/libopencv_*.so.4.5.1

INCLUDEPATH += $$PWD/../../../usr/include/opencv4
DEPENDPATH += $$PWD/../../../usr/include/opencv4



win32: LIBS += -L'D:/Program Files/HKVISION32/lib/' -lHCNetSDK
INCLUDEPATH += 'D:/Program Files/HKVISION32/include'
DEPENDPATH += 'D:/Program Files/HKVISION32/include'
win32:!win32-g++: PRE_TARGETDEPS += 'D:/Program Files/HKVISION32/lib/HCNetSDK.lib'
else:win32-g++: PRE_TARGETDEPS += 'D:/Program Files/HKVISION32/lib/HCNetSDK.lib'

Written by Qiufeng in Zibo. For business inquiries and technical exchange: Q3717665
