SS928 KCF Tracking Algorithm

1. Introduction to the KCF Algorithm

KCF stands for Kernelized Correlation Filter, a tracking algorithm that delivers both good tracking quality and high speed. OpenCV ships a complete KCF implementation, and the IVE unit on the HiSilicon platform implements KCF in hardware; the interfaces and calling flow of the two are essentially the same.
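
For comparison, here is a minimal sketch of the same init-once / track-per-frame pattern using OpenCV's KCF (it assumes OpenCV 4.5+ built with the opencv_contrib tracking module and is only an illustration, not part of the SS928 project):

#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>

int main()
{
    cv::VideoCapture cap(0);                       // any video source
    cv::Mat frame;
    if (!cap.read(frame)) {
        return -1;
    }
    cv::Rect box = cv::selectROI("select target", frame);  // user draws the initial box
    cv::Ptr<cv::TrackerKCF> tracker = cv::TrackerKCF::create();
    tracker->init(frame, box);                     // train on the first frame
    while (cap.read(frame)) {
        if (tracker->update(frame, box)) {         // track on every following frame
            cv::rectangle(frame, box, cv::Scalar(0, 255, 0), 2);
        }
        cv::imshow("kcf", frame);
        if (cv::waitKey(1) == 27) {
            break;
        }
    }
    return 0;
}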

The KCF documentation in the SS928 SDK can be found at
SS928V100R001C02SPC022\ReleaseDoc\zh\01.software\board\SVP\IVE API 参考.pdf (KCF-related interfaces)
The sample code path is
SS928V100_SDK_V2.0.2.2/smp/a55_linux/mpp/sample/svp/ive

2. Key KCF Algorithm Flow

2.1 KCF Flow Overview

See ss_mpi_ive_kcf_proc in the IVE API reference (IVE API 参考.pdf).

[Figure: KCF processing flow]

2.2 Key KCF Implementation Code

The initialization mainly allocates MMZ memory; the exact size required by each structure is described in the corresponding API documentation.

static td_void sample_ive_kcf_info_common_init(st_ot_sample_ive_kcf_info* kcf_info, td_u32 size)
{
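    /* Carve the single MMZ allocation into consecutive regions:
     * object list | Gaussian peak | cosine window x | cosine window y | temporary buffer */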
    kcf_info->list_mem.phys_addr = kcf_info->total_mem.phys_addr;
    kcf_info->list_mem.virt_addr = kcf_info->total_mem.virt_addr;
    kcf_info->list_mem.size = size;
    kcf_info->gauss_peak.phys_addr = kcf_info->list_mem.phys_addr + size;
    kcf_info->gauss_peak.virt_addr = kcf_info->list_mem.virt_addr + size;
    kcf_info->gauss_peak.size = OT_SAMPLE_IVE_KCF_GAUSS_PEAK_TOTAL_SIZE;
    kcf_info->cos_win_x.phys_addr = kcf_info->gauss_peak.phys_addr + OT_SAMPLE_IVE_KCF_GAUSS_PEAK_TOTAL_SIZE;
    kcf_info->cos_win_x.virt_addr = kcf_info->gauss_peak.virt_addr + OT_SAMPLE_IVE_KCF_GAUSS_PEAK_TOTAL_SIZE;
    kcf_info->cos_win_x.size = OT_SAMPLE_IVE_KCF_COS_WINDOW_TOTAL_SIZE;
    kcf_info->cos_win_y.phys_addr = kcf_info->cos_win_x.phys_addr + OT_SAMPLE_IVE_KCF_COS_WINDOW_TOTAL_SIZE;
    kcf_info->cos_win_y.virt_addr = kcf_info->cos_win_x.virt_addr + OT_SAMPLE_IVE_KCF_COS_WINDOW_TOTAL_SIZE;
    kcf_info->cos_win_y.size = OT_SAMPLE_IVE_KCF_COS_WINDOW_TOTAL_SIZE;
    kcf_info->kcf_proc_ctrl.tmp_buf.phys_addr = kcf_info->cos_win_y.phys_addr +
        OT_SAMPLE_IVE_KCF_COS_WINDOW_TOTAL_SIZE;
    kcf_info->kcf_proc_ctrl.tmp_buf.virt_addr = kcf_info->cos_win_y.virt_addr +
        OT_SAMPLE_IVE_KCF_COS_WINDOW_TOTAL_SIZE;
    kcf_info->kcf_proc_ctrl.tmp_buf.size = OT_SAMPLE_IVE_KCF_TEMP_BUF_SIZE;
    kcf_info->kcf_proc_ctrl.csc_mode = OT_IVE_CSC_MODE_VIDEO_BT709_YUV_TO_RGB;
    kcf_info->kcf_proc_ctrl.interp_factor = OT_SAMPLE_IVE_KCF_INTERP_FACTOR;
    kcf_info->kcf_proc_ctrl.lamda = OT_SAMPLE_IVE_KCF_LAMDA;
    kcf_info->kcf_proc_ctrl.sigma = OT_SAMPLE_IVE_KCF_SIGMA;
    kcf_info->kcf_proc_ctrl.norm_trunc_alfa = OT_SAMPLE_IVE_KCF_NORM_TRUNC_ALFA;
    kcf_info->kcf_proc_ctrl.response_threshold = OT_SAMPLE_IVE_KCF_RESP_THR;
    kcf_info->padding = OT_SAMPLE_IVE_KCF_PADDING;
    kcf_info->kcf_bbox_ctrl.max_bbox_num = OT_SAMPLE_IVE_KCF_NODE_MAX_NUM;
    kcf_info->kcf_bbox_ctrl.response_threshold = 0;
}

void init_kcf()
{
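    /* Query the object-list memory size, allocate one MMZ block covering all KCF buffers,
     * then create the object list, Gaussian peak and cosine windows */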
    td_s32 ret = OT_ERR_IVE_NULL_PTR;
    (td_void)memset_s(&kcf_info, sizeof(st_ot_sample_ive_kcf_info), 0, sizeof(st_ot_sample_ive_kcf_info));
    td_u32 total_size;
    td_u32 size;
    ret = ss_mpi_ive_kcf_get_mem_size(OT_SAMPLE_IVE_KCF_NODE_MAX_NUM, &size);

    /* (HOGFeature + Alpha + DstBuf) + GaussPeak + CosWinX + CosWinY + TmpBuf */
    total_size = size + OT_SAMPLE_IVE_KCF_GAUSS_PEAK_TOTAL_SIZE + OT_SAMPLE_IVE_KCF_COS_WINDOW_TOTAL_SIZE *
        OT_SAMPLE_NUM_TWO + OT_SAMPLE_IVE_KCF_TEMP_BUF_SIZE;
    ret = sample_common_ive_create_mem_info(&kcf_info.total_mem, total_size);
    (td_void)memset_s((td_u8*)(td_uintptr_t)kcf_info.total_mem.virt_addr, total_size, 0x0, total_size);
    sample_ive_kcf_info_common_init(&kcf_info, size);
    ret = ss_mpi_ive_kcf_create_obj_list(&kcf_info.list_mem, OT_SAMPLE_IVE_KCF_NODE_MAX_NUM, &kcf_info.obj_list);
    ret = ss_mpi_ive_kcf_create_gauss_peak(kcf_info.padding, &kcf_info.gauss_peak);
    ret = ss_mpi_ive_kcf_create_cos_win(&kcf_info.cos_win_x, &kcf_info.cos_win_y);
    kcf_info.is_first_det = TD_FALSE;
    kcf_info.needTrain = TD_FALSE;
}

Calling flow: after KCF has been initialized, the tracking region is refreshed from the coordinates supplied by the user.


pthread_t kcf_pid;
pthread_t WebStream_pid;
int running = 0;
void* kcf_proc(void* parg)
{
    td_s32 milli_sec = 40;
    td_s32  ret;
    ot_vpss_grp grp = 0;
    ot_vpss_chn chn = 0;
    ot_video_frame_info frame_info;
    kcf_info.is_train = TD_FALSE;
    kcf_info.needTrain = TD_FALSE;
    ot_ive_handle handle;
    ot_ive_kcf_bbox bbox;
    static ot_sample_ive_kcf_bbox_info bbox_info = { 0 };
    static ot_sample_ive_kcf_roi_info roi_info = { 0 };
    static ot_sample_ive_kcf_roi_info new_roi_info = { 0 };
    init_kcf();
    while (running)
    {
        ot_usleep(1000);
        ret = ss_mpi_vpss_get_chn_frame(grp, chn, &frame_info, milli_sec);
        if (ret != TD_SUCCESS) {
            continue;
        }
        int w = frame_info.video_frame.width;
        int h = frame_info.video_frame.height;

        ot_svp_src_img src;
        (td_void)memset_s(&src, sizeof(ot_svp_src_img), 0, sizeof(ot_svp_src_img));
        src.width = w;
        src.height = h;
        src.phys_addr[0] = frame_info.video_frame.phys_addr[0];
        src.phys_addr[1] = frame_info.video_frame.phys_addr[1];
        src.phys_addr[2] = frame_info.video_frame.phys_addr[2];
        src.stride[0] = frame_info.video_frame.stride[0];
        src.stride[1] = frame_info.video_frame.stride[1];
        src.stride[2] = frame_info.video_frame.stride[2];
        src.type = OT_SVP_IMG_TYPE_YUV420SP;

        int framelen = w * h * 3 / 2;
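        /* Tracking branch: run KCF on the current frame, fetch the updated bounding boxes
         * and draw the first one onto the frame with VGS */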
        if (kcf_info.is_train)
        {

            if (kcf_info.obj_list.track_obj_num != 0 || kcf_info.obj_list.train_obj_num != 0)
            {
                memset(&bbox_info, 0, sizeof(ot_sample_ive_kcf_bbox_info));
                ret = ss_mpi_ive_kcf_proc(&handle, &src, &kcf_info.obj_list, &kcf_info.kcf_proc_ctrl, TD_TRUE);
                ret = sample_ive_kcf_query_task(handle);
                ret = ss_mpi_ive_kcf_get_obj_bbox(&kcf_info.obj_list, bbox_info.bbox, &bbox_info.bbox_num, &kcf_info.kcf_bbox_ctrl);
                printf("bbox_info.bbox_num:%d \n", bbox_info.bbox_num);
                ot_sample_svp_rect_info rect = { 0 };
                sample_ive_rect_to_point(bbox_info.bbox, bbox_info.bbox_num, &rect);
                if (rect.num)
                {
                    int x = rect.rect[0].point[0].x;
                    int y = rect.rect[0].point[0].y;
                    int w = rect.rect[0].point[1].x - rect.rect[0].point[0].x;
                    int h = rect.rect[0].point[2].y - rect.rect[0].point[0].y;
                    // printf(" %d %d %d %d\n", x, y, w, h);
                    vgsdraw(&frame_info, x, y, w, h);
                }
            }
        }

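        /* Training branch: a new target box has been requested (needTrain); convert the
         * normalized coordinates into an ROI, merge it with existing objects and create
         * training objects */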
        if (kcf_info.needTrain)
        {
            memset(&bbox_info, 0, sizeof(ot_sample_ive_kcf_bbox_info));
            kcf_info.needTrain = TD_FALSE;

            roi_info.roi[0].roi.x = kcf_info.Kcf_x * w * OT_SAMPLE_QUARTER_OF_1M;
            roi_info.roi[0].roi.y = kcf_info.Kcf_y * h * OT_SAMPLE_QUARTER_OF_1M;
            roi_info.roi[0].roi.width = kcf_info.Kcf_w * w;
            roi_info.roi[0].roi.height = kcf_info.Kcf_h * h;
            roi_info.roi_num = 1;

            new_roi_info.roi[0].roi.x = kcf_info.Kcf_x * w * OT_SAMPLE_QUARTER_OF_1M;
            new_roi_info.roi[0].roi.y = kcf_info.Kcf_y * h * OT_SAMPLE_QUARTER_OF_1M;
            new_roi_info.roi[0].roi.width = kcf_info.Kcf_w * w;
            new_roi_info.roi[0].roi.height = kcf_info.Kcf_h * h;
            new_roi_info.roi_num = 1;

            sample_ive_kcf_obj_iou(&roi_info, &bbox_info, OT_SAMPLE_IVE_KCF_ROI_NUM, &new_roi_info);
            ret = ss_mpi_ive_kcf_obj_update(&kcf_info.obj_list, bbox_info.bbox, bbox_info.bbox_num);

            ret = ss_mpi_ive_kcf_get_train_obj(kcf_info.padding, new_roi_info.roi, new_roi_info.roi_num, &kcf_info.cos_win_x, &kcf_info.cos_win_y, &kcf_info.gauss_peak, &kcf_info.obj_list);
            printf("ss_mpi_ive_kcf_get_train_obj %s %d %08x \n", __FUNCTION__, __LINE__, ret);
            kcf_info.is_train = TD_TRUE;
        }

        ret = ss_mpi_vo_send_frame(0, 0, &frame_info, milli_sec);
        ret = ss_mpi_venc_send_frame(0, &frame_info, milli_sec);
        ss_mpi_vpss_release_chn_frame(grp, chn, &frame_info);
    }
    return NULL;
}
2.3 Complete Demo Code
https://gitee.com/apchy_ll/ss928_kcf_demo.git
2.4 Project Walkthrough and Tracking Results

[Video: KCF tracking algorithm demo]

3. Other Project Features

3.1 WebStream

WebStream uses an HTTP-FLV approach. The basic idea: the encoded video data is wrapped into FLV packets and handed to the HTTP server; the HTTP server handles the FLV HTTP request and returns the packed FLV data to the browser in the HTTP response, so the browser can play the stream with flv.js. Currently only H.264 is supported. FLV muxing uses the libflv library, and the HTTP server is mongoose.

The key point of HTTP-FLV is that the HTTP response must be sent with Connection: Keep-Alive and Transfer-Encoding: chunked, as in the session setup below; a sketch of pushing the muxed FLV data out as HTTP chunks follows it.

void NetServer::AddFlvSession(struct mg_connection* nc,
  struct http_message* hm) {
  std::lock_guard<std::mutex> lk(mutex_);
  mg_printf(nc,
    "HTTP/1.1 200 OK\r\n"
    "Content-Type: video/x-flv\r\n"
    "Connection: Keep-Alive\r\n"
    "Transfer-Encoding: chunked\r\n\r\n");
  SendFlvHeaderTag(nc);
  stflvSessionInfo* pSession =
    (stflvSessionInfo*)calloc(1, sizeof(stflvSessionInfo));
  if (pSession == NULL) {
    return;
  }
  // copy the request URI, keeping it null-terminated
  size_t url_len = hm->uri.len < sizeof(pSession->url) - 1 ?
    hm->uri.len : sizeof(pSession->url) - 1;
  memcpy(pSession->url, hm->uri.p, url_len);
  pSession->queue = new std::list<std::string>;
  pSession->mutex = new std::mutex;
  flv_conn_map_[nc] = (void*)pSession;
  pSession->m = CreateFlvMuxer(pSession);
}
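
The muxer output then only needs to be pushed to each connection as HTTP chunks. Below is a minimal sketch under the assumptions visible above (Mongoose 6.x, the stflvSessionInfo fields and the flv_conn_map_ member); FlushFlvSessions is a hypothetical helper that would be called from the mongoose polling thread, not code taken from the project:

void NetServer::FlushFlvSessions() {
  std::lock_guard<std::mutex> lk(mutex_);
  for (auto& it : flv_conn_map_) {
    struct mg_connection* nc = it.first;
    stflvSessionInfo* pSession = (stflvSessionInfo*)it.second;
    std::lock_guard<std::mutex> slk(*pSession->mutex);
    while (!pSession->queue->empty()) {
      const std::string& pkt = pSession->queue->front();
      // each muxed FLV tag goes out as one HTTP chunk
      mg_send_http_chunk(nc, pkt.data(), pkt.size());
      pSession->queue->pop_front();
    }
  }
}

On the browser side, flv.js keeps reading the chunked response body and feeds it to Media Source Extensions for playback.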

For the flv.js player on the web side, refer to the official flv.js project.

3.2 User Box-Drawing Flow

Box drawing is implemented with a canvas on the HTML page: mouse events are handled to draw the rectangle, and the resulting coordinates are POSTed to the http://ip:80/setpos endpoint of the HTTP server (see index.html in the WWW directory). A sketch of a possible server-side handler follows the snippet below.

    function postData(ratioX, ratioY, ratioWidth, ratioHeight) {
      const xhr = new XMLHttpRequest();
      xhr.open('POST', '/setpos');
      xhr.setRequestHeader('Content-Type', 'application/json;charset=UTF-8');
      const data = {
        x: parseFloat(ratioX).toFixed(2) * 1,
        y: parseFloat(ratioY).toFixed(2) * 1,
        w: parseFloat(ratioWidth).toFixed(2) * 1,
        h: parseFloat(ratioHeight).toFixed(2) * 1
      };
      xhr.send(JSON.stringify(data));
    }
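
On the server side, the /setpos handler only has to hand the normalized box over to the KCF thread. A minimal sketch under the same Mongoose 6.x assumptions as above (HandleSetPos is a hypothetical name, and the sscanf-based parsing assumes the exact JSON layout produced by postData; a real implementation would use a JSON parser):

void NetServer::HandleSetPos(struct mg_connection* nc, struct http_message* hm) {
  char body[256] = { 0 };
  size_t len = hm->body.len < sizeof(body) - 1 ? hm->body.len : sizeof(body) - 1;
  memcpy(body, hm->body.p, len);

  float x = 0.f, y = 0.f, w = 0.f, h = 0.f;
  if (sscanf(body, "{\"x\":%f,\"y\":%f,\"w\":%f,\"h\":%f}", &x, &y, &w, &h) == 4) {
    // hand the normalized box (0..1 ratios) to kcf_proc and ask it to retrain
    kcf_info.Kcf_x = x;
    kcf_info.Kcf_y = y;
    kcf_info.Kcf_w = w;
    kcf_info.Kcf_h = h;
    kcf_info.needTrain = TD_TRUE;
  }
  mg_printf(nc, "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n");
}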