海思35XX-KCF图像跟踪

以下内容均来自海思《IVE API 参考.pdf》, 所附源码为个人移植并测试通过

KCF算法利用循环矩阵对角化等性质,将跟踪问题核化,使核化后跟踪比对的操作简化成易于并行加速的矩阵运算,提升跟踪解决方案的系统性能。KCF算法可以和HOG算子联合使用,典型的算法用例如图2-34所示。

图 2-34 KCF 典型的算法用例

跟踪问题核化、相关滤波器训练等数学推导参阅“High-speed tracking with kernelized correlation filters”,IEEE Trans. PAMI, vol.37, No. 3, March 2015.

算法的总体流程:在It帧中,在当前位置pt附近采样,训练一个回归器,用来计算小窗口采样的响应。在It+1帧中,在前一帧位置pt附近采样,用已经训练的回归器来判断每个采样的响应,响应最强的采样作为本帧的位置pt+1。

用一个人脸跟踪图解一下上面的跟踪算法过程,如图2-35所示。首先对初始帧做初始化,框定一个待跟踪的ROI;对输入图像做特征提取,用余弦函数窗对提取到的特征加权后,将其用FFT变换到频域,与训练得到的相关滤波器在频域做点乘,反变换到空域后找到最强的响应位置,就是人脸在当前帧的预测。

图 2-35 跟踪算法过程示例

KCF 软件调用流程

当看懂了以上内容,就可移植kcf了,主函数如下:


/**
 * @brief Per-frame KCF tracker update callback (HiSilicon IVE KCF pipeline).
 *
 * On the init frame (bIsInit == true) it initializes the KCF context and
 * registers the caller-supplied ROI as the training object; on subsequent
 * frames it runs the KCF process/query/get-bbox sequence and publishes the
 * tracked rectangle (or a "lost" state) into the shared AppData fields.
 *
 * @param pFrame  Input video frame; must be non-null.
 * @param Rect    In: ROI to track (init frame). Size must be positive.
 * @param bIsInit True on the first frame to (re)initialize the tracker.
 * @return (AF_ERROE_CODE_E)TD_SUCCESS on success, AF_ERR_PARAMERROR on bad
 *         arguments, or the failing SDK status cast to AF_ERROE_CODE_E.
 *
 * NOTE(review): the static locals make this function single-instance and
 * non-reentrant — only one tracker/stream may use it at a time. Confirm this
 * matches the caller's threading model.
 */
AF_ERROE_CODE_E AF_CALLBACK AF_Track_Update(ot_video_frame_info* pFrame, AF_RECTANGLE_S& Rect, bool bIsInit)
{
    if (pFrame == nullptr || Rect._RectSize.cx <= 0 || Rect._RectSize.cy <= 0)
        return AF_ERR_PARAMERROR;

    td_s32 ret;

    // Tracker state persists across calls; bIsInit resets the logical state.
    static int nFrameItem;
    static ot_sample_ive_kcf_roi_info roi_info = { 0 };
    static ot_sample_ive_kcf_roi_info new_roi_info = { 0 };
    static ot_sample_ive_kcf_bbox_info bbox_info = { 0 };
    static ot_ive_handle handle;
    static ot_svp_img src;
    static ot_sample_ive_node* queue_node = TD_NULL;
    ot_sample_svp_rect_info rect = { 0 };

    // Wrap the incoming frame into an IVE image descriptor.
    sample_ive_fill_image(pFrame, &src);

    if (bIsInit)
    {
        nFrameItem = 0;
        /*
        * init kcf param
        */
        ret = sample_ive_kcf_init(&g_ive_kcf_info);
        // NOTE(review): the message contains %#x but no argument is passed
        // here (unlike the goto-macros below) — verify the macro appends ret.
        kcf_svp_check_exps_return(ret != TD_SUCCESS, (AF_ERROE_CODE_E)ret, "Error(%#x),sample_ive_kcf_init failed!");

        memset(&bbox_info, 0, sizeof(ot_sample_ive_kcf_bbox_info));

        // ROI coordinates are in U22Q10 fixed point (x/y scaled by 1024);
        // width/height stay in pixels, per the IVE KCF API convention.
        roi_info.roi[0].roi.x = Rect._RectPoint._X * OT_SAMPLE_QUARTER_OF_1M;
        roi_info.roi[0].roi.y = Rect._RectPoint._Y * OT_SAMPLE_QUARTER_OF_1M;
        roi_info.roi[0].roi.width = Rect._RectSize.cx;
        roi_info.roi[0].roi.height = Rect._RectSize.cy;
        roi_info.roi_num = 1;

        new_roi_info.roi[0].roi.x = Rect._RectPoint._X * OT_SAMPLE_QUARTER_OF_1M;
        new_roi_info.roi[0].roi.y = Rect._RectPoint._Y * OT_SAMPLE_QUARTER_OF_1M;
        new_roi_info.roi[0].roi.width = Rect._RectSize.cx;
        new_roi_info.roi[0].roi.height = Rect._RectSize.cy;
        new_roi_info.roi_num = 1;

        // Match the new ROI against existing boxes, then register it as a
        // training object (cosine windows + gaussian peak come from init).
        ret = sample_ive_kcf_obj_iou(&roi_info, &bbox_info, OT_SAMPLE_IVE_KCF_ROI_NUM, &new_roi_info);
        kcf_svp_check_exps_goto(ret, fail, "Err(%#x),sample_ive_kcf_obj_iou failed!\n", ret);

        ret = ss_mpi_ive_kcf_obj_update(&g_ive_kcf_info.obj_list, bbox_info.bbox, bbox_info.bbox_num);
        kcf_svp_check_exps_goto(ret, fail, "Err(%#x),ss_mpi_ive_kcf_obj_update failed!\n", ret);

        ret = ss_mpi_ive_kcf_get_train_obj(g_ive_kcf_info.padding, new_roi_info.roi, new_roi_info.roi_num,
            &g_ive_kcf_info.cos_win_x, &g_ive_kcf_info.cos_win_y, &g_ive_kcf_info.gauss_peak,
            &g_ive_kcf_info.obj_list);
        kcf_svp_check_exps_goto(ret, fail, "Err(%#x),ss_mpi_ive_kcf_get_train_obj failed!\n", ret);
    }
    else nFrameItem++;

    if (g_ive_kcf_info.obj_list.track_obj_num != 0 || g_ive_kcf_info.obj_list.train_obj_num != 0) 
    {
        // Run one KCF train/track iteration on this frame, wait for the
        // task to finish, then fetch the resulting bounding boxes.
        ret = ss_mpi_ive_kcf_proc(&handle, &src, &g_ive_kcf_info.obj_list, &g_ive_kcf_info.kcf_proc_ctrl, TD_TRUE);
        kcf_svp_check_exps_goto(ret, fail, "Err(%#x),ss_mpi_ive_kcf_proc failed!\n", ret);

        ret = sample_ive_kcf_query_task(handle);
        kcf_svp_check_exps_goto(ret, fail, "ive query task failed");

        ret = ss_mpi_ive_kcf_get_obj_bbox(&g_ive_kcf_info.obj_list, bbox_info.bbox, 
            &bbox_info.bbox_num, &g_ive_kcf_info.kcf_bbox_ctrl);
        // Trace-only on failure: bbox_num stays 0 and we fall into the
        // "lost" branch below instead of aborting the whole update.
        kcf_svp_check_failed_err_level_trace(ret, "Err(%#x),ss_mpi_ive_kcf_get_obj_bbox failed!\n", ret);

        sample_ive_rect_to_point(bbox_info.bbox, bbox_info.bbox_num, &rect);
    }
    else 
    {
        rect.num = 0;
    }

    if (rect.num > 0)
    {
        // Publish the first tracked box. point[0] is the top-left corner;
        // point[1]/point[2] supply the opposite x and y extents.
        AppData::_g_AI_TrackMutex.lock();
        AppData::_g_AI_TrackState = _TRACK_RUNG;
        AppData::_g_AI_TrackRect._RectPoint._X = rect.rect[0].point[0].x;
        AppData::_g_AI_TrackRect._RectPoint._Y = rect.rect[0].point[0].y;
        AppData::_g_AI_TrackRect._RectSize.cx = rect.rect[0].point[1].x - rect.rect[0].point[0].x;
        AppData::_g_AI_TrackRect._RectSize.cy = rect.rect[0].point[2].y - rect.rect[0].point[0].y;
        AppData::_g_AI_TrackRect._PointItem = 0;
        //uart_send_PID_zoom(AppData::_g_AI_TrackRect);
        AppData::_g_AI_TrackMutex.unlock();
    }
    else 
    {
        // Declare the target lost only after a couple of update frames, so
        // a momentary empty result right after init is not reported as loss.
        if (nFrameItem > 2)
        {
            AppData::_g_AI_TrackMutex.lock();
            AppData::_g_AI_TrackState = _TRACK_LOSE;
            AppData::_g_AI_TrackRect._RectPoint._X = 0;
            AppData::_g_AI_TrackRect._RectPoint._Y = 0;
            AppData::_g_AI_TrackRect._RectSize.cx = 0;
            AppData::_g_AI_TrackRect._RectSize.cy = 0;
            AppData::_g_AI_TrackRect._PointItem = 0;
            //uart_send_PID_zoom(AppData::_g_AI_TrackRect);
            AppData::_g_AI_TrackMutex.unlock();
        }
    }

    // Fixed: the original returned the garbled expression
    // `AF_ERROE_CODE_E AF_CALLBACK()`. Success is TD_SUCCESS (0) cast to the
    // callback's error-code enum, matching the cast idiom used above.
    return (AF_ERROE_CODE_E)TD_SUCCESS;
fail:
    // NOTE(review): queue_node is never assigned in this function; this free
    // looks like a leftover from the sample it was ported from — harmless as
    // long as sample_ive_free_queue_node tolerates a TD_NULL node.
    sample_ive_free_queue_node(&queue_node);
    // Fixed: propagate the failing SDK status instead of the garbled return.
    return (AF_ERROE_CODE_E)ret;
}

  • 3
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值