Background Modeling (Part 2): Methods Using Pixel Values as Features (1)

The previous post used a CVPR 2011 paper that evaluates background modeling methods as an overview of the field; blog address: http://blog.sina.com.cn/s/blog_631a4cc40100wwg7.html

In the next several posts I plan to describe some recent background modeling methods in detail, starting with methods that use pixel values as features.
The Gaussian Mixture Model (GMM) is the classic of classics in this category. The paper, "Adaptive Background Mixture Models for Real-Time Tracking", was published at CVPR 1999 and received the Longuet-Higgins Prize at CVPR 2009; Google Scholar shows roughly 3196 citations.

The mixture-of-Gaussians model represents each pixel of the image with K Gaussian components (typically 3 to 5). When a new frame arrives, the mixture model is updated and each pixel of the current frame is matched against its mixture: if a match is found, the pixel is classified as background, otherwise as foreground. Each Gaussian component is determined mainly by two parameters, its mean and its variance, and the learning scheme chosen for these two parameters directly affects the stability, accuracy, and convergence of the model. Because the background is being modeled for moving-object extraction, the mean and variance of each Gaussian must be updated in real time. To improve the model's adaptability, improved methods use different learning rates for the mean and the variance; to better detect large, slow-moving targets in busy scenes, the notion of a weighted mean is introduced: a background image is built and updated in real time, and the weights, the weighted mean, and the background image are then combined to classify each pixel as foreground or background. The update equations are as follows:

\mu_t = (1-\rho)\,\mu_{t-1} + \rho\, x_t                                   (1)
\sigma_t^2 = (1-\rho)\,\sigma_{t-1}^2 + \rho\,(x_t-\mu_t)^{T}(x_t-\mu_t)   (2)
\rho = \alpha\,\eta(x_t \mid \mu_k, \sigma_k)                              (3)
|x_t - \mu_{t-1}| \le 2.5\,\sigma_{t-1}                                    (4)
w_{k,t} = (1-\alpha)\,w_{k,t-1} + \alpha\, M_{k,t}                         (5)

Here \rho is the learning rate, i.e., the rate at which the current image is blended into the background; \alpha is the weight learning rate, and M_{k,t} is 1 for the matched Gaussian and 0 otherwise. For a deeper treatment, see the original paper or the OpenCV source code.
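To make the update rules above concrete, here is a minimal single-channel, single-pixel sketch of equations (1)-(5). It is not the OpenCV implementation: the type GaussComp, the function update_pixel, and the constants are made up for illustration, and the sorting and weight renormalization of the components are omitted for brevity.

#include <math.h>
#include <stdio.h>

#define K            3      /* Gaussian components per pixel */
#define ALPHA        0.01   /* weight learning rate */
#define MATCH_SIGMA  2.5    /* match threshold in standard deviations, eq. (4) */
#define VAR_INIT     900.0  /* initial variance for a replaced component */

typedef struct { double weight, mean, var; } GaussComp;

/* Update one single-channel pixel's mixture with a new observation x.
   Returns 1 if x matched an existing component, 0 otherwise. */
static int update_pixel(GaussComp g[K], double x)
{
    int matched = -1;
    for (int k = 0; k < K; k++) {
        double d = x - g[k].mean;
        if (matched < 0 && fabs(d) <= MATCH_SIGMA * sqrt(g[k].var)) {     /* eq. (4) */
            double eta = exp(-(d * d) / (2.0 * g[k].var)) /
                         sqrt(2.0 * 3.14159265358979 * g[k].var);
            double rho = ALPHA * eta;                                     /* eq. (3) */
            g[k].mean += rho * d;                                         /* eq. (1) */
            g[k].var  += rho * (d * d - g[k].var);                        /* eq. (2) */
            matched = k;
        }
        /* eq. (5): M_{k,t} = 1 for the matched component, 0 otherwise */
        g[k].weight += ALPHA * ((k == matched ? 1.0 : 0.0) - g[k].weight);
    }
    if (matched < 0) {
        /* no match: replace the weakest (last) component with the new observation */
        g[K - 1].weight = ALPHA;
        g[K - 1].mean   = x;
        g[K - 1].var    = VAR_INIT;
    }
    return matched >= 0;
}

int main(void)
{
    GaussComp g[K] = { {1.0, 128.0, VAR_INIT}, {0.0, 0.0, VAR_INIT}, {0.0, 0.0, VAR_INIT} };
    printf("matched: %d\n", update_pixel(g, 130.0)); /* close to the first mean -> matched        */
    printf("matched: %d\n", update_pixel(g, 250.0)); /* far from every mean -> weakest replaced   */
    return 0;
}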

OpenCV also ships an implementation of this method. (Original post: 学习opencv(1):高斯背景建模-1.)
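As a rough sketch of how this legacy C interface is typically driven (assuming an OpenCV 1.x/2.x build with the cvaux module; later releases add a third learning-rate argument to cvUpdateBGStatModel, so the call below may need adjusting):

#include <cv.h>
#include <cvaux.h>
#include <highgui.h>

int main(void)
{
    CvCapture* cap = cvCaptureFromCAM(0);            /* or cvCaptureFromFile("video.avi") */
    IplImage* frame = cvQueryFrame(cap);
    if( !frame ) return -1;

    /* passing NULL uses the default parameters set up inside cvCreateGaussianBGModel */
    CvBGStatModel* bg = cvCreateGaussianBGModel( frame, NULL );

    cvNamedWindow( "foreground", 1 );
    cvNamedWindow( "background", 1 );
    while( (frame = cvQueryFrame(cap)) != NULL )
    {
        cvUpdateBGStatModel( frame, bg );             /* dispatches to icvUpdateGaussianBGModel */
        cvShowImage( "foreground", bg->foreground );  /* 255 = foreground, 0 = background */
        cvShowImage( "background", bg->background );
        if( cvWaitKey(30) == 27 ) break;              /* Esc quits */
    }

    cvReleaseBGStatModel( &bg );
    cvReleaseCapture( &cap );
    return 0;
}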

The Gaussian background modeling source code is located in "OpenCV\cvaux\src\cvbgfg_gaussmix.cpp". Below is the code with my comments added.


CV_IMPL CvBGStatModel*
cvCreateGaussianBGModel( IplImage* first_frame, CvGaussBGStatModelParams* parameters )
{
    CvGaussBGModel* bg_model = 0; // the Gaussian background model to be created
   
    CV_FUNCNAME( "cvCreateGaussianBGModel" );
   
    __BEGIN__;
   
    double var_init; // initial variance assigned to the Gaussians
    CvGaussBGStatModelParams params; // parameters of the Gaussian background model
    int i, j, k, n, m, p;
   
    // initialize parameters: if none are supplied, fall back to the defaults
    if( parameters == NULL )
    {
        params.win_size = CV_BGFG_MOG_WINDOW_SIZE; // number of frames in the initialization phase
        params.bg_threshold = CV_BGFG_MOG_BACKGROUND_THRESHOLD; // background weight threshold
        params.std_threshold = CV_BGFG_MOG_STD_THRESHOLD; // match threshold in standard deviations
        params.weight_init = CV_BGFG_MOG_WEIGHT_INIT; // initial weight
        params.variance_init = CV_BGFG_MOG_SIGMA_INIT*CV_BGFG_MOG_SIGMA_INIT; // initial variance
        params.minArea = CV_BGFG_MOG_MINAREA; // minimum foreground region area
        params.n_gauss = CV_BGFG_MOG_NGAUSSIANS; // number of Gaussians per pixel
    }
    else
    {
        params = *parameters; // parameters were supplied, so copy them into params
    }
   
    if( !CV_IS_IMAGE(first_frame) ) // the first frame must be a valid image, otherwise report an error
        CV_ERROR( CV_StsBadArg, "Invalid or NULL first_frame parameter" );
   
    CV_CALL( bg_model = (CvGaussBGModel*)cvAlloc( sizeof(*bg_model) )); // allocate memory for bg_model
    memset( bg_model, 0, sizeof(*bg_model) ); // zero the freshly allocated memory
    bg_model->type = CV_BG_MODEL_MOG; // background model type: CV_BG_MODEL_MOG
    bg_model->release = (CvReleaseBGStatModel)icvReleaseGaussianBGModel; // released via icvReleaseGaussianBGModel
    bg_model->update = (CvUpdateBGStatModel)icvUpdateGaussianBGModel; // updated via icvUpdateGaussianBGModel
   
    bg_model->params = params; // store the parameters
   
    //prepare storages
    CV_CALL( bg_model->g_point = (CvGaussBGPoint*)cvAlloc(sizeof(CvGaussBGPoint)*
        ((first_frame->width*first_frame->height) + 256))); // allocate the per-pixel Gaussian background points g_point
   
    CV_CALL( bg_model->background = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, first_frame->nChannels));
    CV_CALL( bg_model->foreground = cvCreateImage(cvSize(first_frame->width ,first_frame->height), IPL_DEPTH_8U, 1));
   
    CV_CALL( bg_model->storage = cvCreateMemStorage());     
    //initializing
    var_init = 2 * params.std_threshold * params.std_threshold; // initial variance assigned to the Gaussians
    CV_CALL( bg_model->g_point[0].g_values =  (CvGaussBGValues*)cvAlloc( sizeof(CvGaussBGValues)*params.n_gauss*(first_frame->width*first_frame->height + 128)));
   
    for( i = 0, p = 0, n = 0; i < first_frame->height; i++ )
    {
        for( j = 0; j < first_frame->width; j++, n++ ) // n = i*first_frame->width + j
        {
            bg_model->g_point[n].g_values = bg_model->g_point[0].g_values + n*params.n_gauss;
            bg_model->g_point[n].g_values[0].weight = 1;    // the first value seen has weight one
            bg_model->g_point[n].g_values[0].match_sum = 1; // the sum of matches for a particular gaussian
            for( m = 0; m < first_frame->nChannels; m++) // for each channel
            {
                bg_model->g_point[n].g_values[0].variance[m] = var_init; // variance of channel m of Gaussian 0
                bg_model->g_point[n].g_values[0].mean[m] = (unsigned char)first_frame->imageData[p + m]; // mean = value of channel m in the first frame
            }
            for( k = 1; k < params.n_gauss; k++) // the remaining Gaussians
            {
                bg_model->g_point[n].g_values[k].weight = 0;    // weight of Gaussian k starts at 0
                bg_model->g_point[n].g_values[k].match_sum = 0; // match_sum of Gaussian k starts at 0
                for( m = 0; m < first_frame->nChannels; m++)
                {
                    bg_model->g_point[n].g_values[k].variance[m] = var_init; // variance of channel m of Gaussian k
                    bg_model->g_point[n].g_values[k].mean[m] = 0; // mean of channel m of Gaussian k starts at 0
                }
            }
            p += first_frame->nChannels; // advance to the next pixel in the interleaved image data
        }
        }
    }
   
    bg_model->countFrames = 0; // no frames processed yet
   
    __END__;
   
    if( cvGetErrStatus() < 0 ) // if an error occurred, clean up
    {
        CvBGStatModel* base_ptr = (CvBGStatModel*)bg_model;
       
        if( bg_model && bg_model->release )
            bg_model->release( &base_ptr ); // release the model
        else
            cvFree( &bg_model );
        bg_model = 0;
    }
   
    return (CvBGStatModel*)bg_model; // return the created background model
}
 
CV_IMPL void CV_CDECL
icvReleaseGaussianBGModel( CvGaussBGModel** _bg_model )  // release the Gaussian background model
{
    CV_FUNCNAME( "icvReleaseGaussianBGModel" );
    __BEGIN__;
   
    if( !_bg_model )
        CV_ERROR( CV_StsNullPtr, "" );
    if( *_bg_model )
    {
        CvGaussBGModel* bg_model = *_bg_model;
        if( bg_model->g_point )
        {
            cvFree( &bg_model->g_point[0].g_values ); // free the Gaussian values stored in the background points
            cvFree( &bg_model->g_point ); // free the background points themselves
        }
       
        cvReleaseImage( &bg_model->background ); // release the background image
        cvReleaseImage( &bg_model->foreground ); // release the foreground image
        cvReleaseMemStorage(&bg_model->storage); // release the model's memory storage
        memset( bg_model, 0, sizeof(*bg_model) );
        cvFree( _bg_model ); // free the model structure itself
    }
    __END__;
}
 
CV_IMPL int  CV_CDECL
icvUpdateGaussianBGModel( IplImage* curr_frame, CvGaussBGModel*  bg_model )
{
    int i, j, k;
    int region_count = 0;
    CvSeq *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
   
    bg_model->countFrames++; // one more frame has been processed
   
    for( i = 0; i < curr_frame->height; i++ )
    {
        for( j = 0; j < curr_frame->width; j++ ) // process every pixel
        {
            int match[CV_BGFG_MOG_MAX_NGAUSSIANS]; // CV_BGFG_MOG_MAX_NGAUSSIANS = 500, maximum number of Gaussians: match[500]
            double sort_key[CV_BGFG_MOG_MAX_NGAUSSIANS];  // sort keys: sort_key[500]
            const int nChannels = curr_frame->nChannels; // number of channels in the current frame
            const int n = i*curr_frame->width+j; // index of the pixel being processed
            const int p = n*curr_frame->nChannels; // byte offset of the pixel in the interleaved (BGR,BGR,BGR,...) image data
           
            // A few short cuts
            CvGaussBGPoint* g_point = &bg_model->g_point[n];
            const CvGaussBGStatModelParams bg_model_params = bg_model->params;
            double pixel[4]; // value of the current pixel, one entry per channel
            int no_match;    // index of the matched Gaussian, or -1 if none matched
           
            for( k = 0; k < nChannels; k++ ) // read channel k of the current pixel
                pixel[k] = (uchar)curr_frame->imageData[p+k];
           
            no_match = icvMatchTest( pixel, nChannels, match, g_point, &bg_model_params ); // test whether the pixel matches one of the Gaussians
            if( bg_model->countFrames >= bg_model->params.win_size ) // past the initialization window: use the full-window updates
            {
                icvUpdateFullWindow( pixel, nChannels, match, g_point, &bg_model->params ); // normal-phase update
                if( no_match == -1)  // no Gaussian matched: normal-phase no-match update
                    icvUpdateFullNoMatch( curr_frame, p, match, g_point, &bg_model_params );
            }
            else // still in the initialization phase
            {
                icvUpdatePartialWindow( pixel, nChannels, match, g_point, &bg_model_params );
                if( no_match == -1)
                    icvUpdatePartialNoMatch( pixel, nChannels, match, g_point, &bg_model_params );
            }
            icvGetSortKey( nChannels, sort_key, g_point, &bg_model_params ); // compute the fitness value of each Gaussian
            icvInsertionSortGaussians( g_point, sort_key, (CvGaussBGStatModelParams*)&bg_model_params ); // sort the Gaussians by fitness
            icvBackgroundTest( nChannels, n, p, match, bg_model ); // decide whether the pixel is background
        }
    }
   
    // the code below filters the foreground, removing small regions
   
    cvClearMemStorage(bg_model->storage);
   
    //cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_OPEN, 1 );
    //cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_CLOSE, 1 );
   
    cvFindContours( bg_model->foreground, bg_model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
    // find contours in the foreground image
    for( seq = first_seq; seq; seq = seq->h_next )
    {
        CvContour* cnt = (CvContour*)seq;
        if( cnt->rect.width * cnt->rect.height < bg_model->params.minArea ) // drop regions smaller than minArea
        {
            //delete small contour
            prev_seq = seq->h_prev;
            if( prev_seq )
            {
                prev_seq->h_next = seq->h_next;
                if( seq->h_next )
                    seq->h_next->h_prev = prev_seq;
            }
            else
            {
                first_seq = seq->h_next;
                if( seq->h_next )
                    seq->h_next->h_prev = NULL;
            }
        }//end of if
        else
        {
            region_count++; // otherwise keep the region and count it
        }
    }//end of for
    bg_model->foreground_regions = first_seq; // keep the list of surviving foreground regions
    cvZero(bg_model->foreground);
    cvDrawContours(bg_model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);
    // redraw the foreground mask from the surviving contours
   
    return region_count; // return the number of foreground regions
}
 
static void icvInsertionSortGaussians( CvGaussBGPoint* g_point, double* sort_key, CvGaussBGStatModelParams *bg_model_params )
{
    int i, j;
    for( i = 1; i < bg_model_params->n_gauss; i++ ) // for each Gaussian of this pixel
    {
        double index = sort_key[i]; // fitness value of Gaussian i
        for( j = i; j > 0 && sort_key[j-1] < index; j-- ) // sort in descending order
        {
            double temp_sort_key = sort_key[j];
            sort_key[j] = sort_key[j-1];
            sort_key[j-1] = temp_sort_key;
           
            CvGaussBGValues temp_gauss_values = g_point->g_values[j];
            g_point->g_values[j] = g_point->g_values[j-1];
            g_point->g_values[j-1] = temp_gauss_values;
        }
//        sort_key[j] = index;
    }
}
 
static int icvMatchTest( double* src_pixel, int nChannels, int* match,
                         const CvGaussBGPoint* g_point,
                         const CvGaussBGStatModelParams *bg_model_params )
{
    int k;
    int matchPosition=-1;
    for ( k = 0; k < bg_model_params->n_gauss; k++) // initialize match[k] = 0 for every Gaussian
        match[k]=0;
   
    for ( k = 0; k < bg_model_params->n_gauss; k++) // for each Gaussian
    {
        double sum_d2 = 0.0;
        double var_threshold = 0.0; // variance-based match threshold
        for(int m = 0; m < nChannels; m++) // for each channel
        {
            double d = g_point->g_values[k].mean[m]- src_pixel[m];  // difference between the Gaussian mean and the new pixel value
            sum_d2 += (d*d); // squared difference summed over the channels
            var_threshold += g_point->g_values[k].variance[m]; // var_threshold accumulates the variances of the channels
        }  //difference < STD_LIMIT*STD_LIMIT or difference**2 < STD_LIMIT*STD_LIMIT*VAR
        var_threshold = bg_model_params->std_threshold*bg_model_params->std_threshold*var_threshold; // threshold = std_threshold^2 * sum of variances
        if(sum_d2 < var_threshold) // the difference is below the threshold
        {
            match[k] = 1;  // this Gaussian matches
            matchPosition = k; // remember which Gaussian matched
            break;
        }
    }//end-of-for-k
   
    return matchPosition; // return the index of the matching Gaussian,
                          // or -1 if no Gaussian matched
}
 
 
static void icvUpdateFullWindow( double* src_pixel, int nChannels, int* match,
                                 CvGaussBGPoint* g_point,
                                 const CvGaussBGStatModelParams *bg_model_params )
{
    const double learning_rate_weight = (1.0/(double)bg_model_params->win_size); // weight learning rate = 1 / initialization window length
    for(int k = 0; k < bg_model_params->n_gauss; k++) // for each Gaussian
    {
        g_point->g_values[k].weight = g_point->g_values[k].weight +
            (learning_rate_weight*((double)match[k] -g_point->g_values[k].weight)); // weight update, eq. (5)
       
        if(match[k]) // match[k] == 1: this Gaussian matched the pixel
        {
            double learning_rate_gaussian = (double)match[k]/(g_point->g_values[k].weight*
                (double)bg_model_params->win_size);
            for(int m = 0; m < nChannels; m++) // update mean and variance for every channel
            {
                const double tmpDiff = src_pixel[m] - g_point->g_values[k].mean[m];
                g_point->g_values[k].mean[m] = g_point->g_values[k].mean[m] +
                    (learning_rate_gaussian * tmpDiff);   // mean update, eq. (1)
                g_point->g_values[k].variance[m] = g_point->g_values[k].variance[m]+
                    (learning_rate_gaussian*((tmpDiff*tmpDiff) - g_point->g_values[k].variance[m])); // variance update, eq. (2)
            }//end-of-for-m
        }
    }//end-of-for-k
}
static void icvUpdatePartialWindow( double* src_pixel, int nChannels, int* match,
           CvGaussBGPoint* g_point,
           const CvGaussBGStatModelParams *bg_model_params )
{
    int k, m;
    int window_current = 0; // total number of matches accumulated so far
   
    for( k = 0; k < bg_model_params->n_gauss; k++ )
        window_current += g_point->g_values[k].match_sum; // sum of match_sum over all Gaussians, i.e. roughly the number of frames absorbed so far for this pixel
   
    for( k = 0; k < bg_model_params->n_gauss; k++ )
    {
        g_point->g_values[k].match_sum += match[k]; // count one more match for this Gaussian
        double learning_rate_weight = (1.0/((double)window_current + 1.0)); // increased by one since sum
       
        g_point->g_values[k].weight = g_point->g_values[k].weight +
            (learning_rate_weight*((double)match[k] - g_point->g_values[k].weight)); // weight update
       
        if( g_point->g_values[k].match_sum > 0 && match[k] )
        {
            double learning_rate_gaussian = (double)match[k]/((double)g_point->g_values[k].match_sum);
            for( m = 0; m < nChannels; m++ )
            {
                const double tmpDiff = src_pixel[m] - g_point->g_values[k].mean[m];
                g_point->g_values[k].mean[m] = g_point->g_values[k].mean[m] +
                    (learning_rate_gaussian*tmpDiff); // mean update
                g_point->g_values[k].variance[m] = g_point->g_values[k].variance[m]+
                    (learning_rate_gaussian*((tmpDiff*tmpDiff) - g_point->g_values[k].variance[m])); // variance update
            }
        }
    }
}
 
static void icvUpdateFullNoMatch( IplImage* gm_image, int p, int* match,
                                  CvGaussBGPoint* g_point,
                                  const CvGaussBGStatModelParams *bg_model_params)
{
    int k, m;
    double alpha;
    int match_sum_total = 0;
    //new value of last one
    g_point->g_values[bg_model_params->n_gauss - 1].match_sum = 1; // set match_sum of the last (weakest) Gaussian to 1
   
    //get sum of all but last value of match_sum
   
    for( k = 0; k < bg_model_params->n_gauss ; k++ )
        match_sum_total += g_point->g_values[k].match_sum; // total match_sum over all Gaussians
   
    g_point->g_values[bg_model_params->n_gauss - 1].weight = 1./(double)match_sum_total; // weight assigned to the replaced Gaussian
    for( m = 0; m < gm_image->nChannels ; m++ )
    {
        // first pass mean is image value
        g_point->g_values[bg_model_params->n_gauss - 1].variance[m] = bg_model_params->variance_init;  // give the replaced Gaussian the initial variance
        g_point->g_values[bg_model_params->n_gauss - 1].mean[m] = (unsigned char)gm_image->imageData[p + m]; // its mean is the current pixel value
    }
   
    alpha = 1.0 - (1.0/bg_model_params->win_size);
    for( k = 0; k < bg_model_params->n_gauss - 1; k++ )  // rescale every Gaussian except the newly replaced last one
    {
        g_point->g_values[k].weight *= alpha;  // decay the weight
        if( match[k] )
            g_point->g_values[k].weight += alpha;
    }
}

static void
icvUpdatePartialNoMatch(double *pixel,
                        int nChannels,
                        int* ,
                        CvGaussBGPoint* g_point,
                        const CvGaussBGStatModelParams *bg_model_params)
{
    int k, m;
    //new value of last one
    g_point->g_values[bg_model_params->n_gauss - 1].match_sum = 1; // set match_sum of the last (weakest) Gaussian to 1
   
    //get sum of all but last value of match_sum
    int match_sum_total = 0;
    for(k = 0; k < bg_model_params->n_gauss ; k++)
        match_sum_total += g_point->g_values[k].match_sum;
    for(m = 0; m < nChannels; m++)
    {
        //first pass mean is image value
        g_point->g_values[bg_model_params->n_gauss - 1].variance[m] = bg_model_params->variance_init;
        g_point->g_values[bg_model_params->n_gauss - 1].mean[m] = pixel[m];
    }
    for(k = 0; k < bg_model_params->n_gauss; k++)
    {
        g_point->g_values[k].weight = (double)g_point->g_values[k].match_sum /
            (double)match_sum_total;
    }
}

static void icvGetSortKey( const int nChannels, double* sort_key, const CvGaussBGPoint* g_point,
                           const CvGaussBGStatModelParams *bg_model_params )
{
    int k, m;
    for( k = 0; k < bg_model_params->n_gauss; k++ ) // for each Gaussian
    {
        // Avoid division by zero
        if( g_point->g_values[k].match_sum > 0 )
        {
            // assuming the channels of each Gaussian are independent, their variances simply add up
            double variance_sum = 0.0;
            for( m = 0; m < nChannels; m++ )
                variance_sum += g_point->g_values[k].variance[m]; // sum of the per-channel variances
           
            sort_key[k] = g_point->g_values[k].weight/sqrt(variance_sum);  // fitness of the Gaussian: weight / standard deviation
        }
        else
            sort_key[k]= 0.0;
    }
}
 
static void icvBackgroundTest( const int nChannels, int n, int p, int *match, CvGaussBGModel* bg_model )
{
    int m, b;
    uchar pixelValue = (uchar)255; // will switch to 0 if a match is found; assume foreground initially
    double weight_sum = 0.0;
    CvGaussBGPoint* g_point = bg_model->g_point;
   
    for( m = 0; m < nChannels; m++)
        bg_model->background->imageData[p+m]   = (unsigned char)(g_point[n].g_values[0].mean[m]+0.5); // the background image is the mean of Gaussian [0], the most probable component
   
    for( b = 0; b < bg_model->params.n_gauss; b++)
    {
        weight_sum += g_point[n].g_values[b].weight; // accumulate the weights
        if( match[b] ) // the pixel matched one of the Gaussians considered background
            pixelValue = 0; // so mark the pixel as background
        if( weight_sum > bg_model->params.bg_threshold ) // once the accumulated weight exceeds the background threshold, ignore the remaining Gaussians
            break;
    }
   
    bg_model->foreground->imageData[p/nChannels] = pixelValue; // write 0 (background) or 255 (foreground) into the foreground mask
}

