Foreground detection with a codebook
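The program depends on a codebook.h header that the post does not show. The sketch below is a guess at what that header declares, reconstructed from how its types, constants, and functions are used in the listing that follows; the concrete values chosen for channel, CVCLOSE_ITR, and CVCONTOUR_APPROX_LEVEL are assumptions, not taken from the original header.

// codebook.h -- assumed contents (reconstructed sketch, not the original header)
#ifndef CODEBOOK_H
#define CODEBOOK_H

#include <opencv/cv.h>              // assumed include paths for the old C API (IplImage, uchar, ...)
#include <opencv/highgui.h>

#define channel 3                   // assumed: number of colour channels learned per pixel
#define CVCLOSE_ITR 1               // assumed: iterations for open/close morphology in find_connected_components
#define CVCONTOUR_APPROX_LEVEL 2    // assumed: accuracy passed to cvApproxPoly

typedef struct code_element         // one codeword of a pixel's codebook
{
  uchar learnHigh[channel];         // upper learning boundary per channel
  uchar learnLow[channel];          // lower learning boundary per channel
  uchar max[channel];               // highest pixel value absorbed by this codeword
  uchar min[channel];               // lowest pixel value absorbed by this codeword
  int   t_last_update;              // last time (in samples) this codeword matched
  int   stale;                      // longest run of samples without a match
} code_element;

typedef struct codeBook             // the per-pixel codebook
{
  code_element** cb;                // dynamically grown array of codewords
  int numEntries;                   // number of codewords currently stored
  int t;                            // number of samples seen by this codebook
} codeBook;

int   update_codebook(uchar* p, codeBook& c, unsigned* cbBounds, int numChannels);
uchar cvBackgroundDiff(uchar* p, codeBook& c, int numChannels, int* minMod, int* maxMod);
int   cvClearStaleEntries(codeBook& c);
void  find_connected_components(IplImage* mask, int poly1_hull0, float perimScale,
                                int* num, CvRect* bbs, CvPoint* centers);

#endif // CODEBOOK_H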

#include "codebook.h"
#include <cstdio>               // for printf

// Per-channel tolerances added to each codeword's min/max when deciding
// whether a pixel is foreground (see cvBackgroundDiff below).
int minMod[3] = {30, 30, 30}, maxMod[3] = {30, 30, 30};


int main(int argc, char** argv)
{
  IplImage* src_image = NULL;
  IplImage* dst_image = NULL;

  CvCapture* capture = cvCreateFileCapture("11.avi");
  if (!capture)
  {
    printf("failed to open 11.avi\n");
    return -1;
  }
  src_image = cvQueryFrame(capture);
  dst_image = cvCreateImage(cvGetSize(src_image), IPL_DEPTH_8U, 1);

  cvNamedWindow("src_image", 1);
  cvNamedWindow("HSV_image", 1);
  cvNamedWindow("raw foreground", 1);
  cvNamedWindow("cleaned foreground", 1);

  IplImage* HSV_Image = cvCloneImage(src_image);
  int nframe = 0;

  cvCvtColor(src_image, HSV_Image, CV_RGB2HSV);

  int total = src_image->width * src_image->height;    // total number of pixels
  unsigned cbBounds[3] = {10, 10, 10};                  // learning bound, one entry per channel
  int numChannels = channel;

  // One codebook per pixel
  codeBook* Tcodebook = new codeBook[total];
  for (int n = 0; n < total; n++)
  {
    Tcodebook[n].numEntries = 0;
    Tcodebook[n].t = 0;
    Tcodebook[n].cb = NULL;
  }

  // Writer used to save the foreground mask; is_color = 0 because dst_image is single-channel
  CvVideoWriter* writer =
    cvCreateVideoWriter("video.avi", CV_FOURCC('X', 'V', 'I', 'D'),
                        cvGetCaptureProperty(capture, CV_CAP_PROP_FPS),
                        cvGetSize(src_image), 0);
  while ((src_image = cvQueryFrame(capture)) != NULL)
  {
    nframe++;
    // the HSV copy is only displayed; the codebook itself is built on the raw frame
    cvCvtColor(src_image, HSV_Image, CV_RGB2HSV);

    if (nframe <= 30)                              // still learning the background model
    {
      for (int n = 0; n < total; n++)
      {
        update_codebook((uchar*)src_image->imageData + n * numChannels, Tcodebook[n], cbBounds, numChannels);
      }
      printf("building the background model, please wait...\n");
    }
    else
    {
      if (nframe % 50 == 0)                        // refresh the codebook every 50 frames
      {
        for (int n = 0; n < total; n++)
        {
          update_codebook((uchar*)src_image->imageData + n * numChannels, Tcodebook[n], cbBounds, numChannels);
        }
      }
      if (nframe % 100 == 0)                       // clear stale codewords every 100 frames
      {
        for (int n = 0; n < total; n++)
        {
          cvClearStaleEntries(Tcodebook[n]);
        }
      }

      // per-pixel background test: 255 = foreground, 0 = background
      for (int n = 0; n < total; n++)
      {
        dst_image->imageData[n] = cvBackgroundDiff((uchar*)src_image->imageData + n * numChannels,
                                                   Tcodebook[n], numChannels, minMod, maxMod);
      }
      // cvFlip(dst_image, dst_image, 0);               // flip the image around the x-axis
      // cvSubRS(dst_image, cvScalar(255), dst_image);
      cvShowImage("raw foreground", dst_image);

      // clean up the raw mask and collect bounding boxes / centres
      int num = 100;
      CvRect bbs[100];
      CvPoint center[100];
      find_connected_components(dst_image, 0, 4, &num, bbs, center);

      cvWriteFrame(writer, dst_image);
      cvShowImage("cleaned foreground", dst_image);
      cvShowImage("HSV_image", HSV_Image);
      cvShowImage("src_image", src_image);

      int c = cvWaitKey(33);
      switch (c)
      {
      case 27:                                     // Esc: quit
        return -1;
      case 'p':                                    // 'p': pause until 'p' is pressed again
        while (cvWaitKey(0) != 'p');
        break;
      }
    }
  }


  // Free the per-pixel codewords and the codebook array itself
  for (int n = 0; n < total; n++)
  {
    for (int k = 0; k < Tcodebook[n].numEntries; k++)
    {
      delete Tcodebook[n].cb[k];
    }
    if (Tcodebook[n].cb)
    {
      delete [] Tcodebook[n].cb;
    }
  }
  delete [] Tcodebook;


  // src_image is owned by the capture (cvQueryFrame) and must not be released here
  cvReleaseImage(&dst_image);
  cvReleaseImage(&HSV_Image);
  cvReleaseCapture(&capture);
  cvReleaseVideoWriter(&writer);


  int d = cvWaitKey(0);
  if (d == 27)
  {
    return -1;
  }
  return 0;
}


//
// int update_codebook(uchar *p, codeBook &c, unsigned *cbBounds, int numChannels)
// Updates a pixel's codebook with a new data point
//
// p                Pointer to a pixel (interleaved colour channels)
// c                Codebook for this pixel
// cbBounds         Learning bounds for the codebook (rule of thumb: 10)
// numChannels      Number of colour channels we're learning
//
// NOTES:
//      cbBounds must be of length equal to numChannels
//
// RETURN
//      Index of the codeword that absorbed the pixel
//
int update_codebook(uchar *p, codeBook& c, unsigned* cbBounds, int numChannels)
{
  if (c.numEntries == 0)
  {
    c.t = 0;
  }
  c.t += 1;                    // record a learning event; the stale bookkeeping below needs c.t to advance

  // SET HIGH AND LOW LEARNING BOUNDS FOR THIS SAMPLE
  //
  int high[3], low[3];         // signed, so the clamping below actually works
  int i, n;
  for (n = 0; n < numChannels; n++)
  {
    high[n] = *(p + n) + (int)*(cbBounds + n);
    if (high[n] > 255)
    {
      high[n] = 255;
    }
    low[n] = *(p + n) - (int)*(cbBounds + n);
    if (low[n] < 0)
    {
      low[n] = 0;
    }
  }
  int matchChannel;

  // SEE IF THIS FITS AN EXISTING CODEWORD
  //
  for (i = 0; i < c.numEntries; i++)
  {
    matchChannel = 0;
    for (n = 0; n < numChannels; n++)
    {
      if ((c.cb[i]->learnLow[n] <= *(p + n)) && (*(p + n) <= c.cb[i]->learnHigh[n]))    // found an entry for this channel
      {
        matchChannel++;
      }
    }

    if (matchChannel == numChannels)      // an entry matched on every channel
    {
      c.cb[i]->t_last_update = c.t;
      // adjust this codeword's min/max on each channel
      for (n = 0; n < numChannels; n++)
      {
        if (c.cb[i]->max[n] < *(p + n))
        {
          c.cb[i]->max[n] = *(p + n);
        }
        else if (c.cb[i]->min[n] > *(p + n))
        {
          c.cb[i]->min[n] = *(p + n);
        }
      }
      break;
    }
  }

  // OVERHEAD TO TRACK POTENTIALLY STALE ENTRIES
  //
  for (int s = 0; s < c.numEntries; s++)
  {
    // track how long each codebook entry has gone without a match
    //
    int negRun = c.t - c.cb[s]->t_last_update;
    if (c.cb[s]->stale < negRun)
    {
      c.cb[s]->stale = negRun;
    }
  }

  // ENTER A NEW CODEWORD IF NEEDED
  //
  if (i == c.numEntries)      // no existing codeword matched, so make one
  {
    code_element** foo = new code_element* [c.numEntries + 1];
    for (int ii = 0; ii < c.numEntries; ii++)
    {
      foo[ii] = c.cb[ii];
    }
    foo[c.numEntries] = new code_element;
    if (c.numEntries)
    {
      delete [] c.cb;
    }
    c.cb = foo;
    for (n = 0; n < numChannels; n++)
    {
      c.cb[c.numEntries]->learnHigh[n] = high[n];
      c.cb[c.numEntries]->learnLow[n] = low[n];
      c.cb[c.numEntries]->max[n] = *(p + n);
      c.cb[c.numEntries]->min[n] = *(p + n);
    }
    c.cb[c.numEntries]->t_last_update = c.t;
    c.cb[c.numEntries]->stale = 0;
    c.numEntries++;
  }

  // SLOWLY ADJUST LEARNING BOUNDS
  //
  for (n = 0; n < numChannels; n++)
  {
    if (c.cb[i]->learnHigh[n] < high[n])
    {
      c.cb[i]->learnHigh[n]++;
    }
    if (c.cb[i]->learnLow[n] > low[n])
    {
      c.cb[i]->learnLow[n]--;
    }
  }
  return i;
}


//
// uchar cvBackgroundDiff( uchar *p, codeBook &c, int numChannels,
//                         int *minMod, int *maxMod)
// Given a pixel and a codebook, determine whether the pixel is
// covered by the codebook
//
// p            Pixel pointer (interleaved colour channels)
// c            Codebook reference
// numChannels  Number of channels we are testing
// maxMod       Add this (possibly negative) number onto 
//              max level when determining if new pixel is foreground
// minMod       Subtract this (possibly negative) number from 
//              min level when determining if pixel is foreground
//
// NOTES: 
// minMod and maxMod must have length numChannels, 
// e.g. 3 channels => minMod[3], maxMod[3].
//
// Return
// 0 => background, 255 => foreground
uchar cvBackgroundDiff(uchar *p, codeBook &c, int numChannels,
                       int *minMod, int *maxMod)
{
  int matchChannel;
  int i;
  // SEE IF THIS FITS AN EXISTING CODEWORD
  for (i = 0; i < c.numEntries; i++)
  {
    matchChannel = 0;
    for (int n = 0; n < numChannels; n++)
    {
      if ((c.cb[i]->min[n] - minMod[n] <= *(p + n)) &&
          (*(p + n) <= c.cb[i]->max[n] + maxMod[n]))
      {
        matchChannel++;        // found an entry for this channel
      }
      else
      {
        break;
      }
    }
    if (matchChannel == numChannels)
    {
      break;                   // found an entry that matched all channels
    }
  }
  if (i >= c.numEntries)
  {
    return 255;                // no codeword covers this pixel: foreground
  }
  return 0;                    // background
}


//
//int cvClearStaleEntries(codeBook &c)
// During learning, after you've learned for some period of time, 
// periodically call this to clear out stale codebook entries
//
// c   Codebook to clean up
//
// Return
// number of entries cleared
int cvClearStaleEntries(codeBook &c)
{
  int staleThresh = c.t >> 1;
  int *keep = new int [c.numEntries];
  int keepCnt = 0;
  // SEE WHICH CODEBOOK ENTRIES ARE TOO STALE
  for (int i = 0; i < c.numEntries; i++)
  {
    if (c.cb[i]->stale > staleThresh)
    {
      keep[i] = 0;             // mark for destruction
    }
    else
    {
      keep[i] = 1;             // mark to keep
      keepCnt += 1;
    }
  }
  // KEEP ONLY THE GOOD
  c.t = 0;                     // full reset on stale tracking
  code_element **foo = new code_element* [keepCnt];
  int k = 0;
  for (int ii = 0; ii < c.numEntries; ii++)
  {
    if (keep[ii])
    {
      foo[k] = c.cb[ii];
      // refresh these entries for the next clearStale pass
      foo[k]->t_last_update = 0;
      k++;
    }
    else
    {
      delete c.cb[ii];         // discarded codewords would otherwise leak
    }
  }
  // CLEAN UP
  delete [] keep;
  delete [] c.cb;
  c.cb = foo;
  int numCleared = c.numEntries - keepCnt;
  c.numEntries = keepCnt;
  return numCleared;
}
//
//

//void find_connected_components(IplImage *mask, int poly1_hull0,
// float perimScale, int  *num,
// CvRect *bbs, CvPoint *centers)
//This cleans up the foreground segmentation mask derived from
//calls to backgroundDiff
//
//mask       Is a grayscale (8-bit depth) "raw" mask image that
//           will be cleaned up
//
//OPTIONAL PARAMETERS:
//poly1_hull0 If set (DEFAULT), approximate each connected component
//            by a polygon; if 0, use its convex hull instead
//perimScale  Len = image (width + height)/perimScale. If a contour's
//            perimeter is shorter than Len, that contour is deleted
//            (DEFAULT: 4)
//num         Maximum number of rectangles and/or centers to
//            return; on return, will contain the number filled
//            (DEFAULT: NULL)
//bbs         Pointer to bounding box rectangle vector of
//            length num (DEFAULT: NULL)
//centers     Pointer to contour centers vector of length
//            num (DEFAULT: NULL)
//
void find_connected_components(
  IplImage* mask,
  int       poly1_hull0,
  float     perimScale,
  int*      num,
  CvRect*   bbs,
  CvPoint*  centers)
{
  static CvMemStorage* mem_storage = NULL;
  static CvSeq* contours = NULL;

  // CLEAN UP RAW MASK
  //
  cvMorphologyEx(mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
  cvMorphologyEx(mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);

  // FIND CONTOURS AROUND ONLY BIGGER REGIONS
  //
  if (mem_storage == NULL)
  {
    mem_storage = cvCreateMemStorage(0);
  }
  else
  {
    cvClearMemStorage(mem_storage);
  }

  CvContourScanner scanner = cvStartFindContours(mask, mem_storage, sizeof(CvContour),
                                                 CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);


  CvSeq* c;
  int numCont = 0;
  while ((c = cvFindNextContour(scanner)) != NULL)
  {
    double len = cvContourPerimeter(c);

    // calculate the perimeter length threshold
    //
    double q = (mask->height + mask->width) / perimScale;

    // get rid of a blob if its perimeter is too small:
    //
    if (len < q)
    {
      cvSubstituteContour(scanner, NULL);
    }
    else
    {
      // smooth its edges if it's large enough
      //
      CvSeq* c_new;
      if (poly1_hull0)
      {
        // polygonal approximation
        //
        c_new = cvApproxPoly(c, sizeof(CvContour), mem_storage, CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL);
      }
      else
      {
        // convex hull of the segmentation
        //
        c_new = cvConvexHull2(c, mem_storage, CV_CLOCKWISE, 1);
      }
      cvSubstituteContour(scanner, c_new);
      numCont++;
    }
  }
  contours = cvEndFindContours(&scanner);


  // Just some convenience variables
  const CvScalar CVX_WHITE = CV_RGB(0xff, 0xff, 0xff);
  const CvScalar CVX_BLACK = CV_RGB(0x00, 0x00, 0x00);
  // PAINT THE FOUND REGIONS BACK INTO THE IMAGE
  //
  cvZero(mask);
  IplImage *maskTemp;

  // CALC CENTER OF MASS AND/OR BOUNDING RECTANGLE
  //
  if (num != NULL)
  {
    // user wants to collect statistics
    //
    int N = *num, numFilled = 0, i = 0;
    CvMoments moments;
    double M00, M01, M10;
    maskTemp = cvCloneImage(mask);
    for (i = 0, c = contours; c != NULL; c = c->h_next, i++)
    {
      if (i < N)
      {
        // only process up to *num of them
        //
        cvDrawContours(maskTemp, c, CVX_WHITE, CVX_WHITE, -1, CV_FILLED, 8);

        // find the center of each contour
        //
        if (centers != NULL)
        {
          cvMoments(maskTemp, &moments, 1);
          M00 = cvGetSpatialMoment(&moments, 0, 0);
          M10 = cvGetSpatialMoment(&moments, 1, 0);
          M01 = cvGetSpatialMoment(&moments, 0, 1);
          centers[i].x = (int)(M10/M00);
          centers[i].y = (int)(M01/M00);
        }

        // bounding rectangles around blobs
        //
        if (bbs != NULL)
        {
          bbs[i] = cvBoundingRect(c);
        }
        cvZero(maskTemp);
        numFilled++;
      }

      // draw filled contours into mask
      //
      cvDrawContours(mask, c, CVX_WHITE, CVX_WHITE, -1, CV_FILLED, 8);
    }
    *num = numFilled;
    cvReleaseImage(&maskTemp);
  }


  // ELSE JUST DRAW PROCESSED CONTOURS INTO THE MASK
  //
  else
  {
    // the user doesn't want statistics, just draw the contours
    //
    for (c = contours; c != NULL; c = c->h_next)
    {
      cvDrawContours(mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8);
    }
  }
}