作者 群号 C语言交流中心 240137450 微信 15013593099
opencv——通过面积筛选最大轮廓,并求凸包矩形的长和宽
#include "stdafx.h"
#include <iostream>
#include<string>
#include <stdio.h>
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <opencv\cxcore.h>
#include <stdio.h>
using namespace std;
// Find all contours in a thresholded image, keep the one with the largest
// area, and report the side lengths of its minimum-area bounding rectangle.
int _tmain(int argc, _TCHAR* argv[])
{
	// load as grayscale; the original dereferenced a NULL image on a missing file
	IplImage* src = cvLoadImage("f:\\img\\cornor.png", 0);
	if (!src)
	{
		cout << "cannot load f:\\img\\cornor.png" << endl;
		return -1;
	}
	IplImage* des = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
	cvZero(des);
	cvThreshold(src, src, 100, 255, CV_THRESH_BINARY);
	CvMemStorage* memory = cvCreateMemStorage(0);
	CvSeq* Icontour = NULL;
	CvSeq* maxContour = NULL;
	cvShowImage("原始图像1", src);
	// NOTE: cvFindContours modifies src in place
	cvFindContours(src, memory, &Icontour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
	double maxArea = 0;
	// scan the contour list for the largest absolute area
	while (Icontour)
	{
		double area = fabs(cvContourArea(Icontour, CV_WHOLE_SEQ));
		cvDrawContours(src, Icontour,
			CV_RGB(255, 255, 255), CV_RGB(255, 255, 255),
			0, 1, 8, cvPoint(0, 0));
		if (maxArea < area)
		{
			maxArea = area;
			maxContour = Icontour;
		}
		Icontour = Icontour->h_next;
	}
	// the original passed a possibly-NULL contour straight to
	// cvMinAreaRect2 and crashed on an all-black input
	if (maxContour)
	{
		cvDrawContours(des, maxContour, CV_RGB(255, 255, 255), CV_RGB(255, 255, 255), 0, 1, 8, cvPoint(0, 0));
		CvBox2D box = cvMinAreaRect2(maxContour);
		cout<<"长度: "<<box.size.width<<endl<<"宽度: "<<box.size.height;
		// draw the four edges of the minimum-area rectangle
		CvPoint2D32f p4[4];
		cvBoxPoints(box, p4);
		for (int k = 0; k < 4; k++)
			cvLine(des, cvPoint(cvRound(p4[k].x), cvRound(p4[k].y)),
				cvPoint(cvRound(p4[(k + 1) % 4].x), cvRound(p4[(k + 1) % 4].y)),
				CV_RGB(0, 0, 255), 2);
	}
	cvShowImage("原始图像", src);
	cvShowImage("保留最大值", des);
	cvWaitKey(0);
	// release everything (the original leaked both images and the storage)
	cvReleaseImage(&src);
	cvReleaseImage(&des);
	cvReleaseMemStorage(&memory);
	cvDestroyAllWindows();
	return 0;
}
阈值分割图像
#include "stdafx.h"
#include "opencv2\opencv.hpp"
using namespace cv;
IplImage* g_image=NULL;
IplImage* g_gray=NULL;
int g_thresh=120;
CvMemStorage* g_storage=NULL;
// Trackbar callback: threshold the global image at g_thresh, find all
// contours, and draw the whole contour tree into the "Contours" window.
void on_trackbar(int)
{
	// the original crashed in cvGetSize/cvCvtColor when the load failed
	if (g_image == NULL)
		return;
	if (g_storage == NULL)
	{
		// first call: allocate the gray working image and contour storage
		g_gray = cvCreateImage(cvGetSize(g_image), 8, 1);
		g_storage = cvCreateMemStorage(0);
	}
	else
		cvClearMemStorage(g_storage);   // reuse the storage between calls
	CvSeq* contours = NULL;
	cvCvtColor(g_image, g_gray, CV_BGR2GRAY);
	cvThreshold(g_gray, g_gray, g_thresh, 255, CV_THRESH_BINARY);
	cvFindContours(g_gray, g_storage, &contours);
	cvZero(g_gray);
	if (contours)
	{
		// max_level 100: draw the entire contour hierarchy
		cvDrawContours(g_gray,
			contours,
			cvScalarAll(255),
			cvScalarAll(255),
			100);
	}
	cvShowImage("Contours", g_gray);
}
int main(int argc,char** argv)
{
g_image = cvLoadImage("f:\\img\\cornor.png");
cvNamedWindow("Contours",1);
cvCreateTrackbar("Threshold","Contours",&g_thresh,255,on_trackbar);
on_trackbar(0);
cvWaitKey();
return 0;
}
旋转图像
#include "stdafx.h"
#include <opencv2\opencv.hpp>
#include <opencv\cv.h>
#include <iostream>
#include<string>
#include <windows.h>
using namespace std;
using namespace cv;
int _tmain(int argc, _TCHAR* argv[])
{
IplImage *src = cvLoadImage("f:\\img\\lena.jpg");
IplImage *dst = 0;
/* the first command line parameter must be image file name */
if (src != 0)
{
int delta = 1;
int angle = 45;
int opt =0; // 1: 旋转加缩放
// 0: 仅仅旋转
double factor;
dst = cvCloneImage (src);
cvNamedWindow ("src", 1);
cvShowImage ("src", src);
for (;;)
{
float m[6];
// Matrix m looks like:
//
// [ m0 m1 m2 ] ===> [ A11 A12 b1 ]
// [ m3 m4 m5 ] [ A21 A22 b2 ]
//
CvMat M = cvMat (2, 3, CV_32F, m);
int w = src->width;
int h = src->height;
if (opt) // 旋转加缩放
factor = (cos (angle * CV_PI / 180.) + 1.0) * 2;
else // 仅仅旋转
factor = 1;
m[0] = (float) (factor * cos (-angle * 2 * CV_PI / 180.));
m[1] = (float) (factor * sin (-angle * 2 * CV_PI / 180.));
m[3] = -m[1];
m[4] = m[0];
// 将旋转中心移至图像中间
m[2] = w * 0.5f;
m[5] = h * 0.5f;
// dst(x,y) = A * src(x,y) + b
cvZero (dst);
cvGetQuadrangleSubPix (src, dst, &M);
cvNamedWindow ("dst", 1);
cvShowImage ("dst", dst);
if (cvWaitKey (1) == 27) //ESC
break;
angle = (int) (angle + delta) % 360;
} // for-loop
}
return 0;
}
人脸检测
最近搞了几天的人脸检测,终于把大体框架和原理搞清楚了,现在写出来供大家学习之用,如有不对之处,还请大家指正。也希望大家在学习opencv的过程中能将学习过程及重点记录下来,以博客的形式分析,毕竟opencv的教材还不太多,我们自己学习大部分要靠网上的资料。通过博客分享的形式能使大家快速进步,同时也算是对自己学习的一个记录和总结。只是一个倡议,大家自己决定,呵呵。
好了进入正题。
学习基于opencv的人脸检测,首先要理清大概需要做哪些事情。这里总共分两步,第一步就是训练分类器,第二步就是利用训练好的分类器进行人脸检测。
1、训练分类器
训练分类器我没有学习,因为opencv的源代码中(opencv安装目录\data\haarcascades)中已经有了很多训练好的分类器供我们使用。但是有必要对分类器的训练原理和过程做一些介绍,以便后面进一步的学习中能够对这部分有一定了解。
目前人脸检测分类器大都是基于haar特征利用Adaboost学习算法训练的。
目标检测方法最初由Paul Viola [Viola01]提出,并由Rainer Lienhart [Lienhart02]对这一方法进行了改善. 首先,利用样本(大约几百幅样本图片)的 harr 特征进行分类器训练,得到一个级联的boosted分类器。训练样本分为正例样本和反例样本,其中正例样本是指待检目标样本(例如人脸或汽车等),反例样本指其它任意图片,所有的样本图片都被归一化为同样的尺寸大小(例如,20x20)。
分类器训练完以后,就可以应用于输入图像中的感兴趣区域(与训练样本相同的尺寸)的检测。检测到目标区域(汽车或人脸)分类器输出为1,否则输出为0。为了检测整幅图像,可以在图像中移动搜索窗口,检测每一个位置来确定可能的目标。 为了搜索不同大小的目标物体,分类器被设计为可以进行尺寸改变,这样比改变待检图像的尺寸大小更为有效。所以,为了在图像中检测未知大小的目标物体,扫描程序通常需要用不同比例大小的搜索窗口对图片进行几次扫描。
分类器中的“级联”是指最终的分类器是由几个简单分类器级联组成。在图像检测中,被检窗口依次通过每一级分类器, 这样在前面几层的检测中大部分的候选区域就被排除了,全部通过每一级分类器检测的区域即为目标区域。 目前支持这种分类器的boosting技术有四种: Discrete Adaboost, Real Adaboost, Gentle Adaboost and Logitboost。"boosted" 即指级联分类器的每一层都可以从中选取一个boosting算法(权重投票),并利用基础分类器的自我训练得到。基础分类器是至少有两个叶结点的决策树分类器。 Haar特征是基础分类器的输入,主要描述如下。目前的算法主要利用下面的Harr特征。
每个特定分类器所使用的特征用形状、感兴趣区域中的位置以及比例系数(这里的比例系数跟检测时候采用的比例系数是不一样的,尽管最后会取两个系数的乘积值)来定义。例如在第二行特征(2c)的情况下,响应计算为覆盖全部特征整个矩形框(包括两个白色矩形框和一个黑色矩形框)象素的和减去黑色矩形框内象素和的三倍 。每个矩形框内的象素和都可以通过积分图象很快的计算出来。
通过上述陈述,应该对整个训练过程有个大概的了解,但是对于训练的具体过程还是不太明晰,那么可以继续参考下面的文章:
http://apps.hi.baidu.com/share/detail/44451430
相信看过上面这篇文章以及前面的陈述后大家应该对分类器的训练原理有了一个整体的了解,至于一些细节如果还不清晰应该不影响使用,毕竟那些细节可能需要数字图像处理的专业知识。
2、利用分类器进行检测
前面也已经说过,opencv的源代码中已经给我们提供了一些训练好的分类器,例如人脸检测分类器,人体检测分类器等。那么如果没有什么特定的需要,我们完全可以利用这些分类器直接进行人脸及人体检测。
a、CvHaarClassifierCascade* cvLoadHaarClassifierCascade(const char* directory,cvSize orig_window_size);
directory
训练好的分类器路径
orig_window_size
级联分类器训练中采用的检测目标的尺寸。这个信息在分类器中没有存储,因此要单独指出。
函数 cvLoadHaarClassifierCascade 用于从文件中装载训练好的利用哈尔特征的级联分类器,或者从OpenCV中嵌入的分类器数据库中导入。分类器的训练可以应用函数haartraining(详细察看opencv/apps/haartraining) 这个数值是在训练分类器时就确定好的,修改它并不能改变检测的范围或精度。
需要注意的是,这个函数已经过时了。现在的目标检测分类器通常存储在 XML 或 YAML 文件中,而不是通过路径导入。从文件中导入分类器,可以使用函数 cvLoad 。
b、CvSeq* cvHaarDetectObjects(const CvArr* image, CvHaarClassifierCascade* cascade,CvMemStorage* storage,double scale_factor=1.1,int min_neighbors=3,int flags=0,CvSize min_size=cvSize(0,0));
-
image
- 被检图像 cascade
- harr 分类器级联的内部标识形式 storage 用来存储检测到的一序列候选目标矩形框的内存区域。 scale_factor
- 在前后两次相继的扫描中,搜索窗口的比例系数。例如1.1指将搜索窗口依次扩大10%。 min_neighbors
- 构成检测目标的相邻矩形的最小个数(缺省-1)。如果组成检测目标的小矩形的个数和小于min_neighbors-1 都会被排除。如果min_neighbors 为 0, 则函数不做任何操作就返回所有的被检候选矩形框,这种设定值一般用在用户自定义对检测结果的组合程序上。 flags
- 操作方式。当前唯一可以定义的操作方式是 CV_HAAR_DO_CANNY_PRUNING。如果被设定,函数利用Canny边缘检测器来排除一些边缘很少或者很多的图像区域,因为这样的区域一般不含被检目标。人脸检测中通过设定阈值使用了这种方法,并因此提高了检测速度。 min_size
- 检测窗口的最小尺寸。缺省的情况下被设为分类器训练时采用的样本尺寸(人脸检测中缺省大小是~20×20)。
c、void cvReleaseHaarClassifierCascade(CvHaarClassifierCascade** cascade);
cascade
双指针类型指针指向要释放的cascade. 指针由函数声明。
函数 cvReleaseHaarClassifierCascade 释放cascade的动态内存,其中cascade的动态内存或者是手工创建,或者通过函数 cvLoadHaarClassifierCascade 或 cvLoad分配。
三个主要函数介绍完之后,就可以看程序了,毕竟通过程序学函数和用法是最快的(个人觉得)。
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
void displaydetection(IplImage* pInpImg,CvSeq* pFaceRectSeq,char* FileName);
// Load an image and a frontal-face Haar cascade, run the detector and show
// every detected face framed in green. Returns -1 if any resource fails.
int main(int argc,char** argv)
{
	IplImage* pInpImg=0;
	CvHaarClassifierCascade* pCascade=0; // classifier loaded from disk
	CvMemStorage* pStorage=0;            // storage for the detected rectangles
	CvSeq* pFaceRectSeq;                 // sequence of face rectangles
	// initialisation
	pInpImg=cvLoadImage("c:\\img\\1.jpg",1);
	pStorage=cvCreateMemStorage(0); // dynamic storage, default 64k block size
	pCascade=(CvHaarClassifierCascade*)cvLoad("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml"); // load the cascade
	if (!pInpImg||!pStorage||!pCascade)
	{
		// the original passed a stray argv[1] argument with no matching
		// format specifier; it has been removed
		printf("initialization failed:%s\n",(!pInpImg)?"can't load image file":(!pCascade)?"can't load haar-cascade---make sure path is correct":"unable to allocate memory for data storage");
		// release whatever was successfully created (the original leaked it)
		if (pInpImg) cvReleaseImage(&pInpImg);
		if (pCascade) cvReleaseHaarClassifierCascade(&pCascade);
		if (pStorage) cvReleaseMemStorage(&pStorage);
		return -1;
	}
	// face detection
	pFaceRectSeq=cvHaarDetectObjects(pInpImg,pCascade,pStorage,
		1.2,2,CV_HAAR_DO_CANNY_PRUNING,cvSize(40,40));
	// frame every detected face with a rectangle
	displaydetection(pInpImg,pFaceRectSeq,argv[1]);
	cvReleaseImage(&pInpImg);
	cvReleaseHaarClassifierCascade(&pCascade);
	cvReleaseMemStorage(&pStorage);
	return 0;
}
// Draw one green rectangle per detected face on pInpImg and display the
// result until a key is pressed. FileName is accepted but unused.
void displaydetection(IplImage* pInpImg,CvSeq* pFaceRectSeq,char* FileName)
{
	int i;
	cvNamedWindow("haar window",1);
	// guard the dereference: the loop below already tolerated a NULL
	// sequence but this printf did not
	printf("the number of face is %d",pFaceRectSeq?pFaceRectSeq->total:0);
	for (i=0;i<(pFaceRectSeq?pFaceRectSeq->total:0);i++)
	{
		CvRect* r=(CvRect*)cvGetSeqElem(pFaceRectSeq,i);
		CvPoint pt1={r->x,r->y};
		CvPoint pt2={r->x+r->width,r->y+r->height};
		cvRectangle(pInpImg,pt1,pt2,CV_RGB(0,255,0),3,4,0);
	}
	cvShowImage("haar window",pInpImg);
	cvWaitKey(0);
	cvDestroyWindow("haar window");
}
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
void displaydetection(IplImage* pInpImg,CvSeq* pFaceRectSeq,char* FileName);
// Load an image and a frontal-face Haar cascade, run the detector and show
// every detected face framed in green. Returns -1 if any resource fails.
int main(int argc,char** argv)
{
	IplImage* pInpImg=0;
	CvHaarClassifierCascade* pCascade=0; // classifier loaded from disk
	CvMemStorage* pStorage=0;            // storage for the detected rectangles
	CvSeq* pFaceRectSeq;                 // sequence of face rectangles
	// initialisation
	pInpImg=cvLoadImage("c:\\img\\1.jpg",1);
	pStorage=cvCreateMemStorage(0); // dynamic storage, default 64k block size
	pCascade=(CvHaarClassifierCascade*)cvLoad("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml"); // load the cascade
	if (!pInpImg||!pStorage||!pCascade)
	{
		// the original passed a stray argv[1] argument with no matching
		// format specifier; it has been removed
		printf("initialization failed:%s\n",(!pInpImg)?"can't load image file":(!pCascade)?"can't load haar-cascade---make sure path is correct":"unable to allocate memory for data storage");
		// release whatever was successfully created (the original leaked it)
		if (pInpImg) cvReleaseImage(&pInpImg);
		if (pCascade) cvReleaseHaarClassifierCascade(&pCascade);
		if (pStorage) cvReleaseMemStorage(&pStorage);
		return -1;
	}
	// face detection
	pFaceRectSeq=cvHaarDetectObjects(pInpImg,pCascade,pStorage,
		1.2,2,CV_HAAR_DO_CANNY_PRUNING,cvSize(40,40));
	// frame every detected face with a rectangle
	displaydetection(pInpImg,pFaceRectSeq,argv[1]);
	cvReleaseImage(&pInpImg);
	cvReleaseHaarClassifierCascade(&pCascade);
	cvReleaseMemStorage(&pStorage);
	return 0;
}
// Draw one green rectangle per detected face on pInpImg and display the
// result until a key is pressed. FileName is accepted but unused.
void displaydetection(IplImage* pInpImg,CvSeq* pFaceRectSeq,char* FileName)
{
	int i;
	cvNamedWindow("haar window",1);
	// guard the dereference: the loop below already tolerated a NULL
	// sequence but this printf did not
	printf("the number of face is %d",pFaceRectSeq?pFaceRectSeq->total:0);
	for (i=0;i<(pFaceRectSeq?pFaceRectSeq->total:0);i++)
	{
		CvRect* r=(CvRect*)cvGetSeqElem(pFaceRectSeq,i);
		CvPoint pt1={r->x,r->y};
		CvPoint pt2={r->x+r->width,r->y+r->height};
		cvRectangle(pInpImg,pt1,pt2,CV_RGB(0,255,0),3,4,0);
	}
	cvShowImage("haar window",pInpImg);
	cvWaitKey(0);
	cvDestroyWindow("haar window");
}
人脸识别
#include <stdio.h>
#include <string.h>
#include "cv.h"
#include "cvaux.h"
#include "highgui.h"
定义几个重要的全局变量
IplImage ** faceImgArr = 0; // face images: training set in learn(), test set in recognize()
CvMat * personNumTruthMat = 0; // ground-truth person ID for each image
int nTrainFaces = 0; // number of training images
int nEigens = 0; // number of eigenvalues/eigenvectors kept
IplImage * pAvgTrainImg = 0; // average image of the training faces
IplImage ** eigenVectArr = 0; // eigenvectors, i.e. the PCA projection basis
CvMat * eigenValMat = 0; // eigenvalues
CvMat * projectedTrainFaceMat = 0; // training images projected into the eigenspace
函数原型
// Forward declarations: the two phases (learn/recognize) and their helpers.
void learn();
void recognize();
void doPCA();
void storeTrainingData();
int loadTrainingData(CvMat ** pTrainPersonNumMat);
int findNearestNeighbor(float * projectedTestFace);
int loadFaceImgArray(char * filename);
void printUsage();
// Entry point: covers the two phases (training and recognition); run the
// program twice, selecting the phase with the command-line argument.
// The original declared `void main`, which is non-standard C++; it now
// returns 0 on success and 1 on a usage error.
int main( int argc, char** argv )
{
	// validate that an input was specified
	if( argc != 2 )
	{
		printUsage();
		return 1;
	}
	// dispatch to the learning or recognition phase
	if( !strcmp(argv[1], "train") ) learn();
	else if( !strcmp(argv[1], "test") ) recognize();
	else
	{
		printf("Unknown command: %s\n", argv[1]);
		printUsage();
		return 1;
	}
	return 0;
}
// Training phase: load the images listed in train.txt, run PCA over them,
// project each training face into the eigenspace, and persist the results
// to facedata.xml for the recognition phase.
void learn()
{
int i, offset;
// load the training image set; returns the number of faces read
nTrainFaces = loadFaceImgArray("train.txt");
if( nTrainFaces < 2 )
{
fprintf(stderr,
"Need 2 or more training faces\n"
"Input file contains only %d\n", nTrainFaces);
return;
}
// principal component analysis (fills eigenVectArr/eigenValMat/pAvgTrainImg)
doPCA();
// project every training image into the PCA subspace
projectedTrainFaceMat = cvCreateMat( nTrainFaces, nEigens, CV_32FC1 );
// row stride of the projection matrix, in floats
offset = projectedTrainFaceMat->step / sizeof(float);
for(i=0; i<nTrainFaces; i++)
{
//int offset = i * nEigens;
cvEigenDecomposite(
faceImgArr[i],
nEigens,
eigenVectArr,
0, 0,
pAvgTrainImg,
//projectedTrainFaceMat->data.fl + i*nEigens);
projectedTrainFaceMat->data.fl + i*offset);
}
// save eigenvalues, projections, etc. to facedata.xml for the test phase
storeTrainingData();
}
//识别阶段代码
void recognize()
{
int i, nTestFaces = 0; // 测试人脸数
CvMat * trainPersonNumMat = 0; // 训练阶段的人脸数
float * projectedTestFace = 0;
// 加载测试图像,并返回测试人脸数
nTestFaces = loadFaceImgArray("test.txt");
printf("%d test faces loaded\n", nTestFaces);
// 加载保存在.xml文件中的训练结果
if( !loadTrainingData( &trainPersonNumMat ) ) return;
//
projectedTestFace = (float *)cvAlloc( nEigens*sizeof(float) );
for(i=0; i<nTestFaces; i++)
{
int iNearest, nearest, truth;
//将测试图像投影到子空间中
cvEigenDecomposite(
faceImgArr[i],
nEigens,
eigenVectArr,
0, 0,
pAvgTrainImg,
projectedTestFace);
iNearest = findNearestNeighbor(projectedTestFace);
truth = personNumTruthMat->data.i[i];
nearest = trainPersonNumMat->data.i[iNearest];
printf("nearest = %d, Truth = %d\n", nearest, truth);
}
}
// Load the training results previously written by storeTrainingData().
// On success fills the globals and *pTrainPersonNumMat and returns 1;
// returns 0 when facedata.xml cannot be opened.
int loadTrainingData(CvMat ** pTrainPersonNumMat)
{
	CvFileStorage * fileStorage;
	int i;
	fileStorage = cvOpenFileStorage( "facedata.xml", 0, CV_STORAGE_READ );
	if( !fileStorage )
	{
		fprintf(stderr, "Can't open facedata.xml\n");
		return 0;
	}
	nEigens = cvReadIntByName(fileStorage, 0, "nEigens", 0);
	nTrainFaces = cvReadIntByName(fileStorage, 0, "nTrainFaces", 0);
	*pTrainPersonNumMat = (CvMat *)cvReadByName(fileStorage, 0, "trainPersonNumMat", 0);
	eigenValMat = (CvMat *)cvReadByName(fileStorage, 0, "eigenValMat", 0);
	projectedTrainFaceMat = (CvMat *)cvReadByName(fileStorage, 0, "projectedTrainFaceMat", 0);
	pAvgTrainImg = (IplImage *)cvReadByName(fileStorage, 0, "avgTrainImg", 0);
	// one slot per eigenvector: the original sized this array with
	// nTrainFaces, which only worked because nTrainFaces > nEigens
	eigenVectArr = (IplImage **)cvAlloc(nEigens*sizeof(IplImage *));
	for(i=0; i<nEigens; i++)
	{
		char varname[200];
		sprintf( varname, "eigenVect_%d", i );
		eigenVectArr[i] = (IplImage *)cvReadByName(fileStorage, 0, varname, 0);
	}
	cvReleaseFileStorage( &fileStorage );
	return 1;
}
// Persist the training results (eigenvalues, projections, average image,
// eigenvectors) to facedata.xml for later use by loadTrainingData().
void storeTrainingData()
{
	CvFileStorage * fileStorage;
	int i;
	fileStorage = cvOpenFileStorage( "facedata.xml", 0, CV_STORAGE_WRITE );
	if( !fileStorage ) // the original wrote through a NULL handle on failure
	{
		fprintf(stderr, "Can't create facedata.xml\n");
		return;
	}
	// store eigenvalues, projection matrix, average image, etc.
	cvWriteInt( fileStorage, "nEigens", nEigens );
	cvWriteInt( fileStorage, "nTrainFaces", nTrainFaces );
	cvWrite(fileStorage, "trainPersonNumMat", personNumTruthMat, cvAttrList(0,0));
	cvWrite(fileStorage, "eigenValMat", eigenValMat, cvAttrList(0,0));
	cvWrite(fileStorage, "projectedTrainFaceMat", projectedTrainFaceMat, cvAttrList(0,0));
	cvWrite(fileStorage, "avgTrainImg", pAvgTrainImg, cvAttrList(0,0));
	for(i=0; i<nEigens; i++)
	{
		char varname[200];
		sprintf( varname, "eigenVect_%d", i );
		cvWrite(fileStorage, varname, eigenVectArr[i], cvAttrList(0,0));
	}
	cvReleaseFileStorage( &fileStorage );
}
// Return the index of the training face whose eigenspace projection is
// closest to the given test projection, using a Mahalanobis-weighted
// squared distance (each component is divided by its eigenvalue).
int findNearestNeighbor(float * projectedTestFace)
{
	double bestDist = DBL_MAX; // smallest distance seen so far
	int bestIdx = 0;           // index of the closest training face
	for (int train = 0; train < nTrainFaces; ++train)
	{
		// row of the projection matrix belonging to this training face
		const float *row = projectedTrainFaceMat->data.fl + train * nEigens;
		double dist = 0;
		for (int e = 0; e < nEigens; ++e)
		{
			const float diff = projectedTestFace[e] - row[e];
			dist += diff * diff / eigenValMat->data.fl[e]; // Mahalanobis weighting
			// dist += diff * diff;                        // Euclidean variant
		}
		if (dist < bestDist)
		{
			bestDist = dist;
			bestIdx = train;
		}
	}
	return bestIdx;
}
// Principal component analysis over the loaded training faces: computes the
// average image, the leading eigenvectors and their eigenvalues, filling the
// globals nEigens, eigenVectArr, eigenValMat and pAvgTrainImg.
void doPCA()
{
int i;
CvTermCriteria calcLimit;
CvSize faceImgSize;
// keep one fewer eigenvector than training images (the PCA maximum)
nEigens = nTrainFaces-1;
// allocate storage for the eigenvectors, sized from the first face image
faceImgSize.width = faceImgArr[0]->width;
faceImgSize.height = faceImgArr[0]->height;
eigenVectArr = (IplImage**)cvAlloc(sizeof(IplImage*) * nEigens); // one image per eigenvector
for(i=0; i<nEigens; i++)
eigenVectArr[i] = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);
// allocate storage for the eigenvalues
eigenValMat = cvCreateMat( 1, nEigens, CV_32FC1 );
// allocate storage for the average image
pAvgTrainImg = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);
// PCA termination criterion: stop after nEigens iterations
calcLimit = cvTermCriteria( CV_TERMCRIT_ITER, nEigens, 1);
// compute the average image, eigenvalues and eigenvectors
cvCalcEigenObjects(
nTrainFaces,
(void*)faceImgArr,
(void*)eigenVectArr,
CV_EIGOBJ_NO_CALLBACK,
0,
0,
&calcLimit,
pAvgTrainImg,
eigenValMat->data.fl);
// L1-normalize the eigenvalues so they can be used as distance weights
cvNormalize(eigenValMat, eigenValMat, 1, 0, CV_L1, 0);
}
// Load the images listed in a text file, one "<person id> <image path>"
// pair per line, into faceImgArr/personNumTruthMat. Returns the number of
// faces loaded, or 0 on any error.
int loadFaceImgArray(char * filename)
{
	FILE * imgListFile = 0;
	char imgFilename[512];
	int iFace, nFaces=0;
	if( !(imgListFile = fopen(filename, "r")) )
	{
		fprintf(stderr, "Can\'t open file %s\n", filename);
		return 0;
	}
	// count the faces: one per line
	while( fgets(imgFilename, 512, imgListFile) ) ++nFaces;
	rewind(imgListFile);
	// allocate storage for the face images and their ID numbers
	faceImgArr = (IplImage **)cvAlloc( nFaces*sizeof(IplImage *) );
	personNumTruthMat = cvCreateMat( 1, nFaces, CV_32SC1 );
	for(iFace=0; iFace<nFaces; iFace++)
	{
		// read the ID number and image path; the original ignored fscanf's
		// result and read garbage on a malformed line
		if( fscanf(imgListFile,
			"%d %s", personNumTruthMat->data.i+iFace, imgFilename) != 2 )
		{
			fprintf(stderr, "Malformed line %d in %s\n", iFace+1, filename);
			fclose(imgListFile);
			return 0;
		}
		// load the face image as grayscale
		faceImgArr[iFace] = cvLoadImage(imgFilename, CV_LOAD_IMAGE_GRAYSCALE);
		if( !faceImgArr[iFace] )
		{
			fprintf(stderr, "Can\'t load image from %s\n", imgFilename);
			fclose(imgListFile); // the original leaked the FILE* here
			return 0;
		}
	}
	fclose(imgListFile);
	return nFaces;
}
// Print command-line usage. The original separated the continuation lines
// with commas, passing them as extra printf arguments with no matching
// format specifiers -- so everything after the first line was never
// printed. String-literal concatenation fixes that.
void printUsage()
{
	printf("Usage: eigenface <command>\n"
	       " Valid commands are\n"
	       " train\n"
	       " test\n");
}
去除背景
#include "highgui.h"
#include "cv.h"
#include "cxcore.h"
/* Pointers for the temporary images and the background-statistics images. */
// three-channel float images: running average, diff accumulator, previous
// frame, and the high/low threshold images
IplImage *IavgF,*IdiffF,*IprevF,*IhiF,*IlowF;
IplImage *Iscratch,*Iscratch2;
// single-channel float planes (per-colour-channel working images)
IplImage *Igray1,*Igray2,*Igray3;
IplImage *Ilow1, *Ilow2, *Ilow3;
IplImage *Ihi1, *Ihi2, *Ihi3;
// single-channel byte scratch mask
IplImage *Imaskt;
// user-defined image (unused)
//IplImage *mypic;
// number of frames accumulated into the background model
float Icount;
/* Forward declarations */
void AllocateImages(IplImage *I);
void accumulateBackground(IplImage * I);
void createModelsfromStats();
void setHighThreshold(float scale);
void setLowThreshold(float scale);
void backgroundDiff(IplImage *I,IplImage * Imask);
void DeallocateImages();
int main(int argc,char** argv)
{
cvNamedWindow("intput",CV_WINDOW_AUTOSIZE); //创建输入显示窗口
cvNamedWindow("output",CV_WINDOW_AUTOSIZE); //创建输出显示窗口
CvCapture* capture = cvCreateFileCapture("c:\\img\\1.avi"); //返回一个capture指针,指向视频
IplImage*Img = cvQueryFrame(capture); //从视频中取出的图片
IplImage*Imask = cvCreateImage(cvGetSize(Img),IPL_DEPTH_8U,1);//创建输出图片,这里不能去掉cvCreateImage(cvGetSize(Img),IPL_DEPTH_8U,1),虽然我看例程里省略了
AllocateImages(Img); //调用创建临时图片函数
/*累积图像,只取了前30帧图片*/
while(Icount<30){
accumulateBackground(Img); //调用累积图像的函数,循环30次
Img = cvQueryFrame(capture);
cvShowImage("intput",Img);
cvWaitKey(20);
}
createModelsfromStats(); //背景建模
while(1)
{
Img = cvQueryFrame(capture);
if(!Img) break;
backgroundDiff(Img,Imask); //根据模型分割前景
cvShowImage("output",Imask); //显示图像,视频是一张一张图片连续播放的结果
cvShowImage("intput",Img);
char c = cvWaitKey(33); //当前帧被显示后,等待33ms再读取下一张图片
if(c==27) break; //等待期间按下esc键,ASCII码为27,则循环退出
}
cvReleaseCapture(&capture);
cvDestroyWindow("output");
cvDestroyWindow("intput");
DeallocateImages();
}
/* Allocate every temporary image used by the background model. The frame I
   is used only as a size reference. Matches the original exactly: the five
   statistics images and the two scratch images are zeroed; the per-channel
   float planes and the byte mask are left uninitialized. */
void AllocateImages( IplImage* I ){
	CvSize sz = cvGetSize(I);
	// three-channel float images, all zeroed after creation
	IplImage** rgbF[] = { &IavgF, &IdiffF, &IprevF, &IhiF, &IlowF, &Iscratch, &Iscratch2 };
	for (int k = 0; k < 7; ++k) {
		*rgbF[k] = cvCreateImage(sz, IPL_DEPTH_32F, 3);
		cvZero(*rgbF[k]);
	}
	// single-channel float planes (filled before first read, so not zeroed)
	IplImage** grayF[] = { &Ilow1, &Ilow2, &Ilow3, &Ihi1, &Ihi2, &Ihi3, &Igray1, &Igray2, &Igray3 };
	for (int k = 0; k < 9; ++k)
		*grayF[k] = cvCreateImage(sz, IPL_DEPTH_32F, 1);
	// single-channel byte scratch mask
	Imaskt = cvCreateImage(sz, IPL_DEPTH_8U, 1);
	// tiny positive start value avoids division by zero in createModelsfromStats()
	Icount = 0.0001;
}
/* Accumulate the per-pixel sum of frames (IavgF) and the sum of absolute
   frame-to-frame differences (IdiffF). The very first frame only primes
   IprevF, since no previous frame exists yet. */
void accumulateBackground( IplImage *I ){
	static int first = 1; // true until the first frame has been seen
	cvCvtScale( I, Iscratch, 1, 0 ); // promote the byte frame to float
	if (first) {
		first = 0;
	} else {
		cvAcc( Iscratch, IavgF );                 // running sum of frames
		cvAbsDiff( Iscratch, IprevF, Iscratch2 ); // |current - previous|
		cvAcc( Iscratch2, IdiffF );               // running sum of differences
		Icount += 1.0;                            // frames accumulated so far
	}
	cvCopy( Iscratch, IprevF ); // remember this frame for the next call
}
/* After accumulation, the statistics are turned into a background model.
   setHighThreshold/setLowThreshold are defined first. */
/* High threshold = average + scale * mean frame-to-frame difference,
   split into the three per-channel planes Ihi1..Ihi3. */
void setHighThreshold( float scale ){
cvConvertScale(IdiffF,Iscratch,scale);
cvAdd( Iscratch,IavgF,IhiF);
cvSplit( IhiF,Ihi1,Ihi2,Ihi3,0);
}
/* Low threshold = average - scale * mean frame-to-frame difference,
   split into the three per-channel planes Ilow1..Ilow3. */
void setLowThreshold( float scale ){
cvConvertScale(IdiffF,Iscratch,scale);
cvSub( IavgF,Iscratch,IlowF);
cvSplit( IlowF,Ilow1,Ilow2,Ilow3,0);
}
/* Turn the accumulated sums into a background model. */
void createModelsfromStats(){
cvConvertScale( IavgF, IavgF, (double)(1.0/Icount)); // IavgF = IavgF * (1.0/Icount): mean frame
cvConvertScale( IdiffF,IdiffF,(double)(1.0/Icount)); // mean frame-to-frame difference
cvAddS( IdiffF,cvScalar(1.0,1.0,1.0),IdiffF); // ensure the diff is never zero
setHighThreshold(7.0); // pixels more than 7x the mean diff above average are foreground
setLowThreshold(6.0);  // pixels more than 6x the mean diff below average are foreground
}
/* Segment the frame I against the background model: a pixel whose value in
   any colour channel falls outside its [low, high] band becomes foreground
   (255) in Imask; background pixels become 0. */
void backgroundDiff(
	IplImage *I,
	IplImage *Imask
	){
	cvCvtScale(I, Iscratch, 1, 0);                // promote to float
	cvSplit( Iscratch, Igray1, Igray2, Igray3, 0 ); // per-channel planes
	IplImage* gray[] = { Igray1, Igray2, Igray3 };
	IplImage* low[]  = { Ilow1,  Ilow2,  Ilow3  };
	IplImage* high[] = { Ihi1,   Ihi2,   Ihi3   };
	// channel 1 seeds the mask; channels 2 and 3 are OR-ed in
	cvInRange(gray[0], low[0], high[0], Imask);
	for (int c = 1; c < 3; ++c) {
		cvInRange(gray[c], low[c], high[c], Imaskt);
		cvOr(Imask, Imaskt, Imask);
	}
	// cvInRange marks in-band (background) pixels; invert so foreground=255.
	// A plain 255 does not convert to CvScalar implicitly, hence cvScalar(255).
	cvSubRS( Imask, cvScalar(255), Imask);
}
/*释放内存*/
void DeallocateImages()
{
cvReleaseImage(&IavgF);
cvReleaseImage(&IdiffF);
cvReleaseImage(&IprevF);
cvReleaseImage(&IhiF);
cvReleaseImage(&IlowF);
cvReleaseImage(&Ilow1);
cvReleaseImage(&Ilow2);
cvReleaseImage(&Ilow3);
cvReleaseImage(&Ihi1);
cvReleaseImage(&Ihi2);
cvReleaseImage(&Ihi3);
cvReleaseImage(&Iscratch);
cvReleaseImage(&Iscratch2);
cvReleaseImage(&Igray1);
cvReleaseImage(&Igray2);
cvReleaseImage(&Igray3);
cvReleaseImage(&Imaskt);
}
减法去背景
#include "highgui.h"
#include "cv.h"
#include "cxcore.h"
/*为不同临时图像和统计属性图像创建指针*/
// Background subtraction by running average: the first frame initialises
// the background; each later frame is differenced against it, thresholded
// into a foreground mask, and folded into the background with cvRunningAvg.
int main()
{
	IplImage* pFrame = NULL;   // current colour frame (owned by the capture)
	IplImage* pFrImg = NULL;   // binary foreground mask
	IplImage* pBkImg = NULL;   // background, converted for display
	CvMat* pFrameMat = NULL;   // current frame as float
	CvMat* pFrMat = NULL;      // |frame - background|
	CvMat* pBkMat = NULL;      // running-average background as float
	CvCapture* pCapture = NULL;
	int nFrmNum = 0;
	// display windows
	cvNamedWindow("video", 1);
	cvNamedWindow("background",1);
	cvNamedWindow("foreground",1);
	pCapture = cvCaptureFromFile("c:\\img\\1.avi");
	if (!pCapture) // the original passed a NULL capture to cvQueryFrame
	{
		printf("cannot open c:\\img\\1.avi\n");
		cvDestroyWindow("video");
		cvDestroyWindow("background");
		cvDestroyWindow("foreground");
		return -1;
	}
	while(pFrame = cvQueryFrame( pCapture ))
	{
		nFrmNum++;
		// first frame: allocate buffers and seed the background
		if(nFrmNum == 1)
		{
			pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U,1);
			pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U,1);
			pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			// work on single-channel images
			cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
			cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
			cvConvert(pFrImg, pFrameMat);
			cvConvert(pFrImg, pFrMat);
			cvConvert(pFrImg, pBkMat);
		}
		else
		{
			cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
			cvConvert(pFrImg, pFrameMat);
			// difference the current frame against the background
			cvAbsDiff(pFrameMat, pBkMat, pFrMat);
			// binarize the foreground
			cvThreshold(pFrMat, pFrImg, 60, 255.0, CV_THRESH_BINARY);
			// fold the frame into the background (learning rate 0.003)
			cvRunningAvg(pFrameMat, pBkMat, 0.003, 0);
			// convert the background back to an image for display
			cvConvert(pBkMat, pBkImg);
			cvShowImage("video", pFrame);
			cvShowImage("background", pBkImg);
			cvShowImage("foreground", pFrImg);
			if( cvWaitKey(2) >= 0 )
				break;
		}
	}
	// NOTE: pFrame is owned by the capture and must not be released here
	cvDestroyWindow("video");
	cvDestroyWindow("background");
	cvDestroyWindow("foreground");
	cvReleaseImage(&pFrImg);
	cvReleaseImage(&pBkImg);
	cvReleaseMat(&pFrameMat);
	cvReleaseMat(&pFrMat);
	cvReleaseMat(&pBkMat);
	cvReleaseCapture(&pCapture);
	return 0;
}
高斯去背景
高斯模型去除背景法也是背景去除的一种常用的方法,经常会用到视频图像侦测中。这种方法对于动态的视频图像特征侦测比较适合,因为模型中是前景和背景分离开来的。分离前景和背景的基准是判断像素点变化率,会把变化慢的学习为背景,变化快的视为前景。
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
//#include "cxtypes.h"
#include "cvaux.h"
# include <iostream>
using namespace std;
// Foreground segmentation with a Gaussian-mixture background model: the
// first frame initialises the model with explicit parameters, every later
// frame updates it and the cleaned-up foreground mask is displayed.
int main(int argc, char* argv[])
{
	IplImage* pFrame = NULL;            // current frame (owned by the capture)
	IplImage* pFrImg = NULL;            // foreground mask
	IplImage* pBkImg = NULL;            // background model image
	IplImage* FirstImg = NULL;          // scratch mask (kept for the disabled target-frame code)
	static IplImage* pyrImg = NULL;     // half-size buffer (kept for the disabled pyramid smoothing)
	CvCapture* pCapture = NULL;
	int nFrmNum = 0;
	cvNamedWindow("video",0);
	cvNamedWindow("foreground",0);
	cvResizeWindow("video",400,400);
	cvResizeWindow("foreground",400,400);
	if(!(pCapture = cvCaptureFromFile("c:\\img\\1.avi") ))
	{
		printf("Could not initialize camera , please check it !");
		return -1;
	}
	CvGaussBGModel* bg_model = NULL;
	while(pFrame = cvQueryFrame(pCapture))
	{
		nFrmNum++;
		if(nFrmNum == 1)
		{
			// allocate display/scratch buffers from the first frame's size
			pBkImg = cvCreateImage(cvGetSize(pFrame),IPL_DEPTH_8U,3);
			pFrImg = cvCreateImage(cvGetSize(pFrame),IPL_DEPTH_8U,1);
			FirstImg = cvCreateImage(cvGetSize(pFrame),IPL_DEPTH_8U,1);
			pyrImg = cvCreateImage(cvSize(pFrame->width/2,pFrame->height/2),IPL_DEPTH_8U,1);
			CvGaussBGStatModelParams params;
			params.win_size = 2000;     // learning rate = 1/win_size
			params.bg_threshold = 0.7;  // weight-sum threshold for the background test
			params.weight_init = 0.05;
			params.variance_init = 30;
			params.minArea = 15.f;
			params.n_gauss = 5;         // K = number of gaussians in the mixture
			params.std_threshold = 2.5;
			// the original contained the mis-encoded token `¶ms`
			// (HTML entity damage); the intended argument is &params
			bg_model = (CvGaussBGModel*)cvCreateGaussianBGModel(pFrame,&params);
		}
		else
		{
			// denoise the frame, update the model, and fetch its outputs
			cvSmooth(pFrame,pFrame,CV_GAUSSIAN,3,0,0,0);
			cvUpdateBGStatModel(pFrame,(CvBGStatModel*)bg_model,-0.00001);
			cvCopy(bg_model->foreground ,pFrImg,0);
			cvCopy(bg_model->background ,pBkImg,0);
			// clean the mask: erode speckle noise, dilate to merge blobs
			cvErode(pFrImg,pFrImg,0,1);
			cvDilate(pFrImg,pFrImg,0,3);
			cvShowImage("video",pFrame);
			cvShowImage("foreground",pFrImg);
			if(cvWaitKey(2)== 27) // ESC quits
			{
				break;
			}
		}
	}
	cvReleaseBGStatModel((CvBGStatModel**)&bg_model);
	cvDestroyAllWindows();
	cvReleaseImage(&pFrImg);
	cvReleaseImage(&FirstImg);
	cvReleaseImage(&pBkImg);
	cvReleaseImage(&pyrImg);
	// NOTE: frames returned by cvQueryFrame are owned by the capture; the
	// original's cvReleaseImage(&pFrame) here caused a double free
	cvReleaseCapture(&pCapture);
	return 0;
}
高斯背景建模的处理方法,加了平滑处理
#include <stdio.h>
#include "cv.h"
#include "cxcore.h"
#include "highgui.h"
//#include "cxtypes.h"
//#include "function.h"
#include "cvaux.h"
// Count the non-zero pixels of a single-channel 8-bit image, walking one
// row at a time via the image's widthStep (rows may be padded).
int cvSumImage(IplImage *Img)
{
	const unsigned char *row = (const unsigned char*)Img->imageData;
	int nonzero = 0;
	for (int y = 0; y < Img->height; ++y, row += Img->widthStep)
	{
		for (int x = 0; x < Img->width; ++x)
		{
			if (row[x])
				++nonzero;
		}
	}
	return nonzero;
}
// Gaussian-mixture background subtraction with mask smoothing, plus a
// heuristic that stops on a "target" frame (foreground area peaking above
// 30% of the image).
int main(int argc, char **argv)
{
	IplImage* pFrame = NULL;    // current frame (owned by the capture)
	IplImage* pFrImg = NULL;    // foreground mask
	IplImage* pBkImg = NULL;    // background image
	IplImage* FirstImg = NULL;  // previously accepted foreground mask
	CvCapture* pCapture = NULL;
	int nFrmNum = 0;
	cvNamedWindow("video", 1);
	cvNamedWindow("background", 1);
	cvNamedWindow("foreground", 1);
	cvMoveWindow("video", 30, 0);
	cvMoveWindow("background", 360, 0);
	cvMoveWindow("foreground", 690, 0);
	pCapture = cvCreateFileCapture("c:\\img\\1.avi");
	if (!pCapture) // the original passed a NULL capture to cvQueryFrame
	{
		printf("cannot open c:\\img\\1.avi\n");
		return -1;
	}
	CvGaussBGModel* bg_model = NULL;
	while( pFrame=cvQueryFrame( pCapture ))
	{
		nFrmNum ++;
		if( nFrmNum == 1)
		{
			pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U, 3);
			pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U, 1);
			FirstImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U, 1);
			CvGaussBGStatModelParams params;
			params.win_size = 200;        // learning rate = 1/win_size
			params.bg_threshold = 0.7;    // weight-sum threshold for the background test
			params.std_threshold = 2.5;
			params.weight_init = 0.05;
			params.variance_init = 30*30;
			params.minArea = 15.f;
			params.n_gauss = 5;           // gaussians per mixture
			// the original filled params and then passed 0 (library
			// defaults), discarding the configuration above; pass it
			bg_model = (CvGaussBGModel*) cvCreateGaussianBGModel(pFrame, &params);
		}
		else
		{
			int totalNum = pFrImg->width * pFrImg->height; // pixels per frame
			cvUpdateBGStatModel(pFrame, (CvBGStatModel *)bg_model);
			cvCopy(bg_model->foreground, pFrImg, 0);
			cvCopy(bg_model->background, pBkImg, 0);
			// clean the mask, then smooth its edges
			cvErode(pFrImg, pFrImg, NULL, 1);
			cvDilate(pFrImg, pFrImg, 0, 1);
			cvSmooth(pFrImg, pFrImg, CV_GAUSSIAN, 3, 0, 0, 0);
			// flip the origin so the images display upright
			pBkImg->origin = 1;
			pFrImg->origin = 1;
			cvShowImage("video", pFrame);
			cvShowImage("background", pBkImg);
			cvShowImage("foreground", pFrImg);
			// target-frame heuristic: once the foreground covers >30% of
			// the image, stop when its area starts shrinking again
			if(nFrmNum > 10 && (double)cvSumImage(pFrImg) > 0.3*totalNum )
			{
				int first, next;
				first = cvSumImage(FirstImg);
				next = cvSumImage( pFrImg );
				printf("number is %d\n", next);
				if(next < first)
				{
					break;
				}
				cvCopy(pFrImg, FirstImg, 0);
			}
			cvCopy(pFrImg, FirstImg, 0);
			if( cvWaitKey(2)>=0)
				break;
		}
	}
	if (pFrImg) // the original dereferenced a NULL pointer on an empty video
		printf("%d\n", pFrImg->width*pFrImg->height);
	cvReleaseBGStatModel((CvBGStatModel**)&bg_model);
	cvDestroyWindow("video");
	cvDestroyWindow("background");
	cvWaitKey(0);
	cvDestroyWindow("foreground");
	cvReleaseImage(&pFrImg);
	cvReleaseImage(&pBkImg);
	cvReleaseImage(&FirstImg);
	cvReleaseCapture(&pCapture);
	return 0;
}
harris角点
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray;           // input image and its grayscale version
int thresh = 200;            // Harris response threshold (trackbar-controlled)
int max_thresh = 255;
// string literals are const arrays; binding them to a plain char* is
// ill-formed in C++11 (and a deprecated conversion before that)
const char* source_window = "Source image";
const char* corners_window = "Corners detected";
/// Function header
void cornerHarris_demo( int, void* );
/** @function main */
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
src = imread( "c:\\img\\Tulip.jpg", 1 );
cvtColor( src, src_gray, CV_BGR2GRAY );
/// Create a window and a trackbar
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
imshow( source_window, src );
cornerHarris_demo( 0, 0 );
waitKey(0);
return(0);
}
/** @function cornerHarris_demo */
void cornerHarris_demo( int, void* )
{
Mat dst, dst_norm, dst_norm_scaled;
dst = Mat::zeros( src.size(), CV_32FC1 );
/// Detector parameters
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
/// Detecting corners
cornerHarris( src_gray, dst, blockSize, apertureSize, k, BORDER_DEFAULT );
/// Normalizing
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
/// Drawing a circle around corners
for( int j = 0; j < dst_norm.rows ; j++ )
{ for( int i = 0; i < dst_norm.cols; i++ )
{
if( (int) dst_norm.at<float>(j,i) > thresh )
{
circle( dst_norm_scaled, Point( i, j ), 5, Scalar(0), 2, 8, 0 );
}
}
}
/// Showing the result
namedWindow( corners_window, CV_WINDOW_AUTOSIZE );
imshow( corners_window, dst_norm_scaled );
}
blob检测
#include "opencv2/opencv.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
// Detect blobs in an image with SimpleBlobDetector (default parameters)
// and draw the keypoints in blue. Returns -1 when the image is missing.
int main(int argc, char** argv)
{
	Mat image = imread("c:\\img\\1.jpg");
	if (image.empty()) // the original crashed inside detect() on a failed load
	{
		cout << "cannot load c:\\img\\1.jpg" << endl;
		return -1;
	}
	vector<KeyPoint> keyPoints;
	SimpleBlobDetector::Params params;
	SimpleBlobDetector blobDetect(params);
	// NOTE: the original also called blobDetect.create("SimpleBlob") and
	// discarded the returned pointer -- a no-op, removed here
	blobDetect.detect(image, keyPoints);
	cout << keyPoints.size() << endl;
	drawKeypoints(image, keyPoints, image, Scalar(255,0,0));
	namedWindow("blobs");
	imshow("blobs", image);
	waitKey();
	return 0;
}
sift特征
#include "opencv2/opencv.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
#include "stdafx.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <iostream>
using namespace std;
using namespace cv;
/** SIFT demo: detect SIFT keypoints in two images, compute descriptors,
 *  brute-force match them, keep matches whose distance is below
 *  0.31 * max_dist, and visualise the result. */
int main(int argc, char* argv[])
{
initModule_nonfree();  // required before creating SIFT/SURF algorithms
Ptr<FeatureDetector> detector = FeatureDetector::create( "SIFT" );
Ptr<DescriptorExtractor> descriptor_extractor = DescriptorExtractor::create( "SIFT" );
Ptr<DescriptorMatcher> descriptor_matcher = DescriptorMatcher::create( "BruteForce" );
if( detector.empty() || descriptor_extractor.empty() || descriptor_matcher.empty() )
{
  cout<<"fail to create detector!";
  return -1;  // BUGFIX: the original fell through and dereferenced null Ptrs
}
// Load the two images to be matched
Mat img1 = imread("c:\\img\\cv.jpg");
Mat img2 = imread("c:\\img\\cv2.jpg");
if( img1.empty() || img2.empty() )
{
  cout<<"fail to load images!";  // BUGFIX: detect() would crash on empty input
  return -1;
}
// Keypoint detection (timed)
double t = getTickCount();
vector<KeyPoint> keypoints1,keypoints2;
detector->detect( img1, keypoints1 );
detector->detect( img2, keypoints2 );
cout<<"图像1特征点个数:"<<keypoints1.size()<<endl;
cout<<"图像2特征点个数:"<<keypoints2.size()<<endl;
// Compute the descriptor (feature-vector) matrix for each image
Mat descriptors1,descriptors2;
descriptor_extractor->compute( img1, keypoints1, descriptors1 );
descriptor_extractor->compute( img2, keypoints2, descriptors2 );
t = ((double)getTickCount() - t)/getTickFrequency();
cout<<"SIFT算法用时:"<<t<<"秒"<<endl;
cout<<"图像1特征描述矩阵大小:"<<descriptors1.size()
<<",特征向量个数:"<<descriptors1.rows<<",维数:"<<descriptors1.cols<<endl;
cout<<"图像2特征描述矩阵大小:"<<descriptors2.size()
<<",特征向量个数:"<<descriptors2.rows<<",维数:"<<descriptors2.cols<<endl;
// Draw the keypoints (visualisation only)
Mat img_keypoints1,img_keypoints2;
drawKeypoints(img1,keypoints1,img_keypoints1,Scalar::all(-1),0);
drawKeypoints(img2,keypoints2,img_keypoints2,Scalar::all(-1),0);
//imshow("Src1",img_keypoints1);
//imshow("Src2",img_keypoints2);
// Match descriptors (Euclidean distance; smaller = more similar)
vector<DMatch> matches;
descriptor_matcher->match( descriptors1, descriptors2, matches );
cout<<"Match个数:"<<matches.size()<<endl;
// Find the min/max match distance
double max_dist = 0;
double min_dist = 100;
for(size_t i=0; i<matches.size(); i++)  // size_t: no signed/unsigned mismatch
{
  double dist = matches[i].distance;
  if(dist < min_dist) min_dist = dist;
  if(dist > max_dist) max_dist = dist;
}
cout<<"最大距离:"<<max_dist<<endl;
cout<<"最小距离:"<<min_dist<<endl;
// Keep only matches well below the maximum distance
vector<DMatch> goodMatches;
for(size_t i=0; i<matches.size(); i++)
{
  if(matches[i].distance < 0.31 * max_dist)
  {
    goodMatches.push_back(matches[i]);
  }
}
cout<<"goodMatch个数:"<<goodMatches.size()<<endl;
// Draw matched pairs (random colours) and unmatched keypoints (green)
Mat img_matches;
drawMatches(img1,keypoints1,img2,keypoints2,goodMatches,img_matches,
Scalar::all(-1)/*CV_RGB(255,0,0)*/,CV_RGB(0,255,0),Mat(),2);
imshow("MatchSIFT",img_matches);
waitKey(0);
return 0;
}
移动轨迹跟踪 OpenCV 代码示例(颜色阈值 + 图像矩求质心)
#include "stdafx.h"
#include <opencv2\opencv.hpp>
// Convert a BGR frame into a binary mask of the tracked colour.
// Pixels whose HSV value lies inside [112..124, 100..255, 100..255]
// (a blue-ish hue band) become white, everything else black.
// The caller owns the returned image and must cvReleaseImage it.
IplImage* GetThresholdedImage(IplImage* img)
{
// Work in HSV space so the colour band is a simple hue interval
IplImage* hsv = cvCreateImage(cvGetSize(img), 8, 3);
cvCvtColor(img, hsv, CV_BGR2HSV);
IplImage* mask = cvCreateImage(cvGetSize(img), 8, 1);
cvInRangeS(hsv, cvScalar(112, 100, 100), cvScalar(124, 255, 255), mask);
cvReleaseImage(&hsv);  // temporary HSV buffer no longer needed
return mask;
}
/** Colour-based tracking demo: threshold each video frame to the tracked
 *  colour, estimate the blob centre from image moments, and draw the
 *  centre's trail over the video. */
int main()
{
// Open the input video
CvCapture* capture = cvCaptureFromFile("c:\\img\\1.avi");
if(!capture)
{
  printf("Could not initialize capturing...\n");
  return -1;
}
// The two windows we'll be using
cvNamedWindow("video");
cvNamedWindow("thresh");
// Accumulates the drawn trail of the tracked object
IplImage* imgScribble = NULL;
while(true)
{
  IplImage* frame = cvQueryFrame(capture);  // owned by `capture` — do not release
  if(!frame)
    break;
  // First frame: allocate the trail canvas
  if(imgScribble == NULL)
  {
    imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
  }
  // Binary mask of the tracked colour (white = tracked, black = rest)
  IplImage* imgYellowThresh = GetThresholdedImage(frame);
  // BUGFIX: the original malloc'd a CvMoments and freed it with `delete`
  // (undefined behaviour); a stack object needs no cleanup at all.
  CvMoments moments;
  cvMoments(imgYellowThresh, &moments, 1);
  double moment10 = cvGetSpatialMoment(&moments, 1, 0);
  double moment01 = cvGetSpatialMoment(&moments, 0, 1);
  double area = cvGetCentralMoment(&moments, 0, 0);
  // Last and current blob positions (statics keep the previous value)
  static int posX = 0;
  static int posY = 0;
  int lastX = posX;
  int lastY = posY;
  if(area > 0)  // BUGFIX: avoid division by zero when the mask is empty
  {
    posX = moment10/area;
    posY = moment01/area;
  }
  printf("position (%d,%d)\n", posX, posY);
  // Draw only between two valid positions
  if(lastX>0 && lastY>0 && posX>0 && posY>0)
  {
    cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
  }
  // Overlay the trail on the frame
  cvAdd(frame, imgScribble, frame);
  cvShowImage("thresh", imgYellowThresh);
  cvShowImage("video", frame);
  // BUGFIX: release before the keypress check — the original leaked the
  // mask when breaking out on a keypress.
  cvReleaseImage(&imgYellowThresh);
  int c = cvWaitKey(10);
  if(c!=-1)
  {
    break;  // any key quits
  }
}
cvReleaseImage(&imgScribble);  // was leaked in the original
cvReleaseCapture(&capture);
return 0;
}
光流法
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <stdio.h>
const int MAX_CORNERS = 500;
int main(int argc, char** argv) {
// Initialize, load two images from the file system, and
// allocate the images and other structures we will need for
// results.
//
IplImage* imgA = cvLoadImage("c:\\img\\1.jpg",CV_LOAD_IMAGE_GRAYSCALE);
IplImage* imgB = cvLoadImage("c:\\img\\2.jpg",CV_LOAD_IMAGE_GRAYSCALE);
CvSize img_sz = cvGetSize( imgA );
int win_size = 10;
IplImage* imgC = cvLoadImage("OpticalFlow1.jpg",CV_LOAD_IMAGE_UNCHANGED);
// The first thing we need to do is get the features
// we want to track.
//
IplImage* eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
IplImage* tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
int corner_count = MAX_CORNERS;
CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
cvGoodFeaturesToTrack(
imgA,
eig_image,
tmp_image,
cornersA,
&corner_count,
0.01,
5.0,
0,
3,
0,
0.04
);
cvFindCornerSubPix(
imgA,
cornersA,
corner_count,
cvSize(win_size,win_size),
cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)
);
// Call the Lucas Kanade algorithm
//
char features_found[ MAX_CORNERS ];
float feature_errors[ MAX_CORNERS ];
CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];
cvCalcOpticalFlowPyrLK(
imgA,
imgB,
pyrA,
pyrB,
cornersA,
cornersB,
corner_count,
cvSize( win_size,win_size ),
5,
features_found,
feature_errors,
cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
0
);
// Now make some image of what we are looking at:
//
for( int i=0; i<corner_count; i++ ) {
if( features_found[i]==0|| feature_errors[i]>550 ) {
// printf("Error is %f/n",feature_errors[i]);
continue;
}
// printf("Got it/n");
CvPoint p0 = cvPoint(
cvRound( cornersA[i].x ),
cvRound( cornersA[i].y )
);
CvPoint p1 = cvPoint(
cvRound( cornersB[i].x ),
cvRound( cornersB[i].y )
);
cvLine( imgC, p0, p1, CV_RGB(255,0,0),2 );
}
cvNamedWindow("ImageA",0);
cvNamedWindow("ImageB",0);
cvNamedWindow("LKpyr_OpticalFlow",0);
cvShowImage("ImageA",imgA);
cvShowImage("ImageB",imgB);
cvShowImage("LKpyr_OpticalFlow",imgC);
cvWaitKey(0);
return 0;
}
光流2
#include <cv.h>
#include <highgui.h>
#include <iostream>
using namespace std;
int const MAX_CORNERS = 1000;
int main (int argc, char **argv)
{
CvCapture* capture = 0;
capture = cvCaptureFromCAM( CV_CAP_ANY ); //get frame
IplImage *src_img1; //the previous frame (gray)
IplImage *src_img2; //the current frame(gray)
IplImage *dst_img; //the result
IplImage *cur_img;
IplImage *pre_img;
CvPoint2D32f * move_old_point = new CvPoint2D32f[ MAX_CORNERS];
CvPoint2D32f * move_new_point = new CvPoint2D32f[ MAX_CORNERS];
char *features_found = new char[MAX_CORNERS];
float *features_error = new float[MAX_CORNERS];
CvTermCriteria criteria;
criteria = cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 64, 0.01);
while(1)
{
int i,j;
int dx, dy;
int p = 0;
int rows, cols;
int countn = MAX_CORNERS;
pre_img = cvQueryFrame(capture);
CvSize img_sz = cvGetSize(pre_img);
src_img1 = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
cvCvtColor(pre_img, src_img1, CV_RGB2GRAY);
cur_img = cvQueryFrame(capture);
src_img2 = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
cvCvtColor(cur_img, src_img2, CV_RGB2GRAY);
dst_img = (IplImage *)cvClone(cur_img);
IplImage *move_img = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
cvZero(move_img);
//cvAbsDiff(src_img1, src_img2,move_img);
cols = src_img1->width;
rows = src_img1->height;
for (i = 0; i <cols; i++)
{
for (j = 0; j<rows; j++)
{
double a = abs(cvGet2D(src_img1, j, i).val[0]-cvGet2D(src_img2, j, i).val[0]);
CvScalar b = cvScalar(a, 0, 0,0);
cvSet2D(move_img, j, i,b);
if (a>40)
{
if (p<MAX_CORNERS-1)
{
int d = ++p;
move_old_point[d].x = i;
move_old_point[d].y = j;
}
}
}
}
cvNamedWindow("moving object", 1);
cvShowImage("moving object", move_img);
CvSize Pyrsize = cvSize(src_img1->width +8, src_img1->height/3);
IplImage * pyrA = cvCreateImage(Pyrsize, IPL_DEPTH_32F, 1); //pyrA是需要寻找的点,不是没有初始化的
IplImage * pyrB = cvCreateImage(Pyrsize, IPL_DEPTH_32F, 1);
cvCalcOpticalFlowPyrLK(src_img1,
src_img2,
pyrA,
pyrB,
move_old_point,
move_new_point,
countn,
cvSize(10, 10),
3,
features_found,
features_error,
criteria,
0
);
for (i = 0; i < countn; i++)
{
int x1 = (int)move_new_point[i].x;
int x2 = (int)move_old_point[i].x;
int y1 = (int)move_new_point[i].y;
int y2 = (int)move_old_point[i].y;
dx =(int) abs(x1 - x2) ;
dy = (int)abs(y1 - y2);
if (dx >= 5&& dy >= 5)
{
cvLine (dst_img, cvPoint(x2, y2),cvPoint(x2+5, y2+5) , CV_RGB (255, 0, 0), 1, CV_AA, 0);
}
}
cvNamedWindow ("ImagePyrLK", 1);
cvShowImage ("ImagePyrLK", dst_img);
cvWaitKey (1);
cvReleaseImage (&dst_img);
cvReleaseImage(&pyrA);
cvReleaseImage(&pyrB);
cvReleaseImage(&move_img);
}
cvDestroyWindow("moving object");
cvDestroyWindow ("ImagePyrLK");
cvReleaseImage (&src_img1);
cvReleaseImage (&src_img2);
cvReleaseImage (&pre_img);
cvReleaseImage (&cur_img);
return 0;
}
运动跟踪
#include <stdio.h>
#include<iostream>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
using namespace std;
int main( int argc, char** argv )
{
// IplImage pointers: current frame, foreground mask, background image
IplImage* pFrame = NULL;
IplImage* pFrImg = NULL;
IplImage* pBkImg = NULL;
// Float matrices used for the background-subtraction arithmetic
CvMat* pFrameMat = NULL;
CvMat* pFrMat = NULL;
CvMat* pBkMat = NULL;
CvCapture* pCapture = NULL;
int nFrmNum = 0;  // frame counter; frame 1 initializes the background model
// Create the display windows
cvNamedWindow("background",1);
cvNamedWindow("video", 1);
cvNamedWindow("foreground",1);
// Arrange the windows on screen
cvMoveWindow("background", 30, 500);
cvMoveWindow("video", 350, 0);
cvMoveWindow("foreground", 690, 500);
// Open the video file
if( !(pCapture = cvCaptureFromFile("c:\\img\\1.avi")))
{
// NOTE(review): the format string contains no %s, so argv[1] is ignored
// here (and argv[1] may not even exist) — confirm intent.
fprintf(stderr, "文件打开错误", argv[1]);
return -2;
}
// Read the video frame by frame
while(pFrame = cvQueryFrame( pCapture ))
{
nFrmNum++;
// First frame: allocate buffers and seed the (static) background model
if(nFrmNum == 1)
{
pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U,1);
pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U,1);
pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
// Convert to single-channel grayscale
cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
cvConvert(pFrImg, pFrameMat);
cvConvert(pFrImg, pFrMat);
// NOTE(review): the background (pBkMat) is set once here and never
// updated afterwards — this is a static-background subtractor.
cvConvert(pFrImg, pBkMat);
}
else
{
cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
cvConvert(pFrImg, pFrameMat);
// Gaussian smoothing to suppress sensor noise
cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
// Subtract the background from the current frame
cvAbsDiff(pFrameMat, pBkMat, pFrMat);
// Binarize the foreground difference
cvThreshold(pFrMat, pFrImg, 60, 255.0, CV_THRESH_BINARY);
// Morphological open-ish filtering (erode then dilate) to remove speckle
cvErode(pFrImg, pFrImg, 0, 1);
cvDilate(pFrImg, pFrImg, 0, 1);
// Flip the image origin so rows are addressed bottom-up
pBkImg->origin=1;
pFrImg->origin=1;
// Scan the foreground mask bottom-up and draw one tracking box on pFrame
int x,y;
for (y=pFrImg->height - 1;y>=250;y--)  // only rows >= 250 are searched
{
uchar* ptr = (uchar*)(pFrImg->imageData+pFrImg->widthStep*y); // start of row y
for (x=0;x<pFrImg->width;x++)
{
if(ptr[x]!=0)// first foreground pixel found: draw a fixed-size box, stop scanning
{
CvPoint pt1_Rect;
CvPoint pt2_Rect;
// 60x300 px box centred horizontally on the hit, extending upward
pt1_Rect.x=x-30;
pt1_Rect.y=y;
pt2_Rect.x=x+30;
pt2_Rect.y=y-300;
int thickness=3;
int line_type=8;
CvScalar color=CV_RGB(255,0,0);
cvRectangle( pFrame, pt1_Rect, pt2_Rect,color ,thickness, line_type, 0 );
y=-1;  // force the outer loop to terminate as well
break;
}
}
}
// Show results
cvShowImage("video", pFrame);
cvShowImage("background", pBkImg);
cvShowImage("foreground", pFrImg);
// Break on any key; the wait also gives HighGUI time to paint.
// NOTE(review): 27 is the wait in *milliseconds*, not the ESC key code.
if( cvWaitKey(27) >= 0 )
break;
}
}
// Destroy the windows
cvDestroyWindow("video");
cvDestroyWindow("background");
cvDestroyWindow("foreground");
// Release images and matrices
cvReleaseImage(&pFrImg);
cvReleaseImage(&pBkImg);
cvReleaseMat(&pFrameMat);
cvReleaseMat(&pFrMat);
cvReleaseMat(&pBkMat);
cvReleaseCapture(&pCapture);
return 0;
}
连通域
#include "cv.h"
#include "cxcore.h"
#include "highgui.h"
/** Connected-region demo: find external contours, keep the one with the
 *  largest area (above 10 px^2), and crop the display to its bounding box. */
int main( int argc, char** argv )
{
// Load the input image as grayscale
IplImage* pImg = cvLoadImage("c:\\img\\1.bmp",0);
if( pImg == NULL )
{
  // BUGFIX: the original used the image without checking the load.
  printf("Could not load c:\\img\\1.bmp\n");
  return -1;
}
IplImage* pContourImg = NULL;
CvMemStorage * storage = cvCreateMemStorage(0);
CvSeq * contour = 0;
CvSeq *contmax = 0;        // largest contour found, NULL if none qualifies
int mode = CV_RETR_EXTERNAL;  // outer contours only
cvShowImage( "src", pImg );
// 3-channel canvas so the contours can be drawn in colour
pContourImg = cvCreateImage(cvGetSize(pImg),
IPL_DEPTH_8U,
3);
cvCvtColor(pImg, pContourImg, CV_GRAY2BGR);
// Find the contours (note: cvFindContours modifies pImg)
cvFindContours( pImg, storage, &contour, sizeof(CvContour),
mode, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
// Draw all found contours in red
cvDrawContours(pContourImg, contour,
CV_RGB(255,0,0), CV_RGB(255, 0, 0),
2, 2, 8, cvPoint(0,0));
// BUGFIX: area must be double — fabs() returns double and the printf below
// uses %lf, which was undefined behaviour with the original `int area`.
double area, maxArea = 10;  // ignore blobs of 10 px^2 or smaller
for(;contour;contour = contour->h_next)
{
  area = fabs(cvContourArea( contour, CV_WHOLE_SEQ ));
  printf("area == %lf\n", area);
  if(area > maxArea)
  {
    contmax = contour;
    maxArea = area;
  }
}
if( contmax )  // BUGFIX: guard the case where no contour beat the minimum area
{
  CvRect aRect = cvBoundingRect( contmax, 0 );
  cvSetImageROI( pContourImg, aRect );
}
// Show and save the (possibly cropped) result
cvShowImage( "contour", pContourImg );
cvSaveImage("contour.bmp",pContourImg);
cvWaitKey(0);
cvDestroyWindow( "src" );
cvDestroyWindow( "contour" );
cvReleaseImage( &pImg );
cvReleaseImage( &pContourImg );
cvReleaseMemStorage(&storage);
return 0;
}
连通域2
#include <stdio.h>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
int main( int argc, char** argv )
{
IplImage* src;
src=cvLoadImage("c:\\img\\1.bmp",CV_LOAD_IMAGE_GRAYSCALE);
IplImage* dst = cvCreateImage( cvGetSize(src), 8, 3 );
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* contour = 0;
cvThreshold( src, src,120, 255, CV_THRESH_BINARY );// binarize the input
cvNamedWindow( "Source", 1 );
cvShowImage( "Source", src );
// Extract contours (cvFindContours modifies src)
cvFindContours( src, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
cvZero( dst );// clear the output canvas
CvSeq* _contour =contour;  // remember the list head for the second pass
double maxarea=0;   // largest contour area seen so far
double minarea=100; // contours below this area are to be discarded
int n=-1,m=0;// n = index of the largest contour, m = iteration index
for( ; contour != 0; contour = contour->h_next )
{
double tmparea=fabs(cvContourArea(contour));
if(tmparea < minarea)
{
// NOTE(review): the intent (per the original comment) is "drop this
// contour", but cvSeqRemove(contour,0) removes *point 0 from the
// contour's point sequence* — the contour itself stays in the list.
// Confirm whether this filter ever worked as described.
cvSeqRemove(contour,0);
continue;
}
CvRect aRect = cvBoundingRect( contour, 0 );
if ((aRect.width/aRect.height)<1)
{
// NOTE(review): same cvSeqRemove concern as above; also this is an
// integer division, so the test is "width < height" in effect.
cvSeqRemove(contour,0);
continue;
}
if(tmparea > maxarea)
{
maxarea = tmparea;
n=m;
}
m++;
// CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 );// random colour alternative
CvScalar color = CV_RGB( 0, 255,255 );
// max_level: maximum contour nesting level to draw. 0 draws only the given
// contour; 1 also draws its same-level neighbours; 2 all contours; negative
// values draw child contours down to abs(max_level)-1 instead of siblings.
cvDrawContours( dst, contour, color, color, -1, 1, 8 );// draw outer and inner contours
}
contour =_contour; /*int k=0;*/
int count=0;
// Second pass: recount the contours and repaint the largest one in red
for( ; contour != 0; contour = contour->h_next )
{
count++;
double tmparea=fabs(cvContourArea(contour));
if (tmparea==maxarea /*k==n*/)
{
CvScalar color = CV_RGB( 255, 0, 0);
cvDrawContours( dst, contour, color, color, -1, 1, 8 );
}
/*k++;*/
}
printf("The total number of contours is:%d",count);
cvNamedWindow( "Components", 1 );
cvShowImage( "Components", dst );
cvWaitKey(0);
cvDestroyWindow( "Source" );
cvReleaseImage(&src);
cvDestroyWindow( "Components" );
cvReleaseImage(&dst);
return 0;
}
连通域3
#include "cv.h"
#include "cxcore.h"
#include "highgui.h"
#include <iostream>
#include <string>
#include <list>
#include <vector>
#include <map>
#include<stack>
using namespace std;
using namespace cv;
#include <opencv2/opencv.hpp>
// Two-pass connected-component labelling (4-connectivity) with a
// union-find-style equivalence table. Labels start at 2; 0 = background,
// 1 = not-yet-labelled foreground.
void icvprCcaByTwoPass(const cv::Mat& _binImg, cv::Mat& _lableImg)
{
// connected component analysis (4-component)
// use two-pass algorithm
// 1. first pass: label each foreground pixel with a label
// 2. second pass: visit each labeled pixel and merge neighbor labels
//
// foreground pixel: _binImg(x,y) = 1
// background pixel: _binImg(x,y) = 0
if (_binImg.empty() ||
_binImg.type() != CV_8UC1)
{
return ;
}
// 1. first pass
_lableImg.release() ;
_binImg.convertTo(_lableImg, CV_32SC1) ;
int label = 1 ; // start by 2
std::vector<int> labelSet ;   // labelSet[l] = smallest known equivalent of label l
labelSet.push_back(0) ; // background: 0
labelSet.push_back(1) ; // foreground: 1
// NOTE(review): rows/cols are reduced by 1 and the loops start at 1, so the
// image border (and, in the second pass, the last row/column) is never
// relabelled — assumes a background border; confirm for your inputs.
int rows = _binImg.rows - 1 ;
int cols = _binImg.cols - 1 ;
for (int i = 1; i < rows; i++)
{
int* data_preRow = _lableImg.ptr<int>(i-1) ;
int* data_curRow = _lableImg.ptr<int>(i) ;
for (int j = 1; j < cols; j++)
{
if (data_curRow[j] == 1)
{
// Collect the already-assigned labels of the left and up neighbours
std::vector<int> neighborLabels ;
neighborLabels.reserve(2) ;
int leftPixel = data_curRow[j-1] ;
int upPixel = data_preRow[j] ;
if ( leftPixel > 1)
{
neighborLabels.push_back(leftPixel) ;
}
if (upPixel > 1)
{
neighborLabels.push_back(upPixel) ;
}
if (neighborLabels.empty())
{
labelSet.push_back(++label) ; // assign to a new label
data_curRow[j] = label ;
labelSet[label] = label ;
}
else
{
// Take the smallest neighbour label for this pixel...
std::sort(neighborLabels.begin(), neighborLabels.end()) ;
int smallestLabel = neighborLabels[0] ;
data_curRow[j] = smallestLabel ;
// save equivalence: ...and record that all neighbour labels
// belong to the same component
for (size_t k = 1; k < neighborLabels.size(); k++)
{
int tempLabel = neighborLabels[k] ;
int& oldSmallestLabel = labelSet[tempLabel] ;
if (oldSmallestLabel > smallestLabel)
{
labelSet[oldSmallestLabel] = smallestLabel ;
oldSmallestLabel = smallestLabel ;
}
else if (oldSmallestLabel < smallestLabel)
{
labelSet[smallestLabel] = oldSmallestLabel ;
}
}
}
}
}
}
// update equivalent labels
// assigned with the smallest label in each equivalent label set
// (chase each equivalence chain down to its representative)
for (size_t i = 2; i < labelSet.size(); i++)
{
int curLabel = labelSet[i] ;
int preLabel = labelSet[curLabel] ;
while (preLabel != curLabel)
{
curLabel = preLabel ;
preLabel = labelSet[preLabel] ;
}
labelSet[i] = curLabel ;
}
// 2. second pass: rewrite every pixel with its representative label
for (int i = 0; i < rows; i++)
{
int* data = _lableImg.ptr<int>(i) ;
for (int j = 0; j < cols; j++)
{
int& pixelLabel = data[j] ;
pixelLabel = labelSet[pixelLabel] ;
}
}
}
// Seed-fill (flood-fill) connected-component labelling, 4-connectivity.
// Labels start at 2; 0 = background, 1 = not-yet-labelled foreground.
void icvprCcaBySeedFill(const cv::Mat& _binImg, cv::Mat& _lableImg)
{
// connected component analysis (4-component)
// use seed filling algorithm
// 1. begin with a foreground pixel and push its foreground neighbors into a stack;
// 2. pop the top pixel on the stack and label it with the same label until the stack is empty
//
// foreground pixel: _binImg(x,y) = 1
// background pixel: _binImg(x,y) = 0
if (_binImg.empty() ||
_binImg.type() != CV_8UC1)
{
return ;
}
_lableImg.release() ;
_binImg.convertTo(_lableImg, CV_32SC1) ;
int label = 1 ; // start by 2
// NOTE(review): the scan skips a 1-2 pixel border, and the neighbour reads
// below index curX±1/curY±1 without bounds checks — this assumes the image
// border is background; confirm for your inputs.
int rows = _binImg.rows - 1 ;
int cols = _binImg.cols - 1 ;
for (int i = 1; i < rows-1; i++)
{
int* data= _lableImg.ptr<int>(i) ;
for (int j = 1; j < cols-1; j++)
{
if (data[j] == 1)
{
// Unlabelled foreground pixel: flood its whole component
std::stack<std::pair<int,int> > neighborPixels ;
neighborPixels.push(std::pair<int,int>(i,j)) ; // pixel position: <i,j>
++label ; // begin with a new label
while (!neighborPixels.empty())
{
// get the top pixel on the stack and label it with the same label
std::pair<int,int> curPixel = neighborPixels.top() ;
int curX = curPixel.first ;
int curY = curPixel.second ;
_lableImg.at<int>(curX, curY) = label ;
// pop the top pixel
neighborPixels.pop() ;
// push the 4-neighbors (foreground pixels)
if (_lableImg.at<int>(curX, curY-1) == 1)
{// left pixel
neighborPixels.push(std::pair<int,int>(curX, curY-1)) ;
}
if (_lableImg.at<int>(curX, curY+1) == 1)
{// right pixel
neighborPixels.push(std::pair<int,int>(curX, curY+1)) ;
}
if (_lableImg.at<int>(curX-1, curY) == 1)
{// up pixel
neighborPixels.push(std::pair<int,int>(curX-1, curY)) ;
}
if (_lableImg.at<int>(curX+1, curY) == 1)
{// down pixel
neighborPixels.push(std::pair<int,int>(curX+1, curY)) ;
}
}
}
}
}
}
// Produce a pseudo-random BGR colour; each channel is drawn uniformly
// from [0, 255). Uses rand(), so the sequence depends on the global seed.
cv::Scalar icvprGetRandomColor()
{
// Consume rand() in the same order (r, g, b) for reproducibility.
uchar red = 255 * (rand() / (1.0 + RAND_MAX));
uchar green = 255 * (rand() / (1.0 + RAND_MAX));
uchar blue = 255 * (rand() / (1.0 + RAND_MAX));
return cv::Scalar(blue, green, red);  // OpenCV scalars are BGR-ordered
}
// Render a CV_32SC1 label image as a CV_8UC3 colour image: every label > 1
// gets a lazily-assigned random colour; labels 0/1 stay black.
void icvprLabelColor(const cv::Mat& _labelImg, cv::Mat& _colorLabelImg)
{
// Only 32-bit signed single-channel label images are supported.
if (_labelImg.empty() ||
_labelImg.type() != CV_32SC1)
{
return ;
}
std::map<int, cv::Scalar> palette ;  // label -> colour, grown on first sight
_colorLabelImg.release() ;
_colorLabelImg.create(_labelImg.rows, _labelImg.cols, CV_8UC3) ;
_colorLabelImg = cv::Scalar::all(0) ;  // start from an all-black canvas
for (int row = 0; row < _labelImg.rows; ++row)
{
const int* labels = _labelImg.ptr<int>(row) ;
uchar* out = _colorLabelImg.ptr<uchar>(row) ;
for (int col = 0; col < _labelImg.cols; ++col)
{
const int labelValue = labels[col] ;
if (labelValue > 1)  // 0 = background, 1 = unlabelled foreground
{
if (palette.count(labelValue) == 0)
{
palette[labelValue] = icvprGetRandomColor() ;
}
const cv::Scalar& c = palette[labelValue] ;
out[3*col] = c[0] ;      // B
out[3*col + 1] = c[1] ;  // G
out[3*col + 2] = c[2] ;  // R
}
}
}
}
/** Connected-component demo driver: binarize the input (dark pixels become
 *  foreground value 1), label the components, and show a grayscale and a
 *  colour visualisation. */
int main(int argc, char** argv)
{
cv::Mat binImage = cv::imread("c:\\img\\1.bmp", 0) ;
if (binImage.empty())
{
  // BUGFIX: threshold() would assert on an empty Mat when the file is missing.
  std::cout << "Could not load c:\\img\\1.bmp" << std::endl ;
  return -1 ;
}
// Dark pixels (<= 50) become foreground value 1, as the CCA functions expect
cv::threshold(binImage, binImage, 50, 1, CV_THRESH_BINARY_INV) ;
// connected component labeling
cv::Mat labelImg ;
icvprCcaByTwoPass(binImage, labelImg) ;
//icvprCcaBySeedFill(binImage, labelImg) ;  // alternative algorithm
// show result: scale labels up so they are visible as gray levels
cv::Mat grayImg ;
labelImg *= 10 ;
labelImg.convertTo(grayImg, CV_8UC1) ;
cv::imshow("labelImg", grayImg) ;
// colour visualisation: one random colour per component
cv::Mat colorLabelImg ;
icvprLabelColor(labelImg, colorLabelImg) ;
cv::imshow("colorImg", colorLabelImg) ;
cv::waitKey(0) ;
return 0 ;
}
连通域4
#include <iostream>
#include <opencv2\\opencv.hpp>
using namespace cv;
using namespace std;
//Just some convienience macros
#define CV_CVX_WHITE CV_RGB(0xff,0xff,0xff)
#define CV_CVX_BLACK CV_RGB(0x00,0x00,0x00)
// Clean a binary foreground mask: morphologically smooth it, drop contours
// with a small perimeter, replace the survivors with polygon approximations
// (poly1_hull0 != 0) or convex hulls (== 0), and repaint them filled into
// the mask. Optionally reports per-contour centres and bounding boxes.
// NOTE(review): this function contains several non-conforming constructs
// (addresses of temporaries, non-const refs bound to temporary default
// arguments) that only compile under MSVC extensions — flagged inline.
void ConnectedComponents(Mat &mask_process, int poly1_hull0, float perimScale, int number = 0,
Rect &bounding_box = Rect(), Point &contour_centers = Point(-1, -1))
{
/* The next 4 lines adapt the C-style implementation to the C++-style
   interface. NOTE(review): each takes the address of a *temporary* created
   by a conversion operator — ill-formed standard C++ (MSVC extension), and
   any writes through bbs/centers are lost when the temporaries die. */
IplImage *mask = &mask_process.operator IplImage();
int *num = &number;
CvRect *bbs = &bounding_box.operator CvRect();
CvPoint *centers = &contour_centers.operator CvPoint();
static CvMemStorage* mem_storage = NULL;
static CvSeq* contours = NULL;
//CLEAN UP RAW MASK
// Opening: smooth contours, remove small details, break thin bridges
cvMorphologyEx( mask, mask, NULL, NULL, CV_MOP_OPEN, 1 );// 1 = number of open iterations
// Closing: smooth contours and close small gaps
cvMorphologyEx( mask, mask, NULL, NULL, CV_MOP_CLOSE, 1 );// 1 = number of close iterations
//FIND CONTOURS AROUND ONLY BIGGER REGIONS
if( mem_storage==NULL ) mem_storage = cvCreateMemStorage(0);
else cvClearMemStorage(mem_storage);
// CV_RETR_EXTERNAL and CV_CHAIN_APPROX_SIMPLE are defined in types_c.h
CvContourScanner scanner = cvStartFindContours(mask,mem_storage,sizeof(CvContour),CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);
CvSeq* c;
int numCont = 0;
// Keep only contours whose perimeter is large enough, replacing each kept
// contour with a smoothed version.
while( (c = cvFindNextContour( scanner )) != NULL )
{
double len = cvContourPerimeter( c );
double q = (mask->height + mask->width) /perimScale; //calculate perimeter len threshold
if( len < q ) //Get rid of blob if it's perimeter is too small
{
cvSubstituteContour( scanner, NULL ); // replace the contour with nothing (drop it)
}
else //Smooth it's edges if it's large enough
{
CvSeq* c_new;
if(poly1_hull0) //Polygonal approximation of the segmentation
c_new = cvApproxPoly(c,sizeof(CvContour),mem_storage,CV_POLY_APPROX_DP, 2,0);
else //Convex Hull of the segmentation
c_new = cvConvexHull2(c,mem_storage,CV_CLOCKWISE,1);
cvSubstituteContour( scanner, c_new ); // replace the original contour with the smoothed one
numCont++;
}
}
contours = cvEndFindContours( &scanner ); // finish the contour scan
// PAINT THE FOUND REGIONS BACK INTO THE IMAGE
cvZero( mask );
IplImage *maskTemp;
//CALC CENTER OF MASS AND OR BOUNDING RECTANGLES
if(*num != 0)
{
int N = *num, numFilled = 0, i=0;
CvMoments moments;
double M00, M01, M10;
maskTemp = cvCloneImage(mask);
for(i=0, c=contours; c != NULL; c = c->h_next,i++ ) // h_next = next contour in the list
{
if(i < N) //Only process up to *num of them
{
// CV_CVX_WHITE = white in this program
cvDrawContours(maskTemp,c,CV_CVX_WHITE, CV_CVX_WHITE,-1,CV_FILLED,8);
//Find the center of each contour
// NOTE(review): this compares against the address of a *fresh temporary*,
// so the condition is always true — presumably meant as a "caller supplied
// an output array" check; confirm.
if(centers != &cvPoint(-1, -1))
{
cvMoments(maskTemp,&moments,1); // moments up to 3rd order of the single-contour mask
M00 = cvGetSpatialMoment(&moments,0,0); // zeroth moment = area
M10 = cvGetSpatialMoment(&moments,1,0); // first moment in x
M01 = cvGetSpatialMoment(&moments,0,1); // first moment in y
centers[i].x = (int)(M10/M00); // centroid from the moment ratios
centers[i].y = (int)(M01/M00);
}
//Bounding rectangles around blobs
// NOTE(review): same always-true temporary-address comparison as above.
if(bbs != &CvRect())
{
bbs[i] = cvBoundingRect(c); // bounding rectangle of contour c
}
cvZero(maskTemp);
numFilled++;
}
//Draw filled contours into mask
cvDrawContours(mask,c,CV_CVX_WHITE,CV_CVX_WHITE,-1,CV_FILLED,8); //draw to central mask
} //end looping over contours
*num = numFilled;
cvReleaseImage( &maskTemp);
}
//ELSE JUST DRAW PROCESSED CONTOURS INTO THE MASK
else
{
for( c=contours; c != NULL; c = c->h_next )
{
cvDrawContours(mask,c,CV_CVX_WHITE, CV_CVX_BLACK,-1,CV_FILLED,8);
}
}
}
int main()
{
Mat src, mask;
src = imread("c:\\img\\1.png", 0); //以灰度图像读入
imshow("src", src);
mask = src > 0; //转换为二值图像
imshow("mask", mask);
ConnectedComponents(mask, 1, 8.0, 1, Rect(), Point(-1, -1)); //采用多边形拟合处理
imshow("out1", mask);
ConnectedComponents(mask, 0, 8.0, 1, Rect(), Point(-1, -1)); //c采用凸包进行处理
imshow("out2", mask);
waitKey(0);
return 0;
}
连通域5
#include "stdafx.h"
#include<iostream>
#include <string>
#include <list>
#include <vector>
#include <map>
#include <stack>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
// Seed-fill (flood-fill) connected-component labelling, 4-connectivity.
// Labels start at 2; 0 = background, 1 = not-yet-labelled foreground.
void Seed_Filling(const cv::Mat& binImg, cv::Mat& lableImg) // seed-filling method
{
// 4-connectivity; only CV_8UC1 binary input is supported
if (binImg.empty() ||
binImg.type() != CV_8UC1)
{
return;
}
lableImg.release();
binImg.convertTo(lableImg, CV_32SC1);
int label = 1;  // labels actually assigned start at 2 (pre-incremented below)
// NOTE(review): the scan skips a border and the neighbour reads below use
// curX±1/curY±1 without bounds checks — assumes a background border.
int rows = binImg.rows - 1;
int cols = binImg.cols - 1;
for (int i = 1; i < rows-1; i++)
{
int* data= lableImg.ptr<int>(i);
for (int j = 1; j < cols-1; j++)
{
if (data[j] == 1)
{
// Unlabelled foreground pixel found: flood its whole component
std::stack<std::pair<int,int>> neighborPixels;
neighborPixels.push(std::pair<int,int>(i,j)); // pixel position: <i,j>
++label; // new component, new label
while (!neighborPixels.empty())
{
// Pop the top pixel and stamp it with the current component label
std::pair<int,int> curPixel = neighborPixels.top();
int curX = curPixel.first;
int curY = curPixel.second;
lableImg.at<int>(curX, curY) = label;
neighborPixels.pop();
// Push every 4-neighbour that is still unlabelled foreground
if (lableImg.at<int>(curX, curY-1) == 1)
{// left
neighborPixels.push(std::pair<int,int>(curX, curY-1));
}
if (lableImg.at<int>(curX, curY+1) == 1)
{// right
neighborPixels.push(std::pair<int,int>(curX, curY+1));
}
if (lableImg.at<int>(curX-1, curY) == 1)
{// up
neighborPixels.push(std::pair<int,int>(curX-1, curY));
}
if (lableImg.at<int>(curX+1, curY) == 1)
{// down
neighborPixels.push(std::pair<int,int>(curX+1, curY));
}
}
}
}
}
}
// Two-pass connected-component labelling (4-connectivity) with a
// union-find-style equivalence table. Labels start at 2; 0 = background,
// 1 = not-yet-labelled foreground.
void Two_Pass(const cv::Mat& binImg, cv::Mat& lableImg) // two-pass scan method
{
if (binImg.empty() ||
binImg.type() != CV_8UC1)
{
return;
}
// First pass: provisional labels + equivalence recording
lableImg.release();
binImg.convertTo(lableImg, CV_32SC1);
int label = 1;
std::vector<int> labelSet;  // labelSet[l] = smallest known equivalent of l
labelSet.push_back(0);
labelSet.push_back(1);
// NOTE(review): rows/cols are reduced by 1 and loops start at 1, so the
// border (and the last row/column in the second pass) is never relabelled —
// assumes a background border; confirm for your inputs.
int rows = binImg.rows - 1;
int cols = binImg.cols - 1;
for (int i = 1; i < rows; i++)
{
int* data_preRow = lableImg.ptr<int>(i-1);
int* data_curRow = lableImg.ptr<int>(i);
for (int j = 1; j < cols; j++)
{
if (data_curRow[j] == 1)
{
// Collect the already-assigned labels of the left and up neighbours
std::vector<int> neighborLabels;
neighborLabels.reserve(2);
int leftPixel = data_curRow[j-1];
int upPixel = data_preRow[j];
if ( leftPixel > 1)
{
neighborLabels.push_back(leftPixel);
}
if (upPixel > 1)
{
neighborLabels.push_back(upPixel);
}
if (neighborLabels.empty())
{
labelSet.push_back(++label); // not connected to anything labelled: new label
data_curRow[j] = label;
labelSet[label] = label;
}
else
{
// Use the smallest neighbour label for this pixel...
std::sort(neighborLabels.begin(), neighborLabels.end());
int smallestLabel = neighborLabels[0];
data_curRow[j] = smallestLabel;
// ...and record that all neighbour labels are equivalent
for (size_t k = 1; k < neighborLabels.size(); k++)
{
int tempLabel = neighborLabels[k];
int& oldSmallestLabel = labelSet[tempLabel];
if (oldSmallestLabel > smallestLabel)
{
labelSet[oldSmallestLabel] = smallestLabel;
oldSmallestLabel = smallestLabel;
}
else if (oldSmallestLabel < smallestLabel)
{
labelSet[smallestLabel] = oldSmallestLabel;
}
}
}
}
}
}
// Resolve the equivalence chains: give every label its set's smallest member
for (size_t i = 2; i < labelSet.size(); i++)
{
int curLabel = labelSet[i];
int preLabel = labelSet[curLabel];
while (preLabel != curLabel)
{
curLabel = preLabel;
preLabel = labelSet[preLabel];
}
labelSet[i] = curLabel;
} ;
// Second pass: rewrite every pixel with its representative label
for (int i = 0; i < rows; i++)
{
int* data = lableImg.ptr<int>(i);
for (int j = 0; j < cols; j++)
{
int& pixelLabel = data[j];
pixelLabel = labelSet[pixelLabel];
}
}
}
//彩色显示
// Produce a pseudo-random BGR colour; each channel is drawn uniformly
// from [0, 255). Uses rand(), so the sequence depends on the global seed.
cv::Scalar GetRandomColor()
{
// Consume rand() in the same order (r, g, b) for reproducibility.
uchar red = 255 * (rand() / (1.0 + RAND_MAX));
uchar green = 255 * (rand() / (1.0 + RAND_MAX));
uchar blue = 255 * (rand() / (1.0 + RAND_MAX));
return cv::Scalar(blue, green, red);  // OpenCV scalars are BGR-ordered
}
// Render a CV_32SC1 label image as a CV_8UC3 colour image: every label > 1
// gets a lazily-assigned random colour; labels 0/1 stay black.
void LabelColor(const cv::Mat& labelImg, cv::Mat& colorLabelImg)
{
// Only 32-bit signed single-channel label images are supported.
if (labelImg.empty() ||
labelImg.type() != CV_32SC1)
{
return;
}
std::map<int, cv::Scalar> palette;  // label -> colour, grown on first sight
colorLabelImg.release();
colorLabelImg.create(labelImg.rows, labelImg.cols, CV_8UC3);
colorLabelImg = cv::Scalar::all(0);  // start from an all-black canvas
for (int row = 0; row < labelImg.rows; ++row)
{
const int* labels = labelImg.ptr<int>(row);
uchar* out = colorLabelImg.ptr<uchar>(row);
for (int col = 0; col < labelImg.cols; ++col)
{
const int labelValue = labels[col];
if (labelValue > 1)  // 0 = background, 1 = unlabelled foreground
{
if (palette.count(labelValue) == 0)
{
palette[labelValue] = GetRandomColor();
}
const cv::Scalar& c = palette[labelValue];
out[3*col] = c[0];      // B
out[3*col + 1] = c[1];  // G
out[3*col + 2] = c[2];  // R
}
}
}
}
/** Connected-component demo driver: binarize the input (dark pixels become
 *  foreground value 1), label with Two_Pass, and show a colour rendering. */
int main()
{
cv::Mat binImage = cv::imread("c:\\img\\1.bmp", 0);
if (binImage.empty())
{
  // BUGFIX: threshold() would assert on an empty Mat when the file is missing.
  std::cout << "Could not load c:\\img\\1.bmp" << std::endl;
  return -1;
}
// Dark pixels (<= 50) become foreground value 1, as the CCA functions expect
cv::threshold(binImage, binImage, 50, 1, CV_THRESH_BINARY_INV);
cv::Mat labelImg;
Two_Pass(binImage, labelImg);
//Seed_Filling(binImage, labelImg);  // alternative algorithm
// Colour visualisation: one random colour per component
cv::Mat colorLabelImg;
LabelColor(labelImg, colorLabelImg);
cv::imshow("colorImg", colorLabelImg);
/* Grayscale alternative:
cv::Mat grayImg;
labelImg *= 10;
labelImg.convertTo(grayImg, CV_8UC1);
cv::imshow("labelImg", grayImg);
*/
cv::waitKey(0);
return 0;
}
分割
// meanshift_segmentation.cpp : 定义控制台应用程序的入口点。
//
#include "stdafx.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
Mat src,dst;
int spatialRad=10,colorRad=10,maxPryLevel=1;
//const Scalar& colorDiff=Scalar::all(1);
/** Trackbar callback: run pyramid mean-shift filtering on the global `src`,
 *  then flood-fill every pixel not yet covered by the fill mask with a
 *  random colour to visualise the segments, and show the result. */
void meanshift_seg(int,void *)
{
//调用meanshift图像金字塔进行分割
pyrMeanShiftFiltering(src,dst,spatialRad,colorRad,maxPryLevel);
RNG rng=theRNG();
// Mask must be 2 px larger than the image in each dimension (floodFill API)
Mat mask(dst.rows+2,dst.cols+2,CV_8UC1,Scalar::all(0));
for(int i=0;i<dst.rows;i++)    // i = row (y)
for(int j=0;j<dst.cols;j++)  // j = column (x)
if(mask.at<uchar>(i+1,j+1)==0)
{
Scalar newcolor(rng(256),rng(256),rng(256));
// BUGFIX: Point takes (x,y) = (column,row); the original passed
// Point(i,j), swapping the coordinates — wrong (and for non-square
// images out-of-range) seed points.
floodFill(dst,mask,Point(j,i),newcolor,0,Scalar::all(1),Scalar::all(1));
}
imshow("dst",dst);
}
/** Mean-shift segmentation demo driver: loads the image, wires the three
 *  parameter trackbars to meanshift_seg, and runs one initial pass. */
int main(int argc, uchar* argv[])
{
namedWindow("src",WINDOW_AUTOSIZE);
namedWindow("dst",WINDOW_AUTOSIZE);
src=imread("c:\\img\\1.png");
CV_Assert(!src.empty());
spatialRad=10;
colorRad=10;
maxPryLevel=1;
// createTrackbar's onChange callback must have the (int, void*) signature;
// the same callback serves all three parameters.
createTrackbar("spatialRad","dst",&spatialRad,80,meanshift_seg);
createTrackbar("colorRad","dst",&colorRad,60,meanshift_seg);
createTrackbar("maxPryLevel","dst",&maxPryLevel,5,meanshift_seg);
imshow("src",src);
// BUGFIX: run one segmentation immediately so the "dst" window shows a real
// result — the original left this call commented out and displayed the raw
// source image in "dst" until a trackbar was moved.
meanshift_seg(0,0);
waitKey();  // block until the user closes / presses a key
return 0;
}
分割2 (Segmentation 2 — pyramid segmentation demo)
// meanshift_segmentation.cpp : 定义控制台应用程序的入口点。
//
#include <stdio.h>
#include<iostream>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
using namespace std;
#include "stdafx.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <opencv2/legacy/legacy.hpp>
using namespace cv;
using namespace std;
int main(int argc,char ** argv)
{
IplImage * src = cvLoadImage("c:\\img\\1.bmp");
//常见错误,如果图像本身不能被要求次数的2整除,则会报错终止,一定注意
IplImage * dst=cvCreateImage(cvGetSize(src), src->depth,src->nChannels);
CvMemStorage * stoage = cvCreateMemStorage(0) ;
CvSeq* comp=NULL;
int level = 4 ; //进行n层采样
double threshold1 = 150 ;
double threshold2 = 30 ; //
cvPyrSegmentation(src,dst, stoage,&comp,level, threshold1,threshold2) ;
cvNamedWindow("src") ;
cvNamedWindow("dst") ;
cvShowImage("src",src);
cvShowImage("dst",dst);
cvWaitKey(0) ;
cvDestroyAllWindows();
return 0;
}
阀值 (Thresholding methods)
代码 (Code)
/*==============================Image segmentation===========================*/
/*---------------------------------------------------------------------------*/
/* Manual (fixed) threshold */
IplImage* binaryImg = cvCreateImage(cvSize(w, h),IPL_DEPTH_8U, 1);
cvThreshold(smoothImgGauss,binaryImg,71,255,CV_THRESH_BINARY);
cvNamedWindow("cvThreshold", CV_WINDOW_AUTOSIZE );
cvShowImage( "cvThreshold", binaryImg );
//cvReleaseImage(&binaryImg);
/*---------------------------------------------------------------------------*/
/* Adaptive threshold: each pixel's binarization level is decided from the
   average gray value of its neighborhood */
IplImage* adThresImg = cvCreateImage(cvSize(w, h),IPL_DEPTH_8U, 1);
double max_value=255;
int adpative_method=CV_ADAPTIVE_THRESH_GAUSSIAN_C;//CV_ADAPTIVE_THRESH_MEAN_C
int threshold_type=CV_THRESH_BINARY;
int block_size=3;// pixel neighborhood size used to compute the threshold
int offset=5;// constant subtracted from the (weighted) neighborhood mean
             // NOTE(review): original comment said "window size", which is wrong
cvAdaptiveThreshold(smoothImgGauss,adThresImg,max_value,adpative_method,threshold_type,block_size,offset);
cvNamedWindow("cvAdaptiveThreshold", CV_WINDOW_AUTOSIZE );
cvShowImage( "cvAdaptiveThreshold", adThresImg );
cvReleaseImage(&adThresImg);
/*---------------------------------------------------------------------------*/
/* Maximum-entropy threshold segmentation (MaxEntropy is defined elsewhere
   in this file) */
IplImage* imgMaxEntropy = cvCreateImage(cvGetSize(imgGrey),IPL_DEPTH_8U,1);
MaxEntropy(smoothImgGauss,imgMaxEntropy);
cvNamedWindow("MaxEntroyThreshold", CV_WINDOW_AUTOSIZE );
cvShowImage( "MaxEntroyThreshold", imgMaxEntropy );// display result
cvReleaseImage(&imgMaxEntropy );
/*---------------------------------------------------------------------------*/
/* Basic global threshold method */
IplImage* imgBasicGlobalThreshold = cvCreateImage(cvGetSize(imgGrey),IPL_DEPTH_8U,1);
cvCopyImage(srcImgGrey,imgBasicGlobalThreshold);
int pg[256],i,thre;
for (i=0;i<256;i++) pg[i]=0;
for (i=0;i<imgBasicGlobalThreshold->imageSize;i++) // histogram of gray values
pg[(BYTE)imgBasicGlobalThreshold->imageData[i]]++;
thre = BasicGlobalThreshold(pg,0,256); // pick the threshold (helper elsewhere in file)
cout<<"The Threshold of this Image in BasicGlobalThreshold is:"<<thre<<endl;// print the threshold
cvThreshold(imgBasicGlobalThreshold,imgBasicGlobalThreshold,thre,255,CV_THRESH_BINARY); // binarize
cvNamedWindow("BasicGlobalThreshold", CV_WINDOW_AUTOSIZE );
cvShowImage( "BasicGlobalThreshold", imgBasicGlobalThreshold);// display result
cvReleaseImage(&imgBasicGlobalThreshold);
/*---------------------------------------------------------------------------*/
/* OTSU's method (otsu2 is defined elsewhere in this file) */
IplImage* imgOtsu = cvCreateImage(cvGetSize(imgGrey),IPL_DEPTH_8U,1);
cvCopyImage(srcImgGrey,imgOtsu);
int thre2;
thre2 = otsu2(imgOtsu);
cout<<"The Threshold of this Image in Otsu is:"<<thre2<<endl;// print the threshold
cvThreshold(imgOtsu,imgOtsu,thre2,255,CV_THRESH_BINARY); // binarize
cvNamedWindow("imgOtsu", CV_WINDOW_AUTOSIZE );
cvShowImage( "imgOtsu", imgOtsu);// display result
cvReleaseImage(&imgOtsu);
/*---------------------------------------------------------------------------*/
/* Upper/lower threshold method: a confidence interval from the normal
   distribution of the gray values (mean +/- 2.5 * stddev). */
IplImage* imgTopDown = cvCreateImage( cvGetSize(imgGrey), IPL_DEPTH_8U, 1 );
cvCopyImage(srcImgGrey,imgTopDown);
CvScalar mean ,std_dev;// mean and standard deviation of the image
double u_threshold,d_threshold;
cvAvgSdv(imgTopDown,&mean,&std_dev,NULL);
u_threshold = mean.val[0] +2.5* std_dev.val[0];// upper threshold
d_threshold = mean.val[0] -2.5* std_dev.val[0];// lower threshold
//u_threshold = mean + 2.5 * std_dev; // wrong: CvScalar has no arithmetic operators
//d_threshold = mean - 2.5 * std_dev;
// BUG FIX: the two printouts were swapped — the "Top" line printed the lower
// threshold (d_threshold) and the "Down" line printed the upper one.
cout<<"The TopThreshold of this Image in TopDown is:"<<u_threshold<<endl;
cout<<"The DownThreshold of this Image in TopDown is:"<<d_threshold<<endl;
// NOTE(review): cvThreshold's 4th argument is max_value, not an upper bound,
// so this thresholds at d_threshold and writes u_threshold as the "on"
// value — confirm that is the intended effect.
cvThreshold(imgTopDown,imgTopDown,d_threshold,u_threshold,CV_THRESH_BINARY_INV);
cvNamedWindow("imgTopDown", CV_WINDOW_AUTOSIZE );
cvShowImage( "imgTopDown", imgTopDown);// display result
cvReleaseImage(&imgTopDown);
/*---------------------------------------------------------------------------*/
/* Iterative threshold selection (DetectThreshold is defined elsewhere in
   this file; 100 is presumably the iteration cap and nDiffRec receives the
   final difference — TODO confirm against its definition) */
IplImage* imgIteration = cvCreateImage( cvGetSize(imgGrey), IPL_DEPTH_8U, 1 );
cvCopyImage(srcImgGrey,imgIteration);
int thre3,nDiffRec;
thre3 =DetectThreshold(imgIteration, 100, nDiffRec);
cout<<"The Threshold of this Image in imgIteration is:"<<thre3<<endl;// print the threshold
cvThreshold(imgIteration,imgIteration,thre3,255,CV_THRESH_BINARY_INV);// inverse binarize
cvNamedWindow("imgIteration", CV_WINDOW_AUTOSIZE );
cvShowImage( "imgIteration", imgIteration);
cvReleaseImage(&imgIteration);
找米粒 (Finding and counting rice grains)
// meanshift_segmentation.cpp : 定义控制台应用程序的入口点。
//
#include <stdio.h>
#include<iostream>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
using namespace std;
#include "stdafx.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <opencv2/legacy/legacy.hpp>
#include "CvvImage.h"
using namespace cv;
using namespace std;
int main(int argc,char ** argv)
{
IplImage * src = cvLoadImage("c:\\img\\1.png");
IplImage * tmp=cvCreateImage(cvGetSize(src), src->depth,src->nChannels);
IplImage * backImage=cvCreateImage(cvGetSize(src), src->depth,src->nChannels);
cvShowImage("src",src);
//常见错误,如果图像本身不能被要求次数的2整除,则会报错终止,一定注意
IplConvKernel* element=cvCreateStructuringElementEx(4,4,1,1,CV_SHAPE_ELLIPSE,0);//形态学结构指针[创建结构元素,4列4行,椭圆形】
cvErode(src,tmp,element,10);//腐蚀
cvDilate(tmp,backImage,element,10);//这里得到的backImage是背景图像
cvSub(src,backImage,tmp,0);//用原始图像减去背景图像,tmp是结果图像
cvShowImage("tmp",tmp);
cvThreshold(tmp,backImage,50,255,CV_THRESH_BINARY);//这里得到的backImage是二值图
cvShowImage("backImage",backImage);
CvMemStorage* storage=cvCreateMemStorage(0);
CvSeq* contours=0,*contoursTemp=0;
//CvSeq * cont=cvCreateSeq(CV_SEQ_ELTYPE_POINT,sizeof(CvSeq),sizeof(CvPoint),stor);
IplImage* dst;
dst = cvCreateImage( cvGetSize(backImage), backImage->depth, 1 );
cvCvtColor(backImage, dst, CV_BGR2GRAY );//3通道->1通道
cvShowImage("cvCvtColor",dst);
int numberOfObject=cvFindContours(dst,storage,&contours,sizeof(CvContour), CV_RETR_CCOMP,CV_CHAIN_APPROX_NONE,cvPoint(0,0));
double maxArea=0;
double tmpArea=0;
CvSeq* maxAreaRice=0;
double maxLength=0;
double tmpLength=0;
CvSeq* maxLengthRice=0;
int i;
//cvThreshold(dst,dst,0,255,CV_THRESH_BINARY);//在画轮廓前先把图像变成白色
IplImage* dst_contours = cvCreateImage( cvGetSize(dst), dst->depth, dst->nChannels);
cvThreshold( dst_contours, dst_contours ,0, 0, CV_THRESH_BINARY ); //在画轮廓前先把图像变成黑色。 threshold=0,pixel>0,pixel = 0.
cvShowImage("dst_contourscvThreshold",dst_contours);
contoursTemp=contours;
int count=0;
for (;contoursTemp!=0;contoursTemp=contoursTemp->h_next)
{
tmpArea=fabs(cvContourArea(contoursTemp,CV_WHOLE_SEQ));
if(contoursTemp){
for( i=0;i<contoursTemp->total;i++);
{
//取得第i个返回序列
CvPoint* pt=(CvPoint*)cvGetSeqElem(contoursTemp,i);
//cvSetReal2D(dst_contours,pt->y,pt->x,255.0);
cvSet2D(dst_contours,pt->y,pt->x,cvScalar(128));
}
count++;
//提取内轮廓上的所有坐标点
CvSeq* InterCon=contoursTemp->v_next;
for(;InterCon!=0;InterCon=InterCon->h_next)
{
for( i=0;i<InterCon->total;i++)
{
CvPoint* pt=(CvPoint*)cvGetSeqElem(InterCon,i);
//cvSetReal2D(dst_contours,pt->y,pt->x,255.0);
cvSet2D(dst_contours,pt->y,pt->x,cvScalar(128));
}
}
}
}
/* CRect outputRect;
GetDlgItem(IDC_PROCESSED_PIC)->GetWindowRect(&outputRect);
m_ProcessedImage->CopyOf(dst_contours,1);
CRect rect;
SetRect( rect, 0, 0, outputRect.Width(),outputRect.Height() ); */
//m_ProcessedImage->DrawToHDC(GetDlgItem(IDC_PROCESSED_PIC)->GetDC()->GetSafeHdc(),&rect);
printf("\n米粒数目为: %d 个\n米粒最大面积: %f\n米粒最大周长: %f\n",numberOfObject,maxArea,maxLength);
cvShowImage("dst_contours",dst_contours);
//UpdateData(FALSE);
//system("pause");
cvReleaseImage(&dst);
cvReleaseImage(&dst_contours);
cvReleaseMemStorage(&storage);
cvWaitKey(0) ;
cvDestroyAllWindows();
return 0;
}
细胞统计 (Cell counting and statistics)
#include <stdio.h>
#include<iostream>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
using namespace std;
#include "stdafx.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <opencv2/legacy/legacy.hpp>
using namespace cv;
using namespace std;
/*------------------------------------------------------------------*/
// Cell-counting demo: load an image, show an inverted copy, estimate and
// subtract the background with a morphological opening, binarize, then count
// contours and report area statistics.
int main(int argc,char **argv)
{
	// Load with flag -1 (CV_LOAD_IMAGE_ANYCOLOR): keep the file's own
	// channel count and depth. Flag 1 = force 3 channels, 0 = grayscale.
	char * fileName = (char *)"c:\\img\\1.jpg"; //"lena.jpg";
	IplImage * src =0;
	src=cvLoadImage(fileName,-1);
	// src=cvLoadImage(fileName,1);  // 3-channel
	// src=cvLoadImage(fileName,0);  // grayscale
	if (!src)
	{
		cout<<"Could not load image file!"<<endl;
		return -1;
	}
	// Windows: flag 1 == CV_WINDOW_AUTOSIZE; resize and move them so they
	// do not overlap.
	cvNamedWindow("src",1);
	cvNamedWindow("dst",1);
	cvResizeWindow("src",512,512);
	cvResizeWindow("dst",512,512);
	cvMoveWindow("src",0,0);
	cvMoveWindow("dst",200,200);
	cvShowImage("src",src);
	// Save a copy in two formats (format is chosen from the extension).
	cvSaveImage("rice.jpg",src);
	cvSaveImage("rice.bmp",src);

	/* Image inversion — for testing only. Walks the raw pixel buffer using
	   widthStep (bytes per row, possibly padded). */
	IplImage * dst = cvCreateImage(cvGetSize(src),src->depth,src->nChannels);
	cvCopy(src,dst);
	int height,width,step,channels;
	uchar * data;
	height=dst->height;
	width=dst->width;
	step=dst->widthStep;
	channels=dst->nChannels;
	data=(uchar *)dst->imageData;
	for (int i=0;i<height;i++)
	{
		for (int j=0;j<width;j++)
		{
			for (int k=0;k<channels;k++)
			{
				data[i*step+j*channels+k]=255-data[i*step+j*channels+k];
			}
		}
	}
	cvShowImage("dst",dst);
	cvReleaseImage(&dst);

	/* Background estimation: morphological opening (erode then dilate) with
	   a 4x4 elliptical structuring element, 10 iterations each. */
	IplImage *tmp = cvCreateImage( cvGetSize(src), src->depth, src->nChannels);
	IplImage *src_back = cvCreateImage( cvGetSize(src), src->depth, src->nChannels);
	IplConvKernel *element = cvCreateStructuringElementEx( 4, 4, 1, 1, CV_SHAPE_ELLIPSE, 0);
	cvErode( src, tmp, element, 10);
	cvDilate( tmp, src_back, element, 10);
	cvNamedWindow( "src_back", CV_WINDOW_AUTOSIZE );
	cvShowImage( "src_back", src_back );

	/* Subtract the estimated background from the source. */
	IplImage *dst_gray = cvCreateImage( cvGetSize(src), src->depth, src->nChannels);
	cvSub( src, src_back, dst_gray, 0);
	cvNamedWindow( "dst_gray", CV_WINDOW_AUTOSIZE );
	cvShowImage( "dst_gray", dst_gray );

	/* Binarize with a fixed threshold of 50. */
	IplImage *dst_bw = cvCreateImage( cvGetSize(src), src->depth, src->nChannels);
	cvThreshold( dst_gray, dst_bw ,50, 255, CV_THRESH_BINARY );
	//cvAdaptiveThreshold( dst_gray, dst_bw, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 3, 5 );
	cvNamedWindow( "dst_bw", CV_WINDOW_AUTOSIZE );
	cvShowImage( "dst_bw", dst_bw );

	/* Count the objects: each top-level contour found is one object. */
	int Number_Object =0;
	CvMemStorage *stor = cvCreateMemStorage(0);
	CvSeq * cont = 0;  // cvFindContours overwrites this with its own sequence
	Number_Object = cvFindContours( dst_bw, stor, &cont, sizeof(CvContour), \
		CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
	printf("Number_Object: %d\n", Number_Object);

	/* Per-object statistics: max and average contour area. */
	IplImage *dst_contours = 0;
	int contour_area_tmp = 0;
	int contour_area_sum = 0;
	int contour_area_ave = 0;
	int contour_area_max = 0;
	dst_contours = cvCreateImage( cvGetSize(src), src->depth, src->nChannels);
	// BUG FIX: the original "whitened" an uninitialized image via cvThreshold,
	// whose result on garbage data is unpredictable; cvSet paints it white.
	cvSet( dst_contours, cvScalarAll(255), NULL );
	for(; cont; cont = cont->h_next)
	{
		cvDrawContours( dst_contours, cont, CV_RGB(255, 0, 0), CV_RGB(255, 0, 0), 0, 1, 8, cvPoint(0, 0) );
		contour_area_tmp = (int)fabs(cvContourArea( cont, CV_WHOLE_SEQ ));
		if( contour_area_tmp > contour_area_max )
		{
			contour_area_max = contour_area_tmp;  // largest contour area
		}
		contour_area_sum += contour_area_tmp;     // sum over all contours
	}
	// BUG FIX: guard the division — with no contours found the original
	// divided by zero.
	if (Number_Object > 0)
	{
		contour_area_ave = contour_area_sum/ Number_Object;
	}
	printf("contour_area_ave: %d\n", contour_area_ave );
	printf("contour_area_max: %d\n", contour_area_max );
	cvNamedWindow( "dst_contours", CV_WINDOW_AUTOSIZE );
	cvShowImage( "dst_contours", dst_contours );

	// Wait for a key press. NOTE(review): cvWaitKey must be called
	// periodically for HighGUI event processing; it does not work under MFC.
	cvWaitKey(0);

	// Destroy windows and release all resources (cvDestroyAllWindows already
	// covers every named window; the per-window cvDestroyWindow calls of the
	// original were redundant, and the structuring element was leaked).
	cvDestroyAllWindows();
	cvReleaseImage(&src);
	cvReleaseImage(&tmp);
	cvReleaseImage(&src_back);
	cvReleaseImage(&dst_gray);
	cvReleaseImage(&dst_bw);
	cvReleaseImage(&dst_contours);
	cvReleaseStructuringElement(&element);
	cvReleaseMemStorage(&stor);
	return 0;
}