Preface
In the RoboMaster competition, auto-aim is a core part of the vision group's work, and auto-aim itself covers armor-plate detection, tracking and more. As a newcomer to the vision group, I will record my learning process and takeaways here, hoping to keep improving.
Analysis of the official example
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include "cxcore.h"
#include "omp.h"
using namespace cv;
using namespace std;
#define T_ANGLE_THRE 10 // maximum angle difference (degrees) allowed between two paired light bars
#define T_SIZE_THRE 5   // divisor used in the size-similarity test when pairing light bars
void BrightAdjust(IplImage* src, IplImage* dst,
                  double dContrast, double dBright)
{
    unsigned char* SrcData = (unsigned char*)src->imageData; // pointer to the source pixel data
    unsigned char* DstData = (unsigned char*)dst->imageData; // pointer to the destination pixel data
    int step = src->widthStep / sizeof(unsigned char) / 3;   // pixels per row, used as the row stride
    // widthStep is the total number of bytes stored per row; for a 3-channel (BGR) image
    // the number of pixels per row is the byte count divided by 3
    omp_set_num_threads(8);
#pragma omp parallel for
    for (int nI = 0; nI < src->height; nI++)
    {
        for (int nJ = 0; nJ < src->width; nJ++)
        {
            for (int nK = 0; nK < 3; nK++)
            {
                // nVal must be local to the loop body; if it is declared at function scope
                // it is shared between the OpenMP threads and causes a data race
                int nVal = (int)(dContrast * SrcData[(nI * step + nJ) * 3 + nK] + dBright);
                if (nVal < 0)
                    nVal = 0;
                if (nVal > 255)
                    nVal = 255;
                DstData[(nI * step + nJ) * 3 + nK] = nVal;
            }
        }
    }
}
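What BrightAdjust does is simply a per-pixel linear transform dst = contrast * src + bright, clamped to [0, 255] and parallelized with OpenMP. For comparison, the same adjustment with the modern cv::Mat API fits in one call; below is a minimal sketch (BrightAdjustMat is my own helper name, assuming OpenCV 2.x or later), since cv::Mat::convertTo already applies the scale, offset and saturation:

#include <opencv2/opencv.hpp>

// Minimal sketch: equivalent brightness/contrast adjustment with cv::Mat.
// convertTo computes dst = saturate_cast<uchar>(alpha * src + beta) per channel.
void BrightAdjustMat(const cv::Mat& src, cv::Mat& dst, double dContrast, double dBright)
{
    src.convertTo(dst, CV_8UC3, dContrast, dBright);
}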
void GetDiffImage(IplImage* src1, IplImage* src2, IplImage* dst, int nThre)
{   // if (R channel - G channel) of a pixel exceeds nThre, the output pixel becomes 255, otherwise 0
    unsigned char* SrcData1 = (unsigned char*)src1->imageData;
    unsigned char* SrcData2 = (unsigned char*)src2->imageData;
    unsigned char* DstData = (unsigned char*)dst->imageData;
    int step = src1->widthStep / sizeof(unsigned char); // row stride in pixels (single-channel image)
    omp_set_num_threads(8);
#pragma omp parallel for
    for (int nI = 0; nI < src1->height; nI++)
    {
        for (int nJ = 0; nJ < src1->width; nJ++)
        {
            if (SrcData1[nI * step + nJ] - SrcData2[nI * step + nJ] > nThre)
            {
                DstData[nI * step + nJ] = 255;
            }
            else
            {
                DstData[nI * step + nJ] = 0;
            }
        }
    }
}
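This R-minus-G threshold is what isolates the red light bars: for a red LED the R channel is much larger than G, while for white highlights both channels saturate and the difference stays small. A minimal sketch of the same step with the modern API (GetDiffImageMat is my own helper name; cv::subtract saturates negative results to 0, so a plain cv::threshold afterwards is enough):

#include <opencv2/opencv.hpp>

// Minimal sketch: binarize (R - G) > nThre with cv::Mat.
cv::Mat GetDiffImageMat(const cv::Mat& rChannel, const cv::Mat& gChannel, int nThre)
{
    cv::Mat diff, binary;
    cv::subtract(rChannel, gChannel, diff);                      // negative values saturate to 0
    cv::threshold(diff, binary, nThre, 255, cv::THRESH_BINARY);  // 255 where diff > nThre, 0 elsewhere
    return binary;
}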
vector<CvBox2D> ArmorDetect(vector<CvBox2D> vEllipse)
{
    vector<CvBox2D> vRlt;
    CvBox2D Armor;
    int nL, nW;
    double dAngle;
    vRlt.clear();
    if (vEllipse.size() < 2) // fewer than two light bars, no armor plate can be formed
        return vRlt;
    for (unsigned int nI = 0; nI < vEllipse.size() - 1; nI++)
    {
        for (unsigned int nJ = nI + 1; nJ < vEllipse.size(); nJ++)
        {
            dAngle = abs(vEllipse[nI].angle - vEllipse[nJ].angle);
            while (dAngle > 180)
                dAngle -= 180;
            // the condition below decides whether the two LED bars belong to the same armor plate:
            // their angles must be nearly equal (or differ by ~180 degrees), and their heights
            // and widths must be similar relative to their combined size
            if ((dAngle < T_ANGLE_THRE || 180 - dAngle < T_ANGLE_THRE)
                && abs(vEllipse[nI].size.height - vEllipse[nJ].size.height) < (vEllipse[nI].size.height + vEllipse[nJ].size.height) / T_SIZE_THRE
                && abs(vEllipse[nI].size.width - vEllipse[nJ].size.width) < (vEllipse[nI].size.width + vEllipse[nJ].size.width) / T_SIZE_THRE)
            {
                Armor.center.x = (vEllipse[nI].center.x + vEllipse[nJ].center.x) / 2; // armor center is the midpoint of the two bar centers
                Armor.center.y = (vEllipse[nI].center.y + vEllipse[nJ].center.y) / 2;
                Armor.angle = (vEllipse[nI].angle + vEllipse[nJ].angle) / 2;          // rotation angle of the armor rectangle
                if (180 - dAngle < T_ANGLE_THRE)
                    Armor.angle += 90;
                nL = (vEllipse[nI].size.height + vEllipse[nJ].size.height) / 2;       // bar length: average height of the two rotated rectangles
                nW = sqrt((vEllipse[nI].center.x - vEllipse[nJ].center.x) * (vEllipse[nI].center.x - vEllipse[nJ].center.x)
                        + (vEllipse[nI].center.y - vEllipse[nJ].center.y) * (vEllipse[nI].center.y - vEllipse[nJ].center.y));
                // the armor width equals the distance between the centers of the two matched rotated rectangles
                if (nL < nW)
                {
                    Armor.size.height = nL;
                    Armor.size.width = nW;
                }
                else
                {
                    Armor.size.height = nW;
                    Armor.size.width = nL;
                }
                vRlt.push_back(Armor); // save the rotated rectangle of the detected armor plate
            }
        }
    }
    return vRlt;
}
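In words, two light bars are paired when their orientations differ by less than T_ANGLE_THRE degrees (modulo 180) and their heights and widths each differ by less than one T_SIZE_THRE-th of their sum. The same predicate written against the modern cv::RotatedRect type, as a rough sketch with the same thresholds (IsArmorPair is my own helper name):

#include <opencv2/opencv.hpp>
#include <cmath>

// Rough sketch of the pairing predicate with cv::RotatedRect (same thresholds as the example).
bool IsArmorPair(const cv::RotatedRect& a, const cv::RotatedRect& b)
{
    double dAngle = std::fabs(a.angle - b.angle);
    while (dAngle > 180.0)
        dAngle -= 180.0;
    bool angleOk  = dAngle < T_ANGLE_THRE || 180.0 - dAngle < T_ANGLE_THRE;
    bool heightOk = std::fabs(a.size.height - b.size.height) < (a.size.height + b.size.height) / T_SIZE_THRE;
    bool widthOk  = std::fabs(a.size.width  - b.size.width)  < (a.size.width  + b.size.width)  / T_SIZE_THRE;
    return angleOk && heightOk && widthOk;
}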
void DrawBox(CvBox2D box, IplImage* img)
{
    CvPoint2D32f point[4];
    int i;
    for (i = 0; i < 4; i++)
    {
        point[i].x = 0;
        point[i].y = 0;
    }
    cvBoxPoints(box, point); // compute the four corner points of the rotated rectangle
    CvPoint pt[4];
    for (i = 0; i < 4; i++)
    {
        pt[i].x = (int)point[i].x;
        pt[i].y = (int)point[i].y;
    }
    cvLine(img, pt[0], pt[1], CV_RGB(0, 0, 255), 2, 8, 0); // these four calls connect the four corners into a rectangle
    cvLine(img, pt[1], pt[2], CV_RGB(0, 0, 255), 2, 8, 0);
    cvLine(img, pt[2], pt[3], CV_RGB(0, 0, 255), 2, 8, 0);
    cvLine(img, pt[3], pt[0], CV_RGB(0, 0, 255), 2, 8, 0);
}
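With the C++ API the same drawing is usually done with cv::RotatedRect::points plus cv::line; a minimal sketch (DrawBoxMat is my own helper name), assuming OpenCV 3/4:

#include <opencv2/opencv.hpp>

// Minimal sketch: draw a rotated rectangle on a cv::Mat.
void DrawBoxMat(const cv::RotatedRect& box, cv::Mat& img)
{
    cv::Point2f pts[4];
    box.points(pts); // four corners of the rotated rectangle
    for (int i = 0; i < 4; i++)
        cv::line(img, pts[i], pts[(i + 1) % 4], cv::Scalar(255, 0, 0), 2); // blue in BGR, like CV_RGB(0, 0, 255)
}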
int main()
{
    CvCapture* pCapture0 = cvCreateFileCapture("RawImage\\RedCar.avi"); // open the video so it can be read frame by frame
    //CvCapture* pCapture0 = cvCreateCameraCapture(0);
    IplImage* pFrame0 = NULL;
    CvSize pImgSize;
    CvBox2D s;
    vector<CvBox2D> vEllipse;
    vector<CvBox2D> vRlt;
    vector<CvBox2D> vArmor;
    CvScalar sl;
    bool bFlag = false;
    CvSeq *pContour = NULL;
    CvMemStorage *pStorage = cvCreateMemStorage(0);
    pFrame0 = cvQueryFrame(pCapture0); // grab and decode one frame; the caller must not release or modify the returned frame
    pImgSize = cvGetSize(pFrame0);
    IplImage *pRawImg = cvCreateImage(pImgSize, IPL_DEPTH_8U, 3);
    IplImage* pGrayImage = cvCreateImage(pImgSize, IPL_DEPTH_8U, 1); // single-channel working images
    IplImage* pRImage = cvCreateImage(pImgSize, IPL_DEPTH_8U, 1);
    IplImage* pGImage = cvCreateImage(pImgSize, IPL_DEPTH_8U, 1);
    IplImage *pBImage = cvCreateImage(pImgSize, IPL_DEPTH_8U, 1);
    IplImage *pBinary = cvCreateImage(pImgSize, IPL_DEPTH_8U, 1);
    IplImage *pRlt = cvCreateImage(pImgSize, IPL_DEPTH_8U, 1);
    CvSeq* lines = NULL;                           // unused in this example
    CvMemStorage* storage = cvCreateMemStorage(0); // unused in this example
    while (1)
    {
        if (pFrame0) // process the frame if one was read
        {
            BrightAdjust(pFrame0, pRawImg, 1, -120);        // darken the image so that only the bright light bars keep high values
            cvSplit(pRawImg, pBImage, pGImage, pRImage, 0); // split the B, G and R channels
            GetDiffImage(pRImage, pGImage, pBinary, 25);    // binarize on R - G to keep the red light bars
            cvDilate(pBinary, pGrayImage, NULL, 3);         // dilate to fill small gaps inside the bars
            cvErode(pGrayImage, pRlt, NULL, 1);             // erode to remove noise and small interference points
            cvFindContours(pRlt, pStorage, &pContour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
            for (; pContour != NULL; pContour = pContour->h_next)
            {
                if (pContour->total > 10) // skip contours with too few points to fit an ellipse reliably
                {
                    bFlag = true;
                    s = cvFitEllipse2(pContour); // fit the elliptical (rotated) bounding box of the contour
                    for (int nI = 0; nI < 5; nI++) // scan the 5x5 pixel block centered on the fitted center
                    {
                        for (int nJ = 0; nJ < 5; nJ++)
                        {
                            if (s.center.y - 2 + nJ > 0 && s.center.y - 2 + nJ < 480 && s.center.x - 2 + nI > 0 && s.center.x - 2 + nI < 640)
                            { // make sure the center and its neighborhood lie inside the (640x480) image
                                sl = cvGet2D(pFrame0, (int)(s.center.y - 2 + nJ), (int)(s.center.x - 2 + nI)); // read the B, G, R values of the pixel
                                if (sl.val[0] < 200 || sl.val[1] < 200 || sl.val[2] < 200) // the center of a real light bar should be close to white
                                    bFlag = false;
                            }
                        }
                    }
                    if (bFlag)
                    {
                        vEllipse.push_back(s); // keep the fitted box of this light-bar candidate
                        //cvEllipseBox(pFrame0, s, CV_RGB(255, 0, 0), 2, 8, 0);
                    }
                }
            }
            vRlt = ArmorDetect(vEllipse); // pair the light bars into armor plates
            for (unsigned int nI = 0; nI < vRlt.size(); nI++)
                DrawBox(vRlt[nI], pFrame0); // draw the corners of every detected armor plate on the frame
            cvShowImage("Raw", pFrame0);
            cvWaitKey(0); // wait for a key press before moving on to the next frame
            vEllipse.clear();
            vRlt.clear();
            vArmor.clear();
        }
        else
        {
            break; // stop when no more frames are available (the original keeps looping forever here)
        }
        pFrame0 = cvQueryFrame(pCapture0); // grab the next frame from the video
    }
    cvReleaseCapture(&pCapture0); // release the capture structure
    return 0;
}
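For reference, the whole pipeline in main (brightness adjustment, channel split, R-G threshold, morphology, contour and ellipse fitting, pairing, drawing) maps almost one-to-one onto the modern C++ API. The following is a rough, simplified sketch that reuses the hypothetical helpers sketched above (BrightAdjustMat, GetDiffImageMat, IsArmorPair, DrawBoxMat); it skips the 5x5 center-brightness check and the width/height normalization of the original, so it only shows the correspondence and is not a drop-in replacement:

#include <opencv2/opencv.hpp>
#include <cmath>
#include <string>
#include <vector>

int RunPipeline(const std::string& videoPath)
{
    cv::VideoCapture cap(videoPath);
    if (!cap.isOpened())
        return -1;

    cv::Mat frame, adjusted, binary;
    std::vector<cv::Mat> channels;
    while (cap.read(frame))
    {
        BrightAdjustMat(frame, adjusted, 1.0, -120.0);          // darken so only the light bars stay bright
        cv::split(adjusted, channels);                          // channels[0]=B, [1]=G, [2]=R
        binary = GetDiffImageMat(channels[2], channels[1], 25); // keep pixels where R - G > 25
        cv::dilate(binary, binary, cv::Mat(), cv::Point(-1, -1), 3);
        cv::erode(binary, binary, cv::Mat(), cv::Point(-1, -1), 1);

        std::vector<std::vector<cv::Point>> contours;
        cv::findContours(binary, contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);

        std::vector<cv::RotatedRect> bars;
        for (const auto& c : contours)
            if (c.size() > 10)                                  // enough points for a stable ellipse fit
                bars.push_back(cv::fitEllipse(c));

        for (size_t i = 0; i + 1 < bars.size(); i++)            // pair light bars into armor plates
            for (size_t j = i + 1; j < bars.size(); j++)
                if (IsArmorPair(bars[i], bars[j]))
                {
                    cv::Point2f c1 = bars[i].center, c2 = bars[j].center;
                    float width = std::hypot(c1.x - c2.x, c1.y - c2.y); // distance between bar centers
                    cv::RotatedRect armor(cv::Point2f((c1.x + c2.x) * 0.5f, (c1.y + c2.y) * 0.5f),
                                          cv::Size2f(width, (bars[i].size.height + bars[j].size.height) * 0.5f),
                                          (bars[i].angle + bars[j].angle) * 0.5f);
                    DrawBoxMat(armor, frame);
                }

        cv::imshow("Raw", frame);
        if (cv::waitKey(0) == 27)                               // press Esc to quit
            break;
    }
    return 0;
}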