要点
1.边界线链码
2.链码的长度,区域面积
3.计算灰度共生矩阵,特征
4.SIFT特征
解释
1.边界线链码
简单来说就是利用一个八邻域去搜索图像的边界,这里自然指的是二值图像。
利用点的八邻域信息选择下一个边界点。算法需要先确定一个起始点,可以选取图像中最上、最左的目标点。然后查看它八邻域中的点,从右下方45°的位置开始寻找:如果该点是目标点,则将当前方向顺时针旋转90°作为下一次寻找的起始方向;如果不是目标点,则逆时针旋转45°继续寻找。一旦找到新的边界点,就重复上面的过程,直到回到起始点。
参考:CSDN
2.灰度共生矩阵
我自己还是不写了,大家看参考。
参考:CSDN
3.SIFT特征
原理网上一搜一大把,此处不再展开。
参考:CSDN
程序
1.边界线链码
#include <cv.h>
#include <highgui.h>
#include <iostream>
#include <stack>
using namespace std;
int main()
{
IplImage * image, *image2, *image3;
image = cvLoadImage("F:\\c_code\\week8_1\\week8_1\\1.png", 0);
cvNamedWindow("image", 1);
cvShowImage("image", image);
image2 = cvCreateImage(cvSize(image->width, image->height), image->depth, 1);
image3 = cvCreateImage(cvSize(image->width, image->height), image->depth, 1);
cvZero(image2);//image2 赋值为0
cvZero(image3);
//寻找区域的左上角点
CvPoint startPoint = cvPoint(0, 0);
bool bFindStartpoint = false;
int i, j;
unsigned char * ptr, *dst;
stack<int> board;//奇数位存储x坐标,偶数位存储y坐标
//当前扫描点
CvPoint currentPoint = cvPoint(0, 0);
//邻域的8个点的方向
int directions[8][2] = { { 0, 1 }, { 1, 1 }, { 1, 0 }, { 1, -1 }, { 0, -1 }, { -1, -1 }, { -1, 0 }, { -1, 1 } };
int beginDirection = 0;
bool bFindBoardpoint = false;//寻找到邻域的边界点的判定
for (i = 0; i< image->height && bFindStartpoint == false; i++)
{
for (j = 0; j< image->width && bFindStartpoint == false; j++)
{
ptr = (unsigned char *)(image->imageData + i*image->widthStep + j);
if (*ptr == 255)
{
startPoint.x = j;
startPoint.y = i;
bFindStartpoint = true;
cout<<"x: " << j <<" y : " <<i <<endl;
cvWaitKey(0);
}
}
}
//进行边界跟踪 每次搜索8个方向的点 找到了即停止
currentPoint = startPoint;
bFindStartpoint = false;
beginDirection = 0;
board.push(startPoint.x);
board.push(startPoint.y);
while (!bFindStartpoint)
{
bFindBoardpoint = false;
//在8个方向寻找符合条件的边界点
while (!bFindBoardpoint)
{
//进行出界判定
ptr = (unsigned char *)(image->imageData + (currentPoint.y + directions[beginDirection][1])* image->widthStep + currentPoint.x + directions[beginDirection][0]);
if (*ptr == 255)
{
bFindBoardpoint = true;
currentPoint.x += directions[beginDirection][0];
currentPoint.y += directions[beginDirection][1];
/************************************************************************/
/* 此处添加序列存储的代码 */
/************************************************************************/
//一、将边界存储到图片中
dst = (unsigned char *)image2->imageData + currentPoint.y * image2->widthStep + currentPoint.x;
*dst = 255;
//二、将边界点的序列存储到一个堆栈中
board.push(currentPoint.x);
board.push(currentPoint.y);
if (currentPoint.x == startPoint.x && currentPoint.y == startPoint.y)
{
bFindStartpoint = true;
}
//改变下次首先开始扫描的方向
beginDirection -= 2;
if (beginDirection < 0)
{
beginDirection += 8;
}
}
else
{
beginDirection++;
beginDirection = beginDirection % 8;
}
}
cout<<"currentPoint "<<currentPoint.x <<" "<< currentPoint.y<<endl;
}
cvNamedWindow("image2", 1);
cvShowImage("image2", image2);
int lianmaLength = (board.size() + 5) / 10;
int* lianma = new int[lianmaLength];
for (i = 0; i< lianmaLength && !board.empty(); i += 2)
{
lianma[i + 1] = board.top();
board.pop();
lianma[i] = board.top();
board.pop();
for (j = 0; j< 18 && !board.empty(); j++)
{
board.pop();
}
}
//将数据在image3中显示
int t;
for (t = 0; t < lianmaLength; t += 2)
{
i = lianma[t + 1];
j = lianma[t];
ptr = (unsigned char *)image3->imageData + i*image->widthStep + j;
*ptr = 255;
}
cvNamedWindow("image3", 1);
cvSaveImage("E:\\image\\bottle2lianma.bmp", image3);
cvShowImage("image3", image3);
cvWaitKey(0);
return 0;
}
注:这里的代码在找到初始点后,进行了等待,此时按下任意键,程序继续执行。控制台输出点的位置。
结果:
(结果图略)
2.链码的长度,区域面积
#include <opencv2\opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat srcImage = imread("F:\\c_code\\week8_2\\week8_2\\1.png",0);
for (int y = 0; y < srcImage.rows; y++)
{
uchar* data = srcImage.ptr<uchar>(y);
for (int x = 0; x < srcImage.cols; x++)
{
if (data[x] < 100)
data[x] = 0;
else
data[x] = 255;
}
}
imshow("srcImage", srcImage);
vector<vector<Point>> contours;
vector<Vec4i> hier;
double length,area;
findContours(srcImage, contours,RETR_EXTERNAL,CHAIN_APPROX_NONE);
for (int i = 0; i < (int)contours.size(); i++)
{
area = contourArea(contours[i]);
length = arcLength(contours[i], true);
cout << " area is " << area;
cout << " length is " << length << endl;
}
waitKey(0);
return 0;
}
这一段不解释了。
3.灰度共生矩阵
#include<iostream>
#include<opencv2/highgui.hpp>
#include<opencv2/core.hpp>
//#include<opencv2/imgcodecs.hpp>
#include<opencv2/opencv.hpp>
using namespace std;
using namespace cv;
const int gray_level = 16; // GLCM quantisation target: co-occurrence matrices are at most 16x16
// Build the 0-degree (horizontal, offset (+1, 0)) gray-level co-occurrence
// matrix of a single-channel image.
// If the image uses more than gray_level gray values it is first quantised
// down to gray_level levels; dst becomes a CV_32SC1 square matrix of raw
// neighbour-pair counts.
void getglcm_horison(Mat& input, Mat& dst)
{
    Mat src = input;
    CV_Assert(1 == src.channels());
    src.convertTo(src, CV_32S); // reallocates, so `input` itself is untouched
    int height = src.rows;
    int width = src.cols;

    // Number of gray levels actually present = max pixel value + 1.
    int max_gray_level = 0;
    for (int i = 0; i < height; i++)
    {
        int* srcdata = src.ptr<int>(i);
        for (int j = 0; j < width; j++)
        {
            if (srcdata[j] > max_gray_level)
                max_gray_level = srcdata[j];
        }
    }
    max_gray_level++;

    // fix: use the gray_level constant instead of a magic 16, and quantise
    // once so the counting loop below is not duplicated per branch.
    if (max_gray_level > gray_level)
    {
        for (int i = 0; i < height; i++)
        {
            int* srcdata = src.ptr<int>(i);
            for (int j = 0; j < width; j++)
                srcdata[j] /= gray_level; // for 8-bit input: 0..255 -> 0..15
        }
        max_gray_level = gray_level;
    }

    // Count horizontal neighbour pairs (j, j+1) within each row.
    dst.create(max_gray_level, max_gray_level, CV_32SC1);
    dst = Scalar::all(0);
    for (int i = 0; i < height; i++)
    {
        int* srcdata = src.ptr<int>(i);
        for (int j = 0; j < width - 1; j++)
        {
            dst.ptr<int>(srcdata[j])[srcdata[j + 1]]++;
        }
    }
}
// Build the 90-degree (vertical, offset (0, +1)) gray-level co-occurrence
// matrix of a single-channel image. Quantisation behaviour matches
// getglcm_horison: more than gray_level gray values -> reduce to gray_level.
void getglcm_vertical(Mat& input, Mat& dst)
{
    Mat src = input;
    CV_Assert(1 == src.channels());
    src.convertTo(src, CV_32S); // reallocates, so `input` itself is untouched
    int height = src.rows;
    int width = src.cols;

    // Number of gray levels actually present = max pixel value + 1.
    int max_gray_level = 0;
    for (int i = 0; i < height; i++)
    {
        int* srcdata = src.ptr<int>(i);
        for (int j = 0; j < width; j++)
        {
            if (srcdata[j] > max_gray_level)
                max_gray_level = srcdata[j];
        }
    }
    max_gray_level++;

    // fix: quantise once (gray_level constant, no magic 16) so the counting
    // loop is written a single time instead of duplicated per branch.
    if (max_gray_level > gray_level)
    {
        for (int i = 0; i < height; i++)
        {
            int* srcdata = src.ptr<int>(i);
            for (int j = 0; j < width; j++)
                srcdata[j] /= gray_level; // for 8-bit input: 0..255 -> 0..15
        }
        max_gray_level = gray_level;
    }

    // Count vertical neighbour pairs: row i with row i+1, same column.
    dst.create(max_gray_level, max_gray_level, CV_32SC1);
    dst = Scalar::all(0);
    for (int i = 0; i < height - 1; i++)
    {
        int* srcdata = src.ptr<int>(i);
        int* srcdata1 = src.ptr<int>(i + 1);
        for (int j = 0; j < width; j++)
        {
            dst.ptr<int>(srcdata[j])[srcdata1[j]]++;
        }
    }
}
// Build the 45-degree (offset (+1, +1)) gray-level co-occurrence matrix of a
// single-channel image. Quantisation behaviour matches getglcm_horison.
void getglcm_45(Mat& input, Mat& dst)
{
    Mat src = input;
    CV_Assert(1 == src.channels());
    src.convertTo(src, CV_32S); // reallocates, so `input` itself is untouched
    int height = src.rows;
    int width = src.cols;

    // Number of gray levels actually present = max pixel value + 1.
    int max_gray_level = 0;
    for (int i = 0; i < height; i++)
    {
        int* srcdata = src.ptr<int>(i);
        for (int j = 0; j < width; j++)
        {
            if (srcdata[j] > max_gray_level)
                max_gray_level = srcdata[j];
        }
    }
    max_gray_level++;

    // fix: quantise once (gray_level constant, no magic 16) so the counting
    // loop is written a single time instead of duplicated per branch.
    if (max_gray_level > gray_level)
    {
        for (int i = 0; i < height; i++)
        {
            int* srcdata = src.ptr<int>(i);
            for (int j = 0; j < width; j++)
                srcdata[j] /= gray_level; // for 8-bit input: 0..255 -> 0..15
        }
        max_gray_level = gray_level;
    }

    // Count diagonal pairs: (i, j) with (i+1, j+1).
    dst.create(max_gray_level, max_gray_level, CV_32SC1);
    dst = Scalar::all(0);
    for (int i = 0; i < height - 1; i++)
    {
        int* srcdata = src.ptr<int>(i);
        int* srcdata1 = src.ptr<int>(i + 1);
        for (int j = 0; j < width - 1; j++)
        {
            dst.ptr<int>(srcdata[j])[srcdata1[j + 1]]++;
        }
    }
}
// Build the 135-degree (offset (-1, +1)) gray-level co-occurrence matrix of a
// single-channel image. Quantisation behaviour matches getglcm_horison.
void getglcm_135(Mat& input, Mat& dst)
{
    Mat src = input;
    CV_Assert(1 == src.channels());
    src.convertTo(src, CV_32S); // reallocates, so `input` itself is untouched
    int height = src.rows;
    int width = src.cols;

    // Number of gray levels actually present = max pixel value + 1.
    int max_gray_level = 0;
    for (int i = 0; i < height; i++)
    {
        int* srcdata = src.ptr<int>(i);
        for (int j = 0; j < width; j++)
        {
            if (srcdata[j] > max_gray_level)
                max_gray_level = srcdata[j];
        }
    }
    max_gray_level++;

    // fix: quantise once (gray_level constant, no magic 16) so the counting
    // loop is written a single time instead of duplicated per branch.
    if (max_gray_level > gray_level)
    {
        for (int i = 0; i < height; i++)
        {
            int* srcdata = src.ptr<int>(i);
            for (int j = 0; j < width; j++)
                srcdata[j] /= gray_level; // for 8-bit input: 0..255 -> 0..15
        }
        max_gray_level = gray_level;
    }

    // Count anti-diagonal pairs: (i, j) with (i+1, j-1).
    dst.create(max_gray_level, max_gray_level, CV_32SC1);
    dst = Scalar::all(0);
    for (int i = 0; i < height - 1; i++)
    {
        int* srcdata = src.ptr<int>(i);
        int* srcdata1 = src.ptr<int>(i + 1);
        for (int j = 1; j < width; j++)
        {
            dst.ptr<int>(srcdata[j])[srcdata1[j - 1]]++;
        }
    }
}
// Accumulate four Haralick texture features of a co-occurrence matrix into the
// caller-initialised accumulators:
//   Asm - energy (angular second moment)
//   Eng - entropy
//   Con - contrast
//   Idm - inverse difference moment (homogeneity)
// src must be a CV_32SC1 count matrix such as those built by getglcm_*().
// NOTE: the outputs are ADDED to, matching the original contract where the
// caller zero-initialises them before the call.
void feature_computer(Mat& src, double& Asm, double& Eng, double& Con, double& Idm)
{
    int height = src.rows;
    int width = src.cols;

    // Total number of counted pairs, used to normalise counts to probabilities.
    int total = 0;
    for (int i = 0; i < height; i++)
    {
        int* srcdata = src.ptr<int>(i);
        for (int j = 0; j < width; j++)
        {
            total += srcdata[j];
        }
    }
    if (total == 0) // fix: an all-zero matrix divided by zero in the original
    {
        return;
    }

    // Normalise on the fly instead of materialising a full CV_64FC1 copy of
    // the matrix first (same math, one pass, no extra allocation).
    for (int i = 0; i < height; i++)
    {
        int* srcdata = src.ptr<int>(i);
        for (int j = 0; j < width; j++)
        {
            double p = (double)srcdata[j] / (double)total; // P(i, j)
            double d = (double)(i - j);
            Asm += p * p;               // energy
            if (p > 0)
                Eng -= p * log(p);      // entropy (skip p == 0: log undefined)
            Con += d * d * p;           // contrast
            Idm += p / (1 + d * d);     // inverse difference moment
        }
    }
}
int main()
{
Mat dst_horison, dst_vertical, dst_45, dst_135;
Mat src = imread("F:\\c_code\\week8_3\\week8_3\\1.jpg");
if (src.empty())
{
return -1;
}
Mat src_gray;
cvtColor(src, src_gray, COLOR_BGR2GRAY);
imshow("srcImage", src_gray);
getglcm_horison(src_gray, dst_horison);
getglcm_vertical(src_gray, dst_vertical);
getglcm_45(src_gray, dst_45);
getglcm_135(src_gray, dst_135);
double eng_horison = 0, con_horison = 0, idm_horison = 0, asm_horison = 0;
feature_computer(dst_horison, asm_horison, eng_horison, con_horison, idm_horison);
cout << "能量:" << asm_horison << endl;
cout << "熵:" << eng_horison << endl;
cout << "对比度:" << con_horison << endl;
cout << "逆差分矩:" << idm_horison << endl;
cout << "------------------------------------------------------------------------" << endl;
for (int i = 0; i < dst_horison.rows; i++)
{
int *data = dst_horison.ptr<int>(i);
for (int j = 0; j < dst_horison.cols; j++)
{
cout << data[j] << " ";
}
cout << endl;
}
cout << endl;
for (int i = 0; i < dst_vertical.rows; i++)
{
int *data = dst_vertical.ptr<int>(i);
for (int j = 0; j < dst_vertical.cols; j++)
{
cout << data[j] << " ";
}
cout << endl;
}
cout << endl;
for (int i = 0; i < dst_45.rows; i++)
{
int *data = dst_45.ptr<int>(i);
for (int j = 0; j < dst_45.cols; j++)
{
cout << data[j] << " ";
}
cout << endl;
}
cout << endl;
for (int i = 0; i < dst_135.rows; i++)
{
int *data = dst_135.ptr<int>(i);
for (int j = 0; j < dst_135.cols; j++)
{
cout << data[j] << " ";
}
cout << endl;
}
waitKey(0);
return 0;
}
结果:
4.SIFT特征
#include <opencv2\opencv.hpp>
#include <opencv2\nonfree\features2d.hpp>
#include <iostream>
#include <opencv2\nonfree\\nonfree.hpp>
#include <opencv2\legacy\legacy.hpp>
using namespace cv;
using namespace std;
int main()
{
Mat img_1 = imread("F:\\c_code\\week8_4\\week8_4\\1.png");
Mat img_2 = imread("F:\\c_code\\week8_4\\week8_4\\2.png");
Mat dstImage1, dstImage2;
std::vector<KeyPoint> keyPoint1, keyPoint2;
Mat descriptor1, descriptor2;
SiftFeatureDetector sift1(500000000);
SiftFeatureDetector sift2(500000000);
SiftDescriptorExtractor siftdescriptor;
sift1.detect(img_1, keyPoint1);
sift2.detect(img_2, keyPoint2);
drawKeypoints(img_1, keyPoint1, dstImage1, Scalar(0, 0, 255));
drawKeypoints(img_2, keyPoint2, dstImage2, Scalar(0, 0, 255));
siftdescriptor.compute(img_1, keyPoint1, descriptor1);
siftdescriptor.compute(img_2, keyPoint2, descriptor2);
vector<DMatch> matches;
BruteForceMatcher<L2<float>> matcher;
matcher.match(descriptor1, descriptor2, matches, Mat());
Mat out;
drawMatches(img_1, keyPoint1, img_2, keyPoint2, matches, out);
imshow("dstImage1", dstImage1);
imshow("dstImage2", dstImage2);
imshow("result", out);
double min = 100000, max = 0;
for (int i = 0; i < descriptor1.rows; i++)
{
double dist = matches[i].distance;
if (dist < min) min = dist;
if (dist>max) max = dist;
}
std::vector<DMatch> good;
for (int i = 0; i < descriptor1.rows; i++)
{
if (matches[i].distance <= std::max(2.6 * min, 30.0))
good.push_back(matches[i]);
}
Mat goodMatch;
drawMatches(img_1, keyPoint1, img_2, keyPoint2, good, goodMatch);
imshow("after optimization", goodMatch);
waitKey(0);
return 0;
}
结果:
关于怎么去滤掉一些误匹配的情况,还是有很多办法的,这里只用了最简单的一种。
最后
这是这个系列的最后一篇了,这门计算机视觉课也上完了(啥计算机视觉啊,明明是图像处理好嘛),Anyway,要画上句号了,接下来我会主要把精力集中在deep learning和图形学上面了,see you!