#include "PictureDemo.h"
#include <fstream>
#include "cvui.h"
#include "./libsvm/svm.h"
using namespace std;
using namespace cv::ml;
// Default constructor — nothing to initialize.
PictureDemo::PictureDemo()
{
}
// Destructor — no resources to release.
PictureDemo::~PictureDemo()
{
}
/* Read and display an image. All display calls are currently commented out;
 * only the load and a 1 ms waitKey remain. */
void PictureDemo::test1()
{
Mat src = imread("../x64/Debug/picture/2.jpg");
String winname = "Picture";
//imshow(winname, src);
//namedWindow(winname, WINDOW_GUI_EXPANDED);
//displayOverlay(winname, "1234");
//moveWindow(winname, 50, 50);
waitKey(1);
//destroyAllWindows();
}
/* Color-space conversion and saving. */
void PictureDemo::test2()
{
Mat src = imread("../x64/Debug/picture/2.jpg", IMREAD_REDUCED_COLOR_2);
String winname = "Picture";
//HSV ranges: h 0~180, s and v 0~255
Mat gray, hsv;
cvtColor(src, hsv, COLOR_BGR2HSV);
cvtColor(src, gray, COLOR_BGR2GRAY);
imshow("1", src);
imshow("2", hsv);
imshow("3", gray);
imwrite("../x64/Debug/picture/2_copy.jpg", hsv); //NOTE: saves the HSV-interpreted data, not the original BGR image
waitKey(0);
destroyAllWindows();
}
/* Mat object basics: clone/copy and construction. */
void PictureDemo::test3()
{
Mat dst = imread("../x64/Debug/picture/2_copy.jpg", IMREAD_REDUCED_COLOR_2);
Mat dst1 = dst.clone(); //deep copy
Mat dst2;
dst.copyTo(dst2); //deep copy
Mat dst3 = Mat::zeros(Size(10, 10), CV_8UC2); //10x10, 2 channels, zero-filled
cout << dst3 << endl;
cout << "*********************" << dst3.cols << "*********************" << dst3.rows << endl;
Mat dst4 = Mat::ones(Size(256, 256), CV_8UC3);
dst4 = 24; //scalar int assignment only sets the first channel
dst4 = Scalar(255, 26, 27); //Scalar assignment sets all three channels
imshow("3", dst4);
}
/* Mat pixel access: at<>() vs row pointers. */
void PictureDemo::test4()
{
Mat dst1 = Mat::zeros(Size(480, 270), CV_8UC3);
for(int i = 0; i < 270; i++) {
for(int j = 0; j < 480; j++) {
if(0) {//grayscale branch (dead code) — NOTE(review): at<uchar> on a CV_8UC3 image would address the wrong bytes if this were enabled
int ii = i % 255;
dst1.at<uchar>(i, j) = ii;
}
if(1) {//color gradient
dst1.at<Vec3b>(i, j)[0] = i % 255;
dst1.at<Vec3b>(i, j)[1] = 255 - i % 255;
dst1.at<Vec3b>(i, j)[2] = i % 255;
}
}
}
imshow("3", dst1);
Mat dst = imread("../x64/Debug/picture/2.jpg", IMREAD_REDUCED_COLOR_2);
for(int i = 0; i < dst.rows; i++) {//invert the image via at<>()
for(int j = 0; j < dst.cols; j++) {
dst.at<Vec3b>(i, j)[0] = 255 - dst.at<Vec3b>(i, j)[0];
dst.at<Vec3b>(i, j)[1] = 255 - dst.at<Vec3b>(i, j)[1];
dst.at<Vec3b>(i, j)[2] = 255 - dst.at<Vec3b>(i, j)[2];
}
}
for(int i = 0; i < dst.rows; i++) {//invert again via row pointers — net effect restores the original pixels
Vec3b *curRow = dst.ptr<Vec3b>(i);
for(int j = 0; j < dst.cols; j++) {
curRow[j][0] = 255 - curRow[j][0];
curRow[j][1] = 255 - curRow[j][1];
curRow[j][2] = 255 - curRow[j][2];
}
}
imshow("2", dst);
}
/* Mat arithmetic with scalars. */
void PictureDemo::test5()
{
Mat dst = imread("../x64/Debug/picture/3.jpg", IMREAD_REDUCED_COLOR_2);
dst = dst + Scalar(50, 50, 50); //add
imshow("1", dst);
dst = dst - Scalar(100, 100, 100); //subtract
imshow("2", dst);
dst = dst / Scalar(2, 2, 2); //divide
imshow("3", dst);
multiply(dst, Scalar(4, 4, 4), dst); //multiply; add/subtract/divide also have function forms
imshow("4", dst);
for(int i = 0; i < dst.rows; i++) {
for(int j = 0; j < dst.cols; j++) {
dst.at<Vec3b>(i, j)[0] = saturate_cast<uchar>(dst.at<Vec3b>(i, j)[0] - 150); //saturate_cast clamps to the valid range so values do not wrap
dst.at<Vec3b>(i, j)[1] = saturate_cast<uchar>(dst.at<Vec3b>(i, j)[1] - 150);
dst.at<Vec3b>(i, j)[2] = saturate_cast<uchar>(dst.at<Vec3b>(i, j)[2] - 150);
}
}
imshow("5", dst);
}
// Brightness trackbar callback: reloads the source image and subtracts the
// trackbar value from every channel.
static void callbackBar(int brightness, void *data)
{
Mat g_dst = imread("../x64/Debug/picture/3.jpg", IMREAD_REDUCED_COLOR_2);
subtract(g_dst, Scalar(brightness, brightness, brightness), g_dst); //same as g_dst = g_dst - Scalar(brightness, brightness, brightness)
imshow("亮度与对比度", g_dst);
}
/* Contrast trackbar callback.
 * The original called addWeighted() with a Scalar as the second input array;
 * addWeighted() requires both inputs to have identical size and type, so that
 * call fails an OpenCV assertion at runtime. convertTo() applies the intended
 * linear transform dst = src * alpha + beta directly. */
static void callbackBar2(int brightness, void *data)
{
    Mat g_dst = imread("../x64/Debug/picture/3.jpg", IMREAD_REDUCED_COLOR_2);
    // Scale every pixel by brightness/40 (contrast factor); no offset.
    g_dst.convertTo(g_dst, -1, (double)brightness / 40.0, 0.0);
    imshow("亮度与对比度", g_dst);
}
/* Trackbars to adjust brightness and contrast.
 * createTrackbar() retains the value pointer after this function returns, so
 * the backing ints must outlive the call — the original passed the address of
 * a local, which dangles as soon as test6() returns. Each trackbar also gets
 * its own variable; the original shared one int between both sliders. */
void PictureDemo::test6()
{
    namedWindow("亮度与对比度", WINDOW_AUTOSIZE);
    static int brightnessVal = 100; // static: must outlive this function
    static int contrastVal = 100;
    const int maxVal = 200;
    createTrackbar("brightness", "亮度与对比度", &brightnessVal, maxVal, callbackBar);
    createTrackbar("contrast", "亮度与对比度", &contrastVal, maxVal, callbackBar2);
}
/* Blend two pictures. */
void fuse()
{
Mat g_dst = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2); //images must match in size and type for addWeighted
Mat g_dst2 = imread("../x64/Debug/picture/6.jpg", IMREAD_REDUCED_COLOR_2);
Mat dst1;
addWeighted(g_dst, 0.5, g_dst2, 0.5, 0, dst1); //50/50 blend
imshow("融合", dst1);
resize(g_dst, g_dst, Size(g_dst.cols * 0.5, g_dst.rows * 0.5));
Mat imageROI = g_dst2(Rect(20, 40, g_dst.cols, g_dst.rows)); //ROI view into g_dst2 sized like the shrunken g_dst
imshow("a", imageROI);
imshow("b", g_dst2);
addWeighted(g_dst, 1, imageROI, 0, 0, imageROI); //writing through imageROI modifies g_dst2
imshow("混合后的图片", g_dst2);
Mat src = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
Mat src1 = imread("../x64/Debug/picture/4.jpg", IMREAD_REDUCED_COLOR_2);
Mat src2;
scaleAdd(src, 0.5, src1, src2); //src2 = src * 0.5 + src1 (sizes must match)
imshow("3", src2);
}
/* Keyboard handling. */
void PictureDemo::test7()
{
Mat g_dst = imread("../x64/Debug/picture/3.jpg", IMREAD_REDUCED_COLOR_2);
imshow("fuse", g_dst);
fuse();
Mat dst;
while(1) {
char c = waitKey(500); //waitKey returns -1 on timeout; truncated to char here
cout << c << endl; // echo the pressed key
if(c == '2') {//exit when '2' is pressed
break;
}
}
}
/* Color maps: cycle through every built-in colormap, one per 500 ms.
 * Fixes: the original loop could never be exited (waitKey result ignored) —
 * ESC now breaks out; the colormap count is derived from the array instead of
 * the hard-coded 22. */
void PictureDemo::test8()
{
    int colormap[] = {COLORMAP_AUTUMN, COLORMAP_BONE, COLORMAP_JET, COLORMAP_WINTER, COLORMAP_RAINBOW, COLORMAP_OCEAN,COLORMAP_SUMMER,
    COLORMAP_SPRING, COLORMAP_COOL, COLORMAP_HSV, COLORMAP_PINK, COLORMAP_HOT, COLORMAP_PARULA, COLORMAP_MAGMA, COLORMAP_INFERNO,
    COLORMAP_PLASMA, COLORMAP_VIRIDIS, COLORMAP_CIVIDIS, COLORMAP_TWILIGHT, COLORMAP_TWILIGHT_SHIFTED, COLORMAP_TURBO, COLORMAP_DEEPGREEN};
    const int mapCount = (int)(sizeof(colormap) / sizeof(colormap[0]));
    Mat g_dst = imread("../x64/Debug/picture/4.jpg", IMREAD_REDUCED_COLOR_2);
    imshow("原图", g_dst);
    Mat dst;
    int i = 0;
    while(1) {
        if(waitKey(500) == 27) { // ESC exits the demo
            break;
        }
        applyColorMap(g_dst, dst, colormap[i % mapCount]);
        i++;
        imshow("颜色风格", dst);
    }
}
/* Bitwise AND / OR / NOT / XOR. */
void PictureDemo::test9()
{
Mat dst1 = Mat::zeros(Size(270, 270), CV_8UC3);
Mat dst2 = Mat::zeros(Size(270, 270), CV_8UC3);
rectangle(dst1, Rect(0,0,40,40), Scalar(255,0,255), -1, LINE_AA, 0); //filled rectangle (thickness -1)
rectangle(dst2, Rect(30,30,50,50), Scalar(0,255,255), 2, LINE_8, 0); //outlined rectangle
imshow("1", dst1);
imshow("22", dst2);
Rect imgRect = Rect(0,0,40,40);
Point pt1 = Point(20, 200);
Point pt2 = Point(220, 500);
bool dsd = clipLine(imgRect, pt1, pt2); //clip the segment to the rectangle (result unused here)
Mat dst3;
bitwise_and(dst1, dst2, dst3);
imshow("3", dst3);
bitwise_or(dst1, dst2, dst3);
imshow("4", dst3);
Mat g_dst = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
bitwise_not(g_dst, g_dst);
imshow("5", g_dst);
bitwise_xor(dst1, dst2, dst3);
imshow("6", dst3);
}
/* Channel split and merge.
 * OpenCV stores images in B,G,R order, so split() yields vImg[0]=blue,
 * vImg[1]=green, vImg[2]=red. The original window titles labelled vImg[0]
 * "red" and vImg[2] "blue", which is backwards; fixed here. */
void PictureDemo::test10()
{
    Mat m = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
    imshow("原图", m);
    vector<Mat> vImg;
    split(m, vImg); // split into B, G, R planes
    imshow("blue", vImg[0]);
    imshow("green", vImg[1]);
    imshow("red", vImg[2]);
    Mat dst;
    vImg[0] = 0; // zero out the blue channel
    merge(vImg, dst); // merge back into one image
    imshow("merge", dst);
    int szMix[] = {0, 2, 1, 0, 2, 1}; // from-to channel index pairs
    mixChannels(&m, 1, &dst, 1, szMix, 3); // shuffle channels; m and dst may be Mat arrays
    imshow("mixChannels", dst);
}
/* Background replacement via HSV masking. */
void PictureDemo::test11()
{
Mat m = imread("../x64/Debug/picture/7.jpg", IMREAD_REDUCED_COLOR_2);
imshow("原图", m);
Mat hsv;
cvtColor(m, hsv, COLOR_BGR2HSV);//H hue, S saturation, V value
Mat mask;
inRange(hsv, Scalar(78,43,46), Scalar(99,255,255), mask); //pixels within [lower, upper] become 255, all others 0
imshow("mask", mask);
Mat newBack = Mat::zeros(m.size(), m.type());
newBack = Scalar(250, 240, 230); //new background color
bitwise_not(mask, mask); //invert so the foreground becomes white
imshow("mask1", mask);
m.copyTo(newBack, mask); //pixels where mask is 0 (black) are not copied into newBack
imshow("mask2", newBack);
}
/* Mean and standard deviation. */
void PictureDemo::test12()
{
Mat m = imread("../x64/Debug/picture/5.jpg", IMREAD_REDUCED_COLOR_2);
imshow("原图", m);
Scalar m1 = mean(m); //per-channel mean
cout << "*******" << m1 << endl;
vector<Mat> vImg;
split(m, vImg); //split channels
double iMin, iMax;
Point minLoc, maxLoc;
Mat n;
minMaxLoc(vImg[0], &iMin, &iMax, &minLoc, &maxLoc, n); //min/max of the first channel (n is an empty mask)
cout << "$$$$$$$$$" << iMin << "--" << iMax << minLoc << maxLoc << endl;
Mat mean, stddv;
meanStdDev(m, mean, stddv); //mean and standard deviation per channel
cout << "#########" << stddv << endl;
cout << "&&&&&&&&&" << mean << endl;
}
/* Drawing primitives. */
void PictureDemo::test13()
{
Mat dst1 = Mat::zeros(Size(1720, 880), CV_8UC3);
dst1 = Scalar(250, 240, 230); //background color
Rect re;
re.x = 50;
re.y = 50;
re.width = 200;
re.height = 200;
rectangle(dst1, re, Scalar(255,0,255), 3, LINE_4, 0);
circle(dst1, Point(400, 400), 90, Scalar(0,10,20), 4, LINE_AA, 0); //circle
line(dst1, Point(20, 200), Point(220, 500), Scalar(128,120,240), 5, LINE_AA, 0); //line
RotatedRect re2;
re2.center = Point(800, 400);
re2.size = Size(180, 250);
re2.angle = 0;
ellipse(dst1, re2, Scalar(128,120,240), -1, LINE_AA); //filled ellipse
line(dst1, Point(1020, 400), Point(1380, 400), Scalar(128,120,240), 3, LINE_4, 0);
ellipse(dst1, Point(1200, 400), Size(180, 220), 0, 180, 360, Scalar(128,120,240), 3, LINE_4, 0); //half ellipse (arc from 180 to 360 degrees)
Point p1(600, 100);
Point p2(900, 150);
Point p3(900, 350);
Point p4(850, 450);
Point p5(650, 450);
vector<Point> pts;
pts.push_back(p1);
pts.push_back(p2);
pts.push_back(p3);
pts.push_back(p4);
pts.push_back(p5);
polylines(dst1, pts, true, Scalar(128, 128, 128), 2, LINE_8, 0); //irregular polygon outline
imshow("原图", dst1);
}
/* Random numbers: draw a random triangle every 500 ms until 'a' is pressed. */
void PictureDemo::test14()
{
Mat dst = Mat::zeros(Size(500, 500), CV_8UC3);
RNG cNum(12345); //fixed seed -> reproducible sequence
while(1) {
int c = waitKey(500);
if(c == 'a') {//exit key
break;
}
int r = cNum.uniform(1, 255);
int g = cNum.uniform(1, 255);
int b = cNum.uniform(1, 255);
dst = Scalar(255 - r, 255 - g, 255 - b); //background is the complement of the triangle color
Point p1(cNum.uniform(1, 500), cNum.uniform(1, 500));
Point p2(cNum.uniform(1, 500), cNum.uniform(1, 500));
Point p3(cNum.uniform(1, 500), cNum.uniform(1, 500));
vector<Point> pts;
pts.push_back(p1);
pts.push_back(p2);
pts.push_back(p3);
polylines(dst, pts, true, Scalar(r, g, b), 2, LINE_8, 0); //triangle outline
imshow("原图", dst);
}
}
/* Filled polygons. */
void PictureDemo::test15()
{
Mat dst1 = Mat::zeros(Size(1720, 880), CV_8UC3);
dst1 = Scalar(250, 240, 230);
Point p1(600, 100);
Point p2(900, 150);
Point p3(900, 350);
Point p4(850, 450);
Point p5(650, 450);
Point p6(550, 300);
vector<Point> pts;
pts.push_back(p1);
pts.push_back(p2);
pts.push_back(p3);
pts.push_back(p4);
pts.push_back(p5);
pts.push_back(p6);
polylines(dst1, pts, true, Scalar(128, 128, 128), 4, LINE_8, 0); //outline
fillPoly(dst1, pts, Scalar(180, 180, 180)); //fill
vector<vector<Point>> vContours;
Point p11(1110, 110);
Point p21(1410, 160);
Point p31(1410, 360);
Point p41(1360, 460);
Point p51(1160, 460);
vector<Point> pts1;
pts1.push_back(p11);
pts1.push_back(p21);
pts1.push_back(p31);
pts1.push_back(p41);
pts1.push_back(p51);
vContours.push_back(pts1);
drawContours(dst1, vContours, 0, Scalar(180, 180, 180), -1); //outline + fill in one call (thickness -1 fills); passing -1 as the third argument would draw every contour in vContours
imshow("原图", dst1);
}
/* Mouse handling: drag a rectangle over the image and show the selection. */
Mat g_m1 = imread("../x64/Debug/picture/2.jpg", IMREAD_REDUCED_COLOR_2);
static void callbackMouse(int event, int x, int y, int flags, void* userdata)
{
Mat g_m;
static int iDownX = -1, iDownY = -1; //press position; -1 means no drag in progress
if(EVENT_LBUTTONDOWN == event) {//button pressed: remember the start point
iDownX = x;
iDownY = y;
g_m1.copyTo(g_m);
imshow("原图", g_m);
}
else if(EVENT_LBUTTONUP == event) {//button released: finalize the selection
if(iDownX >= 0 && iDownY >= 0 && x > iDownX && y > iDownY) {
Rect g_re;
g_re.x = iDownX;
g_re.y = iDownY;
g_re.width = x - iDownX;
g_re.height = y - iDownY;
g_m1.copyTo(g_m);
rectangle(g_m, g_re, Scalar(255,0,255), 1, LINE_4, 0);
imshow("原图", g_m);
imshow("原图1", g_m(g_re)); //show the selected region
iDownX = -1;
iDownY = -1;
}
}
else if(EVENT_MOUSEMOVE == event) {//dragging: live preview of the rectangle
if(iDownX >= 0 && iDownY >= 0 && x > iDownX && y > iDownY) {
Rect g_re;
g_re.x = iDownX;
g_re.y = iDownY;
g_re.width = x - iDownX;
g_re.height = y - iDownY;
g_m1.copyTo(g_m);
rectangle(g_m, g_re, Scalar(255,0,255), 1, LINE_4, 0);
imshow("原图", g_m);
imshow("原图1", g_m(g_re));
cout << "$$$$$$$$$" << x << "," << y << "," << iDownX << ","<< iDownY << endl;
}
}
}
/* Register the mouse handler.
 * The original loaded an image into a local Mat and passed its address as
 * userdata; that pointer dangles the moment this function returns, and
 * callbackMouse() never reads userdata anyway (it uses the global g_m1).
 * Pass nullptr and drop the unused load. */
void PictureDemo::test16()
{
    namedWindow("原图", WINDOW_AUTOSIZE);
    setMouseCallback("原图", callbackMouse, nullptr); // register mouse events
}
/* Normalization. */
void PictureDemo::test17()
{
Mat src = imread("../x64/Debug/picture/3.jpg", IMREAD_REDUCED_COLOR_2);
Mat src1;
imshow("原图", src);
src.convertTo(src1, CV_32F); //convert to another data type, optionally with a scale factor and offset
cout << "@@@@@@:" << src.type() << "$$$$$$:" << src1.type() << endl; //type 16 is CV_8UC3, 21 is CV_32FC3
imshow("1", src1); //displays incorrectly: float images must be in the 0-1 range
normalize(src1, src1, 1.0, 0, NORM_MINMAX); //rescale into 0-1
imshow("2", src1);
}
/* Resizing and interpolation. */
void PictureDemo::test18()
{
Mat src = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
Mat enlarge, narrow;
imshow("1", src);
resize(src, enlarge, Size(src.cols * 1.5, src.rows * 1.5), 0, 0, INTER_LANCZOS4); //upscale with Lanczos interpolation
imshow("22", enlarge);
resize(src, narrow, Size(src.cols / 2, src.rows / 2), 0, 0, INTER_LINEAR); //downscale with bilinear interpolation
imshow("3", narrow);
}
/* Mirroring. */
void PictureDemo::test19()
{
Mat src = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
Mat enlarge;
imshow("1", src);
flip(src, enlarge, 0); //flip vertically (around the x axis)
imshow("2", enlarge);
flip(src, enlarge, 1); //flip horizontally (around the y axis)
imshow("3", enlarge);
flip(src, enlarge, -1); //flip around both axes
imshow("4", enlarge);
}
/* Rotate src by iAngle degrees around its center, enlarging the output
 * canvas so that no corners are clipped; the result goes to dst. */
void FullAngleRotate(Mat src, Mat &dst, int iAngle)
{
    const int srcW = src.cols;
    const int srcH = src.rows;
    Mat rot = getRotationMatrix2D(Point(srcW / 2, srcH / 2), iAngle, 1.0);
    // |cos| and |sin| of the rotation angle, read straight from the matrix
    // (a 2x3 CV_64F affine matrix).
    const double absCos = abs(rot.at<double>(0, 0));
    const double absSin = abs(rot.at<double>(0, 1));
    // Bounding-box dimensions of the rotated image.
    const int outW = absCos * srcW + absSin * srcH;
    const int outH = absSin * srcW + absCos * srcH;
    // Shift the translation column so the rotated image sits centered in the
    // enlarged canvas.
    rot.at<double>(0, 2) += outW / 2 - srcW / 2;
    rot.at<double>(1, 2) += outH / 2 - srcH / 2;
    warpAffine(src, dst, rot, Size(outW, outH), INTER_LINEAR, 0, Scalar(0, 0, 0));
}
/* 旋转 */
void PictureDemo::test20()
{
Mat src = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
Mat enlarge;
imshow("原图", src);
int w = src.cols;
int h = src.rows;
auto M = getRotationMatrix2D(Point(w/2, h/2), 45, 1.0); //围绕Point(w/2, h/2)旋转45度
warpAffine(src, enlarge, M, src.size(), INTER_LINEAR, 0, Scalar(0, 0, 0)); //旋转45度
imshow("22", enlarge);
Mat enlarge2;
FullAngleRotate(src, enlarge2, 45);
imshow("3", enlarge2);
//平移
auto M2 = M;
Mat enlarge1;
M2.at<double>(0, 2) = M2.at<double>(0, 2) + 20; //更改旋转矩阵M中的(0,2)处的值,此处含义类似更改旋转中心宽度
M2.at<double>(1, 2) = M2.at<double>(1, 2) + 20; //更改旋转矩阵M中的(1,2)处的值,此处含义类似更改旋转中心高度
warpAffine(src, enlarge1, M2, Size(w + 40, h + 40), INTER_LINEAR, 0, Scalar(0, 0, 0));
imshow("4", enlarge1);
//歪斜
Point2f srcTri[3];
Point2f dstTri[3];
srcTri[0] = Point2f(0, 0); //设置源图像和目标图像上的三组点以计算仿射变换
srcTri[1] = Point2f(src.cols - 1, 0);
srcTri[2] = Point2f(0, src.rows - 1);
dstTri[0] = Point2f(src.cols * 0.0, src.rows * 0.33);
dstTri[1] = Point2f(src.cols * 0.85, src.rows * 0.25);
dstTri[2] = Point2f(src.cols * 0.15, src.rows * 0.7);
Mat warp_dst;
auto warp_mat = getAffineTransform(srcTri, dstTri);
warpAffine(src, warp_dst, warp_mat, warp_dst.size());
imshow("歪斜", warp_dst);
//在歪斜的基础上旋转
Point center = Point(warp_dst.cols / 2, warp_dst.rows / 2);
double angle = -50.0; //计算绕图像中点顺时针旋转50度
double scale = 0.5; //缩放因子为0.6的旋转矩阵
Mat warpRotate;
Mat rot_mat(2, 3, CV_32FC1);
rot_mat = getRotationMatrix2D(center, angle, scale); //通过上面的旋转细节信息求得旋转矩阵
warpAffine(warp_dst, warpRotate, rot_mat, warp_dst.size()); //旋转已扭曲图像
imshow("歪斜旋转", warpRotate);
//逆仿射变换
Mat inverse;
Mat M1;
invertAffineTransform(M, M1);
warpAffine(enlarge, inverse, M1, Size(w, h), INTER_LINEAR, 0, Scalar(0, 0, 0));
imshow("逆仿射变换", inverse);
}
/* Sparse matrix basics: ptr/ref/find element access and conversion to Mat.
 * Fix: the original named a local variable `cout`, shadowing std::cout
 * (the file has `using namespace std;`); renamed to nonZeroCount. */
void PictureDemo::test21(Mat src)
{
    SparseMat image(src);
    *image.ptr(0, 0, 1) = 1000; // third argument: 1 = allocate the element if missing, 0 = do not allocate; value is truncated to uchar
    int k = *image.ptr(0, 0, 1);
    *(float *)image.ptr(0, 0, 1) = 3000.14; // reinterpret the element storage as float
    float kk = *(float *)image.ptr(0, 0, 1);
    image.ref<float>(2, 3) = 6000.28; // ref<> allocates the element if needed
    kk = image.ref<float>(2, 3);
    image.ref<int>(3, 5) = 50 + 55*256 + 44*256*256; // pack three channel bytes: 50 = ch1, 55 = ch2, 44 = ch3
    int value = image.ref<int>(3, 5);
    int k0 = (value & 0xFF); // first channel
    int k1 = (value & 0xFF00) / 256; // second channel
    int k2 = (value & 0xFF0000) / (256*256); // third channel
    kk = *image.find<float>(4, 6); // find() returns a pointer (null if the element is absent)
    int nonZeroCount = image.nzcount(); // number of non-zero elements
    Mat dst;
    image.copyTo(dst); // convert back to a dense Mat
    imshow("4", dst);
}
/* Basic arithmetic: absolute difference of two images. */
void PictureDemo::test22()
{
Mat src = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
Mat src1 = imread("../x64/Debug/picture/4.jpg", IMREAD_REDUCED_COLOR_2);
imshow("0", src);
Mat enlarge = abs(src - src1); //element-wise |src - src1| (sizes must match)
imshow("1", enlarge);
}
/* Per-pixel maximum and minimum of two images. */
void PictureDemo::test23()
{
Mat src = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
Mat src1 = imread("../x64/Debug/picture/4.jpg", IMREAD_REDUCED_COLOR_2);
Mat enlarge;
max(src, src1, enlarge); //element-wise maximum
imshow("22", enlarge);
Mat enlarge1;
min(src, src1, enlarge1); //element-wise minimum
imshow("3", enlarge1);
}
/* Norms. */
void PictureDemo::test24()
{
Mat src = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
Mat src1 = imread("../x64/Debug/picture/4.jpg", IMREAD_REDUCED_COLOR_2);
double ass = norm(src); //L2 norm of src
cout << ass << endl;
ass = norm(src1);
cout << ass << endl;
double ass1 = norm(src, src1); //L2 norm of the difference (sizes must match)
cout << ass1 << endl;
}
/* Perspective matrix transform applied to vectors. */
void PictureDemo::test25()
{
float fData[16] = {0 };
for(int i = 0; i < 16; i++) {
fData[i] = i * 5;
}
Mat m(4, 4, CV_32FC1, fData); //4x4 matrix transforming 3-channel elements
Mat src = imread("../x64/Debug/picture/3.jpg", IMREAD_REDUCED_COLOR_2);
Mat src2;
src.convertTo(src2, CV_32F); //perspectiveTransform needs floating-point input
Mat src1;
perspectiveTransform(src2, src1, m); //treats each pixel as a 3-vector and projects it onto a new view plane
imshow("3", src1);
}
/* Image orientation field via gradients. */
void PictureDemo::test26()
{
Mat src = imread("../x64/Debug/picture/1.jpg");
Mat grad1, grad2, angle;
Sobel(src, grad1, CV_64FC1, 1, 0); //x gradient
Sobel(src, grad2, CV_64FC1, 0, 1); //y gradient
imshow("111", grad1);
imshow("222", grad2);
blur(grad1, grad1, Size(6,6));
blur(grad2, grad2, Size(6,6));
//phase(grad1, grad2, angle, true); //gradient angle in degrees
//imshow("11", grad1);
//imshow("22", grad2);
//cout << angle << endl;
Mat grad3, grad4;
Scharr(src, grad3, CV_64FC1, 1, 0); //the Scharr operator refines Sobel, with higher accuracy at edges
Scharr(src, grad4, CV_64FC1, 0, 1);
imshow("333", grad3);
imshow("444", grad4);
}
/* Random fills and reduction. */
void PictureDemo::test27()
{
Mat src = imread("../x64/Debug/picture/2.jpg", IMREAD_REDUCED_COLOR_2);
randu(src, Scalar(0,0,0), Scalar(0,0,255)); //fill with uniformly distributed random values
imshow("1", src);
randn(src, Scalar(0,0,0), Scalar(0,0,255)); //fill with Gaussian random values (mean, stddev)
imshow("22", src);
Mat src1;
reduce(src, src1, 0, REDUCE_SUM, CV_32F); //collapse all rows into a single sum vector
cout << src1 << endl;
}
/* Tile an image. */
void PictureDemo::test28()
{
Mat src;
Mat src1 = imread("../x64/Debug/picture/10.jpg", IMREAD_REDUCED_COLOR_2);
repeat(src1, 8, 6, src); //tile 8 rows x 6 columns of copies
imshow("22", src);
}
/* Diagonal of a square image via setIdentity. */
void PictureDemo::test29()
{
Mat src1 = imread("../x64/Debug/picture/16.png", IMREAD_REDUCED_COLOR_2);
setIdentity(src1, Scalar(255, 225, 0)); //writes the scalar on the main diagonal and zeros everywhere else
imshow("22", src1);
}
/* Solve the linear system A*X = B. */
void PictureDemo::test30()
{
Mat A = (Mat_<float>(2, 2) << 1,2,3,4);
Mat B = (Mat_<float>(2, 1) <<5,11);
Mat C;
solve(A, B, C, DECOMP_LU); //x + 2y = 5, 3x + 4y = 11 -> expected solution (1, 2)
cout << C << endl;
}
/* Roots of a cubic equation. */
void PictureDemo::test31()
{
Mat A = (Mat_<float>(1, 4) << 1,2,3,4); //1*x^3 + 2*x^2 + 3*x + 4 = 0
Mat B;
solveCubic(A, B);
cout << B << endl;
A = (Mat_<float>(1, 3) << 1,2,3); //with 3 coefficients solveCubic solves x^3 + 1*x^2 + 2*x + 3 = 0 (leading coefficient is implied 1)
solveCubic(A, B);
cout << B << endl;
//solvePoly finds the roots of a general degree-n polynomial
}
/* Sorting a 2-D array. */
void PictureDemo::test32()
{
Mat A = (Mat_<float>(3, 3) << 3,1,2,4,8,2,9,7,8);
Mat B; //B receives the indices of the sorted order, not the values
sortIdx(A, B, SORT_EVERY_COLUMN); //sort each column
cout << A << endl;
cout << B << endl;
sortIdx(A, B, SORT_EVERY_ROW); //sort each row
cout << A << endl;
cout << B << endl;
}
/* Sum of the diagonal elements. */
void PictureDemo::test33()
{
Mat src = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
auto data = trace(src); //per-channel Scalar of diagonal sums
cout << data << endl;
}
/* Affine transform applied to a point set. */
void PictureDemo::test34()
{
Point2f triangleA[3];
Point2f triangleB[3];
triangleA[0] = Point2f(0,0);
triangleA[1] = Point2f(100,0);
triangleA[2] = Point2f(0,50);
triangleB[0] = Point2f(100,50);
triangleB[1] = Point2f(200,50);
triangleB[2] = Point2f(100,100);
Mat affineMat = getAffineTransform(triangleA, triangleB); //2x3 matrix mapping triangle A onto triangle B
Mat m;
m.create(3,1,CV_64FC2); //three 2-D points stored as a 2-channel column vector
m.at<Vec2d>(0,0)[0] = 50;
m.at<Vec2d>(0,0)[1] = 25;
m.at<Vec2d>(1,0)[0] = 100;
m.at<Vec2d>(1,0)[1] = 25;
m.at<Vec2d>(2,0)[0] = 100;
m.at<Vec2d>(2,0)[1] = 50;
Mat res;
res.create(3, 1, CV_64FC2);
transform(m, res, affineMat); //apply the affine matrix to every point
cout << m << endl;
cout << res << endl;
cout << affineMat << endl;
}
/* Transpose: flip the matrix across its main diagonal. */
void PictureDemo::test35()
{
Mat src = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
Mat dst;
transpose(src, dst);
imshow("1", src);
imshow("22", dst);
}
/* Image encoding and decoding.
 * Fix: the encoded JPEG bytes were written through a text-mode stream
 * (also oddly flagged with ifstream::out). In text mode on Windows every
 * 0x0A byte is expanded to 0x0D 0x0A, corrupting the binary data; the
 * stream is now opened in binary mode. */
void PictureDemo::test36()
{
    Mat src = imread("../x64/Debug/picture/1.jpg", IMREAD_COLOR);
    // Encode to an in-memory JPEG.
    vector<uchar> buf;
    imencode(".jpg", src, buf);
    string strEncode(buf.begin(), buf.end());
    string strPath = "../x64/Debug/picture/imgencode.txt";
    ofstream ofs(strPath, ofstream::out | ofstream::binary);
    assert(ofs.is_open());
    ofs << strEncode;
    ofs.flush();
    ofs.close();
    // Decode the buffer back into an image.
    Mat dst;
    dst = imdecode(buf, IMREAD_COLOR);
    imshow("22", dst);
}
Mat g_src; //image shared by the trackbar "switch" callbacks below
// "On" state: show the loaded image.
void switchOn()
{
imshow("按键", g_src);
}
/* "Off" state: show an all-black image with the same dimensions as g_src.
 * The original aliased g_src into dst and then immediately replaced the
 * header with a new black Mat — the alias served no purpose, so the black
 * image is now constructed directly. */
void switchOff()
{
    Mat dst(g_src.rows, g_src.cols, CV_8UC3, Scalar(0, 0, 0));
    imshow("按键", dst);
}
/* Trackbar callback emulating a toggle button: position 1 shows the image,
 * any other position shows black. */
void SwitchCallback(int pos, void *src)
{
    if (pos != 1) {
        switchOff();
        return;
    }
    switchOn();
}
/* Trackbar emulating an on/off button.
 * Fix: createTrackbar() retains the value pointer after this function
 * returns, so the backing int must have static storage duration — the
 * original passed the address of a local. */
void PictureDemo::test37()
{
    static int closeFlag = 0; // static: must outlive this function
    g_src = imread("../x64/Debug/picture/1.jpg");
    string strName = "按键";
    namedWindow(strName, WINDOW_AUTOSIZE);
    createTrackbar("开关", strName, &closeFlag, 1, SwitchCallback);
}
/* Add borders to an image.
 * Fix: removed the stray no-op statement `borderInterpolate;` (a function
 * name used as an expression statement — it does nothing). */
void PictureDemo::test38()
{
    Mat aa = imread("../x64/Debug/picture/5.jpg");
    Mat dst;
    // Constant-colored border: 10 px top/bottom, 5 px left/right.
    copyMakeBorder(aa, dst, 10, 10, 5, 5, BORDER_CONSTANT, Scalar(255,255,0));
    imshow("gg", dst);
    // Border that replicates the outermost pixels.
    copyMakeBorder(aa, dst, 10, 10, 5, 5, BORDER_REPLICATE);
    imshow("ggg", dst);
}
/* Denoising. */
void PictureDemo::test39()
{
Mat aa = imread("../x64/Debug/picture/4.jpg", IMREAD_REDUCED_COLOR_2);
imshow("原图", aa);
Mat dst;
fastNlMeansDenoisingColored(aa, dst, 3, 3, 7, 21); //non-local means denoising for color images
imshow("去噪", dst);
}
/* Integral images. */
void PictureDemo::test40()
{
Mat sum, sqrsum;
Mat src = imread("../x64/Debug/picture/10.jpg", IMREAD_REDUCED_COLOR_2);
integral(src, sum, sqrsum, CV_32S, CV_32F); //sum table and squared-sum table
normalize(sum, sum, 0, 255, NORM_MINMAX, CV_8UC1, Mat()); //rescale for display
normalize(sqrsum, sqrsum, 0, 255, NORM_MINMAX, CV_8UC1, Mat());
imshow("原图", src);
imshow("和表积分图", sum);
imshow("平方和表积分图", sqrsum);
}
/* Hough line transform. */
void PictureDemo::test41()
{
Mat dst, mid, dst1;
Mat src = imread("../x64/Debug/picture/3.jpg", IMREAD_REDUCED_COLOR_2);
Canny(src, mid, 100, 200, 3); //edge map feeds the Hough transform
imshow("边缘", mid);
cvtColor(mid, dst, COLOR_GRAY2BGR); //convert back to BGR so the detected lines can be drawn in color
cvtColor(mid, dst1, COLOR_GRAY2BGR);
imshow("边缘BGR", dst);
//HoughLines: returns lines in (rho, theta) form
vector<Vec2f> vLines;
HoughLines(mid, vLines, 1, CV_PI / 180.0, 200, 0, 0);
for(size_t i = 0; i < vLines.size(); ++i) {
float rho = vLines[i][0], theta = vLines[i][1];
Point pt1, pt2;
double a = cos(theta), b = sin(theta);
double x0 = a * rho, y0 = b * rho; //point on the line closest to the origin
double length = max(src.rows, src.cols); //extend far enough to cross the whole image
pt1.x = cvRound(x0 + length * (-b));
pt1.y = cvRound(y0 + length * (a));
pt2.x = cvRound(x0 - length * (-b));
pt2.y = cvRound(y0 - length * (a));
line(dst, pt1, pt2, Scalar(255, 255, 0), 1, LINE_AA);
}
imshow("线条", dst);
//HoughLinesP: probabilistic variant that returns finite line segments
vector<Vec4i> linesP1;
HoughLinesP(mid, linesP1, 1, CV_PI / 180.0, 200, 30, 30);
for(size_t i = 0; i < linesP1.size(); i++) {
line(dst1, Point(linesP1[i][0], linesP1[i][1]), Point(linesP1[i][2], linesP1[i][3]), Scalar(255, 255, 0), 1, LINE_AA);
}
imshow("线条1", dst1);
}
/* Hough transform on a raw point set. */
void PictureDemo::test42()
{
//detects whether a subset of the given points lies on a straight line
Mat lines; //detection results
vector<Vec3d> line3d; //results as (votes, rho, theta) triples
vector<Point2f> point; //points to examine
const static float Points[20][2] = {
{0.0f, 369.0f}, {10.0f, 364.0f}, {20.2f, 358.0f}, {30.0f, 352.0f},
{40.0f, 364.0f}, {50.0f, 341.0f}, {60.0f, 335.0f}, {70.0f, 329.0f},
{80.0f, 323.0f}, {90.0f, 318.0f}, {100.0f, 312.0f}, {110.0f, 306.0f},
{120.0f, 300.0f}, {130.0f, 295.0f}, {140.0f, 289.0f}, {150.0f, 284.0f},
{160.0f, 277.0f}, {170.0f, 271.0f}, {180.0f, 266.0f}, {190.0f, 260.0f}
};
for(int i = 0; i < 20; i++) {
point.push_back(Point2f(Points[i][0], Points[i][1]));
}
HoughLinesPointSet(point, lines, 20, 1, 0, 360, 1, 0, CV_PI / 2.0f, CV_PI / 180.0f);
lines.copyTo(line3d);
for(int i = 0; i < line3d.size(); i++) {
cout << "votes:" << (int)line3d.at(i).val[0] << "," << "rho:" << line3d.at(i).val[1] << "," << "theta:" << line3d.at(i).val[2] << endl;
}
}
/* Hough circle transform. */
void PictureDemo::test43()
{
Mat dst, mid;
Mat src = imread("../x64/Debug/picture/26.png", IMREAD_REDUCED_COLOR_2);
imshow("原图", src);
dst = src.clone();
cvtColor(src, mid, COLOR_BGR2GRAY);
GaussianBlur(mid, mid, Size(9,9), 2, 2); //smooth first to suppress false circle detections
vector<Vec3f> vCircles;
HoughCircles(mid, vCircles, HOUGH_GRADIENT, 1.5, 100, 200, 200, 0, 0);
for(size_t i = 0; i < vCircles.size(); ++i) {
Point center(cvRound(vCircles[i][0]), cvRound(vCircles[i][1])); //circle center
int radius = cvRound(vCircles[i][2]); //radius
circle(dst, center, 3, Scalar(255, 255, 0), -1, 8, 0);
circle(dst, center, radius, Scalar(0, 0, 255), 3, 8, 0);
}
imshow("result", dst);
}
/* Distance transform. */
void PictureDemo::test44()
{
//computes, for every pixel, the distance to the nearest pixel satisfying a
//condition (here: the nearest zero pixel of the binary image), then uses
//that distance as a gray value
Mat dst, mid;
Mat src = imread("../x64/Debug/picture/27.jpg");
imshow("原图", src);
Mat bin;
cvtColor(src, bin, COLOR_BGR2GRAY);
threshold(bin, bin, 80, 255, THRESH_BINARY);
imshow("bin", bin);
Mat Dist1;
distanceTransform(bin, Dist1, DIST_L1, 3);
normalize(Dist1, Dist1, 0, 1, NORM_MINMAX); //scale into 0-1 for display
imshow("dist1", Dist1);
Mat Dist, Labels;
distanceTransform(bin, Dist, Labels, DIST_L1, 3, DIST_LABEL_CCOMP); //also labels the nearest connected component
normalize(Dist, Dist, 0, 1, NORM_MINMAX);
normalize(Labels, Labels, 0, 255, NORM_MINMAX);
imshow("dist2", Dist);
//imshow("labels2", Labels);
FileStorage cWrite;
cWrite.open("../x64/Debug/xml/xml1.xml", FileStorage::Mode::WRITE);
cWrite << "pic" << Labels; //dump the label matrix to a file for inspection
cWrite.release();
}
/* Flood fill. */
void PictureDemo::test45()
{
Mat dst, mid;
Mat src = imread("../x64/Debug/picture/26.png");
imshow("原图", src);
mid = src.clone();
Rect roi;
floodFill(src, Point(src.cols * 3 / 4, src.rows / 4), Scalar(255, 0, 255), &roi, Scalar(1, 1, 1), Scalar(10, 10, 10), 8);
imshow("填充", src);
Mat mask = Mat::zeros(mid.rows + 2,mid.cols + 2, CV_8UC1); //mask must be 2 px larger than the image
mask.at<uchar>(mid.rows / 2, mid.cols / 2) = 255; //non-zero mask pixels block the fill there
floodFill(mid, mask, Point(mid.cols * 3 / 4, mid.rows / 4), Scalar(255, 0, 255), &roi, Scalar(1, 1, 1), Scalar(10, 10, 10), 8);
imshow("填充1", mid);
}
// Map a watershed region index to a color. NOTE(review): a default-seeded RNG
// is constructed on every call, so the "random" color is actually a
// deterministic function of `value` — every pixel of a region gets the same
// color, which is what the caller relies on.
Vec3b RandomColor(int value)
{
value = value % 255; //keep the upper bound within 0~255
RNG rng;
int aa = rng.uniform(0, value);
int bb = rng.uniform(0, value);
int cc = rng.uniform(0, value);
return Vec3b(aa, bb, cc);
}
/* Watershed segmentation: Canny edges -> contours as seed markers ->
 * watershed -> random-color fill -> blend with the original image.
 * Fix: the marker loop indexed hierarchy[0] unconditionally, which is out of
 * bounds when findContours() returns no contours; it is now guarded. */
void PictureDemo::test46()
{
    Mat image = imread("../x64/Debug/picture/3.jpg");
    Mat imageGray;
    cvtColor(image,imageGray, COLOR_BGR2GRAY);
    GaussianBlur(imageGray,imageGray,Size(5,5),2);
    imshow("高斯滤波",imageGray);
    Canny(imageGray,imageGray,80,150);
    imshow("Canny边缘检测",imageGray);
    // Find contours on the edge map.
    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    findContours(imageGray, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point());
    Mat marks(image.size(), CV_32S); // marker matrix for watershed()
    marks = Scalar::all(0);
    if(!contours.empty()) { // guard: hierarchy is empty when no contours exist
        int index = 0;
        for(int i = 0; index >= 0; index = hierarchy[index][0], i++ ) {
            // Number each contour region: every contour becomes one seed
            // ("water injection point") for the watershed.
            drawContours(marks, contours, index, Scalar::all(i+1), 1, 8, hierarchy);
        }
    }
    Mat marksShows;
    convertScaleAbs(marks, marksShows);
    imshow("轮廓", marksShows);
    watershed(image, marks);
    Mat afterWatershed;
    convertScaleAbs(marks, afterWatershed);
    imshow("分水岭", afterWatershed);
    // Fill every region with its own color.
    Mat PerspectiveImage = Mat::zeros(image.size(), CV_8UC3);
    for(int i=0; i<marks.rows; i++) {
        for(int j=0; j<marks.cols;j++) {
            int index = marks.at<int>(i,j);
            if(index == -1) { // -1 marks watershed boundary pixels
                PerspectiveImage.at<Vec3b>(i,j) = Vec3b(255,255,255);
            }
            else {
                PerspectiveImage.at<Vec3b>(i,j) = RandomColor(index);
            }
        }
    }
    imshow("填充分水岭", PerspectiveImage);
    FileStorage cWrite;
    cWrite.open("../x64/Debug/xml/xml1.xml", FileStorage::Mode::WRITE);
    cWrite << "pic" << PerspectiveImage;
    cWrite.release();
    // Blend the colored segmentation with the original image.
    Mat wshed;
    addWeighted(image, 0.4, PerspectiveImage, 0.6, 0, wshed);
    imshow("分水岭与原图融合",wshed);
}
/* grabCut foreground extraction. */
void PictureDemo::test47()
{
Mat image = imread("../x64/Debug/picture/28.jpg");
Rect rectangle(79, 160, 900, 400); //initial foreground rectangle
Mat background, frontground, result;
grabCut(image, result, rectangle, background, frontground, 5, GC_INIT_WITH_RECT);
compare(result, GC_PR_FGD, result, CMP_EQ); //keep only pixels labelled "probably foreground" (drops the background inside the rectangle)
Mat foreground(image.size(), CV_8UC3, Scalar(255, 255, 255));
image.copyTo(foreground, result); //copy the foreground pixels onto white
cv::rectangle(image, rectangle, Scalar(200,0,200), 4); //cv:: qualifier needed: the local `rectangle` shadows the function
imshow("Rectangle", image);
imshow("Foreground", foreground);
}
/* MeanShift filtering. */
void PictureDemo::test48()
{
//flattens fine texture; most color gradients become uniform
Mat image = imread("../x64/Debug/picture/3.jpg");
Mat res; //filtered image
int spatialRad = 50; //spatial window radius
int colorRad = 50; //color window radius
int maxPyrLevel = 2; //number of pyramid levels
pyrMeanShiftFiltering(image, res, spatialRad, colorRad, maxPyrLevel);
imshow("res", res);
}
/* Template matching. */
void PictureDemo::test49()
{
//finds the region of an image most similar to a template image
Mat img = imread("../x64/Debug/picture/6.jpg", IMREAD_COLOR);
Mat img1 = imread("../x64/Debug/picture/6_cut.png", IMREAD_COLOR);
Mat dst;
matchTemplate(img, img1, dst, TM_SQDIFF);
double minVal, maxVal; //best and worst match scores
Point minLoc, maxLoc; //positions of the best and worst scores
minMaxLoc(dst, &minVal, &maxVal, &minLoc, &maxLoc); //locate the best match
//whether the min or the max marks the best match depends on the method; for TM_SQDIFF it is the minimum
circle(img, Point(minLoc.x + img1.cols / 2, minLoc.y + img1.rows / 2), 10, Scalar(0, 0, 255), 2, 8, 0);
normalize(dst, dst, 0, 1, NORM_MINMAX);
imshow("Source Window" , img);
imshow("Template Window" , img1);
imshow("Target Window" , dst); //each pixel of the result is one similarity score
}
/* 图像连通域分析 */
void PictureDemo::test50()
{
Mat dst, dst2, riceBW, img1, stats, centroids;
Mat img = imread("../x64/Debug/picture/31.jpg");
imshow("原图", img);
cvtColor(img, img1, COLOR_BGR2GRAY);
threshold(img1, riceBW, 50, 255, THRESH_BINARY);
int num2 = connectedComponents(riceBW, dst2, 8, CV_16U); //纹理也能划分为区域
int num, nccomps = connectedComponentsWithStats(riceBW, dst, stats, centroids); //比上面的多了一个区域面积
RNG rng(10086);
vector<Vec3b> colors2;
vector<Vec3b> colors;
for(int i = 0; i < num2; i++) {
//使用均匀分布的随机数确定颜色
Vec3b vec3 = Vec3b(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
colors2.push_back(vec3);
}
for(int i = 1; i < nccomps; i++) {
//使用均匀分布的随机数确定颜色
Vec3b vec3 = Vec3b(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
colors.push_back(vec3);
if(stats.at<int>(i-1, CC_STAT_AREA) < 5) {//面积小于5的致黑
colors.push_back(Vec3b(0,0,0));
}
else {
colors.push_back(vec3);
}
}
//以不同颜色标记出不同的连通域
Mat result = Mat::zeros(img.size(), img.type());
Mat result2 = Mat::zeros(img.size(), img.type());
int w = result.cols;
int h = result.rows;
for(int row = 0; row < h; row++) {
for(int col = 0; col < w; col++) {
int label2 = dst2.at<uint16_t>(row, col);
int label = dst.at<int>(row, col);
if(label == 0) {//背景的黑色不改变
continue;
}
result.at<Vec3b>(row, col) = colors[label];
result2.at<Vec3b>(row, col) = colors2[label2];
}
}
imshow("标记后的图像2", result2);
imshow("标记后的图像", result);
}
/* Contour polygon fitting. */
void PictureDemo::test51()
{
Mat dst, temp;
Mat img = imread("../x64/Debug/picture/16.png");
cvtColor(img, temp, COLOR_BGR2GRAY);
GaussianBlur(temp, temp, Size(3,3), 0, 0);
Canny(temp, temp, 200, 220);
dilate(temp, temp, Mat()); //thicken the edges so the contours close
vector<vector<Point>> contours;
findContours(temp, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
//polygon approximation
vector<vector<Point>> approxCurve(contours.size());
for(uint i=0; i<contours.size(); i++) {
double len = arcLength(contours[i], true); //contour perimeter
approxPolyDP(contours[i], approxCurve[i], len*0.02, true); //approximate with a tolerance of 2% of the perimeter
}
dst = img.clone();
drawContours(dst, approxCurve, -1, Scalar(255,0,255), 2);
imshow("dst", dst);
}
/* Perimeter of a closed polygon. */
void PictureDemo::test52()
{
Mat mat = Mat::zeros(500, 500, CV_8UC1);
Point2f a(100,100);
Point2f b(400,100);
Point2f c(400,400);
Point2f d(100,400);
vector<Point> contour_rec;
contour_rec.push_back(a);
contour_rec.push_back(b);
contour_rec.push_back(c);
contour_rec.push_back(d);
double len_rec = arcLength(contour_rec, true); //perimeter of the 300x300 square: 1200 (result currently unused)
}
/* Bounding geometry of contours: boxes, circles, ellipses, hulls, point tests, line fitting */
void PictureDemo::test53()
{
    Mat src = imread("../x64/Debug/picture/32.png", IMREAD_GRAYSCALE);
    Mat result = imread("../x64/Debug/picture/32.png");
    Mat th1;
    threshold(src, th1, 0, 255, THRESH_OTSU); // automatic Otsu binarization
    // External contours of the connected regions
    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    Point2f szRect[4]; // corners of the rotated bounding box
    findContours(th1, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE, Point());
    vector<Point2f>centers(contours.size()); // enclosing-circle centers
    vector<float>radius(contours.size());    // enclosing-circle radii
    vector<vector<Point>> hull(contours.size());
    Point po = Point(98, 296); // probe point for pointPolygonTest
    circle(result, po, 5, Scalar(255,255,255), 4, LINE_AA, 0);
    for(int i = 0; i < contours.size(); ++i) {
        Rect rect = boundingRect(Mat(contours[i])); // axis-aligned bounding box
        rectangle(result, rect, Scalar(255, 255, 255), 2);
        RotatedRect box = minAreaRect(Mat(contours[i])); // minimum-area rotated box
        circle(result, Point(box.center.x, box.center.y), 5, Scalar(0, 255, 0), -1, 8);
        box.points(szRect);
        for(int j = 0; j < 4; j++) {
            line(result, szRect[j], szRect[(j+1)%4], Scalar(0, 0, 255), 2, 8); // each edge of the rotated box
        }
        minEnclosingCircle(contours[i], centers[i], radius[i]); // minimum enclosing circle
        circle(result, centers[i], radius[i], Scalar(0, 255, 255), 2, 8);
        RotatedRect ell = fitEllipse(contours[i]); // ellipse fit (requires >= 5 contour points)
        ellipse(result, ell, Scalar(0,255,128), 2, LINE_AA);
        convexHull(contours[i], hull[i], false, true); // convex hull
        double value = pointPolygonTest(contours[i], po, false); // -1 outside, 0 on edge, +1 inside
        cout << "点在不在轮廓内:" << value << endl;
        // Signed distance to the contour: negative outside, positive inside.
        // FIX: this was stored in an int, silently truncating the fractional distance.
        double value1 = pointPolygonTest(contours[i], po, true);
        cout << "点到轮廓的距离:" << value1 << endl;
        bool ret = isContourConvex(contours[i]); // convexity test (pixel jaggedness counts)
        cout << "轮廓是不是凸:" << ret << endl;
    }
    drawContours(result, hull, -1, Scalar(255,0,0), 2, 8, hierarchy);
    imshow("original", src);
    imshow("thresh", th1);
    imshow("result", result);
    /*********************************************************************************/
    // Least-squares straight-line fit through a point set
    Mat img = Mat::zeros(500,500,CV_8UC3);
    vector<Point> point_set;
    Point P1(0, 0), P2(5, 10), P3(10, 20), P4(20, 43), P5(100, 202), P6(150, 280), P7(200, 440), P8(250,496);
    point_set.push_back(P1);
    point_set.push_back(P2);
    point_set.push_back(P3);
    point_set.push_back(P4);
    point_set.push_back(P5);
    point_set.push_back(P6);
    point_set.push_back(P7);
    point_set.push_back(P8);
    for(int i = 0; i < point_set.size(); i++) { // draw the input points
        circle(img, point_set[i], 5, Scalar(0, 255, 0), 2, 8, 0);
    }
    Vec4f fitline;
    fitLine(point_set, fitline, DIST_L2, 0, 0.01, 0.01); // (vx, vy, x0, y0)
    double k_line = fitline[1] / fitline[0]; // slope from the direction vector
    Point p1(0, k_line*(0 - fitline[2]) + fitline[3]); // line endpoints via y = k(x - x0) + y0
    Point p2(img.rows - 1, k_line*(img.rows - 1 - fitline[2]) + fitline[3]);
    line(img, p1, p2, Scalar(0, 0, 255), 2);
    imshow("原图+拟合结果", img);
}
/* Image moments: Hu invariants, centroids, shape matching */
void PictureDemo::test54()
{
    Mat grayImage = imread("../x64/Debug/picture/32.png", IMREAD_GRAYSCALE);
    Mat drawing = imread("../x64/Debug/picture/32.png");
    Mat efsdf = imread("../x64/Debug/picture/31.jpg", IMREAD_GRAYSCALE);
    Mat canny_output;
    vector<vector<Point>>contours;
    vector<Vec4i>hierarchy;
    // Canny edge detection
    Canny(grayImage, canny_output, 100, 100 * 2, 3);
    // contour extraction
    findContours(canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));
    double hu[7];
    vector<Moments>mu(contours.size());
    vector<Point2f>mc(contours.size());
    for(int i = 0; i < contours.size(); i++) {
        mu[i] = moments(contours[i], false); // spatial/central moments of the contour
        HuMoments(mu[i], hu); // seven Hu invariant moments
        for(int j=0; j<7; j++) {
            cout << "取对数:" <<log(abs(hu[j])) <<endl; // natural log; yields -inf if a moment is exactly 0
        }
        // centroid (m10/m00, m01/m00); NaN for a degenerate contour with m00 == 0
        mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
        circle(drawing, mc[i], 4, Scalar(0, 255, 0), -1, 8, 0);
        // area from the moment set vs contourArea(), for comparison
        printf("*[%d]->[%.2f]-[%.2f]\n", i, mu[i].m00, contourArea(contours[i]));
    }
    imshow("轮廓图", drawing);
    double sfdsd = matchShapes(grayImage, efsdf, CONTOURS_MATCH_I2, 0); // closer to 0 == more similar
    // FIX: the original indexed contours[5] unconditionally, which is out of
    // range whenever fewer than 6 contours were found.
    if(contours.size() > 5) {
        double sfdsd1 = matchShapes(contours[0], contours[5], CONTOURS_MATCH_I2, 0);
        (void)sfdsd1;
    }
    (void)sfdsd;
}
/**********************************背景提取**********************************/
/* LineIterator使用 */
void PictureDemo::test55()
{
Mat drawing = imread("../x64/Debug/picture/4.jpg");
Point a(100, 100);
Point b(100, 120);
LineIterator it(drawing, a, b);
for(int i = 0; i < it.count; i++, ++it) {
cout<< i <<", "<<" pos: "<< it.pos() <<endl;
}
}
/* accumulate family */
void PictureDemo::test56()
{
    // Accumulators must be floating point: CV_32F can hold sums beyond 255.
    // (Unused locals dst3/dst4 and the duplicate image load were removed.)
    Mat dst, dst2;
    Mat drawing = imread("../x64/Debug/picture/4.jpg");
    vector<Mat> vPlanes;
    split(drawing, vPlanes);
    Size sz = drawing.size();
    dst = Mat::zeros(sz, CV_32F);
    dst2 = Mat::zeros(sz, CV_32F);
    // Plain sum of the three channels. imshow maps a float image assuming [0,1],
    // so the sum largely displays as saturated white — the demo is about the API.
    accumulate(vPlanes[0], dst);
    accumulate(vPlanes[1], dst);
    accumulate(vPlanes[2], dst);
    imshow("原图", dst);
    // Running average: dst2 = (1-alpha)*dst2 + alpha*plane, alpha = 0.5
    accumulateWeighted(vPlanes[0], dst2, 0.5);
    accumulateWeighted(vPlanes[1], dst2, 0.5);
    accumulateWeighted(vPlanes[2], dst2, 0.5);
    imshow("原图1", dst2);
}
/* Corner detection: goodFeaturesToTrack, cornerSubPix, cornerHarris */
void PictureDemo::test57()
{
    Mat gray;
    Mat image = imread("../x64/Debug/picture/29.jpg", IMREAD_COLOR);
    cvtColor(image, gray, COLOR_BGR2GRAY);
    Mat image_copy1 = image.clone(); // canvas for the Shi-Tomasi corners
    vector<Point2f> corners;
    // detection parameters
    int max_corners = 200;
    double quality_level = 0.01; // minimum accepted quality relative to the best corner
    double min_distance = 10;    // minimum euclidean distance between returned corners
    int block_size = 3;
    double k = 0.04;             // Harris free parameter (ignored here: useHarris = false)
    goodFeaturesToTrack(gray, corners, max_corners, quality_level, min_distance, Mat(), block_size, false, k);
    for(int i = 0; i < corners.size(); i++) { // draw the detected corners on the source
        circle(image_copy1, corners[i], 3, Scalar(0,0,255), 2, 8, 0);
    }
    imshow("角", image_copy1);
    // Sub-pixel refinement of the same corners
    Mat image_copy2 = image.clone();
    TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 40, 0.01); // iteration stop criteria
    cornerSubPix(gray, corners, Size(5, 5), Size(-1, -1), criteria);
    for(int i = 0; i < corners.size(); i++) {
        circle(image_copy2, corners[i], 3, Scalar(0,255,0), 2, 8, 0);
    }
    imshow("细化角落位置", image_copy2);
    // Harris corners. goodFeaturesToTrack builds on cornerHarris but gives finer
    // control over the returned corners.
    // (FIX: the original cloned `image` into image_copy twice in a row.)
    Mat image_copy = image.clone();
    int threshod_val = 1; // current threshold offset
    Mat normImage;
    Mat Img_dst = Mat::zeros(image.size(), CV_32FC1);
    cornerHarris(gray, Img_dst, 2 , 3 , 0.04 , BORDER_DEFAULT);
    normalize(Img_dst, normImage, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
    for(int i=0;i< normImage.rows;i++) {
        for(int j=0;j< normImage.cols;j++) {
            if( (int) normImage.at<float>(i,j) > 100 + threshod_val) { // mark strong responses
                circle(image_copy, Point(j, i), 3, Scalar(255,0,0), 2, 8, 0);
            }
        }
    }
    imshow("work", image_copy);
}
/* 特征点和描述符 */
void PictureDemo::test58()
{
//FAST核心思想是拿一个点跟它周围的点比较,如果它和其中大部分的点都不一样就可以认为它是一个特征点。
Mat src = imread("../x64/Debug/picture/29.jpg", IMREAD_GRAYSCALE);
Mat src2 = imread("../x64/Debug/picture/30.jpg", IMREAD_GRAYSCALE);
int numfeature = 400; //特征点数目
//Ptr<ORB>detector = ORB::create(numfeature);
auto detector = ORB::create(); //自动生成特征点的个数
vector<KeyPoint>keypoints;
detector->detect(src, keypoints, Mat());
printf("所有的特征点个数:%d", keypoints.size());
Mat resultImg = imread("../x64/Debug/picture/29.jpg");
drawKeypoints(resultImg, keypoints, resultImg, Scalar::all(-1), DrawMatchesFlags::DEFAULT); //特征点颜色随机
imshow("特征点提取", resultImg);
//SIFT描述符
Ptr<SIFT> detector1 = SIFT::create();
vector<KeyPoint> vDescriptors1, vDescriptors2;
Mat descriptors1, descriptors2;
detector1->detectAndCompute(src, noArray(), vDescriptors1, descriptors1);
detector1->detectAndCompute(src2, noArray(), vDescriptors2, descriptors2);
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
vector<DMatch> matches;
matcher->match(descriptors1, descriptors2, matches);
//绘制从两个图像中匹配出的关键点
Mat imgMatches;
drawMatches(src, vDescriptors1, src2, vDescriptors2, matches, imgMatches);
imshow("描述符提取", imgMatches);
}
/* 角点检测 */
void PictureDemo::test59()
{
Mat srcImage = imread("../x64/Debug/picture/29.jpg");
Mat srcGrayImage;
cvtColor(srcImage, srcGrayImage, COLOR_BGR2GRAY);
vector<KeyPoint>detectKeyPoint;
Mat keyPointImage1, keyPointImage2;
Ptr<GFTTDetector> gftt = GFTTDetector::create();
gftt->detect(srcGrayImage, detectKeyPoint);
drawKeypoints(srcImage, detectKeyPoint, keyPointImage1, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
drawKeypoints(srcImage, detectKeyPoint, keyPointImage2, Scalar(0,0,255), DrawMatchesFlags::DEFAULT);
imshow("src image", srcImage);
imshow("keyPoint image1", keyPointImage1);
imshow("keyPoint image2", keyPointImage2);
}
Mat src, gray; // globals shared by the trackbar demos below: src = loaded BGR image, gray = its grayscale copy
// Trackbar callback: run Harris on the global `gray` image and circle every
// pixel whose normalized response exceeds `thresh`.
void HarrisCallback(int thresh, void*)
{
    Mat HarrisDst, normDst, normScaleDst;
    HarrisDst = Mat::zeros(src.size(), CV_32FC1);
    int blockSize = 2; // neighborhood size
    int ksize = 3;     // Sobel aperture
    double k = 0.04;   // Harris free parameter
    // FIX: the original declared the three parameters above and then passed
    // literals anyway; pass the named values.
    cornerHarris(gray, HarrisDst, blockSize, ksize, k);
    normalize(HarrisDst, normDst, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
    // to 8-bit absolute values
    convertScaleAbs(normDst, normScaleDst);
    // select corners by threshold
    Mat resImg = src.clone();
    for(int i = 0; i < resImg.rows; i++) {
        uchar* currentRow = normScaleDst.ptr(i);
        for(int j = 0; j < resImg.cols; j++) {
            if((int)*currentRow > thresh) {
                circle(resImg, Point(j,i), 2, Scalar(0,0,255), 2);
            }
            currentRow++; // (the stray `else;` empty statement was removed)
        }
    }
    imshow("HarrisCorner", resImg);
}
/* cornerHarris threshold trackbar */
void PictureDemo::test60()
{
    src = imread("../x64/Debug/picture/21.png");
    cvtColor(src, gray, COLOR_BGR2GRAY);
    namedWindow("HarrisCorner", WINDOW_AUTOSIZE);
    // FIX: createTrackbar stores this pointer and the HighGUI event loop writes
    // through it after this function returns — a plain stack local dangles.
    static int thresh = 145;
    createTrackbar("Threshold", "HarrisCorner", &thresh, 255, HarrisCallback);
}
const string g_strWinName = "自定义角点检测"; // window title for the custom corner-detection demo
Mat harrisDst, harrisRes; // harrisDst: per-pixel eigenvalues/eigenvectors of the autocorrelation matrix M; harrisRes: Harris response per pixel
double minResValue; // minimum value in the response matrix
double maxResValue; // maximum value in the response matrix
void HarrisDemo(int qualityValue, void*)
{
if(qualityValue < 10) {
qualityValue = 10; //控制qualitylevel的下限值
}
Mat result = src.clone();
float fThreshold = minResValue + (((double)qualityValue) / 100) * (maxResValue - minResValue);
for(int row = 0; row <result.rows; row++) {
for(int col = 0; col < result.cols; col++) {
float resp_value = harrisRes.at<float>(row, col);
if(resp_value > fThreshold) {
circle(result, Point(col, row), 2, Scalar(0,255,0), 2, 8, 0);
}
}
}
imshow(g_strWinName, result);
}
/* cornerEigenValsAndVecs角点阈值拖动 */
void PictureDemo::test61()
{
src = imread("../x64/Debug/picture/21.png");
cvtColor(src, gray, COLOR_BGR2GRAY);
int blockSize = 3;
int ksize = 3;
double k = 0.04;
harrisDst = Mat::zeros(src.size(), CV_32FC(6));
harrisRes = Mat::zeros(src.size(), CV_32FC1);
cornerEigenValsAndVecs(gray, harrisDst, blockSize, ksize, 4); //计算每个像素值对应的自相关矩阵的特征值和特征向量
//计算响应函数值
for(int row = 0; row < harrisDst.rows;++row) {
for(int col = 0; col < harrisDst.cols; ++col) {
double eigenvalue1 = harrisDst.at<Vec6f>(row, col)[0]; //获取特征值1
double eigenvalue2 = harrisDst.at<Vec6f>(row, col)[1]; //获取特征值2
harrisRes.at<float>(row, col) = eigenvalue1*eigenvalue2 - k*pow((eigenvalue1+eigenvalue2), 2); //通过R=λ1*λ2 - k*(λ1+λ2)*(λ1+λ2)来计算每个像素对应的响应值
}
}
minMaxLoc(harrisRes, &minResValue, &maxResValue, 0, 0, Mat()); //寻找响应矩阵中的最小值和最大值
namedWindow(g_strWinName, WINDOW_AUTOSIZE);
int qualityValue = 30;
createTrackbar("Quality Value:", g_strWinName, &qualityValue, 100, HarrisDemo);
}
// SimpleBlobDetector parameters, driven by the trackbars created in test62
int iThStep = 10;  // threshold step between successive binarizations
int iMinth = 50;   // lowest binarization threshold
int iMaxth = 220;  // highest binarization threshold
int iMinBt = 10;   // minimum distance between blobs
int iMinar = 25;   // minimum blob area
int iMaxar = 5000; // maximum blob area
int iMinCir = 0;   // minimum circularity, in percent
int iMinIne = 0;   // minimum inertia ratio, in percent
int iMinCon = 0;   // minimum convexity, in percent
void detect(int, void *)
{
SimpleBlobDetector::Params pBLOBDetector;
pBLOBDetector.thresholdStep = iThStep;
pBLOBDetector.minThreshold = iMinth;
pBLOBDetector.maxThreshold = iMaxth;
pBLOBDetector.minRepeatability = 2;
pBLOBDetector.minDistBetweenBlobs = iMinBt;
pBLOBDetector.filterByColor = true;
pBLOBDetector.blobColor = 0;
//斑点面积
pBLOBDetector.filterByArea = true;
pBLOBDetector.minArea = iMinar;
pBLOBDetector.maxArea = iMaxar;
//斑点圆度
pBLOBDetector.filterByCircularity = true;
pBLOBDetector.minCircularity = iMinCir *0.01;
pBLOBDetector.maxCircularity = (float)3.40282e+038;
//斑点惯性率
pBLOBDetector.filterByInertia = true;
pBLOBDetector.minInertiaRatio = iMinIne * 0.01;
pBLOBDetector.maxInertiaRatio = (float)3.40282e+038;
//斑点凸度
pBLOBDetector.filterByConvexity = true;
pBLOBDetector.minConvexity = iMinCon * 0.01;
pBLOBDetector.maxConvexity = (float)3.40282e+038;
//*用参数创建对象
Ptr<SimpleBlobDetector> blob = SimpleBlobDetector::create(pBLOBDetector);
//*blob检测
vector<KeyPoint> key_points;
Mat dst;
cvtColor(src, dst, COLOR_RGB2GRAY);
blob->detect(dst, key_points);
Mat outImg;
//绘制结果
drawKeypoints(src, key_points, outImg, Scalar(0, 0, 0));
imshow("blob", outImg);
}
/* Blob extraction */
void PictureDemo::test62()
{
    // (FIX: removed the unused local `Mat gray1`.)
    src = imread("../x64/Debug/picture/33.jpg");
    namedWindow("Detect window", cv::WINDOW_NORMAL);
    // Each trackbar edits one of the global parameters and re-runs detect().
    // The globals' lifetime outlives the window, so taking their address is safe.
    createTrackbar("最小圆度", "Detect window", &iMinCir, 100, detect);
    createTrackbar("最小惯性率", "Detect window", &iMinIne, 100, detect);
    createTrackbar("最大凸度", "Detect window", &iMinCon, 100, detect);
    createTrackbar("阈值步距", "Detect window", &iThStep, 100, detect);
    createTrackbar("最小阈值", "Detect window", &iMinth, 255, detect);
    createTrackbar("最大阈值", "Detect window", &iMaxth, 255, detect);
    createTrackbar("最小距离", "Detect window", &iMinBt, 255, detect);
    createTrackbar("最小面积", "Detect window", &iMinar, 1000, detect);
    createTrackbar("最大面积", "Detect window", &iMaxar, 5000, detect);
}
void trackBar(int thre, void*)
{
vector<KeyPoint> keypoints;
Mat dst = src.clone();
Ptr<FastFeatureDetector> detector = FastFeatureDetector::create(thre);
detector->detect(src,keypoints);
drawKeypoints(dst, keypoints, dst, Scalar::all(-1), DrawMatchesFlags::DRAW_OVER_OUTIMG);
imshow("output", dst);
}
/* FAST feature detection */
void PictureDemo::test63()
{
    src = imread("../x64/Debug/picture/33.jpg");
    // FIX: createTrackbar stores &thre and reads it after this function
    // returns; the original stack local dangled.
    static int thre = 40;
    namedWindow("output",WINDOW_AUTOSIZE);
    createTrackbar("threshould", "output", &thre, 255, trackBar);
}
/* SIFT */
void PictureDemo::test64()
{
//尺度不变特征变换匹配算法. SIFT 算法对于旋转和尺度具有不变性
src = imread("../x64/Debug/picture/33.jpg");
Mat imageGray;
cvtColor(src, imageGray, COLOR_BGR2GRAY);
vector<KeyPoint> keypoints;
Mat descriptors;
Ptr<SIFT> ptrSIFT = SIFT::create();
ptrSIFT->detectAndCompute(imageGray, Mat(), keypoints, descriptors);
drawKeypoints(src, keypoints, src, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imshow("正常版", src);
src = imread("../x64/Debug/picture/33_2.jpg");
cvtColor(src, imageGray, COLOR_BGR2GRAY);
ptrSIFT->detectAndCompute(imageGray, Mat(), keypoints, descriptors);
drawKeypoints(src, keypoints, src, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imshow("缩小版", src); //尺寸变化,角点也变了,大体是相同的
}
/* BRISK二进制特征描述符 */
void PictureDemo::test65()
{
//SIFT<SURF<BRISK<FREAK<ORB,对有较大模糊的图像配准时,BRISK算法在其中表现最为出色
Mat tem_image = imread("../x64/Debug/picture/34_cut.jpg");
Mat dected_image = imread("../x64/Debug/picture/34.jpg");
auto brisk = BRISK::create();
vector<KeyPoint> keyPoints_tem, keyPoints_dected;
Mat descriptors_tem, descriptors_dected;
brisk->detectAndCompute(tem_image, Mat(), keyPoints_tem, descriptors_tem, false);
brisk->detectAndCompute(dected_image, Mat(), keyPoints_dected, descriptors_dected, false);
auto matcher = DescriptorMatcher::create(DescriptorMatcher::MatcherType::BRUTEFORCE);
vector<DMatch> matches;
matcher->match(descriptors_tem, descriptors_dected, matches);
float maxdist = matches[0].distance;
for(int i = 0; i < matches.size(); i++) {
if(maxdist < matches[i].distance) {
maxdist = matches[i].distance;
}
}
float thresh = 0.4;
vector<DMatch> good_Matches;
vector<Point2f> temPoints, dectedPoints;
for(int j = 0; j < matches.size(); j++) {
if(matches[j].distance < thresh * maxdist) {
good_Matches.push_back(matches[j]);
temPoints.push_back(keyPoints_tem[matches[j].queryIdx].pt);
dectedPoints.push_back(keyPoints_dected[matches[j].trainIdx].pt);
}
}
if(0 == good_Matches.size()) {
cout << "不存在最佳匹配特征点" << endl;
}
Mat result;
drawMatches(tem_image, keyPoints_tem, dected_image, keyPoints_dected, good_Matches, result,
Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("result", result);
}
/* KeyPointsFilter */
void PictureDemo::test66()
{
    Mat src = imread("../x64/Debug/picture/33.jpg");
    double t1 = (double)getTickCount();
    // SIFT detection. NOTE(review): "minHessian" is a SURF concept — SIFT::create's
    // first argument is nfeatures, so 400 caps the keypoint count here.
    int minHessian = 400;
    Ptr<SIFT> detector = SIFT::create(minHessian);
    vector<KeyPoint> keypoints;
    detector->detect(src, keypoints, Mat());
    double t2 = (double)getTickCount();
    double t = (t2 - t1) / getTickFrequency();
    cout << "spend time: " << t << " s" << endl;
    // draw the raw keypoints
    Mat keypoint_img;
    drawKeypoints(src, keypoints, keypoint_img, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cout << "Number:" << keypoints.size() << endl;
    imshow("KeyPoints Image", keypoint_img);
    Mat keypoint_img2;
    KeyPointsFilter f;
    f.runByKeypointSize(keypoints, 0, 88); // keep keypoints whose size lies in (0, 88]
    cout << "Number:" << keypoints.size() << endl;
    f.runByImageBorder(keypoints, src.size(), 10); // drop keypoints within 10px of the border
    drawKeypoints(src, keypoints, keypoint_img2, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    imshow("KeyPoints Image2", keypoint_img2);
    // Build a mask: threshold the color image, then collapse it to one channel.
    // (FIX: the original first did a BGR2GRAY into `mask` that threshold()
    // immediately overwrote — that dead conversion and an unused structuring
    // element were removed.)
    Mat mask;
    threshold(src, mask, 245, 255, THRESH_BINARY_INV);
    cvtColor(mask, mask, COLOR_BGR2GRAY);
    imshow("mask", mask);
    // method 1: keep only keypoints where the mask is non-zero
    f.runByPixelsMask(keypoints, mask);
    // method 2 (manual equivalent):
    /*for(vector<KeyPoint>::iterator it = keypoints.begin();it != keypoints.end();) {
    if(mask.at<Vec3b>((*it).pt.y, (*it).pt.x)[0] == 0 && mask.at<Vec3b>((*it).pt.y, (*it).pt.x)[0] == 0 && mask.at<Vec3b>((*it).pt.y, (*it).pt.x)[0] == 0) {
    it = keypoints.erase(it);
    }
    else {
    it++;
    }
    }*/
    Mat keypoint_img3;
    drawKeypoints(src, keypoints, keypoint_img3, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    imshow("KeyPoints Image3", keypoint_img3);
}
/* 暴力匹配 */
void PictureDemo::test67()
{
//暴力匹配原理简单,算法的复杂度高,当遇到特征点数目比较大时,会大大影响程序运行时间
Mat src1 = imread("../x64/Debug/picture/5.jpg", IMREAD_COLOR);
Mat src2 = imread("../x64/Debug/picture/10.jpg", IMREAD_COLOR);
vector<KeyPoint> keypoints1, keypoints2;
Mat descriptors1,descriptors2;
Mat result_img;
Ptr<SIFT>detector = SIFT::create();
//提取特征点
detector->detect(src1,keypoints1,noArray());
detector->detect(src2, keypoints2, Mat());
//获取特征点的描述信息=>特征向量
detector->compute(src1, keypoints1, descriptors1);
detector->compute(src2, keypoints2, descriptors2);
//定义匹配器的实例化=>方法为暴力匹配法
//Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::BRUTEFORCE);
//第二种实例化方法
BFMatcher *matcher = new BFMatcher(); //和上面效果基本一致
//进行暴力匹配
vector<DMatch> matches;
vector<Mat>train_desc(1, descriptors2);
matcher->add(train_desc);
matcher->train();
vector<vector<DMatch>> matchpoints;
matcher->knnMatch(descriptors1, matchpoints, 2);
float fThreshold = 0.1;
vector<DMatch> goodfeatur;
for(int i = 0; i < matchpoints.size(); i++) {
if(matchpoints[i][0].distance < fThreshold * matchpoints[i][1].distance) {
goodfeatur.push_back(matchpoints[i][0]);
}
}
cout << "筛选后的特征点数量为: " << goodfeatur.size() << endl;
drawMatches(src1, keypoints1, src2, keypoints2, goodfeatur, result_img); //输出关键点和匹配结果
imshow("匹配结果", result_img);
}
/* FLANN matching */
void PictureDemo::test68()
{
    Mat img1, img2;
    img1 = imread("../x64/Debug/picture/5.jpg");
    img2 = imread("../x64/Debug/picture/10.jpg");
    vector<KeyPoint>Keypoints1, Keypoints2;
    Mat descriptions1, descriptions2;
    // ORB keypoints + descriptors
    Ptr<ORB>orb = ORB::create(1000, 1.2f);
    orb->detectAndCompute(img1, noArray(), Keypoints1, descriptions1);
    orb->detectAndCompute(img2, noArray(), Keypoints2, descriptions2);
    // FLANN's KD-tree wants float descriptors; ORB emits CV_8U, so convert
    if((descriptions1.type() != CV_32F) && (descriptions2.type() != CV_32F)) {
        descriptions1.convertTo(descriptions1, CV_32F);
        descriptions2.convertTo(descriptions2, CV_32F);
    }
    // match the descriptors
    vector<DMatch>matches;
    FlannBasedMatcher matcher;
    matcher.match(descriptions1, descriptions2, matches);
    // Distance range of the matches. FIX: min_dist was initialized to 0, so
    // `dist < min_dist` could never fire and the minimum was always 0.
    double max_dist = 0;
    double min_dist = matches.empty() ? 0 : matches[0].distance;
    for(int i = 0; i < descriptions1.rows; i++) {
        double dist = matches[i].distance;
        if(dist < min_dist) {
            min_dist = dist;
        }
        if(dist > max_dist) {
            max_dist = dist;
        }
    }
    // keep matches closer than half the maximum distance
    vector<DMatch>good_matches;
    for(int i = 0; i < descriptions1.rows; i++) {
        if(matches[i].distance < 0.5*max_dist) {
            good_matches.push_back(matches[i]);
        }
    }
    Mat outimg, outimg1;
    // draw raw and filtered match sets
    drawMatches(img1, Keypoints1, img2, Keypoints2, matches, outimg);
    drawMatches(img1, Keypoints1, img2, Keypoints2, good_matches, outimg1);
    imshow("未筛选结果", outimg);
    imshow("筛选结果", outimg1);
}
// Render one ASCII character into a width x height tile and flatten it into a
// single CV_32F row vector (one sample for the nearest-neighbour index in test69).
Mat char2Mat(uchar c, int width, int height)
{
    float scale = 1.0f;
    int fontFace = FONT_HERSHEY_DUPLEX;
    Size elemSize(width, height);
    Mat elem(elemSize, CV_8UC1, Scalar(0));
    int baseline = 0;
    string text;
    text += c;
    Size textSize = getTextSize(text, fontFace, scale, 1, &baseline);
    cout << "baseline = " << baseline << endl;
    cout << "text size of " << text << " = " << textSize << endl;
    // center the glyph inside the tile
    Point textPos((elemSize.width - textSize.width) / 2, elemSize.height - (elemSize.height - textSize.height) / 2 - 2);
    // FIX: use the same fontScale as getTextSize and thickness 1. The original
    // passed putText(..., 1 /*fontScale*/, ..., scale /*thickness*/, ...), i.e.
    // swapped — numerically harmless only because scale happens to be 1.
    putText(elem, text, textPos, fontFace, scale, Scalar(255), 1, 8);
    elem.convertTo(elem, CV_32F);
    return elem.reshape(1, 1); // 1 x (width*height)
}
/* KDTreeIndexParams搜索算法 */
void PictureDemo::test69()
{
//在处理高维度数据时,仅通过KdTree进行近邻搜索,就能获取到不错的模糊搜索效果
Mat image = imread("../x64/Debug/picture/31.jpg", IMREAD_GRAYSCALE);
image = 255 - image; //视图片情况,可以给图片反一下色
int elemSize = 8; //设置图素大小
//在ascii字符集中选取32到126之间的字符作为图素点阵
vector<uchar> chars;
map<int, uchar> dic; //形状输出
for(int i = 32; i <= 126; i++) {
chars.push_back(static_cast<uchar>(i));
dic[i - 32] = static_cast<uchar>(i);
}
//将ascii字符写到elem x elem大小的点阵中,然后生成一个1 x (elemSizex elemSize)大小的向量,作为一个数据样本
Mat samples(0, elemSize * elemSize, CV_32F);
for(int i = 0; i < chars.size(); i++) {
Mat elem = char2Mat(chars[i], elemSize, elemSize);
cout << elem.size() << endl;
samples.push_back(elem);
}
//最后再补充两个图素点阵,用于处理全黑与全白的情况
Mat_<float> white(0, elemSize * elemSize, 255.f);
Mat_<float> black(0, elemSize * elemSize, 0.f);
samples.push_back(white);
dic[samples.rows - 1] = '@';
samples.push_back(black);
dic[samples.rows - 1] = ' ';
//创建KdTree,方向是使用cv::flann::Index
//flann::Index flannIndex(samples, flann::KDTreeIndexParams(1));
//flann::Index flannIndex(samples, flann::KMeansIndexParams());
flann::Index flannIndex(samples, flann::CompositeIndexParams());
//对图片做一点小小的处理,使图片的宽高正好是图素宽高的整数倍
int iWidth = image.cols / elemSize * elemSize;
int iHeight = image.rows / elemSize * elemSize;
resize(image, image, Size(iWidth, iHeight), 0.0, 0.0, INTERSECT_NONE);
//遍历图片,每个elemSize x elemSize的子区域通过KdTree找到最接近的图素,然后再找到对应的ascii字符,将其输出
for(int j = 0; j < iHeight; j += elemSize) {
for(int i = 0; i < iWidth; i += elemSize) {
Mat roi = image(Rect(i, j, elemSize, elemSize));
Mat query(elemSize, elemSize, CV_32F);
roi.convertTo(query, CV_32F);
query = query.reshape(1, 1);
Mat indices, dists;
flannIndex.knnSearch(query, indices, dists, 1);
auto value = indices.ptr<int>(0)[0];
char chr = dic[value];
cout << chr;
}
cout << endl;
}
}
/* k-means clustering on randomly generated 2D samples */
void PictureDemo::test70()
{
Mat img(500, 500, CV_8UC3);
RNG rng(12345);
Scalar colorTab[] = {//palette: each cluster is drawn with the color at its label index (7 entries for 7 clusters)
Scalar(0,0,255),
Scalar(0,255,0),
Scalar(255,0,0),
Scalar(0,255,255),
Scalar(255,0,255),
Scalar(128,80,128),
Scalar(128,128,128)};
int numCluster = 7; //number of clusters
int sampleCount = 700; //total number of generated samples
Mat points(sampleCount, 1, CV_32FC2); //sampleCount rows x 1 column: one 2D point per row
Mat labels; //cluster label per sample, integer index starting at 0
Mat centers; //cluster centers after kmeans finishes
//randomly generate the samples
for(int k = 0; k < numCluster; k++) {
Point center; //random cluster center
center.x = rng.uniform(0, img.cols);
center.y = rng.uniform(0, img.rows);
//slice out this cluster's chunk of the sample matrix
Mat pointChunk = points.rowRange(k * sampleCount / numCluster, k == numCluster - 1 ? sampleCount : (k + 1) * sampleCount / numCluster);
//fill the chunk with gaussian noise around `center`, std dev ~5% of the image size
rng.fill(pointChunk, RNG::NORMAL, Scalar(center.x, center.y), Scalar(img.cols*0.05, img.rows*0.05));
}
randShuffle(points, 1, &rng); //shuffle the sample order
kmeans(points, numCluster, labels, TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, 0.1), 3, KMEANS_PP_CENTERS, centers);
//show each sample in its cluster's color
img = Scalar::all(255);
for(int i = 0; i < sampleCount; i++) {//draw samples one by one, animated via waitKey
int index = labels.at<int>(i);
Point p = points.at<Point2f>(i);
circle(img, p, 3, colorTab[index], -1, LINE_AA);
imshow("KMeans-Data-Demo:", img);
waitKey(20);
}
for(int i = 0; i < centers.rows; i++) {//draw a circle around each cluster center
int x = centers.at<float>(i, 0);
int y = centers.at<float>(i, 1);
cout << "c.x = " << x << "c.y = " << y << endl;
circle(img, Point(x, y), 40, colorTab[i], 1, LINE_AA);
imshow("KMeans-Data-Demo:", img);
}
imshow("KMeans-Data-Demo:", img);
}
/* Handwritten-digit classification with k-nearest-neighbours (kNN) */
void PictureDemo::test71()
{
Mat img = imread("../x64/Debug/picture/47.png");
Mat gray;
cvtColor(img, gray, COLOR_BGR2GRAY);
int b = 20;
int m = gray.rows / b; //the source is 1000x2000; cut it into 5000 tiles of 20x20
int n = gray.cols / b;
Mat data, labels; //feature matrix and per-row labels
for(int i = 0; i < n; i++) {//read column by column
int offsetCol = i * b; //column offset
for(int j = 0; j < m; j++) {
int offsetRow = j * b; //row offset
//crop the 20x20 tile
Mat tmp;
gray(Range(offsetRow, offsetRow + b), Range(offsetCol, offsetCol + b)).copyTo(tmp);
data.push_back(tmp.reshape(0,1)); //flatten to one row and append to the feature matrix
labels.push_back((int)j / 5); //digit label: each digit occupies 5 consecutive tile rows
}
}
imshow("222:", data);
data.convertTo(data, CV_32F); //uchar -> CV_32F
int samplesNum = data.rows;
int trainNum = 3500; //the first 3500 samples train the model; a larger split improves test accuracy
Mat trainData, trainLabels;
trainData = data(Range(0, trainNum), Range::all());
trainLabels = labels(Range(0, trainNum), Range::all());
//kNN classifier
int K = 5;
Ptr<TrainData> tData = TrainData::create(trainData, ROW_SAMPLE, trainLabels); //wrap the training data in a TrainData object for train()
Ptr<KNearest> model = KNearest::create();
model->setDefaultK(K);
model->setIsClassifier(true);
//tData->setTrainTestSplit(3000);
model->train(tData);
Mat out;
float fValue = model->calcError(tData, true, out); //error over the whole data set in one call
//int ssda = tData->getNTrainSamples(); //count set by setTrainTestSplit
//auto dfgd = tData->getTestResponses(); //responses of the split set by setTrainTestSplit
double train_hr = 0, test_hr = 0;
Mat response;
for(int i = 0; i < samplesNum; i++) {//prediction accuracy over the train and test portions
Mat sample = data.row(i);
float fPredict = model->predict(sample); //predict one sample at a time
float r = abs(fPredict - labels.at<int>(i)) <= FLT_EPSILON ? 1.0 : 0.0; //1 if prediction matches the label, else 0
if(i < trainNum) {
train_hr += r; //accumulate correct predictions
cout << "训练结果,预测数: " << fPredict << ",实际数: " << labels.at<int>(i) << endl;
}
else {
test_hr += r;
cout << "测试结果,预测数: " << fPredict << ",实际数: " << labels.at<int>(i) << endl;
}
}
test_hr = test_hr / (samplesNum - trainNum);
train_hr = train_hr / trainNum;
printf("准确率: 训练 = %.1f%%, 测试 = %.1f%%\n", train_hr*100.0, test_hr*100.0);
}
// Train the model on `data`, then report its resubstitution (train) and
// held-out (test) error rates; prints a failure notice if training fails.
void TrainPrintErrs(Ptr<StatModel> model, const Ptr<TrainData>& data)
{
    if(!model->train(data)) {
        printf("Training failed\n");
        return;
    }
    printf("train error: %f\n", model->calcError(data, false, noArray()));
    printf("test error: %f\n\n", model->calcError(data, true, noArray()));
}
/* loadFromCSV: train a decision tree, boosted trees and a random forest from a CSV file */
void PictureDemo::test72()
{
const char* filename = "../x64/Debug/xml/xml2.csv";
int response_idx = 0; //column holding the response/label
string typespec;
printf("\nReading in %s...\n\n", filename);
const double dSplitRatio = 0.5; //half the rows train, half test
Ptr<TrainData> data = TrainData::loadFromCSV(filename, 0, response_idx, response_idx+1, typespec);
if(data.empty()) {
printf("ERROR: File %s can not be read\n", filename);
return;
}
data->setTrainTestSplitRatio(dSplitRatio);
printf("======DTREE=====\n");
Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(10); //maximum tree depth
dtree->setMinSampleCount(2); //minimum samples per node
dtree->setRegressionAccuracy(0);
dtree->setUseSurrogates(false); //no surrogate splits
dtree->setMaxCategories(16);
dtree->setCVFolds(0); //no cross-validation pruning
dtree->setUse1SERule(false);
dtree->setTruncatePrunedTree(false);
dtree->setPriors(Mat());
TrainPrintErrs(dtree, data);
dtree->save("../x64/Debug/xml/dtree_result.xml"); //persist the trained tree for the reload test below
if( (int)data->getClassLabels().total() <= 2 ) {//regression or 2-class classification problem
printf("======BOOST=====\n");
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::GENTLE);
boost->setWeakCount(100); //number of weak classifiers
boost->setWeightTrimRate(0.95);
boost->setMaxDepth(2);
boost->setUseSurrogates(false);
boost->setPriors(Mat());
TrainPrintErrs(boost, data);
}
printf("======RTREES=====\n");
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setMaxDepth(10);
rtrees->setMinSampleCount(2);
rtrees->setRegressionAccuracy(0);
rtrees->setUseSurrogates(false);
rtrees->setMaxCategories(16);
rtrees->setPriors(Mat());
rtrees->setCalculateVarImportance(false);
rtrees->setActiveVarCount(0);
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 0));
TrainPrintErrs(rtrees, data);
cout << "======TEST====="<<endl;
//reload the saved tree and predict one hand-built sample
Ptr<DTrees> dtree2 = DTrees::load("../x64/Debug/xml/dtree_result.xml");
vector<float>testVec;
testVec.push_back(1);
testVec.push_back(6);
float resultKind = dtree2->predict(testVec);
cout << "1,6:"<<resultKind<<endl;
}
/***************朴素贝叶斯分类***************/
/* 根据身高体重脚长预测性别 */
void PictureDemo::test73()
{
float trainingData[8][3] = {{6, 180, 12}, {5.92, 190, 11}, {5.58, 170, 12},{5.92, 165, 10}, //创建训练样本
{5, 100, 6}, {5.5, 150, 8}, {5.42, 130, 7}, {5.75, 150, 9}};
Mat trainingDataMat(8, 3, CV_32FC1, trainingData);
//1代表男,-1代表女
int labels[8] = {1, 1, 1, 1, -1, -1, -1, -1};
Mat labelsMat(8, 1, CV_32SC1, labels);
//创建TrainData并进行训练
Ptr<TrainData> tData = TrainData::create(trainingDataMat, ROW_SAMPLE, labelsMat);
Ptr<NormalBayesClassifier> model = NormalBayesClassifier::create();
model->train(tData);
float myData[3] = {5, 130, 7}; //已知某人身高6英尺,体重130磅,脚掌长8英寸,利用前面的训练样本来预测该人是男还是女
Mat myDataMat(1, 3, CV_32FC1, myData);
int res = model->predict(myDataMat); //利用训练好的分类器进行测试样本预测
string str;
if(res == 1) {
str = "男";
}
else if(res == -1) {
str = "女";
}
cout << endl << "检测结果 : " << str << endl;
}
/* 二分类训练结果展示 */
void PictureDemo::test74()
{
Mat image = Mat::zeros(512, 512, CV_8UC3);
int labels[10] = {1, -1, 1, 1,-1,1,-1,1,-1,-1};
Mat labelsMat(10, 1, CV_32SC1, labels);
float trainingData[10][2] = {{501, 150}, {255, 10}, {501, 255}, {10, 501}, {25, 80}, {150, 300}, {77, 200}, {300, 300}, {45, 250}, {200, 200}};
Mat trainingDataMat(10, 2, CV_32FC1, trainingData);
Scalar c1 = Scalar::all(0);
Scalar c2 = Scalar::all(255);
for(int i = 0; i < labelsMat.rows; i++) {
Point pt = Point(trainingData[i][0], trainingData[i][1]);
if(labels[i] == 1) {
circle(image, pt, 5, c1, -1, 8);
}
else {
circle(image, pt, 5, c2, -1, 8);
}
}
Ptr<NormalBayesClassifier> model = NormalBayesClassifier::create();
Ptr<TrainData> tData = TrainData::create(trainingDataMat, ROW_SAMPLE, labelsMat);
model->train(tData);
Vec3b green(0, 255, 0), blue(255, 0, 0);
for(int i = 0; i < image.rows; ++i) {
for(int j = 0; j < image.cols; ++j) {
Mat sampleMat = (Mat_<float>(1, 2) << j, i); //对图片没个点进行检测
float response = model->predict(sampleMat);
if(response == 1) {
image.at<Vec3b>(i, j) = green;
}
else if(response == -1) {
image.at<Vec3b>(i, j) = blue;
}
}
}
imshow("222", image);
}
/* 决策树 */
void PictureDemo::test75()
{
FILE *fp;
int flge = 0;
int fpoint, flabel;
Point point;
fp = fopen("../x64/Debug/xml/point.tx", "r+"); //点坐标
if(fp == NULL) {
printf("Cannot open the file!\n");
return;
}
vector<Point> trainedPoints;
vector<int> trainedPointsMarkers;
while (!feof(fp)) {
fscanf(fp, "%d", &fpoint);
if(feof(fp)) {
break;
}
//依次为横坐标、纵坐标、分类
if((flge%3==0? point.x = fpoint:
flge%3==1? point.y = fpoint:
flge%3==2? flabel = fpoint : -1) < 0) {
return;
}
if(flge % 3 == 2) {
trainedPoints.push_back(point);
trainedPointsMarkers.push_back(flabel);
}
flge++;
}
vector<Vec3b> colors(2);
colors[0] = Vec3b(0, 255, 0);
colors[1] = Vec3b(0, 0, 255);
Mat src, dst;
src.create(480, 640, CV_8UC3);
src = Scalar::all(0);
src.copyTo(dst);
//绘制点
for(size_t i = 0; i < trainedPoints.size(); i++) {
Scalar c = colors[trainedPointsMarkers[i]];
circle(src, trainedPoints[i], 3, c, -1);
circle(dst, trainedPoints[i], 3, c, -1);
}
imshow("points", src);
Mat samples;
Mat(trainedPoints).reshape(1, (int)trainedPoints.size()).convertTo(samples, CV_32F);
Ptr<DTrees> model = DTrees::create();
model->setMaxDepth(8); //树的最大可能深度
model->setMinSampleCount(2); //节点最小样本数量
model->setUseSurrogates(false); //是否建立替代分裂点
model->setCVFolds(0); //交叉验证次数
model->setUse1SERule(false); //是否严格修剪
model->setTruncatePrunedTree(false); //分支是否完全移除
model->train(TrainData::create(samples, ROW_SAMPLE, Mat(trainedPointsMarkers)));
//显示结果
Mat testSample( 1, 2, CV_32FC1 );
for(int y = 0; y < dst.rows; y += 3) {
for(int x = 0; x < dst.cols; x += 3) {
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
int response = (int)model->predict(testSample);
dst.at<Vec3b>(y, x) = colors[response];
}
}
imshow("Decision Tree", dst);
}
/* AdaBoost: two-class boosted trees, visualized over the whole image plane */
void PictureDemo::test76()
{
Mat showImage(780, 1300, CV_8UC3, Scalar(128, 128, 128));
float traindata[42][2] = {{400,550}, {350,350}, {550,150}, {540,250}, {100,100}, {150,150}, {400,100},
{300,150}, {300,500}, {1000,200}, {450,650}, {200,350}, {800,200}, {900,50},
{950,350}, {800,650}, {150,550}, {250,650}, {850,350}, {850,550}, {950,700},
{1050,500}, {1150,650}, {1100,250}, {1200,250}, {1200,450}, {150,450}, {550,300},
{600,650}, {950,600}, {250,400}, {750,450}, {1050,350}, {650,100}, {500,500},
{400,350}, {700,550}, {800,300}, {950,450}, {600,250}, {700,300}, {650,450}};
Mat trainMat(42, 2, CV_32FC1, traindata);
//class labels are the char codes 'R' (82) and 'B' (66)
int respond[42] = {'R','R','R','R','R','R','R','R','R','R',
'R','R','R','R','R','R','R','R','R','R','R',
'B','B','B','B','B','B','B','B','B','B',
'B','B','B','B','B','B','B','B','B','B','B'};
Mat labelsMat(42, 1, CV_32SC1, respond);
Ptr<TrainData> data = TrainData::create(trainMat, ROW_SAMPLE, labelsMat);
Ptr<Boost> model = Boost::create();
model->setBoostType(Boost::REAL);
model->setMaxDepth(15); //maximum depth of each weak tree
model->setWeightTrimRate(0.95); //weight trim rate
model->setWeakCount(15); //number of weak classifiers
model->train(data);
//predict every pixel and tint it by the predicted class
Vec3b red(0, 0, 255), green(0, 255, 0);
Mat sampleMat;
for(int i = 0; i < showImage.rows; ++i) {
for(int j = 0; j < showImage.cols; ++j) {
sampleMat = (Mat_<float>(1, 2) << j, i); //build the test sample (x = col, y = row)
int response = model->predict(sampleMat); //returns one of the trained class labels ('R' or 'B')
if(response == 'R'){
showImage.at<Vec3b>(i, j) = red;
}
else if(response == 'B'){
showImage.at<Vec3b>(i, j) = green;
}
}
}
for(int i = 0; i < 42; i++) { //overlay the training samples on the class map
int v = respond[i];
if(v == 'R') {
circle(showImage, Point(traindata[i][0], traindata[i][1]), 5, Scalar(0, 0, 0), -1, LINE_AA);
}
else {
circle(showImage, Point(traindata[i][0], traindata[i][1]), 5, Scalar(255, 255, 255), -1, LINE_AA);
}
}
imshow("AdaBoost", showImage);
}
/* Random-forest (cv::ml::RTrees) three-class demo: 150 Gaussian 2-D samples
in three clusters; classifies every pixel of a 512x512 canvas and overlays
the training points in three grey levels. */
void PictureDemo::test77()
{
//canvas used to visualise the classification result
Mat image = Mat::zeros(512, 512, CV_8UC3);
//class labels: three classes, 50 samples each
int labels[150];
for (int i = 0; i < 50; i++) {
labels[i] = 1;
}
for (int i = 50; i < 100; i++) {
labels[i] = 2;
}
for (int i = 100; i < 150; i++) {
labels[i] = 3;
}
Mat labelsMat(150, 1, CV_32SC1, labels); //wrap as a 150x1 label matrix
float trainDataArray[150][2]; //training data: three clusters of 2-D feature vectors
RNG rng;
for (int i = 0; i < 50; i++) { //cluster 1 around (250, 250), sigma 30
trainDataArray[i][0] = 250 + static_cast<float>(rng.gaussian(30));
trainDataArray[i][1] = 250 + static_cast<float>(rng.gaussian(30));
}
for (int i = 50; i < 100; i++) { //cluster 2 around (150, 150)
trainDataArray[i][0] = 150 + static_cast<float>(rng.gaussian(30));
trainDataArray[i][1] = 150 + static_cast<float>(rng.gaussian(30));
}
for (int i = 100; i < 150; i++) { //cluster 3 around (320, 150)
trainDataArray[i][0] = 320 + static_cast<float>(rng.gaussian(30));
trainDataArray[i][1] = 150 + static_cast<float>(rng.gaussian(30));
}
Mat trainingDataMat(150, 2, CV_32FC1, trainDataArray); //wrap as a 150x2 sample matrix
Ptr<TrainData> tData = TrainData::create(trainingDataMat, ROW_SAMPLE, labelsMat); //bundle samples and labels into one TrainData object
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setMaxDepth(10); //maximum tree depth
rtrees->setMinSampleCount(10); //minimum samples in a node
rtrees->setRegressionAccuracy(0);
rtrees->setUseSurrogates(false);
rtrees->setMaxCategories(15);
rtrees->setPriors(Mat());
rtrees->setCalculateVarImportance(true); //compute variable importance
rtrees->setActiveVarCount(4); //variables tried at each split
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + (0.01f > 0 ? TermCriteria::EPS : 0), 100, 0.01f));
rtrees->train(tData);
//classify all 512*512 pixels; colour the background by predicted class
Vec3b red(0, 0, 255), green(0, 255, 0), blue(255, 0, 0);
for (int i = 0; i < image.rows; ++i) {
for (int j = 0; j < image.cols; ++j) {
Mat sampleMat = (Mat_<float>(1, 2) << j, i); //1x2 test sample (x=j, y=i)
float response = rtrees->predict(sampleMat); //predict; returns the trained label 1, 2 or 3 (not +-1)
if (response == 1) {
image.at<Vec3b>(i, j) = red;
}
else if (response == 2) {
image.at<Vec3b>(i, j) = green;
}
else {
image.at<Vec3b>(i, j) = blue;
}
}
}
for (int i = 0; i < trainingDataMat.rows; i++) { //overlay the training points (black / grey / white per class)
const float * v = trainingDataMat.ptr<float>(i);
Point pt = Point((int)v[0], (int)v[1]);
if (labels[i] == 1) {
circle(image, pt, 5, Scalar::all(0), -1, LINE_AA);
}
else if (labels[i] == 2) {
circle(image, pt, 5, Scalar::all(128), -1, LINE_AA);
}
else{
circle(image, pt, 5, Scalar::all(255), -1, LINE_AA);
}
}
imshow("随机森林分类器示例", image);
}
/* Image segmentation with the EM algorithm: every pixel's BGR value becomes
 * a 3-D sample, a 4-component GMM is fitted with trainEM, and each pixel is
 * recoloured by its cluster label.
 * Fix: guard against imread failure (the original iterated an empty Mat). */
void PictureDemo::test78()
{
    Vec3b colors[] = {Vec3b(0, 0, 255), Vec3b(0, 255, 0), Vec3b(255, 100, 100), Vec3b(255, 0, 255)};
    Mat data, labels, src, dst;
    src = imread("../x64/Debug/picture/35.jpg", IMREAD_COLOR);
    if (src.empty()) { //imread returns an empty Mat when the file is missing
        printf("Cannot open the image!\n");
        return;
    }
    src.copyTo(dst);
    for (int i = 0; i < src.rows; i++) { //one CV_32F row sample (B, G, R) per pixel
        for (int j = 0; j < src.cols; j++) {
            Vec3b point = src.at<Vec3b>(i, j);
            Mat tmp = (Mat_<float>(1, 3) << point[0], point[1], point[2]);
            data.push_back(tmp);
        }
    }
    Ptr<EM> model = EM::create();
    model->setClustersNumber(4); //number of mixture components
    model->setCovarianceMatrixType(EM::COV_MAT_SPHERICAL);
    model->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 300, 0.1));
    model->trainEM(data, noArray(), labels, noArray());
    for (int i = 0; i < dst.rows; i++) { //recolour each pixel by its cluster index
        for (int j = 0; j < dst.cols; j++) {
            int index = labels.at<int>(i * dst.cols + j);
            dst.at<Vec3b>(i, j) = colors[index];
        }
    }
    imshow("src", src);
    imshow("dst", dst);
}
/* EM-based region partition: labelled 2-D points (4 classes) are read from a
 * file, one GMM is trained per class, and every 3rd pixel of the canvas is
 * coloured by the class whose GMM yields the highest log-likelihood.
 * Fixes: fopen result was never checked (NULL was passed straight to
 * feof/fscanf) and the FILE* was never closed. */
void PictureDemo::test79()
{
    Mat src, dst;
    vector<Point> trainedPoints;
    vector<int> trainedPointsMarkers;
    //read point coordinates from file
    FILE *fp;
    int flge = 0;
    int fpoint,flabel;
    fp = fopen("../x64/Debug/xml/point1.tx", "r+");
    if (fp == NULL) { //guard added: the original dereferenced a NULL FILE*
        printf("Cannot open the file!\n");
        return;
    }
    Point point;
    while (!feof(fp)) {
        fscanf(fp, "%d", &fpoint);
        if (feof(fp)) break;
        //tokens cycle through: x coordinate, y coordinate, class label
        if ((flge%3==0? point.x = fpoint:
             flge%3==1? point.y = fpoint:
             flge%3==2? flabel = fpoint : -1)<0) {
            fclose(fp); //was leaked on this early-return path
            return;
        }
        if (flge%3 == 2) {
            trainedPoints.push_back(point);
            trainedPointsMarkers.push_back(flabel);
        }
        flge++;
    }
    fclose(fp); //release the file handle (the original never closed it)
    vector<Vec3b> colors(4);
    colors[0] = Vec3b(0, 255, 0);
    colors[1] = Vec3b(0, 0, 255);
    colors[2] = Vec3b(0, 255, 255);
    colors[3] = Vec3b(255, 0, 0);
    src.create(480, 640, CV_8UC3);
    src = Scalar::all(0);
    for(size_t i = 0; i < trainedPoints.size(); i++) { //draw the labelled points
        Scalar c = colors[trainedPointsMarkers[i]];
        circle(src, trainedPoints[i], 3, c, -1);
    }
    src.copyTo(dst);
    imshow("points", src);
    Mat samples;
    Mat(trainedPoints).reshape(1, (int)trainedPoints.size()).convertTo(samples, CV_32F);
    int nmodels = (int)colors.size(); //one GMM per colour class
    vector<Ptr<EM> > em_models(nmodels); //train one model per distinct label
    Mat modelSamples;
    for(int i = 0; i < nmodels; i++) {
        modelSamples.release();
        for(int j = 0; j < (int)trainedPointsMarkers.size(); j++) {
            if(trainedPointsMarkers[j] == i) {
                modelSamples.push_back(samples.row(j));
            }
        }
        if(!modelSamples.empty()) { //train the model for this class
            const int componentCount = 5;
            Ptr<EM> em = EM::create();
            em->setClustersNumber(componentCount); //mixture components in this class's GMM
            em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL); //covariance matrix type
            em->trainEM(modelSamples, noArray(), noArray(), noArray());
            em_models[i] = em;
        }
    }
    Mat testSample(1, 2, CV_32FC1);
    Mat logLikelihoods(1, nmodels, CV_64FC1, Scalar(-DBL_MAX));
    for(int y = 0; y < src.rows; y += 3) {
        for(int x = 0; x < src.cols; x += 3) {
            testSample.at<float>(0) = (float)x;
            testSample.at<float>(1) = (float)y;
            for(int i = 0; i < nmodels; i++) {
                if(!em_models[i].empty()) {
                    logLikelihoods.at<double>(i) = em_models[i]->predict2(testSample, noArray())[0]; //log-likelihood under this class's GMM
                }
            }
            Point maxLoc;
            minMaxLoc(logLikelihoods, 0, 0, 0, &maxLoc);
            dst.at<Vec3b>(y, x) = colors[maxLoc.x]; //colour by the most likely class
        }
    }
    imshow("EM", dst);
}
int SZ = 20; //side length in pixels of each digit tile in the sheet image
int CLASS_N = 10; //number of digit classes (0-9); one class per sheet row
/* Split a digit sheet image into SZ x SZ tiles (digits) and build the
 * matching label vector: row block i of the sheet holds digits of class i.
 * Fix: guard against imread failure before tiling. */
void load_digits(string strName, vector<Mat>& digits, vector<int>& labels)
{
    digits.clear(); //collection of single-digit tiles
    labels.clear(); //label per tile
    Mat image = imread(strName, IMREAD_GRAYSCALE);
    if (image.empty()) { //imread returns an empty Mat on failure
        printf("Cannot open the file!\n");
        return;
    }
    for (int i = 0; i < image.rows; i += SZ) { //tile the sheet
        for (int j = 0; j < image.cols; j += SZ) {
            digits.push_back(image(Rect(j, i, SZ, SZ))); //one SZ x SZ sub-region
        }
    }
    for (int i = 0; i < CLASS_N; i++) { //labels 0 1 2 ... per row block
        for (size_t j = 0; j < digits.size() / CLASS_N; j++) {
            labels.push_back(i); //all tiles of block i get label i
        }
    }
}
/* Deskew a single SZ x SZ digit image using its image moments.
 * The skew coefficient mu11/mu02 drives a shear; the -0.5*SZ*skew term
 * shifts the glyph centre back into place. */
void deskew(Mat& img, Mat& deskewed_img)
{
    Moments mom = moments(img); //central moments of the tile
    if (abs(mom.mu02) < 0.01) { //almost no vertical variance: nothing to correct
        deskewed_img = img.clone();
        return;
    }
    float skew = (float)(mom.mu11 / mom.mu02);
    //2x3 affine matrix built directly instead of an element-by-element fill
    Mat M = (Mat_<float>(2, 3) << 1, skew, -0.5f * SZ * skew,
                                  0, 1,    0);
    warpAffine(img, deskewed_img, M, Size(SZ, SZ), WARP_INVERSE_MAP | INTER_LINEAR); //apply the shear
}
/* 测试样本拼接成一幅图 */
void mosaic(int width, vector<Mat>& images, Mat& grid)
{
int iWidth = SZ * width;
int iHeight = SZ * (int)ceil((double)images.size() / width);
if (!images.empty()) {
grid = Mat(Size(iWidth, iHeight), images[0].type());
for (size_t i = 0; i < images.size(); i++) { //马赛克网格:测试集小图像拼接成grid
Mat location_on_grid = grid(Rect(SZ * ((int)i % width), SZ * ((int)i / width), SZ, SZ));
images[i].copyTo(location_on_grid);
}
}
}
/* Evaluate model predictions against the ground truth: prints a per-sample
result plus the overall error rate, and builds a mosaic `mos` in which
mispredicted tiles are tinted red.
predictions: model outputs; digits: test tiles; labels: ground truth;
mos: output visualisation image. */
void EvaluateModel(vector<float>& predictions, vector<Mat>& digits, vector<int>& labels, Mat& mos)
{
double err = 0;
for (size_t i = 0; i < predictions.size(); i++) {
if ((int)predictions[i] != labels[i]) {
err++; //count failed predictions
cout << "预测失败,预测:" << (int)predictions[i] << ",真实:"<< labels[i] << endl;
}
else {
cout << "预测成功,预测:" << (int)predictions[i] << ",真实:"<< labels[i] << endl;
}
}
err /= predictions.size();
cout << format("预测错误百分比: %.2f %%", err * 100) << endl;
vector<Mat> vis; //per-tile visualisation images
for (size_t i = 0; i < digits.size(); i++) {
Mat img;
cvtColor(digits[i], img, COLOR_GRAY2BGR);
if ((int)predictions[i] != labels[i]) { //mispredicted tile: tint it red
for (int j = 0; j < img.rows; j++) {
for (int k = 0; k < img.cols; k++) {
img.at<Vec3b>(j, k)[0] = 0;
img.at<Vec3b>(j, k)[1] = 0; //zero the B and G channels so only red remains; black pixels are unaffected
}
}
}
vis.push_back(img);
}
mosaic(25, vis, mos); //tile the images, 25 per row (red = wrong prediction)
}
/* Weighted histogram (numpy-bincount style): bins[k] accumulates the sum of
 * weights at every position where x == k.
 * x        gradient-direction indices (CV_32S)
 * weights  gradient magnitudes (CV_32F)
 * iMinLen  minimum number of bins to produce (bin_n)
 * bins     output histogram
 * Fix: the vector must hold (max index + 1) entries; the original allocated
 * only max((int)dMaxX, iMinLen) and wrote one past the end whenever the
 * largest index reached the vector size. */
void bincount(Mat& x, Mat& weights, int iMinLen, vector<double>& bins)
{
    double dMaxX = 0;
    minMaxLoc(x, NULL, &dMaxX); //largest direction index present in x
    bins = vector<double>(max((int)dMaxX + 1, iMinLen)); //+1 so index (int)dMaxX is addressable
    for (int i = 0; i < x.rows; i++) {
        for (int j = 0; j < x.cols; j++) {
            bins[x.at<int>(i, j)] += weights.at<float>(i, j); //sum magnitudes per direction
        }
    }
}
/* Compute a HOG-style descriptor for each digit tile and stack them into the
rows of `hog`: each SZ x SZ tile is split into 4 cells, a 16-bin
magnitude-weighted orientation histogram is computed per cell, and the
concatenated 64-value histogram is Hellinger-normalised. */
void PreprocessHog(vector<Mat>& digits, Mat& hog)
{
//each 20x20 tile is split into 4 10x10 cells with a 16-bin orientation histogram per cell
int bin_n = 16; //orientation bins per cell
int iHalfCell = SZ / 2;
double eps = 1e-7; //small constant to avoid division by zero
hog = Mat(Size(4 * bin_n, (int)digits.size()), CV_32F); //descriptor matrix, one row per tile
for (int n = 0; n < digits.size(); n++) {
Mat gx;
Sobel(digits[n], gx, CV_32F, 1, 0); //horizontal gradient
Mat gy;
Sobel(digits[n], gy, CV_32F, 0, 1); //vertical gradient
Mat mag, ang;
cartToPolar(gx, gy, mag, ang); //cartesian -> polar: per-pixel gradient magnitude and angle
Mat bin(ang.size(), CV_32S);
for (int i = 0; i < ang.rows; i++) {
for (int j = 0; j < ang.cols; j++) {
bin.at<int>(i, j) = (int)(bin_n * ang.at<float>(i, j) / (2 * CV_PI)); //quantise the angle into 0 .. bin_n-1
}
}
//split the orientation map into the 4 quadrant cells (each iHalfCell x iHalfCell, i.e. 10x10)
Mat bin_cells[] = {bin(Rect(0, 0, iHalfCell, iHalfCell)), //top-left cell
bin(Rect(iHalfCell, 0, iHalfCell, iHalfCell)), //top-right cell
bin(Rect(0, iHalfCell, iHalfCell, iHalfCell)), //bottom-left cell
bin(Rect(iHalfCell, iHalfCell, iHalfCell, iHalfCell))}; //bottom-right cell
Mat mag_cells[] = {mag(Rect(0, 0, iHalfCell, iHalfCell)),
mag(Rect(iHalfCell, 0, iHalfCell, iHalfCell)),
mag(Rect(0, iHalfCell, iHalfCell, iHalfCell)),
mag(Rect(iHalfCell, iHalfCell, iHalfCell, iHalfCell))}; //matching magnitude cells
vector<double> hist; //concatenated histogram, 4 * bin_n entries
hist.reserve(4 * bin_n); //pre-allocate capacity for the 64 values
for (int i = 0; i < 4; i++) { //one histogram per cell
vector<double> partial_hist; //magnitude-weighted orientation histogram of this cell
bincount(bin_cells[i], mag_cells[i], bin_n, partial_hist);
hist.insert(hist.end(), partial_hist.begin(), partial_hist.end());
}
double sum = 0;
for (size_t i = 0; i < hist.size(); i++) { //transform to the Hellinger kernel:
sum += hist[i];
}
for (size_t i = 0; i < hist.size(); i++) { //L1-normalise then take the square root
hist[i] /= sum + eps;
hist[i] = sqrt(hist[i]);
}
double hist_norm = norm(hist);
for (size_t i = 0; i < hist.size(); i++) { //finally L2-normalise into the descriptor row
hog.at<float>((int)n, (int)i) = (float)(hist[i] / (hist_norm + eps)); //HOG descriptor value
}
}
}
/* Shuffle the digit tiles and their labels together so that the
 * tile <-> label correspondence is preserved. */
void shuffle(vector<Mat>& digits, vector<int>& labels)
{
    //identity index vector 0, 1, ..., digits.size()-1
    vector<int> order(digits.size());
    for (size_t i = 0; i < order.size(); i++) {
        order[i] = (int)i;
    }
    randShuffle(order); //random permutation of the indices
    vector<Mat> permutedDigits(digits.size());
    vector<int> permutedLabels(labels.size());
    for (size_t i = 0; i < order.size(); i++) {
        //scatter element i to its permuted slot, same slot for tile and label
        permutedDigits[order[i]] = digits[i];
        permutedLabels[order[i]] = labels[i];
    }
    digits = permutedDigits; //replace with the shuffled tiles
    labels = permutedLabels; //replace with the matching shuffled labels
}
/* Handwritten-digit classification demo: KNearest and SVM trained on HOG
features of deskewed digit tiles; shows a mosaic of the test set with
mispredicted tiles in red, then saves the SVM model. */
void PictureDemo::test80()
{
vector<Mat> digits;
vector<int> labels; //ground-truth labels
load_digits("../x64/Debug/picture/47.png", digits, labels); //split the digit sheet into tiles + labels
shuffle(digits, labels);
vector<Mat> digits2; //deskewed digit tiles
for (size_t i = 0; i < digits.size(); i++) {
Mat deskewed_digit;
deskew(digits[i], deskewed_digit);
digits2.push_back(deskewed_digit);
}
Mat samples; //HOG descriptor matrix, one row per tile
PreprocessHog(digits2, samples);
//dataset split into training and test sets
int iTrain = 4500; //first 4500 samples form the training set
Mat test_set; //mosaic image of the test-set tiles
vector<Mat> vDigitsTest(digits2.begin() + iTrain, digits2.end()); //test-set tiles
mosaic(25, vDigitsTest, test_set); //tile them, 25 per row
imshow("test set", test_set); //show the test set
Mat samples_train = samples(Rect(0, 0, samples.cols, iTrain)); //training HOG descriptors
Mat samples_test = samples(Rect(0, iTrain, samples.cols, samples.rows - iTrain)); //test HOG descriptors
vector<int> labels_train(labels.begin(), labels.begin() + iTrain); //training labels
vector<int> labels_test(labels.begin() + iTrain, labels.end()); //test labels
vector<float> predictions; //predictions for samples_test
Mat vis;
//k-nearest-neighbour (kNN) classifier
Ptr<KNearest> k_nearest;
k_nearest = KNearest::create();
k_nearest->train(samples_train, ROW_SAMPLE, labels_train);
k_nearest->findNearest(samples_test, 5, predictions); //k = 5 neighbours; an odd k avoids ties
EvaluateModel(predictions, vDigitsTest, labels_test, vis);
imshow("KNearest", vis); //kNN result mosaic (red tiles = mispredictions)
k_nearest.release();
//support vector machine
Ptr<SVM> svm;
svm = SVM::create();
svm->setGamma(5.383);
svm->setC(2.67);
svm->setKernel(SVM::RBF);
svm->setType(SVM::C_SVC);
svm->train(samples_train, ROW_SAMPLE, labels_train);
svm->predict(samples_test, predictions);
EvaluateModel(predictions, vDigitsTest, labels_test, vis);
imshow("SVM", vis); //SVM result mosaic (red tiles = mispredictions)
svm->save("../x64/Debug/xml/digits_svm.yml");
svm.release();
}
/* SVM */
void PictureDemo::test81()
{
int iSampleNnums = 200;
Mat trainData(iSampleNnums, 2, CV_32FC1); //200行,2列的数据,相当于坐标
Mat trainClassess(iSampleNnums, 1, CV_32SC1); //200行,1列的数据,相当于坐标的结果
Mat img(500, 500, CV_8UC3, Scalar::all(0));
Mat sample(1, 2, CV_32FC1); //样本点
Mat trainData1, trainData2, trainClasses1, trainClasses2;
RNG rng = RNG(-1);
trainData1 = trainData.rowRange(0, iSampleNnums / 2);
rng.fill(trainData1, RNG::NORMAL, Scalar(100, 100), Scalar(40, 40)); //生成均值为(100,100),方差为(40,40)的随机数据
trainData2 = trainData.rowRange(iSampleNnums / 2, iSampleNnums);
rng.fill(trainData2, RNG::NORMAL, Scalar(400, 400), Scalar(40, 40)); //生成均值为(400,400),方差为(40,40)的随机数据
cout << trainData << endl;
//trainClasses1和trainClassess的前100行绑定
trainClasses1 = trainClassess.rowRange(0, iSampleNnums / 2);
trainClasses1 = Scalar::all(1);
trainClasses2 = trainClassess.rowRange(iSampleNnums / 2, iSampleNnums);
trainClasses2 = Scalar::all(-1);
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setC(1.0);
svm->setKernel(SVM::POLY);
svm->setGamma(1.0);
svm->setDegree(2.0);
svm->setCoef0(0.0);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 500, 1e-6));
Ptr<TrainData> TrainData = TrainData::create(trainData, SampleTypes::ROW_SAMPLE, trainClassess);
svm->train(TrainData);
for (int i = 0; i < img.rows; ++i) {
for (int j = 0; j < img.cols; ++j) {
Mat sampleMat = (Mat_<float>(1, 2) << i, j);
float response = svm->predict(sampleMat);
if (response == 1) {
img.at<Vec3b>(i, j) = Vec3b(0, 0, 0);
}
else if (response == -1) {
img.at<Vec3b>(i, j) = Vec3b(255, 255, 255);
}
}
}
imshow("111", img);
for (int i = 0; i < iSampleNnums / 2; i++) {
Point pt;
pt.x = round(trainData1.at<float>(i, 0));
pt.y = round(trainData1.at<float>(i, 1));
circle(img, pt, 1, Scalar(255, 255, 255), 1, LINE_AA);
pt.x = round(trainData2.at<float>(i, 0));
pt.y = round(trainData2.at<float>(i, 1));
circle(img, pt, 1, Scalar(0, 0, 0), 1, LINE_AA);
}
Mat image(img);
Mat sv = svm->getSupportVectors();
for (int i = 0; i < sv.rows; i++) {
float* v = sv.ptr<float>(i);
circle(image, Point((int)v[0], (int)v[1]), 6, Scalar(255, 0, 0), 1, LINE_AA);
}
imshow("img", img);
imshow("image", image);
}
/* MPL */
void PictureDemo::test82()
{
const int iW = 8;
const int iH = 8;
const int iClass = 2;
const int iSum = 700;
float training_data[iClass * iSum][iW * iH] = { {0} }; //把图像的每个像素装入二维数组的每一行中,其中每一行就是一幅图片
float labels[iClass * iSum][iClass] = { {0} }; //标签也要是二维数组,它的每一行都是一幅图片的标签值
for (int i=0; i<iClass*iSum; i++) { //最好自己做个视频,转换成图片集
Mat src, resize_img, train_img;
string img_path = format("../x64/Debug/picture/test1/%d.png", i+1); //图片地址
src = imread(img_path, CV_32FC1); //读入图片
resize(src, resize_img, Size(iW, iH)); //裁剪8×8的大小
normalize(resize_img, train_img, 0, 255, NORM_MINMAX); //归一化0-255范围
for (int j=0; j< iW*iH; j++) {
training_data[i][j] = (float)train_img.data[j]; //读入二维数组,数组的每一行都是一幅图片的数据
}
}
for (int i=0; i<iClass * iSum; i++) { //(0是闭眼,1是睁眼)
for (int j=0; j<iClass; j++) {
if (i < 796) {
labels[i][j] = 0;
}
else {
labels[i][j] = 1;
}
}
}
//数组转化成mat类型
Mat labels_Mat(iClass * iSum, iClass, CV_32FC1, labels);
Mat trainingDataMat(iClass * iSum, iW * iH, CV_32FC1, training_data);
// 共5层:输入层加3个隐藏层加一个输出层 (Mat_<int>(1, 5),输入层神经元为8*8、隐藏层第一层神经元为128第二层神经元为128第三层神经元为64,输出层神经元为2(img_class)
Mat layerSizes = (Mat_<int>(1, 5) << iW * iH, 128, 128, 64, iClass);
Ptr<TrainData> trainData = TrainData::create(trainingDataMat, ROW_SAMPLE, labels_Mat);
Ptr<ANN_MLP> Net_5 = ANN_MLP::create();
Net_5->setLayerSizes(layerSizes);//设置各层的神经元个数
Net_5->setTrainMethod(ml::ANN_MLP::BACKPROP, 0.01, 0.9); //MLP的训练方法(0.01, 0.9这2个参数很重要)
Net_5->setActivationFunction(ml::ANN_MLP::SIGMOID_SYM); //激活函数
Net_5->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, 10000, 0.0001));//可以指定最大迭代次数(maxCount)或迭代之间的误差变化大小(epsilon)
Net_5->train(trainData);
//保存训练结果
//Net_5->save("H:\\net5xml\\Net_5.xml");
//预测图像
Mat text = imread("../x64/Debug/picture/test1/1404.png", CV_32FC1); //这儿有问题,如果第二个参数改变预测结果会不准
imshow("text", text);
Mat dst, output;
resize(text, dst, Size(iW, iH)); //裁剪8×8的大小
normalize(dst, dst, 0, 255, NORM_MINMAX); //0-255范围
Mat_<float> testMat(1, iW * iH); //将8*8=64个像素装入这个一维向量中
for (int i = 0; i < iW * iH; i++) {
testMat.at<float>(0, i) = (float)dst.data[i];
}
float a = NULL;
a = Net_5->predict(testMat, output);
if (a == 0) {
cout << "测试结果:闭眼" << endl ;
}
else {
cout << "测试结果:睁眼" << endl ;
}
}
/* Poisson (seamless) cloning of a text image onto a wood texture, shown in
 * NORMAL_CLONE and MONOCHROME_TRANSFER modes.
 * Fix: removed the unused local `mixed_clone`. */
void PictureDemo::test83()
{
    Mat src = imread("../x64/Debug/picture/HDR/iloveyouticket.jpg");
    Mat dst = imread("../x64/Debug/picture/HDR/wood-texture.jpg");
    Mat src_mask = 255 * Mat::ones(src.rows, src.cols, src.depth()); //all-white mask: clone the whole source
    Point center(dst.cols/2,dst.rows/2);
    //seamlessly clone src into dst and show both blend modes
    Mat normal_clone;
    Mat nonochrome_clone;
    seamlessClone(src, dst, src_mask, center, normal_clone, NORMAL_CLONE);
    seamlessClone(src, dst, src_mask, center, nonochrome_clone, MONOCHROME_TRANSFER);
    imshow("opencv-normal-clone-example.jpg", normal_clone);
    imshow("opencv-nonochrome-clone-example.jpg", nonochrome_clone);
}
/* Poisson (seamless) image cloning: an airplane is cloned into a sky image
using a hand-drawn polygonal mask. */
void PictureDemo::test84()
{
Mat src = imread("../x64/Debug/picture/HDR/airplane.jpg");
Mat dst = imread("../x64/Debug/picture/HDR/sky.jpg");
Mat src_mask = Mat::zeros(src.rows, src.cols, src.depth()); //mask image, filled below
//polygon roughly outlining the target object (the airplane)
Point poly[1][7];
poly[0][0] = Point(4, 80);
poly[0][1] = Point(30, 54);
poly[0][2] = Point(151, 63);
poly[0][3] = Point(254, 37);
poly[0][4] = Point(298, 90);
poly[0][5] = Point(272, 134);
poly[0][6] = Point(43, 122);
const Point* polygons[1] = { poly[0] };
int num_points[] = { 7 };
//fill the polygon (white = region to clone)
fillPoly(src_mask, polygons, num_points, 1, Scalar(255, 255, 255));
imshow("src_mask", src_mask);
//centre position of the cloned region inside the destination image
Point center(800, 100);
//seamlessly clone src into dst and show the result
Mat output;
seamlessClone(src, dst, src_mask, center, output, NORMAL_CLONE);
imshow("result", output);
}
/* cvui demo: a window with a button and a click counter. */
void PictureDemo::test85()
{
string strName = "CVUI Hello World!";
Mat frame = Mat(200, 500, CV_8UC3);
int count = 0;
namedWindow(strName); //without namedWindow() cvui would not receive mouse events
cvui::init(strName); //initialise the cvui window
while (true) {
frame = Scalar(49, 52, 49); //repaint the background each frame
if (cvui::button(frame, 110, 80, "Hello, world!")) { //button() returns true when clicked
count++; //count the clicks
}
cvui::printf(frame, 250, 90, 0.5, 0xff0000, "Button click count: %d", count); //text at (250,90), font scale 0.5, colour 0xff0000
cvui::update(); //must be called AFTER all UI components; handles mouse clicks etc. behind the scenes
imshow(strName, frame);
if (waitKey(20) == 27) { //ESC leaves the loop
break;
}
}
}
/* cvui demo: trackbars controlling the Canny edge-detector thresholds. */
void PictureDemo::test86()
{
string strName = "CVUI Hello World!";
Mat lena = imread("../x64/Debug/picture/7.jpg");
Mat frame = lena.clone(); //background image shown each frame
int low_threshold = 50, high_threshold = 150; //Canny thresholds
bool use_canny = false; //whether edge detection is enabled
namedWindow(strName);
cvui::init(strName);
while (true) {
if (use_canny) { //edge detection enabled
cvtColor(lena, frame, COLOR_BGR2GRAY);
Canny(frame, frame, low_threshold, high_threshold, 3);
cvtColor(frame, frame, COLOR_GRAY2BGR);
}
else { //show the original image
lena.copyTo(frame);
}
cvui::window(frame, 10, 50, 180, 180, "Settings"); //a 180x180 "Settings" panel at (10,50)
cvui::checkbox(frame, 15, 80, "Use Canny Edge", &use_canny); //checkbox at (15,80)
cvui::trackbar(frame, 15, 110, 165, &low_threshold, 5, 150); //slider for the lower threshold
cvui::trackbar(frame, 15, 180, 165, &high_threshold, 80, 300); //slider for the upper threshold
cvui::update(); //must be called AFTER all UI components; handles mouse clicks etc. behind the scenes
imshow(strName, frame);
if (waitKey(30) == 27) {
break;
}
}
}
/** 图像仿射变换
* img1 输入图像
* img2 输出图像
* tri1 输入三角形坐标点
* tri2 输出三角形坐标点
*/
void warpTriangle(Mat &img1, Mat &img2, vector<Point2f> tri1, vector<Point2f> tri2)
{
Rect r1 = boundingRect(tri1); //得到每个三角形的最小外接矩形
Rect r2 = boundingRect(tri2);
vector<Point2f> tri1Cropped, tri2Cropped; //获得剪裁后的坐标点,输入和输出三角形坐标点
vector<Point> tri2CroppedInt;
for (int i = 0; i < 3; i++) { //输出三角形坐标点int形式
tri1Cropped.push_back(Point2f(tri1[i].x - r1.x, tri1[i].y - r1.y));
tri2Cropped.push_back(Point2f(tri2[i].x - r2.x, tri2[i].y - r2.y));
tri2CroppedInt.push_back(Point((int)(tri2[i].x - r2.x), (int)(tri2[i].y - r2.y))); //fillConvexPoly需要一个Point而不是Point2f的向量
}
Mat img1Cropped; //应用仿射变换到三角形外接矩形
img1(r1).copyTo(img1Cropped); //提取外接矩形区域
Mat img2Cropped = Mat::zeros(r2.height, r2.width, img1Cropped.type()); //将刚刚找到的仿射变换应用于src图像
Mat warpMat = getAffineTransform(tri1Cropped, tri2Cropped); //提取仿射变换矩阵
warpAffine(img1Cropped, img2Cropped, warpMat, img2Cropped.size(), INTER_LINEAR, BORDER_REFLECT_101); //应用仿射变换
Mat mask = Mat::zeros(r2.height, r2.width, CV_32FC3); //获得掩模
fillConvexPoly(mask, tri2CroppedInt, Scalar(1.0, 1.0, 1.0), 16, 0); //填充多边形
//应用掩模,获得输出图
multiply(img2Cropped, mask, img2Cropped); //提取掩模对应的图像区域
multiply(img2(r2), Scalar(1.0, 1.0, 1.0) - mask, img2(r2)); //获得输出图像掩模区域
img2(r2) = img2(r2) + img2Cropped; //保存仿射变换结果
}
/* Triangle affine-warp demo: warps one triangle of the input image onto a
 * different triangle in an all-white output image, then outlines both.
 * Fix: one vertex was pushed as Point2d into a vector<Point2f> — now Point2f
 * for consistency (values unchanged). */
void PictureDemo::test87()
{
    Mat imgIn = imread("../x64/Debug/picture/robot.jpg");
    imgIn.convertTo(imgIn, CV_32FC3, 1 / 255.0);
    Mat imgOut = Mat::ones(imgIn.size(), imgIn.type());
    imgOut = Scalar(1.0, 1.0, 1.0); //output starts as a pure white image
    vector<Point2f> triIn; //source triangle vertices
    triIn.push_back(Point2f(360, 200));
    triIn.push_back(Point2f(60, 250)); //was Point2d: mixed type in a Point2f vector
    triIn.push_back(Point2f(450, 400));
    vector<Point2f> triOut; //destination triangle vertices
    triOut.push_back(Point2f(400, 200));
    triOut.push_back(Point2f(160, 270));
    triOut.push_back(Point2f(400, 400));
    warpTriangle(imgIn, imgOut, triIn, triOut); //affine warp
    //convert back to 8-bit before drawing: OpenCV anti-aliasing does not work on CV_32FC3
    imgIn.convertTo(imgIn, CV_8UC3, 255.0);
    imgOut.convertTo(imgOut, CV_8UC3, 255.0);
    Scalar color = Scalar(255, 150, 0);
    vector<Point> triInInt, triOutInt;
    for (int i = 0; i < 3; i++) {
        triInInt.push_back(Point(triIn[i].x, triIn[i].y));
        triOutInt.push_back(Point(triOut[i].x, triOut[i].y));
    }
    polylines(imgIn, triInInt, true, color, 2, 16); //outline the triangles
    polylines(imgOut, triOutInt, true, color, 2, 16);
    imshow("Input", imgIn);
    imshow("Output", imgOut);
}
/* Non-photorealistic rendering filters demo. */
void PictureDemo::test88()
{
Mat im = imread("../x64/Debug/picture/cow.jpg");
Mat imout, imout_gray;
//edge-preserving smoothing filters (recursive and normalised-convolution variants)
edgePreservingFilter(im, imout, RECURS_FILTER);
imshow("111", imout);
edgePreservingFilter(im, imout, NORMCONV_FILTER);
imshow("222", imout);
//detail-enhancing filter
detailEnhance(im, imout);
imshow("333", imout);
//pencil-sketch filter (grayscale and colour outputs)
pencilSketch(im, imout_gray, imout);
imshow("444", imout_gray);
//stylization filter
stylization(im, imout);
imshow("555", imout);
}
// Fit an asymmetric generalized Gaussian distribution (AGGD) to the
// coefficients in structdis (CV_64F), as used by the BRISQUE quality metric.
// Outputs the left/right standard deviations (lsigma_best / rsigma_best) and
// the best-fitting shape parameter (gamma_best); returns a copy of the input.
// NOTE(review): if structdis holds no negative (or no positive) values, the
// corresponding count stays 0 and the pow(.../count, 0.5) below divides by
// zero — confirm callers always pass mixed-sign data.
Mat AGGDfit(Mat structdis, double& lsigma_best, double& rsigma_best, double& gamma_best)
{
long int poscount = 0, negcount = 0; //counts of positive / negative coefficients
double possqsum = 0, negsqsum = 0, abssum = 0;
for (int i = 0; i < structdis.rows; i++) {
for (int j = 0; j < structdis.cols; j++) {
double pt = structdis.at<double>(i, j);
if (pt > 0) { //accumulate positive-side statistics
poscount++;
possqsum += pt * pt;
abssum += pt;
}
else if (pt < 0) { //accumulate negative-side statistics
negcount++;
negsqsum += pt * pt;
abssum -= pt;
}
}
}
lsigma_best = pow(negsqsum / negcount, 0.5); //left-side standard deviation
rsigma_best = pow(possqsum / poscount, 0.5); //right-side standard deviation
double gammahat = lsigma_best / rsigma_best;
long int totalcount = (structdis.cols) * (structdis.rows);
double rhat = pow(abssum / totalcount, static_cast<double>(2)) / ((negsqsum + possqsum) / totalcount);
double rhatnorm = rhat * (pow(gammahat, 3) + 1) * (gammahat + 1) / pow(pow(gammahat, 2) + 1, 2);
double prevgamma = 0;
double prevdiff = 1e10;
float sampling = 0.001;
//the sampling step can be coarsened to speed this up at some loss of accuracy
for (float gam = 0.2; gam < 10; gam += sampling) { //grid search over the shape parameter
double r_gam = tgamma(2 / gam) * tgamma(2 / gam) / (tgamma(1 / gam) * tgamma(3 / gam));
double diff = abs(r_gam - rhatnorm);
if (diff > prevdiff) break; //error started rising: the previous gam was the minimum
prevdiff = diff;
prevgamma = gam;
}
gamma_best = prevgamma;
return structdis.clone();
}
//Extract the 36 BRISQUE features (2 scales x 18 features) from a BGR image:
//per scale, the AGGD parameters of the MSCN coefficients plus the AGGD
//parameters of the 4 directional pairwise-product maps.
void ComputeBrisqueFeature(Mat& orig, vector<double>& vFeature)
{
Mat imOrigBw(orig.size(), CV_64F, 1); //grayscale working image
cvtColor(orig, imOrigBw, COLOR_BGR2GRAY); //convert to grayscale
imOrigBw.convertTo(imOrigBw, 1.0 / 255); //NOTE(review): convertTo's 2nd argument is an int rtype; 1.0/255 truncates to 0 (CV_8U), so no scaling happens here — the real normalisation is done per scale below; presumably convertTo(..., CV_64F, 1.0/255) was intended — confirm
int scalenum = 2; //number of scales
for (int i = 1; i <= scalenum; i++) {
Size dst_size(imOrigBw.cols / pow((double)2, i - 1), imOrigBw.rows / pow((double)2, i - 1)); //target size for this scale
Mat imDistScaled;
resize(imOrigBw, imDistScaled, dst_size, 0, 0, INTER_CUBIC);
imDistScaled.convertTo(imDistScaled, CV_64FC1, 1.0 / 255.0);
//compute the MSCN coefficients
Mat mu(imDistScaled.size(), CV_64FC1, 1);
GaussianBlur(imDistScaled, mu, Size(7, 7), 1.166); //local mean
Mat mu_sq;
pow(mu, double(2.0), mu_sq);
//local variance: E[I^2] - (E[I])^2, then square root
Mat sigma(imDistScaled.size(), CV_64FC1, 1);
multiply(imDistScaled, imDistScaled, sigma);
GaussianBlur(sigma, sigma, Size(7, 7), 1.166);
subtract(sigma, mu_sq, sigma);
pow(sigma, double(0.5), sigma);
add(sigma, Scalar(1.0 / 255), sigma); //avoid zero local variance: the MSCN computation divides by sigma below
//MSCN = (I - mu) / sigma
Mat structdis(imDistScaled.size(), CV_64FC1, 1);
subtract(imDistScaled, mu, structdis);
divide(structdis, sigma, structdis);
double lsigma_best, rsigma_best, gamma_best; //left std dev, right std dev, shape parameter
structdis = AGGDfit(structdis, lsigma_best, rsigma_best, gamma_best); //fit an asymmetric generalized Gaussian
vFeature.push_back(gamma_best); //shape feature
vFeature.push_back((lsigma_best * lsigma_best + rsigma_best * rsigma_best) / 2); //variance feature
//pairwise-product features in 4 orientations (H, V, D1, D2)
int shifts[4][2] = { {0,1},{1,0},{1,1},{-1,1} };
for (int itr_shift = 1; itr_shift <= 4; itr_shift++) {
int* reqshift = shifts[itr_shift - 1]; //current shift direction
Mat shifted_structdis(imDistScaled.size(), CV_64F, 1); //shifted copy used for the pairwise product
Mat OrigArr(structdis);
Mat ShiftArr(shifted_structdis);
//build the shifted image for this direction (zero outside the frame)
for (int i = 0; i < structdis.rows; i++) {
for (int j = 0; j < structdis.cols; j++) {
if (i + reqshift[0] >= 0 && i + reqshift[0] < structdis.rows && j + reqshift[1] >= 0 && j + reqshift[1] < structdis.cols) {
ShiftArr.at<double>(i, j) = OrigArr.at<double>(i + reqshift[0], j + reqshift[1]);
}
else {
ShiftArr.at<double>(i, j) = 0;
}
}
}
shifted_structdis = ShiftArr.clone();
multiply(structdis, shifted_structdis, shifted_structdis); //pairwise product map
shifted_structdis = AGGDfit(shifted_structdis, lsigma_best, rsigma_best, gamma_best);
double constant = sqrt(tgamma(1 / gamma_best)) / sqrt(tgamma(3 / gamma_best));
double meanparam = (rsigma_best - lsigma_best) * (tgamma(2 / gamma_best) / tgamma(1 / gamma_best)) * constant;
//push the AGGD parameters of the pairwise product as features
vFeature.push_back(gamma_best);
vFeature.push_back(meanparam);
vFeature.push_back(pow(lsigma_best, 2));
vFeature.push_back(pow(rsigma_best, 2));
}
}
}
/* BRISQUE no-reference image quality score via a pre-trained libsvm model:
extracts the 36 BRISQUE features, rescales them to [-1, 1] with hard-coded
per-feature ranges, and runs svm_predict_probability. */
void PictureDemo::test89()
{
/*float szRescale[36][2];
char buff[100];
FILE* range_file = fopen("../x64/Debug/allrange", "r");
if (range_file == NULL) {
return;
}
fgets(buff, 100, range_file);
fgets(buff, 100, range_file);
for (int i = 0; i < 36; ++i) {
float a, b, c;
fscanf(range_file, "%f %f %f", &a, &b, &c);
szRescale[i][0] = b;
szRescale[i][1] = c;
}*/
//NOTE: the values read from "allrange" differed and gave useless quality scores, so the hard-coded ranges below are used instead
float min_[36] = {0.336999, 0.019667, 0.230000, -0.125959, 0.000167, 0.000616, 0.231000, -0.125873, 0.000165, 0.000600, 0.241000, -0.128814,
0.000179, 0.000386, 0.243000, -0.133080, 0.000182, 0.000421, 0.436998, 0.016929, 0.247000, -0.200231, 0.000104, 0.000834, 0.257000,
-0.200017, 0.000112, 0.000876, 0.257000, -0.155072, 0.000112, 0.000356, 0.258000, -0.154374, 0.000117, 0.000351};
float max_[36] = {9.999411, 0.807472, 1.644021, 0.202917, 0.712384, 0.468672, 1.644021, 0.169548, 0.713132, 0.467896, 1.553016,
0.101368, 0.687324, 0.533087, 1.554016, 0.101000, 0.689177, 0.533133, 3.639918, 0.800955, 1.096995, 0.175286, 0.755547,
0.399270, 1.095995, 0.155928, 0.751488, 0.402398, 1.041992, 0.093209, 0.623516, 0.532925, 1.042992, 0.093714, 0.621958, 0.534484};
double qualityscore; //final image quality score
svm_model* model;
Mat orig = imread("../x64/Debug/picture/images/original-rotated-image.jpg", 1);
vector<double> vFeatures; //feature vector
ComputeBrisqueFeature(orig, vFeatures); //compute the 36 BRISQUE features
if ((model = svm_load_model("../x64/Debug/allmodel")) == 0) { //load the pre-trained svm model
fprintf(stderr, "can't open model file allmodel\n");
exit(1);
}
svm_node x[37]; //36 features plus a terminator node
for (int i = 0; i < 36; ++i) { //rescale each feature to [-1, 1] and fill the svm_node array
float min = min_[i];
float max = max_[i];
x[i].value = -1 + (2.0 / (max - min) * (vFeatures[i] - min)); //normalisation
x[i].index = i + 1; //libsvm feature indices are 1-based
}
x[36].index = -1; //sentinel terminating the node array
int nr_class = svm_get_nr_class(model); //sizes the probability buffer
double* prob_estimates = (double*)malloc(nr_class * sizeof(double));
qualityscore = svm_predict_probability(model, x, prob_estimates); //SVM prediction
free(prob_estimates);
svm_free_and_destroy_model(&model);
cout << "image quality score:" << qualityscore << endl;
}
// OpenCV self-test program (common functions, OpenCV 4.6.0)
// first published 2023-09-18 10:16:10