高斯运动物体检测
#include "gaussians_back.h"
#include "gaussians_back_start.h"
#include<opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main()
{
VideoCapture cap(0);//不使用电脑摄像头可删
Mat frame;//图像
Mat AT = Mat::zeros(Size(640, 480), CV_8UC1);//统计各点被标记次数
Ptr<BackgroundSubtractor>back;//背景
gaussians_back_start::Gaussians_Back_Start(back);
while (1)
{
cap >> (frame);//若使用其他摄像头,删除此句,给frame赋值任意彩色图像即可
Mat frame_gray;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
vector<RotatedRect>target;
gaussians_back::Gaussians_Back(frame_gray, back, target, AT, 30);
for (size_t i = 0; i < target.size(); i++)
rectangle(frame, target[i].boundingRect(), Scalar(0, 255, 0), 5, 8, 0);
imshow("彩", frame);
//imshow("灰", frame_gray);
waitKey(1);
}
cap.release();//不使用电脑摄像头可删除此句
}
SVM识别手写数字 图像分割
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
char ad[128] = { 0 };
int filename = 0, filenum = 0;
Mat img = imread("digits.jpg");
Mat gray;
imshow("img", img);
cvtColor(img, gray, CV_BGR2GRAY);
int b = 20;
int m = gray.rows / b; //原图为1000*2000
int n = gray.cols / b; //裁剪为5000个20*20的小图块
for (int i = 0; i < m; i++)
{
int offsetRow = i * b; //行上的偏移量
if (i % 5 == 0 && i != 0)
{
filename++;
filenum = 0;
}
for (int j = 0; j < n; j++)
{
int offsetCol = j * b; //列上的偏移量
sprintf_s(ad, "C:\\Users\\独白\\Desktop\\pic\\%d\\%d.jpg", filename, filenum++);
//截取20*20的小块
Mat tmp;
gray(Range(offsetRow, offsetRow + b), Range(offsetCol, offsetCol + b)).copyTo(tmp);
imwrite(ad, tmp);
cout << "1111111111" << endl;
}
}
waitKey(0);
return 0;
}
SVM手写数字 训练数据集
#include <stdio.h>
#include <time.h>
#include <opencv2/opencv.hpp>
#include <opencv/cv.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
#include <io.h>
using namespace std;
using namespace cv;
void getFiles(string path, vector<string>& files);
void get_1(Mat& trainingImages, vector<int>& trainingLabels);
void get_0(Mat& trainingImages, vector<int>& trainingLabels);
int main()
{
//获取训练数据
Mat classes;
Mat trainingData;
Mat trainingImages;
vector<int> trainingLabels;
get_1(trainingImages, trainingLabels);
get_0(trainingImages, trainingLabels);
Mat(trainingImages).copyTo(trainingData);
trainingData.convertTo(trainingData, CV_32FC1);
Mat(trainingLabels).copyTo(classes);
//配置SVM训练器参数
Ptr<ml::SVM> svm = ml::SVM::create();
svm->setType(ml::SVM::C_SVC);
svm->setKernel(ml::SVM::LINEAR);
svm->setDegree(0);
svm->setTermCriteria(TermCriteria(CV_TERMCRIT_ITER, 1000, 0.01));
svm->setGamma(1);
svm->setCoef0(0);
svm->setC(1);
svm->setNu(0);
svm->setP(0);
cout << "开始训练!!!" << endl;
//训练
svm->train(trainingData, cv::ml::ROW_SAMPLE, classes);
//保存模型
svm->save("C:\\pic\\svm.xml");
cout << "训练好了!!!" << endl;
return 0;
}
// Recursively collects the full paths of all regular files under `path`
// into `files`, using the Windows _findfirst/_findnext API from <io.h>.
void getFiles(string path, vector<string>& files)
{
// NOTE(review): MSDN documents the search handle as intptr_t; long long
// matches it on 64-bit Windows builds — confirm for 32-bit targets.
long long hFile = 0;
struct _finddata_t fileinfo;
string p;
// start enumeration with the wildcard pattern "path\*"
if ((hFile = _findfirst(p.assign(path).append("\\*").c_str(), &fileinfo)) != -1)
{
do
{
if ((fileinfo.attrib & _A_SUBDIR))
{
// recurse into subdirectories, skipping "." and ".."
if (strcmp(fileinfo.name, ".") != 0 && strcmp(fileinfo.name, "..") != 0)
getFiles(p.assign(path).append("\\").append(fileinfo.name), files);
}
else
{
// regular file: record its full path
files.push_back(p.assign(path).append("\\").append(fileinfo.name));
}
} while (_findnext(hFile, &fileinfo) == 0);
_findclose(hFile);
}
}
// Loads every image under C:\pic\train_img\1\, flattens each into a single
// row vector and appends it to trainingImages with label 1.
void get_1(Mat& trainingImages, vector<int>& trainingLabels)
{
    const char* filePath = "C:\\pic\\train_img\\1\\";
    vector<string> files;
    getFiles(filePath, files);
    int number = files.size();
    for (int i = 0; i < number; i++)
    {
        Mat SrcImage = imread(files[i].c_str());
        if (SrcImage.empty()) // guard: reshape throws on an unreadable file
            continue;
        SrcImage = SrcImage.reshape(1, 1); // flatten to one row (all channels interleaved)
        trainingImages.push_back(SrcImage);
        trainingLabels.push_back(1);
    }
}
// Loads every image under C:\pic\train_img\0\, flattens each into a single
// row vector and appends it to trainingImages with label 0.
void get_0(Mat& trainingImages, vector<int>& trainingLabels)
{
    const char* filePath = "C:\\pic\\train_img\\0\\";
    vector<string> files;
    getFiles(filePath, files);
    int number = files.size();
    for (int i = 0; i < number; i++)
    {
        Mat SrcImage = imread(files[i].c_str());
        if (SrcImage.empty()) // guard: reshape throws on an unreadable file
            continue;
        SrcImage = SrcImage.reshape(1, 1); // flatten to one row (all channels interleaved)
        trainingImages.push_back(SrcImage);
        trainingLabels.push_back(0);
    }
}
SVM识别手写数字 训练集使用
在使用过程中,int response = (int)svm->predict(p); 一句出现报错。
可能原因:训练时直接将原始彩色图像展平为特征向量,而预测时先把图像缩放为 8x16 的灰度图再展平,
导致特征维数与已训练模型不一致;预测时的预处理应与训练时保持完全相同。
#include <stdio.h>
#include <time.h>
#include <opencv2/opencv.hpp>
#include <opencv/cv.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
#include <io.h>
#include<vector>
using namespace std;
using namespace cv;
using namespace ml;
void getFiles(string path, vector<string>& files);
// Loads the trained SVM model and classifies every test image in
// C:\pic\test_img\0\, counting how many are predicted as class 1.
// NOTE(review): training (get_1/get_0) flattened full-size COLOR images,
// while this code resizes to 8x16 GRAYSCALE before flattening — the
// feature dimension cannot match the trained model, which is the likely
// cause of the reported crash in svm->predict. The preprocessing here must
// mirror the training pipeline exactly; confirm against the training code.
int main()
{
    int result = 0;
    const char* filePath = "C:\\pic\\test_img\\0\\";
    vector<string> files;
    getFiles(filePath, files);
    int number = files.size();
    // the redundant SVM::create() was dropped — load() returns a fresh instance
    Ptr<SVM> svm = SVM::load("C:\\pic\\svm.xml");
    if (svm.empty() || !svm->isTrained()) // guard: predict throws on an invalid model
    {
        cout << "failed to load svm.xml" << endl;
        return -1;
    }
    for (int i = 0; i < number; i++)
    {
        Mat inMat = imread(files[i].c_str(), 0); // 0 = load as grayscale
        if (inMat.empty()) // skip unreadable files
            continue;
        // the original passed (0, 0) twice — the comma operator made each
        // argument plain 0; write the scale factors explicitly
        resize(inMat, inMat, Size(8, 16), 0.0, 0.0, INTER_AREA);
        Mat p = inMat.reshape(1, 1); // flatten to one row
        p.convertTo(p, CV_32FC1);    // SVM expects float features
        int response = (int)svm->predict(p);
        cout << response << endl;
        if (response == 1)
        {
            result++;
        }
    }
    return 0;
}
// Recursive directory walk (duplicate of the helper in the training
// program): appends the full path of every regular file under `path` to
// `files` via the Windows-only _findfirst/_findnext API (<io.h>).
void getFiles(string path, vector<string>& files)
{
// search handle; MSDN documents it as intptr_t (long long fits on win64)
long long hFile = 0;
struct _finddata_t fileinfo;
string p;
// begin enumeration over "path\*"
if ((hFile = _findfirst(p.assign(path).append("\\*").c_str(), &fileinfo)) != -1)
{
do
{
if ((fileinfo.attrib & _A_SUBDIR))
{
// descend into subdirectories other than "." and ".."
if (strcmp(fileinfo.name, ".") != 0 && strcmp(fileinfo.name, "..") != 0)
getFiles(p.assign(path).append("\\").append(fileinfo.name), files);
}
else
{
// plain file: store its full path
files.push_back(p.assign(path).append("\\").append(fileinfo.name));
}
} while (_findnext(hFile, &fileinfo) == 0);
_findclose(hFile);
}
}
贝塞尔曲线 绘制物体运动轨迹
#include <iostream>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2\core.hpp>
using namespace cv;
using namespace std;
vector<Point> points; // every recorded object center, i.e. the motion trail
Point Bcenter; // center of the most recently fitted ellipse
// Bezier-curve drawing helpers
// Component-wise sum of two points (helper for the Bezier evaluator).
Point pointAdd(Point p, Point q) {
    return Point(p.x + q.x, p.y + q.y);
}
// Scales a point by factor c; coordinates are truncated back to int.
Point pointTimes(float c, Point p) {
    return Point((int)(p.x * c), (int)(p.y * c));
}
// Quadratic Bezier in Bernstein form:
//   B(u) = (1-u)^2 * qi + 2u(1-u) * mid + u^2 * mo
// qi is the start point, mid the control point, mo the end point.
Point Bernstein(float u, Point qi, Point mid, Point mo)
{
    Point endTerm = pointTimes(pow(u, 2), mo);         // u^2 * end
    Point startTerm = pointTimes(pow((1 - u), 2), qi); // (1-u)^2 * start
    Point ctrlTerm = pointTimes(2 * u * (1 - u), mid); // 2u(1-u) * control
    return pointAdd(pointAdd(endTerm, startTerm), ctrlTerm);
}
int main(int argc, char** argv)
{
VideoCapture cap(0);//读取摄像头
if (!cap.isOpened())
return -1;
int iLowH = 0;
int iHighH = 5;
int iLowS = 45;
int iHighS = 255;
int iLowV = 45;
int iHighV = 255;
int nGaussianBlurValue = 3;
//采取颜色识别方法,利用滑条选色,参考HSV对应的颜色,获取目标物体
namedWindow("Control");
cvCreateTrackbar("LowH", "Control", &iLowH, 179); //Hue (0 - 179)
cvCreateTrackbar("HighH", "Control", &iHighH, 179);
cvCreateTrackbar("LowS", "Control", &iLowS, 255); //Saturation (0 - 255)
cvCreateTrackbar("HighS", "Control", &iHighS, 255);
cvCreateTrackbar("LowV", "Control", &iLowV, 255); //Value (0 - 255)
cvCreateTrackbar("HighV", "Control", &iHighV, 255);
while (true)
{
Mat imgOriginal;
cap >> imgOriginal;
//高斯滤波
GaussianBlur(imgOriginal, imgOriginal, Size(nGaussianBlurValue * +1, nGaussianBlurValue * 2 + 1), 0, 0);
Mat imgHSV;
vector<Mat> hsvSplit;
cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV); //转换颜色空间
Mat element1 = getStructuringElement(MORPH_RECT, Size(5, 5));//获取结构元素
morphologyEx(imgHSV, imgHSV, MORPH_OPEN, element1);//开操作
morphologyEx(imgHSV, imgHSV, MORPH_CLOSE, element1);//闭操作
split(imgHSV, hsvSplit);//HSV图像分离
equalizeHist(hsvSplit[2], hsvSplit[2]);//直方图均衡化
merge(hsvSplit, imgHSV);//HSV图像聚合
Mat imgThresholded;
//根据颜色选取目标物体
inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded);
Mat element = getStructuringElement(MORPH_RECT, Size(5, 5));//获取结构元素
morphologyEx(imgThresholded, imgThresholded, MORPH_OPEN, element);//开操作
morphologyEx(imgThresholded, imgThresholded, MORPH_CLOSE, element);//闭操作
morphologyEx(imgThresholded, imgThresholded, MORPH_ELLIPSE, element);//膨胀操作
vector<vector<Point>> contours;
vector<Vec4i> hierarcy;
findContours(imgThresholded, contours, hierarcy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);//查找轮廓
//drawContours(imgOriginal, contours, -1, Scalar(0, 255, 0), 2);//绘制轮廓
vector<RotatedRect> box(contours.size());
for (int i = 0; i < contours.size(); i++)
{
box[i] = fitEllipse(Mat(contours[i]));
Bcenter = box[i].center;
points.push_back(Bcenter);
//circle(imgOriginal, center, 3, Scalar(0, 255, 0));//绘制目标物体质点
//ellipse(imgOriginal, box[i], Scalar(0, 255, 0));//绘制拟合椭圆
for (int j = 2; j < points.size(); j += 2)
{
Point pre, last, mid;
pre = points[j - 2];
mid = points[j - 1];
last = points[j];
Point pt_pre = points[j - 2];
Point pt_now;
//绘制贝塞尔曲线,一小段一小段的直线就能组合成曲线
for (int k = 0; k <= 10; k++)
{
float u = (float)k / 10;
Point new_point = Bernstein(u, pre, mid, last);
pt_now.x = (int)new_point.x;
pt_now.y = (int)new_point.y;
line(imgOriginal, pt_pre, pt_now, Scalar(0, 255, 0), 2, CV_AA, 0);//绘制直线
pt_pre = pt_now;
}
}
}
imshow("Thresholded Image", imgThresholded); //显示处理图像
imshow("Original", imgOriginal); //显示最终图像
char key = (char)waitKey(300);
if (key == 27)
break;
}
return 0;
}
级联分类器目标检测(注:下述代码使用的是 OpenCV CascadeClassifier 级联模型,并非 YOLO)
#include<iostream>
#include<io.h>
#include<fstream>
#include<vector>
#include<string>
#include<opencv2/opencv.hpp>
using namespace cv;
using namespace std;
// Runs a trained cascade classifier (CascadeClassifier, i.e. Haar/LBP —
// not YOLO despite the section title) over a single image and draws a
// rectangle around each detection.
int main()
{
    CascadeClassifier classifier;
    if (!classifier.load("C:/ComputerSense/samples/data/cascade.xml"))
    {
        // the original only printed -1 and carried on, crashing later in
        // detectMultiScale; abort instead
        cout << "failed to load cascade.xml" << endl;
        return -1;
    }
    Mat image = imread("WIN_20220108_22_26_22_Pro.jpg");
    if (image.empty()) // guard: cvtColor throws on a failed imread
    {
        cout << "failed to load input image" << endl;
        return -1;
    }
    Mat gray;
    std::vector<Rect> rec;
    cvtColor(image, gray, COLOR_BGR2GRAY);
    //equalizeHist(gray, gray);
    classifier.detectMultiScale(gray, rec, 1.5, 3, 0, Size(100, 100));
    for (size_t t = 0; t < rec.size(); t++)
    {
        rectangle(image, rec[t], Scalar(0, 255, 255), 2, 8, 0);
    }
    imshow("image", image);
    waitKey(0);
    return 0;
}
KCF追踪 自定义roi区域物体
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/core/ocl.hpp>
using namespace cv;
using namespace std;
// Convert to string
#define SSTR( x ) static_cast< std::ostringstream & >( \
( std::ostringstream() << std::dec << x ) ).str()
int main(int argc, char** argv)
{
// List of tracker types in OpenCV 3.4.1
string trackerTypes[8] = { "BOOSTING", "MIL", "KCF", "TLD","MEDIANFLOW", "GOTURN", "MOSSE", "CSRT" };
// vector <string> trackerTypes(types, std::end(types));
// Create a tracker
string trackerType = trackerTypes[3];
Ptr<Tracker> tracker;
#if (CV_MINOR_VERSION < 3)
{
tracker = Tracker::create(trackerType);
}
#else
{
if (trackerType == "BOOSTING")
tracker = TrackerBoosting::create();
if (trackerType == "MIL")
tracker = TrackerMIL::create();
if (trackerType == "KCF")
tracker = TrackerKCF::create();
if (trackerType == "TLD")
tracker = TrackerTLD::create();
if (trackerType == "MEDIANFLOW")
tracker = TrackerMedianFlow::create();
if (trackerType == "GOTURN")
tracker = TrackerGOTURN::create();
if (trackerType == "MOSSE")
tracker = TrackerMOSSE::create();
if (trackerType == "CSRT")
tracker = TrackerCSRT::create();
}
#endif
// Read video
VideoCapture video(0);
Exit if video is not opened
//if (!video.isOpened())
//{
// cout << "Could not read video file" << endl;
// return 1;
//}
// Read first frame
Mat frame;
bool ok = video.read(frame);
// Define initial bounding box
Rect2d bbox(287, 23, 86, 320);
// Uncomment the line below to select a different bounding box
bbox = selectROI(frame, false);
// Display bounding box.
rectangle(frame, bbox, Scalar(255, 0, 0), 2, 1);
imshow("Tracking", frame);
tracker->init(frame, bbox);
while (video.read(frame))
{
// Start timer
double timer = (double)getTickCount();
// Update the tracking result
bool ok = tracker->update(frame, bbox);
// Calculate Frames per second (FPS)
float fps = getTickFrequency() / ((double)getTickCount() - timer);
if (ok)
{
// Tracking success : Draw the tracked object
rectangle(frame, bbox, Scalar(255, 0, 0), 2, 1);
}
else
{
// Tracking failure detected.
putText(frame, "Tracking failure detected", Point(100, 80), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 255), 2);
}
// Display tracker type on frame
putText(frame, trackerType + " Tracker", Point(100, 20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(50, 170, 50), 2);
// Display FPS on frame
putText(frame, "FPS : " + (int(fps)), Point(100, 50), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(50, 170, 50), 2);
// Display frame.
imshow("Tracking", frame);
// Exit if ESC pressed.
int k = waitKey(1);
if (k == 27)
{
break;
}
}
}
KCF+SVM追踪
来源:opencv-svm+kcf跟踪
#include <iostream>
#include <fstream>
#include <ctime>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/ml/ml.hpp>
#include <opencv2/tracking.hpp>
using namespace std;
using namespace cv;
using namespace cv::ml;
// Converts the saved linear SVM ("mouse.xml") into a single detector vector
// [w..., b] in the format expected by HOGDescriptor::setSVMDetector.
// NOTE(review): the hog parameter is unused here; the resulting vector size
// must equal hog.getDescriptorSize() + 1 for setSVMDetector to accept it.
vector<float> train(HOGDescriptor& hog)
{
Ptr<SVM> svm = Algorithm::load<SVM>("mouse.xml");
Mat svmat = svm->getSupportVectors();// one support vector per row
int cols = svm->getVarCount();// dimension of a support vector (== svmat.cols)
int rows = svmat.rows;// number of support vectors
Mat alphamat = Mat::zeros(rows, cols, CV_32F);// Lagrange multipliers alpha_i; nonzero entries correspond to support vectors
Mat svindex = Mat::zeros(1, rows, CV_64F);// indices of the support vectors
Mat Result;// the weight vector w
double rho;// the bias b
rho = svm->getDecisionFunction(0, alphamat, svindex);// decision function 0 (the class-1-vs-2 classifier). The solved alpha/b carry a flipped sign relative to y = wx + b, hence the negations below
alphamat.convertTo(alphamat, CV_32F);// getDecisionFunction may change the element type; restore CV_32F or the multiplication below fails
Result = -alphamat * svmat;// w = -sum_i(alpha_i * x_i); negated because alpha/b came out sign-flipped
vector<float> vec;// detector vector: w followed by b
for (int i = 0; i < cols; i++)
{
vec.push_back(Result.at<float>(0, i));
}
vec.push_back(rho);// append b
cout << "检测子维数:" << vec.size() << endl;
ofstream fopen1("svm_vec.txt");// persist the detector vector to a text file
for (int i = 0; i < vec.size(); i++)
{
fopen1 << vec[i] << " ";
}
fopen1.close();
return vec;
}
// Detects the target in the first camera frame with a HOG + linear-SVM
// detector, then follows it with a KCF tracker until 'q' is pressed.
void track()
{
    Mat frame;
    Rect2d roi;
    HOGDescriptor hog(Size(64, 128), Size(16, 16), Size(8, 8), Size(8, 8), 9);
    vector<float> vec = train(hog); // HOG features + trained SVM -> detector vector
    hog.setSVMDetector(vec);        // install the SVM detector
    VideoCapture cap(0);            // camera to track from
    cap >> frame;
    // BUG FIX: the original resized BEFORE this check — resize throws on an
    // empty Mat, so the guard could never trigger. Check first, then abort.
    if (frame.empty())
    {
        cout << "视频不存在" << endl;
        return;
    }
    int p = 4; // downscale factor to speed up detection
    resize(frame, frame, Size(frame.cols / p, frame.rows / p));
    vector<Rect> found, found_filtered;
    clock_t startTime, finishTime;
    cout << "开始检测" << endl;
    startTime = clock();
    // multi-scale detection; returned rectangles are ordered large to small
    hog.detectMultiScale(frame, found, 0, Size(2, 2), Size(4, 4), 1.05, 2);
    finishTime = clock();
    cout << "检测所用时间为" << (finishTime - startTime) * 1.0 / CLOCKS_PER_SEC << " 秒 " << endl;
    cout << endl << "矩形框的尺寸为 : " << found.size() << endl;
    // keep only outermost rectangles: drop any box nested inside another
    for (size_t i = 0; i < found.size(); i++)
    {
        Rect r = found[i];
        size_t j;
        for (j = 0; j < found.size(); j++)
            if (j != i && (r & found[j]) == r)
                break;
        if (j == found.size())
            found_filtered.push_back(r);
    }
    cout << "嵌套矩形框尺寸= " << found_filtered.size() << " Rects" << endl;
    if (found_filtered.empty()) // guard: no detection -> nothing to track
    {
        cout << "未检测到目标" << endl;
        return;
    }
    // shrink each ROI: HOG detections are larger than the actual object
    for (size_t i = 0; i < found_filtered.size(); i++)
    {
        roi = found_filtered[i];
        roi.x += cvRound(roi.width * 0.1);
        roi.width = cvRound(roi.width * 0.8);
        roi.y += cvRound(roi.height * 0.07);
        roi.height = cvRound(roi.height * 0.8);
    } // only the LAST filtered ROI is kept and handed to the tracker
    Ptr<TrackerKCF> tracker = TrackerKCF::create();
    tracker->init(frame, roi); // initialize tracking on the first frame
    while (true)
    {
        cap >> frame;
        if (frame.empty()) // stream ended / camera unplugged: resize would throw
            break;
        resize(frame, frame, Size(frame.cols / p, frame.rows / p));
        tracker->update(frame, roi); // refresh the ROI position
        rectangle(frame, roi, Scalar(255, 0, 0), 2, 1);
        imshow("捕捉老鼠", frame);
        if (char(waitKey(1)) == 'q') // quit on 'q'
        {
            cv::destroyWindow("捕捉老鼠");
            break;
        }
    }
}
// Entry point: run the HOG/SVM detection + KCF tracking pipeline.
int main()
{
track();
return 0;
}