opencv3+ hog+svm前车识别,代码里有acfdetection的,但是效果不好

直接上代码把,利用opencv的hog和svm的库函数实现识别,在我的数据集下正确率95%以上

训练,save参数和load,load有一些问题SVM前缀不行,得用StatModel,注意大小写,谷歌说opencv3+不用线性核load会出问题,说是opencv的bug,我没试过,大佬可以试试用其他核

#ifdef TRAIN
	// SVM::Params params;
	// params.svmType = SVM::C_SVC;
	// params.kernelType = SVM::POLY;
	// params.gamma = 3;
	// Ptr<SVM> svm = SVM::create(params);

	Mat train_data;
    Mat train_label;
    read_num_class_data(&train_data,&train_label,num);
	Ptr<SVM> svm = SVM::create(); 
	svm->setType(SVM::C_SVC);    //设置svm类型
    //svm->setKernel(SVM::POLY); //设置核函数;
	svm->setKernel(SVM::LINEAR);
	/*these param can be ignore 
	// // libsvm参数说明:http://blog.csdn.net/changyuanchn/article/details/7540014
	// // svm->setDegree(0.5);
	// // svm->setGamma(1);
	// // svm->setCoef0(1);
	// // svm->setNu(0.5);
	// // svm->setP(0);
	// // svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01));
	
	// 	// setTermCriteria是用来设置算法的终止条件, 
	// 	// SVM训练的过程就是一个通过 迭代 方式解决约束条件下的二次优化问题,
	// 	// 这里我们指定一个最大迭代次数和容许误差,以允许算法在适当的条件下停止计算
	
	//  //svm->setC(1);
	*/

	Ptr<TrainData> tData = TrainData::create(train_data, ROW_SAMPLE, train_label);
	//svm->trainAuto(tData);  
	svm->train(tData);
	svm->save("mysvm.xml");//文件形式为xml,可以保存在txt或者xml文件中
	cout<< svm->getVarCount()<<endl;//获取SVM支持向量的维数
	// // svm->getSupportVectors;//返回Mat类型,获取SVM的支持向量
	
#else
	// Ptr<SVM> svm1 = SVM::create();
	// SVM::load<SVM>("mysvm.xml");
	Ptr<ml::SVM> svm1 = StatModel::load<SVM>("mysvm.xml");
	//svm=SVM::load<SVM>("mysvm.xml");
    //cout<<svm->getVarCount()<<endl;//获取SVM支持向量的维数
#endif

预测就是一句话

float r = svm1->predict(featureVector);



整体代码

#include <iostream>
#include <string>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
//OpenCV include
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ml.h>

using namespace std;
using namespace cv;
using namespace cv::ml;
static const Size trainingPadding = Size(0, 0);
//static const Size winStride = Size(8, 8);
static const Size winStride = Size(64, 64);


bool read_num_class_data(Mat* _data, Mat* _responses,int num){
	int icount=0;
	_data->release();
    _responses->release();
    vector<int >train_label;
    vector< vector<float > > train_data;
	for (int count = 0; icount < num ; ++count)
	//while (count<100)
	{
		//capture>>im;
		 char str[100];
		 sprintf(str,"/home/cx/Videos/data/vehicles/KITTI_extracted/%d.png",count);
		Mat im = imread(str,1);
		if(im.empty()){
			//cout <<"loading image failed"<< endl; 
			continue;
			//return 0;
		}
		icount++;
		Mat luvImage;

		Mat imageData;
    	cvtColor(im, imageData, CV_RGB2GRAY);		

		//hog
		HOGDescriptor hog;
		//HOGDescriptor hog(cvSize(64,64),cvSize(64,64),cvSize(64,64),cvSize(64,64),9); // Use standard parameters here
    	hog.winSize = Size(64, 64); // Default training images size as used in paper

    	//calculateFeaturesFromInput(currentImageFile, featureVector, hog);
    	vector<float> featureVector;
	    // Check for mismatching dimensions
	    if (imageData.cols != hog.winSize.width || imageData.rows != hog.winSize.height) {
	        featureVector.clear();
	        //printf("Error: Image '%s' dimensions (%u x %u) do not match HOG window size (%u x %u)!\n", imageFilename.c_str(), imageData.cols, imageData.rows, hog.winSize.width, hog.winSize.height);
	        return 1;
	    }
	    //hog detect
	    vector<Point> locations;
	    hog.compute(imageData, featureVector, winStride, trainingPadding, locations);
	    


	 //    Mat edge;
	 //    Canny(imageData,edge,70,200,3);
	 //    for (int i=0;i<im.rows;i++){
		// 	for (int j=0;j<im.cols;j++){
		// 		File_vector<<edge.at<uchar>(j,i)/255.0 <<" ";
		// 	}
		// }
	    //Sobel(imageData,edge,imageData.depth(),1,1);

	 //    cvtColor(im, luvImage, CV_RGB2Luv); 
		// vector<Mat> mv;  
		// split(im,mv);
		// fstream f("test");
		// f.open("test.dat", ios::out);
		// f<<mv[2]<<endl;
		// for (int k=0;k<3;k++){
		// 	for (int i=0;i<im.rows;i++){
		// 		for (int j=0;j<im.cols;j++){
		// 			File_vector<<mv[k].at<uchar>(j,i)/255.0 <<" ";
		// 		}
		// 	}
		// }

	    //myluv
		// uchar *tmp = mat2uchar(im);
		// int dim0, dim1, dim2;
		// float *I = rgbConvert(tmp, im.rows, im.cols, im.channels(), dim0, dim1, dim2, "luv");
		// int nr=im.rows;
		// int nc=im.cols;
		// for (int i = 0; i < im.channels(); i++){
		// 	for (int j = 0; j <im.rows; j++){
		// 		for (int k = 0; k < im.cols; k++){
		// 			File_vector << I[i*nr*nc + j*nc + k] << " ";
		// 		}
		// 	}
		// }
		// wrFree(I);
		//File_vector<<endl;

		train_data.push_back(featureVector);
	    train_label.push_back(1);
	}

	icount=0;
	for (int count = 0; icount < num ; ++count)
	//while (count<100)
	{
		//capture>>im;
		 char str[100];
		 //sprintf(str,"/home/cx/Videos/data/vehicles/KITTI_extracted/%d.png",count);
		 //sprintf(str,"/home/cx/Desktop/Vehicle_test/pyramid_test/pic/%d.png",count);
		 sprintf(str,"/home/cx/Videos/data/non-vehicles/Extras/extra%d.png",count);
		 //sprintf(str,"/home/cx/Videos/data/vehicles/GTI_MiddleClose/image%04d.png",count);
		 //sprintf(str,"/home/cx/Videos/2011_09_26/2011_09_26_drive_0027_sync/image_02/data/%010d.png",count);
		Mat im = imread(str,1);
		if(im.empty()){
			//cout <<"loading image failed"<< endl; 
			continue;
			//return 0;
		}
		icount++;
		Mat luvImage;

		Mat imageData;
    	cvtColor(im, imageData, CV_RGB2GRAY);		

		//hog
		HOGDescriptor hog;
		//HOGDescriptor hog(cvSize(64,64),cvSize(64,64),cvSize(64,64),cvSize(64,64),9); // Use standard parameters here
    	hog.winSize = Size(64, 64); // Default training images size as used in paper

    	//calculateFeaturesFromInput(currentImageFile, featureVector, hog);
    	vector<float> featureVector;
	    // Check for mismatching dimensions
	    if (imageData.cols != hog.winSize.width || imageData.rows != hog.winSize.height) {
	        featureVector.clear();
	        //printf("Error: Image '%s' dimensions (%u x %u) do not match HOG window size (%u x %u)!\n", imageFilename.c_str(), imageData.cols, imageData.rows, hog.winSize.width, hog.winSize.height);
	        return 1;
	    }
	    //hog detect
	    vector<Point> locations;
	    hog.compute(imageData, featureVector, winStride, trainingPadding, locations);
	    
	    
	 //    Mat edge;
	 //    Canny(imageData,edge,70,200,3);
	 //    for (int i=0;i<im.rows;i++){
		// 	for (int j=0;j<im.cols;j++){
		// 		File_vector<<edge.at<uchar>(j,i)/255.0 <<" ";
		// 	}
		// }
	    //Sobel(imageData,edge,imageData.depth(),1,1);

	 //    cvtColor(im, luvImage, CV_RGB2Luv); 
		// vector<Mat> mv;  
		// split(im,mv);
		// fstream f("test");
		// f.open("test.dat", ios::out);
		// f<<mv[2]<<endl;
		// for (int k=0;k<3;k++){
		// 	for (int i=0;i<im.rows;i++){
		// 		for (int j=0;j<im.cols;j++){
		// 			File_vector<<mv[k].at<uchar>(j,i)/255.0 <<" ";
		// 		}
		// 	}
		// }

	    //myluv
		// uchar *tmp = mat2uchar(im);
		// int dim0, dim1, dim2;
		// float *I = rgbConvert(tmp, im.rows, im.cols, im.channels(), dim0, dim1, dim2, "luv");
		// int nr=im.rows;
		// int nc=im.cols;
		// for (int i = 0; i < im.channels(); i++){
		// 	for (int j = 0; j <im.rows; j++){
		// 		for (int k = 0; k < im.cols; k++){
		// 			File_vector << I[i*nr*nc + j*nc + k] << " ";
		// 		}
		// 	}
		// }
		// wrFree(I);
		//File_vector<<endl;


		train_data.push_back(featureVector);
	    train_label.push_back(0);
	}

	srand((unsigned)time(0));
	for (int i=0;i<num*2;i++){
		int a=rand()%(num*2);
		int b=rand()%(num*2);
		swap(train_label[a],train_label[b]);
		swap(train_data[a],train_data[b]);
	}

	Mat(train_label).copyTo(*_responses);

	*_data=Mat::zeros(train_data.size(), train_data[0].size(), CV_32F);
	for (int i=0;i<train_data.size();i++){
		for (int j=0;j<train_data[0].size();j++){
			_data->at<float>(i,j)=train_data[i][j];
			//cout<<_data->at<float>(i,j)<<" "<<train_data[i][j]<<endl;
		}
	}
}

int main(int argc, char** argv)
{
	
	string pic;
	int count = 0;
    	int num = atoi(argv[1]); //the num of picture

	// VideoCapture capture(input_video_file);
 //    if ( !capture.isOpened() ) {
 //        cout << "video could not be opened" << endl;
 //        return -1;
 //    }

 //    double num_frames = capture.get(CV_CAP_PROP_FRAME_COUNT);
    Mat im;

    fstream File_vector;
    fstream File_label;


    clock_t start, end;
	start = clock();
		
	//vector<vector<float> > train_data;
	//vector<int > train_label;
	Mat train_data;
    Mat train_label;
    read_num_class_data(&train_data,&train_label,num);

	

	Ptr<SVM> svm = SVM::create(); 
	svm->setType(SVM::C_SVC);    //设置svm类型
    //svm->setKernel(SVM::POLY); //设置核函数;
	svm->setKernel(SVM::LINEAR);
	// svm->setDegree(0.5);
	// svm->setGamma(1);
	// svm->setCoef0(1);
	// svm->setNu(0.5);
	// svm->setP(0);
	// svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01));
	svm->setC(1);
	Ptr<TrainData> tData = TrainData::create(train_data, ROW_SAMPLE, train_label);
	svm->train(tData);

	Mat sample1 = train_data.row(0);
	//cout<<sample1.rows<<endl;
	int icount=0;
	int ans=0;
	for (int count = 0; icount < num ; ++count){
		//capture>>im;
		 char str[100];
		 //sprintf(str,"/home/cx/Videos/data/vehicles/KITTI_extracted/%d.png",count+1000);
		 //sprintf(str,"/home/cx/Desktop/Vehicle_test/pyramid_test/pic/%d.png",count);
		 sprintf(str,"/home/cx/Videos/data/non-vehicles/Extras/extra%d.png",count+1000);
		 //sprintf(str,"/home/cx/Videos/data/vehicles/GTI_MiddleClose/image%04d.png",count);
		 //sprintf(str,"/home/cx/Videos/2011_09_26/2011_09_26_drive_0027_sync/image_02/data/%010d.png",count);
		Mat im = imread(str,1);
		if(im.empty()){
			continue;
		}
		icount++;
		Mat luvImage;

		Mat imageData;
    	cvtColor(im, imageData, CV_RGB2GRAY);		

		//hog
		HOGDescriptor hog;
		//HOGDescriptor hog(cvSize(64,64),cvSize(64,64),cvSize(64,64),cvSize(64,64),9); // Use standard parameters here
    	hog.winSize = Size(64, 64); // Default training images size as used in paper

    	//calculateFeaturesFromInput(currentImageFile, featureVector, hog);
    	vector<float> featureVector;
	    // Check for mismatching dimensions
	    if (imageData.cols != hog.winSize.width || imageData.rows != hog.winSize.height) {
	        featureVector.clear();
	        //printf("Error: Image '%s' dimensions (%u x %u) do not match HOG window size (%u x %u)!\n", imageFilename.c_str(), imageData.cols, imageData.rows, hog.winSize.width, hog.winSize.height);
	        return 1;
	    }
	    //hog detect
	    vector<Point> locations;
	    hog.compute(imageData, featureVector, winStride, trainingPadding, locations);
	    //Mat sample(featureVector);
	    //cout<<sample.rows<<endl;
	    float r = svm->predict(featureVector);
        if (r==0) ans++;
	}

	cout<<ans<<endl;

	end = clock();
	printf("time=%f\n", (double)(end - start) / CLOCKS_PER_SEC);

	//release_data(det25);
	return 0;
}


评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值