opencv的行人检测demo中的get_support_vector在之前返回的值一直是1,也就是支持向量个数一直是1,这样在
// Copy every support vector of the trained SVM into supportVectorMat (one row per vector).
// NOTE(review): if supportVectorNum is larger than the number of rows allocated for
// supportVectorMat (or alphaMat below), at<float>(i,j) writes out of bounds — this matches
// the segfault described in the surrounding text. Verify the Mats were created with
// supportVectorNum rows / columns before this loop runs.
for(int i=0; i<supportVectorNum; i++)
{
const float * pSVData = svm.get_support_vector(i);// pointer to the i-th support vector (DescriptorDim floats)
cout<<"支持向量:"<< * pSVData<<endl;
for(int j=0; j<DescriptorDim; j++)
{
// Could pSVData be a null pointer here? (original author's question — get_support_vector
// returns null only if i is out of range, so the real risk is the Mat bounds above)
//supportVectorMat.at<float>(i,j) = pSVData[j];
supportVectorMat.at<float>(i,j) = pSVData[j];
}
}
// Copy the alpha (Lagrange multiplier) coefficients, one per support vector.
// NOTE(review): same bounds concern — alphaMat must have at least supportVectorNum columns.
double * pAlphaData = svm.get_alpha_vector();
for(int i=0; i<supportVectorNum; i++)
{
alphaMat.at<float>(0,i) = pAlphaData[i];
}
这段代码中明显出现数组越界了,所以报错段错误就有理由了,
int supportVectorNum = svm.get_support_vector_count();//支持向量的个数
这句代码是表明支持向量的个数是有这个函数得到的,这个函数并没有参数传入,那是由什么因素决定的支持向量个数?网上有人问这个问题,
但是没有搜索到有用的回答,这个问题有点头疼。
runtracker.cpp
#include <iostream>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <dirent.h>
#include <math.h>
#include <ros/ros.h>
#include <ros/package.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include "geometry_msgs/Twist.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include "kcftracker.hpp"
#include <opencv2/ml/ml.hpp>
using namespace cv;
using namespace std;
// Name of the OpenCV window used to display the RGB camera stream.
static const std::string RGB_WINDOW = "RGB Image window";
//static const std::string DEPTH_WINDOW = "DEPTH Image window";
// Robot velocity limits: linear speed in m/s (presumably — units not shown here; confirm
// against the Twist publisher), rotation speed, and the distance band [Min, Max] over which
// linear speed is interpolated.
#define Max_linear_speed 1
#define Min_linear_speed 0.4
#define Min_distance 1.0
#define Max_distance 5.0
#define Max_rotation_speed 0.75
float linear_speed = 0;
float rotation_speed = 0;
// Linear mapping distance -> speed: speed = k * distance + h, chosen so that
// speed(Min_distance) == Min_linear_speed and speed(Max_distance) == Max_linear_speed.
float k_linear_speed = (Max_linear_speed - Min_linear_speed) / (Max_distance - Min_distance);
float h_linear_speed = Min_linear_speed - k_linear_speed * Min_distance;
// Rotation control gains/offsets; left/right use slightly different offsets (empirically tuned).
float k_rotation_speed = 0.004;
float h_rotation_speed_left = 1.2;
float h_rotation_speed_right = 1.36;
float distance_scale = 1.0;
// Horizontal pixel thresholds splitting the image into left / center / right steering zones.
int ERROR_OFFSET_X_left1 = 100;
int ERROR_OFFSET_X_left2 = 300;
int ERROR_OFFSET_X_right1 = 340;
int ERROR_OFFSET_X_right2 = 600;
// Size (pixels) of the region of interest used for tracking/depth sampling.
int roi_height = 100;
int roi_width = 100;
// Latest frames received from the RGB and depth camera topics.
cv::Mat rgbimage;
cv::Mat depthimage;
// Rectangle being selected by the user (mouse) and the drag origin.
cv::Rect selectRect;
cv::Point origin;
// Tracker output rectangle for the current frame.
cv::Rect result;
bool select_flag = false;
bool bRenewROI = false; // the flag to enable the implementation of KCF algorithm for the new chosen ROI
bool bBeginKCF = false;
bool enable_get_depth = false;
// KCF tracker feature options: HOG features, fixed window, multi-scale search, LAB color features.
bool HOG = true;
bool FIXEDWINDOW = false;
bool MULTISCALE = true;
bool SILENT = true;
bool LAB = false;
// HOG descriptor dimensionality, set after the SVM/HOG is configured.
// NOTE(review): uninitialized here — reading it before assignment is undefined behavior.
int DescriptorDim;
// NOTE(review): identifier misspelled ("dectect") and uninitialized — its value is
// indeterminate until explicitly set; verify all uses assign it first.
bool has_dectect_people ;
// Create KCFTracker object
KCFTracker tracker(HOG, FIXEDWINDOW, MULTISCALE, LAB);
// Custom SVM detector coefficients passed to HOGDescriptor::setSVMDetector.
vector<float> myDetector;
// Depth samples (meters, presumably — confirm against depth image encoding) around the ROI center.
float dist_val[5] ;
/*
void onMouse(int event, int x, int y, int, void*)
{
if (select_flag)
{
selectRect.x = MIN(origin.x, x);
selectRect.y = MIN(origin.y, y);
selectRect.width = abs(x - origin.x);
selectRect.height = a