OpenCV3.4+OpenCV-Contrib +gpu编译【windows10_x64+vs2015】

下载源码:
opencv3.4
opencv_contrib

考虑好自己是打算编译debug版本或者是release版本。
下载cmake 我用的是3.7.0版本的。所以其它的我不是很清楚行不行。
然后分别将以上的文件解压好。将cmake解压后的文件单独放,opencv和opencv_contrib的文件放在一个文件夹下,同时在此文件夹下新建一个newbuild文件夹。这个newbuild文件夹之后用来存放cmake生成的文件。
打开cmake,选择源码和编译目录,源码为opencv文件夹下的sources文件夹。编译目录为newbuild。
(如果可以科学上网,记得打开。因为有些文件需要下载。)点击Configure,当配置完成后,在搜索框里输入OPENCV_EXTRA_MODULES_PATH,然后将opencv_contrib文件夹下的modules路径填入。
如下图:
![这里写图片描述](https://img-blog.csdn.net/2018030615570123?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvcXFfMjUyNTQ3Nzc=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70)

然后再次点击Configure,如果没有问题,那么点击Generate。
如果成功了,可以关闭cmake了。

打开vs2015,然后打开newbuild文件夹里面的OpenCV.sln工程,等待一会儿加载,先对ALL_BUILD整体进行一次生成(Build)。编译完成后,右击INSTALL项目,选择Project Only → Build Only INSTALL,然后等待构建完成。期间可能会报出很多警告甚至错误,但是没关系。最后关闭该工程。

后面的就是设置环境变量,win10
用户变量:

OPENCV    D:\playopencv\opencv3.4\build\install

系统变量,在path中添加

D:\playopencv\opencv3.4\build\install\x64\vc14\bin

环境变量配置完了,最好重启下电脑。

  后面的就是在vs2015中配置了。
  新建一个空工程,然后点开它的属性,在VC++ directories中 选择include Directories添加如下三个:
D:\playopencv\opencv3.4\build\install\include
D:\playopencv\opencv3.4\build\install\include\opencv
D:\playopencv\opencv3.4\build\install\include\opencv2

然后选择 Library Directories 添加:

D:\playopencv\opencv3.4\build\install\x64\vc14\lib

继续选择Linker->input
Additional Dependencies
添加如下的库:

opencv_aruco340d.lib
opencv_bgsegm340d.lib
opencv_bioinspired340d.lib
opencv_calib3d340d.lib
opencv_ccalib340d.lib
opencv_core340d.lib
opencv_datasets340d.lib
opencv_dnn340d.lib
opencv_dnn_objdetect340d.lib
opencv_dpm340d.lib
opencv_face340d.lib
opencv_features2d340d.lib
opencv_flann340d.lib
opencv_fuzzy340d.lib
opencv_hfs340d.lib
opencv_highgui340d.lib
opencv_imgcodecs340d.lib
opencv_imgproc340d.lib
opencv_img_hash340d.lib
opencv_line_descriptor340d.lib
opencv_ml340d.lib
opencv_objdetect340d.lib
opencv_optflow340d.lib
opencv_phase_unwrapping340d.lib
opencv_photo340d.lib
opencv_plot340d.lib
opencv_reg340d.lib
opencv_rgbd340d.lib
opencv_saliency340d.lib
opencv_shape340d.lib
opencv_stereo340d.lib
opencv_stitching340d.lib
opencv_structured_light340d.lib
opencv_superres340d.lib
opencv_surface_matching340d.lib
opencv_text340d.lib
opencv_tracking340d.lib
opencv_video340d.lib
opencv_videoio340d.lib
opencv_videostab340d.lib
opencv_xfeatures2d340d.lib
opencv_ximgproc340d.lib
opencv_xobjdetect340d.lib
opencv_xphoto340d.lib

这个是debug版本的,如果你想编译release版本的只需要在使用vs2015重新编译的时候选择release就好。如果你的debug模式或者release模式测试程序出现

error LNK2019: unresolved external symbol

那么你可以尝试切换一下vs2015属性里面的

C/C++ -> Code Generation -> Runtime Library ->

release 版本可以选择

Multi-threaded DLL (/MD)

debug版本可以选择:

Multi-threaded Debug DLL (/MDd)

测试程序1:

//#include <opencv2\highgui\highgui.hpp>
//#include <opencv2\core\core.hpp>
//#include <iostream>
//using namespace cv;
//using namespace std;
//
//int main()
//{
//  cout << "opencv3.4" << endl;
//  //Mat src = Mat::zeros(640,480,CV_8UC1);
//  Mat src = imread("../data/1.jpg");
//  /*src = imread("1.jpg");
//  imshow("src",src);
//  waitKey(0);*/
//  imshow("src", src);
//  waitKey(0);
//  return 0;
//
//
//
//}

测试程序2,ORB:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/flann/flann.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace std;
using namespace cv;

void readme();
string type2str(int type);

int main(int argc, char** argv)
{


    Mat img_1 = imread("left01.jpg", 0);
    Mat img_2 = imread("right01.jpg", 0);

    if (!img_1.data || !img_2.data)
    {
        cout << " --(!) Error reading images " << endl;
        return -1;
    }

    //-- Step 1: Detect the keypoints using ORB Detector

    cv::Ptr<cv::ORB> orb = cv::ORB::create(50);

    std::vector<KeyPoint> keypoints_1, keypoints_2;

    orb->detect(img_1, keypoints_1);
    orb->detect(img_2, keypoints_2);
    //-- Step 2: Calculate descriptors (feature vectors)

    Mat descriptors_1, descriptors_2;

    // descriptor is a cv::Mat, with rows the same as nFeatures, and cols as 32 (8UC1)
    orb->compute(img_1, keypoints_1, descriptors_1);
    orb->compute(img_2, keypoints_2, descriptors_2);
    cout << type2str(descriptors_1.type()) << " " << descriptors_1.rows << "*" << descriptors_1.cols << endl;;

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector<DMatch> matches;

    // the descriptor for FlannBasedMatcher should has matrix element of CV_32F
    if (descriptors_1.type() != CV_32F)
    {
        descriptors_1.convertTo(descriptors_1, CV_32F);
        descriptors_2.convertTo(descriptors_2, CV_32F);
    }
    matcher.match(descriptors_1, descriptors_2, matches);



    double min_dist = min_element(matches.begin(),
        matches.end(),
        [](const DMatch& d1, const DMatch& d2)->double
    {
        return d1.distance < d2.distance;
    })->distance;

    cout << min_dist << endl;

    vector<DMatch> good_matches;

    for (int i = 0; i < descriptors_1.rows; i++)
    {
        if (matches[i].distance < max<double>(min_dist * 2, 60.0))
        {
            good_matches.push_back(matches[i]);
        }
    }

    Mat img_matches;
    drawMatches(img_1, keypoints_1, img_2, keypoints_2,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    //-- Show detected matches
    imshow("Good Matches", img_matches);

    for (int i = 0; i < good_matches.size(); i++)
    {
        cout << good_matches[i].queryIdx << " --- " << good_matches[i].trainIdx << endl;
    }

    waitKey(0);

    return 0;
}

// Print the command-line usage string for this ORB matching sample.
void readme()
{
    const char* usage = " Usage: ./ORB_test <img1> <img2>";
    cout << usage << endl;
}

// Render an OpenCV matrix type code (e.g. CV_8UC3) as a human-readable
// string such as "8UC3". Unrecognized depths are reported as "User".
// Channel counts of 10 or more are not representable by the single
// appended digit, matching the original behavior.
string type2str(int type)
{
    const uchar depth = type & CV_MAT_DEPTH_MASK;
    const uchar chans = 1 + (type >> CV_CN_SHIFT);

    string name;
    if      (depth == CV_8U)  name = "8U";
    else if (depth == CV_8S)  name = "8S";
    else if (depth == CV_16U) name = "16U";
    else if (depth == CV_16S) name = "16S";
    else if (depth == CV_32S) name = "32S";
    else if (depth == CV_32F) name = "32F";
    else if (depth == CV_64F) name = "64F";
    else                      name = "User";

    name += "C";
    name += static_cast<char>(chans + '0');

    return name;
}
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值