OpenCV18(图像拼接stitcher_detail)

从OpenCV2.4.0之后的版本中都包含有一个图像拼接的例程。路径:“...\OpenCV\sources\samples\cpp\stitching_detailed.cpp”

本文就网上基于图像拼接的例程代码总结一下,基本上都是由此修改而来。以下给出原代码,以及一个简单版本的例程。


1.一个简单的例子(易于理解)

  1. #include "stdafx.h"  
  2. #include <iostream>  
  3. #include <fstream>  
  4. #include <opencv2/core/core.hpp>  
  5. #include "opencv2/highgui/highgui.hpp"  
  6. #include "opencv2/stitching/stitcher.hpp"  
  7.    
  8. using namespace std;  
  9. using namespace cv;  
  10.    
  11. bool try_use_gpu = false;  
  12. vector<Mat> imgs;  
  13. string result_name = "result.jpg";  
  14.    
  15. int main()  
  16. {  
  17.     Mat img1=imread("1.jpg");  
  18.     Mat img2=imread("2.jpg");  
  19.     imgs.push_back(img1);  
  20.     imgs.push_back(img2);  
  21.     Mat pano;  
  22.     Stitcher stitcher = Stitcher::createDefault(try_use_gpu);//关键语句一  
  23.     Stitcher::Status status = stitcher.stitch(imgs, pano);//关键语句二  
  24.     if (status != Stitcher::OK)  
  25.     {  
  26.         cout << "Can't stitch images, error code = " << status << endl;  
  27.         return -1;  
  28.     }  
  29.     namedWindow(result_name);  
  30.     imshow(result_name,pano);  
  31.     imwrite(result_name,pano);  
  32.     waitKey();  
  33.     return 0;  
  34. }  
#include "stdafx.h"
#include <iostream>
#include <fstream>
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/stitcher.hpp"
 
using namespace std;
using namespace cv;
 
// Whether Stitcher::createDefault should try the GPU-accelerated pipeline.
bool try_use_gpu = false;
// Input images collected before stitching (filled in main()).
vector<Mat> imgs;
// Name used for the display window and for the saved panorama file.
string result_name = "result.jpg";
 
int main()
{
    // Load the two source images; stitching needs at least two overlapping views.
    Mat img1 = imread("1.jpg");
    Mat img2 = imread("2.jpg");
    // imread returns an empty Mat on failure; stitching empty images would
    // fail with a confusing error deep inside the pipeline, so check here.
    if (img1.empty() || img2.empty())
    {
        cout << "Can't read input images 1.jpg / 2.jpg" << endl;
        return -1;
    }
    imgs.push_back(img1);
    imgs.push_back(img2);

    Mat pano;  // output panorama
    // Key step 1: build a Stitcher with default parameters (GPU if requested and available).
    Stitcher stitcher = Stitcher::createDefault(try_use_gpu);
    // Key step 2: run the full registration + compositing pipeline.
    Stitcher::Status status = stitcher.stitch(imgs, pano);
    if (status != Stitcher::OK)
    {
        cout << "Can't stitch images, error code = " << status << endl;
        return -1;
    }
    // Show the result on screen and also write it to disk.
    namedWindow(result_name);
    imshow(result_name,pano);
    imwrite(result_name,pano);
    waitKey();
    return 0;
}


注意事项:

1.图像可以多幅,但是建议先两幅

2.图像之间的重合度要高一点

3.图像的大小建议一样,不一样的还没测试


问题解决:

1.问题描述:

1>stitch.obj : error LNK2019: 无法解析的外部符号 "public: static class cv::Stitcher __cdecl cv::Stitcher::createDefault(bool)" (?createDefault@Stitcher@cv@@SA?AV12@_N@Z),该符号在函数 _main 中被引用
1>stitch.obj : error LNK2019: 无法解析的外部符号 "public: enum cv::Stitcher::Status __thiscall cv::Stitcher::stitch(class cv::_InputArray const &,class cv::_OutputArray const &)" (?stitch@Stitcher@cv@@QAE?AW4Status@12@ABV_InputArray@2@ABV_OutputArray@2@@Z),该符号在函数 _main 中被引用

这种错误出在配置上,缺少opencv_stitching249.lib(版本号视你安装的OpenCV而定),这个不常用,在opencv配置教程中一般都没有包含。

解决方法

在属性中(我原来是用属性管理器中配置opencv的)


找不到的话,在下图中输入“属性”,点击  属性管理器


在连接器中(link)->输入->附加依赖项,添加“opencv_stitching249d.lib”(看你的版本了)。如果你是用debug,不要添加opencv_stitching249.lib(没有d)。这里还涉及一个问题,见4中的描述


4.问题描述

Stitcher::Status status = stitcher.stitch(imgs, pano);//关键语句二       运行出错   |  vector访问出错 | vector下标越界 | 程序中断如下 | 0x00000005错误



解决方法

原因还是在OpenCV的配置上。如果你是debug,将属性管理器中的所有不带d的lib文件都删去(建议你放记事本里备份)。如果是release,就把带d的都删去。

测试发现,平时使用的时候,会在属性管理器的“Microsoft.Cpp.Win32.user”中加上带d和不带d的lib,这样就不需要每个工程都配置opencv了。

但是debug中的“Microsoft.Cpp.Win32.user”和release中的是同一个,会相互影响,修改哪一个中的都不行,于是全部添加上。相信有很多人都是这样的,所以这个问题应该是很多人都会遇到的吧。改过了就可以了。


最后注意一下,拼接成功的概率不高,应该是代码还不够优化吧。只要通过了“关键代码二”,基本配置上就不会有问题了。剩下的就要自己调参数了。


2.一个截图程序

(稍后附上)



3.源代码(复制过来的)

  1. /*M/// 
  2. // 
  3. //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
  4. // 
  5. //  By downloading, copying, installing or using the software you agree to this license. 
  6. //  If you do not agree to this license, do not download, install, 
  7. //  copy or use the software. 
  8. // 
  9. // 
  10. //                          License Agreement 
  11. //                For Open Source Computer Vision Library 
  12. // 
  13. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
  14. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. 
  15. // Third party copyrights are property of their respective owners. 
  16. // 
  17. // Redistribution and use in source and binary forms, with or without modification, 
  18. // are permitted provided that the following conditions are met: 
  19. // 
  20. //   * Redistribution's of source code must retain the above copyright notice, 
  21. //     this list of conditions and the following disclaimer. 
  22. // 
  23. //   * Redistribution's in binary form must reproduce the above copyright notice, 
  24. //     this list of conditions and the following disclaimer in the documentation 
  25. //     and/or other materials provided with the distribution. 
  26. // 
  27. //   * The name of the copyright holders may not be used to endorse or promote products 
  28. //     derived from this software without specific prior written permission. 
  29. // 
  30. // This software is provided by the copyright holders and contributors "as is" and 
  31. // any express or implied warranties, including, but not limited to, the implied 
  32. // warranties of merchantability and fitness for a particular purpose are disclaimed. 
  33. // In no event shall the Intel Corporation or contributors be liable for any direct, 
  34. // indirect, incidental, special, exemplary, or consequential damages 
  35. // (including, but not limited to, procurement of substitute goods or services; 
  36. // loss of use, data, or profits; or business interruption) however caused 
  37. // and on any theory of liability, whether in contract, strict liability, 
  38. // or tort (including negligence or otherwise) arising in any way out of 
  39. // the use of this software, even if advised of the possibility of such damage. 
  40. // 
  41. // 
  42. //M*/  
  43.   
  44. #include <iostream>  
  45. #include <fstream>  
  46. #include <string>  
  47. #include "opencv2/opencv_modules.hpp"  
  48. #include "opencv2/highgui/highgui.hpp"  
  49. #include "opencv2/stitching/detail/autocalib.hpp"  
  50. #include "opencv2/stitching/detail/blenders.hpp"  
  51. #include "opencv2/stitching/detail/camera.hpp"  
  52. #include "opencv2/stitching/detail/exposure_compensate.hpp"  
  53. #include "opencv2/stitching/detail/matchers.hpp"  
  54. #include "opencv2/stitching/detail/motion_estimators.hpp"  
  55. #include "opencv2/stitching/detail/seam_finders.hpp"  
  56. #include "opencv2/stitching/detail/util.hpp"  
  57. #include "opencv2/stitching/detail/warpers.hpp"  
  58. #include "opencv2/stitching/warpers.hpp"  
  59.   
  60. using namespace std;  
  61. using namespace cv;  
  62. using namespace cv::detail;  
  63.   
  64. static void printUsage()  
  65. {  
  66.     cout <<  
  67.         "Rotation model images stitcher.\n\n"  
  68.         "stitching_detailed img1 img2 [...imgN] [flags]\n\n"  
  69.         "Flags:\n"  
  70.         "  --preview\n"  
  71.         "      Run stitching in the preview mode. Works faster than usual mode,\n"  
  72.         "      but output image will have lower resolution.\n"  
  73.         "  --try_gpu (yes|no)\n"  
  74.         "      Try to use GPU. The default value is 'no'. All default values\n"  
  75.         "      are for CPU mode.\n"  
  76.         "\nMotion Estimation Flags:\n"  
  77.         "  --work_megapix <float>\n"  
  78.         "      Resolution for image registration step. The default is 0.6 Mpx.\n"  
  79.         "  --features (surf|orb)\n"  
  80.         "      Type of features used for images matching. The default is surf.\n"  
  81.         "  --match_conf <float>\n"  
  82.         "      Confidence for feature matching step. The default is 0.65 for surf and 0.3 for orb.\n"  
  83.         "  --conf_thresh <float>\n"  
  84.         "      Threshold for two images are from the same panorama confidence.\n"  
  85.         "      The default is 1.0.\n"  
  86.         "  --ba (reproj|ray)\n"  
  87.         "      Bundle adjustment cost function. The default is ray.\n"  
  88.         "  --ba_refine_mask (mask)\n"  
  89.         "      Set refinement mask for bundle adjustment. It looks like 'x_xxx',\n"  
  90.         "      where 'x' means refine respective parameter and '_' means don't\n"  
  91.         "      refine one, and has the following format:\n"  
  92.         "      <fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle\n"  
  93.         "      adjustment doesn't support estimation of selected parameter then\n"  
  94.         "      the respective flag is ignored.\n"  
  95.         "  --wave_correct (no|horiz|vert)\n"  
  96.         "      Perform wave effect correction. The default is 'horiz'.\n"  
  97.         "  --save_graph <file_name>\n"  
  98.         "      Save matches graph represented in DOT language to <file_name> file.\n"  
  99.         "      Labels description: Nm is number of matches, Ni is number of inliers,\n"  
  100.         "      C is confidence.\n"  
  101.         "\nCompositing Flags:\n"  
  102.         "  --warp (plane|cylindrical|spherical|fisheye|stereographic|compressedPlaneA2B1|compressedPlaneA1.5B1|compressedPlanePortraitA2B1|compressedPlanePortraitA1.5B1|paniniA2B1|paniniA1.5B1|paniniPortraitA2B1|paniniPortraitA1.5B1|mercator|transverseMercator)\n"  
  103.         "      Warp surface type. The default is 'spherical'.\n"  
  104.         "  --seam_megapix <float>\n"  
  105.         "      Resolution for seam estimation step. The default is 0.1 Mpx.\n"  
  106.         "  --seam (no|voronoi|gc_color|gc_colorgrad)\n"  
  107.         "      Seam estimation method. The default is 'gc_color'.\n"  
  108.         "  --compose_megapix <float>\n"  
  109.         "      Resolution for compositing step. Use -1 for original resolution.\n"  
  110.         "      The default is -1.\n"  
  111.         "  --expos_comp (no|gain|gain_blocks)\n"  
  112.         "      Exposure compensation method. The default is 'gain_blocks'.\n"  
  113.         "  --blend (no|feather|multiband)\n"  
  114.         "      Blending method. The default is 'multiband'.\n"  
  115.         "  --blend_strength <float>\n"  
  116.         "      Blending strength from [0,100] range. The default is 5.\n"  
  117.         "  --output <result_img>\n"  
  118.         "      The default is 'result.jpg'.\n";  
  119. }  
  120.   
  121.   
  122. // Default command line args  
  123. vector<string> img_names;  
  124. bool preview = false;  
  125. bool try_gpu = false;  
  126. double work_megapix = 0.6;  
  127. double seam_megapix = 0.1;  
  128. double compose_megapix = -1;  
  129. float conf_thresh = 1.f;  
  130. string features_type = "surf";  
  131. string ba_cost_func = "ray";  
  132. string ba_refine_mask = "xxxxx";  
  133. bool do_wave_correct = true;  
  134. WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ;  
  135. bool save_graph = false;  
  136. std::string save_graph_to;  
  137. string warp_type = "spherical";  
  138. int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;  
  139. float match_conf = 0.3f;  
  140. string seam_find_type = "gc_color";  
  141. int blend_type = Blender::MULTI_BAND;  
  142. float blend_strength = 5;  
  143. string result_name = "result.jpg";  
  144.   
  145. static int parseCmdArgs(int argc, char** argv)  
  146. {  
  147.     if (argc == 1)  
  148.     {  
  149.         printUsage();  
  150.         return -1;  
  151.     }  
  152.     for (int i = 1; i < argc; ++i)  
  153.     {  
  154.         if (string(argv[i]) == "--help" || string(argv[i]) == "/?")  
  155.         {  
  156.             printUsage();  
  157.             return -1;  
  158.         }  
  159.         else if (string(argv[i]) == "--preview")  
  160.         {  
  161.             preview = true;  
  162.         }  
  163.         else if (string(argv[i]) == "--try_gpu")  
  164.         {  
  165.             if (string(argv[i + 1]) == "no")  
  166.                 try_gpu = false;  
  167.             else if (string(argv[i + 1]) == "yes")  
  168.                 try_gpu = true;  
  169.             else  
  170.             {  
  171.                 cout << "Bad --try_gpu flag value\n";  
  172.                 return -1;  
  173.             }  
  174.             i++;  
  175.         }  
  176.         else if (string(argv[i]) == "--work_megapix")  
  177.         {  
  178.             work_megapix = atof(argv[i + 1]);  
  179.             i++;  
  180.         }  
  181.         else if (string(argv[i]) == "--seam_megapix")  
  182.         {  
  183.             seam_megapix = atof(argv[i + 1]);  
  184.             i++;  
  185.         }  
  186.         else if (string(argv[i]) == "--compose_megapix")  
  187.         {  
  188.             compose_megapix = atof(argv[i + 1]);  
  189.             i++;  
  190.         }  
  191.         else if (string(argv[i]) == "--result")  
  192.         {  
  193.             result_name = argv[i + 1];  
  194.             i++;  
  195.         }  
  196.         else if (string(argv[i]) == "--features")  
  197.         {  
  198.             features_type = argv[i + 1];  
  199.             if (features_type == "orb")  
  200.                 match_conf = 0.3f;  
  201.             i++;  
  202.         }  
  203.         else if (string(argv[i]) == "--match_conf")  
  204.         {  
  205.             match_conf = static_cast<float>(atof(argv[i + 1]));  
  206.             i++;  
  207.         }  
  208.         else if (string(argv[i]) == "--conf_thresh")  
  209.         {  
  210.             conf_thresh = static_cast<float>(atof(argv[i + 1]));  
  211.             i++;  
  212.         }  
  213.         else if (string(argv[i]) == "--ba")  
  214.         {  
  215.             ba_cost_func = argv[i + 1];  
  216.             i++;  
  217.         }  
  218.         else if (string(argv[i]) == "--ba_refine_mask")  
  219.         {  
  220.             ba_refine_mask = argv[i + 1];  
  221.             if (ba_refine_mask.size() != 5)  
  222.             {  
  223.                 cout << "Incorrect refinement mask length.\n";  
  224.                 return -1;  
  225.             }  
  226.             i++;  
  227.         }  
  228.         else if (string(argv[i]) == "--wave_correct")  
  229.         {  
  230.             if (string(argv[i + 1]) == "no")  
  231.                 do_wave_correct = false;  
  232.             else if (string(argv[i + 1]) == "horiz")  
  233.             {  
  234.                 do_wave_correct = true;  
  235.                 wave_correct = detail::WAVE_CORRECT_HORIZ;  
  236.             }  
  237.             else if (string(argv[i + 1]) == "vert")  
  238.             {  
  239.                 do_wave_correct = true;  
  240.                 wave_correct = detail::WAVE_CORRECT_VERT;  
  241.             }  
  242.             else  
  243.             {  
  244.                 cout << "Bad --wave_correct flag value\n";  
  245.                 return -1;  
  246.             }  
  247.             i++;  
  248.         }  
  249.         else if (string(argv[i]) == "--save_graph")  
  250.         {  
  251.             save_graph = true;  
  252.             save_graph_to = argv[i + 1];  
  253.             i++;  
  254.         }  
  255.         else if (string(argv[i]) == "--warp")  
  256.         {  
  257.             warp_type = string(argv[i + 1]);  
  258.             i++;  
  259.         }  
  260.         else if (string(argv[i]) == "--expos_comp")  
  261.         {  
  262.             if (string(argv[i + 1]) == "no")  
  263.                 expos_comp_type = ExposureCompensator::NO;  
  264.             else if (string(argv[i + 1]) == "gain")  
  265.                 expos_comp_type = ExposureCompensator::GAIN;  
  266.             else if (string(argv[i + 1]) == "gain_blocks")  
  267.                 expos_comp_type = ExposureCompensator::GAIN_BLOCKS;  
  268.             else  
  269.             {  
  270.                 cout << "Bad exposure compensation method\n";  
  271.                 return -1;  
  272.             }  
  273.             i++;  
  274.         }  
  275.         else if (string(argv[i]) == "--seam")  
  276.         {  
  277.             if (string(argv[i + 1]) == "no" ||  
  278.                 string(argv[i + 1]) == "voronoi" ||  
  279.                 string(argv[i + 1]) == "gc_color" ||  
  280.                 string(argv[i + 1]) == "gc_colorgrad" ||  
  281.                 string(argv[i + 1]) == "dp_color" ||  
  282.                 string(argv[i + 1]) == "dp_colorgrad")  
  283.                 seam_find_type = argv[i + 1];  
  284.             else  
  285.             {  
  286.                 cout << "Bad seam finding method\n";  
  287.                 return -1;  
  288.             }  
  289.             i++;  
  290.         }  
  291.         else if (string(argv[i]) == "--blend")  
  292.         {  
  293.             if (string(argv[i + 1]) == "no")  
  294.                 blend_type = Blender::NO;  
  295.             else if (string(argv[i + 1]) == "feather")  
  296.                 blend_type = Blender::FEATHER;  
  297.             else if (string(argv[i + 1]) == "multiband")  
  298.                 blend_type = Blender::MULTI_BAND;  
  299.             else  
  300.             {  
  301.                 cout << "Bad blending method\n";  
  302.                 return -1;  
  303.             }  
  304.             i++;  
  305.         }  
  306.         else if (string(argv[i]) == "--blend_strength")  
  307.         {  
  308.             blend_strength = static_cast<float>(atof(argv[i + 1]));  
  309.             i++;  
  310.         }  
  311.         else if (string(argv[i]) == "--output")  
  312.         {  
  313.             result_name = argv[i + 1];  
  314.             i++;  
  315.         }  
  316.         else  
  317.             img_names.push_back(argv[i]);  
  318.     }  
  319.     if (preview)  
  320.     {  
  321.         compose_megapix = 0.6;  
  322.     }  
  323.     return 0;  
  324. }  
  325.   
  326.   
  327. int main(int argc, char* argv[])  
  328. {  
  329. #if ENABLE_LOG  
  330.     int64 app_start_time = getTickCount();  
  331. #endif  
  332.   
  333.     cv::setBreakOnError(true);  
  334.   
  335.     int retval = parseCmdArgs(argc, argv);  
  336.     if (retval)  
  337.         return retval;  
  338.   
  339.     // Check if have enough images  
  340.     int num_images = static_cast<int>(img_names.size());  
  341.     if (num_images < 2)  
  342.     {  
  343.         LOGLN("Need more images");  
  344.         return -1;  
  345.     }  
  346.   
  347.     double work_scale = 1, seam_scale = 1, compose_scale = 1;  
  348.     bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;  
  349.   
  350.     LOGLN("Finding features...");  
  351. #if ENABLE_LOG  
  352.     int64 t = getTickCount();  
  353. #endif  
  354.   
  355.     Ptr<FeaturesFinder> finder;  
  356.     if (features_type == "surf")  
  357.     {  
  358. #if defined(HAVE_OPENCV_NONFREE) && defined(HAVE_OPENCV_GPU)  
  359.         if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)  
  360.             finder = new SurfFeaturesFinderGpu();  
  361.         else  
  362. #endif  
  363.             finder = new SurfFeaturesFinder();  
  364.     }  
  365.     else if (features_type == "orb")  
  366.     {  
  367.         finder = new OrbFeaturesFinder();  
  368.     }  
  369.     else  
  370.     {  
  371.         cout << "Unknown 2D features type: '" << features_type << "'.\n";  
  372.         return -1;  
  373.     }  
  374.   
  375.     Mat full_img, img;  
  376.     vector<ImageFeatures> features(num_images);  
  377.     vector<Mat> images(num_images);  
  378.     vector<Size> full_img_sizes(num_images);  
  379.     double seam_work_aspect = 1;  
  380.   
  381.     for (int i = 0; i < num_images; ++i)  
  382.     {  
  383.         full_img = imread(img_names[i]);  
  384.         full_img_sizes[i] = full_img.size();  
  385.   
  386.         if (full_img.empty())  
  387.         {  
  388.             LOGLN("Can't open image " << img_names[i]);  
  389.             return -1;  
  390.         }  
  391.         if (work_megapix < 0)  
  392.         {  
  393.             img = full_img;  
  394.             work_scale = 1;  
  395.             is_work_scale_set = true;  
  396.         }  
  397.         else  
  398.         {  
  399.             if (!is_work_scale_set)  
  400.             {  
  401.                 work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));  
  402.                 is_work_scale_set = true;  
  403.             }  
  404.             resize(full_img, img, Size(), work_scale, work_scale);  
  405.         }  
  406.         if (!is_seam_scale_set)  
  407.         {  
  408.             seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));  
  409.             seam_work_aspect = seam_scale / work_scale;  
  410.             is_seam_scale_set = true;  
  411.         }  
  412.   
  413.         (*finder)(img, features[i]);  
  414.         features[i].img_idx = i;  
  415.         LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());  
  416.   
  417.         resize(full_img, img, Size(), seam_scale, seam_scale);  
  418.         images[i] = img.clone();  
  419.     }  
  420.   
  421.     finder->collectGarbage();  
  422.     full_img.release();  
  423.     img.release();  
  424.   
  425.     LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");  
  426.   
  427.     LOG("Pairwise matching");  
  428. #if ENABLE_LOG  
  429.     t = getTickCount();  
  430. #endif  
  431.     vector<MatchesInfo> pairwise_matches;  
  432.     BestOf2NearestMatcher matcher(try_gpu, match_conf);  
  433.     matcher(features, pairwise_matches);  
  434.     matcher.collectGarbage();  
  435.     LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");  
  436.   
  437.     // Check if we should save matches graph  
  438.     if (save_graph)  
  439.     {  
  440.         LOGLN("Saving matches graph...");  
  441.         ofstream f(save_graph_to.c_str());  
  442.         f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);  
  443.     }  
  444.   
  445.     // Leave only images we are sure are from the same panorama  
  446.     vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);  
  447.     vector<Mat> img_subset;  
  448.     vector<string> img_names_subset;  
  449.     vector<Size> full_img_sizes_subset;  
  450.     for (size_t i = 0; i < indices.size(); ++i)  
  451.     {  
  452.         img_names_subset.push_back(img_names[indices[i]]);  
  453.         img_subset.push_back(images[indices[i]]);  
  454.         full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);  
  455.     }  
  456.   
  457.     images = img_subset;  
  458.     img_names = img_names_subset;  
  459.     full_img_sizes = full_img_sizes_subset;  
  460.   
  461.     // Check if we still have enough images  
  462.     num_images = static_cast<int>(img_names.size());  
  463.     if (num_images < 2)  
  464.     {  
  465.         LOGLN("Need more images");  
  466.         return -1;  
  467.     }  
  468.   
  469.     HomographyBasedEstimator estimator;  
  470.     vector<CameraParams> cameras;  
  471.     estimator(features, pairwise_matches, cameras);  
  472.   
  473.     for (size_t i = 0; i < cameras.size(); ++i)  
  474.     {  
  475.         Mat R;  
  476.         cameras[i].R.convertTo(R, CV_32F);  
  477.         cameras[i].R = R;  
  478.         LOGLN("Initial intrinsics #" << indices[i]+1 << ":\n" << cameras[i].K());  
  479.     }  
  480.   
  481.     Ptr<detail::BundleAdjusterBase> adjuster;  
  482.     if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();  
  483.     else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();  
  484.     else  
  485.     {  
  486.         cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";  
  487.         return -1;  
  488.     }  
  489.     adjuster->setConfThresh(conf_thresh);  
  490.     Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);  
  491.     if (ba_refine_mask[0] == 'x') refine_mask(0,0) = 1;  
  492.     if (ba_refine_mask[1] == 'x') refine_mask(0,1) = 1;  
  493.     if (ba_refine_mask[2] == 'x') refine_mask(0,2) = 1;  
  494.     if (ba_refine_mask[3] == 'x') refine_mask(1,1) = 1;  
  495.     if (ba_refine_mask[4] == 'x') refine_mask(1,2) = 1;  
  496.     adjuster->setRefinementMask(refine_mask);  
  497.     (*adjuster)(features, pairwise_matches, cameras);  
  498.   
  499.     // Find median focal length  
  500.   
  501.     vector<double> focals;  
  502.     for (size_t i = 0; i < cameras.size(); ++i)  
  503.     {  
  504.         LOGLN("Camera #" << indices[i]+1 << ":\n" << cameras[i].K());  
  505.         focals.push_back(cameras[i].focal);  
  506.     }  
  507.   
  508.     sort(focals.begin(), focals.end());  
  509.     float warped_image_scale;  
  510.     if (focals.size() % 2 == 1)  
  511.         warped_image_scale = static_cast<float>(focals[focals.size() / 2]);  
  512.     else  
  513.         warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;  
  514.   
  515.     if (do_wave_correct)  
  516.     {  
  517.         vector<Mat> rmats;  
  518.         for (size_t i = 0; i < cameras.size(); ++i)  
  519.             rmats.push_back(cameras[i].R);  
  520.         waveCorrect(rmats, wave_correct);  
  521.         for (size_t i = 0; i < cameras.size(); ++i)  
  522.             cameras[i].R = rmats[i];  
  523.     }  
  524.   
  525.     LOGLN("Warping images (auxiliary)... ");  
  526. #if ENABLE_LOG  
  527.     t = getTickCount();  
  528. #endif  
  529.   
  530.     vector<Point> corners(num_images);  
  531.     vector<Mat> masks_warped(num_images);  
  532.     vector<Mat> images_warped(num_images);  
  533.     vector<Size> sizes(num_images);  
  534.     vector<Mat> masks(num_images);  
  535.   
  536.     // Preapre images masks  
  537.     for (int i = 0; i < num_images; ++i)  
  538.     {  
  539.         masks[i].create(images[i].size(), CV_8U);  
  540.         masks[i].setTo(Scalar::all(255));  
  541.     }  
  542.   
  543.     // Warp images and their masks  
  544.   
  545.     Ptr<WarperCreator> warper_creator;  
  546. #if defined(HAVE_OPENCV_GPU)  
  547.     if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)  
  548.     {  
  549.         if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();  
  550.         else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();  
  551.         else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();  
  552.     }  
  553.     else  
  554. #endif  
  555.     {  
  556.         if (warp_type == "plane") warper_creator = new cv::PlaneWarper();  
  557.         else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();  
  558.         else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();  
  559.         else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();  
  560.         else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();  
  561.         else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);  
  562.         else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);  
  563.         else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);  
  564.         else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);  
  565.         else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);  
  566.         else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);  
  567.         else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);  
  568.         else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);  
  569.         else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();  
  570.         else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();  
  571.     }  
  572.   
  573.     if (warper_creator.empty())  
  574.     {  
  575.         cout << "Can't create the following warper '" << warp_type << "'\n";  
  576.         return 1;  
  577.     }  
  578.   
  579.     Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));  
  580.   
  581.     for (int i = 0; i < num_images; ++i)  
  582.     {  
  583.         Mat_<float> K;  
  584.         cameras[i].K().convertTo(K, CV_32F);  
  585.         float swa = (float)seam_work_aspect;  
  586.         K(0,0) *= swa; K(0,2) *= swa;  
  587.         K(1,1) *= swa; K(1,2) *= swa;  
  588.   
  589.         corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);  
  590.         sizes[i] = images_warped[i].size();  
  591.   
  592.         warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);  
  593.     }  
  594.   
  595.     vector<Mat> images_warped_f(num_images);  
  596.     for (int i = 0; i < num_images; ++i)  
  597.         images_warped[i].convertTo(images_warped_f[i], CV_32F);  
  598.   
  599.     LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");  
  600.   
  601.     Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);  
  602.     compensator->feed(corners, images_warped, masks_warped);  
  603.   
  604.     Ptr<SeamFinder> seam_finder;  
  605.     if (seam_find_type == "no")  
  606.         seam_finder = new detail::NoSeamFinder();  
  607.     else if (seam_find_type == "voronoi")  
  608.         seam_finder = new detail::VoronoiSeamFinder();  
  609.     else if (seam_find_type == "gc_color")  
  610.     {  
  611. #if defined(HAVE_OPENCV_GPU)  
  612.         if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)  
  613.             seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR);  
  614.         else  
  615. #endif  
  616.             seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);  
  617.     }  
  618.     else if (seam_find_type == "gc_colorgrad")  
  619.     {  
  620. #if defined(HAVE_OPENCV_GPU)  
  621.         if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)  
  622.             seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR_GRAD);  
  623.         else  
  624. #endif  
  625.             seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR_GRAD);  
  626.     }  
  627.     else if (seam_find_type == "dp_color")  
  628.         seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR);  
  629.     else if (seam_find_type == "dp_colorgrad")  
  630.         seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR_GRAD);  
  631.     if (seam_finder.empty())  
  632.     {  
  633.         cout << "Can't create the following seam finder '" << seam_find_type << "'\n";  
  634.         return 1;  
  635.     }  
  636.   
  637.     seam_finder->find(images_warped_f, corners, masks_warped);  
  638.   
  639.     // Release unused memory  
  640.     images.clear();  
  641.     images_warped.clear();  
  642.     images_warped_f.clear();  
  643.     masks.clear();  
  644.   
  645.     LOGLN("Compositing...");  
  646. #if ENABLE_LOG  
  647.     t = getTickCount();  
  648. #endif  
  649.   
  650.     Mat img_warped, img_warped_s;  
  651.     Mat dilated_mask, seam_mask, mask, mask_warped;  
  652.     Ptr<Blender> blender;  
  653.     //double compose_seam_aspect = 1;  
  654.     double compose_work_aspect = 1;  
  655.   
  656.     for (int img_idx = 0; img_idx < num_images; ++img_idx)  
  657.     {  
  658.         LOGLN("Compositing image #" << indices[img_idx]+1);  
  659.   
  660.         // Read image and resize it if necessary  
  661.         full_img = imread(img_names[img_idx]);  
  662.         if (!is_compose_scale_set)  
  663.         {  
  664.             if (compose_megapix > 0)  
  665.                 compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));  
  666.             is_compose_scale_set = true;  
  667.   
  668.             // Compute relative scales  
  669.             //compose_seam_aspect = compose_scale / seam_scale;  
  670.             compose_work_aspect = compose_scale / work_scale;  
  671.   
  672.             // Update warped image scale  
  673.             warped_image_scale *= static_cast<float>(compose_work_aspect);  
  674.             warper = warper_creator->create(warped_image_scale);  
  675.   
  676.             // Update corners and sizes  
  677.             for (int i = 0; i < num_images; ++i)  
  678.             {  
  679.                 // Update intrinsics  
  680.                 cameras[i].focal *= compose_work_aspect;  
  681.                 cameras[i].ppx *= compose_work_aspect;  
  682.                 cameras[i].ppy *= compose_work_aspect;  
  683.   
  684.                 // Update corner and size  
  685.                 Size sz = full_img_sizes[i];  
  686.                 if (std::abs(compose_scale - 1) > 1e-1)  
  687.                 {  
  688.                     sz.width = cvRound(full_img_sizes[i].width * compose_scale);  
  689.                     sz.height = cvRound(full_img_sizes[i].height * compose_scale);  
  690.                 }  
  691.   
  692.                 Mat K;  
  693.                 cameras[i].K().convertTo(K, CV_32F);  
  694.                 Rect roi = warper->warpRoi(sz, K, cameras[i].R);  
  695.                 corners[i] = roi.tl();  
  696.                 sizes[i] = roi.size();  
  697.             }  
  698.         }  
  699.         if (abs(compose_scale - 1) > 1e-1)  
  700.             resize(full_img, img, Size(), compose_scale, compose_scale);  
  701.         else  
  702.             img = full_img;  
  703.         full_img.release();  
  704.         Size img_size = img.size();  
  705.   
  706.         Mat K;  
  707.         cameras[img_idx].K().convertTo(K, CV_32F);  
  708.   
  709.         // Warp the current image  
  710.         warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);  
  711.   
  712.         // Warp the current image mask  
  713.         mask.create(img_size, CV_8U);  
  714.         mask.setTo(Scalar::all(255));  
  715.         warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);  
  716.   
  717.         // Compensate exposure  
  718.         compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);  
  719.   
  720.         img_warped.convertTo(img_warped_s, CV_16S);  
  721.         img_warped.release();  
  722.         img.release();  
  723.         mask.release();  
  724.   
  725.         dilate(masks_warped[img_idx], dilated_mask, Mat());  
  726.         resize(dilated_mask, seam_mask, mask_warped.size());  
  727.         mask_warped = seam_mask & mask_warped;  
  728.   
  729.         if (blender.empty())  
  730.         {  
  731.             blender = Blender::createDefault(blend_type, try_gpu);  
  732.             Size dst_sz = resultRoi(corners, sizes).size();  
  733.             float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;  
  734.             if (blend_width < 1.f)  
  735.                 blender = Blender::createDefault(Blender::NO, try_gpu);  
  736.             else if (blend_type == Blender::MULTI_BAND)  
  737.             {  
  738.                 MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));  
  739.                 mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));  
  740.                 LOGLN("Multi-band blender, number of bands: " << mb->numBands());  
  741.             }  
  742.             else if (blend_type == Blender::FEATHER)  
  743.             {  
  744.                 FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));  
  745.                 fb->setSharpness(1.f/blend_width);  
  746.                 LOGLN("Feather blender, sharpness: " << fb->sharpness());  
  747.             }  
  748.             blender->prepare(corners, sizes);  
  749.         }  
  750.   
  751.         // Blend the current image  
  752.         blender->feed(img_warped_s, mask_warped, corners[img_idx]);  
  753.     }  
  754.   
  755.     Mat result, result_mask;  
  756.     blender->blend(result, result_mask);  
  757.   
  758.     LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");  
  759.   
  760.     imwrite(result_name, result);  
  761.   
  762.     LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");  
  763.     return 0;  
  764. }  
/*M///
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//
//M*/

#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"

using namespace std;
using namespace cv;
using namespace cv::detail;

static void printUsage()
{
    cout <<
        "Rotation model images stitcher.\n\n"
        "stitching_detailed img1 img2 [...imgN] [flags]\n\n"
        "Flags:\n"
        "  --preview\n"
        "      Run stitching in the preview mode. Works faster than usual mode,\n"
        "      but output image will have lower resolution.\n"
        "  --try_gpu (yes|no)\n"
        "      Try to use GPU. The default value is 'no'. All default values\n"
        "      are for CPU mode.\n"
        "\nMotion Estimation Flags:\n"
        "  --work_megapix <float>\n"
        "      Resolution for image registration step. The default is 0.6 Mpx.\n"
        "  --features (surf|orb)\n"
        "      Type of features used for images matching. The default is surf.\n"
        "  --match_conf <float>\n"
        "      Confidence for feature matching step. The default is 0.65 for surf and 0.3 for orb.\n"
        "  --conf_thresh <float>\n"
        "      Threshold for two images are from the same panorama confidence.\n"
        "      The default is 1.0.\n"
        "  --ba (reproj|ray)\n"
        "      Bundle adjustment cost function. The default is ray.\n"
        "  --ba_refine_mask (mask)\n"
        "      Set refinement mask for bundle adjustment. It looks like 'x_xxx',\n"
        "      where 'x' means refine respective parameter and '_' means don't\n"
        "      refine one, and has the following format:\n"
        "      <fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle\n"
        "      adjustment doesn't support estimation of selected parameter then\n"
        "      the respective flag is ignored.\n"
        "  --wave_correct (no|horiz|vert)\n"
        "      Perform wave effect correction. The default is 'horiz'.\n"
        "  --save_graph <file_name>\n"
        "      Save matches graph represented in DOT language to <file_name> file.\n"
        "      Labels description: Nm is number of matches, Ni is number of inliers,\n"
        "      C is confidence.\n"
        "\nCompositing Flags:\n"
        "  --warp (plane|cylindrical|spherical|fisheye|stereographic|compressedPlaneA2B1|compressedPlaneA1.5B1|compressedPlanePortraitA2B1|compressedPlanePortraitA1.5B1|paniniA2B1|paniniA1.5B1|paniniPortraitA2B1|paniniPortraitA1.5B1|mercator|transverseMercator)\n"
        "      Warp surface type. The default is 'spherical'.\n"
        "  --seam_megapix <float>\n"
        "      Resolution for seam estimation step. The default is 0.1 Mpx.\n"
        "  --seam (no|voronoi|gc_color|gc_colorgrad)\n"
        "      Seam estimation method. The default is 'gc_color'.\n"
        "  --compose_megapix <float>\n"
        "      Resolution for compositing step. Use -1 for original resolution.\n"
        "      The default is -1.\n"
        "  --expos_comp (no|gain|gain_blocks)\n"
        "      Exposure compensation method. The default is 'gain_blocks'.\n"
        "  --blend (no|feather|multiband)\n"
        "      Blending method. The default is 'multiband'.\n"
        "  --blend_strength <float>\n"
        "      Blending strength from [0,100] range. The default is 5.\n"
        "  --output <result_img>\n"
        "      The default is 'result.jpg'.\n";
}


// Default command line args; each is overwritten by parseCmdArgs when the
// corresponding flag is supplied on the command line.
vector<string> img_names;                  // input image paths (positional arguments)
bool preview = false;                      // --preview: faster run, compose_megapix forced to 0.6
bool try_gpu = false;                      // --try_gpu yes|no: attempt CUDA code paths where built in
double work_megapix = 0.6;                 // --work_megapix: registration-step resolution, Mpx
double seam_megapix = 0.1;                 // --seam_megapix: seam-estimation resolution, Mpx
double compose_megapix = -1;               // --compose_megapix: compositing resolution, Mpx (-1 = original size)
float conf_thresh = 1.f;                   // --conf_thresh: min confidence two images are in the same panorama
string features_type = "surf";             // --features surf|orb: 2D feature detector/descriptor
string ba_cost_func = "ray";               // --ba reproj|ray: bundle adjustment cost function
string ba_refine_mask = "xxxxx";           // --ba_refine_mask: <fx><skew><ppx><aspect><ppy>, 'x' = refine
bool do_wave_correct = true;               // --wave_correct no: disables wave-effect correction
WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ;  // horiz|vert correction variant
bool save_graph = false;                   // --save_graph: dump matches graph in DOT format
std::string save_graph_to;                 // destination file for the matches graph
string warp_type = "spherical";            // --warp: warp surface type (see printUsage for full list)
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;     // --expos_comp no|gain|gain_blocks
float match_conf = 0.3f;                   // --match_conf; NOTE(review): printUsage claims 0.65 for surf, actual default is 0.3
string seam_find_type = "gc_color";        // --seam: seam estimation method
int blend_type = Blender::MULTI_BAND;      // --blend no|feather|multiband
float blend_strength = 5;                  // --blend_strength: blending strength in [0,100]
string result_name = "result.jpg";         // --output (or --result): output panorama file name

static int parseCmdArgs(int argc, char** argv)
{
    if (argc == 1)
    {
        printUsage();
        return -1;
    }
    for (int i = 1; i < argc; ++i)
    {
        if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
        {
            printUsage();
            return -1;
        }
        else if (string(argv[i]) == "--preview")
        {
            preview = true;
        }
        else if (string(argv[i]) == "--try_gpu")
        {
            if (string(argv[i + 1]) == "no")
                try_gpu = false;
            else if (string(argv[i + 1]) == "yes")
                try_gpu = true;
            else
            {
                cout << "Bad --try_gpu flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--work_megapix")
        {
            work_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--seam_megapix")
        {
            seam_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--compose_megapix")
        {
            compose_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--result")
        {
            result_name = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--features")
        {
            features_type = argv[i + 1];
            if (features_type == "orb")
                match_conf = 0.3f;
            i++;
        }
        else if (string(argv[i]) == "--match_conf")
        {
            match_conf = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--conf_thresh")
        {
            conf_thresh = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--ba")
        {
            ba_cost_func = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--ba_refine_mask")
        {
            ba_refine_mask = argv[i + 1];
            if (ba_refine_mask.size() != 5)
            {
                cout << "Incorrect refinement mask length.\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--wave_correct")
        {
            if (string(argv[i + 1]) == "no")
                do_wave_correct = false;
            else if (string(argv[i + 1]) == "horiz")
            {
                do_wave_correct = true;
                wave_correct = detail::WAVE_CORRECT_HORIZ;
            }
            else if (string(argv[i + 1]) == "vert")
            {
                do_wave_correct = true;
                wave_correct = detail::WAVE_CORRECT_VERT;
            }
            else
            {
                cout << "Bad --wave_correct flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--save_graph")
        {
            save_graph = true;
            save_graph_to = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--warp")
        {
            warp_type = string(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--expos_comp")
        {
            if (string(argv[i + 1]) == "no")
                expos_comp_type = ExposureCompensator::NO;
            else if (string(argv[i + 1]) == "gain")
                expos_comp_type = ExposureCompensator::GAIN;
            else if (string(argv[i + 1]) == "gain_blocks")
                expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
            else
            {
                cout << "Bad exposure compensation method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--seam")
        {
            if (string(argv[i + 1]) == "no" ||
                string(argv[i + 1]) == "voronoi" ||
                string(argv[i + 1]) == "gc_color" ||
                string(argv[i + 1]) == "gc_colorgrad" ||
                string(argv[i + 1]) == "dp_color" ||
                string(argv[i + 1]) == "dp_colorgrad")
                seam_find_type = argv[i + 1];
            else
            {
                cout << "Bad seam finding method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend")
        {
            if (string(argv[i + 1]) == "no")
                blend_type = Blender::NO;
            else if (string(argv[i + 1]) == "feather")
                blend_type = Blender::FEATHER;
            else if (string(argv[i + 1]) == "multiband")
                blend_type = Blender::MULTI_BAND;
            else
            {
                cout << "Bad blending method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend_strength")
        {
            blend_strength = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--output")
        {
            result_name = argv[i + 1];
            i++;
        }
        else
            img_names.push_back(argv[i]);
    }
    if (preview)
    {
        compose_megapix = 0.6;
    }
    return 0;
}


int main(int argc, char* argv[])
{
#if ENABLE_LOG
    int64 app_start_time = getTickCount();
#endif

    cv::setBreakOnError(true);

    int retval = parseCmdArgs(argc, argv);
    if (retval)
        return retval;

    // Check if have enough images
    int num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }

    double work_scale = 1, seam_scale = 1, compose_scale = 1;
    bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;

    LOGLN("Finding features...");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    Ptr<FeaturesFinder> finder;
    if (features_type == "surf")
    {
#if defined(HAVE_OPENCV_NONFREE) && defined(HAVE_OPENCV_GPU)
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            finder = new SurfFeaturesFinderGpu();
        else
#endif
            finder = new SurfFeaturesFinder();
    }
    else if (features_type == "orb")
    {
        finder = new OrbFeaturesFinder();
    }
    else
    {
        cout << "Unknown 2D features type: '" << features_type << "'.\n";
        return -1;
    }

    Mat full_img, img;
    vector<ImageFeatures> features(num_images);
    vector<Mat> images(num_images);
    vector<Size> full_img_sizes(num_images);
    double seam_work_aspect = 1;

    for (int i = 0; i < num_images; ++i)
    {
        full_img = imread(img_names[i]);
        full_img_sizes[i] = full_img.size();

        if (full_img.empty())
        {
            LOGLN("Can't open image " << img_names[i]);
            return -1;
        }
        if (work_megapix < 0)
        {
            img = full_img;
            work_scale = 1;
            is_work_scale_set = true;
        }
        else
        {
            if (!is_work_scale_set)
            {
                work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
                is_work_scale_set = true;
            }
            resize(full_img, img, Size(), work_scale, work_scale);
        }
        if (!is_seam_scale_set)
        {
            seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
            seam_work_aspect = seam_scale / work_scale;
            is_seam_scale_set = true;
        }

        (*finder)(img, features[i]);
        features[i].img_idx = i;
        LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());

        resize(full_img, img, Size(), seam_scale, seam_scale);
        images[i] = img.clone();
    }

    finder->collectGarbage();
    full_img.release();
    img.release();

    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    LOG("Pairwise matching");
#if ENABLE_LOG
    t = getTickCount();
#endif
    vector<MatchesInfo> pairwise_matches;
    BestOf2NearestMatcher matcher(try_gpu, match_conf);
    matcher(features, pairwise_matches);
    matcher.collectGarbage();
    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Check if we should save matches graph
    if (save_graph)
    {
        LOGLN("Saving matches graph...");
        ofstream f(save_graph_to.c_str());
        f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
    }

    // Leave only images we are sure are from the same panorama
    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
    vector<Mat> img_subset;
    vector<string> img_names_subset;
    vector<Size> full_img_sizes_subset;
    for (size_t i = 0; i < indices.size(); ++i)
    {
        img_names_subset.push_back(img_names[indices[i]]);
        img_subset.push_back(images[indices[i]]);
        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
    }

    images = img_subset;
    img_names = img_names_subset;
    full_img_sizes = full_img_sizes_subset;

    // Check if we still have enough images
    num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }

    HomographyBasedEstimator estimator;
    vector<CameraParams> cameras;
    estimator(features, pairwise_matches, cameras);

    for (size_t i = 0; i < cameras.size(); ++i)
    {
        Mat R;
        cameras[i].R.convertTo(R, CV_32F);
        cameras[i].R = R;
        LOGLN("Initial intrinsics #" << indices[i]+1 << ":\n" << cameras[i].K());
    }

    Ptr<detail::BundleAdjusterBase> adjuster;
    if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
    else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
    else
    {
        cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
        return -1;
    }
    adjuster->setConfThresh(conf_thresh);
    Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
    if (ba_refine_mask[0] == 'x') refine_mask(0,0) = 1;
    if (ba_refine_mask[1] == 'x') refine_mask(0,1) = 1;
    if (ba_refine_mask[2] == 'x') refine_mask(0,2) = 1;
    if (ba_refine_mask[3] == 'x') refine_mask(1,1) = 1;
    if (ba_refine_mask[4] == 'x') refine_mask(1,2) = 1;
    adjuster->setRefinementMask(refine_mask);
    (*adjuster)(features, pairwise_matches, cameras);

    // Find median focal length

    vector<double> focals;
    for (size_t i = 0; i < cameras.size(); ++i)
    {
        LOGLN("Camera #" << indices[i]+1 << ":\n" << cameras[i].K());
        focals.push_back(cameras[i].focal);
    }

    sort(focals.begin(), focals.end());
    float warped_image_scale;
    if (focals.size() % 2 == 1)
        warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
    else
        warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;

    if (do_wave_correct)
    {
        vector<Mat> rmats;
        for (size_t i = 0; i < cameras.size(); ++i)
            rmats.push_back(cameras[i].R);
        waveCorrect(rmats, wave_correct);
        for (size_t i = 0; i < cameras.size(); ++i)
            cameras[i].R = rmats[i];
    }

    LOGLN("Warping images (auxiliary)... ");
#if ENABLE_LOG
    t = getTickCount();
#endif

    vector<Point> corners(num_images);
    vector<Mat> masks_warped(num_images);
    vector<Mat> images_warped(num_images);
    vector<Size> sizes(num_images);
    vector<Mat> masks(num_images);

    // Preapre images masks
    for (int i = 0; i < num_images; ++i)
    {
        masks[i].create(images[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks

    Ptr<WarperCreator> warper_creator;
#if defined(HAVE_OPENCV_GPU)
    if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
    {
        if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
    }
    else
#endif
    {
        if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
        else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
        else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
        else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
        else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
        else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
        else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
        else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
        else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
        else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
        else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
        else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
        else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
    }

    if (warper_creator.empty())
    {
        cout << "Can't create the following warper '" << warp_type << "'\n";
        return 1;
    }

    Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));

    for (int i = 0; i < num_images; ++i)
    {
        Mat_<float> K;
        cameras[i].K().convertTo(K, CV_32F);
        float swa = (float)seam_work_aspect;
        K(0,0) *= swa; K(0,2) *= swa;
        K(1,1) *= swa; K(1,2) *= swa;

        corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        // Warp each per-image mask with nearest-neighbor interpolation so mask
        // values stay strictly 0/255 (no interpolated gray pixels at the border).
        // NOTE(review): this is the last statement of a per-image warping loop
        // whose header is above this excerpt; `warper`, `masks`, `cameras` and
        // `masks_warped` are declared earlier in main().
        warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    // Seam estimation operates on CV_32F images, so convert the warped images.
    vector<Mat> images_warped_f(num_images);
    for (int i = 0; i < num_images; ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Build the exposure compensator (gain / blocks-gain / none, chosen by the
    // command-line flag parsed earlier) and fit it on the warped images + masks.
    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
    compensator->feed(corners, images_warped, masks_warped);

    // Select the seam finder implementation from the string option.
    // "gc_*" variants prefer the CUDA implementation when the binary was built
    // with HAVE_OPENCV_GPU, a CUDA device is present, and --try_gpu was given.
    Ptr<SeamFinder> seam_finder;
    if (seam_find_type == "no")
        seam_finder = new detail::NoSeamFinder();
    else if (seam_find_type == "voronoi")
        seam_finder = new detail::VoronoiSeamFinder();
    else if (seam_find_type == "gc_color")
    {
#if defined(HAVE_OPENCV_GPU)
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
    }
    else if (seam_find_type == "gc_colorgrad")
    {
#if defined(HAVE_OPENCV_GPU)
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR_GRAD);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR_GRAD);
    }
    else if (seam_find_type == "dp_color")
        seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR);
    else if (seam_find_type == "dp_colorgrad")
        seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR_GRAD);
    // Unrecognized option string -> no finder was assigned; bail out.
    if (seam_finder.empty())
    {
        cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
        return 1;
    }

    // Compute seams in place: masks_warped[i] is trimmed so overlapping regions
    // are assigned to exactly one image along the estimated seam.
    seam_finder->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    images.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");
#if ENABLE_LOG
    t = getTickCount();
#endif

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;
    Ptr<Blender> blender;
    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;

    // Compositing pass: re-read each source image at (possibly higher)
    // compositing resolution, re-warp it, compensate exposure, and feed it to
    // the blender together with its seam-trimmed mask.
    for (int img_idx = 0; img_idx < num_images; ++img_idx)
    {
        LOGLN("Compositing image #" << indices[img_idx]+1);

        // Read image and resize it if necessary
        full_img = imread(img_names[img_idx]);
        if (!is_compose_scale_set)
        {
            // One-time setup on the first iteration: fix the compositing scale
            // and rescale everything that was estimated at work_scale.
            if (compose_megapix > 0)
                compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;

            // Compute relative scales
            //compose_seam_aspect = compose_scale / seam_scale;
            compose_work_aspect = compose_scale / work_scale;

            // Update warped image scale
            warped_image_scale *= static_cast<float>(compose_work_aspect);
            warper = warper_creator->create(warped_image_scale);

            // Update corners and sizes
            for (int i = 0; i < num_images; ++i)
            {
                // Update intrinsics: focal length and principal point scale
                // linearly with image resolution; rotation R is unaffected.
                cameras[i].focal *= compose_work_aspect;
                cameras[i].ppx *= compose_work_aspect;
                cameras[i].ppy *= compose_work_aspect;

                // Update corner and size
                Size sz = full_img_sizes[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);
                }

                Mat K;
                cameras[i].K().convertTo(K, CV_32F);
                // Destination rectangle of image i in panorama coordinates.
                Rect roi = warper->warpRoi(sz, K, cameras[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        // Scales within 10% of 1.0 are treated as "no resize" (matches the
        // 1e-1 tolerance used when precomputing sizes above).
        if (abs(compose_scale - 1) > 1e-1)
            resize(full_img, img, Size(), compose_scale, compose_scale);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        Mat K;
        cameras[img_idx].K().convertTo(K, CV_32F);

        // Warp the current image; BORDER_REFLECT avoids dark fringes at the
        // image border after interpolation.
        warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

        // Warp the current image mask (all-255 rectangle -> warped footprint).
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

        // Compensate exposure using the gains fitted during the seam pass.
        compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);

        // Blenders (multi-band in particular) accumulate in CV_16S.
        img_warped.convertTo(img_warped_s, CV_16S);
        img_warped.release();
        img.release();
        mask.release();

        // The seam masks were computed at seam resolution: dilate to give the
        // blender some overlap, upscale to compositing resolution, then
        // intersect with the full-resolution warped mask.
        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());
        mask_warped = seam_mask & mask_warped;

        // Lazily create the blender on the first iteration, once the final
        // panorama size (hence blend width) is known.
        if (blender.empty())
        {
            blender = Blender::createDefault(blend_type, try_gpu);
            Size dst_sz = resultRoi(corners, sizes).size();
            float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
            if (blend_width < 1.f)
                blender = Blender::createDefault(Blender::NO, try_gpu);
            else if (blend_type == Blender::MULTI_BAND)
            {
                // Old-style cv::Ptr has no get(): static_cast to the raw
                // Blender* first, then downcast to configure the subclass.
                MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
                mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
                LOGLN("Multi-band blender, number of bands: " << mb->numBands());
            }
            else if (blend_type == Blender::FEATHER)
            {
                FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
                fb->setSharpness(1.f/blend_width);
                LOGLN("Feather blender, sharpness: " << fb->sharpness());
            }
            blender->prepare(corners, sizes);
        }

        // Blend the current image
        blender->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    // Produce the final panorama and its validity mask, then write it out.
    Mat result, result_mask;
    blender->blend(result, result_mask);

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    imwrite(result_name, result);

    LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
    return 0;
}





  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值