OpenCV 2, Day 10: code from Chapters 9 and 10 of the cookbook

/*------------------------------------------------------------------------------------------*\
   This file contains material supporting chapter 9 of the cookbook:  
   Computer Vision Programming using the OpenCV Library.
   by Robert Laganiere, Packt Publishing, 2011.

   This program is free software; permission is hereby granted to use, copy, modify,
   and distribute this source code, or portions thereof, for any purpose, without fee,
   subject to the restriction that the copyright notice may not be removed
   or altered from any source or altered source distribution.
   The software is released on an as-is basis and without any warranties of any kind.
   In particular, the software is not guaranteed to be fault-tolerant or free from failure.
   The author disclaims all warranties with regard to this software, any use,
   and any consequent failure, is purely the responsibility of the user.
 
   Copyright (C) 2010-2011 Robert Laganiere, www.laganiere.name
\*------------------------------------------------------------------------------------------*/

#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include "matcher.h"

int main()
{
        // Read input images
        cv::Mat image1= cv::imread("D:\\images\\parliament1.bmp",0);
        cv::Mat image2= cv::imread("D:\\images\\parliament2.bmp",0);
        if (!image1.data || !image2.data)
                return 0;

    // Display the images
        cv::namedWindow("Image 1");
        cv::imshow("Image 1",image1);
        cv::namedWindow("Image 2");
        cv::imshow("Image 2",image2);

        // Prepare the matcher
        RobustMatcher rmatcher;
        rmatcher.setConfidenceLevel(0.98);
        rmatcher.setMinDistanceToEpipolar(1.0);
        rmatcher.setRatio(0.65f);
        cv::Ptr<cv::FeatureDetector> pfd= new cv::SurfFeatureDetector(10);
        rmatcher.setFeatureDetector(pfd);

        // Match the two images
        std::vector<cv::DMatch> matches;
        std::vector<cv::KeyPoint> keypoints1, keypoints2;
        cv::Mat fundemental= rmatcher.match(image1,image2,matches, keypoints1, keypoints2);

        // draw the matches
        cv::Mat imageMatches;
        cv::drawMatches(image1,keypoints1,  // 1st image and its keypoints
                            image2,keypoints2,  // 2nd image and its keypoints
                                        matches,                        // the matches
                                        imageMatches,           // the image produced
                                        cv::Scalar(255,255,255)); // color of the lines
        cv::namedWindow("Matches");
        cv::imshow("Matches",imageMatches);
       
        // Convert keypoints into Point2f
        std::vector<cv::Point2f> points1, points2;
        for (std::vector<cv::DMatch>::const_iterator it= matches.begin();
                 it!= matches.end(); ++it) {

                         // Get the position of left keypoints
                         float x= keypoints1[it->queryIdx].pt.x;
                         float y= keypoints1[it->queryIdx].pt.y;
                         points1.push_back(cv::Point2f(x,y));
                         // Get the position of right keypoints
                         x= keypoints2[it->trainIdx].pt.x;
                         y= keypoints2[it->trainIdx].pt.y;
                         points2.push_back(cv::Point2f(x,y));
        }

        std::cout << points1.size() << " " << points2.size() << std::endl;

        // Find the homography between image 1 and image 2
        std::vector<uchar> inliers(points1.size(),0);
        cv::Mat homography= cv::findHomography(
                cv::Mat(points1),cv::Mat(points2), // corresponding points
                inliers,        // output: inlier matches
                CV_RANSAC,      // RANSAC method
                1.);            // max reprojection error (in pixels)

        // Draw the inlier points
        std::vector<cv::Point2f>::const_iterator itPts= points1.begin();
        std::vector<uchar>::const_iterator itIn= inliers.begin();
        while (itPts!=points1.end()) {

                // draw a circle at each inlier location
                if (*itIn)
                        cv::circle(image1,*itPts,3,cv::Scalar(255,255,255),2);

                ++itPts;
                ++itIn;
        }

        itPts= points2.begin();
        itIn= inliers.begin();
        while (itPts!=points2.end()) {

                // draw a circle at each inlier location
                if (*itIn)
                        cv::circle(image2,*itPts,3,cv::Scalar(255,255,255),2);

                ++itPts;
                ++itIn;
        }

    // Display the images with points
        cv::namedWindow("Image 1 Homography Points");
        cv::imshow("Image 1 Homography Points",image1);
        cv::namedWindow("Image 2 Homography Points");
        cv::imshow("Image 2 Homography Points",image2);

        // Warp image 1 into image 2's frame
        cv::Mat result;
        cv::warpPerspective(image1, // input image
                result,                 // output image
                homography,             // homography
                cv::Size(2*image1.cols,image1.rows)); // size of output image

        // Copy image 2 on the left half of the mosaic
        cv::Mat half(result,cv::Rect(0,0,image2.cols,image2.rows));
        image2.copyTo(half);

    // Display the warped image
        cv::namedWindow("After warping");
        cv::imshow("After warping",result);

        cv::waitKey();
        return 0;
}

Header file (matcher.h)

/*------------------------------------------------------------------------------------------*\
   This file contains material supporting chapter 9 of the cookbook:  
   Computer Vision Programming using the OpenCV Library.
   by Robert Laganiere, Packt Publishing, 2011.

   This program is free software; permission is hereby granted to use, copy, modify,
   and distribute this source code, or portions thereof, for any purpose, without fee,
   subject to the restriction that the copyright notice may not be removed
   or altered from any source or altered source distribution.
   The software is released on an as-is basis and without any warranties of any kind.
   In particular, the software is not guaranteed to be fault-tolerant or free from failure.
   The author disclaims all warranties with regard to this software, any use,
   and any consequent failure, is purely the responsibility of the user.
 
   Copyright (C) 2010-2011 Robert Laganiere, www.laganiere.name
\*------------------------------------------------------------------------------------------*/

#if !defined MATCHER
#define MATCHER

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>

class RobustMatcher {

  private:

          // pointer to the feature point detector object
          cv::Ptr<cv::FeatureDetector> detector;
          // pointer to the feature descriptor extractor object
          cv::Ptr<cv::DescriptorExtractor> extractor;
          float ratio; // max ratio between 1st and 2nd NN
          bool refineF; // if true will refine the F matrix
          double distance; // min distance to epipolar
          double confidence; // confidence level (probability)

  public:

          RobustMatcher() : ratio(0.65f), refineF(true), distance(3.0), confidence(0.99) {

                  // SURF is the default feature
                  detector= new cv::SurfFeatureDetector();
                  extractor= new cv::SurfDescriptorExtractor();
          }

          // Set the feature detector
          void setFeatureDetector(cv::Ptr<cv::FeatureDetector>& detect) {

                  detector= detect;
          }

          // Set descriptor extractor
          void setDescriptorExtractor(cv::Ptr<cv::DescriptorExtractor>& desc) {

                  extractor= desc;
          }

          // Set the minimum distance to epipolar in RANSAC
          void setMinDistanceToEpipolar(double d) {

                  distance= d;
          }

          // Set confidence level in RANSAC
          void setConfidenceLevel(double c) {

                  confidence= c;
          }

          // Set the NN ratio
          void setRatio(float r) {

                  ratio= r;
          }

          // if you want the F matrix to be recalculated
          void refineFundamental(bool flag) {

                  refineF= flag;
          }

          // Clear matches for which the NN ratio is greater than the threshold
          // return the number of removed points
          // (corresponding entries being cleared, i.e. size will be 0)
          int ratioTest(std::vector<std::vector<cv::DMatch>>& matches) {

                int removed=0;

        // for all matches
                for (std::vector<std::vector<cv::DMatch>>::iterator matchIterator= matches.begin();
                         matchIterator!= matches.end(); ++matchIterator) {

                                 // if 2 NN have been identified
                                 if (matchIterator->size() > 1) {

                                         // check distance ratio
                                         if ((*matchIterator)[0].distance/(*matchIterator)[1].distance > ratio) {

                                                 matchIterator->clear(); // remove match
                                                 removed++;
                                         }

                                 } else { // does not have 2 neighbours

                                         matchIterator->clear(); // remove match
                                         removed++;
                                 }
                }

                return removed;
          }

          // Insert symmetrical matches in symMatches vector
          void symmetryTest(const std::vector<std::vector<cv::DMatch>>& matches1,
                                const std::vector<std::vector<cv::DMatch>>& matches2,
                                            std::vector<cv::DMatch>& symMatches) {
                       
                // for all matches image 1 -> image 2
                for (std::vector<std::vector<cv::DMatch>>::const_iterator matchIterator1= matches1.begin();
                         matchIterator1!= matches1.end(); ++matchIterator1) {

                        if (matchIterator1->size() < 2) // ignore deleted matches
                                continue;

                        // for all matches image 2 -> image 1
                        for (std::vector<std::vector<cv::DMatch>>::const_iterator matchIterator2= matches2.begin();
                                matchIterator2!= matches2.end(); ++matchIterator2) {

                                if (matchIterator2->size() < 2) // ignore deleted matches
                                        continue;

                                // Match symmetry test
                                if ((*matchIterator1)[0].queryIdx == (*matchIterator2)[0].trainIdx  &&
                                        (*matchIterator2)[0].queryIdx == (*matchIterator1)[0].trainIdx) {

                                                // add symmetrical match
                                                symMatches.push_back(cv::DMatch((*matchIterator1)[0].queryIdx,
                                                                                                            (*matchIterator1)[0].trainIdx,
                                                                                                            (*matchIterator1)[0].distance));
                                                break; // next match in image 1 -> image 2
                                }
                        }
                }
          }

          // Identify good matches using RANSAC
          // Returns the fundamental matrix
          cv::Mat ransacTest(const std::vector<cv::DMatch>& matches,
                                 const std::vector<cv::KeyPoint>& keypoints1,
                                                 const std::vector<cv::KeyPoint>& keypoints2,
                                             std::vector<cv::DMatch>& outMatches) {

                // Convert keypoints into Point2f      
                std::vector<cv::Point2f> points1, points2;      
                for (std::vector<cv::DMatch>::const_iterator it= matches.begin();
                         it!= matches.end(); ++it) {

                         // Get the position of left keypoints
                         float x= keypoints1[it->queryIdx].pt.x;
                         float y= keypoints1[it->queryIdx].pt.y;
                         points1.push_back(cv::Point2f(x,y));
                         // Get the position of right keypoints
                         x= keypoints2[it->trainIdx].pt.x;
                         y= keypoints2[it->trainIdx].pt.y;
                         points2.push_back(cv::Point2f(x,y));
            }

                // Compute F matrix using RANSAC
                std::vector<uchar> inliers(points1.size(),0);
                cv::Mat fundemental= cv::findFundamentalMat(
                        cv::Mat(points1),cv::Mat(points2), // matching points
                    inliers,      // match status (inlier or outlier)
                    CV_FM_RANSAC, // RANSAC method
                    distance,     // distance to epipolar line
                    confidence);  // confidence probability
       
                // extract the surviving (inliers) matches
                std::vector<uchar>::const_iterator itIn= inliers.begin();
                std::vector<cv::DMatch>::const_iterator itM= matches.begin();
                // for all matches
                for ( ;itIn!= inliers.end(); ++itIn, ++itM) {

                        if (*itIn) { // it is a valid match

                                outMatches.push_back(*itM);
                        }
                }

                std::cout << "Number of matched points (after cleaning): " << outMatches.size() << std::endl;

                if (refineF) {
                // The F matrix will be recomputed with all accepted matches

                        // Convert keypoints into Point2f for final F computation      
                        points1.clear();
                        points2.clear();
       
                        for (std::vector<cv::DMatch>::const_iterator it= outMatches.begin();
                                 it!= outMatches.end(); ++it) {

                                 // Get the position of left keypoints
                                 float x= keypoints1[it->queryIdx].pt.x;
                                 float y= keypoints1[it->queryIdx].pt.y;
                                 points1.push_back(cv::Point2f(x,y));
                                 // Get the position of right keypoints
                                 x= keypoints2[it->trainIdx].pt.x;
                                 y= keypoints2[it->trainIdx].pt.y;
                                 points2.push_back(cv::Point2f(x,y));
                        }

                        // Compute 8-point F from all accepted matches
                        fundemental= cv::findFundamentalMat(
                                cv::Mat(points1),cv::Mat(points2), // matching points
                                CV_FM_8POINT); // 8-point method
                }

                return fundemental;
          }

          // Match feature points using the ratio test, symmetry test and RANSAC
          // returns the fundamental matrix
          cv::Mat match(cv::Mat& image1, cv::Mat& image2, // input images
                  std::vector<cv::DMatch>& matches, // output matches and keypoints
                  std::vector<cv::KeyPoint>& keypoints1, std::vector<cv::KeyPoint>& keypoints2) {

                // 1a. Detection of the SURF features
                detector->detect(image1,keypoints1);
                detector->detect(image2,keypoints2);

                std::cout << "Number of SURF points (1): " << keypoints1.size() << std::endl;
                std::cout << "Number of SURF points (2): " << keypoints2.size() << std::endl;

                // 1b. Extraction of the SURF descriptors
                cv::Mat descriptors1, descriptors2;
                extractor->compute(image1,keypoints1,descriptors1);
                extractor->compute(image2,keypoints2,descriptors2);

                std::cout << "descriptor matrix size: " << descriptors1.rows << " by " << descriptors1.cols << std::endl;

                // 2. Match the two image descriptors

                // Construction of the matcher
                cv::BruteForceMatcher<cv::L2<float>> matcher;

                // from image 1 to image 2
                // based on k nearest neighbours (with k=2)
                std::vector<std::vector<cv::DMatch>> matches1;
                matcher.knnMatch(descriptors1,descriptors2,
                        matches1, // vector of matches (up to 2 per entry)
                        2);               // return 2 nearest neighbours

                // from image 2 to image 1
                // based on k nearest neighbours (with k=2)
                std::vector<std::vector<cv::DMatch>> matches2;
                matcher.knnMatch(descriptors2,descriptors1,
                        matches2, // vector of matches (up to 2 per entry)
                        2);               // return 2 nearest neighbours

                std::cout << "Number of matched points 1->2: " << matches1.size() << std::endl;
                std::cout << "Number of matched points 2->1: " << matches2.size() << std::endl;

                // 3. Remove matches for which NN ratio is > than threshold

                // clean image 1 -> image 2 matches
                int removed= ratioTest(matches1);
                std::cout << "Number of matched points 1->2 (ratio test) : " << matches1.size()-removed << std::endl;
                // clean image 2 -> image 1 matches
                removed= ratioTest(matches2);
                std::cout << "Number of matched points 2->1 (ratio test) : " << matches2.size()-removed << std::endl;

                // 4. Remove non-symmetrical matches
            std::vector<cv::DMatch> symMatches;
                symmetryTest(matches1,matches2,symMatches);

                std::cout << "Number of matched points (symmetry test): " << symMatches.size() << std::endl;

                // 5. Validate matches using RANSAC
                cv::Mat fundemental= ransacTest(symMatches, keypoints1, keypoints2, matches);

                // return the found fundamental matrix
                return fundemental;
        }
};

#endif

Together, the program and header above implement simple image matching and stitching; a variant of the final mosaic step is sketched below.
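The homography returned by cv::findHomography maps image 1 coordinates into image 2's frame, so the mosaic above is built in image 2's frame with image 2 pasted on the left. If you would rather anchor the panorama on image 1, you can warp image 2 with the inverse homography. This is only a minimal sketch, written as if inserted into the main() above; the names hinv, result2, left and the window title are illustrative, while homography, image1 and image2 come from the program above.

        // Variant: build the mosaic in image 1's frame instead.
        // homography maps image 1 -> image 2, so warp image 2 with its inverse.
        cv::Mat hinv= homography.inv();
        cv::Mat result2;
        cv::warpPerspective(image2,   // input image
                result2,              // output image
                hinv,                 // inverse homography (image 2 -> image 1 frame)
                cv::Size(2*image1.cols,image1.rows)); // size of output image

        // Copy image 1, untouched, onto the left half
        cv::Mat left(result2,cv::Rect(0,0,image1.cols,image1.rows));
        image1.copyTo(left);

        cv::namedWindow("Mosaic in image 1 frame");
        cv::imshow("Mosaic in image 1 frame",result2);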

Simple video-processing code

/*------------------------------------------------------------------------------------------*\
   This file contains material supporting chapter 10 of the cookbook:  
   Computer Vision Programming using the OpenCV Library.
   by Robert Laganiere, Packt Publishing, 2011.

   This program is free software; permission is hereby granted to use, copy, modify,
   and distribute this source code, or portions thereof, for any purpose, without fee,
   subject to the restriction that the copyright notice may not be removed
   or altered from any source or altered source distribution.
   The software is released on an as-is basis and without any warranties of any kind.
   In particular, the software is not guaranteed to be fault-tolerant or free from failure.
   The author disclaims all warranties with regard to this software, any use,
   and any consequent failure, is purely the responsibility of the user.
 
   Copyright (C) 2010-2011 Robert Laganiere, www.laganiere.name
\*------------------------------------------------------------------------------------------*/

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include "videoprocess.h"

// alternative frame-processing callback: copies the frame and marks a fixed point
// (defined for illustration; not used in main() below)
void draw(cv::Mat& img, cv::Mat& out) {

        img.copyTo(out);
        cv::circle(out, cv::Point(100,100),5,cv::Scalar(255,0,0),2);
}

// frame-processing callback: inverted Canny edge map
void canny(cv::Mat& img, cv::Mat& out) {

        // Convert to gray
        cv::cvtColor(img,out,CV_BGR2GRAY);
        // Compute Canny edges
        cv::Canny(out,out,100,200);
        // Invert the image
        cv::threshold(out,out,128,255,cv::THRESH_BINARY_INV);
}

int main()
{
        // Open the video file
    cv::VideoCapture capture("D:\\images\\bike.avi");
        // check if video successfully opened
        if (!capture.isOpened())
                return 1;

        // Get the frame rate
        double rate= capture.get(CV_CAP_PROP_FPS);

        bool stop(false);
        cv::Mat frame; // current video frame
        cv::namedWindow("Extracted Frame");

        // Delay between each frame
        // corresponds to video frame rate
        int delay= 1000/rate;

        // for all frames in video
        while (!stop) {

                // read next frame if any
                if (!capture.read(frame))
                        break;

                cv::imshow("Extracted Frame",frame);

                // introduce a delay
                // or press key to stop
                if (cv::waitKey(delay)>=0)
                                stop= true;
        }

        // Close the video file
        capture.release();

        cv::waitKey();
               
        // Now using the VideoProcessor class

        // Create instance
        VideoProcessor processor;
        // Open video file
        processor.setInput("D:\\images\\bike.avi");
        // Declare a window to display the video
        processor.displayInput("Input Video");
        processor.displayOutput("Output Video");
        // Play the video at the original frame rate
        processor.setDelay(1000./processor.getFrameRate());
        // Set the frame processor callback function
        processor.setFrameProcessor(canny);
        // Start the process
        processor.run();
        cv::waitKey();
       
        // Second test
        // Create instance
    //  VideoProcessor processor;
        // Open video file
        processor.setInput("D:\\images\\bike.avi");

        // Get basic info about video file
        cv::Size size= processor.getFrameSize();
        std::cout << size.width << " " << size.height << std::endl;
        std::cout << processor.getFrameRate() << std::endl;
        std::cout << processor.getTotalFrameCount() << std::endl;
        std::cout << processor.getFrameNumber() << std::endl;
        std::cout << processor.getPositionMS() << std::endl;

        // No processing
        processor.dontCallProcess();
        // Output filename
//      processor.setOutput("../output/bikeOut",".jpg");
        char codec[4];
        processor.setOutput("D:\\output\\bike.avi",processor.getCodec(codec),processor.getFrameRate());
        std::cout << "Codec: " << codec[0] << codec[1] << codec[2] << codec[3] << std::endl;

        // Position the stream at frame 300
    //  processor.setFrameNumber(300);
    //  processor.stopAtFrameNo(120);

        // Declare a window to display the video
        processor.displayInput("Current Frame");
        processor.displayOutput("Output Frame");

        // Play the video at the original frame rate
        processor.setDelay(1000./processor.getFrameRate());

        // Start the process
        processor.run();

        std::cout << processor.getFrameNumber() << std::endl;
        std::cout << processor.getPositionMS() << std::endl;

        cv::waitKey();

        return 0;
}
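The commented-out setOutput("../output/bikeOut",".jpg") line above hints at the image-sequence output mode. Below is a minimal sketch of that variant, using the image-sequence overload of setOutput() declared in videoprocess.h; it is written as if inserted into the main() above (before the final cv::waitKey()), and the output prefix D:\\output\\bikeOut is illustrative.

        // Variant: write the processed frames as numbered images instead of a video.
        processor.setInput("D:\\images\\bike.avi");
        processor.setFrameProcessor(canny);         // re-enable processing
        processor.setOutput("D:\\output\\bikeOut",  // filename prefix (illustrative)
                ".jpg",   // image file extension
                3,        // 3 digits in the numbering: bikeOut000.jpg, bikeOut001.jpg, ...
                0);       // start numbering at index 0
        processor.run();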

Header file (videoprocess.h)

/*------------------------------------------------------------------------------------------*\
   This file contains material supporting chapter 10 of the cookbook:  
   Computer Vision Programming using the OpenCV Library.
   by Robert Laganiere, Packt Publishing, 2011.

   This program is free software; permission is hereby granted to use, copy, modify,
   and distribute this source code, or portions thereof, for any purpose, without fee,
   subject to the restriction that the copyright notice may not be removed
   or altered from any source or altered source distribution.
   The software is released on an as-is basis and without any warranties of any kind.
   In particular, the software is not guaranteed to be fault-tolerant or free from failure.
   The author disclaims all warranties with regard to this software, any use,
   and any consequent failure, is purely the responsibility of the user.
 
   Copyright (C) 2010-2011 Robert Laganiere, www.laganiere.name
\*------------------------------------------------------------------------------------------*/

#if !defined VPROCESSOR
#define VPROCESSOR

#include <iostream>
#include <iomanip>
#include <sstream>
#include <string>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

// The frame processor interface
class FrameProcessor {

  public:
        // processing method
        virtual void process(cv::Mat &input, cv::Mat &output)= 0;
};

class VideoProcessor {

  private:

          // the OpenCV video capture object
          cv::VideoCapture capture;
          // the callback function to be called
          // for the processing of each frame
          void (*process)(cv::Mat&, cv::Mat&);
          // the pointer to the class implementing
          // the FrameProcessor interface
          FrameProcessor *frameProcessor;
          // a bool to determine if the
          // process callback will be called
          bool callIt;
          // Input display window name
          std::string windowNameInput;
          // Output display window name
          std::string windowNameOutput;
          // delay between each frame processing
          int delay;
          // number of processed frames
          long fnumber;
          // stop at this frame number
          long frameToStop;
          // to stop the processing
          bool stop;

          // vector of image filename to be used as input
          std::vector<std::string> images;
          // image vector iterator
          std::vector<std::string>::const_iterator itImg;

          // the OpenCV video writer object
          cv::VideoWriter writer;
          // output filename
          std::string outputFile;

          // current index for output images
          int currentIndex;
          // number of digits in output image filename
          int digits;
          // extension of output images
          std::string extension;

          // to get the next frame
          // could be: video file; camera; vector of images
          bool readNextFrame(cv::Mat& frame) {

                  if (images.size()==0)
                          return capture.read(frame);
                  else {

                          if (itImg != images.end()) {

                                  frame= cv::imread(*itImg);
                                  itImg++;
                                  return frame.data != 0;
                          }

                          // no more images in the input vector
                          return false;
                  }
          }

          // to write the output frame
          // could be: video file or images
          void writeNextFrame(cv::Mat& frame) {

                  if (extension.length()) { // then we write images
                 
                          std::stringstream ss;
                      ss << outputFile << std::setfill('0') << std::setw(digits) 
						 << currentIndex++ << extension;
                          cv::imwrite(ss.str(),frame);

                  } else { // then write video file

                          writer.write(frame);
                  }
          }

  public:

          // Constructor setting the default values
          VideoProcessor() : process(0), frameProcessor(0), callIt(false),
                  delay(-1), fnumber(0), frameToStop(-1), stop(false),
                  currentIndex(0), digits(0) {}

          // set the name of the video file
          bool setInput(std::string filename) {

                fnumber= 0;
                // In case a resource was already
                // associated with the VideoCapture instance
                capture.release();
                images.clear();

                // Open the video file
                return capture.open(filename);
          }

          // set the camera ID
          bool setInput(int id) {

                fnumber= 0;
                // In case a resource was already
                // associated with the VideoCapture instance
                capture.release();
                images.clear();

                // Open the video file
                return capture.open(id);
          }

          // set the vector of input images
          bool setInput(const std::vector<std::string>& imgs) {

                fnumber= 0;
                // In case a resource was already
                // associated with the VideoCapture instance
                capture.release();

                // the input will be this vector of images
                images= imgs;
                itImg= images.begin();

                return true;
          }

          // set the output video file
          // by default the same parameters as the input video will be used
          bool setOutput(const std::string &filename, int codec=0, double framerate=0.0, bool isColor=true) {

                  outputFile= filename;
                  extension.clear();
                 
                  if (framerate==0.0)
                          framerate= getFrameRate(); // same as input

                  char c[4];
                  // use same codec as input
                  if (codec==0) {
                          codec= getCodec(c);
                  }

                  // Open output video
                  return writer.open(outputFile, // filename
                          codec, // codec to be used
                          framerate,      // frame rate of the video
                          getFrameSize(), // frame size
                          isColor);       // color video?
          }

          // set the output as a series of image files
          // extension must be ".jpg", ".bmp" ...
          bool setOutput(const std::string &filename, // filename prefix
                  const std::string &ext, // image file extension
                  int numberOfDigits=3,   // number of digits
                  int startIndex=0) {     // start index

                  // number of digits must not be negative
                  if (numberOfDigits<0)
                          return false;

                  // filenames and their common extension
                  outputFile= filename;
                  extension= ext;

                  // number of digits in the file numbering scheme
                  digits= numberOfDigits;
                  // start numbering at this index
                  currentIndex= startIndex;

                  return true;
          }

          // set the callback function that will be called for each frame
          void setFrameProcessor(void (*frameProcessingCallback)(cv::Mat&, cv::Mat&)) {

                  // invalidate frame processor class instance
                  frameProcessor= 0;
                  // this is the frame processor function that will be called
                  process= frameProcessingCallback;
                  callProcess();
          }

          // set the instance of the class that implements the FrameProcessor interface
          void setFrameProcessor(FrameProcessor* frameProcessorPtr) {

                  // invalidate callback function
                  process= 0;
                  // this is the frame processor instance that will be called
                  frameProcessor= frameProcessorPtr;
                  callProcess();
          }

          // stop streaming at this frame number
          void stopAtFrameNo(long frame) {

                  frameToStop= frame;
          }

          // process callback to be called
          void callProcess() {

                  callIt= true;
          }

          // do not call process callback
          void dontCallProcess() {

                  callIt= false;
          }

          // to display the input frames
          void displayInput(std::string wn) {
           
                  windowNameInput= wn;
                  cv::namedWindow(windowNameInput);
          }

          // to display the processed frames
          void displayOutput(std::string wn) {
           
                  windowNameOutput= wn;
                  cv::namedWindow(windowNameOutput);
          }

          // do not display the processed frames
          void dontDisplay() {

                  cv::destroyWindow(windowNameInput);
                  cv::destroyWindow(windowNameOutput);
                  windowNameInput.clear();
                  windowNameOutput.clear();
          }

          // set a delay between each frame
          // 0 means wait at each frame
          // negative means no delay
          void setDelay(int d) {
         
                  delay= d;
          }

          // a count is kept of the processed frames
          long getNumberOfProcessedFrames() {
         
                  return fnumber;
          }

          // return the size of the video frame
          cv::Size getFrameSize() {

                if (images.size()==0) {

                        // get the size from the capture device
                        int w= static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
                        int h= static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));

                        return cv::Size(w,h);

                } else { // if input is vector of images

                        cv::Mat tmp= cv::imread(images[0]);
                        if (!tmp.data) return cv::Size(0,0);
                        else return tmp.size();
                }
          }

          // return the frame number of the next frame
          long getFrameNumber() {

                if (images.size()==0) {

                        // get the current frame position from the capture device
                    long f= static_cast<long>(capture.get(CV_CAP_PROP_POS_FRAMES));
                    return f;

                } else { // if input is vector of images

                        return static_cast<long>(itImg-images.begin());
                }
          }

          // return the position in ms
          double getPositionMS() {

                  // undefined for vector of images
                  if (images.size()!=0) return 0.0;

                  double t= capture.get(CV_CAP_PROP_POS_MSEC);
                  return t;
          }

          // return the frame rate
          double getFrameRate() {

                  // undefined for vector of images
                  if (images.size()!=0) return 0;

                  double r= capture.get(CV_CAP_PROP_FPS);
                  return r;
          }

          // return the number of frames in video
          long getTotalFrameCount() {

                  // for vector of images
                  if (images.size()!=0) return images.size();

                  long t= static_cast<long>(capture.get(CV_CAP_PROP_FRAME_COUNT));
                  return t;
          }

          // get the codec of input video
          int getCodec(char codec[4]) {

                  // undefined for vector of images
                  if (images.size()!=0) return -1;

                  union {
                          int value;
                          char code[4]; } returned;

                  returned.value= static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));

                  codec[0]= returned.code[0];
                  codec[1]= returned.code[1];
                  codec[2]= returned.code[2];
                  codec[3]= returned.code[3];

                  return returned.value;
          }
         
          // go to this frame number
          bool setFrameNumber(long pos) {

                  // for vector of images
                  if (images.size()!=0) {

                          // is it a valid position?
                          if (pos < 0 || pos >= static_cast<long>(images.size()))
                                  return false;

                          // move to position in vector
                          itImg= images.begin() + pos;
                          return true;

                  } else { // if input is a capture device

                        return capture.set(CV_CAP_PROP_POS_FRAMES, pos);
                  }
          }

          // go to this position
          bool setPositionMS(double pos) {

                  // not defined in vector of images
                  if (images.size()!=0)
                          return false;
                  else
                      return capture.set(CV_CAP_PROP_POS_MSEC, pos);
          }

          // go to this position expressed as a fraction of total film length
          bool setRelativePosition(double pos) {

                  // for vector of images
                  if (images.size()!=0) {

                          // compute the corresponding position in the vector
                          long posI= static_cast<long>(pos*images.size()+0.5);
                          // is it a valid position?
                          if (posI < 0 || posI >= static_cast<long>(images.size()))
                                  return false;

                          // move to position in vector
                          itImg= images.begin() + posI;
                          return true;

                  } else { // if input is a capture device

                          return capture.set(CV_CAP_PROP_POS_AVI_RATIO, pos);
                  }
          }

          // Stop the processing
          void stopIt() {

                  stop= true;
          }

          // Is the process stopped?
          bool isStopped() {

                  return stop;
          }

          // Is a capture device opened?
          bool isOpened() {

                  return capture.isOpened() || !images.empty();
          }
         
          // to grab (and process) the frames of the sequence
          void run() {

                  // current frame
                  cv::Mat frame;
                  // output frame
                  cv::Mat output;

                  // if no capture device has been set
                  if (!isOpened())
                          return;

                  stop= false;

                  while (!isStopped()) {

                          // read next frame if any
                          if (!readNextFrame(frame))
                                  break;

                          // display input frame
                          if (windowNameInput.length()!=0)
                                  cv::imshow(windowNameInput,frame);

                      // calling the process function or method
                          if (callIt) {
                                 
                                // process the frame
                                if (process)
                                    process(frame, output);
                                else if (frameProcessor)
                                        frameProcessor->process(frame,output);
                                // increment frame number
                            fnumber++;

                          } else {

                                output= frame;
                          }

                          // write output sequence
                          if (outputFile.length()!=0)
                                  writeNextFrame(output);

                          // display output frame
                          if (windowNameOutput.length()!=0)
                                  cv::imshow(windowNameOutput,output);
                       
                          // introduce a delay
                          if (delay>=0 && cv::waitKey(delay)>=0)
                                stopIt();

                          // check if we should stop
                          if (frameToStop>=0 && getFrameNumber()==frameToStop)
                                  stopIt();
                  }
          }
};

#endif
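The main program above only exercises the function-pointer path (setFrameProcessor(canny)). The FrameProcessor interface declared at the top of this header offers a class-based alternative. The following is a minimal sketch assuming the videoprocess.h above; the class name CannyProcessor, the window title and the thresholds are illustrative, not part of the original code.

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "videoprocess.h"

// Illustrative implementation of the FrameProcessor interface
class CannyProcessor : public FrameProcessor {

  public:

        // process one frame: inverted Canny edge map
        void process(cv::Mat &input, cv::Mat &output) {

                cv::cvtColor(input,output,CV_BGR2GRAY);
                cv::Canny(output,output,100,200);
                cv::threshold(output,output,128,255,cv::THRESH_BINARY_INV);
        }
};

int main()
{
        VideoProcessor processor;
        CannyProcessor cannyProc;

        processor.setInput("D:\\images\\bike.avi");
        processor.displayOutput("Canny (class-based)");
        processor.setDelay(1000./processor.getFrameRate());
        // pass an object implementing FrameProcessor instead of a function pointer
        processor.setFrameProcessor(&cannyProc);
        processor.run();

        return 0;
}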


