Seamless image stitching on Android with OpenCV 3.0, using FAST keypoints and BruteForce matching

This builds on the idea from the previous post: seamless image stitching with OpenCV, using SIFT for keypoints and FLANN for matching.

I decided to port it to Android. My Android build of OpenCV is only version 3.0, so SIFT is not available there; instead I use FAST for feature detection, BRISK for feature description, and a BruteForce matcher, and the results turn out to be quite acceptable.
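Since BRISK produces binary descriptors, a Hamming-distance matcher ("BruteForce-Hamming") is usually a better fit than the plain L2 "BruteForce" matcher used in the listing below. The following is only a minimal, standalone sketch of that variant, not code from this project; img1 and img2 are assumed to be grayscale Mats:

#include <vector>
#include "opencv2/features2d/features2d.hpp"
using namespace cv;
using namespace std;

//FAST keypoints + BRISK descriptors, matched with Hamming distance
vector<DMatch> matchFastBrisk(const Mat &img1, const Mat &img2)
{
    Ptr<FeatureDetector> detector = FastFeatureDetector::create();
    Ptr<DescriptorExtractor> extractor = BRISK::create();

    vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;
    detector->detect(img1, kp1);
    detector->detect(img2, kp2);
    extractor->compute(img1, kp1, desc1);
    extractor->compute(img2, kp2, desc2);

    //Hamming distance suits binary descriptors such as BRISK
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    vector<DMatch> matches;
    matcher->match(desc1, desc2, matches);
    return matches;
}

The rest of the pipeline (sorting the matches, estimating the homography, warping and blending) stays exactly as in the listing.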
Here is the result (the images exceeded the upload size limit, so they have been scaled down):
[stitched result image]

Let's go straight to the code.
The JNI part is as follows:

//
// Created by fuzr1 on 2017/8/20.
//
//first step add below
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <jni.h>
#include <android/log.h>
#include <iostream>
#include <fstream>
//first step end

//second step
//#include "stdafx.h"
//second step end
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"

using namespace cv;
using namespace std;
#define LOG_TAG "CombinePicture"
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
//third step
#ifdef __cplusplus
extern "C" {
#endif
//third step end
//compute where a point in the source image ends up in the target image after the matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri);
// fifth step
JNIEXPORT jlong JNICALL Java_com_lenovo_camera_orbmatch_MainActivity_doCombinePicture(JNIEnv *env, jclass clz, jlong img1, jlong img2)
//int main()
//fifth step end
{

    Mat image01 =  Mat(*(Mat*)img1);
    cvtColor(image01, image01, CV_BGRA2BGR);
    Mat image02  =  Mat(*(Mat*)img2);
    cvtColor(image02, image02, CV_BGRA2BGR);


    if (image01.empty() || image02.empty())
    {
      //  printf("the loader the picture failed");
      //  waitKey();
        LOGD("there is some image input wrong");
        return 0;//not all of the input images were read successfully
    }
    //imshow("stitch image 1", image01);
    //imshow("stitch image 2", image02);
    double time = getTickCount();
    //convert to grayscale
    Mat image1, image2;
    cvtColor(image01, image1, CV_BGR2GRAY);
    cvtColor(image02, image2, CV_BGR2GRAY);

    //detect keypoints
    //SiftFeatureDetector siftDetector(800);  //Hessian threshold (SIFT is not available in this OpenCV 3.0 build)
    Ptr<FeatureDetector> fastDetector = FastFeatureDetector::create();
    vector<KeyPoint> keyPoint1, keyPoint2;
    fastDetector->detect(image1, keyPoint1);
    fastDetector->detect(image2, keyPoint2);

    //compute descriptors for the keypoints, ready for the matching step below
    Ptr<DescriptorExtractor> BriskDescriptor = BRISK::create();
    Mat imageDesc1, imageDesc2;
    BriskDescriptor->compute(image1, keyPoint1, imageDesc1);
    BriskDescriptor->compute(image2, keyPoint2, imageDesc2);

    //match the descriptors and pick out the best pairs
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
    //FlannBasedMatcher matcher;
    vector<DMatch> matchePoints;
    matcher->match(imageDesc1, imageDesc2, matchePoints, Mat());
    if (matchePoints.size() < 10)
    {
        LOGD("the match point is below 10");
        //waitKey();
        return 0;
    }
    sort(matchePoints.begin(), matchePoints.end()); //sort the matches; DMatch orders by distance, so the most reliable pairs come first
    //take the top-N best matches
    vector<Point2f> imagePoints1, imagePoints2;
    for (int i = 0; i<10; i++)
    {
        imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
        imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
    }

    //estimate the 3x3 homography that maps image 1 onto image 2
    Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
    Mat adjustMat;
    adjustMat = (Mat_<double>(3, 3) << 1.0, 0, image01.cols, 0, 1.0, 0, 0, 0, 1.0);//translation that shifts right by image01.cols
    //Mat adjustMat =Mat::eye(cv::Size(3,3),CV_64F);
   // adjustMat.at<double>(0, 2) = image01.cols;
    Mat adjustHomo = adjustMat*homo;//compose: apply the homography first, then the shift

    //find where the strongest match (the first one after sorting) lies in the source image and in the warped image; it anchors the stitch seam
    Point2f originalLinkPoint, targetLinkPoint, basedImagePoint;
    originalLinkPoint = keyPoint1[matchePoints[0].queryIdx].pt;
    targetLinkPoint = getTransformPoint(originalLinkPoint, adjustHomo);
    basedImagePoint = keyPoint2[matchePoints[0].trainIdx].pt;

    //image registration
    Mat imageTransform;
    //warp image 1 into image 2's frame; without the shift the mapped x coordinates would be negative, so the homography is pre-multiplied by the image01.cols translation
    //the exact size of the stitched result is hard to predict, so make the canvas generously large to keep as many original pixels as possible; the invalid or ragged border can be cropped afterwards
    warpPerspective(image01, imageTransform, adjustHomo, Size(image02.cols + image01.cols + 10, image02.rows));

    //join at the strongest match point, with image 1 on its left and image 2 on its right; a hard cut here looks bad because the brightness jumps
    //Mat ROIMat = image02(Rect(Point(basedImagePoint.x, 0), Point(image02.cols, image02.rows)));
    //ROIMat.copyTo(Mat(imageTransform1, Rect(targetLinkPoint.x, 0, image02.cols - basedImagePoint.x + 1, image02.rows)));

    //blend the overlap region to the left of the strongest match so the seam transitions smoothly instead of jumping
    Mat image1Overlap, image2Overlap; //overlapping parts of image 1 and image 2
    image1Overlap = imageTransform(Rect(Point(targetLinkPoint.x - basedImagePoint.x, 0), Point(targetLinkPoint.x, image02.rows)));
    image2Overlap = image02(Rect(0, 0, image1Overlap.cols, image1Overlap.rows));
    Mat image1ROICopy = image1Overlap.clone();  //keep a copy of image 1's overlap region
    for (int i = 0; i<image1Overlap.rows; i++)
    {
        for (int j = 0; j<image1Overlap.cols; j++)
        {
            double weight;
            weight = (double)j / image1Overlap.cols;  //blending weight that grows with the distance across the overlap
            image1Overlap.at<Vec3b>(i, j)[0] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[0] + weight*image2Overlap.at<Vec3b>(i, j)[0];
            image1Overlap.at<Vec3b>(i, j)[1] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[1] + weight*image2Overlap.at<Vec3b>(i, j)[1];
            image1Overlap.at<Vec3b>(i, j)[2] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[2] + weight*image2Overlap.at<Vec3b>(i, j)[2];
        }
    }
    Mat ROIMat = image02(Rect(Point(image1Overlap.cols, 0), Point(image02.cols, image02.rows)));  //the non-overlapping part of image 2
    ROIMat.copyTo(Mat(imageTransform, Rect(targetLinkPoint.x, 0, ROIMat.cols, image02.rows))); //copy the non-overlapping part straight onto the canvas

    time = getTickCount() - time;
    time /= getTickFrequency();
    LOGD("match time=%f\n", time);
 //   namedWindow("stitch result", 0);
 //   imshow("stitch result", imageTransform);
  //  imwrite("matchResult.jpg", imageTransform);
   // waitKey();
   // return 0;
    Mat *ret = new Mat(imageTransform);
    return (jlong) ret;
}

//compute where a point in the source image ends up in the target image after the matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri)
{
    Mat originelP, targetP;
    originelP = (Mat_<double>(3, 1) << originalPoint.x, originalPoint.y, 1.0);
    targetP = transformMaxtri*originelP;
    float x = targetP.at<double>(0, 0) / targetP.at<double>(2, 0);
    float y = targetP.at<double>(1, 0) / targetP.at<double>(2, 0);
    return Point2f(x, y);
}
//fourth step
#ifdef __cplusplus
}
#endif
//fourth step end
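As a side note, the per-point arithmetic in getTransformPoint can also be done with OpenCV's cv::perspectiveTransform, which applies a 3x3 matrix to a set of points and performs the homogeneous division internally. A minimal sketch of an equivalent helper (an illustration, not code from the project):

#include <vector>
#include "opencv2/core/core.hpp"
using namespace cv;
using namespace std;

//same job as getTransformPoint, using perspectiveTransform
Point2f transformPoint(const Point2f &p, const Mat &homography)
{
    vector<Point2f> src(1, p), dst;
    perspectiveTransform(src, dst, homography); //divides by the homogeneous coordinate internally
    return dst[0];
}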

The changes are not large; porting a feature like this from desktop C++ into an Android app is fairly straightforward.
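One step the comments in the listing mention but do not implement is trimming the oversized canvas: the warp target is deliberately wider than needed, so the returned Mat usually carries a black strip on the right. Below is a minimal sketch of one way to crop it, by taking the bounding box of the non-black pixels; "stitched" stands for the Mat returned by doCombinePicture and is only an assumption of this sketch:

#include <vector>
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
using namespace std;

//crop a stitched BGR image to the bounding box of its non-black pixels
Mat cropToContent(const Mat &stitched)
{
    Mat gray;
    cvtColor(stitched, gray, COLOR_BGR2GRAY);
    Mat mask = gray > 0;              //8-bit mask of non-black pixels
    vector<Point> nonZero;
    findNonZero(mask, nonZero);       //coordinates of every non-black pixel
    if (nonZero.empty())
        return stitched.clone();
    return stitched(boundingRect(nonZero)).clone();
}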

The Android.mk is included below as well.

LOCAL_PATH := $(call my-dir)
NATIVE_PATH :=$(LOCAL_PATH)/..
include $(CLEAR_VARS)


LOCAL_SRC_FILES  :=  $(NATIVE_PATH)/native/libs/arm64-v8a/libopencv_java3.so

LOCAL_MODULE     := libopencv_java3

include $(BUILD_SHARED_LIBRARY)


include $(CLEAR_VARS)

#OPENCV_CAMERA_MODULES:=on
#OPENCV_INSTALL_MODULES:=off
#OPENCV_LIB_TYPE:=STATIC


include  $(NATIVE_PATH)/native/jni/OpenCV.mk

LOCAL_C_INCLUDES :=  $(NATIVE_PATH)/native/jni/include
LOCAL_LDLIBS += -llog
LOCAL_MODULE    := combinepicture
LOCAL_SRC_FILES := combinepicture.cpp

include $(BUILD_SHARED_LIBRARY)

One note on layout: my JNI sources live in src/main/jni, while the OpenCV native SDK sits in src/main/native.
