Image stitching: Python / C++

 

https://github.com/AlessandroMinervini/Mosaic-creation-through-the-stitching-of-aerial-rectified-images-obtained-from-a-moving-vehicle

https://github.com/lionelmessi6410/Panorama-Stitching

https://github.com/thoailinh/panorama_stitching_images

 

Open-source projects from abroad:

https://blog.csdn.net/yangpan011/article/details/81387299

 

Python panoramic image stitching based on the deep-learning SuperPoint detector

References:
https://github.com/kushalvyas/Python-Multiple-Image-Stitching
https://github.com/MagicLeapResearch/SuperPointPretrainedNetwork
https://github.com/syinari0123/SuperPoint-VO
SuperPoint replaces SURF for feature extraction in a Python image-stitching pipeline.
Note that the stitching quality of this Python version is not great; this post is just a study note.
 

The modified matchers.py is as follows:

import cv2
import numpy as np
from sp_extractor import SuperPointFrontend

class matchers:
    def __init__(self):
        # SuperPoint replaces SURF as the feature extractor
        # self.surf = cv2.xfeatures2d.SURF_create()  # original SURF detector, no longer used
        self.detector = SuperPointFrontend(weights_path="superpoint_v1.pth",
                                           nms_dist=4,
                                           conf_thresh=0.015,
                                           nn_thresh=0.7,
                                           cuda=True)
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        self.flann = cv2.FlannBasedMatcher(index_params, search_params)

    def match(self, i1, i2, direction=None):
        imageSet1 = self.getSURFFeatures(i1)
        imageSet2 = self.getSURFFeatures(i2)
        print("Direction : ", direction)

        matches = self.flann.knnMatch(
            np.asarray(imageSet2['des'], np.float32),
            np.asarray(imageSet1['des'], np.float32),
            k=2
        )
        # Lowe's ratio test to keep distinctive matches
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append((m.trainIdx, m.queryIdx))

        if len(good) > 4:
            pointsCurrent = imageSet2['kp']
            pointsPrevious = imageSet1['kp']

            matchedPointsCurrent = np.float32(
                [pointsCurrent[i] for (__, i) in good]
            )
            matchedPointsPrev = np.float32(
                [pointsPrevious[i] for (i, __) in good]
            )

            # homography mapping i2 onto i1, with RANSAC outlier rejection
            H, s = cv2.findHomography(matchedPointsCurrent, matchedPointsPrev, cv2.RANSAC, 4)
            return H
        return None

    def getSURFFeatures(self, im):
        # method name kept from the original; it now runs SuperPoint,
        # which expects a float32 grayscale image in [0, 1]
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        gray = gray.astype('float32') / 255.0
        pts, desc, heatmap = self.detector.run(gray)
        # kp, des = self.surf.detectAndCompute(gray, None)  # original SURF path
        # per the SuperPoint repo: pts is 3xN (x, y, confidence), desc is 256xN
        return {'kp': pts.T[:, :2], 'des': desc.T}
Source: https://blog.csdn.net/qq_33591712/article/details/84947829
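
For context, here is a minimal sketch of how this matchers class could drive a two-image stitch. The filenames and canvas size are my assumptions, not part of the original post:

import cv2
from matchers import matchers  # the modified file above

# hypothetical input files: any pair of overlapping photos will do
img1 = cv2.imread("left.jpg")
img2 = cv2.imread("right.jpg")

m = matchers()
H = m.match(img1, img2, direction="right")  # homography mapping img2 into img1's frame
if H is not None:
    # warp img2 onto a canvas wide enough for both, then paste img1 on the left
    canvas = cv2.warpPerspective(img2, H, (img1.shape[1] + img2.shape[1], img1.shape[0]))
    canvas[:img1.shape[0], :img1.shape[1]] = img1
    cv2.imwrite("panorama.jpg", canvas)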

 

Real-time panoramic video stitching with GPU acceleration and L-ORB feature extraction

https://download.csdn.net/download/niyan6281/10311358
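
The link above is a paper download, not code. For reference, plain ORB (which L-ORB extends) is patent-free and ships with stock OpenCV; below is a minimal matching sketch, not the paper's GPU pipeline, with hypothetical filenames:

import cv2

img1 = cv2.imread("left.jpg", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("right.jpg", cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create(nfeatures=2000)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# ORB descriptors are binary, so match with Hamming distance;
# crossCheck keeps only mutual best matches
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)

out = cv2.drawMatches(img1, kp1, img2, kp2, matches[:50], None)
cv2.imwrite("orb_matches.jpg", out)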

 

SURF/SIFT registration with RANSAC mismatch removal, C++ source code

https://download.csdn.net/download/u011000097/10666371

SURF + RANSAC matching

https://download.csdn.net/download/qq_41800983/11110370
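
The RANSAC mismatch removal both downloads advertise is the same trick the scripts on this page use: cv2.findHomography returns an inlier mask. A minimal helper sketch (the function name and threshold are mine, not from the downloads):

import cv2
import numpy as np

def ransac_filter(good, kp1, kp2, thresh=5.0):
    # keep only matches consistent with a single homography
    src = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    H, mask = cv2.findHomography(src, dst, cv2.RANSAC, thresh)
    inliers = [m for m, keep in zip(good, mask.ravel()) if keep]
    return H, inliers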

 

Free software:

http://www.greenxf.com/soft/264469.html

 

The four arrow keys can be used to control the stitching direction; it is just a bit slow.

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

if __name__ == '__main__':


    path = 'shudong/'

    files = sorted(os.listdir(path))  # listdir order is arbitrary; frames must be sequential
    srcImg = cv.imread('shudong/00001.jpg')
    # srcImg = cv.resize(srcImg, (srcImg.shape[1]//2, srcImg.shape[0]//2))
    srcImg = cv.resize(srcImg, (400, 300))
    # per-frame canvas growth: pad 4 px on top and 1 px on the right each step
    move_top, move_bot, move_left, move_right = 4, 0, 0, 1
    # srcImg = cv.copyMakeBorder(srcImg, top, bot, left, right, cv.BORDER_CONSTANT, value=(0, 0, 0))

    index=0
    for i, file in enumerate(files):
        if i % 5 < 4:  # use only every 5th frame
            continue
        index+=1

        srcImg = cv.copyMakeBorder(srcImg, move_top, 0, 0, move_right, cv.BORDER_CONSTANT, value=(0, 0, 0))

        testImg=cv.imread(path+file)
        testImg = cv.resize(testImg, (400, 300))
        cv.imshow("a", testImg)
        testImg = cv.copyMakeBorder(testImg, move_top*index, 0, 0, move_right*index, cv.BORDER_CONSTANT, value=(0, 0, 0))
        img1gray = cv.cvtColor(srcImg, cv.COLOR_BGR2GRAY)
        img2gray = cv.cvtColor(testImg, cv.COLOR_BGR2GRAY)
        sift = cv.xfeatures2d.SIFT_create()  # needs an opencv_contrib build
        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1gray, None)
        kp2, des2 = sift.detectAndCompute(img2gray, None)
        # FLANN parameters
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # Need to draw only good matches, so create a mask
        matchesMask = [[0, 0] for _ in range(len(matches))]

        good = []
        pts1 = []
        pts2 = []
        # ratio test as per Lowe's paper (j avoids shadowing the frame index i)
        for j, (m, n) in enumerate(matches):
            if m.distance < 0.7 * n.distance:
                good.append(m)
                pts2.append(kp2[m.trainIdx].pt)
                pts1.append(kp1[m.queryIdx].pt)
                matchesMask[j] = [1, 0]

        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=(255, 0, 0),
                           matchesMask=matchesMask,
                           flags=0)
        # img3 = cv.drawMatchesKnn(img1gray, kp1, img2gray, kp2, matches, None, **draw_params)
        # plt.imshow(img3, ), plt.show()

        rows, cols = srcImg.shape[:2]
        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
            warpImg = cv.warpPerspective(testImg, M, (testImg.shape[1], testImg.shape[0]), flags=cv.WARP_INVERSE_MAP)

            # horizontal extent of the region where both images have content
            left, right = 0, cols - 1
            for col in range(0, cols):
                if srcImg[:, col].any() and warpImg[:, col].any():
                    left = col
                    break
            for col in range(cols-1, 0, -1):
                if srcImg[:, col].any() and warpImg[:, col].any():
                    right = col
                    break

            res = np.zeros([rows, cols, 3], np.uint8)
            for row in range(0, rows):
                for col in range(0, cols):
                    if not srcImg[row, col].any():
                        res[row, col] = warpImg[row, col]
                    elif not warpImg[row, col].any():
                        res[row, col] = srcImg[row, col]
                    else:
                        srcImgLen = float(abs(col - left))
                        testImgLen = float(abs(col - right))
                        alpha = srcImgLen / (srcImgLen + testImgLen)
                        res[row, col] = np.clip(srcImg[row, col] * (1-alpha) + warpImg[row, col] * alpha, 0, 255)

            # show the running mosaic (BGR, so cv.imshow displays it directly)
            cv.imshow("stitched", res)

            print(res.shape)
            cv.waitKeyEx(1)
            srcImg = res  # the stitched result becomes the base for the next frame

        else:
            print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
            matchesMask = None
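
The per-pixel blending loop above is the main reason the script is slow. Below is a vectorized sketch of the same linear feathering (my rewrite, assuming the left/right overlap bounds are computed as in the script):

import numpy as np

def blend_overlap(srcImg, warpImg, left, right):
    # alpha ramps from 0 at `left` to 1 at `right`, exactly as in the loop above
    cols = srcImg.shape[1]
    col_idx = np.arange(cols, dtype=np.float32)
    denom = max(float(right - left), 1.0)
    alpha = np.clip((col_idx - left) / denom, 0.0, 1.0)[None, :, None]

    res = np.clip(srcImg * (1.0 - alpha) + warpImg * alpha, 0, 255).astype(np.uint8)

    # pixels present in only one image are copied through unchanged
    src_empty = ~srcImg.any(axis=2)
    warp_empty = ~warpImg.any(axis=2)
    res[src_empty] = warpImg[src_empty]
    res[warp_empty] = srcImg[warp_empty]
    return res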

C++ OpenCV 4 image stitching:

【OpenCV】Using SIFT and SURF under OpenCV 4.0.1
By GordonWei. Source: https://blog.csdn.net/Gordon_Wei/article/details/88920411
The prebuilt OpenCV 4.0.1 release no longer ships SIFT or SURF. Algorithms that are patented or not yet mature are left out of release builds, and SIFT and SURF are among them: their patents bar commercial use. In the 2.x series they lived in the nonfree module, and from 3.x onward they were moved to opencv_contrib, so to use them you must build them into OpenCV yourself.
That means building OpenCV 4.0.1 together with opencv_contrib 4.0.1; for the steps, see my other post, 【OpenCV】opencv4.0.1+opencv_contrib4.0.1+VS2015 compilation.

Below is the SURF implementation; to use SIFT instead, only minor changes are needed.

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

int main() 
{
	cv::Mat imageL = cv::imread("imgL.bmp");
	cv::Mat imageR = cv::imread("imgR.bmp");


	// Choice of feature detector
	//SIFT
	//cv::Ptr<cv::xfeatures2d::SIFT> sift = cv::xfeatures2d::SIFT::create();
	//ORB
	//cv::Ptr<cv::ORB> orb = cv::ORB::create();
	//SURF
	cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create();
	
	// Keypoints
	std::vector<cv::KeyPoint> keyPointL, keyPointR;
	// Detect keypoints only (descriptors are computed later)
	surf->detect(imageL, keyPointL);
	surf->detect(imageR, keyPointR);

	// Draw the keypoints
	cv::Mat keyPointImageL;
	cv::Mat keyPointImageR;
	drawKeypoints(imageL, keyPointL, keyPointImageL, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	drawKeypoints(imageR, keyPointR, keyPointImageR, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

	// Create display windows
	cv::namedWindow("KeyPoints of imageL");
	cv::namedWindow("KeyPoints of imageR");

	// Show the keypoints
	cv::imshow("KeyPoints of imageL", keyPointImageL);
	cv::imshow("KeyPoints of imageR", keyPointImageR);

	// Feature matching
	cv::Mat despL, despR;
	// Detect keypoints and compute their descriptors
	surf->detectAndCompute(imageL, cv::Mat(), keyPointL, despL);
	surf->detectAndCompute(imageR, cv::Mat(), keyPointR, despR);

	// Struct for DMatch: query descriptor index, train descriptor index, train image index and distance between descriptors.
	// int queryIdx   -> index of the query image's descriptor, and hence of the corresponding keypoint.
	// int trainIdx   -> index of the train image's descriptor, and likewise of its keypoint.
	// int imgIdx     -> only meaningful when matching against several train images.
	// float distance -> distance between the two descriptors (vectors); the smaller, the more similar.
	std::vector<cv::DMatch> matches;

	// The FlannBased matcher needs CV_32F descriptors; ORB outputs a different type, so convert first
	if (despL.type() != CV_32F || despR.type() != CV_32F)
	{
		despL.convertTo(despL, CV_32F);
		despR.convertTo(despR, CV_32F);
	}

	cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("FlannBased");
	matcher->match(despL, despR, matches);

	// Find the maximum distance among all matches
	double maxDist = 0; 
	for (int i = 0; i < despL.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist > maxDist) 
			maxDist = dist;
	}

	// Keep the good matches (distance below half of the maximum)
	std::vector< cv::DMatch > good_matches;
	for (int i = 0; i < despL.rows; i++)
	{
		if (matches[i].distance < 0.5*maxDist)
		{
			good_matches.push_back(matches[i]);
		}
	}



	cv::Mat imageOutput;
	cv::drawMatches(imageL, keyPointL, imageR, keyPointR, good_matches, imageOutput);

	cv::namedWindow("picture of matching");
	cv::imshow("picture of matching", imageOutput);
	cv::waitKey(0);
	return 0;
}

 
