Python / C++ OpenCV: De-duplicating Two Consecutive Screenshots & Finding Image Differences

Overview:

  • Scenario and task: decide whether two adjacent WeChat chat screenshots are the same image (transmission compression and format conversion introduce some pixel distortion and edge jitter, so the images cannot simply be subtracted).
  • Requirements: use classical digital image processing only (this is just a de-duplication preprocessing step, so no deep learning); each pair of images must be judged within 20 ms.
  • Approach:
    • Convert to HSV space and first isolate the chat bubbles with color thresholds;
    • Find contours and compare the number of enclosing rectangles in the two images; if the counts differ, the screenshots are different;
    • Otherwise extract ORB keypoints and descriptors, compute the slope of the line joining each pair of matched keypoints, and discard the largest and smallest 20% of the slopes; if the mean of the remaining 60% is close to 0, the two images are the same, otherwise they are different (a small worked example follows this list);
    • Once the Python version works, port it to C++ to meet the speed requirement.
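
A minimal numeric sketch of the slope criterion, using made-up coordinates and the fixed 800 px horizontal offset that the code below relies on:

# slope = (y - y') / (x' - x + 800): the two images, each resized to 800 px wide,
# are conceptually placed side by side, and the slope of the line joining a
# matched pair of keypoints is measured on that combined canvas.
def compute_slope(src, dst):
    return (src[1] - dst[1]) / (dst[0] - src[0] + 800)

# duplicate screenshots: matched points sit at nearly the same position,
# so the connecting line is almost horizontal
print(compute_slope((120, 300), (121, 299)))   # ~0.0012, close to 0

# scrolled / different screenshots: the vertical offset dominates
print(compute_slope((120, 300), (118, 420)))   # ~-0.15, far from 0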

Python code:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import os, glob, time


orb = cv2.ORB_create()


def read_img(name1, name2):
    img1 = cv2.imread(name1, 1)
    img2 = cv2.imread(name2, 1)
    # crop both images to their common size, then resize each to a fixed 800 px width
    h, w = min(img1.shape[0], img2.shape[0]), min(img1.shape[1], img2.shape[1])
    img1 = img1[:h, :w]
    img2 = img2[:h, :w]
    img1 = cv2.resize(img1, (800, 800 * img1.shape[0] // img1.shape[1]))
    img2 = cv2.resize(img2, (800, 800 * img2.shape[0] // img2.shape[1]))
    return img1, img2


def color_enhance(img):
    # keep only the green and near-white regions (the WeChat chat bubbles);
    # everything else becomes black
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower_green = np.array([35, 43, 46])
    upper_green = np.array([77, 255, 255])
    mask_green = cv2.inRange(hsv, lowerb=lower_green, upperb=upper_green)
    lower_white = np.array([0, 0, 245])
    upper_white = np.array([180, 30, 255])
    mask_white = cv2.inRange(hsv, lowerb=lower_white, upperb=upper_white)

    dst_w = cv2.bitwise_and(img, img, mask=mask_white)
    dst_g = cv2.bitwise_and(img, img, mask=mask_green)
    dst = dst_w + dst_g
    return dst


def count_box(img, show=False):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # thresh is only used for visualization; findContours treats every non-zero
    # pixel of the masked grayscale image as foreground, so gray is used directly
    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 9, 2)
    contours, hierarchy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    H, W = img.shape[:2]
    count = 0
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        # skip tiny regions and boxes that are much taller than they are wide
        if cv2.contourArea(contour) < H * W / 500 or h > w * 1.1:
            continue
        count += 1
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

    if show:
        cv2.imshow('img', img)
        cv2.imshow('gray', gray)
        cv2.imshow('thresh', thresh)
        cv2.waitKey(0)
    return count


def orb_match(img1, img2):
    kp1, des1 = orb.detectAndCompute(img1, None)  # des1/des2 are the ORB descriptors
    kp2, des2 = orb.detectAndCompute(img2, None)
    return kp1, des1, kp2, des2


def draw_keypoints(img, keypoints, color=(0, 255, 255)):
    for kp in keypoints:
            x, y = kp.pt
            cv2.circle(img, (int(x), int(y)), 2, color)
    return img


def match_imgs(des1, des2):
    # ORB descriptors are binary, so Hamming distance is the appropriate norm
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.knnMatch(des1, des2, k=2)
    good = []
    for pair in matches:
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.8 * n.distance:  # Lowe's ratio test
            good.append([m])
    return good


def compute_slope(src, dst):
    # slope = (y - y') / (x' - x + 800); the +800 is the horizontal offset of the
    # second image when the two 800 px wide images are placed side by side
    return (src[1] - dst[1]) / (dst[0] - src[0] + 800)


def judge(img1, img2, show=False):
    img3, img4 = color_enhance(img1), color_enhance(img2)
    n1 = count_box(img3)
    n2 = count_box(img4)
    if n1 != n2:
        print('n1, n2: ', n1, n2)
        return False
    kp1, des1, kp2, des2 = orb_match(img3, img4)
    good = match_imgs(des1, des2)
    src_pts = np.float32([kp1[m[0].queryIdx].pt for m in good]).reshape(-1, 2)
    dst_pts = np.float32([kp2[m[0].trainIdx].pt for m in good]).reshape(-1, 2)
    all_slopes = []
    for i in range(len(src_pts)):
        all_slopes.append(compute_slope(src_pts[i], dst_pts[i]))
    if not all_slopes:
        # no good matches at all, treat as different screenshots
        return False
    all_slopes.sort()
    # trimmed mean: drop the smallest and largest 20% of the slopes
    len_s = len(all_slopes) // 5
    filtered_slopes = all_slopes[len_s:-len_s]
    slopes = filtered_slopes if filtered_slopes else all_slopes

    if show:
        slopes = pd.Series(slopes)
        # print(slopes.describe())
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.hist(slopes, bins=20, color='blue', alpha=0.8)
        plt.show()

        img5 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
        thresh_merge = np.hstack((img3, img4))
        cv2.imshow("thresh_merge", thresh_merge)

        visual_1 = draw_keypoints(img1, kp1, color=(255, 0, 255))
        visual_2 = draw_keypoints(img2, kp2, color=(255, 0, 255))
        hmerge = np.hstack((visual_1, visual_2))
        cv2.imshow("point", hmerge)
        cv2.imshow("ORB", img5)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    slopes_mean = sum(slopes) / len(slopes)
    print('abs slope mean: ', abs(slopes_mean))
    # duplicates give a trimmed mean slope very close to 0
    return abs(slopes_mean) < 0.01


if __name__ == '__main__':
    name1, name2 = './1.png', './2.png'
    img1, img2 = read_img(name1, name2)
    if judge(img1, img2, show=True):
        print('Same screenshots.')
    else:
        print('Different screenshots.')
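
The os, glob, and time imports at the top are only needed for batch testing; a minimal sketch (assuming a hypothetical ./screenshots folder of numbered PNG files) of how consecutive pairs could be scanned and each judgement timed against the 20 ms budget:

names = sorted(glob.glob(os.path.join('./screenshots', '*.png')))
for name1, name2 in zip(names, names[1:]):
    img1, img2 = read_img(name1, name2)
    start = time.time()  # only the judge call is timed, not reading/resizing
    same = judge(img1, img2, show=False)
    cost_ms = (time.time() - start) * 1000
    print('%s vs %s: %s (%.1f ms)' % (name1, name2,
                                      'same' if same else 'different', cost_ms))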

 

C++ code (the color-enhancement step is dropped):

JudgeDuplicates.h

#ifndef JUDGEDUPLICATES_H
#define JUDGEDUPLICATES_H

#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>


class JudgeDuplicates
{
    public:
        JudgeDuplicates();
        void orb_match(cv::Mat, cv::Mat,
                       std::vector<cv::KeyPoint>&,
                       std::vector<cv::KeyPoint>&,
                       std::vector<cv::DMatch>&);
        double compute_slope(cv::Point, cv::Point);
        bool judge(std::string, std::string);
        virtual ~JudgeDuplicates();

    protected:

    private:
};

#endif // JUDGEDUPLICATES_H

JudgeDuplicates.cpp

#include "JudgeDuplicates.h"

JudgeDuplicates::JudgeDuplicates()
{
    //ctor
}

JudgeDuplicates::~JudgeDuplicates()
{
    //dtor
}


void JudgeDuplicates::orb_match(cv::Mat img1, cv::Mat img2,
                                std::vector<cv::KeyPoint>& kp1,
                                std::vector<cv::KeyPoint>& kp2,
                                std::vector<cv::DMatch>& goodmatches){
    int nfeatures = 500;  // maximum number of ORB features (500 is also the OpenCV default)
    cv::Ptr<cv::ORB> detector = cv::ORB::create(nfeatures);
    cv::Mat des1, des2;
    detector->detectAndCompute(img1, cv::Mat(), kp1, des1);
    detector->detectAndCompute(img2, cv::Mat(), kp2, des2);

    // ORB descriptors are binary, so Hamming distance is the appropriate metric
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
    std::vector<std::vector<cv::DMatch> > matches_knn;
    matcher->knnMatch(des1, des2, matches_knn, 2);
    for (size_t i = 0; i < matches_knn.size(); ++i){
        if(matches_knn[i].size() < 2){
            continue;
        }
        // Lowe's ratio test
        if(matches_knn[i][0].distance < 0.8 * matches_knn[i][1].distance){
            goodmatches.push_back(matches_knn[i][0]);
        }
    }
}


double JudgeDuplicates::compute_slope(cv::Point src, cv::Point dst){
    // slope = (y - y') / (x' - x + 800)
    return double(src.y - dst.y) / double(dst.x - src.x + 800.0);
}


bool JudgeDuplicates::judge(std::string name1, std::string name2){
    cv::Mat img1 = cv::imread(name1, 1);
    cv::Mat img2 = cv::imread(name2, 1);
    if(img1.empty() || img2.empty()){
        return false;  // could not read one of the images
    }
    int h1 = img1.rows;
    int w1 = img1.cols;
    // resize both images to the same 800 px wide size (using img1's aspect ratio)
    cv::resize(img1, img1, cv::Size(800, 800 * h1 / w1));
    cv::resize(img2, img2, cv::Size(800, 800 * h1 / w1));

    std::vector<cv::KeyPoint> kp1, kp2;
    std::vector<cv::DMatch> good_matches;
    orb_match(img1, img2, kp1, kp2, good_matches);
    std::cout << good_matches.size() << std::endl;

    std::vector<cv::Point> src_pts, dst_pts;
    for(size_t i = 0; i < good_matches.size(); ++i){
        int x1 = kp1[good_matches[i].queryIdx].pt.x;
        int y1 = kp1[good_matches[i].queryIdx].pt.y;
        int x2 = kp2[good_matches[i].trainIdx].pt.x;
        int y2 = kp2[good_matches[i].trainIdx].pt.y;
        cv::Point src_pt = cv::Point(x1, y1);
        cv::Point dst_pt = cv::Point(x2, y2);
        src_pts.push_back(src_pt);
        dst_pts.push_back(dst_pt);
    }
    double slope, mean_slope = 0.0;
    std::vector<double> slopes;
    for(size_t i = 0; i < src_pts.size(); ++i){
        slope = compute_slope(src_pts[i], dst_pts[i]);
        slopes.push_back(slope);
    }
    std::sort(slopes.begin(), slopes.end());
    int line_cnt = 0;
    // keep the middle 60% of the sorted slopes and average their absolute values
    for(size_t i = 0; i < slopes.size(); ++i){
        if(i < slopes.size() * 0.2){
            continue;
        }
        if(i > slopes.size() * 0.8){
            break;
        }
        line_cnt += 1;
        mean_slope += std::fabs(slopes[i]);
        std::cout << slopes[i] << std::endl;
    }

    if(line_cnt != 0){
        mean_slope /= line_cnt;
    }
    else{
        mean_slope = 1000000;  // no usable matches, force a "different" verdict
    }
    std::cout << mean_slope << " line_cnt " << line_cnt << std::endl;

    // duplicates give a trimmed mean of absolute slopes very close to 0
    return mean_slope < 0.001;
}
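
The post does not include a driver for the C++ class; a minimal usage sketch (the image paths are placeholders, not part of the original test setup) that times a single call with std::chrono to check it against the 20 ms requirement might look like this:

main.cpp

#include <chrono>
#include <iostream>
#include "JudgeDuplicates.h"

int main(){
    JudgeDuplicates jd;
    auto start = std::chrono::steady_clock::now();
    bool same = jd.judge("./1.png", "./2.png");   // placeholder image paths
    auto end = std::chrono::steady_clock::now();
    double ms = std::chrono::duration<double, std::milli>(end - start).count();
    std::cout << (same ? "Same screenshots." : "Different screenshots.")
              << " (" << ms << " ms)" << std::endl;
    return 0;
}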

 
