Calling C++ code from Python with pybind11

C++ code

#include <pybind11/pybind11.h>
#include <opencv2/opencv.hpp>
#include <pybind11/numpy.h>

using namespace std;
using namespace cv;
namespace py = pybind11;

// Bilinear interpolation: sample src at the floating-point position (ux, uy)
// and write the result to dst at column i, row j. Only 3-channel images are handled.
void BilinearInsert(Mat& src, Mat& dst, float ux, float uy, int i, int j)
{
	int height = src.rows - 1;
	int width = src.cols - 1;
	int c = src.channels();
	if (c == 3)
	{
		// Integer part of the source coordinates, clamped so that the
		// neighbouring pixel (iu + 1, iv + 1) stays inside the image.
		int iu = (int)ux;
		int iv = (int)uy;
		if (iu >= width)
		{
			iu = width - 1;
		}
		if (iv >= height)
		{
			iv = height - 1;
		}

		// Fractional parts of the coordinates, used as interpolation weights.
		float dx = ux - iu;
		float dy = uy - iv;

		// src.step is the number of bytes per image row; each pixel stores
		// three interleaved uchar channels.
		const uchar* row0 = src.data + src.step * iv;        // source row iv
		const uchar* row1 = src.data + src.step * (iv + 1);  // source row iv + 1
		uchar* drow = dst.data + dst.step * j;               // destination row j

		// Interpolate along x on the two neighbouring source rows.
		Point3f f1, f2;
		f1.x = row0[iu * 3]     * (1 - dx) + row0[(iu + 1) * 3]     * dx;
		f1.y = row0[iu * 3 + 1] * (1 - dx) + row0[(iu + 1) * 3 + 1] * dx;
		f1.z = row0[iu * 3 + 2] * (1 - dx) + row0[(iu + 1) * 3 + 2] * dx;

		f2.x = row1[iu * 3]     * (1 - dx) + row1[(iu + 1) * 3]     * dx;
		f2.y = row1[iu * 3 + 1] * (1 - dx) + row1[(iu + 1) * 3 + 1] * dx;
		f2.z = row1[iu * 3 + 2] * (1 - dx) + row1[(iu + 1) * 3 + 2] * dx;

		// Interpolate along y and write the three channels of dst at (j, i).
		drow[i * 3]     = (uchar)(f1.x * (1 - dy) + f2.x * dy);
		drow[i * 3 + 1] = (uchar)(f1.y * (1 - dy) + f2.y * dy);
		drow[i * 3 + 2] = (uchar)(f1.z * (1 - dy) + f2.z * dy);
	}
}
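
// Both warps below implement the "Interactive Image Warping" local warps that the
// pure-Python reference functions further down (localTranslationWarp1/2) also use.
// For the thin-face warp: every pixel X inside the circle of radius r centred on
// C = (warpX, warpY) samples its value from the source position
//
//     U = X - ((r^2 - |X-C|^2) / (r^2 - |X-C|^2 + |M-C|^2))^2 * (M - C)
//
// where M = (endX, endY) is the point the circle centre is dragged towards.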
// Local translation warp ("thin face"): pixels inside the circle of the given
// radius around (warpX, warpY) are pulled towards (endX, endY).
Mat LocalTranslationWarp_Face(Mat& img, int warpX, int warpY, int endX, int endY, float radius)
{
	Mat dst = img.clone();
	float ddradius = radius * radius;
	// |m - c|^2: squared length of the drag vector
	float mc = (float)((endX - warpX) * (endX - warpX) + (endY - warpY) * (endY - warpY));
	int height = img.rows;
	int width = img.cols;

	auto Abs = [&](float f) {
		return f > 0 ? f : -f;
	};

	for (int i = 0; i < width; i++)
	{
		for (int j = 0; j < height; j++)
		{
			// Quick reject: skip pixels outside the bounding square of the circle
			if ((Abs(i - warpX) > radius) && (Abs(j - warpY) > radius))
				continue;

			float distance = (i - warpX) * (i - warpX) + (j - warpY) * (j - warpY);
			if (distance < ddradius)
			{
				// Squared term on the right-hand side of the warp formula
				float ratio = (ddradius - distance) / (ddradius - distance + mc);
				ratio *= ratio;

				// Source position that maps to (i, j)
				float UX = i - ratio * (endX - warpX);
				float UY = j - ratio * (endY - warpY);

				// Sample the source position with bilinear interpolation
				BilinearInsert(img, dst, UX, UY, i, j);
			}
		}
	}

	return dst;
}
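
// The eye-enlargement warp uses a radial scaling instead of a translation: with
// rnorm = |X - C| / r, the source position is
//
//     U = C + (1 - 0.5 * (rnorm - 1)^2) * (X - C)
//
// Pixels near the centre sample from closer to the centre (magnification), and
// the effect fades to zero at the circle boundary.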
// Local scaling warp ("enlarge eye"): pixels inside the circle around
// (warpX, warpY) are scaled radially. (endX, endY) is accepted but not used
// by this warp.
Mat LocalTranslationWarp_Eye(Mat& img, int warpX, int warpY, int endX, int endY, float radius)
{
	Mat dst = img.clone();
	float ddradius = radius * radius;
	int height = img.rows;
	int width = img.cols;

	auto Abs = [&](float f) {
		return f > 0 ? f : -f;
	};

	for (int i = 0; i < width; i++)
	{
		for (int j = 0; j < height; j++)
		{
			// Quick reject: skip pixels outside the bounding square of the circle
			if ((Abs(i - warpX) > radius) && (Abs(j - warpY) > radius))
				continue;

			float distance = (i - warpX) * (i - warpX) + (j - warpY) * (j - warpY);
			if (distance < ddradius)
			{
				// Radial scaling factor
				float rnorm = sqrt(distance) / radius;
				float ratio = 1 - (rnorm - 1) * (rnorm - 1) * 0.5f;

				// Source position that maps to (i, j)
				float UX = warpX + ratio * (i - warpX);
				float UY = warpY + ratio * (j - warpY);

				// Sample the source position with bilinear interpolation
				BilinearInsert(img, dst, UX, UY, i, j);
			}
		}
	}
	return dst;
}
// numpy (H, W, 3) uint8 array -> cv::Mat. The Mat wraps the numpy buffer
// without copying, so the input array must stay alive while the Mat is used.
Mat numpy_uint8_3c_to_cv_mat(py::array_t<unsigned char>& input) {
	if (input.ndim() != 3)
		throw std::runtime_error("3-channel image must be 3 dims");
	py::buffer_info buf = input.request();
	Mat mat(buf.shape[0], buf.shape[1], CV_8UC3, (unsigned char*)buf.ptr);
	return mat;
}

// numpy (H, W) uint8 array -> single-channel cv::Mat (no copy, see above).
Mat numpy_uint8_1c_to_cv_mat(py::array_t<unsigned char>& input) {
	if (input.ndim() != 2)
		throw std::runtime_error("1-channel image must be 2 dims");
	py::buffer_info buf = input.request();
	Mat mat(buf.shape[0], buf.shape[1], CV_8UC1, (unsigned char*)buf.ptr);
	return mat;
}

// cv::Mat -> numpy array. The py::array_t constructor copies the pixel data.
py::array_t<unsigned char> cv_mat_uint8_1c_to_numpy(cv::Mat& input) {
	py::array_t<unsigned char> dst = py::array_t<unsigned char>({ input.rows, input.cols }, input.data);
	return dst;
}

py::array_t<unsigned char> cv_mat_uint8_3c_to_numpy(cv::Mat& input) {
	py::array_t<unsigned char> dst = py::array_t<unsigned char>({ input.rows, input.cols, 3 }, input.data);
	return dst;
}

// Enlarge-eye wrapper: numpy (H, W, 3) uint8 in, warped numpy (H, W, 3) uint8 out.
py::array_t<unsigned char> eyebig(py::array_t<unsigned char>& input, int warpX, int warpY, int endX, int endY, float radius) {
	Mat img_rgb = numpy_uint8_3c_to_cv_mat(input);
	Mat dst = LocalTranslationWarp_Eye(img_rgb, warpX, warpY, endX, endY, radius);
	return cv_mat_uint8_3c_to_numpy(dst);
}

// Thin-face wrapper with the same interface.
py::array_t<unsigned char> facethin(py::array_t<unsigned char>& input, int warpX, int warpY, int endX, int endY, float radius) {
	Mat img_rgb = numpy_uint8_3c_to_cv_mat(input);
	Mat dst = LocalTranslationWarp_Face(img_rgb, warpX, warpY, endX, endY, radius);
	return cv_mat_uint8_3c_to_numpy(dst);
}

// Simple conversion test: 3-channel image -> grayscale.
// (Images loaded with cv2.imread are BGR, so COLOR_BGR2GRAY would match that
// layout; COLOR_RGB2GRAY only changes the channel weighting.)
py::array_t<unsigned char> test_rgb_to_gray(py::array_t<unsigned char>& input) {
	Mat img_rgb = numpy_uint8_3c_to_cv_mat(input);
	Mat dst;
	cv::cvtColor(img_rgb, dst, cv::COLOR_RGB2GRAY);
	return cv_mat_uint8_1c_to_numpy(dst);
}

PYBIND11_MODULE(py2cpp, m) {
	m.doc() = "pybind11 example";
	// Only the numpy-based wrappers are exposed; binding LocalTranslationWarp_Eye
	// directly would require a type caster for cv::Mat.
	m.def("test_rgb_to_gray", &test_rgb_to_gray, "test_rgb_to_gray");
	m.def("eyebig", &eyebig, "eyebig");
	m.def("facethin", &facethin, "facethin");
}
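
Once the extension above has been built into a py2cpp module (for example with CMake and pybind11, linked against OpenCV) and placed on the Python path, it can be imported like any other module. A minimal smoke test might look like the sketch below; the warp centre, target point and radius are arbitrary example values, not values taken from the script:

import cv2
import py2cpp

img = cv2.imread('1.jpg')            # uint8 BGR image, shape (H, W, 3)

gray = py2cpp.test_rgb_to_gray(img)  # returns an (H, W) uint8 array
print(gray.shape, gray.dtype)

# Arbitrary example values for the warp centre, target point and radius
big = py2cpp.eyebig(img, 100, 100, 150, 150, 40.0)
cv2.imwrite('eyebig_test.jpg', big)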

Python code

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import py2cpp as obj
import dlib
import cv2
import numpy as np
import math
import time
predictor_path = 'shape_predictor_68_face_landmarks.dat'

# Use dlib's built-in frontal_face_detector as the face detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)

def landmark_dec_dlib_fun(img_src):
    # Convert to grayscale for the detector
    img_gray = cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
    land_marks = []

    rects = detector(img_gray, 0)
    for i in range(len(rects)):
        land_marks_node = np.matrix([[p.x, p.y] for p in predictor(img_gray, rects[i]).parts()])
        # Optionally draw and number the 68 landmarks on img_src for debugging:
        # for idx, point in enumerate(land_marks_node):
        #     pos = (point[0, 0], point[0, 1])
        #     cv2.circle(img_src, pos, 5, color=(0, 255, 0))
        #     cv2.putText(img_src, str(idx + 1), pos, cv2.FONT_HERSHEY_SIMPLEX,
        #                 0.5, (0, 0, 0), 1, cv2.LINE_AA)
        land_marks.append(land_marks_node)

    return land_marks


'''
Method 1: Interactive Image Warping, local scaling (eye enlargement)
'''
def localTranslationWarp1(srcImg, startX, startY, endX, endY, radius):
    ddradius = float(radius * radius)
    copyImg = srcImg.copy()
    H, W, C = srcImg.shape
    for i in range(W):
        for j in range(H):
            # Check whether the point lies inside the deformation circle.
            # Quick reject: skip points outside the bounding square around (startX, startY).
            if math.fabs(i - startX) > radius and math.fabs(j - startY) > radius:
                continue

            distance = (i - startX) * (i - startX) + (j - startY) * (j - startY)

            if distance < ddradius:
                # Radial scaling: map (i, j) back to its source position
                rnorm = math.sqrt(distance) / radius
                ratio = 1 - (rnorm - 1) * (rnorm - 1) * 0.5
                UX = startX + ratio * (i - startX)
                UY = startY + ratio * (j - startY)
                # Sample the source position with bilinear interpolation
                value = BilinearInsert(srcImg, UX, UY)
                # Write the value at (i, j)
                copyImg[j, i] = value
    return copyImg
'''
Method 2: Interactive Image Warping, local translation (face thinning)
'''
def localTranslationWarp2(srcImg, startX, startY, endX, endY, radius):
    ddradius = float(radius * radius)
    copyImg = srcImg.copy()

    # |m-c|^2 in the warp formula
    ddmc = (endX - startX) * (endX - startX) + (endY - startY) * (endY - startY)
    H, W, C = srcImg.shape
    for i in range(W):
        for j in range(H):
            # Check whether the point lies inside the deformation circle.
            # Quick reject: skip points outside the bounding square around (startX, startY).
            if math.fabs(i - startX) > radius and math.fabs(j - startY) > radius:
                continue

            distance = (i - startX) * (i - startX) + (j - startY) * (j - startY)

            if distance < ddradius:
                # Squared term on the right-hand side of the warp formula
                ratio = (ddradius - distance) / (ddradius - distance + ddmc)
                ratio = ratio * ratio

                # Source position that maps to (i, j)
                UX = i - ratio * (endX - startX)
                UY = j - ratio * (endY - startY)

                # Sample the source position with bilinear interpolation
                value = BilinearInsert(srcImg, UX, UY)
                # Write the value at (i, j)
                copyImg[j, i] = value

    return copyImg



# Bilinear interpolation: sample src at the floating-point position (ux, uy)
def BilinearInsert(src, ux, uy):
    h, w, c = src.shape
    if c == 3:
        x1 = int(ux)
        x2 = x1 + 1
        y1 = int(uy)
        y2 = y1 + 1

        part1 = src[y1, x1].astype(np.float64) * (float(x2) - ux) * (float(y2) - uy)
        part2 = src[y1, x2].astype(np.float64) * (ux - float(x1)) * (float(y2) - uy)
        part3 = src[y2, x1].astype(np.float64) * (float(x2) - ux) * (uy - float(y1))
        part4 = src[y2, x2].astype(np.float64) * (ux - float(x1)) * (uy - float(y1))

        insertValue = part1 + part2 + part3 + part4

        # Pixel values are unsigned 8-bit, so cast to uint8 (not int8)
        return insertValue.astype(np.uint8)

src = cv2.imread('1.jpg')

landmarks = landmark_dec_dlib_fun(src)

# Take the landmarks of the (last) detected face; the trackbar callbacks below
# use these module-level points.
for landmarks_node in landmarks:
    # Point 38 (left eye)
    left_landmark = landmarks_node[37]
    # Point 28 (nose bridge) - not used; the radius comes from the trackbar
    left_landmark_down = landmarks_node[27]
    # Point 45 (right eye)
    right_landmark = landmarks_node[44]
    # Point 28 (nose bridge) - not used
    right_landmark_down = landmarks_node[27]
    # Point 31: nose tip, used as the warp target
    endPt = landmarks_node[30]
def face_big_auto(x):
    # Trackbar callback: enlarge both eyes. The landmarks were computed once at
    # module level; the radius comes from the current trackbar position.
    if len(landmarks) == 0:
        return
    w = cv2.getTrackbarPos('Bigeye', 'How big the eyes do you want')
    # The radius could also be derived from distances between landmarks; here it
    # is simply the trackbar value.
    r_left = w
    r_right = w
    # Enlarge the left eye (pure-Python alternative: localTranslationWarp1)
    big_image = obj.eyebig(src, left_landmark[0, 0], left_landmark[0, 1], endPt[0, 0], endPt[0, 1],
                           r_left)
    # Enlarge the right eye
    big_image = obj.eyebig(big_image, right_landmark[0, 0], right_landmark[0, 1], endPt[0, 0],
                           endPt[0, 1], r_right)
    # Show the result
    cv2.imshow('How big the eyes do you want', big_image)
    cv2.waitKey(1)

def face_thin_auto(x):
    # Trackbar callback: thin both sides of the face.
    s = cv2.getTrackbarPos('Thinface', 'How big the eyes do you want')
    # Skip if no face landmarks were detected
    if len(landmarks) == 0:
        return

    for landmarks_node in landmarks:
        # Point 4 (left side of the face contour)
        left_landmark = landmarks_node[3]
        # Point 6 (left side of the face contour)
        left_landmark_down = landmarks_node[5]
        # Point 14 (right side of the face contour)
        right_landmark = landmarks_node[13]
        # Point 16 (right side of the face contour)
        right_landmark_down = landmarks_node[15]
        # Point 31: nose tip, used as the warp target
        endPt = landmarks_node[30]

        # The thinning radius could be the point 4 - point 6 distance (left) and
        # the point 14 - point 16 distance (right); here it is the trackbar value.
        r_left = s
        r_right = s
        # Thin the left side of the face (pure-Python alternative: localTranslationWarp2)
        thin_image = obj.facethin(src, left_landmark[0, 0], left_landmark[0, 1], endPt[0, 0], endPt[0, 1],
                                  r_left)
        # Thin the right side of the face
        thin_image = obj.facethin(thin_image, right_landmark[0, 0], right_landmark[0, 1], endPt[0, 0],
                                  endPt[0, 1], r_right)

    # Show the result
    cv2.imshow('How big the eyes do you want', thin_image)
    cv2.waitKey(1)


cv2.namedWindow('How big the eyes do you want')
cv2.createTrackbar('Bigeye', 'How big the eyes do you want', 0, 70, face_big_auto)
cv2.createTrackbar('Thinface', 'How big the eyes do you want', 0, 60, face_thin_auto)
cv2.imshow('How big the eyes do you want',src)
cv2.waitKey(0)
# while True:
#     #cv2.imshow('How big the eyes do you want', window)
#     ch = cv2.waitKey(1) & 0xFF
#     if ch == 27:
#         break

cv2.destroyAllWindows()