利用OPENCV为android开发畸变校正的JNI库

需要为项目提供一套畸变校正的算法,由于需要大量的矩阵运算,考虑到效率和实时性,使用JNI开发,希望把有关数组矩阵的处理和变换全部放入C语言中处理。

主要用于android移动端,大致的数据来源一是从camera直接读取YUV数据,一种是从第三方接口读取RGB数据,另一种是直接对BITMAP进行处理。

1.考虑到硬件设备接口,第三方软件接口,图像接口,OPENCV接口,希望能够开发出通用的算法库,一劳永逸的解决各种复杂的使用场景,因此数据要支持YUV,支持ARGB,支持MAT

2android对BITMAP有获取像素点的操作,也有通过像素点生成BITMAP的操作,而且有很多图像处理接口和第三方可以处理RGB矩阵,如

bm.getPixels(pixs, 0, w, 0, 0, w, h);
int[] pixs1 = new int[w*h]; 
        final Bitmap bm2 = Bitmap.createBitmap(pixs1, w, h, Bitmap.Config.ARGB_8888);

因此设计如下接口,入口为ARGB的整型,输出也是整型

public static native boolean RgbaUndistort(int[] argb, int width, int height, int[] pixels);

3考虑到有些情况需要二维数组,

public static native boolean RgbaUndistort2(int[][] rgb, int width, int height, int[] pixels);

4考虑到OPENCV的MAT结构,由于MAT有matToBitmap可以直接转化为BITMAP,应用MAT 提供

public static native boolean RgbaUndistortMat(int[] argb, int width, int height, long pArgbOutMatAddr);

5考虑到第三方使用MAT的情况,因此输入也可以支持MAT因此设计接口

public static native boolean RgbMatUndistortMat(long pArgbMatAddr, int width, int height, long pArgbOutMatAddr);

6考虑到摄像头输出YUV,提供YUV数据处理, 一个输出RGB, 一个输出MAT

public static native boolean YuvNv21UndistortRgba(byte[] YuvNv21, int width, int height, int[] pixels);
public static native boolean YuvNv21UndistortRgbaMat(byte[] YuvNv21, int width, int height, long pMatAddr);

7考虑到可能有不需要畸变的场合,为YUV设计一个灰度,一个RGB接口

public static native boolean YuvNv21ToGray(byte[] YuvNv21,int width, int height,  int[] pixels);
public static native boolean YuvNv21ToRGBA(byte[] YuvNv21, int width, int height, int[] pixels);


8于是编写简单的JAVA原生(native)类

/**
 * JNI wrapper for the native "ImgProc3" library (OpenCV-based lens
 * distortion correction and YUV/RGBA conversion).
 *
 * Conventions shared by all methods below:
 *  - {@code YuvNv21}: raw NV21 camera frame, width*height*3/2 bytes.
 *  - {@code pixels}: caller-allocated int[width*height] receiving ARGB_8888
 *    pixels, suitable for {@code Bitmap.createBitmap(...)}.
 *  - {@code long ...MatAddr} parameters are native addresses of OpenCV
 *    {@code Mat} objects (as returned by {@code Mat.getNativeObjAddr()}).
 *  - Return value signals success.
 *
 * NOTE: method names and signatures must not change — they are bound to the
 * native symbols {@code Java_ImgProc_ImageProc3_*}.
 */
public class ImageProc3 {
	static {
		// Loads libImgProc3.so, which implements the native methods below.
		System.loadLibrary("ImgProc3");
	}
	
	// Plain YUV conversions, no distortion correction applied.
	public static native boolean YuvNv21ToGray(byte[] YuvNv21,int width, int height,  int[] pixels);
	public static native boolean YuvNv21ToRGBA(byte[] YuvNv21, int width, int height, int[] pixels);
	
	
	// Distortion correction on RGBA input: flat int[], int[][], or
	// output/input delivered through native Mat addresses.
	public static native boolean RgbaUndistort(int[] argb, int width, int height, int[] pixels);
	public static native boolean RgbaUndistort2(int[][] rgb, int width, int height, int[] pixels);
	public static native boolean RgbaUndistortMat(int[] argb, int width, int height, long pArgbOutMatAddr);
	public static native boolean RgbMatUndistortMat(long pArgbMatAddr, int width, int height, long pArgbOutMatAddr);
	
	// NV21 input -> undistorted RGBA, to an int[] or to a native Mat.
	public static native boolean YuvNv21UndistortRgba(byte[] YuvNv21, int width, int height, int[] pixels);
	public static native boolean YuvNv21UndistortRgbaMat(byte[] YuvNv21, int width, int height, long pMatAddr);

}

进入BIN目录的classes文件夹,使用 javah -classpath . -jni ImgProc.ImageProc3 生成C头文件

根据头文件编写实现的C代码


#include <stdio.h>
#include <jni.h>
#include<Android/log.h>


#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>


using namespace std;
using namespace cv;


// NOTE(review): the include above is written <Android/log.h>; the NDK header
// is <android/log.h> — case matters on case-sensitive build hosts. Verify.
#define TAG    "Camera XXXXX" // log tag used by the LOGD macro below
#define LOGD(...)  __android_log_print(ANDROID_LOG_DEBUG,TAG,__VA_ARGS__) // debug-level logcat shorthand


#ifdef __cplusplus
extern "C" {
#endif
/*
 * Class:     ImgProc_ImageProc3
 * Method:    YuvNv21ToGray
 * Signature: ([BII[I)Z
 */
/*
 * Converts an NV21 frame to a grayscale ARGB_8888 pixel buffer.
 *
 * Fix: the previous implementation wrapped only `height` rows of the buffer
 * but applied CV_YUV420sp2RGBA, which expects height*3/2 rows (Y plane plus
 * interleaved VU plane) — and produced color, not gray, despite the name.
 * The Y (luma) plane alone *is* the grayscale image, so we wrap just the
 * first width*height bytes and replicate the luma into R/G/B.
 *
 * YuvNv21: NV21 frame (>= width*height bytes are read)
 * pixels:  caller-allocated int[width*height] output
 */
JNIEXPORT jboolean JNICALL Java_ImgProc_ImageProc3_YuvNv21ToGray
  (JNIEnv *jenv, jclass jclassz, jbyteArray YuvNv21, jint width, jint height, jintArray pixels){

	jbyte * pNV21FrameData = jenv->GetByteArrayElements(YuvNv21, 0);
	jint * poutPixels = jenv->GetIntArrayElements(pixels, 0);

	// Y plane only: one byte per pixel, already the gray intensity.
	Mat yPlane(height, width, CV_8UC1, (unsigned char*) pNV21FrameData);
	Mat out(height, width, CV_8UC4, (unsigned char*) poutPixels);

	// Gray -> 4-channel so the caller can hand `pixels` straight to
	// Bitmap.createBitmap(..., ARGB_8888); alpha is set to 255.
	cvtColor(yPlane, out, CV_GRAY2RGBA);

	jenv->ReleaseByteArrayElements(YuvNv21, pNV21FrameData, 0);
	jenv->ReleaseIntArrayElements(pixels, poutPixels, 0);

	return true;
}


/*
 * Class:     ImgProc_ImageProc3
 * Method:    YuvNv21ToRGBA
 * Signature: ([BII[I)Z
 */
/*
 * Converts a full NV21 frame to ARGB_8888 pixels.
 *
 * Fix/simplification: the previous code converted NV21 -> RGBA into a temp
 * Mat, then swapped the R and B channels with split()/merge(). Swapping R
 * and B of an RGBA image is by definition a BGRA layout, so a single
 * CV_YUV420sp2BGRA conversion produces the identical result with no
 * temporary allocations.
 *
 * YuvNv21: NV21 frame, width*height*3/2 bytes (Y plane + interleaved VU)
 * pixels:  caller-allocated int[width*height] output
 */
JNIEXPORT jboolean JNICALL Java_ImgProc_ImageProc3_YuvNv21ToRGBA
  (JNIEnv *jenv, jclass jclassz, jbyteArray YuvNv21, jint width, jint height, jintArray pixels){
	jbyte * pBuf = (jbyte*) jenv->GetByteArrayElements(YuvNv21, 0);
	jint * poutPixels = jenv->GetIntArrayElements(pixels, 0);

	// Full NV21 buffer viewed as height*3/2 rows of single-byte pixels.
	Mat yuv(height + height / 2, width, CV_8UC1, (unsigned char *) pBuf);
	Mat out(height, width, CV_8UC4, (unsigned char*) poutPixels);

	// One-step conversion; writes directly into the caller's int[].
	cvtColor(yuv, out, CV_YUV420sp2BGRA);

	jenv->ReleaseByteArrayElements(YuvNv21, pBuf, 0);
	jenv->ReleaseIntArrayElements(pixels, poutPixels, 0);

	return true;
}


/*
 * Class:     ImgProc_ImageProc3
 * Method:    RgbaUndistort
 * Signature: ([III[I)Z
 */
/*
 * Applies lens-distortion correction to a flat RGBA pixel buffer.
 *
 * argb:   input int[width*height] pixels
 * pixels: caller-allocated int[width*height] receiving the corrected image
 *
 * NOTE(review): the camera intrinsics (fx=width, fy=height, principal point
 * at the image centre) and the distortion coefficients k1,k2,p1,p2,k3 are
 * hard-coded — they should come from a real calibration of the lens.
 */
JNIEXPORT jboolean JNICALL Java_ImgProc_ImageProc3_RgbaUndistort
  (JNIEnv *jenv, jclass jclassz, jintArray argb, jint width, jint height, jintArray pixels){
	// Pin both Java int[] buffers and view each as an 8UC4 image.
	jint * pSrc = jenv->GetIntArrayElements(argb, 0);
	jint * pDst = jenv->GetIntArrayElements(pixels, 0);

	Mat src(height, width, CV_8UC4, (unsigned char*) pSrc);
	Mat dst(height, width, CV_8UC4, (unsigned char*) pDst);

	// 3x3 intrinsic matrix and 5x1 distortion vector (k1,k2,p1,p2,k3).
	double intrinsics[] = { width, 0, width / 2, 0, height, height / 2, 0, 0, 1 };
	double coeffs[] = { 0.1, 0.35, 0.0, 0.0, 0.01 };

	Mat cameraMatrix(3, 3, CV_64FC1, intrinsics);
	Mat distCoeffs(5, 1, CV_64FC1, coeffs);

	undistort(src, dst, cameraMatrix, distCoeffs);

	jenv->ReleaseIntArrayElements(pixels, pDst, 0);
	jenv->ReleaseIntArrayElements(argb, pSrc, 0);
	return true;
}


/*
 * Class:     ImgProc_ImageProc3
 * Method:    RgbaUndistort2
 * Signature: ([[III[I)Z
 */
/*
 * Applies lens-distortion correction to a 2-D Java int[][] RGBA image.
 *
 * Fixes over the previous version:
 *  - The pixels were staged in a `jint jniData[row][col]` VLA on the native
 *    stack, which overflows for any realistic image size; they are now
 *    copied row-by-row into a heap-allocated Mat via GetIntArrayRegion.
 *  - A LOGD call per pixel (width*height log lines) made the function
 *    unusably slow; removed.
 *  - The local reference returned by GetObjectArrayElement is now released
 *    each iteration so tall images cannot exhaust the local-reference table.
 *
 * argb:   int[rows][cols] input pixels (rows/cols should match height/width)
 * pixels: caller-allocated int[width*height] output
 */
JNIEXPORT jboolean JNICALL Java_ImgProc_ImageProc3_RgbaUndistort2(JNIEnv *jenv,
		jclass jclassz, jobjectArray argb, jint width, jint height,
		jintArray pixels) {

	jint rows = jenv->GetArrayLength(argb);
	jintArray firstRow = (jintArray) jenv->GetObjectArrayElement(argb, 0);
	jint cols = jenv->GetArrayLength(firstRow);
	jenv->DeleteLocalRef(firstRow);

	// Contiguous heap image; each jint holds one 4-byte RGBA pixel.
	Mat img(rows, cols, CV_8UC4);
	for (jint i = 0; i < rows; i++) {
		jintArray rowArr = (jintArray) jenv->GetObjectArrayElement(argb, i);
		jenv->GetIntArrayRegion(rowArr, 0, cols, (jint *) img.ptr(i));
		jenv->DeleteLocalRef(rowArr);
	}

	// Hard-coded intrinsics/distortion — see RgbaUndistort.
	double cam[] = { width, 0, width / 2, 0, height, height / 2, 0, 0, 1 };
	double distort[] = { 0.1, 0.35, 0.0, 0.0, 0.01 };

	Mat camMat(3, 3, CV_64FC1, cam);
	Mat disMat(5, 1, CV_64FC1, distort);

	jint * poutPixels = jenv->GetIntArrayElements(pixels, 0);
	Mat out(height, width, CV_8UC4, (unsigned char*) poutPixels);
	undistort(img, out, camMat, disMat);

	jenv->ReleaseIntArrayElements(pixels, poutPixels, 0);

	return true;
}


/*
 * Class:     ImgProc_ImageProc3
 * Method:    RgbaUndistortMat
 * Signature: ([IIIJ)Z
 */
/*
 * Distortion-corrects a flat RGBA int[] into a caller-supplied OpenCV Mat.
 *
 * Fix: the output Mat was dereferenced by value (`Mat out = *(Mat*)addr`),
 * a header copy. If undistort() has to (re)allocate the destination —
 * whenever the caller's Mat size/type does not already match — the new
 * buffer would land in the copy and never reach the caller. Binding by
 * reference makes the result visible through pArgbOutMatAddr in all cases.
 *
 * argb:            input int[width*height] pixels
 * pArgbOutMatAddr: native address of the caller's output cv::Mat
 */
JNIEXPORT jboolean JNICALL Java_ImgProc_ImageProc3_RgbaUndistortMat
  (JNIEnv *jenv, jclass jclassz, jintArray argb, jint width, jint height, jlong pArgbOutMatAddr){

	jint * pinPixels = jenv->GetIntArrayElements(argb, 0);

	Mat in(height, width, CV_8UC4, (unsigned char*) pinPixels);
	Mat &out = *((Mat*) pArgbOutMatAddr);

	// Hard-coded intrinsics/distortion — see RgbaUndistort.
	double cam[] = { width, 0, width / 2, 0, height, height / 2, 0, 0, 1 };
	double distort[] = { 0.1, 0.35, 0.0, 0.0, 0.01 };

	Mat camMat(3, 3, CV_64FC1, cam);
	Mat disMat(5, 1, CV_64FC1, distort);
	undistort(in, out, camMat, disMat);

	jenv->ReleaseIntArrayElements(argb, pinPixels, 0);

	return true;
}


/*
 * Class:     ImgProc_ImageProc3
 * Method:    RgbMatUndistortMat
 * Signature: (JIIJ)Z
 */
/*
 * Distortion-corrects one OpenCV Mat into another, both passed as native
 * addresses (Mat.getNativeObjAddr() on the Java side).
 *
 * Fix: bind the output Mat by reference instead of copying its header, so
 * that a (re)allocation inside undistort() is visible to the caller (the
 * input may stay a header copy — it shares the same pixel data either way).
 */
JNIEXPORT jboolean JNICALL Java_ImgProc_ImageProc3_RgbMatUndistortMat
  (JNIEnv *jenv, jclass jclassz, jlong pArgbMatAddr, jint width, jint height, jlong pArgbOutMatAddr){

	Mat &in = *((Mat*) pArgbMatAddr);
	Mat &out = *((Mat*) pArgbOutMatAddr);

	// Hard-coded intrinsics/distortion — see RgbaUndistort.
	double cam[] = { width, 0, width / 2, 0, height, height / 2, 0, 0, 1 };
	double distort[] = { 0.1, 0.35, 0.0, 0.0, 0.01 };

	Mat camMat(3, 3, CV_64FC1, cam);
	Mat disMat(5, 1, CV_64FC1, distort);
	undistort(in, out, camMat, disMat);

	return true;
}


/*
 * Class:     ImgProc_ImageProc3
 * Method:    YuvNv21UndistortRgba
 * Signature: ([BII[I)Z
 */
/*
 * NV21 frame -> distortion-corrected ARGB_8888 pixel buffer.
 *
 * Fixes over the previous version:
 *  - `undistort(tmp, tmp, ...)` was in-place; cv::undistort explicitly
 *    requires src and dst to be different Mats. The corrected image is now
 *    written into a distinct destination (the caller's pixel buffer).
 *  - The RGBA conversion followed by a split()/merge() R<->B swap is
 *    replaced by a single equivalent CV_YUV420sp2BGRA conversion.
 *
 * YuvNv21: NV21 frame, width*height*3/2 bytes
 * pixels:  caller-allocated int[width*height] output
 */
JNIEXPORT jboolean JNICALL Java_ImgProc_ImageProc3_YuvNv21UndistortRgba
  (JNIEnv *jenv, jclass jclassz, jbyteArray YuvNv21, jint width, jint height, jintArray pixels){

	jbyte * pBuf = (jbyte*) jenv->GetByteArrayElements(YuvNv21, 0);
	jint * poutPixels = jenv->GetIntArrayElements(pixels, 0);

	Mat yuv(height + height / 2, width, CV_8UC1, (unsigned char *) pBuf);
	Mat out(height, width, CV_8UC4, (unsigned char*) poutPixels);

	// NV21 -> BGRA in one step (equals RGBA conversion + R/B channel swap).
	Mat bgra(height, width, CV_8UC4);
	cvtColor(yuv, bgra, CV_YUV420sp2BGRA);

	// Hard-coded intrinsics/distortion — see RgbaUndistort.
	double cam[] = { width, 0, width / 2, 0, height, height / 2, 0, 0, 1 };
	double distort[] = { 0.1, 0.35, 0.0, 0.0, 0.01 };

	Mat camMat(3, 3, CV_64FC1, cam);
	Mat disMat(5, 1, CV_64FC1, distort);

	// Distinct src/dst as required by cv::undistort; writes straight into
	// the caller's int[].
	undistort(bgra, out, camMat, disMat);

	jenv->ReleaseByteArrayElements(YuvNv21, pBuf, 0);
	jenv->ReleaseIntArrayElements(pixels, poutPixels, 0);

	return true;
}


/*
 * Class:     ImgProc_ImageProc3
 * Method:    YuvNv21UndistortRgbaMat
 * Signature: ([BIIJ)Z
 */
/*
 * NV21 frame -> distortion-corrected image in a caller-supplied OpenCV Mat.
 *
 * Fixes over the previous version:
 *  - `undistort(tmp, tmp, ...)` was in-place, which cv::undistort forbids;
 *    the result is now written into a distinct destination.
 *  - The output Mat was a by-value header copy, so a reallocation inside
 *    undistort() would never reach the caller; it is now bound by reference.
 *  - The RGBA conversion + split()/merge() R<->B swap is replaced by one
 *    equivalent CV_YUV420sp2BGRA conversion.
 *
 * YuvNv21:  NV21 frame, width*height*3/2 bytes
 * pMatAddr: native address of the caller's output cv::Mat
 */
JNIEXPORT jboolean JNICALL Java_ImgProc_ImageProc3_YuvNv21UndistortRgbaMat
  (JNIEnv *jenv, jclass jclassz, jbyteArray YuvNv21, jint width, jint height, jlong pMatAddr){

	jbyte * pBuf = (jbyte*) jenv->GetByteArrayElements(YuvNv21, 0);

	Mat yuv(height + height / 2, width, CV_8UC1, (unsigned char *) pBuf);
	Mat &out = *((Mat*) pMatAddr);

	// NV21 -> BGRA in one step (equals RGBA conversion + R/B channel swap).
	Mat bgra(height, width, CV_8UC4);
	cvtColor(yuv, bgra, CV_YUV420sp2BGRA);

	// Hard-coded intrinsics/distortion — see RgbaUndistort.
	double cam[] = { width, 0, width / 2, 0, height, height / 2, 0, 0, 1 };
	double distort[] = { 0.1, 0.35, 0.0, 0.0, 0.01 };

	Mat camMat(3, 3, CV_64FC1, cam);
	Mat disMat(5, 1, CV_64FC1, distort);

	// Distinct src/dst as required by cv::undistort.
	undistort(bgra, out, camMat, disMat);

	jenv->ReleaseByteArrayElements(YuvNv21, pBuf, 0);

	return true;
}


#ifdef __cplusplus
}
#endif



  • 1
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 5
    评论
畸变校正是图像处理中的一个重要步骤,它可以消除图像中因镜头畸变而引起的失真现象。OpenCV是一个广泛使用的计算机视觉库,它提供了许多图像处理的函数和工具。下面是一个使用OpenCV(Python接口)实现畸变校正的示例代码。 首先,我们需要读取原始图像和相机的标定参数。假设我们已经有了一个名为"image.jpg"的图像和一个名为"camera.xml"的相机参数文件(XML格式,包含camera_matrix相机内参矩阵和dist_coeffs畸变系数),可以用cv2.FileStorage加载它们: ``` python import cv2 # Load image img = cv2.imread("image.jpg") # Load camera parameters fs = cv2.FileStorage("camera.xml", cv2.FILE_STORAGE_READ) camera_matrix = fs.getNode("camera_matrix").mat() dist_coeffs = fs.getNode("dist_coeffs").mat() fs.release() ``` 接下来,我们需要使用相机参数和图像的大小创建一个畸变校正映射。这可以通过cv2.initUndistortRectifyMap函数完成。示例代码如下: ``` python # Get image size h, w = img.shape[:2] # Optionally compute a refined camera matrix for the output new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix( camera_matrix, dist_coeffs, (w, h), 1, (w, h)) # Create rectification map map1, map2 = cv2.initUndistortRectifyMap( camera_matrix, dist_coeffs, None, new_camera_matrix, (w, h), cv2.CV_16SC2) ``` cv2.initUndistortRectifyMap需要以下参数: - cameraMatrix: 相机内参矩阵 - distCoeffs: 相机畸变系数 - R: 旋转矩阵,单目、无立体校正时传None(等价于单位矩阵) - newCameraMatrix: 输出图像使用的新相机内参矩阵 - size: 输出图像的大小 - m1type: 映射类型,如cv2.CV_16SC2 一旦我们有了校正映射,我们就可以使用cv2.remap函数来对图像进行畸变校正。示例代码如下: ``` python # Apply rectification map undistorted = cv2.remap(img, map1, map2, cv2.INTER_LINEAR) ``` 在上面的代码中,我们使用cv2.remap函数将校正映射应用于原始图像。这个函数需要以下参数: - src: 输入图像 - map1, map2: 校正映射 - interpolation: 插值方法 最后,我们可以将校正后的图像保存到文件中。示例代码如下: ``` python # Save output image cv2.imwrite("undistorted.jpg", undistorted) ``` 如果只需要处理单幅图像,也可以直接调用 undistorted = cv2.undistort(img, camera_matrix, dist_coeffs) 一步完成。以上就是使用OpenCV实现畸变校正的完整代码。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 5
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值