ndk实例总结系列
ndk实例总结:jni实例
ndk实例总结:opencv图像处理
ndk实例总结:安卓Camera与usbCamera原始图像处理
ndk实例总结补充:使用V4L2采集usb图像分析
ndk实例总结:使用ffmpeg播放rtsp流
ndk实例总结:基于libuvc的双usbCamera处理
ndk实例总结补充:使用libuvc采集usb图像分析
ndk实例总结:jni日志存储
前言
本篇博客总结下在jni中对安卓Camera与UsbCamera的原始图像进行处理的使用实例
项目构架
项目构架与上一篇文章ndk实例总结:opencv图像处理类似,也同样使用到了opencv库,区别只是jni代码的不同,build.gradle与CMakeLists.txt文件也类似,这里不再详细说明
安卓Camera原始图像处理实例
该实例的场景是用于将手机摄像头的图像进行特殊的处理后再预览出来
流程如下:使用camera api拿到摄像头的原始图像,然后在jni层进行处理,最后通过textureView将处理好的图像绘制出来
系统摄像头操作函数封装
object Camera1Manager {
    private val TAG = Camera1Manager::class.java.simpleName
    // Arbitrary texture name for the off-screen SurfaceTexture.
    private const val MAGIC_TEXTURE_ID = 10
    // Size of the bitmap drawn on screen (the raw capture rotated 90°).
    const val PREVIEW_WIDTH = 480
    const val PREVIEW_HEIGHT = 640
    // Size of the raw frames requested from the camera.
    const val RAW_PREVIEW_WIDTH = 640
    const val RAW_PREVIEW_HEIGHT = 480
    private var isProperResolution = false
    // The system camera in use
    private var camera: Camera? = null
    // Whether preview is currently running
    private var isPreview = false
    // Reusable buffer handed to the camera for NV21 preview frames.
    var preBuffer: ByteArray? = null
        private set
    // Off-screen SurfaceTexture so frames are delivered without being drawn.
    private val emptySurfaceTexture: SurfaceTexture by lazy { SurfaceTexture(MAGIC_TEXTURE_ID) }
    // Number of preview frames received so far (diagnostics).
    var systemCameraFrame: Int = 0

    /**
     * Opens the camera with the given id; no-op when one is already open.
     *
     * @param id camera id to open
     */
    fun openCamera(id: Int) {
        if (camera == null) {
            camera = Camera.open(id)
        }
    }

    /**
     * Configures the open camera and starts preview with a callback buffer.
     *
     * @return 0 on success; -1 when already previewing, when no camera is
     *         open, or when the camera does not support the raw preview size.
     */
    fun setupCamera(): Int {
        Log.i(TAG, "setupCamera: isPreview $isPreview")
        if (isPreview) {
            return -1
        }
        // BUG FIX: reset before probing — a stale `true` left over from a
        // previous camera session would let an unsupported resolution through.
        isProperResolution = false
        camera?.let { camera ->
            camera.setPreviewTexture(emptySurfaceTexture)
            Log.i(TAG, "camera.open OK")
            val parameters = camera.parameters
            val supportedPreviewSizes = parameters.supportedPreviewSizes
            for (i in supportedPreviewSizes.indices) {
                Log.i(TAG, "setupCamera : supportedPreviewSizes " + i + " = " +
                        supportedPreviewSizes[i].width + " x " +
                        supportedPreviewSizes[i].height)
                if (supportedPreviewSizes[i].width == RAW_PREVIEW_WIDTH && supportedPreviewSizes[i].height == RAW_PREVIEW_HEIGHT) {
                    isProperResolution = true
                }
            }
            if (!isProperResolution) {
                Log.e(TAG, "no proper resolution")
                return -1
            }
            Log.i(TAG, "setupCamera: IMG_WIDTH$RAW_PREVIEW_WIDTH")
            Log.i(TAG, "setupCamera: IMG_HEIGHT$RAW_PREVIEW_HEIGHT")
            parameters.setPreviewSize(RAW_PREVIEW_WIDTH, RAW_PREVIEW_HEIGHT)
            parameters.setPictureSize(RAW_PREVIEW_WIDTH, RAW_PREVIEW_HEIGHT)
            parameters.setRecordingHint(true)
            camera.parameters = parameters
            // Works like setPreviewCallback but delivers frames into a
            // caller-supplied buffer, avoiding per-frame allocations; it must
            // be paired with addCallbackBuffer and set before startPreview().
            camera.setPreviewCallbackWithBuffer(StreamIt())
            val byteSize = parameters.previewSize.width * parameters.previewSize.height * ImageFormat.getBitsPerPixel(ImageFormat.NV21) / 8
            preBuffer = ByteArray(byteSize)
            camera.addCallbackBuffer(preBuffer)
            camera.startPreview()
            Log.i(TAG, "startPreview OK")
            isPreview = true
            return 0
        } ?: let {
            Log.e(TAG, "camera.open fail")
            return -1
        }
    }

    /**
     * Stops preview (if running) and releases the camera.
     */
    fun releaseCamera() {
        // BUG FIX: release the camera even when preview never started, so a
        // failed setupCamera() does not leak the opened camera.
        camera?.apply {
            if (isPreview) {
                setPreviewCallbackWithBuffer(null)
                setPreviewTexture(null)
                stopPreview()
            }
            release()
        }
        camera = null
        isPreview = false
        emptySurfaceTexture.release()
        Log.i(TAG, "releaseCamera: isPreview $isPreview")
    }

    /**
     * Raw-frame callback: counts frames and recycles the buffer.
     */
    internal class StreamIt : Camera.PreviewCallback {
        override fun onPreviewFrame(data: ByteArray, camera: Camera) {
            systemCameraFrame++
            // Hand the buffer back so the camera can reuse it for the next frame.
            camera.addCallbackBuffer(data)
        }
    }
}
camera 1 api 的封装类,封装了打开摄像头、设置摄像头参数、关闭摄像头和获取原始帧功能
这里有一点要注意:正常获取原始图像的流程是将surfaceView的surfaceHolder传递给camera,然后surfaceView会自动绘制摄像头的图像,同时也能获取到原始图像,但是如果只想要原始图像而不希望surfaceView自动把图像绘制出来的话就要进行如下操作
private val emptySurfaceTexture: SurfaceTexture by lazy { SurfaceTexture(MAGIC_TEXTURE_ID) }
...
fun setupCamera(): Int {
camera.setPreviewTexture(emptySurfaceTexture)
...
camera.setPreviewCallbackWithBuffer(StreamIt())
...
camera.startPreview()
}
创建一个空的SurfaceTexture对象,然后调用setPreviewTexture,最后再调用setPreviewCallbackWithBuffer和startPreview,这样就既能不自动绘制图像又能获取到原始图像
系统摄像头的绘制
// Reusable ARGB_8888 bitmap sized to the rotated preview; created lazily and
// refilled from JNI (pixelToBmp) on every frame.
private val mBitmap: Bitmap by lazy {
    Bitmap.createBitmap(Camera1Manager.PREVIEW_WIDTH, Camera1Manager.PREVIEW_HEIGHT,
            Bitmap.Config.ARGB_8888)
}
// Surface is ready: open and configure the camera, hand the frame geometry to
// the native layer, then start the render loop.
override fun onSurfaceTextureAvailable(surface: SurfaceTexture, width: Int, height: Int) {
    // Open camera id 1 and configure preview.
    Camera1Manager.openCamera(1)
    if (Camera1Manager.setupCamera() == -1) {
        // NOTE(review): the opened camera is not released on this failure
        // path — confirm whether releaseCamera() should run here.
        Toast.makeText(context, "摄像头初始化失败", Toast.LENGTH_SHORT).show()
        return
    }
    val ret = CameraProcessLib.prepareSystemCamera(Camera1Manager.RAW_PREVIEW_WIDTH, Camera1Manager.RAW_PREVIEW_HEIGHT)
    Log.i(TAG, "onSurfaceTextureAvailable: ret $ret")
    if (ret == -1) {
        Toast.makeText(context, "算法初始化失败", Toast.LENGTH_SHORT).show()
        return
    }
    // Kick off the drawing thread.
    PlayThread().start()
}
// Surface size changes are only logged; no re-layout is performed here.
override fun onSurfaceTextureSizeChanged(surface: SurfaceTexture, width: Int, height: Int) {
    Log.i(TAG, "onSurfaceTextureSizeChanged: ")
}
// Tears down the pipeline when the TextureView's surface goes away.
override fun onSurfaceTextureDestroyed(surface: SurfaceTexture): Boolean {
    Log.i(TAG, "onSurfaceTextureDestroyed: ")
    // Ask PlayThread to stop; it acknowledges by flipping `stop` back to false.
    stop = true
    // Spin-wait for that acknowledgement before releasing resources the
    // thread may still be using.
    // NOTE(review): this blocks the calling (UI) thread and relies on `stop`
    // being visible across threads — confirm it is declared @Volatile.
    while (stop) {
        SystemClock.sleep(50)
    }
    Camera1Manager.releaseCamera()
    CameraProcessLib.releaseSystemCamera()
    releaseBitmap()
    return true
}
使用了TextureView来进行图像绘制,在onSurfaceTextureAvailable函数中设置摄像头,开启绘制线程,将图像尺寸传递给jni层等,在onSurfaceTextureDestroyed中释放资源
// Render loop: grab the latest preview buffer, process it in JNI, convert it
// into mBitmap and draw, until the owner asks us to stop.
private inner class PlayThread : Thread() {
    override fun run() {
        while (!stop) {
            SystemClock.sleep(3)
            CameraProcessLib.processSystemCamera(Camera1Manager.preBuffer)
            CameraProcessLib.pixelToBmp(mBitmap)
            drawBitmap(mBitmap)
        }
        // Acknowledge the stop request so the UI thread can continue teardown.
        stop = false
    }
}
// Blits the given bitmap onto the TextureView's canvas and counts the frame.
private fun drawBitmap(bitmap: Bitmap) {
    lockCanvas()?.let { canvas ->
        // Wipe the previous frame, then draw the new one scaled src -> dst.
        canvas.drawColor(Color.TRANSPARENT, PorterDuff.Mode.CLEAR)
        canvas.drawBitmap(bitmap, mSrcRect, mDstRect, null)
        // Commit the canvas back to the view.
        unlockCanvasAndPost(canvas)
    }
    drawFrame++
}
绘制线程是一个死循环,每次循环都需要拿一次摄像头的原始图像,也就是Byte数组,将数组传递到jni层进行处理,然后将图像数据放入颜色格式为ARGB_8888的bitmap中,最后进行绘制
jni层图像处理
// Records the Java-side frame geometry. The sensor delivers frames rotated
// 90°, so the on-screen (preview) dimensions are the capture dimensions
// swapped.
int prepareSystemCamera(int width, int height) {
    LOGE("prepareSystemCamera");
    IMG_WIDTH = width;
    IMG_HEIGHT = height;
    // Portrait display of a landscape capture: swap the axes.
    PREVIEW_IMG_WIDTH = IMG_HEIGHT;
    PREVIEW_IMG_HEIGHT = IMG_WIDTH;
    return JNI_OK;
}
// Converts one NV21 preview frame to RGBA, rotates it 90° (transpose + flip)
// and stores the result in the global `frame` mat.
void processSystemCamera(JNIEnv *env, jbyteArray &yuv) {
    jbyte *raw = env->GetByteArrayElements(yuv, nullptr);
    // NV21 layout: height rows of Y followed by height/2 rows of interleaved VU.
    cv::Mat nv21(IMG_HEIGHT + IMG_HEIGHT / 2, IMG_WIDTH, CV_8UC1, (uchar *) raw);
    cv::Mat rgba;
    cv::cvtColor(nv21, rgba, cv::COLOR_YUV420sp2RGBA);
    // Rotate for display: transpose then vertical flip.
    cv::transpose(rgba, rgba);
    cv::flip(rgba, rgba, 0);
    frame = rgba;
    env->ReleaseByteArrayElements(yuv, raw, 0);
}
// Copies the current processed frame into the Java bitmap; paints a solid red
// placeholder when no frame has been produced yet.
void pixelToBmp(JNIEnv *env, jobject &bitmap) {
    cv::Mat tmp;
    if (frame.empty()) {
        // No frame yet: solid red RGBA placeholder.
        tmp = cv::Mat(PREVIEW_IMG_HEIGHT, PREVIEW_IMG_WIDTH, CV_8UC4,
                      cv::Scalar(255, 0, 0, 255));
    } else {
        // Crop to the preview rectangle; clone so the pixel rows are contiguous.
        tmp = frame(cv::Rect(0, 0, PREVIEW_IMG_WIDTH, PREVIEW_IMG_HEIGHT)).clone();
    }
    pixel2Bmp(env, bitmap, tmp.data);
}
/**
 * Copies a tightly-packed RGBA pixel buffer into an Android RGBA_8888 bitmap.
 * Logs and returns without copying on any bitmap-API failure or format
 * mismatch.
 */
void pixel2Bmp(JNIEnv *env, jobject &bitmap, void *data) {
    AndroidBitmapInfo info;
    void *pixels;
    int ret, width = 0, height = 0;
    if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
        LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
        return;
    }
    width = info.width;
    height = info.height;
    if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
        LOGE("Bitmap format is not RGBA_8888 !");
        return;
    }
    if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
        LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
        // BUG FIX: bail out — `pixels` is uninitialized here and the original
        // code fell through into memcpy on it.
        return;
    }
    // NOTE(review): assumes info.stride == width * 4 (tight packing); a
    // stride-aware row copy would be safer if padded bitmaps can occur.
    memcpy(pixels, data, (size_t) (width * height * 4));
    AndroidBitmap_unlockPixels(env, bitmap);
}
prepareSystemCamera函数中存放java层传递过来的图像尺寸,由于系统摄像头拿到的原始图像是90度偏移的,因此需要把旋转后的尺寸也保存下来
processSystemCamera函数中进行图像的颜色格式转换和90度旋转,从系统摄像头获取的图像的颜色格式默认是YUV420sp,而bitmap的颜色格式是RGB类型的,因此这里创建一个opencv的mat的对象,将图片的颜色格式转换为RGBA,然后进行90度的旋转,在该函数中可以进行一些特殊的处理,比如人脸识别、高斯模糊等
pixelToBmp函数将opencv的mat对象里的图片像素数据放入bitmap对象中
USBCamera原始图像处理实例
该实例的场景是用于从usb摄像头中拿到图像进行特殊的处理后再预览出来
流程如下:使用linux的v4l2 api拿到摄像头的原始图像,然后在jni层进行处理,最后通过textureView将处理好的图像绘制出来
usb摄像头的绘制
// Reusable ARGB_8888 bitmap at the USB preview size; created lazily and
// refilled from JNI (pixelToBmp) on every frame.
private val mBitmap: Bitmap by lazy {
    Bitmap.createBitmap(PREVIEW_WIDTH, PREVIEW_HEIGHT,
            Bitmap.Config.ARGB_8888)
}
// Surface is ready: initialize the native V4L2 capture pipeline, then start
// the render loop.
override fun onSurfaceTextureAvailable(surface: SurfaceTexture, width: Int, height: Int) {
    val ret = CameraProcessLib.prepareUsbCamera(PREVIEW_WIDTH, PREVIEW_HEIGHT)
    Log.i(TAG, "onSurfaceTextureAvailable: ret $ret")
    if (ret == -1) {
        Toast.makeText(context, "初始化失败", Toast.LENGTH_SHORT).show()
    } else {
        // Kick off the drawing thread.
        PlayThread().start()
    }
}
// Surface size changes are only logged; no re-layout is performed here.
override fun onSurfaceTextureSizeChanged(surface: SurfaceTexture, width: Int, height: Int) {
    Log.i(TAG, "onSurfaceTextureSizeChanged: ")
}
// Tears down the USB-camera pipeline when the surface goes away.
override fun onSurfaceTextureDestroyed(surface: SurfaceTexture): Boolean {
    Log.i(TAG, "onSurfaceTextureDestroyed: ")
    // Ask PlayThread to stop; it acknowledges by flipping `stop` back to false.
    stop = true
    // Spin-wait for that acknowledgement before releasing native resources.
    // NOTE(review): blocks the calling (UI) thread; relies on `stop` being
    // visible across threads — confirm it is declared @Volatile.
    while (stop) {
        SystemClock.sleep(50)
    }
    CameraProcessLib.releaseUsbCamera()
    releaseBitmap()
    return true
}
usb摄像头绘制所用的视图类与系统摄像头绘制相同都是TextureView,流程也类似,区别只是usb摄像头获取原始图像是在jni层获取
// Render loop: pull a frame from V4L2 inside JNI, convert it into mBitmap and
// draw, until the owner asks us to stop.
private inner class PlayThread : Thread() {
    override fun run() {
        while (!stop) {
            CameraProcessLib.processUsbCamera()
            CameraProcessLib.pixelToBmp(mBitmap)
            drawBitmap(mBitmap)
        }
        // Acknowledge the stop request so the UI thread can continue teardown.
        stop = false
    }
}
// Blits the given bitmap onto the TextureView's canvas and counts the frame.
private fun drawBitmap(bitmap: Bitmap) {
    lockCanvas()?.let { canvas ->
        // Wipe the previous frame, then draw the new one scaled src -> dst.
        canvas.drawColor(Color.TRANSPARENT, PorterDuff.Mode.CLEAR)
        canvas.drawBitmap(bitmap, mSrcRect, mDstRect, null)
        // Commit the canvas back to the view.
        unlockCanvasAndPost(canvas)
    }
    drawFrame++
}
绘制线程也类似,区别只是系统摄像头需要从camera封装类中拿到原始图像的Byte数组然后传递给jni层,而usb摄像头直接在jni层从v4l2中取出一帧图像进行处理
int prepareUsbCamera(int width, int height) {
LOGE("prepareUsbCamera");
IMG_WIDTH = width;
IMG_HEIGHT = height;
PREVIEW_IMG_WIDTH = width;
PREVIEW_IMG_HEIGHT = height;
if (oV4LAchieve == nullptr) {
oV4LAchieve = new V4LAchieve(getVideoId(), IMG_WIDTH, IMG_HEIGHT);
if (!oV4LAchieve->OpenCamera()) {
LOGE("V4LAchieve OpenCamera ERROR");
return JNI_ERR;
}
}
return JNI_OK;
}
/**
 * Scans /dev for V4L2 nodes named "video<N>" and returns the largest N.
 * Returns 0 (i.e. /dev/video0) when /dev cannot be opened or no video node
 * exists.
 */
int getVideoId() {
    const std::string dirname = "/dev/";
    DIR *dp = opendir(dirname.c_str());
    if (dp == nullptr) {
        LOGE("Can't open %s", dirname.c_str());
        // BUG FIX: the original continued into readdir(NULL).
        return 0;
    }
    // BUG FIX: the original pattern "video+[0-9]\d*" contained an invalid
    // escape (\d in a non-raw string) and matched "vide" followed by one or
    // more 'o'; use the intended "video<digits>" pattern.
    const std::regex reg_dev("video[0-9]+", std::regex::icase);
    std::vector<std::string> files;
    struct dirent *dirp;
    while ((dirp = readdir(dp)) != nullptr) {
        if (std::regex_match(dirp->d_name, reg_dev)) {
            LOGI("getVideoDevice %s", dirp->d_name);
            files.emplace_back(dirp->d_name);
        }
    }
    closedir(dp);
    if (files.empty()) {
        // BUG FIX: files.back() on an empty vector is undefined behavior.
        LOGE("no /dev/video* device found");
        return 0;
    }
    // Lexicographic sort; the last entry carries the highest device number
    // for single-digit ids (the common case on these boards).
    std::sort(files.begin(), files.end());
    for (const auto &file : files) {
        LOGI("file %s", file.c_str());
    }
    const std::string &videoName = files.back();
    LOGI("files.back() %s", videoName.c_str());
    // "video" is 5 characters; everything after it is the numeric id.
    int id = atoi(videoName.substr(5).c_str());
    LOGI("getVideoId %d", id);
    return id;
}
void processUsbCamera() {
oV4LAchieve->CameraVideoGetLoop();
cv::Mat yuvimg(IMG_HEIGHT, IMG_WIDTH, CV_8UC2, oV4LAchieve->GetpYUYV422());
cv::cvtColor(yuvimg, yuvimg, cv::COLOR_YUV2RGBA_YUYV);
frame = yuvimg;
}
// Copies the current processed frame into the Java bitmap; paints a solid red
// placeholder when no frame has been produced yet.
void pixelToBmp(JNIEnv *env, jobject &bitmap) {
    cv::Mat tmp;
    if (frame.empty()) {
        // No frame yet: solid red RGBA placeholder.
        tmp = cv::Mat(PREVIEW_IMG_HEIGHT, PREVIEW_IMG_WIDTH, CV_8UC4,
                      cv::Scalar(255, 0, 0, 255));
    } else {
        // Crop to the preview rectangle; clone so the pixel rows are contiguous.
        tmp = frame(cv::Rect(0, 0, PREVIEW_IMG_WIDTH, PREVIEW_IMG_HEIGHT)).clone();
    }
    pixel2Bmp(env, bitmap, tmp.data);
}
// Closes the V4L2 device and frees the wrapper; safe to call repeatedly.
void releaseUsbCamera() {
    LOGE("releaseUsbCamera");
    if (oV4LAchieve == nullptr) {
        return;
    }
    oV4LAchieve->CloseCamera();
    delete oV4LAchieve;
    oV4LAchieve = nullptr;
}
/**
 * Copies a tightly-packed RGBA pixel buffer into an Android RGBA_8888 bitmap.
 * Logs and returns without copying on any bitmap-API failure or format
 * mismatch.
 */
void pixel2Bmp(JNIEnv *env, jobject &bitmap, void *data) {
    AndroidBitmapInfo info;
    void *pixels;
    int ret, width = 0, height = 0;
    if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
        LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
        return;
    }
    width = info.width;
    height = info.height;
    if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
        LOGE("Bitmap format is not RGBA_8888 !");
        return;
    }
    if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
        LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
        // BUG FIX: bail out — `pixels` is uninitialized here and the original
        // code fell through into memcpy on it.
        return;
    }
    // NOTE(review): assumes info.stride == width * 4 (tight packing); a
    // stride-aware row copy would be safer if padded bitmaps can occur.
    memcpy(pixels, data, (size_t) (width * height * 4));
    AndroidBitmap_unlockPixels(env, bitmap);
}
prepareUsbCamera函数中存放java层传递过来的图像尺寸,然后初始化v4l2封装类对象,然后调用v4l2对象的OpenCamera函数
初始化v4l2封装类对象时需要传入usb摄像头的设备号,一般是"/dev/video0",但不排除系统本身已经有video设备的情况,因此这里调用了getVideoId函数,使用正则匹配所有video设备,然后进行排序取序号最大的设备
processUsbCamera函数中进行图像的采集和颜色格式转换,首先调用v4l2对象的CameraVideoGetLoop函数进行图像采集,然后通过v4l2对象的GetpYUYV422函数获取一帧图像数据后放入opencv的mat对象中,由于从usb摄像头获取的图像的颜色格式默认是YUV,所以这里使用opencv将mat的颜色格式转换为RGBA,在该函数中同样可以进行一些特殊的处理,比如人脸识别、高斯模糊等
pixelToBmp函数将opencv的mat对象里的图片像素数据放入bitmap对象中
最后完整代码可以参考demo
ndk开发基础学习系列:
JNI和NDK编程(一)JNI的开发流程
JNI和NDK编程(二)NDK的开发流程
JNI和NDK编程(三)JNI的数据类型和类型签名
JNI和NDK编程(四)JNI调用Java方法的流程