Android端获取摄像头数据有好几种。我使用的是onPreviewFrame(byte data[],Camera camera);这个回调函数
遇到的问题:
问题1、打印了下data[]的长度,是3110400。
手机摄像头像素是1920*1080=2073600
3110400/2073600=1.5,这个1.5的由来:YUV420格式平均每个像素占12bit(1.5字节),所以缓冲区字节数正好是像素数的1.5倍。
data[]里默认存放的是YUV420SP格式的帧,包含Y、U、V三个分量。其中Y分量占整个数据长度的2/3,每个像素对应一个Y值,控制整体的图像内容(亮度);U、V占剩下的1/3,控制颜色。所以1920*1080=2073600正好是Y分量的字节数,剩下的1036800字节是U和V分量。帧的width和height就是Y分量的width和height。
问题2、YUV格式有好几种,有YUV420SP有YUV420P,等等,只看这两种吧。
从这两张图里也能看出来,Y分量通常占整个帧长度的2/3。
第一张YUV420SP,下边U和V是相互交错着的,而YUV420P的U和V就很整齐。YUV420P这个性质很重要,我们要利用这个性质,使分离YUV分量变得容易。
问题3、改变onPreviewFrame()回调的data[]的大小,改变预览帧data[]的格式。
直接上
<span style="white-space:pre"> </span>Camera.Parameters parameters = mCamera.getParameters();
通过parameters来设置预览帧的大小(就是改变帧的像素),和预览帧的格式。
parameters.setPreviewSize(864, 480);
parameters.setPreviewFormat(ImageFormat.YV12);
如果你不设置setPreviewFormat的话,默认回调的data[]格式是YUV420SP。CbCr就是U和V。Cb控制蓝色,Cr控制红色。直接点进源码里看就好。
所以一定要设置一下格式为ImageFormat.YV12,就是YUV420P。ImageFormat.YV12这个是我在parameters.setPreviewFormat()的文档(下图)里找到的。
最后,最重要一点:把parameters设置回去!我忘了设置,调了好久,发现data[]永远是3110400大小,改了parameters.setPreviewFormat()也没用。
mCamera.setParameters(parameters);
Android端完了,剩下的就是底层NDK端FFMpeg的调试。
讲讲遇到的问题:
问题1、由于开始先VS上做的实验,然后移植。结果导致把所有代码都放到一个jni函数里,结果一直崩溃,原来是:因为每一帧都要传进来压缩,所以一直在执行av_register_all();等等类似代码。导致崩溃。所以要把这类初始化的过程,单独放到一个native方法里。把AVCodecContext *pCodecCtx;这种声明成全局变量。
问题2、把传进来的jbyteArray转换成jbyte *,忘了在结束的时候释放掉,导致运行一秒钟左右就会内存溢出。
<span style="font-size:18px;">jbyte *yuv420sp = (jbyte*) (*env)->GetByteArrayElements(env, yuvdata, 0);</span>
最后一定要有这句,来把内存释放掉<span style="font-size:18px;">(*env)->ReleaseByteArrayElements(env, yuvdata, yuv420sp, 0);</span>
问题3、在jni里写本地文件,路径为char filename_out[] = "/storage/emulated/0/yourname.h264";
JAVA
public class MainActivity extends Activity implements Camera.PreviewCallback,
        SurfaceHolder.Callback {
    List<Size> list;
    SurfaceView mSurfaceView;
    SurfaceHolder mSurfaceHolder;
    Camera mCamera;
    TextView tv;
    // Moves preview frames off the camera callback thread before handing them
    // to the native encoder.
    // NOTE(review): the byte[] delivered to onPreviewFrame may be recycled by
    // the camera while this message is queued — copy the array if encoded
    // frames ever look corrupted.
    Handler mHandler = new Handler() {
        public void handleMessage(android.os.Message msg) {
            switch (msg.what) {
            case 1:
                byte[] bytedata = msg.getData().getByteArray("messageyuvdata");
                if (bytedata != null) {
                    // Encoding() returns 1 on success, -1 on failure; the
                    // result is intentionally ignored here.
                    addVideoData(bytedata);
                    tv.setText("length:" + length);
                }
                break;
            case 2:
                String s = msg.getData().getString("supportFrameSize");
                tv.setText(s);
                break;
            }
        };
    };
    int temp = 0;   // native library version / init result
    int length = 0; // size of the last preview frame in bytes

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        tv = (TextView) findViewById(R.id.tv);
        // One-time native encoder setup (codec context + output file).
        temp = FFMpegLib.getVersion();
        mSurfaceView = (SurfaceView) this.findViewById(R.id.surfaceview);
        mSurfaceHolder = mSurfaceView.getHolder();
        mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
        mSurfaceHolder.addCallback(this);
    }

    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        length = data.length;
        // Log the frame size; the original logged the array's identity hash,
        // which is meaningless.
        Log.i("log", "frame bytes: " + data.length);
        Message msg = new Message();
        Bundle bl = new Bundle();
        bl.putByteArray("messageyuvdata", data);
        msg.setData(bl);
        msg.what = 1;
        mHandler.sendMessage(msg);
    }

    @Override
    public void surfaceChanged(SurfaceHolder holder, int format, int width,
            int height) {
        mCamera.startPreview();
    }

    @Override
    public void surfaceCreated(SurfaceHolder holder) {
        // Open the back camera (the original comment said "front", but the
        // code opens CAMERA_FACING_BACK).
        mCamera = Camera.open(CameraInfo.CAMERA_FACING_BACK);
        try {
            Camera.Parameters parameters = mCamera.getParameters();
            List<Integer> supportedPictureFormats = parameters
                    .getSupportedPictureFormats();
            for (Integer integer : supportedPictureFormats) {
                Log.i("sun", integer + "");
            }
            list = parameters.getSupportedPreviewSizes();
            parameters.setPreviewSize(864, 480);
            // YV12 is the planar layout (Y, then V, then U) the native
            // encoder expects; the default preview format would be the
            // semi-planar NV21 (YUV420SP).
            parameters.setPreviewFormat(ImageFormat.YV12);
            // setPreviewFpsRange takes fps * 1000. The original (20, 20) is
            // an invalid range, which makes setParameters() below throw — the
            // exception was swallowed by this catch, so NONE of the settings
            // (including YV12) were ever applied.
            parameters.setPreviewFpsRange(20000, 20000); // fixed 20 fps
            parameters.setPictureFormat(PixelFormat.JPEG); // still-picture format
            // Build the supported-size report; starting from null produced a
            // leading "null" in the original output.
            StringBuilder supportFrameSize = new StringBuilder();
            for (int i = 0; i < list.size(); i++) {
                supportFrameSize.append(list.get(i).width).append('-')
                        .append(list.get(i).height).append("||||||");
            }
            Message msg = new Message();
            Bundle bl = new Bundle();
            bl.putString("supportFrameSize", supportFrameSize.toString());
            msg.setData(bl);
            msg.what = 2;
            mHandler.sendMessage(msg);
            // Crucial: push the modified parameters back to the camera.
            mCamera.setParameters(parameters);
        } catch (Exception e) {
            e.printStackTrace();
        }
        try {
            // Surface that displays the preview.
            mCamera.setPreviewDisplay(mSurfaceHolder);
        } catch (IOException e) {
            e.printStackTrace();
        }
        // This callback is how we receive the raw preview frames.
        mCamera.setPreviewCallback(MainActivity.this);
        mCamera.startPreview();
    }

    @Override
    public void surfaceDestroyed(SurfaceHolder holder) {
        // Detach the callback first so no frame can arrive after release().
        mCamera.setPreviewCallback(null);
        mCamera.stopPreview();
        mCamera.release();
    }

    // Serializes access to the (stateful, single-stream) native encoder.
    public synchronized int addVideoData(byte[] data) {
        return FFMpegLib.Encoding(data);
    }

    @Override
    protected void onStart() {
        super.onStart();
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        // Flush delayed frames and close the output file / codec.
        FFMpegLib.CloseVideo();
    }
}
JNI
#include <string.h>
#include <stdio.h>
#include <android/log.h>
#include <stdlib.h>
#include <jni.h>
#include <ffmpeg/libavcodec/avcodec.h>
#include "ffmpeg/libavformat/avformat.h"
#include "ffmpeg/libavdevice/avdevice.h"
#include "ffmpeg/libavutil/avutil.h"
#include "ffmpeg/libavutil/opt.h"
#include "ffmpeg/libavutil/imgutils.h"
#include "ffmpeg/libavutil/log.h"
#define TEST_H264 1
#ifdef ANDROID
#include <jni.h>
#include <android/log.h>
#define LOGE(format, ...) __android_log_print(ANDROID_LOG_ERROR, "(>_<)", format, ##__VA_ARGS__)
#define LOGI(format, ...) __android_log_print(ANDROID_LOG_INFO, "(^_^)", format, ##__VA_ARGS__)
#else
#define LOGE(format, ...) printf("(>_<) " format "\n", ##__VA_ARGS__)
#define LOGI(format, ...) printf("(^_^) " format "\n", ##__VA_ARGS__)
#endif
/* Encoder state shared across the three JNI calls: getVersion() initializes
 * it, Encoding() uses it once per frame, CloseVideo() tears it down. */
AVCodec *pCodec;                   /* H.264 encoder, looked up in getVersion() */
AVCodecContext *pCodecCtx = NULL;  /* open codec context, lives across calls */
int i, ret, got_output;            /* scratch: loop index, retcode, flush flag */
FILE *fp_out;                      /* output .h264 elementary-stream file */
AVFrame *pFrame;                   /* reused per-frame AVFrame pointer */
AVPacket pkt;                      /* packet receiving encoded data */
int y_size;                        /* width*height = bytes in one Y plane */
int framecnt = 0;                  /* number of encoded frames written so far */
char filename_out[] = "/storage/emulated/0/yourname.h264";
int in_w = 864, in_h = 480;        /* must match setPreviewSize() on the Java side */
int count = 0;                     /* pts counter, one tick per input frame */
/* One-time encoder setup: registers codecs, finds the H.264 encoder,
 * configures and opens the codec context, and opens the output file.
 * Must be called once before Encoding().  Returns 1 on success, -1 on
 * failure (all partially-acquired resources are released on failure). */
JNIEXPORT jint JNICALL Java_com_cpi_ffmpeg_FFMpegLib_getVersion(JNIEnv *env,
		jclass jclass) {
	avcodec_register_all();
	pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!pCodec) {
		LOGE("Codec not found"); /* printf() is invisible on Android */
		return -1;
	}
	pCodecCtx = avcodec_alloc_context3(pCodec);
	if (!pCodecCtx) {
		LOGE("Could not allocate video codec context");
		return -1;
	}
	pCodecCtx->bit_rate = 400000;
	pCodecCtx->width = in_w;
	pCodecCtx->height = in_h;
	pCodecCtx->time_base.num = 1;
	pCodecCtx->time_base.den = 20; /* must match the camera's frame rate */
	pCodecCtx->gop_size = 10;
	pCodecCtx->max_b_frames = 5;
	pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
	/* low-latency x264 settings for live capture */
	av_opt_set(pCodecCtx->priv_data, "preset", "superfast", 0);
	av_opt_set(pCodecCtx->priv_data, "tune", "zerolatency", 0);
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		LOGE("Could not open codec");
		av_free(pCodecCtx); /* don't leak the context on failure */
		pCodecCtx = NULL;
		return -1;
	}
	if ((fp_out = fopen(filename_out, "wb")) == NULL) {
		LOGE("Could not open output file %s", filename_out);
		avcodec_close(pCodecCtx);
		av_free(pCodecCtx);
		pCodecCtx = NULL;
		return -1;
	}
	y_size = pCodecCtx->width * pCodecCtx->height;
	return 1;
}
/* Encodes one YV12 preview frame (from Camera.setPreviewFormat(YV12)) to
 * H.264 and appends the packet to fp_out.  Returns 1 on success, -1 on error.
 *
 * Fixes over the original:
 *  - no av_image_alloc(): the original allocated a buffer and immediately
 *    overwrote data[0..2] with pointers into the Java array, leaking the
 *    buffer every frame — and av_freep(&pFrame->data[0]) then freed the
 *    JVM-owned pointer (undefined behavior, double release);
 *  - the frame and the pinned Java array are now released on EVERY path,
 *    including errors and got_output == 0 (original leaked both per frame
 *    until the encoder produced output);
 *  - plane comments corrected: YV12 layout is Y, then V, then U. */
JNIEXPORT jint JNICALL Java_com_cpi_ffmpeg_FFMpegLib_Encoding(JNIEnv *env,
		jclass jclass, jbyteArray yuvdata) {
	jbyte *yv12 = (jbyte*) (*env)->GetByteArrayElements(env, yuvdata, 0);
	if (yv12 == NULL) {
		return -1; /* JVM could not pin the array */
	}
	pFrame = av_frame_alloc();
	if (!pFrame) {
		LOGE("Could not allocate video frame");
		(*env)->ReleaseByteArrayElements(env, yuvdata, yv12, JNI_ABORT);
		return -1;
	}
	pFrame->format = pCodecCtx->pix_fmt;
	pFrame->width = pCodecCtx->width;
	pFrame->height = pCodecCtx->height;
	/* Point the planes directly into the pinned Java buffer.
	 * YV12 is Y (y_size bytes), V (y_size/4), U (y_size/4); FFmpeg's
	 * YUV420P wants data[1]=U and data[2]=V, hence the swapped offsets. */
	pFrame->data[0] = (uint8_t*) yv12;                  /* Y */
	pFrame->data[1] = (uint8_t*) yv12 + y_size * 5 / 4; /* U */
	pFrame->data[2] = (uint8_t*) yv12 + y_size;         /* V */
	pFrame->linesize[0] = pCodecCtx->width;
	pFrame->linesize[1] = pCodecCtx->width / 2;
	pFrame->linesize[2] = pCodecCtx->width / 2;
	pFrame->pts = count++;
	av_init_packet(&pkt);
	pkt.data = NULL; /* packet data will be allocated by the encoder */
	pkt.size = 0;
	/* encode the image */
	ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
	if (ret < 0) {
		LOGE("Error encoding frame");
		av_frame_free(&pFrame);
		(*env)->ReleaseByteArrayElements(env, yuvdata, yv12, JNI_ABORT);
		return -1;
	}
	if (got_output) {
		LOGI("Succeed to encode frame: %5d\tsize:%5d", framecnt, pkt.size);
		framecnt++;
		fwrite(pkt.data, 1, pkt.size, fp_out);
		av_free_packet(&pkt);
	}
	av_frame_free(&pFrame); /* frees only the frame, not the Java buffer */
	/* JNI_ABORT: we only read the buffer, no need to copy it back. */
	(*env)->ReleaseByteArrayElements(env, yuvdata, yv12, JNI_ABORT);
	return 1;
}
/* Flushes the encoder's delayed frames, then closes the output file and
 * releases the codec context.  Returns 0 on success, -1 if flushing failed
 * (the file and codec are still cleaned up in that case — the original
 * returned early and leaked both). */
JNIEXPORT jint JNICALL Java_com_cpi_ffmpeg_FFMpegLib_CloseVideo(JNIEnv *env,
		jclass jclass) {
	/* Drain buffered frames by encoding with a NULL frame until the
	 * encoder reports no more output. */
	for (got_output = 1; got_output;) {
		ret = avcodec_encode_video2(pCodecCtx, &pkt, NULL, &got_output);
		if (ret < 0) {
			LOGE("Error encoding frame");
			break; /* still run the cleanup below */
		}
		if (got_output) {
			LOGI("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d",
					pkt.size);
			fwrite(pkt.data, 1, pkt.size, fp_out);
			av_free_packet(&pkt);
		}
	}
	if (fp_out) {
		fclose(fp_out);
		fp_out = NULL;
	}
	avcodec_close(pCodecCtx);
	av_free(pCodecCtx);
	pCodecCtx = NULL;
	/* Do NOT free pFrame->data[0]: in the original it pointed into a
	 * JVM-owned byte array that Encoding() already released, and pFrame
	 * itself may already be NULL.  av_frame_free() is NULL-safe. */
	av_frame_free(&pFrame);
	return ret < 0 ? -1 : 0;
}
已上传(2017年11月22日)
资源下载地址:http://download.csdn.net/download/bless2015/10129278