先推荐一个开源项目:硬编码h264和aac文件并推送至服务器
项目首页:https://github.com/simple-rtmp-server/srs-sea
本文适合对这方面的小白阅读,博主也是初学,踩了一个多月的坑,现在回头对坑进行总结。
上代码:
// Build and apply the H.264 (AVC) encoder configuration.
MediaFormat vformat = MediaFormat.createVideoFormat(MediaFormat.MIMETYPE_VIDEO_AVC, vsize.width, vsize.height);
// vcolor is the color format the codec reported as supported (queried elsewhere).
vformat.setInteger(MediaFormat.KEY_COLOR_FORMAT, vcolor);
// NOTE(review): MAX_INPUT_SIZE of 0 lets some codecs size their own input
// buffers, but behavior is vendor-dependent — confirm on target devices.
vformat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 0);
// Bitrate is configured in kbps by the caller; the API expects bits/second.
vformat.setInteger(MediaFormat.KEY_BIT_RATE, 1000 * vbitrate_kbps);
vformat.setInteger(MediaFormat.KEY_FRAME_RATE, VFPS);
// I-frame (keyframe) interval, in seconds.
vformat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, VGOP);
// Configure as an encoder: no output surface, no DRM crypto.
vencoder.configure(vformat, null, null,MediaCodec.CONFIGURE_FLAG_ENCODE);
设置编码器,没啥好说的,网上都有说明。
// Open a camera: prefer the second camera (index 1, typically front-facing)
// when the device has more than one; otherwise open the only camera.
int numberOfCameras = Camera.getNumberOfCameras();
if (numberOfCameras < 2) {
mCamera = Camera.open(0);
} else {
mCamera = Camera.open(1);
}
parameters.setFlashMode(Camera.Parameters.FLASH_MODE_OFF);
parameters.setWhiteBalance(Camera.Parameters.WHITE_BALANCE_AUTO);
parameters.setSceneMode(Camera.Parameters.SCENE_MODE_AUTO);
// Focus mode: only ever set a mode the device reports as supported —
// setting an unsupported mode throws on some devices (e.g. front cameras
// that lack autofocus), which is the crash described in the article.
// BUGFIX: the original branches were swapped — it checked for
// CONTINUOUS_PICTURE support but then set AUTO, and vice versa, so it
// could set a mode the device never claimed to support.
List<String> supportedFocusModes = parameters.getSupportedFocusModes();
if (supportedFocusModes != null) {
if (supportedFocusModes.contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE)) {
parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE); // continuous focus
mCamera.cancelAutoFocus(); // required for continuous autofocus to take effect
} else if (supportedFocusModes.contains(Camera.Parameters.FOCUS_MODE_AUTO)) {
parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_AUTO);
}
}
parameters.setPreviewFormat(ImageFormat.YV12);
// Pick a supported picture size: first match on height (VHEIGHT), then, once
// one is found, prefer any later size whose width matches VWIDTH.
List<Camera.Size> pictureSizes = parameters.getSupportedPictureSizes();
for (Camera.Size s : pictureSizes) {
if (size == null) {
if (s.height == VHEIGHT) {
size = s;
}
} else if (s.width == VWIDTH) {
size = s;
}
}
if (size == null) {
// BUGFIX: the original threw a NullPointerException here when no
// supported size matched VHEIGHT/VWIDTH; fall back to the first one.
size = pictureSizes.get(0);
}
parameters.setPictureSize(size.width, size.height);
// Pick a supported preview size with the same height-then-width rule.
Camera.Size size1 = null;
List<Camera.Size> previewSizes = parameters.getSupportedPreviewSizes();
for (Camera.Size s : previewSizes) {
if (size1 == null) {
if (s.height == VHEIGHT) {
size1 = s;
}
} else if (s.width == VWIDTH) {
size1 = s;
}
}
if (size1 == null) {
size1 = previewSizes.get(0); // same NPE guard as for the picture size
}
// The encoder's frame size must match the actual preview size, which may
// differ from the requested VWIDTH x VHEIGHT.
vsize = size1;
parameters.setPreviewSize(size1.width, size1.height);
parameters.setPreviewFrameRate(25);
设置相机,这块感觉也没啥好说的,主要就是一定要记得获取相机支持的各种参数,例如:图片尺寸,聚焦模式等,千万不要图省事就设个常见参数,反正我是在这被坑了,聚焦模式只设了个常见的参数结果切换到前置摄像头程序就崩溃了。
// Register a with-buffer preview callback (the buffer must be re-added after
// every frame, see addCallbackBuffer below).
// NOTE(review): onYuvFrame is registered here, yet a second PreviewCallback
// is constructed and returned below — presumably the caller registers the
// returned one; verify two callbacks are not both active.
mCamera.setPreviewCallbackWithBuffer((Camera.PreviewCallback) onYuvFrame);
PreviewCallback previewCallback = new Camera.PreviewCallback() {
@Override
public void onPreviewFrame(byte[] data, Camera arg1) {
// Convert the YV12 preview frame into the color format the encoder
// reported (vcolor) before feeding it to the encoder.
byte[] frame = new byte[data.length];
if (vcolor == MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Planar) {
YV12toYUV420Planar(data, frame, vsize.width, vsize.height);
} else if (vcolor == MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420PackedPlanar) {
// NOTE(review): the PackedPlanar case uses the SemiPlanar converter —
// looks suspicious; confirm on devices that actually report
// COLOR_FormatYUV420PackedPlanar.
YV12toYUV420PackedSemiPlanar(data, frame, vsize.width,
vsize.height);
} else if (vcolor == MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420SemiPlanar) {
YV12toYUV420PackedSemiPlanar(data, frame, vsize.width,
vsize.height);
} else {
// Unknown color format: pass the frame through unconverted.
System.arraycopy(data, 0, frame, 0, data.length);
}
// feed the frame to vencoder and muxer.
try {
onGetYuvFrame(frame);
} catch (Exception e) {
e.printStackTrace();
try {
throw e;
} catch (Exception e1) {
e1.printStackTrace();
}
}
// to fetch next frame.
mCamera.addCallbackBuffer(vbuffers);
}
};
return previewCallback;
}
设置相机回调获取预览帧,并获取颜色空间,然后开始编码,
// Feed one YUV frame to the hardware encoder, drain all ready H.264 output,
// prepend the cached SPS/PPS header to keyframes, and hand the Annex-B
// stream to the native writer (myNative).
// NOTE: the method's closing brace is outside this excerpt.
private void onGetYuvFrame(byte[] data) {
// feed the vencoder with yuv frame, got the encoded 264 es stream.
ByteBuffer[] outBuffers = vencoder.getOutputBuffers();
ByteBuffer[] inBuffers = vencoder.getInputBuffers();
// Block indefinitely (-1) until an input buffer is free.
int inBufferIndex = vencoder.dequeueInputBuffer(-1);
if (inBufferIndex >= 0) {
ByteBuffer bb = inBuffers[inBufferIndex];
bb.clear();
bb.put(data, 0, data.length);
// Wall-clock PTS in microseconds, offset by presentationTimeUs.
// NOTE(review): a monotonic clock (e.g. System.nanoTime) would be
// safer than Date for timestamps — wall clock can jump.
long pts = new Date().getTime() * 1000 - presentationTimeUs;
vencoder.queueInputBuffer(inBufferIndex, 0, data.length, pts, 0);
}
// Drain every output buffer that is already ready (timeout 0 = non-blocking).
for (;;) {
int outBufferIndex = vencoder.dequeueOutputBuffer(vebi, 0);
if (outBufferIndex >= 0) {
ByteBuffer bb = outBuffers[outBufferIndex];
byte[] outData = new byte[vebi.size];
Log.i("outData", "outData=" + outData.length);
bb.get(outData);
// Size the send buffer: keyframes get room for the cached SPS/PPS
// header in front of the frame data.
if (mMediaHead == null) {
outByte = new byte[outData.length];
} else {
outByte = new byte[outData.length + mMediaHead.length];
}
// The first encoder output is the codec-config NAL (SPS/PPS); it
// begins with the 00 00 00 01 start code. Cache it for keyframes.
if (mMediaHead == null) {
ByteBuffer spsPpsBuffer = ByteBuffer.wrap(outData);
if (spsPpsBuffer.getInt() == 0x00000001) {
mMediaHead = new byte[outData.length];
System.arraycopy(outData, 0, mMediaHead, 0,
outData.length);
} else {
Log.e(TAG, "not found media head.");
}
}
// Keyframe detection via the NAL header byte after the start code:
// 0x65 on most devices, 0x25 on some others (see article text).
if (mMediaHead != null
&& ((outData[4] == 0x65) || (outData[4] == 0x25))) { // key
// frame
// The encoder emits keyframes as bare "00 00 00 01 65" NALs
// without SPS/PPS, so the cached header must be prepended or
// the stream cannot be decoded.
Log.i("Ifarme", "关键帧长度=" + outData.length + "*****"
+ outData[4] + "**outByte长度**" + outByte.length
+ "****" + mMediaHead.length);
System.arraycopy(mMediaHead, 0, outByte, 0,
mMediaHead.length);
System.arraycopy(outData, 0, outByte, mMediaHead.length,
outData.length);
if (outByte.length > 0) {
// Flag 1 marks a keyframe for the native writer.
int writeStreamVideoData = myNative.peekInstance()
.WriteStreamVideoData(outByte, outByte.length,
1);
Log.i(TAG, "writeStreamVideoData"
+ writeStreamVideoData);
}
} else {
// Non-keyframe (or header not yet cached): send as-is, flag 0.
outByte = outData;
if (outByte.length > 0) {
int writeStreamVideoData = myNative.peekInstance()
.WriteStreamVideoData(outByte, outByte.length,
0);
Log.i(TAG, "writeStreamVideoData"
+ writeStreamVideoData);
}
}
vencoder.releaseOutputBuffer(outBufferIndex, false);
}
// No more output ready — stop draining until the next frame.
if (outBufferIndex < 0) {
break;
}
}
这里就是编码并输出了,我在这被坑了好久,首先是获取pps和sps,本人作为新手小白完全不懂这玩意结果编码的时候没给关键帧添加这玩意,发出去的码流始终无法被解析。。。坑爹。
android硬编出来的第一帧数据就是sps和pps,其数组长度很短。例如:
sps:0, 0, 0, 1, 39, 66, -32, 30, -115, 104, 10, 3, -38, 108, -128, 0, 0, 3, 0, -128, 0, 0, 12, -121, -118, 17, 80
pps:0, 0, 0, 1, 40, -50, 4, 73, 32
这就是我获取的sps和pps的数据,开头的 0, 0, 0, 1 是 start code(起始码)。
获取到了pps和sps存起来碰到关键帧给他加上去就好了。
(outData[4] == 0x65) || (outData[4] == 0x25)
这句是判断该帧是否是关键帧的依据:有的手机上是0x65表示关键帧,有的是0x25,反正我是都写了;另外,关键帧的数组长度比一般帧要长很多。
以上的做完了就基本上是调C/C++代码了也就没啥咱的事了。