FFMPEG 平台移植,接口简化和外部模块接入 (四)ffmpeg android移植(ffmpeg 视频编码)

FFMPEG 视频编码最常见的H264,H265需要X264,X265外部模块支持,可以从我们开源平台的FFMPEG编译项目里面获取代码和配置进行一键式编译:https://github.com/Car-eye-team/Car-eye-FFMPEG,我们下面的代码主要是为了简化代码调用结构。只需要配置参数,输入数据就可以进行视频编码,不多说,贴上代码:

/*
 * Car eye 车辆管理平台: www.car-eye.cn
 * Car eye 开源网址: https://github.com/Car-eye-team
 * CarEyeEncoderAPI.h
 *
 * Author: Wgj
 * Date: 2018-04-29 20:01
 * Copyright 2018
 *
 * CarEye 媒体编码库接口声明
 */

#ifndef __CAREYE_ENCODER_H__
#define __CAREYE_ENCODER_H__


#include "public.h"


 // Opaque encoder object handle
#define CarEye_Encoder_Handle void*

 // Maximum audio frame size: 1 second of 48kHz 32-bit audio
#define MAX_AUDIO_FRAME_SIZE 192000

 // Media codec type definitions, intended to match FFmpeg's AVCodecID values
 // one-to-one; H265 ids from other libraries need translation.
 // NOTE(review): AVCodecID numeric values are not stable across FFmpeg
 // versions -- verify these constants against the headers actually linked.
  enum CarEye_CodecType
{
	// No encoding
	CAREYE_CODEC_NONE = 0,
	// H264 encoding
	CAREYE_CODEC_H264 = 0x1C,
	// H265 encoding
	CAREYE_CODEC_H265 = 0xAE,
	// MJPEG encoding
	CAREYE_CODEC_MJPEG = 0x08,
	// MPEG4 encoding
	CAREYE_CODEC_MPEG4 = 0x0D,
	// AAC encoding
	CAREYE_CODEC_AAC = 0x15002,
	// G711 u-law, maps to FFmpeg's AV_CODEC_ID_PCM_MULAW
	CAREYE_CODEC_G711U = 0x10006,
	// G711 A-law, maps to FFmpeg's AV_CODEC_ID_PCM_ALAW
	CAREYE_CODEC_G711A = 0x10007,
	// G726, maps to FFmpeg's AV_CODEC_ID_ADPCM_G726
	CAREYE_CODEC_G726 = 0x1100B,
};

// Raw YUV pixel format definitions, intended to match FFmpeg's
// AVPixelFormat values one-to-one (cast directly in the encoder).
enum CarEye_AVType
{
	CAREYE_FMT_YUV420P = 0,
	CAREYE_FMT_YUV422P = 4,
	CAREYE_FMT_YUV444P = 5,
	CAREYE_FMT_YUV410P = 6,
	CAREYE_FMT_YUV411P = 7,
};

// Description of the raw input streams and the desired encoded outputs
typedef struct CarEye_OriginalStream
{
	// Input video pixel format
    enum CarEye_AVType InVideoType;
	// Desired output video codec; set CAREYE_CODEC_NONE if no video output
    enum CarEye_CodecType OutVideoType;
	// Desired output audio codec; set CAREYE_CODEC_NONE if no audio output
    enum CarEye_CodecType OutAudioType;
	// Video frame rate (FPS), recommended: 25
	unsigned char	FramesPerSecond;
	// Video width in pixels
	unsigned short	Width;
	// Video height in pixels
	unsigned short  Height;
	// Number of pictures in a group (GOP size), recommended: 10
	int				GopSize;
	// Maximum number of B frames between non-B frames, recommended: 1
	int				MaxBFrames;
	// Video bitrate; higher is clearer but larger, e.g. 4000000
	float			VideoBitrate;

	// Audio sample rate, e.g. 44100
	unsigned int	SampleRate;
	// Audio bitrate, e.g. 64000; higher is clearer but larger
	float			AudioBitrate;
}CarEye_OriginalStream;

// YUV媒体流结构定义


#ifdef __cplusplus
extern "C"
{
#endif
	/*
	* Comments: Create an encoder object
	* Param aInfo: media parameters describing the stream(s) to encode
	* @Return CarEye_Encoder_Handle encoder object on success, NULL on failure
	*/
	CE_API CarEye_Encoder_Handle CE_APICALL CarEye_EncoderCreate( CarEye_OriginalStream aInfo);

	/*
	* Comments: Release all resources held by an encoder
	* Param aEncoder: encoder to release
	* @Return None
	*/
	CE_API void CE_APICALL CarEye_EncoderRelease(CarEye_Encoder_Handle aEncoder);


	/*
	* Comments: Encode one input YUV frame into the configured output format
	* Param aEncoder: a valid encoder handle
	* Param aYuv: YUV data to encode
	* Param aPts: sequence number (PTS) of the current video frame
	* Param aBytes: [out] encoded video stream
	* @Return int < 0 encode failed, > 0 number of encoded bytes,
	*         == 0 invalid arguments (or frame buffered, no output yet)
	*/
	CE_API int CE_APICALL CarEye_EncoderYUV(CarEye_Encoder_Handle aEncoder,
									CarEye_YUVFrame *aYuv, int aPts,
									unsigned char *aBytes);

	/*
	* Comments: Get the maximum number of PCM bytes accepted per encode call
	* Param aEncoder: a valid encoder handle
	* @Return maximum PCM buffer size in bytes, -1 if no audio encoder
	*/
	CE_API int CE_APICALL CarEye_GetPcmMaxSize(CarEye_Encoder_Handle aEncoder);

	/*
	* Comments: Encode input PCM audio into the configured output format
	* Param aEncoder: a valid encoder handle
	* Param aPcm: PCM data to encode
	* Param aSize: number of PCM bytes to encode
	* Param aBytes: [out] encoded audio stream
	* Param aPts: sequence number (PTS) of the current audio frame
	* @Return int < 0 encode failed, > 0 number of encoded bytes,
	*         == 0 invalid arguments (or samples buffered, no output yet)
	*/
	CE_API int CE_APICALL CarEye_EncoderPCM(CarEye_Encoder_Handle aEncoder,
									unsigned char *aPcm, int aSize, int aPts,
									unsigned char *aBytes);

#ifdef __cplusplus
}
#endif

#endif
/*
 * Car eye 车辆管理平台: www.car-eye.cn
 * Car eye 开源网址: https://github.com/Car-eye-team
 * CarEyeEncoderAPI.cpp
 *
 * Author: Wgj
 * Date: 2018-04-29 20:02
 * Copyright 2018
 *
 * CarEye 媒体编码库接口实现
 */

#include "CarEyeEncoderAPI.h"
#include "FFVideoFilter.h"

#ifdef _WIN32
 //Windows
extern "C"
{
#include "libavutil/opt.h"
#include "libavcodec/avcodec.h"
#include "libavutil/imgutils.h"
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
#include "libavfilter/avfilter.h"
};
#else
 //Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <libavfilter/avfilter.h>
#ifdef __cplusplus
};
#endif
#endif


// Internal encoder context behind CarEye_Encoder_Handle
typedef struct
{
	// Video encoder context
	AVCodecContext *VEncoder;
	// Audio encoder context
	AVCodecContext *AEncoder;
	// Reusable video input frame; video and audio frames are kept separate
	// so two threads encoding audio and video do not conflict
	AVFrame *VFrame;
	// Reusable audio input frame
	AVFrame *AFrame;
	// Audio resampler/converter context
	struct SwrContext *AConverter;
	// PCM staging buffer (never allocated in the current code; the
	// allocating code below is commented out)
	unsigned char *PcmBuffer;
	// Upper limit of PCM bytes accepted per encode call
	int PcmSize;
	// Bytes per PCM sample per channel
	int PerPcmSize;
	// (subtitle object placeholder -- not implemented)
}CarEyeEncoder;


/*
* Comments: Encode one media frame with the given encoder and emit the packet
* Param aEncoder: valid, opened encoder context
* Param aFrame: raw frame to encode
* Param aPacket: [out] receives the encoded packet when one is available
* @Return int < 0 on failure, 0 on success; on success aPacket->size may be
*         0, meaning the encoder buffered the frame and produced no output yet
*/
static int Encode(AVCodecContext *aEncoder, AVFrame *aFrame, AVPacket *aPacket)
{
	int ret;

	// Feed the raw frame into the encoder's input queue
	ret = avcodec_send_frame(aEncoder, aFrame);
	if (ret < 0)
	{
		// (fixed message: we are sending a frame, not a packet)
		printf("Error sending a frame for encoding\n");
		return ret;
	}

	// Drain one packet; EAGAIN/EOF mean "no output yet", not an error
	ret = avcodec_receive_packet(aEncoder, aPacket);
	if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
	{
		return 0;
	}
	else if (ret < 0)
	{
		// Real error during encoding
		return -1;
	}
	return ret;
}

/*
* Comments: Create an encoder object
* Param aInfo: media parameters describing the stream(s) to encode
* @Return CarEye_Encoder_Handle encoder object on success, NULL on failure
*/
CE_API CarEye_Encoder_Handle CE_APICALL CarEye_EncoderCreate(CarEye_OriginalStream aInfo)
{
	if (aInfo.OutVideoType == CAREYE_CODEC_NONE
		&& aInfo.OutAudioType == CAREYE_CODEC_NONE)
	{
		CarEyeLog("null paramter\n");
		// At least one of video/audio output must be requested
		return NULL;
	}

	CarEyeEncoder *encoder = new CarEyeEncoder;
	if (encoder == NULL)
	{
		CarEyeLog("alloc encoder fail\n");
		return NULL;
	}

	memset(encoder, 0x00, sizeof(CarEyeEncoder));

	// Register codecs (deprecated no-op since FFmpeg 4.0, harmless before)
	av_register_all();

	AVCodec *pCodec;
	if (aInfo.OutVideoType != CAREYE_CODEC_NONE)
	{
		// Look up the requested video encoder
		pCodec = avcodec_find_encoder((AVCodecID)aInfo.OutVideoType);
		if (pCodec == NULL)
		{
			CarEyeLog("Could not find video encoder.\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}
		// Allocate the video encoder context
		encoder->VEncoder = avcodec_alloc_context3(pCodec);
		if (encoder->VEncoder == NULL)
		{
			CarEyeLog("Could not alloc video encoder.\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}
		encoder->VEncoder->codec_id = (AVCodecID)aInfo.OutVideoType;
		// Time base is 1/FPS so pts can simply be a frame counter
		encoder->VEncoder->time_base.num = 1;
		encoder->VEncoder->time_base.den = aInfo.FramesPerSecond;
		encoder->VEncoder->codec_type = AVMEDIA_TYPE_VIDEO;
		encoder->VEncoder->bit_rate = aInfo.VideoBitrate;
		// Video resolution
		encoder->VEncoder->width = aInfo.Width;
		encoder->VEncoder->height = aInfo.Height;
		encoder->VEncoder->gop_size = aInfo.GopSize;
		encoder->VEncoder->max_b_frames = aInfo.MaxBFrames;
		encoder->VEncoder->pix_fmt = (AVPixelFormat)aInfo.InVideoType;

		AVDictionary *param = NULL;
		// H.264 (x264) low-latency options
		if (aInfo.OutVideoType == CAREYE_CODEC_H264)
		{
			av_dict_set(&param, "preset", "slow", 0);
			av_dict_set(&param, "tune", "zerolatency", 0);
		}
		// H.265 (x265) options. Fixed: the x265 tune name is "zerolatency";
		// "zero-latency" is not a valid x265 tune.
		if (aInfo.OutVideoType == CAREYE_CODEC_H265)
		{
			av_dict_set(&param, "preset", "ultrafast", 0);
			av_dict_set(&param, "tune", "zerolatency", 0);
		}

		int opened = avcodec_open2(encoder->VEncoder, pCodec, &param);
		// avcodec_open2 does not take ownership of the dictionary;
		// free it in every case to avoid a leak
		av_dict_free(&param);
		if (opened < 0)
		{
			CarEyeLog("Could not open video encoder.\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}

		// Reusable frame that CarEye_EncoderYUV copies raw planes into
		encoder->VFrame = av_frame_alloc();
		if (encoder->VFrame == NULL)
		{
			CarEyeLog("Alloc video frame faile!\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}
		encoder->VFrame->format = encoder->VEncoder->pix_fmt;
		encoder->VFrame->width = encoder->VEncoder->width;
		encoder->VFrame->height = encoder->VEncoder->height;

		// Non-refcounted picture buffer; freed in CarEye_EncoderRelease
		if (av_image_alloc(encoder->VFrame->data, encoder->VFrame->linesize,
			encoder->VEncoder->width, encoder->VEncoder->height,
			encoder->VEncoder->pix_fmt, 16) < 0)
		{
			CarEyeLog("Could not allocate raw picture buffer!\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}
	}
	if (aInfo.OutAudioType != CAREYE_CODEC_NONE)
	{
		// Look up the requested audio encoder
		pCodec = avcodec_find_encoder((AVCodecID)aInfo.OutAudioType);
		if (pCodec == NULL)
		{
			CarEyeLog("Could not find audio encoder.\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}
		// Allocate the audio encoder context
		encoder->AEncoder = avcodec_alloc_context3(pCodec);
		if (encoder->AEncoder == NULL)
		{
			CarEyeLog("Could not alloc audio encoder.\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}

		// Fixed output format: planar 16-bit stereo
		encoder->AEncoder->codec_id = (AVCodecID)aInfo.OutAudioType;
		encoder->AEncoder->codec_type = AVMEDIA_TYPE_AUDIO;
		encoder->AEncoder->sample_fmt = AV_SAMPLE_FMT_S16P;
		encoder->AEncoder->sample_rate = aInfo.SampleRate;
		encoder->AEncoder->bit_rate = aInfo.AudioBitrate;
		encoder->AEncoder->channel_layout = AV_CH_LAYOUT_STEREO;
		encoder->AEncoder->channels = av_get_channel_layout_nb_channels(encoder->AEncoder->channel_layout);
		if (avcodec_open2(encoder->AEncoder, pCodec, NULL) < 0)
		{
			CarEyeLog("Could not open audio encoder.\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}

		// Reusable frame that CarEye_EncoderPCM copies samples into
		encoder->AFrame = av_frame_alloc();
		if (encoder->AFrame == NULL)
		{
			CarEyeLog("Alloc audio frame fail!\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}
		encoder->AFrame->nb_samples = encoder->AEncoder->frame_size;
		encoder->AFrame->format = encoder->AEncoder->sample_fmt;
		encoder->AFrame->channel_layout = encoder->AEncoder->channel_layout;
		if (av_frame_get_buffer(encoder->AFrame, 0) < 0)
		{
			CarEyeLog("Failed to allocate the audio frame data\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}
		// Bytes per sample per channel, and the byte budget of one frame
		encoder->PerPcmSize = av_get_bytes_per_sample(encoder->AEncoder->sample_fmt);
		encoder->PcmSize = encoder->PerPcmSize * encoder->AEncoder->channels * encoder->AFrame->nb_samples;

		encoder->AConverter = swr_alloc();
		if (encoder->AConverter == NULL)
		{
			CarEyeLog("Allock audio converter fail!\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}
		// NOTE(review): input and output are configured with the SAME sample
		// format and rate, so this converter performs no real conversion.
		// If the incoming PCM is interleaved S16, the input format should
		// probably be AV_SAMPLE_FMT_S16 -- TODO confirm against callers.
		int64_t in_layout = av_get_default_channel_layout(encoder->AEncoder->channels);
		encoder->AConverter = swr_alloc_set_opts(encoder->AConverter, encoder->AEncoder->channel_layout,
			encoder->AEncoder->sample_fmt, encoder->AEncoder->sample_rate,
			in_layout, encoder->AEncoder->sample_fmt, encoder->AEncoder->sample_rate, 0, NULL);
		if (swr_init(encoder->AConverter) < 0)
		{
			CarEyeLog("Init audio converter fail!\n");
			CarEye_EncoderRelease(encoder);
			return NULL;
		}
	}

	return encoder;
}

/*
* Comments: Release all resources held by an encoder
* Param aEncoder: encoder to release; NULL is a safe no-op
* @Return None
*/
CE_API void CE_APICALL CarEye_EncoderRelease(CarEye_Encoder_Handle aEncoder)
{
	CarEyeEncoder *encoder = (CarEyeEncoder *)aEncoder;

	if (encoder == NULL)
	{
		return;
	}
	if (encoder->VEncoder != NULL)
	{
		avcodec_close(encoder->VEncoder);
		av_free(encoder->VEncoder);
		encoder->VEncoder = NULL;
	}
	if (encoder->AEncoder != NULL)
	{
		avcodec_close(encoder->AEncoder);
		av_free(encoder->AEncoder);
		encoder->AEncoder = NULL;
	}
	if (encoder->VFrame != NULL)
	{
		// Leak fix: the picture buffer came from av_image_alloc (not
		// refcounted), so av_frame_free alone would not release it.
		// data[0] is NULL if allocation never happened (frame was zeroed).
		av_freep(&encoder->VFrame->data[0]);
		// av_frame_free also NULLs the pointer
		av_frame_free(&encoder->VFrame);
	}
	if (encoder->PcmBuffer != NULL)
	{
		// Bug fix: av_freep expects the ADDRESS of the pointer; passing the
		// pointer itself would free garbage and leave PcmBuffer dangling
		av_freep(&encoder->PcmBuffer);
	}
	if (encoder->AFrame != NULL)
	{
		av_frame_free(&encoder->AFrame);
	}
	if (encoder->AConverter != NULL)
	{
		swr_free(&encoder->AConverter);
	}
	delete encoder;
}

/*
* Comments: Encode one input YUV frame into the configured output format
* Param aEncoder: a valid encoder handle created with video output enabled
* Param aYuv: YUV data to encode; plane sizes must match the configured
*             Width/Height (not validated here -- trusted caller)
* Param aPts: sequence number (PTS) of the current video frame
* Param aBytes: [out] encoded video stream; caller-provided buffer
* @Return int < 0 encode failed, > 0 number of encoded bytes,
*         == 0 invalid arguments or frame buffered with no output yet
*/
CE_API int CE_APICALL CarEye_EncoderYUV(CarEye_Encoder_Handle aEncoder,
										CarEye_YUVFrame *aYuv, int aPts,
										unsigned char *aBytes)
{
	CarEyeEncoder *encoder = (CarEyeEncoder *)aEncoder;
	if (encoder == NULL || encoder->VEncoder == NULL)
	{
		return 0;
	}
	// Robustness: also reject a NULL YUV frame (was dereferenced unchecked)
	if (aYuv == NULL || aBytes == NULL)
	{
		return 0;
	}
	int ret;
	int out_size = 0;
	AVPacket packet = { 0 };
	av_init_packet(&packet);
	packet.data = NULL;
	packet.size = 0;
	// Copy the Y, U and V planes into the reusable input frame
	memcpy(encoder->VFrame->data[0], aYuv->Y, aYuv->YSize);
	memcpy(encoder->VFrame->data[1], aYuv->U, aYuv->USize);
	memcpy(encoder->VFrame->data[2], aYuv->V, aYuv->VSize);
	encoder->VFrame->pts = aPts;
	ret = Encode(encoder->VEncoder, encoder->VFrame, &packet);
	if (ret < 0)
	{
		CarEyeLog("Encode video error.\n");
		av_packet_unref(&packet);
		return ret;
	}

	// packet.size == 0 means the encoder buffered the frame (no output yet)
	out_size = packet.size;
	if (out_size > 0)
	{
		memcpy(aBytes, packet.data, packet.size);
	}
	av_packet_unref(&packet);
	return out_size;
}

/*
* Comments: Get the maximum number of PCM bytes accepted per encode call
* Param aEncoder: a valid encoder handle
* @Return PCM buffer size limit in bytes, or -1 when the handle is NULL or
*         was created without an audio encoder
*/
CE_API int CE_APICALL CarEye_GetPcmMaxSize(CarEye_Encoder_Handle aEncoder)
{
	CarEyeEncoder *ctx = (CarEyeEncoder *)aEncoder;
	// Without an audio encoder there is no PCM limit to report
	if (ctx == NULL)
	{
		return -1;
	}
	if (ctx->AEncoder == NULL)
	{
		return -1;
	}
	return ctx->PcmSize;
}

/*
* Comments: Encode input PCM audio into the configured output format
* Param aEncoder: a valid encoder handle created with audio output enabled
* Param aPcm: PCM data to encode (consumed in PerPcmSize chunks, filling
*             channel planes in round-robin order, i.e. interleaved input)
* Param aSize: number of PCM bytes to encode; use CarEye_GetPcmMaxSize
*              for the per-call limit
* Param aBytes: [out] encoded audio stream; caller-provided buffer
* Param aPts: sequence number (PTS) of the current audio frame
* @Return int < 0 encode failed, > 0 number of encoded bytes,
*         == 0 invalid arguments or samples buffered with no output yet
*/
CE_API int CE_APICALL CarEye_EncoderPCM(CarEye_Encoder_Handle aEncoder,
								unsigned char *aPcm, int aSize, int aPts,
								unsigned char *aBytes)
{
	CarEyeEncoder *encoder = (CarEyeEncoder *)aEncoder;
	if (encoder == NULL || encoder->AEncoder == NULL)
	{
		return 0;
	}
	if (aBytes == NULL || aSize < 1 || aPcm == NULL)
	{
		return 0;
	}

	int ret;
	int out_size = 0;
	int i = 0, j = 0;
	int cp_count = 0;
	AVPacket packet = { 0 };

	av_init_packet(&packet);
	packet.data = NULL;
	packet.size = 0;

	// De-interleave the source PCM into the planar frame: sample i of
	// channel j takes the next PerPcmSize bytes of aPcm.
	// Bug fix: the original always copied from the START of aPcm (the same
	// first chunk for every sample) and only broke out of the inner loop.
	// The bound also guarantees no read past the end of aPcm.
	for (i = 0; i < encoder->AFrame->nb_samples
		&& cp_count + encoder->PerPcmSize <= aSize; i++)
	{
		for (j = 0; j < encoder->AEncoder->channels
			&& cp_count + encoder->PerPcmSize <= aSize; j++)
		{
			memcpy(encoder->AFrame->data[j] + i * encoder->PerPcmSize,
				aPcm + cp_count, encoder->PerPcmSize);
			cp_count += encoder->PerPcmSize;
		}
	}

	encoder->AFrame->pts = aPts;
	ret = Encode(encoder->AEncoder, encoder->AFrame, &packet);
	if (ret < 0)
	{
		// (fixed message: this is the encode path, not decode)
		printf("Encode audio error.\n");
		av_packet_unref(&packet);
		return ret;
	}

	// packet.size == 0 means the encoder buffered the samples (no output yet)
	out_size = packet.size;
	if (out_size > 0)
	{
		memcpy(aBytes, packet.data, packet.size);
	}
	av_packet_unref(&packet);

	return out_size;
}

以上库同时支持音频和视频编码。本节只说明视频编码部分,下面给出视频编码的 JNI 接口:

// Native mirror of the Java-side EncodeParamInfo class. Field names
// (including the "Vedio"/"Auido" misspellings) must match the Java field
// names looked up via GetFieldID, so do not rename one side only.
typedef struct{
    int  InVedioType;    // input pixel format (CarEye_AVType value)
    int  OutVedioType;   // output video codec (CarEye_CodecType value)
    int  fps;            // video frame rate
    int  width;          // frame width in pixels
    int  height;         // frame height in pixels
    int  VideoBitrate;   // video bitrate in bits/s
    int  InputAuidoType; // input audio format
    int  OutAudioType;   // output audio codec (CarEye_CodecType value)
    int  SampleRate;     // audio sample rate in Hz
    int  AudioBitrate;   // audio bitrate in bits/s
    int  Encodetype;     // unused by the JNI functions shown here
}ParamInfo;

/*
* Comments: JNI entry -- create an encoder from a Java EncodeParamInfo
* object; returns the native handle as a jlong, 0 on failure.
*/
JNIEXPORT jlong JNICALL Java_com_CarEye_CarEyelib_ffmpegandroid_FFmpegNative_CreateEncode(JNIEnv* env, jobject obj, jobject para) {
    void* ret;
    CarEye_OriginalStream param;
    // Reflect the Java parameter object to read its fields
    jclass jcInfo = (*env)->GetObjectClass(env, para);
    if (0 == jcInfo) {
        CarEyeLog("GetObjectClass returned 0\n");
        return 0;
    }
    int fps = (*env)->GetIntField(env, para, (*env)->GetFieldID(env, jcInfo, "fps", "I"));
    int InVedioType = (*env)->GetIntField(env, para,(*env)->GetFieldID(env, jcInfo, "InVedioType", "I"));
    int OutVedioType =(*env)->GetIntField(env, para,(*env)->GetFieldID(env, jcInfo, "OutVedioType", "I"));
    int width = (*env)->GetIntField(env, para,(*env)->GetFieldID(env, jcInfo, "width", "I"));
    int height =  (*env)->GetIntField(env, para,(*env)->GetFieldID(env, jcInfo, "height", "I"));
    int VideoBitrate = (*env)->GetIntField(env, para,(*env)->GetFieldID(env, jcInfo, "VideoBitrate", "I"));
    int InputAuidoType = (*env)->GetIntField(env, para,(*env)->GetFieldID(env, jcInfo, "InputAuidoType", "I"));
    int OutAudioType = (*env)->GetIntField(env, para,(*env)->GetFieldID(env, jcInfo, "OutAudioType", "I"));
    int SampleRate = (*env)->GetIntField(env, para,(*env)->GetFieldID(env, jcInfo, "SampleRate", "I"));
    int AudioBitrate=(*env)->GetIntField(env, para,(*env)->GetFieldID(env, jcInfo, "AudioBitrate", "I"));
    CarEyeLog("fps:%d", fps);
    CarEyeLog("InVedioType:%d", InVedioType);
    CarEyeLog("width:%d,VideoBitrate:%d,OutVedioType:%d ", width,VideoBitrate,OutVedioType);
    param.AudioBitrate = AudioBitrate;
    param.InVideoType = InVedioType;
    param.OutAudioType = OutAudioType;
    param.OutVideoType = OutVedioType;
    param.FramesPerSecond = fps;
    // Fixed values mirroring the recommendations in CarEye_OriginalStream
    param.GopSize = 10;
    param.MaxBFrames = 1;
    param.Width = width;
    param.Height = height;
    param.VideoBitrate = VideoBitrate;
    param.SampleRate = SampleRate;
    ret = CarEye_EncoderCreate(param);
    // Bug fix: the old (long) cast truncates pointers on LLP64 platforms;
    // jlong is always 64 bits. NULL maps to 0 naturally.
    return (jlong)(intptr_t)ret;
}
/*
* Comments: JNI entry -- encode one YUV frame from a Java byte array.
* Returns the encoded byte count (>0), 0 if buffered/invalid, <0 on error.
*/
JNIEXPORT jint JNICALL Java_com_CarEye_CarEyelib_ffmpegandroid_FFmpegNative_encode(JNIEnv* env, jobject obj, jlong handle,jint index, jbyteArray frame, jbyteArray OutFrame) {
    void* pHandle;
    int ret;
    jbyte* in_data;
    jbyte* out_data;
    CarEye_YUVFrame yuv_frame;
    if (handle == 0) {
        return -1;
    }
    pHandle = (void*)handle;
    in_data = (*env)->GetByteArrayElements(env, frame, 0);
    // Robustness: GetByteArrayElements may return NULL (OOM)
    if (in_data == NULL) {
        return -1;
    }
    int len = (*env)->GetArrayLength(env, frame);
    out_data = (*env)->GetByteArrayElements(env, OutFrame, 0);
    if (out_data == NULL) {
        (*env)->ReleaseByteArrayElements(env, frame, in_data, JNI_ABORT);
        return -1;
    }
    // Assumes packed planar YUV420 (I420): Y is 2/3 of the buffer,
    // U and V 1/6 each -- TODO confirm against the capture format
    yuv_frame.Y = (unsigned char*)in_data;
    yuv_frame.YSize = len * 2 / 3;
    yuv_frame.U = (unsigned char*)&in_data[len * 2 / 3];
    yuv_frame.USize = len / 6;
    yuv_frame.V = (unsigned char*)&in_data[len * 5 / 6];
    yuv_frame.VSize = len / 6;
    ret = CarEye_EncoderYUV(pHandle, &yuv_frame, index, (unsigned char*)out_data);
    // The input array was only read: JNI_ABORT skips the useless copy-back
    (*env)->ReleaseByteArrayElements(env, frame, in_data, JNI_ABORT);
    // The output must be copied back into the Java array (mode 0)
    (*env)->ReleaseByteArrayElements(env, OutFrame, out_data, 0);
    return ret;
}

/*
* Comments: JNI entry -- destroy a previously created encoder handle.
* Returns 0 on success, -1 for a zero (never-created) handle.
*/
JNIEXPORT jint JNICALL Java_com_CarEye_CarEyelib_ffmpegandroid_FFmpegNative_ReleaseEncode(JNIEnv* env, jobject obj, jlong handle) {
    // A zero handle was never created; nothing to free
    if (handle == 0) {
        return -1;
    }
    CarEye_EncoderRelease((void*)handle);
    return 0;
}

需要注意的是,参数传递的是一个结构体,对应的 Java 层需要传递一个类。下面给出 Java 的类定义和调用代码:


// Java mirror of the native ParamInfo struct. Field names (including the
// "Vedio"/"Auido" misspellings) are looked up by name from JNI via
// GetFieldID, so they must not be renamed without updating the C side.
public class EncodeParamInfo  {
    int  InVedioType;    // input pixel format (CarEye_AVType value)
    int  OutVedioType;   // output video codec (CarEye_CodecType value)
    int  fps;            // video frame rate
    int  width;          // frame width in pixels
    int  height;         // frame height in pixels
    int  VideoBitrate;   // video bitrate in bits/s
    int  InputAuidoType; // input audio format
    int  OutAudioType;   // output audio codec (CarEye_CodecType value)
    int  SampleRate;     // audio sample rate in Hz
    int  AudioBitrate;   // audio bitrate in bits/s
    int  Encodetype;     // not read by the JNI layer shown here
};
  // Demo: read raw YUV420 frames from /mnt/sdcard/input.yuv, encode them to
  // H.264 on a worker thread and write the stream to /mnt/sdcard/out.h264.
  void TestEncode() {
        new Thread(new Runnable() {
            @Override
            public void run() {
                int loop = 0;
                FileOutputStream out = null;
                FileInputStream in = null;
                FFmpegNative ffmpeg = new FFmpegNative();
                EncodeParamInfo info = new EncodeParamInfo();
                info.fps = 25;
                info.width = 1280;
                info.height = 720;
                info.InVedioType = 0;      // CAREYE_FMT_YUV420P
                info.OutVedioType = 0x1C;  // CAREYE_CODEC_H264
                info.InputAuidoType = 0;
                info.VideoBitrate = 3000000;
                long handle = ffmpeg.InitEncode(info);
                if (handle == 0) {
                    Log.d(TAG, "init encoder fail");
                    // Bug fix: the original fell through, logged "success"
                    // and kept using the invalid handle.
                    return;
                }
                Log.d(TAG, "init encoder success");
                // One YUV420 frame is width * height * 3 / 2 bytes
                byte[] data = new byte[1280 * 720 * 3 / 2];
                byte[] out_data = new byte[1280 * 720 * 3 / 2];
                try {
                    File f = new File("/mnt/sdcard/out.h264");
                    if (f.exists()) f.delete();
                    f.createNewFile();
                    out = new FileOutputStream(f);
                    File input = new File("/mnt/sdcard/input.yuv");
                    in = new FileInputStream(input);
                    while (true) {
                        // NOTE(review): read() may return fewer bytes than a
                        // full frame; only EOF (-1) is handled here.
                        if (in.read(data, 0, 1280 * 720 * 3 / 2) < 0) {
                            Log.d(TAG, "read fail:");
                            break;
                        }
                        int result = ffmpeg.EncodeData(handle, loop++, data, out_data);
                        if (result > 0) {
                            out.write(out_data, 0, result);
                            Log.d(TAG, "encoder sucessful:" + result);
                        } else {
                            Log.d(TAG, "encoder fail:" + result);
                        }
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    // Bug fix: streams and the native encoder were leaked
                    // whenever an exception occurred mid-loop.
                    try { if (in != null) in.close(); } catch (Exception ignored) {}
                    try { if (out != null) out.close(); } catch (Exception ignored) {}
                    ffmpeg.DestroyEncode(handle);
                }
            }
        }).start();

    }

相关代码请以 car-eye 开源网站和 GitHub 上的版本为准。

car-eye开源官方网址:www.car-eye.cn  

car-eye 流媒体平台网址:www.liveoss.com   

car-eye 技术官方邮箱: support@car-eye.cn   
car-eye技术交流QQ群: 590411159    


CopyRight©  car-eye 开源团队 2018

下一篇:FFMPEG 平台移植,接口简化和外部模块接入 (五)ffmpeg android移植(ffmpeg android studio 静态编译)

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值