MP4TrackId MP4AddH264VideoTrack(MP4FileHandle hFile,
uint32_t timeScale,
MP4Duration sampleDuration,
uint16_t width,
uint16_t height,
uint8_t AVCProfileIndication,
uint8_t profile_compat,
uint8_t AVCLevelIndication,
uint8_t sampleLenFieldSizeMinusOne)
sampleDuration為視頻的固定的視頻幀的顯示時間,計算的方法為timeScale(90000)* during(這個值是當前視頻幀的采集時間 - 上一幀的視頻
采集時間)/1000,公式也可以改為timeScale(90000)/fps(幀率,例如20fps)
音頻:
sampleDuration
MP4TrackId MP4AddAudioTrack(
MP4FileHandle hFile,
u_int32_t timeScale,
u_int32_t sampleDuration,
u_int8_t audioType = MP4_MPEG4_AUDIO_TYPE
)
sampleDuration 是音頻的音頻幀在時間刻度上存在的時間,這里的timeScale為音頻的采樣率,例如44100,32000,計算方法跟上面的視頻一樣。
sampleDuration主要作用是用於音視頻同步問題。
5、mp4v2錄制視頻中h264的格式要求:NAL長度+NAL數據,如果傳過來的數據沒有0x00000001則是純數據
(1)h264流中的NAL,頭四個字節是0x00000001;
(2)mp4中的h264track,頭四個字節要求是NAL的長度,並且是大端順序;
(3)mp4v2很可能針對此種情況並沒有做處理,所以寫到mp4文件中的每個NAL頭四個字節還是0x00000001.
因此如果傳過來的h264數據是純數據的話則需要如下修改:
int nalsize = frameSize;
buf[0] = (nalsize&0xff000000)>>24;
buf[1] = (nalsize&0x00ff0000)>>16;
buf[2] = (nalsize&0x0000ff00)>>8;
buf[3] = nalsize&0x000000ff;
如果頭部格式有其他的,則按照上面的方式偏移到純數據位置。
6、mp4v2錄制視頻中aac的格式要求:有時遠程傳過來的aac數據的格式為adts+aac純數據,則需要將adts部分去掉,即需要偏移7個字節的單位
下面就開始編寫如何調用mp4v2庫的方法:
#include <string.h> /* memcpy */
#include <stdint.h> /* uint8_t */
/* NOTE(review): the two system includes above were blank "#include" lines in
 * the original paste; the names were reconstructed from what the code uses. */
#include "../mp4v2/mp4v2.h"
#include "AppCameraShooting.h"

/* Recording state shared by the JNI entry points below. */
MP4TrackId video;
MP4TrackId audio;
MP4FileHandle fileHandle;

/* SPS (13 bytes) + PPS (4 bytes) for a 640x480 H.264 stream. */
unsigned char sps_pps_640[17] = {0x67, 0x42, 0x40, 0x1F, 0x96, 0x54, 0x05, 0x01,
                                 0xED, 0x00, 0xF3, 0x9E, 0xA0, 0x68, 0xCE, 0x38,
                                 0x80};
/* Working copy filled by mp4init; the parameter-set calls read from it. */
unsigned char sps_pps[17];
/* 2-byte AAC AudioSpecificConfig; 0x14,0x08 decodes as AAC-LC, 16 kHz, mono.
 * NOTE(review): the original definition used by MP4SetTrackESConfiguration was
 * lost in the paste — confirm these bytes against the encoder settings. */
unsigned char ubuffer[2] = {0x14, 0x08};
int video_width = 640;
int video_height = 480;
/* Frame-format flag read by mp4packVideo (1 = frame needs a big-endian NAL
 * length prefix written over its first 4 bytes).
 * NOTE(review): reconstructed — the original definition was lost. */
int video_type = 1;
//視頻錄制的調用,實現初始化
JNIEXPORT boolJNICALL Java_com_seuic_jni_AppCameraShooting_mp4init
(JNIEnv *env, jclass clz, jstring title, jint type)
{
constchar* local_title = (*env)->GetStringUTFChars(env,title, NULL);
//創建mp4文件
fileHandle = MP4Create(local_title, 0);
if(fileHandle == MP4_INVALID_FILE_HANDLE)
{
returnfalse;
}
memcpy(sps_pps, sps_pps_640, 17);
video_width = 640;
video_height = 480;
//設置mp4文件的時間單位
MP4SetTimeScale(fileHandle, 90000);
//創建視頻track //根據ISO/IEC 14496-10 可知sps的第二個,第三個,第四個字節分別是 AVCProfileIndication,profile_compat,AVCLevelIndication 其中90000/20 中的20>是fps
video = MP4AddH264VideoTrack(fileHandle, 90000, 90000/20, video_width, video_height, sps_pps[1], sps_pps[2], sps_pps[3], 3);
if(video == MP4_INVALID_TRACK_ID)
{
MP4Close(fileHandle, 0);
returnfalse;
}
audio = MP4AddAudioTrack(fileHandle, 16000, 1024, MP4_MPEG2_AAC_LC_AUDIO_TYPE);
if(audio == MP4_INVALID_TRACK_ID)
{
MP4Close(fileHandle, 0);
returnfalse;
}
//設置sps和pps
MP4AddH264SequenceParameterSet(fileHandle, video, sps_pps, 13);
MP4AddH264PictureParameterSet(fileHandle, video, sps_pps+13, 4);
MP4SetVideoProfileLevel(fileHandle, 0x7F);
MP4SetAudioProfileLevel(fileHandle, 0x02);
MP4SetTrackESConfiguration(fileHandle, audio, &ubuffer[0], 2);
(*env)->ReleaseStringUTFChars(env, title, local_title);
returntrue;
}
//添加視頻幀的方法
JNIEXPORT voidJNICALL Java_com_seuic_jni_AppCameraShooting_mp4packVideo
(JNIEnv *env, jclass clz, jbyteArray data, jint size, jint keyframe)
{
unsigned char*buf = (unsignedchar*)(*env)->GetByteArrayElements(env, data, JNI_FALSE);
if(video_type == 1){
intnalsize = size;
buf[0] = (nalsize & 0xff000000) >> 24;
buf[1] = (nalsize & 0x00ff0000) >> 16;
buf[2] = (nalsize & 0x0000ff00) >> 8;
buf[3] = nalsize & 0x000000ff;
MP4WriteSample(fileHandle, video, buf, size, MP4_INVALID_DURATION, 0, keyframe);
}
(*env)->ReleaseByteArrayElements(env, data, (jbyte *)buf, 0);
}
//添加音頻幀的方法
JNIEXPORT voidJNICALL Java_com_seuic_jni_AppCameraShooting_mp4packAudio
(JNIEnv *env, jclass clz, jbyteArray data, jint size)
{
uint8_t *bufaudio = (uint8_t *)(*env)->GetByteArrayElements(env, data, JNI_FALSE);
MP4WriteSample(fileHandle, audio, &bufaudio[7], size-7, MP4_INVALID_DURATION, 0, 1); //減去7為了刪除adts頭部的7個字節
(*env)->ReleaseByteArrayElements(env, data, (jbyte *)bufaudio, 0);
}
//視頻錄制結束調用
JNIEXPORT voidJNICALL Java_com_seuic_jni_AppCameraShooting_mp4close
(JNIEnv *env, jclass clz)
{
MP4Close(fileHandle, 0);
}