1.从 https://github.com/kewlbear/FFmpeg-iOS-build-script 获取到一套能运行于armv7, armv7s, arm64, i386的ffmpeg库;
2.在工程配置中, 加入libz.dylib, libbz2.dylib, libiconv.dylib这三个系统库;
3.在代码中引入avcodec.h, swscale.h, avformat.h等ffmpeg的头文件, 其中avcodec.h是必要的, 其他则根据需要引入;
4.在工程配置中, 加入搜索路径 "$(PROJECT_DIR)/工程名/ffmpeg/lib" 和 "$(PROJECT_DIR)/工程名/ffmpeg/include"
5.填写代码, 可以参考 http://blog.csdn.net/smking/article/details/7561688
6.已经编译好的库: http://download.csdn.net/download/huayu608/7671881
以下是调用ffmpeg接口时的demo代码
#include "avcodec.h"
#include "swscale.h"
#include "avformat.h"
// Forward declaration so this header-level code does not need the full
// definition from <libswscale/swscale.h>; only the pointer is stored here.
typedef struct SwsContext SwsContext;
// Per-stream decoder state bundle.  One instance is created by videox_began,
// passed to videox_decode as an opaque `long` handle, and destroyed by
// videox_ended.
typedef struct structVideoDecode {
// Decoder implementation found via avcodec_find_decoder (owned by FFmpeg, not freed).
AVCodec* mAVCodec;
// Codec context configured with the stream's width/height/pixel format.
AVCodecContext* mAVCodecContext;
// Reusable frame that receives each decoded (YUV) picture.
AVFrame* mAVFrame;
// Destination RGB24 buffer; videox_decode returns pointers into this picture,
// so its contents are only valid until the next decode call.
AVPicture mAVPicture;
// Cached YUV420P -> RGB24 converter created in videox_began.
SwsContext* mSwsContext;
} VideoDecode;
/*
 * One-time global FFmpeg initialization.  Call once before any other
 * videox_* function.  Always returns 0.
 */
int videox_init()
{
    // av_register_all() (libavformat, pre-4.0 API) already calls
    // avcodec_register_all() internally, so the separate codec-registration
    // call the original made was redundant; this single call registers every
    // muxer, demuxer, protocol and codec.
    av_register_all();
    return 0;
}
// Global teardown counterpart to videox_init().  The FFmpeg API of this
// vintage has no unregister call, so there is nothing to release; the
// function exists only as a symmetric shutdown hook.  Always returns 0.
// NOTE(review): the name is presumably a typo for "videox_uninit" — confirm
// with callers before renaming, since it is part of the external interface.
int videox_unit()
{
return 0;
}
int videox_began(long video_format, long image_width, long image_height, long* out_vd_handle)
{
VideoDecode* vd = (VideoDecode*)malloc(sizeof(VideoDecode));
if (vd == 0)
return -1;
memset(vd, 0, sizeof(VideoDecode));
*out_vd_handle = (long)vd;
// VIDEO_FORMAT_H264等宏是公司私有的标识
enum AVCodecID codeId = CODEC_ID_NONE;
switch (video_format)
{
case VIDEO_FORMAT_H264: {codeId = AV_CODEC_ID_H264; break;}
case VIDEO_FORMAT_MJPEG4: {codeId = AV_CODEC_ID_MJPEG; break;}
default: {break;}
}
vd->mAVCodec = avcodec_find_decoder(codeId);
if (!vd->mAVCodec)
{
return -2;
}
vd->mAVCodecContext = avcodec_alloc_context3(vd->mAVCodec);
if(!vd->mAVCodecContext)
{
return -3;
}
vd->mAVCodecContext->width = image_width;
vd->mAVCodecContext->height = image_height;
vd->mAVCodecContext->pix_fmt = PIX_FMT_YUV420P;
if (avcodec_open2(vd->mAVCodecContext, vd->mAVCodec, 0) < 0) // 需要加锁
{
return -4;
}
vd->mAVFrame = av_frame_alloc();
if (!vd->mAVFrame)
{
return -5;
}
avpicture_alloc(&vd->mAVPicture, PIX_FMT_RGB24, vd->mAVCodecContext->width, vd->mAVCodecContext->height);
vd->mSwsContext = sws_getCachedContext(0,
vd->mAVCodecContext->width,
vd->mAVCodecContext->height,
vd->mAVCodecContext->pix_fmt,
vd->mAVCodecContext->width, // 生成图像的宽度
vd->mAVCodecContext->height, // 生成图像的高度
PIX_FMT_RGB24,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
return 0;
}
/*
 * Destroy a decoder instance created by videox_began.
 *
 * vd_handle  opaque handle from videox_began; 0 is rejected with -1.
 *
 * Returns 0 on success, -1 on a null handle.  Any buffers previously handed
 * out by videox_decode become invalid after this call.
 */
int videox_ended(long vd_handle)
{
    if (vd_handle == 0)
        return -1;
    VideoDecode* vd = (VideoDecode*)vd_handle;
    if (vd->mAVCodecContext)
    {
        // Must be serialized with avcodec_open2 by the same external lock.
        avcodec_close(vd->mAVCodecContext);
        av_free(vd->mAVCodecContext);
        vd->mAVCodecContext = 0;
    }
    if (vd->mAVFrame)
    {
        // av_frame_free is the correct pair for av_frame_alloc: it unrefs any
        // frame buffers before freeing (plain av_free, as the original used,
        // can leak refcounted frame data) and nulls the pointer for us.
        av_frame_free(&vd->mAVFrame);
    }
    avpicture_free(&vd->mAVPicture);
    if (vd->mSwsContext)
    {
        sws_freeContext(vd->mSwsContext);
        vd->mSwsContext = 0;
    }
    free(vd);
    return 0;
}
/*
 * Decode one compressed frame and convert it to RGB24.
 *
 * vd_handle          opaque handle from videox_began
 * in_264Buffer       compressed input data (H.264/MJPEG per videox_began)
 * in_264BufferSize   input size in bytes; must be > 0
 * out_RGBBuffer      receives a pointer into the decoder's internal RGB
 *                    picture — valid only until the next videox_decode or
 *                    videox_ended call; do NOT free it
 * out_RGBBufferSize  receives the RGB buffer size in bytes
 * out_image_width    receives the frame width in pixels
 * out_image_height   receives the frame height in pixels
 *
 * Returns 0 on success, -1 on bad arguments, -2 when no frame was produced.
 */
int videox_decode(long vd_handle, uint8_t* in_264Buffer, int in_264BufferSize, uint8_t** out_RGBBuffer, int* out_RGBBufferSize, long* out_image_width, long* out_image_height)
{
    // Reject negative sizes too (the original only tested == 0) and null
    // out-params, which would otherwise crash on the writes below.
    if (vd_handle == 0 || in_264Buffer == 0 || in_264BufferSize <= 0 ||
        out_RGBBuffer == 0 || out_RGBBufferSize == 0 ||
        out_image_width == 0 || out_image_height == 0)
        return -1;
    VideoDecode* dv = (VideoDecode*)vd_handle;

    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = in_264Buffer; // points at caller memory; the packet does not own it
    avpkt.size = in_264BufferSize;

    int got_picture = 0; // must be initialized: only meaningful on success
    int consumed_bytes = avcodec_decode_video2(dv->mAVCodecContext, dv->mAVFrame, &got_picture, &avpkt);
    av_free_packet(&avpkt); // no-op for data we set ourselves, but keeps the API contract

    // BUG FIX: the original never checked got_picture.  A non-negative return
    // with got_picture == 0 means the decoder consumed the input but produced
    // no frame yet (it is buffering); treating that as success would hand the
    // caller stale pixels from a previous call.
    if (consumed_bytes <= 0 || !got_picture || dv->mAVFrame->data[0] == 0)
    {
        return -2;
    }

    // Convert the decoded YUV frame into the instance's RGB24 picture.
    sws_scale(dv->mSwsContext,
              dv->mAVFrame->data,
              dv->mAVFrame->linesize,
              0,
              dv->mAVCodecContext->height,
              dv->mAVPicture.data,
              dv->mAVPicture.linesize);

    // Hand out views into dv->mAVPicture — see the lifetime note above.
    *out_RGBBuffer = dv->mAVPicture.data[0];
    *out_RGBBufferSize = dv->mAVPicture.linesize[0] * dv->mAVCodecContext->height;
    *out_image_width = dv->mAVCodecContext->width;
    *out_image_height = dv->mAVCodecContext->height;
    return 0;
}
// Demo call-site (fragment; the enclosing method begins outside this excerpt).
// Tear down any previous decoder, then create one matching the incoming
// stream's format and dimensions.
// NOTE(review): the return values of videox_ended/videox_began are ignored
// here; if videox_began fails, later videox_decode calls will fail with -1.
if (mVideoDecode)
videox_ended(mVideoDecode);
videox_began(format, frame_head->width, frame_head->height, &mVideoDecode);
... ...
long w = 0;
long h = 0;
uint8_t* RGBBuffer = 0;
int RGBBufferSize = 0;
int ret = videox_decode(mVideoDecode, (uint8_t*)a264Buffer, a264Length, &RGBBuffer, &RGBBufferSize, &w, &h);
// NOTE(review): RGBBuffer points into the decoder's internal AVPicture and is
// wrapped below with kCFAllocatorNull (no copy), then used asynchronously on
// the main queue.  If videox_decode or videox_ended runs again before this
// block executes, the image will show stale/torn pixels or read freed memory.
// Confirm the producer is serialized with this block, or memcpy the buffer
// before dispatching.
dispatch_async(dispatch_get_main_queue(), ^{
if (self.stream && ret >= 0)
{
// Wrap the raw RGB24 bytes without copying and build a CGImage from them.
CFDataRef data = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, RGBBuffer, RGBBufferSize, kCFAllocatorNull);
CGDataProviderRef provider = CGDataProviderCreateWithCFData(data);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
int bitsPerComponent = 8; // 8 bits per color component
int bitsPerPixel =3* bitsPerComponent; // RGB24: 3 bytes per pixel (no alpha byte, unlike RGBA's 4)
int bytesPerRow =3* w; // width pixels x 3 bytes each; the original author observed pict.linesize[0] == bytesPerRow (e.g. 1056) — assumes sws output rows carry no padding, TODO confirm
CGImageRef cgImage = CGImageCreate(w,
h,
bitsPerComponent,
bitsPerPixel,
bytesPerRow, // equivalent to pict.linesize[0]
colorSpace,
kCGBitmapByteOrderDefault,
provider,
NULL,
NO,
kCGRenderingIntentDefault);
mImageView.image = [UIImage imageWithCGImage:cgImage];
CGColorSpaceRelease(colorSpace);
CGImageRelease(cgImage);
CGDataProviderRelease(provider);
CFRelease(data);
}
});