1 流程
平台:Windows 10 + Visual Studio 2019(使用 FFmpeg 的 dshow 设备采集摄像头画面,转换为 YUV420P 后编码为 H.264 裸流)
2 代码
#include <iostream>
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavdevice/avdevice.h"
#include "libavformat/avformat.h"
};
#include <Windows.h>
using namespace std;
//#define OUTPUT_YUV420P 1
//需要自己管理释放
// Convert a wide (UTF-16) string to a newly allocated UTF-8 string.
// Ownership transfers to the caller, who must release it with av_free().
// Returns NULL if the conversion or the allocation fails.
char* dup_wchar_to_utf8(wchar_t* w)
{
	char* s = NULL;
	// First pass: query the required buffer size in bytes (includes the
	// terminating NUL because we pass -1 as the source length).
	int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
	if (l <= 0)
		return NULL; // conversion failed (e.g. invalid input string)
	s = (char*)av_malloc(l);
	if (s)
		WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
	return s;
}
// 编码视频
// Encode one raw frame and append every resulting packet to encodefile.
// Pass frame == NULL once at end of stream to put the encoder into draining
// mode so it flushes all buffered packets (otherwise the tail of the video
// is lost).
static void encodeVideo(AVCodecContext* enc_ctx, AVFrame* frame, AVPacket* newpkt, FILE* encodefile)
{
	int ret = 0;
	// Feed the raw frame to the encoder (NULL starts draining).
	ret = avcodec_send_frame(enc_ctx, frame);
	if (ret < 0)
	{
		printf("Failed to send frame for encoding!\n");
		return;
	}
	// Pull every packet the encoder has ready and write it out.
	while (ret >= 0)
	{
		ret = avcodec_receive_packet(enc_ctx, newpkt);
		// EAGAIN: encoder needs more input; AVERROR_EOF: fully drained.
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
		{
			return;
		}
		else if (ret < 0)
		{
			printf("Failed to encode!\n");
			return;
		}
		fwrite(newpkt->data, 1, newpkt->size, encodefile);
		// BUG FIX: the packet returned by avcodec_receive_packet() must be
		// unreferenced after use (this line was commented out), otherwise
		// every encoded packet's buffer is leaked.
		av_packet_unref(newpkt);
	}
}
// Capture frames from the default camera via dshow, convert each frame to
// YUV420P, encode with libx264 and write the raw H.264 bitstream to
// "encode.h264". Returns 0 on success, -1 on any failure.
int main(int argc, char* argv[]) {
	int ret = -1;
	int exitcode = -1;                  // process status; set to 0 only on full success
	char errors[1024];
	int videoIndex = -1;
	int i = 0;
	int got_picture = -1;
	int yuvsize = 0;
	int base = 0;                       // monotonically increasing pts fed to the encoder
	AVFormatContext* pFormatCtx = avformat_alloc_context();
	AVCodecContext* pCodecCtx = NULL;
	AVCodec* pCodec = NULL;
	struct SwsContext* img_convert_ctx = NULL;
	AVDictionary* options = NULL;
	AVCodec* pOutCodec = NULL;
	AVCodecContext* pOutCodecCtx = NULL;
	AVInputFormat* iformat = NULL;      // declared up-front so no goto crosses an initialization
	AVPacket* packet = NULL;
	AVPacket* newpkt = NULL;
	AVFrame* pFrame = NULL;
	AVFrame* pFrameYUV = NULL;
	//wchar_t w[256] = L"audio=麦克风阵列 (Realtek(R) Audio)";
	//char* devicename = dup_wchar_to_utf8(w);
	const char* devicename = "video=Integrated Camera";
	unsigned char* pOutBuf = NULL;
	int size = 0;
	const char* encodePath = "encode.h264"; // raw H.264 output file
	FILE* encodefile = fopen(encodePath, "wb+");
#if OUTPUT_YUV420P
	FILE* fp_yuv = fopen("output.yuv", "wb+");
#endif
	// BUG FIX: the fopen() result was never checked.
	if (!encodefile) {
		printf("Could not open output file %s\n", encodePath);
		goto __FAIL;
	}
	//av_log_set_level(AV_LOG_DEBUG);
	avdevice_register_all();
	//*************************** input device *************************************
	av_dict_set(&options, "video_size", "640x480", 0); // capture resolution for the device
	iformat = av_find_input_format("dshow");
	// BUG FIX: the original `if (ret = avformat_open_input(...) < 0)` bound the
	// comparison first and assigned second, so ret held 0/1 (the comparison
	// result) instead of the FFmpeg error code passed to av_strerror().
	ret = avformat_open_input(&pFormatCtx, devicename, iformat, &options);
	if (ret < 0) {
		av_strerror(ret, errors, 1024);
		printf("Could not open input device: %s\n", errors);
		goto __FAIL;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
	{
		printf("Couldn't find stream information.\n");
		goto __FAIL;
	}
	av_dump_format(pFormatCtx, 0, devicename, 0);
	// Locate the first video stream delivered by the capture device.
	videoIndex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++) {
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			videoIndex = i;
			break;
		}
	}
	if (videoIndex == -1)
	{
		printf("Couldn't find a video stream.\n");
		goto __FAIL;
	}
	/**************************decoder*****************************************/
	pCodecCtx = pFormatCtx->streams[videoIndex]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL)
	{
		printf("Codec not found.\n");
		goto __FAIL;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
	{
		printf("Could not open codec.\n");
		goto __FAIL;
	}
	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	// BUG FIX: the packet was raw av_malloc()'d uninitialized memory;
	// av_packet_alloc() returns a properly initialized packet.
	packet = av_packet_alloc();
	if (!pFrame || !pFrameYUV || !packet) {
		goto __FAIL;
	}
	size = avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	pOutBuf = (unsigned char*)av_malloc(size);
	if (!pOutBuf) {
		goto __FAIL;
	}
	// Bind pFrameYUV's data planes to the pOutBuf buffer.
	avpicture_fill((AVPicture*)pFrameYUV, pOutBuf, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
		SWS_BICUBIC, NULL, NULL, NULL);
	if (!img_convert_ctx) {
		printf("Failed to create swscale context!\n");
		goto __FAIL;
	}
	// Packet that receives the encoder output.
	newpkt = av_packet_alloc();
	if (!newpkt)
	{
		printf("failed to alloc avpacket!\n");
		goto __FAIL;
	}
	/*****************************encoder*********************************/
	pOutCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!pOutCodec)
	{
		printf("Codec libx264 is not found!\n");
		goto __FAIL; // BUG FIX: was `return -1`, which skipped all cleanup
	}
	pOutCodecCtx = avcodec_alloc_context3(pOutCodec);
	if (!pOutCodecCtx)
	{
		printf("Could not allocate codec context!\n");
		goto __FAIL; // BUG FIX: was `return -1`
	}
	// SPS
	pOutCodecCtx->profile = FF_PROFILE_H264_HIGH_444;
	pOutCodecCtx->level = 50;        // H.264 level 5.0
	// Resolution (must match the capture size requested above)
	pOutCodecCtx->width = 640;
	pOutCodecCtx->height = 480;
	// GOP
	pOutCodecCtx->gop_size = 250;
	pOutCodecCtx->keyint_min = 25;   // option
	// B-frames
	pOutCodecCtx->max_b_frames = 3;  // option
	pOutCodecCtx->has_b_frames = 1;  // option
	// Number of reference frames
	pOutCodecCtx->refs = 3;          // option
	// Input pixel format fed to the encoder
	pOutCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
	// Bitrate
	pOutCodecCtx->bit_rate = 600000; // 600 Kbps
	// Frame timing: 25 fps
	pOutCodecCtx->time_base = { 1, 25 };
	pOutCodecCtx->framerate = { 25, 1 };
	ret = avcodec_open2(pOutCodecCtx, pOutCodec, NULL);
	if (ret < 0)
	{
		printf("Can not open avcodec context!\n");
		goto __FAIL; // BUG FIX: was `return -1`
	}
	// Capture loop: read a packet from the device, decode it, convert to
	// YUV420P and hand it to the H.264 encoder.
	while (av_read_frame(pFormatCtx, packet) >= 0)
	{
		if (packet->stream_index == videoIndex) {
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if (ret < 0) {
				printf("Decode Error.\n");
				goto __FAIL;
			}
			if (got_picture) {
				sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
				yuvsize = pCodecCtx->width * pCodecCtx->height;
				fwrite(pFrameYUV->data[0], 1, yuvsize, fp_yuv);     //Y
				fwrite(pFrameYUV->data[1], 1, yuvsize / 4, fp_yuv); //U
				fwrite(pFrameYUV->data[2], 1, yuvsize / 4, fp_yuv); //V
#endif
				pFrameYUV->width = 640;
				pFrameYUV->height = 480;
				pFrameYUV->format = AV_PIX_FMT_YUV420P;
				// The decoded pts is effectively random for a capture device;
				// the encoder needs a monotonically increasing pts, otherwise
				// output quality suffers badly.
				pFrameYUV->pts = base++;
				encodeVideo(pOutCodecCtx, pFrameYUV, newpkt, encodefile);
			}
		}
		av_packet_unref(packet); // was deprecated av_free_packet()
	}
	// Flush: send NULL so the encoder drains its internal buffer; without this
	// the tail of the stream is missing from the file.
	encodeVideo(pOutCodecCtx, NULL, newpkt, encodefile);
	exitcode = 0;
__FAIL:
	if (pOutBuf) {
		av_free(pOutBuf);
	}
	if (img_convert_ctx) {
		sws_freeContext(img_convert_ctx);
	}
	// BUG FIX: use av_frame_free()/av_packet_free() instead of bare av_free(),
	// and free everything that was previously leaked (packets, encoder
	// context, options dict, output file handle).
	av_frame_free(&pFrame);
	av_frame_free(&pFrameYUV);
	av_packet_free(&packet);
	av_packet_free(&newpkt);
	avcodec_free_context(&pOutCodecCtx);
	if (pCodecCtx) {
		avcodec_close(pCodecCtx); // owned by the demuxer stream: close, don't free
	}
	if (pFormatCtx) {
		avformat_close_input(&pFormatCtx);
	}
	av_dict_free(&options); // entries not consumed by avformat_open_input
	if (encodefile) {
		fclose(encodefile);
	}
#if OUTPUT_YUV420P
	if (fp_yuv) {
		fclose(fp_yuv);
	}
#endif
	return exitcode;
}
参考资料
《FFMPEG 实现 YUV、RGB 各种图像原始数据之间的转换(swscale)》
《最简单的基于 FFmpeg 的 AVDevice 例子(读取摄像头)》