实现这个过程需要自己搭建rtmp服务器,这里我用nginx搭建服务器,具体搭建方法可以参考如下链接:https://www.jianshu.com/p/06c2025edcd3。
推流的过程是:
1. 打开输入文件(这里把摄像头当作一个输入文件),需要用到的函数如下
1)av_find_input_format() //如果是打开摄像头或者屏幕需要增加
2)avformat_open_input()/avformat_close_input() //打开输入流,并创建输入文件的上下文
3)avformat_find_stream_info() //读取输入的媒体信息
4)av_find_best_stream() //查找对应流的索引号
2.设置编码器
1)avcodec_find_encoder_by_name()/avcodec_find_encoder() //查找编码器
2)avcodec_alloc_context3()/avcodec_free_context() //创建编码器上下文
3)设置编码器参数
4)avcodec_open2() //打开编码器
3.打开输出文件(这里rtmp链接就相当于是一个输出文件)
1)avformat_alloc_output_context2()/avformat_free_context() //创建输出文件的上下文(注意:输出上下文要用avformat_free_context()释放,avformat_close_input()只适用于输入上下文)
2)avformat_new_stream() //向输出的媒体文件添加流
3)avcodec_copy_context() //拷贝流信息给添加的流
4)avio_open() //打开输出文件
5)avformat_write_header() //写数据头文件
6)使用sws_getContext/sws_scale进行一些格式转换,需要注意的是视频转换使用SwsContext结构体,音频转换使用SwrContext结构体。
7)对于视频使用sws_getContext和sws_scale配合使用
对于音频使用swr_alloc_set_opts和swr_convert配合使用
这3个步骤是我自己总结出来的,实际工作中还是要根据实际情况来处理。目前程序运行起来时前面会有错误提示。如下
real-time buffer [USB2.0 PC CAMERA] [video input] too full or near too full (101% of size: 3041280 [rtbufsize parameter])! frame dropped!
但是很快会恢复正常,原因不明。验证可以使用FFMPEG自带的ffplay播放,由于是在局域网,目前测试是有2秒的延时,编码最好还是用硬件编码。
代码如下:
#include "pch.h"
#include <iostream>
#include <time.h>
#include <winsock2.h>
#include <winsock.h>
//#include <sys/time.h>
//#include <sys/time.h>
using namespace std;
extern "C"
{
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavdevice/avdevice.h"
#include "libavutil/imgutils.h"
#include "libswscale/swscale.h"
#include "libavutil/timestamp.h"
#include "libavutil/rational.h"
//引入时间
#include "libavutil/time.h"
}
void captureFrame()
{
//输入文件
AVInputFormat* ifmt = av_find_input_format("dshow");
AVFormatContext* infmt_ctx = NULL;
AVFormatContext* outfmt_ctx = NULL;
if (0 > avformat_open_input(&infmt_ctx, "video=USB2.0 PC CAMERA", ifmt, NULL)) {
printf("failed open input file\n");
return;
}
if (0 > avformat_find_stream_info(infmt_ctx, NULL)) {
printf("failed find stream info\n");
avformat_close_input(&infmt_ctx);
return;
}
int stream_index = -1;
stream_index = av_find_best_stream(infmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (-1 == stream_index) {
printf("failed find stream\n");
avformat_close_input(&infmt_ctx);
return;
}
//av_dump_format(infmt_ctx, 0, "video=USB2.0 PC CAMERA", 1);
//END输入文件
//编码器
AVCodec* encodec = NULL;
encodec = avcodec_find_encoder_by_name("libx264");
if (!encodec) {
printf("not find encoder\n");
avformat_close_input(&infmt_ctx);
return;
}
AVCodecContext* encodec_ctx = NULL;
encodec_ctx = avcodec_alloc_context3(encodec);
if (!encodec_ctx) {
printf("not alloc context3\n\n");
avformat_close_input(&infmt_ctx);
return;
}
int num; ///< Numerator
int den; ///< Denominator
encodec_ctx->bit_rate = 400000;
encodec_ctx->width = 340;
encodec_ctx->height = 240;
encodec_ctx->time_base.num = 1;
encodec_ctx->time_base.den = 25;
encodec_ctx->framerate.num = 25;
encodec_ctx->framerate.den = 1;
encodec_ctx->gop_size = 10;
encodec_ctx->max_b_frames = 0;
encodec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
AVDictionary *param = NULL;
av_dict_set(¶m, "preset", "superfast", 0);
av_dict_set(¶m, "tune", "zerolatency", 0);
av_dict_set(¶m, "profile", "main", 0);
if (0 > avcodec_open2(encodec_ctx, encodec, ¶m)) {
printf("failed open coder\n");
avformat_close_input(&infmt_ctx);
return;
}
//END编码器
//输出文件
if (0 > avformat_alloc_output_context2(&outfmt_ctx, nullptr, "flv", "rtmp://localhost/testlive")) {
printf("failed alloc output context\n");
avformat_close_input(&infmt_ctx);
return;
}
AVStream* out_stream = avformat_new_stream(outfmt_ctx, encodec_ctx->codec);
if (!out_stream) {
printf("failed new stream\n");
avformat_close_input(&infmt_ctx);
avformat_close_input(&outfmt_ctx);
return;
}
avcodec_copy_context(out_stream->codec, encodec_ctx);
//out_stream->codecpar->codec_tag = 0;
if (0 > avio_open(&outfmt_ctx->pb, "rtmp://localhost/testlive", AVIO_FLAG_WRITE)) {
printf("failed to open outfile\n");
avformat_close_input(&infmt_ctx);
avformat_close_input(&outfmt_ctx);
return;
}
av_dump_format(outfmt_ctx, 0, "rtmp://localhost/testlive", 1);
if (0 > avformat_write_header(outfmt_ctx, NULL)) {
printf("failed to write header\n");
avio_close(outfmt_ctx->pb);
avformat_close_input(&infmt_ctx);
avformat_close_input(&outfmt_ctx);
return;
}
//END输出文件
AVPacket packet;
av_init_packet(&packet);
packet.data = NULL;
packet.size = 0;
unsigned char *src_data[4];
unsigned char *dst_data[4];
int src_linesize[4];
int dst_linesize[4];
struct SwsContext *sws_ctx = sws_getContext(infmt_ctx->streams[stream_index]->codec->width, infmt_ctx->streams[stream_index]->codec->height,
infmt_ctx->streams[stream_index]->codec->pix_fmt, 340, 240, AV_PIX_FMT_YUV420P,
SWS_BILINEAR, NULL, NULL, NULL);
int src_bufsize = av_image_alloc(src_data, src_linesize, infmt_ctx->streams[stream_index]->codec->width, infmt_ctx->streams[stream_index]->codec->height, infmt_ctx->streams[stream_index]->codec->pix_fmt, 16);
int dst_bufsize = av_image_alloc(dst_data, dst_linesize, 340, 240, AV_PIX_FMT_YUV420P, 1);
AVFrame* outFrame = av_frame_alloc();
int picture_size = avpicture_get_size(encodec_ctx->pix_fmt,encodec_ctx->width, encodec_ctx->height);
unsigned char* picture_buf = (uint8_t *)av_malloc(picture_size);
avpicture_fill((AVPicture *)outFrame, picture_buf, encodec_ctx->pix_fmt, encodec_ctx->width, encodec_ctx->height);
outFrame->format = encodec_ctx->pix_fmt;
outFrame->width = encodec_ctx->width;
outFrame->height = encodec_ctx->height;
int y_size = encodec_ctx->width*encodec_ctx->height;
AVPacket outpkt;
av_new_packet(&outpkt, picture_size);
int loop = 0;
int got_picture = -1;
int delayedFrame = 0;
while (1) {
av_read_frame(infmt_ctx, &packet);
if (packet.stream_index == stream_index) {
memcpy(src_data[0], packet.data, packet.size);
sws_scale(sws_ctx, src_data, src_linesize, 0, infmt_ctx->streams[stream_index]->codec->height, dst_data, dst_linesize);
outFrame->data[0] = dst_data[0];
outFrame->data[1] = dst_data[0] + y_size;
outFrame->data[2] = dst_data[0] + y_size * 5 / 4;
outFrame->pts = loop;
loop++;
printf("encoding frame %3d---------", loop);
avcodec_encode_video2(encodec_ctx, &outpkt, outFrame, &got_picture);
if (1 == got_picture) {
outpkt.stream_index = out_stream->index;
av_interleaved_write_frame(outfmt_ctx, &outpkt);
av_free_packet(&outpkt);
printf("output frame %3d\n", loop - delayedFrame);
}
else {
delayedFrame++;
printf("no output frame\n");
}
}
av_packet_unref(&packet);
}
av_write_trailer(outfmt_ctx);
av_free(outFrame);
av_free(picture_buf);
avio_close(outfmt_ctx->pb);
avformat_close_input(&infmt_ctx);
avformat_close_input(&outfmt_ctx);
return;
}
// Program entry point: register the FFmpeg components (required by the
// pre-4.0 API this file uses), bring up networking for the rtmp:// URL,
// then run the capture/encode/push loop until it exits.
int main(void)
{
	av_register_all();       // muxers and demuxers
	avformat_network_init(); // needed for the rtmp:// output
	avcodec_register_all();  // encoders and decoders
	avdevice_register_all(); // the dshow capture device
	captureFrame();
	return 0;
}