Implementing RTMP audio/video push streaming with FFmpeg

Following on from the previous article, pushing a local file means taking the data through the input container layer, demuxing it, re-packaging it in the output container layer, and finally sending it over the protocol layer (RTMP).

1. Prepare the output container.

The container holds the streams, encoder parameters and so on, and all of that has to be set up. A rough analogy: a boxed set of mooncakes is the container. The outer gift box comes in different materials (paper, tin), just as containers come in different formats (MP4, FLV, ...). Inside the gift box are smaller boxes, one per mooncake; those are the streams. The mooncake itself, untouched, is the payload: a stream carries audio or video data (and can also carry subtitles and other data). In FFmpeg terms, the outer box is the AVFormatContext with its container format, and each inner box is an AVStream.

int ret = 0;
const char fileName[] = "rtmp://192.168.0.100/live/009";
AVOutputFormat *fmt = NULL; 
// Allocate the output (muxing) context for FLV, the container format RTMP carries.
ret = avformat_alloc_output_context2(&fmt_ctx_out, NULL, "flv", fileName);
if (ret < 0)
{
    printf("avformat_alloc_output_context2 failth \n");
    return -1;
}
fmt = fmt_ctx_out->oformat;
// Choose the codecs to mux: H.264 video + AAC audio, the usual pair for FLV/RTMP.
fmt->video_codec = AV_CODEC_ID_H264;
fmt->audio_codec = AV_CODEC_ID_AAC;


if(fmt->video_codec != AV_CODEC_ID_NONE)
{
    video_encodec = avcodec_find_encoder(fmt->video_codec);
    if (!(video_encodec))
    {
        printf("Can't not find any vencoder");
    }
    else
    {
        printf("Success find vencoder");
    }

    videost = avformat_new_stream(fmt_ctx_out, NULL);
    if (!videost)
    {
        printf("[FFMPEG][ADD STREAM] task id:, Could not allocate stream\n");

        return -2;
    }
    else
    {
        printf("new stream video\n");
    }
    printf("video nb_stream :%d\n",fmt_ctx_out->nb_streams);
    videost->id = fmt_ctx_out->nb_streams - 1;

    videocc = avcodec_alloc_context3(video_encodec);
    if (!videocc)
    {
        printf("[FFMPEG][ADD STREAM] task id: , Could not alloc an encoding context\n");
//            avcodec_free_context(&video_encodec);
//            avcodec_close(video_encodec);

//            avcodec_free_context(&video_encodec);

//            av_buffer_unref(&videopkt);

//            av_packet_unref(&videopkt);
//            av_packet_free(&videopkt);
        avformat_free_context(fmt_ctx_out);
        return -3;
    }

    videocc->codec_id = fmt->video_codec;
    videocc->bit_rate = WIDTH*HEIGHT;            // rough target bitrate in bits per second
    videocc->width = WIDTH;
    videocc->height = HEIGHT;

    // 25 fps: frame rate on the stream, matching time base on stream and encoder.
    videost->r_frame_rate.den = 1;
    videost->r_frame_rate.num = 25;
    videost->time_base = (AVRational){1, 25};

    videocc->time_base = videost->time_base;
    videocc->gop_size = 25;                      // one keyframe per second at 25 fps
    videocc->pix_fmt = AV_PIX_FMT_YUV420P;
    videocc->max_b_frames = 0;                   // no B-frames, keeps latency low

    // Rate-control / quality settings
    videocc->me_range = 16;
    videocc->max_qdiff = 10;
    videocc->qmin = 10;
    videocc->qmax = 30;
    videocc->qcompress = 1;

    if (fmt_ctx_out->oformat->flags & AVFMT_GLOBALHEADER)
    {
        videocc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
   
    if (avcodec_open2(videocc, video_encodec, NULL) < 0)
    {
        printf("avcodec_open2 (video) failed\n");
        return -4;
    }
    videopkt = av_packet_alloc();
    // Copy the encoder parameters into the stream so the muxer writes a correct header.
    avcodec_parameters_from_context(videost->codecpar, videocc);

}

if (fmt->audio_codec != AV_CODEC_ID_NONE)
{
    audioencodec = avcodec_find_encoder(fmt->audio_codec);

    if (!audioencodec)
    {
        printf("Can't not find any audioencodec\n");
    }
    else
    {
        printf("Success find audioencodec\n");
    }

    audiost = avformat_new_stream(fmt_ctx_out, NULL);
    if (!audiost)
    {
        printf("[FFMPEG][ADD STREAM], Could not allocate stream\n");
        return -2;
    }
    printf("audiost nb_stream :%d\n",fmt_ctx_out->nb_streams);
    audiost->id = fmt_ctx_out->nb_streams - 1;

    audiocc = avcodec_alloc_context3(audioencodec);
    if (!audiocc)
    {
        printf("[FFMPEG][ADD STREAM] Could not allocate an audio encoding context\n");
        avformat_free_context(fmt_ctx_out);
        return -3;
    }

    // Use the encoder's preferred sample format, falling back to planar float.
    audiocc->sample_fmt = audioencodec->sample_fmts ? audioencodec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
    audiocc->bit_rate = 135000;
    audiocc->sample_rate = 44100;
    audiocc->channel_layout = AV_CH_LAYOUT_STEREO;
    audiocc->channels = av_get_channel_layout_nb_channels(audiocc->channel_layout);
    audiost->time_base = (AVRational){1, audiocc->sample_rate};

    if (fmt_ctx_out->oformat->flags & AVFMT_GLOBALHEADER)
    {
        audiocc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
   
    if (avcodec_open2(audiocc, audioencodec, NULL) < 0)
    {
        printf("avcodec_open2 (audio) failed\n");
        return -4;
    }
    audiopkt = av_packet_alloc();

    avcodec_parameters_from_context(audiost->codecpar, audiocc);

}

av_dump_format(fmt_ctx_out, 0, fileName, 1);


// FLV is not an AVFMT_NOFILE format, so an AVIOContext is required; opening the
// rtmp:// URL here is what actually connects to the streaming server.
if (!(fmt->flags & AVFMT_NOFILE))
{
    ret = avio_open(&fmt_ctx_out->pb, fileName, AVIO_FLAG_WRITE);
    if (ret < 0)
    {
        avformat_free_context(fmt_ctx_out);
        return -1;
    }
}
ret = avformat_write_header(fmt_ctx_out, NULL);
if (ret < 0) {
    printf("avformat_write_header failed: %d\n", ret);
    return ret;
}

2. Prepare the audio and video streams: open the source files, demux them, read out the compressed packets, and store them in queues. ("Get the original mooncakes out of their boxes.")
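The queue helpers used below (video_data_packet_t, audio_data_packet_t, putAudioPacketQueue(), putVideoPacketQueue(), getVideoPacketQueue()) come from the author's own project and are not shown in this article. As a rough idea of what such a helper might look like, here is a minimal sketch, assuming a fixed-size buffer per packet and a mutex-protected std::queue; only the names are taken from the calls below, the layout itself is an assumption:

#include <cstdint>
#include <mutex>
#include <queue>

// Hypothetical packet holder: one compressed frame copied into a flat buffer.
typedef struct video_data_packet_t {
    uint8_t buffer[1024 * 1024];   // assumed large enough for one compressed frame
    int     video_frame_size;      // number of valid bytes in buffer
} video_data_packet_t;

// Hypothetical thread-safe FIFO matching video_queue->put/get used below.
class VideoPacketQueue {
public:
    void putVideoPacketQueue(video_data_packet_t *pkt) {
        std::lock_guard<std::mutex> lock(mtx_);
        q_.push(pkt);
    }
    video_data_packet_t *getVideoPacketQueue() {
        std::lock_guard<std::mutex> lock(mtx_);
        if (q_.empty()) return NULL;
        video_data_packet_t *pkt = q_.front();
        q_.pop();
        return pkt;
    }
private:
    std::queue<video_data_packet_t *> q_;
    std::mutex mtx_;
};

The audio side (audio_data_packet_t with an audio_frame_size field, plus putAudioPacketQueue()/getAudioPacketQueue()) would be the same structure under different names.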

Prepare the audio stream:

AVFormatContext *pFormatCtx = NULL;

pFormatCtx = avformat_alloc_context();

if (avformat_open_input(&pFormatCtx, aac_ptr, NULL, NULL) != 0)
{
    printf("无法打开信息流");
}
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
{
    printf("无法查找到流信息");
}
int audioindex = -1;
for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
{
    if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
    {
        audioindex = i;
        break;
    }
}
if (audioindex < 0)
{
    printf("No Audio Stream...\n");
}
av_dump_format(pFormatCtx, 0, aac_ptr, 0);

AVPacket *packet = av_packet_alloc();

while (av_read_frame(pFormatCtx, packet) >= 0)
{
    if (packet->stream_index == audioindex)
    {
        printf("detect audio index.....\n");
        // Copy the compressed frame into our own buffer and store it in the queue.
        audio_data_packet_t *audio_data_packet = (audio_data_packet_t *)malloc(sizeof(audio_data_packet_t));
        memcpy(audio_data_packet->buffer, packet->data, packet->size);
        audio_data_packet->audio_frame_size = packet->size;
        audio_queue->putAudioPacketQueue(audio_data_packet);
    }
    av_packet_unref(packet);   // release the reference taken by av_read_frame
}

Prepare the video stream:

AVFormatContext *ifmt_ctx_v = NULL;
int in_stream_index_v = -1;
int ret;

if ((ret = avformat_open_input(&ifmt_ctx_v, input_video_file_name, NULL, NULL)) < 0)
{
    av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
}
if ((ret = avformat_find_stream_info(ifmt_ctx_v, NULL)) < 0)
{
    av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
}
ret = av_find_best_stream(ifmt_ctx_v, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (ret < 0)
{
    av_log(NULL, AV_LOG_ERROR, "Cannot find video stream\n");
}
in_stream_index_v = ret;


av_dump_format(ifmt_ctx_v, 0, input_video_file_name, 0);

AVPacket *packet = av_packet_alloc();
while (av_read_frame(ifmt_ctx_v, packet) >= 0)
{
    if (packet->stream_index == in_stream_index_v)
    {
        printf("detect video index.....\n");
        // Copy the compressed frame into our own buffer and store it in the queue.
        video_data_packet_t *video_data_packet = (video_data_packet_t *)malloc(sizeof(video_data_packet_t));
        memcpy(video_data_packet->buffer, packet->data, packet->size);
        video_data_packet->video_frame_size = packet->size;
        video_queue->putVideoPacketQueue(video_data_packet);
    }
    av_packet_unref(packet);   // release the reference taken by av_read_frame
}

3. Read the queues and write the packets into the output, which pushes them to the RTMP server.

video_data_packet_t *video_data_packet = video_queue->getVideoPacketQueue();
if (video_data_packet != NULL)
{

    int ret = av_buffer_realloc(&videopkt->buf, video_data_packet->video_frame_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (ret < 0)
    {
        return NULL;
    }
    videopkt->size = video_data_packet->video_frame_size;
    memcpy(videopkt->buf->data, video_data_packet->buffer, video_data_packet->video_frame_size);
    videopkt->data = videopkt->buf->data;
    // Note: this marks every packet as a keyframe; strictly only real keyframes need the flag.
    videopkt->flags |= AV_PKT_FLAG_KEY;
    if (video_data_packet != NULL)
    {
        free(video_data_packet);
        video_data_packet = NULL;
    }
}
else
{
    printf("videopkt = NULL;\n");
}

if (videopkt != NULL)
{
    // pts counts frames in the encoder time base (1/25), one tick per frame.
    videopkt->pts = videoNextpts++;
}

// Rescale from the encoder time base to the stream time base, then write the
// packet; av_interleaved_write_frame() is what actually pushes it to the server.
av_packet_rescale_ts(videopkt, videocc->time_base, videost->time_base);
videopkt->stream_index = videost->index;
av_interleaved_write_frame(fmt_ctx_out, videopkt);
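The article only shows the video write path. The audio packets are written the same way, and the stream is finished with av_write_trailer(). A minimal sketch of that missing part, assuming audiopkt, audiocc, audiost, a getAudioPacketQueue() helper analogous to the video side, an audioNextpts counter, and that each queued buffer holds one full AAC frame of 1024 samples:

// Sketch only: mirror of the video path above, names are assumptions.
audio_data_packet_t *audio_data_packet = audio_queue->getAudioPacketQueue();
if (audio_data_packet != NULL)
{
    av_buffer_realloc(&audiopkt->buf, audio_data_packet->audio_frame_size + AV_INPUT_BUFFER_PADDING_SIZE);
    audiopkt->size = audio_data_packet->audio_frame_size;
    memcpy(audiopkt->buf->data, audio_data_packet->buffer, audio_data_packet->audio_frame_size);
    audiopkt->data = audiopkt->buf->data;
    free(audio_data_packet);

    // One AAC frame holds 1024 samples, so pts advances by 1024 per packet
    // when counted in the 1/sample_rate time base.
    audiopkt->pts = audioNextpts;
    audioNextpts += 1024;

    av_packet_rescale_ts(audiopkt, (AVRational){1, audiocc->sample_rate}, audiost->time_base);
    audiopkt->stream_index = audiost->index;
    av_interleaved_write_frame(fmt_ctx_out, audiopkt);
}

// Once both queues are drained and streaming should stop:
av_write_trailer(fmt_ctx_out);
if (!(fmt_ctx_out->oformat->flags & AVFMT_NOFILE))
    avio_closep(&fmt_ctx_out->pb);
avformat_free_context(fmt_ctx_out);

av_write_trailer() flushes anything the muxer has buffered, and avio_closep() tears down the RTMP connection.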