[RTMP Streaming] Capturing USB camera data with FFMPEG, hardware-encoding it, and pushing it over RTMP

Prerequisites:

1. An RTMP streaming server (a minimal server config sketch follows this list)
2. Hardware encoding on the S5P6818 platform
3. USB camera capture with FFMPEG
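For item 1, the code below pushes to rtmp://192.168.2.101:1935/live/livestream, which is what a stock nginx-rtmp server exposes with an application named "live". Purely as an assumption about how that server might be configured (the port and chunk size are the usual defaults, not taken from the earlier article):

# assumed nginx.conf excerpt for the RTMP server used in the tests
rtmp {
    server {
        listen 1935;              # default RTMP port
        chunk_size 4096;
        application live {        # exposes rtmp://<host>:1935/live/<stream key>
            live on;
            record off;
        }
    }
}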

Back in March I was handed a task: capture video from a USB camera, feed it to the S5P6818 for hardware encoding, and then push the result over RTMP with FFMPEG. Since I was not very familiar with ffmpeg, and admittedly slacked off for a while in between, I only finished a first working version recently.

To keep things simple I reused some Qt facilities here, and the Makefile was tweaked to sort out a few cross-platform build issues.
Qt 4.8.6 and FFMPEG 4.0.2 had been ported to the board in advance.
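The build-side changes boil down to pointing the compiler and linker at the cross-compiled FFMPEG. A hypothetical Makefile fragment, where the toolchain prefix and the install path /opt/ffmpeg-4.0.2-arm are assumptions and not the actual values from my build:

# hypothetical fragment; adjust CROSS and FFMPEG to the real S5P6818 environment
CROSS    = arm-linux-
CXX      = $(CROSS)g++
FFMPEG   = /opt/ffmpeg-4.0.2-arm
CXXFLAGS += -I$(FFMPEG)/include
LDFLAGS  += -L$(FFMPEG)/lib -lavformat -lavcodec -lavdevice -lavfilter -lswscale -lavutil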

Part 1: FFMPEG code for RTMP streaming

ffmpeg.cpp

#include "ffmpeg.h"
#include "Vpu.h"

extern "C"
{
    #include "libavcodec/avcodec.h"
    #include "libavformat/avformat.h"
    #include "libswscale/swscale.h"
    #include "libavdevice/avdevice.h"
}

//#define FFMPEG_MJPEG
//#define FFMPEG_H264
#define FFMPEG_YUV

#define TIMEMS      qPrintable(QTime::currentTime().toString("HH:mm:ss zzz"))



static double r2d(AVRational r)
{
    return r.num == 0 || r.den == 0 ? 0. : (double)r.num / (double)r.den;
}

ffmpeg::ffmpeg(QWidget *parent) :
    QThread(parent)
{
	width=640;
	height=480;
	Fps=30;
}


ffmpeg::~ffmpeg()
{
}

void ffmpeg::YUYV_to_YUV420P(char * image_in, char* image_out, int inwidth, int inheight)
{
    AVFrame  *frmyuyv = av_frame_alloc();
    AVFrame  *frm420p = av_frame_alloc();

    av_image_fill_arrays(frmyuyv->data, frmyuyv->linesize, (uint8_t*)image_in, AV_PIX_FMT_YUYV422, inwidth, inheight, 16);
    av_image_fill_arrays(frm420p->data, frm420p->linesize, (uint8_t*)image_out, AV_PIX_FMT_YUV420P, inwidth, inheight, 16);

    struct SwsContext *sws = sws_getContext(inwidth, inheight, AV_PIX_FMT_YUYV422, inwidth,inheight, AV_PIX_FMT_YUV420P,
                                            SWS_BILINEAR, NULL, NULL, NULL);

    sws_scale(sws, frmyuyv->data, frmyuyv->linesize, 0, inheight, frm420p->data, frm420p->linesize);

    //frm420p->data already points into image_out, so the converted frame ends up in the caller's buffer
    av_frame_free(&frmyuyv);
    av_frame_free(&frm420p);
    sws_freeContext(sws);
}

int ffmpeg::GetSpsPpsFromH264(uint8_t* buf, int len)
{
	int i = 0;
	for (i = 0; i + 4 < len; i++) {
		if (buf[i + 0] == 0x00
			&& buf[i + 1] == 0x00
			&& buf[i + 2] == 0x00
			&& buf[i + 3] == 0x01
			&& buf[i + 4] == 0x06) {   //an SEI NAL (type 6) marks the end of the SPS/PPS header
			break;
		}
	}
	if (i + 4 >= len) {
		printf("GetSpsPpsFromH264 error...\n");
		return 0;
	}

	printf("h264(i=%d):", i);
	for (int j = 0; j < i; j++) {
		printf("%x ", buf[j]);
	}
	return i;
}

bool ffmpeg::isIdrFrame2(uint8_t* buf, int len)
{
	switch (buf[0] & 0x1f) {
	case 7: // SPS
		return true;
	case 8: // PPS
		return true;
	case 5:
		return true;
	case 1:
		return false;

	default:
		return false;
		break;
	}
	return false;
}

bool ffmpeg::isIdrFrame1(uint8_t* buf, int size)
{
	int last = 0;
	for (int i = 2; i <= size; ++i) {
		if (i == size) {
			if (last) {
				bool ret = isIdrFrame2(buf + last, i - last);
				if (ret) {
					return true;
				}
			}
		}
		else if (buf[i - 2] == 0x00 && buf[i - 1] == 0x00 && buf[i] == 0x01) {
			if (last) {
				int size = i - last - 3;
				if (buf[i - 3]) ++size;
				bool ret = isIdrFrame2(buf + last, size);
				if (ret) {
					return true;
				}
			}
			last = i + 1;
		}
	}
	return false;
}

int ffmpeg::RtmpInit(void* spspps_date, int spspps_datalen)
{
    RtmpULR="rtmp://192.168.2.101:1935/live/livestream";
	int ret = 0;
	AVStream *out_stream;
	AVCodecParameters *out_codecpar;
	avformat_network_init();
	avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", NULL);// out_filename);
	if (!ofmt_ctx) {
		fprintf(stderr, "Could not create output context\n");
		return AVERROR_UNKNOWN;
	}
	ofmt = ofmt_ctx->oformat;

	out_stream = avformat_new_stream(ofmt_ctx, NULL);
	if (!out_stream) {
		fprintf(stderr, "Failed allocating output stream\n");
		return AVERROR_UNKNOWN;
	}
	stream_index = out_stream->index;

	//The input is raw H264 frames read from memory, so there is no input codecpar; the output codecpar must be filled in by hand
	out_codecpar = out_stream->codecpar;
	out_codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
	out_codecpar->codec_id = AV_CODEC_ID_H264;
	out_codecpar->bit_rate = 400000;
	out_codecpar->width = width;
	out_codecpar->height = height;
	out_codecpar->codec_tag = 0;
	out_codecpar->format = AV_PIX_FMT_YUV420P;

	//extradata (the SPS and PPS from the first H264 frame) must be set, otherwise the generated FLV has no AVCDecoderConfigurationRecord
	//unsigned char sps_pps[26] = { 0x00, 0x00, 0x01, 0x67, 0x4d, 0x00, 0x1f, 0x9d, 0xa8, 0x14, 0x01, 0x6e, 0x9b, 0x80, 0x80, 0x80, 0x81, 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x3c, 0x80 };
	out_codecpar->extradata_size = spspps_datalen;
	out_codecpar->extradata = (uint8_t*)av_malloc(spspps_datalen + AV_INPUT_BUFFER_PADDING_SIZE);
	if (out_codecpar->extradata == NULL)
	{
		printf("could not av_malloc the video params extradata!\n");
		return AVERROR(ENOMEM);
	}
	memcpy(out_codecpar->extradata, spspps_date, spspps_datalen);	
	av_dump_format(ofmt_ctx, 0, RtmpULR, 1);
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&ofmt_ctx->pb, RtmpULR, AVIO_FLAG_WRITE);
		if (ret < 0) {
			fprintf(stderr, "Could not open output '%s'\n", RtmpULR);
			return ret;
		}
	}
	AVDictionary *opts = NULL;
	av_dict_set(&opts, "flvflags", "add_keyframe_index", 0);
	ret = avformat_write_header(ofmt_ctx, &opts);
	av_dict_free(&opts);
	if (ret < 0) {
		fprintf(stderr, "Error occurred when writing the stream header\n");
		return ret;
	}

	waitI = 1;
	return 0;
}

// void ffmpeg::VideoWrite(void* data, int datalen)
// {
// 	int ret = 0, isI = 0;
// 	AVRational r = { 10, 1 };
// 	AVPacket pkt;
// 	out_stream = ofmt_ctx->streams[videoStreamIndex];
// 	av_init_packet(&pkt);
// 	isI = isIdrFrame1((uint8_t*)data, datalen);
// 	pkt.flags |= isI ? AV_PKT_FLAG_KEY : 0;
// 	pkt.stream_index = avDePacket->stream_index;
// 	pkt.data = (uint8_t*)data;
// 	pkt.size = datalen;
// 	//AVRational time_base: the time base; with it PTS/DTS can be converted to real time.
// 	AVRational time_base1 = in_stream->time_base;
// 	printf("time_base1:{%d,%d}",in_stream->time_base.num,in_stream->time_base.den);
// 	//duration between two frames
// 	int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(r);
// 	//fill in the packet parameters
// 	pkt.pts = (double)((framIndex+1)*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
// 	printf("{%d %d %d,%d}\n",framIndex,calc_duration, pkt.pts,av_q2d(time_base1));	
// 	pkt.dts =pkt.pts ;
// 	pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);


// 	AVRational time_base = in_stream->time_base;
// 	AVRational time_base_q = { 1,AV_TIME_BASE };
// 	//scheduled playback time of this packet
// 	int64_t pts_time = av_rescale_q(pkt.pts, time_base, time_base_q); 
// 	//actual elapsed time since the stream started
// 	int64_t now_time = av_gettime() - start_time;
// 	printf("pts_time:%d\n", pts_time);	
// 	printf("now_time:%d\n", now_time);
// 	AVRational avr = in_stream->time_base;
// 	// printf("avr.num:%d, avr.den:%d, pkt.dts:%ld, pkt.pts:%ld, pts_time:%ld\n",
// 	// 		avr.num,    avr.den,    pkt.dts,     pkt.pts,     pts_time);
// 	if (pts_time > now_time)
// 	{
// 		//sleep so that the recorded playback time stays in sync with real time
// 	printf("pts_time:%d\n", pts_time);	
// 	printf("now_time:%d\n", now_time);
// 		av_usleep((unsigned int)(pts_time - now_time));
// 	}

// 	//after the delay, rescale the timestamps to the output time base
// 	pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
// 	pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
// 	pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
// 	//byte position in the stream; -1 means unknown, let the muxer detect it
// 	pkt.pos = -1;

// 	printf("avr.num:%d, avr.den:%d, pkt.dts:%ld, pkt.pts:%ld, pts_time:%ld\n",
// 			avr.num,    avr.den,    pkt.dts,     pkt.pts,     pts_time);

// 	ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
// 	if (ret < 0)
// 	{
// 		printf("error writing packet\n");
// 	}
// 		av_free_packet(&pkt);
// }

void ffmpeg::VideoWrite(void* data, int datalen)
{
	int ret = 0, isI = 0;
	AVPacket pkt;
	out_stream = ofmt_ctx->streams[stream_index];
	int calc_duration;
	av_init_packet(&pkt);
	isI = isIdrFrame1((uint8_t*)data, datalen);
	pkt.flags |= isI ? AV_PKT_FLAG_KEY : 0;
	pkt.stream_index = out_stream->index;
	pkt.data = (uint8_t*)data;
	pkt.size = datalen;
	//wait I frame
	if (waitI) {
		if (0 == (pkt.flags & AV_PKT_FLAG_KEY))
			return;
		else
			waitI = 0;	
	}
	
	AVRational time_base1=ifmt_ctx->streams[stream_index]->time_base;
	//Duration between 2 frames (us)
	calc_duration=(double)AV_TIME_BASE/av_q2d(ifmt_ctx->streams[stream_index]->r_frame_rate)-2000;
	//Parameters
	pkt.pts=((framIndex+1)*calc_duration)/(av_q2d(time_base1)*AV_TIME_BASE);
	pkt.dts=pkt.pts;
	pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
	pkt.pos=-1;
	// printf("instream{%d,%d}\n",ifmt_ctx->streams[stream_index]->time_base.num,ifmt_ctx->streams[stream_index]->time_base.den);
	// printf("outstream{%d,%d}\n",ofmt_ctx->streams[stream_index]->time_base.num,ofmt_ctx->streams[stream_index]->time_base.den);
	 printf("DURATION :%d\n",calc_duration);
	printf("PTS DTS :%d %d\n",pkt.pts,pkt.dts);
	
	//rescale pts/dts/duration from the input time base to the output time base
	pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt.duration = (int)av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
	//byte position in the stream; -1 means unknown, let the muxer detect it
	pkt.pos = -1;

	gettimeofday(&stamp, NULL);
	int now_time = 1000*1000*(stamp.tv_sec)+stamp.tv_usec-start_time;
	printf("start_time now_time:%d %d\n",start_time, now_time);
	if (pkt.dts > now_time)
	av_usleep(pkt.dts - now_time);
	


	ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
	if (ret < 0) {
		fprintf(stderr, "Error muxing packet\n");
	}

	av_packet_unref(&pkt);
}

void ffmpeg::RtmpUnit(void)
{
	if (ofmt_ctx)
		av_write_trailer(ofmt_ctx);
	/* close output */
	if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
		avio_closep(&ofmt_ctx->pb);
	if (ofmt_ctx) {
		avformat_free_context(ofmt_ctx);
		ofmt_ctx = NULL;
	}
}


// void ffmpeg::YUYV_to_NV12( char * image_in, char* image_out, int inwidth, int inheight)
// {
//        /* number of frames to process; in YUYV each pixel takes 2 bytes */
// 	   int pixNUM = inwidth * inheight;
// 	   unsigned int cycleNum = 1;
	   
// 	  /* start addresses of the Y plane and interleaved UV plane of the NV12 output frame */
// 	  char *y = image_out;
// 	  char *uv = image_out + pixNUM ;
	 
// 	  char *start = image_in;
//           unsigned int i =0; 
// 	  int j =0,k =0;
	  
// 	  /* copy the Y component */
// 	  for(i= 0; i<cycleNum ;i++)
// 	  {
// 		int index =0;
// 		for(j =0; j< pixNUM*2; j=j+2) //in YUYV every second byte is a Y sample
// 		{
// 			*(y+index) = *(start + j);
// 			index ++;
// 		}
// 		start = image_in + pixNUM*2*i;
// 		y= y + pixNUM*3/2;
// 	  }
      
//       /** copy the UV components **/
// 	  start = image_in;
// 	  for(i= 0; i<cycleNum ;i++)
// 	  {
// 	    int uv_index = 0;
// 		for(j=0; j< inheight; j =j+2)  // every other row; the even rows are kept
// 		{
// 			for(k = j*inwidth*2+1; k< inwidth*2*(j+1); k=k+4) //every 4 bytes of a YUYV row carry one U/V pair
// 			{
// 				*(uv+ uv_index) = *(start + k);
// 				*(uv +uv_index+1) = *(start +k +2);
// 				uv_index += 2;
// 			}
// 		}
// 	    start = image_in + pixNUM*2*i;
// 	    uv =uv + pixNUM*3/2;
// 	  }	
//  }


void ffmpeg::YUV420PtoNV12(unsigned char* Src, unsigned char* Dst,int Width,int Height){
    unsigned char* SrcU = Src + Width * Height;
    unsigned char* SrcV = SrcU + Width * Height / 4 ;
    memcpy(Dst, Src, Width * Height);
    unsigned char* DstU = Dst + Width * Height;
    for(int i = 0 ; i < Width * Height / 4 ; i++ ){
        ( *DstU++) = ( *SrcU++);
        ( *DstU++) = ( *SrcV++);
    }
}


/* Purpose: initialize the video input (MJPEG/YUYV capture) and its decoder
 *      1. open the video device
 *      2. set up decoding
 * Parameters: none
 * Return value: 0 on success, -1 on failure
 */
int ffmpeg::initDecodeVideo()
{
    MJPEGPath = fopen("out.mpg", "wb");
    H264Path  = fopen(outputFilename, "wb");
    YUVPath   = fopen("out.yuv", "wb");
    NV12Path  = fopen("out.nv12", "wb");
    //allocate the demuxing context
    ifmt_ctx = avformat_alloc_context();   
    framIndex=0;
    /* 1. register codecs and devices */
    avcodec_register_all();  
    avdevice_register_all(); 
    qDebug() << TIMEMS << "init ffmpeg lib ok" << " version:" << FFMPEG_VERSION;
    /* 2. open the video source */
    AVInputFormat *inputFmt  = av_find_input_format("video4linux2");
    AVDictionary *options = NULL;
	
    //open the input video device for demuxing and set the capture options
    av_dict_set(&options, "framerate", "30", 0);
    char videosize[16];
    sprintf(videosize, "%dx%d", width, height);
    av_dict_set(&options, "video_size", videosize, 0);

#ifdef FFMPEG_MJPEG
    av_dict_set(&options, "input_format", "mjpeg", 0);
#endif    

#ifdef FFMPEG_YUV
    av_dict_set(&options, "input_format", "yuyv422", 0);
#endif  	
    int result = avformat_open_input(&ifmt_ctx, inputFilename, inputFmt, &options);
    if (result < 0) {
        qDebug() << TIMEMS << "open input error" << inputFilename;
        return -1;
    }
    //free the options dictionary
    if(options != NULL) {
        av_dict_free(&options);
    }
    //retrieve stream information
    result = avformat_find_stream_info(ifmt_ctx, NULL);
    if (result < 0) {
        qDebug() << TIMEMS << "find stream info error";
        return -1;
    }
    avDePacket = av_packet_alloc();
    avDeFrameYuv = av_frame_alloc();
    videoStreamIndex = -1;
    videoStreamIndex = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &deCodec, 0);
    if (videoStreamIndex < 0) {
        qDebug() << TIMEMS << "find video stream index error";
        return -1;
    }
	printf("videoindex:%d\n", videoStreamIndex);

    //get the input video stream from the demuxing context
    in_stream = ifmt_ctx->streams[videoStreamIndex];
    if (!in_stream)
    {
        printf("Failed to get input stream\n");
        return -1;
    }
    //get the decoder context of the video stream
    deCodecCtx = in_stream->codec;

    //get the video resolution
    videoWidth = in_stream->codec->width;
    videoHeight = in_stream->codec->height;

    //bail out if the width/height could not be determined
    if (videoWidth == 0 || videoHeight == 0) {
        qDebug() << TIMEMS << "find width height error";
        qDebug() << "WIDTH" << videoWidth << ":" << "HEIGHT" << videoHeight;
        return -1;
    }

    //get the stream frame rate (fps); some drivers report 0, which must not be used as a divisor
    int num = in_stream->codec->framerate.num;
    int den = in_stream->codec->framerate.den;
    if (num != 0 && den != 0) {
        videoFps = num / den ;
    }


    QString videoInfo = QString("Video stream info -> index: %1  format: %2  duration: %3 s  fps: %4  resolution: %5*%6  in_stream->time_base: %7/%8  in_stream->r_frame_rate: %9/%10")
                        .arg(videoStreamIndex).arg(ifmt_ctx->iformat->name)
                        .arg((ifmt_ctx->duration) / 1000000).arg(videoFps).arg(videoWidth).arg(videoHeight).arg(in_stream->time_base.num).arg(in_stream->time_base.den).arg(in_stream->r_frame_rate.num).arg(in_stream->r_frame_rate.den);
    qDebug() << TIMEMS << videoInfo;
    //open the video decoder
    result = avcodec_open2(deCodecCtx, deCodec, NULL);
    if (result < 0) {
        qDebug() << TIMEMS << "open video codec error";
        return -1;
    }
    AVPixelFormat srcFormat = AV_PIX_FMT_YUV420P;

	#ifdef FFMPEG_YUV
		srcFormat = AV_PIX_FMT_YUYV422;
	#endif

    return 0;
}



int ffmpeg::playVideo()
{
    int length;
    int got_packet;
    initDecodeVideo();

    vpu Vpu(width,height,Fps);
    //write the VPU's SPS/PPS header to the local H264 dump and pass it to RtmpInit() as extradata
    fwrite( Vpu.seqBuffer, 1, Vpu.size, H264Path );
    RtmpInit(Vpu.seqBuffer, Vpu.size);
    nv12=(char*)malloc(width*height*3/2);
    yuv420p=(char*)malloc(width*height*3/2);

	gettimeofday(&stamp, NULL);
	start_time=(stamp.tv_sec)*1000*1000+stamp.tv_usec;

    while(true)
    {
        if (av_read_frame(ifmt_ctx, avDePacket) >= 0) { 
                               
            YUYV_to_YUV420P(( char*)avDePacket->data,( char*)yuv420p,width,height);
            h264=Vpu.DecodeNV12_To_H264(yuv420p,&length);
            VideoWrite(h264, length);
			
			// fwrite(avDePacket->data,avDePacket->size,1,YUVPath);
			// fwrite(h264,length,1,H264Path);
			// fwrite(yuv420p,width*height*3/2,1,NV12Path);
            
			// printf("[H264 PACKET LENGTH]=%d\n",length);  
            // qDebug() << "decoded frame" << framIndex;
            // qDebug() << "PACKET SIZE=" << avDePacket->size;
            // qDebug() << TIMEMS;           
            //only unref here; the packet struct itself is reused by the next av_read_frame()
            av_packet_unref(avDePacket);
        }
        framIndex++;
    }
	RtmpUnit();
    avformat_free_context(ifmt_ctx);
    qDebug() << TIMEMS << "stop ffmpeg thread";
}


void ffmpeg::run()
{
    playVideo();
}


ffmpeg.h

#ifndef FFMPEG_H
#define FFMPEG_H

#include <QMainWindow>
#include <QMutex>
#include <QDateTime>
#include <QFile>
#include <QThread>
#include <QDebug>
#include <stdio.h>
#include <sys/time.h>
#include <string>
#include <iostream>
#include <chrono>
#include <vector>
//FFMPEG headers
extern "C" {
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/frame.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/ffversion.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavformat/avformat.h"
#include "libavfilter/avfilter.h"
#include "libavutil/hwcontext.h"
#include "libavutil/avutil.h"
#include "libavutil/opt.h"
}


class ffmpeg : public QThread
{
    Q_OBJECT
public:
    explicit ffmpeg(QWidget *parent = NULL);
    ~ffmpeg();
    char *outputFilename;
    char *inputFilename;
    struct timeval stamp;
    int start_time;
    
protected:
    void run();
signals:
    //signal emitted when a decoded image is available
    void receiveImage(const QImage &image);

private:

    AVFormatContext *fmtCtx = NULL;
    int framIndex;
    uint8_t *buffer;                    //buffer for the decoded picture
    AVFrame *avDeFrameYuv;              //decoded YUV frame
    AVCodec *deCodec = NULL;            //decoder
    AVCodec *pCodecH264;                //encoder

    AVPacket *avDePacket;               //demuxed packet
    AVPacket avpkt;
    int frameFinish = 0;
    int stream_index;
    FILE* MJPEGPath;
    FILE* YUVPath;
    FILE* H264Path;
    FILE* NV12Path;
    int WIDTH,HEIGHT,FRAME;
    int videoStreamIndex;               //video stream index
    //input and output video streams
    AVStream *out_stream;
    AVStream *in_stream;                //input video stream
    AVCodecContext *c= NULL;
    AVCodecContext *deCodecCtx;         //decoder context
    int videoWidth;                     //video width
    int videoHeight;                    //video height
    int videoFps;
	uint8_t * outbuf;
    int outbuf_size;
    int got_packet_ptr;
    char* nv12;
    char *h264;
    char* yuv420p;
    char* RtmpULR;
    AVFormatContext *ifmt_ctx;
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ofmt_ctx = NULL;
    int waitI,rtmpisinit;
    int ptsInc=0;
    int width;
    int height;
    int Fps;

private:
    void YUV420PtoNV12(unsigned char *Src, unsigned char* Dst,int Width,int Height);
    void YUYV_to_YUV420P( char * image_in, char* image_out, int width, int height); 
    int initDecodeVideo();
    int playVideo();
    int RtmpInit(void* spspps_date, int spspps_datale);
    int GetSpsPpsFromH264(uint8_t* buf, int len);
    bool isIdrFrame2(uint8_t* buf, int len);
    bool isIdrFrame1(uint8_t* buf, int size);
    void VideoWrite(void* data, int datalen);
    void RtmpUnit(void);
};

#endif // FFMPEG_H
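
The post does not show how the thread is driven, so here is a minimal, hypothetical main.cpp as a usage sketch; the device node /dev/video0 and the output file name are assumptions:

// hypothetical main.cpp -- not part of the original project
#include <QApplication>
#include "ffmpeg.h"

int main(int argc, char *argv[])
{
    QApplication app(argc, argv);

    ffmpeg streamer;
    streamer.inputFilename  = (char*)"/dev/video0";   // USB camera via video4linux2
    streamer.outputFilename = (char*)"out.h264";      // local H264 dump opened in initDecodeVideo()
    streamer.start();                                  // QThread::start() ends up in run() -> playVideo()

    return app.exec();
}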

Part 2: Problems encountered while running

No serious problems came up during initialization and the other preparatory steps, but three quite serious ones appeared while actually pushing the stream.

(1) Timestamps

Most sample code obtains the current time with av_gettime(), but strangely I could not get a valid timestamp from it here. In the end I fell back to
gettimeofday(&stamp, NULL);
int now_time = 1000 * 1000 * (stamp.tv_sec) + stamp.tv_usec - start_time;
to compute the relevant timestamps.
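Wrapped up as a helper it looks roughly like this. Note that the original stores these values in plain int, which technically overflows once seconds-since-epoch are multiplied by one million; the sketch below uses int64_t to stay on the safe side:

// minimal sketch of the wall-clock fallback used instead of av_gettime()
#include <sys/time.h>
#include <stdint.h>

static int64_t elapsed_us(int64_t start_time_us)
{
    struct timeval stamp;
    gettimeofday(&stamp, NULL);                                    // wall-clock time
    int64_t now_us = (int64_t)stamp.tv_sec * 1000 * 1000 + stamp.tv_usec;
    return now_us - start_time_us;                                 // microseconds since start_time_us was taken
}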

(2) Packet parameter setup for the pushed stream

Printing the values shows that the input and output streams use completely different time bases, so the PTS/DTS (and duration) have to be recomputed with av_rescale_q_rnd().
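Isolated from VideoWrite(), the rescaling step amounts to the following sketch; the time bases mentioned in the comments ({1,1000000} for the v4l2 input, {1,1000} for the FLV output) are typical defaults, not values printed from this particular run:

// sketch of the timestamp rescaling; assumes the FFMPEG headers pulled in by ffmpeg.h above
static void rescale_packet(AVPacket *pkt, AVRational in_tb, AVRational out_tb)
{
    // in_tb  : time base the timestamps were computed in, e.g. {1, 1000000} for a v4l2 input
    // out_tb : time base of the output stream, e.g. {1, 1000} for the FLV muxer
    pkt->pts = av_rescale_q_rnd(pkt->pts, in_tb, out_tb,
                                (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt->dts = av_rescale_q_rnd(pkt->dts, in_tb, out_tb,
                                (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt->duration = av_rescale_q(pkt->duration, in_tb, out_tb);
}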

(3) Streaming and playback out of sync

This is the most troublesome problem and so far I have not found a good solution: playback lags real time by one to two seconds and the two never line up. It does not appear to be caused by this code; even the reference streaming code seems to show the same gap.
The workaround I came up with is to slightly reduce the duration between two frames, which eases the drift, but it also makes the stream stutter briefly every once in a while. The tweaked VideoWrite() is repeated below.


void ffmpeg::VideoWrite(void* data, int datalen)
{
	int ret = 0, isI = 0;
	AVPacket pkt;
	out_stream = ofmt_ctx->streams[stream_index];
	int calc_duration;
	av_init_packet(&pkt);
	isI = isIdrFrame1((uint8_t*)data, datalen);
	pkt.flags |= isI ? AV_PKT_FLAG_KEY : 0;
	pkt.stream_index = out_stream->index;
	pkt.data = (uint8_t*)data;
	pkt.size = datalen;
	//wait I frame
	if (waitI) {
		if (0 == (pkt.flags & AV_PKT_FLAG_KEY))
			return;
		else
			waitI = 0;	
	}
	
	AVRational time_base1=ifmt_ctx->streams[stream_index]->time_base;
	
	//Duration between 2 frames (us)
	//shrink the inter-frame duration slightly (here by 2000 us) to ease the drift between playback and real time
	calc_duration=(double)AV_TIME_BASE/av_q2d(ifmt_ctx->streams[stream_index]->r_frame_rate)-2000;


	//Parameters
	pkt.pts=((framIndex+1)*calc_duration)/(av_q2d(time_base1)*AV_TIME_BASE);
	pkt.dts=pkt.pts;
	pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
	pkt.pos=-1;
	// printf("instream{%d,%d}\n",ifmt_ctx->streams[stream_index]->time_base.num,ifmt_ctx->streams[stream_index]->time_base.den);
	// printf("outstream{%d,%d}\n",ofmt_ctx->streams[stream_index]->time_base.num,ofmt_ctx->streams[stream_index]->time_base.den);
	 printf("DURATION :%d\n",calc_duration);
	printf("PTS DTS :%d %d\n",pkt.pts,pkt.dts);
	
	//rescale pts/dts/duration from the input time base to the output time base
	pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt.duration = (int)av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
	//byte position in the stream; -1 means unknown, let the muxer detect it
	pkt.pos = -1;

	gettimeofday(&stamp, NULL);
	int now_time = 1000*1000*(stamp.tv_sec)+stamp.tv_usec-start_time;
	printf("start_time now_time:%d %d\n",start_time, now_time);
	if (pkt.dts > now_time)
	av_usleep(pkt.dts - now_time);
	


	ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
	if (ret < 0) {
		fprintf(stderr, "Error muxing packet\n");
	}

	av_packet_unref(&pkt);
}


Source code: FFMPEG RTMP streaming
