Re-encoding video to H.265, remuxing to MP4, and trimming

MP4 re-encode and remux: adjusting resolution and codec format (converting to H.265)

XFormat.h

#pragma once
#include <mutex>

struct AVFormatContext;
struct AVCodecParameters;
struct AVPacket;
struct AVCodecContext;

void PrintError(int err);

struct Rational 
{
	int num; ///< Numerator
	int den; ///< Denominator
};

class XFormat
{
public:
	XFormat();
	// Set the context and clear any previously held one; passing NULL simply closes the current context
	// Thread-safe
	bool setContext(AVFormatContext* context);
	bool isVaild();
	int videoIndex();
	int audioIndex();
	Rational videoTimebase();
	Rational audioTimebase();
	bool copyParam(int index, AVCodecParameters* dst);
	bool copyParam(int index, AVCodecContext* dst);
	bool rescaleTime(AVPacket* packet, long long offset_pts, Rational timebase);
	int codecId();

protected:
	AVFormatContext* m_context;		// muxing/demuxing context
	std::mutex m_mtx;
	int m_video_index;				// indices of the video and audio streams
	int m_audio_index;
	Rational m_video_timebase;
	Rational m_audio_timebase;
	int m_codec_id;
};

XFormat.cpp

#include "XFormat.h"
#include <iostream>

using namespace std;

extern "C"  // 指定函数是 C 语言函数,函数目标名不包含重载标识,C++ 中调用 C 函数需要使用 extern "C"
{
	// 引用 ffmpeg 头文件
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
}

// link the FFmpeg import libraries (MSVC #pragma)
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")

void PrintError(int err);


XFormat::XFormat()
{
	m_context = nullptr;
	m_video_index = AVMEDIA_TYPE_VIDEO;		// defaults: stream 0 for video, stream 1 for audio
	m_audio_index = AVMEDIA_TYPE_AUDIO;
	m_video_timebase = { 1, 25 };			// fallback time bases, overwritten in setContext()
	m_audio_timebase = { 1, 9000 };
	m_codec_id = -1;
}

bool XFormat::setContext(AVFormatContext* context)
{
	bool ret = true;
	unique_lock<mutex> lock(m_mtx);
	
	if (m_context != nullptr)
	{
		if (m_context->oformat)			// output (muxing) context
		{
			if (m_context->pb != nullptr)
			{
				avio_close(m_context->pb);
			}

			avformat_free_context(m_context);
		}
		else if (m_context->iformat)	// input (demuxing) context
		{
			avformat_close_input(&m_context);
		}
	}

	m_context = context;

	if (m_context != nullptr)
	{
		// record the video and audio stream indices and time bases
		for (int i = 0; i < m_context->nb_streams; i++)
		{
			if (m_context->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
			{
				m_video_index = i;
				m_video_timebase.num = m_context->streams[i]->time_base.num;
				m_video_timebase.den = m_context->streams[i]->time_base.den;
				m_codec_id = m_context->streams[i]->codecpar->codec_id;
			}
			else if (m_context->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
			{
				m_audio_index = i;
				m_audio_timebase.num = m_context->streams[i]->time_base.num;
				m_audio_timebase.den = m_context->streams[i]->time_base.den;
			}
		}
	}

	return ret;
}

bool XFormat::isVaild()
{
	unique_lock<mutex> lock(m_mtx);

	return (m_context != nullptr);
}

int XFormat::videoIndex()
{
	return m_video_index;
}

int XFormat::audioIndex()
{
	return m_audio_index;
}

bool XFormat::copyParam(int index, AVCodecParameters* dst)
{
	bool ret = false;
	int err = 0;
	unique_lock<mutex> lock(m_mtx);

	if ((m_context != nullptr) && (index >= 0) && (index < m_context->nb_streams) && (dst != nullptr))
	{
		err = avcodec_parameters_copy(dst, m_context->streams[index]->codecpar);

		if (err >= 0)
		{
			ret = true;
		}
		else
		{
			PrintError(err);
		}
	}

	return ret;
}

bool XFormat::copyParam(int index, AVCodecContext* dst)
{
	bool ret = false;
	int err = 0;
	unique_lock<mutex> lock(m_mtx);

	if ((m_context != nullptr) && (index >= 0) && (index < m_context->nb_streams) && (dst != nullptr))
	{
		err = avcodec_parameters_to_context(dst, m_context->streams[index]->codecpar);

		if (err >= 0)
		{
			ret = true;
		}
		else
		{
			PrintError(err);
		}
	}

	return ret;
}

Rational XFormat::videoTimebase()
{
	return m_video_timebase;
}

Rational XFormat::audioTimebase()
{
	return m_audio_timebase;
}

bool XFormat::rescaleTime(AVPacket* packet, long long offset_pts, Rational timebase)
{
	bool ret = false;
	AVRational in_timebase = { 0 };
	AVRational out_timebase = { 0 };
	unique_lock<mutex> lock(m_mtx);

	if ((m_context != nullptr) && (packet != nullptr) && (offset_pts >= 0))
	{
		// pick the time base of the stream this packet belongs to in m_context
		if (packet->stream_index == m_video_index)
		{
			out_timebase = m_context->streams[m_video_index]->time_base;
		}
		else if (packet->stream_index == m_audio_index)
		{
			out_timebase = m_context->streams[m_audio_index]->time_base;
		}

		if (out_timebase.den > 0)
		{
			in_timebase.num = timebase.num;
			in_timebase.den = timebase.den;

			// shift the timestamps so the cut starts at 0, then convert them to the output time base
			packet->pts = av_rescale_q_rnd(packet->pts - offset_pts, in_timebase, out_timebase, static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			packet->dts = av_rescale_q_rnd(packet->dts - offset_pts, in_timebase, out_timebase, static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			packet->duration = av_rescale_q(packet->duration, in_timebase, out_timebase);
			packet->pos = -1;

			ret = true;
		}
	}

	return ret;
}

int XFormat::codecId()
{
	return m_codec_id;
}

XDemux.h

#pragma once
#include "XFormat.h"

class XDemux : public XFormat
{
public:
	// 打开解封装
	static AVFormatContext* Open(const char* url);
	// 读取一帧数据
	bool read(AVPacket* packet);
	bool seek(long long begin_pts, int stream_index);
};

XDemux.cpp

#include "XDemux.h"
#include <iostream>

using namespace std;

extern "C"  // 指定函数是 C 语言函数,函数目标名不包含重载标识,C++ 中调用 C 函数需要使用 extern "C"
{
	// 引用 ffmpeg 头文件
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
}

AVFormatContext* XDemux::Open(const char* url)
{
	AVFormatContext* ret = nullptr;
	int err = 0;

	if (url != nullptr)
	{
		err = avformat_open_input(&ret, url, nullptr, nullptr);

		if (err == 0)
		{
			err = avformat_find_stream_info(ret, nullptr);

			if (err >= 0)
			{
				av_dump_format(ret, 0, url, 0);
			}
			else
			{
				PrintError(err);
				avformat_close_input(&ret);		// frees the context and resets ret to nullptr
			}
		}
		else
		{
			PrintError(err);
			ret = nullptr;
		}
	}

	return ret;
}

bool XDemux::read(AVPacket* packet)
{
	bool ret = false;
	int err = 0;
	unique_lock<mutex> lock(m_mtx);

	if (m_context != nullptr)
	{
		err = av_read_frame(m_context, packet);

		if (err >= 0)
		{
			ret = true;
		}
	}

	return ret;
}

bool XDemux::seek(long long begin_pts, int stream_index)
{
	bool ret = false;
	int err = 0;
	unique_lock<mutex> lock(m_mtx);

	if ((m_context != nullptr) && (stream_index >= 0))
	{
		err = av_seek_frame(m_context, stream_index, begin_pts, AVSEEK_FLAG_BACKWARD);	// seek to the key frame at or before begin_pts

		if (err >= 0)
		{
			ret = true;
		}
		else
		{
			PrintError(err);
		}
	}

	return ret;
}

XMux.h

#pragma once
#include "XFormat.h"
class XMux : public XFormat
{
public:
	static AVFormatContext* Open(const char* url);
	bool writeHead();
	bool write(AVPacket* packet);
	bool writeEnd();
};

XMux.cpp

#include "XMux.h"
#include <iostream>

using namespace std;

extern "C"  // 指定函数是 C 语言函数,函数目标名不包含重载标识,C++ 中调用 C 函数需要使用 extern "C"
{
	// 引用 ffmpeg 头文件
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
}

AVFormatContext* XMux::Open(const char* url)
{
	AVFormatContext* ret = nullptr;
	int err = 0;

	if (url != nullptr)
	{
		err = avformat_alloc_output_context2(&ret, nullptr, nullptr, url);

		if (err >= 0)
		{
			avformat_new_stream(ret, nullptr);
			avformat_new_stream(ret, nullptr);

			err = avio_open(&ret->pb, url, AVIO_FLAG_WRITE);

			if (err < 0)
			{
				PrintError(err);

				avformat_free_context(ret);
				ret = nullptr;
			}
		}
		else
		{
			PrintError(err);
		}
	}

	return ret;
}

bool XMux::writeHead()
{
	bool ret = false;
	int err = 0;
	unique_lock<mutex> lock(m_mtx);

	if (m_context != nullptr)
	{
		err = avformat_write_header(m_context, nullptr);

		if (err >= 0)
		{
			// dump the output format context
			av_dump_format(m_context, 0, m_context->url, 1);
			ret = true;
		}
		else
		{
			PrintError(err);
		}
	}

	return ret;
}

bool XMux::write(AVPacket* packet)
{
	bool ret = false;
	int err = 0;
	unique_lock<mutex> lock(m_mtx);

	if (m_context != nullptr)
	{
		// write one packet; the muxer buffers and interleaves packets by dts internally; passing nullptr flushes the buffer
		err = av_interleaved_write_frame(m_context, packet);

		if (err == 0)
		{
			ret = true;
		}
		else
		{
			PrintError(err);
		}
	}

	return ret;
}

bool XMux::writeEnd()
{
	bool ret = false;
	int err = 0;
	unique_lock<mutex> lock(m_mtx);

	if (m_context != nullptr)
	{
		// flush the interleaving buffer
		av_interleaved_write_frame(m_context, nullptr);
		err = av_write_trailer(m_context);

		if (err == 0)
		{
			ret = true;
		}
		else
		{
			PrintError(err);
		}
	}

	return ret;
}

125_test_xformat.cpp

#include <iostream>
#include "XDemux.h"
#include "XMux.h"
#include "XDecode.h"
#include "XEncode.h"

using namespace std;

extern "C"  // 指定函数是 C 语言函数,函数目标名不包含重载标识,C++ 中调用 C 函数需要使用 extern "C"
{
	// 引用 ffmpeg 头文件
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
}

// link the FFmpeg import library (MSVC #pragma)
#pragma comment(lib, "avcodec.lib")


#define CERR(err)  if(err != 0)				\
					{						\
						PrintError(err);	\
						return -1;			\
					}

int main(int argc, char* argv[])
{
	int ret = 0;
	AVFormatContext* ic = nullptr;	// demuxing (input) context
	AVPacket packet = { 0 };
	AVFormatContext* oc = nullptr;	// muxing (output) context
	const char* in_file = nullptr;
	const char* out_file = nullptr;
	int begin_sec = 0;
	int end_sec = 0;
	long long begin_video_pts = 0;
	long long begin_audio_pts = 0;
	long long end_video_pts = 0;
	XDemux demux;
	XMux mux;
	int video_count = 0;	// number of video packets processed
	int audio_count = 0;	// number of audio packets processed
	double total_sec = 0;	// total duration of the trimmed video in seconds
	int video_width = 0;	// output video width
	int video_height = 0;	// output video height
	XDecode de;
	XEncode en;
	AVCodecContext* decode_c = nullptr;
	AVCodecContext* encode_c = nullptr;
	AVFrame* frame = nullptr;
	AVPacket* en_pkt = nullptr;


	if (argc < 3)
	{
		cout << "usage: program in_file.mp4 out_file.mp4 <begin_sec> <end_sec> <video_width> <video_height>" << endl;
		cout << "example: 124_test_xformat v1080.mpt test.mp4 10 20 400 300" << endl;

		return -1;
	}

	in_file = argv[1];
	out_file = argv[2];

	if (argc > 3)
	{
		begin_sec = atoi(argv[3]);
	}

	if (argc > 4)
	{
		end_sec = atoi(argv[4]);
	}

	if (argc > 5)
	{
		video_width = atoi(argv[5]);
	}

	if (argc > 6)
	{
		video_height = atoi(argv[6]);
	}

	ic = demux.Open(in_file);
	demux.setContext(ic);

	oc = mux.Open(out_file);

	decode_c = de.Create(demux.codecId(), false);
	demux.copyParam(demux.videoIndex(), decode_c);
	de.setContext(decode_c);
	de.open();
	frame = de.createFrame();

	if (video_width <= 0)
	{
		video_width = ic->streams[demux.videoIndex()]->codecpar->width;
	}

	if (video_height <= 0)
	{
		video_height = ic->streams[demux.videoIndex()]->codecpar->height;
	}

	encode_c = en.Create(AV_CODEC_ID_H265, true);
	encode_c->pix_fmt = AV_PIX_FMT_YUV420P;
	encode_c->width = video_width;
	encode_c->height = video_height;
	en.setContext(encode_c);
	en.open();

	// copy the stream parameters into the output context
	if (demux.isVaild() && (demux.videoIndex() >= 0))
	{
		oc->streams[mux.videoIndex()]->time_base.num = demux.videoTimebase().num;  // keep the same time base as the source
		oc->streams[mux.videoIndex()]->time_base.den = demux.videoTimebase().den;
		avcodec_parameters_from_context(oc->streams[mux.videoIndex()]->codecpar, encode_c);
	}

	if (demux.isVaild() && (demux.audioIndex() >= 0))
	{
		oc->streams[mux.audioIndex()]->time_base.num = demux.audioTimebase().num;
		oc->streams[mux.audioIndex()]->time_base.den = demux.audioTimebase().den;
		demux.copyParam(demux.audioIndex(), oc->streams[mux.audioIndex()]->codecpar);
	}

	mux.setContext(oc);

	mux.writeHead();

	if (demux.isVaild() && (demux.videoTimebase().num > 0))
	{
		double t = (double)demux.videoTimebase().den / (double)demux.videoTimebase().num;

		begin_video_pts = begin_sec * t;
		end_video_pts = end_sec * t;
	}

	if (demux.isVaild() && (demux.audioTimebase().num > 0))
	{
		double t = (double)demux.audioTimebase().den / (double)demux.audioTimebase().num;

		begin_audio_pts = begin_sec * t;
	}


	demux.seek(begin_video_pts, demux.videoIndex());

	while (1)
	{
		if (!demux.read(&packet))  // read() does not free the packet's previous buffer; call av_packet_unref() to release it manually
		{
			break;
		}

		if (packet.stream_index == demux.videoIndex())
		{
			if (packet.pts > end_video_pts)
			{
				av_packet_unref(&packet);
				break;
			}

			video_count++;
			total_sec += (double)packet.duration * demux.videoTimebase().num / demux.videoTimebase().den;

			packet.stream_index = mux.videoIndex();		// map to the output video stream
			mux.rescaleTime(&packet, begin_video_pts, demux.videoTimebase());

			if (de.send(&packet))
			{
				while (de.recv(frame))
				{
					if ((en_pkt = en.encode(frame)) != nullptr)
					{
						en_pkt->stream_index = mux.videoIndex();
						mux.write(en_pkt);
						av_packet_free(&en_pkt);
					}
				}
			}

			av_packet_unref(&packet);	// the decoder does not take ownership of the packet buffer
		}
		else if (packet.stream_index == demux.audioIndex())
		{
			audio_count++;

			packet.stream_index = mux.audioIndex();		// map to the output audio stream
			mux.rescaleTime(&packet, begin_audio_pts, demux.audioTimebase());

			if (!mux.write(&packet))
			{
				break;
			}
		}
		else
		{
			av_packet_unref(&packet);	// drop packets from any other streams
		}
	}

	// write the trailer, which contains the file's index/offset information
	mux.writeEnd();

	av_frame_free(&frame);
	en.setContext(nullptr);
	de.setContext(nullptr);
	mux.setContext(nullptr);
	demux.setContext(nullptr);

	cout << "视频帧数: " << video_count << endl;
	cout << "音频帧数: " << audio_count << endl;
	cout << "视频时长: " << total_sec << endl;

	return 0;
}

What this program does: it first demuxes the MP4 file and trims it to the user-supplied start and end times; trimming requires rewriting each packet's pts, dts, and duration. Each video packet is then decoded into a frame, and the frames are re-encoded with an H.265 encoder whose output width and height can also be taken from the command line. The encoded packets are finally muxed into the resulting MP4 file.
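
For reference, a cut point given in seconds maps to a stream pts through that stream's time base. Below is a minimal sketch of the conversion; SecToPts is a hypothetical helper using libavutil's av_rescale_q instead of the manual multiplication done in main above.

extern "C"
{
#include "libavutil/mathematics.h"
}

// Convert a cut point in seconds into the pts domain of a stream.
// Example: with a video time_base of 1/12800, sec = 10 yields 128000,
// which is the kind of value passed to demux.seek() and used as offset_pts.
long long SecToPts(int sec, int tb_num, int tb_den)
{
	AVRational time_base = { tb_num, tb_den };

	return av_rescale_q(sec, AVRational{ 1, 1 }, time_base);
}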

The encoded result looks as follows:

The source video is 1920x1080, while the encode was run at 800x600, so only the top-left corner of the picture shows up in the output.
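
One way to avoid that cropping would be to rescale every decoded frame to the encoder's resolution with libswscale before calling en.encode(). The sketch below only illustrates that idea and is not part of the code above; ScaleFrame and its integration point are hypothetical.

extern "C"
{
#include "libswscale/swscale.h"
#include "libavutil/frame.h"
}
#pragma comment(lib, "swscale.lib")

// Scale/convert one decoded frame to the encoder's size and pixel format.
// The caller owns the returned frame (av_frame_free) and the SwsContext (sws_freeContext).
AVFrame* ScaleFrame(SwsContext*& sws, const AVFrame* src, int dst_w, int dst_h)
{
	if (sws == nullptr)
	{
		// lazily create the converter from the decoded size/format to the encoder's
		sws = sws_getContext(src->width, src->height, (AVPixelFormat)src->format,
			dst_w, dst_h, AV_PIX_FMT_YUV420P,
			SWS_BILINEAR, nullptr, nullptr, nullptr);
	}

	AVFrame* dst = av_frame_alloc();
	dst->width = dst_w;
	dst->height = dst_h;
	dst->format = AV_PIX_FMT_YUV420P;
	av_frame_get_buffer(dst, 0);

	// convert the pixel data; keep the original presentation timestamp
	sws_scale(sws, src->data, src->linesize, 0, src->height, dst->data, dst->linesize);
	dst->pts = src->pts;

	return dst;
}

In the read loop this would mean keeping a SwsContext* sws = nullptr next to frame, passing the scaled frame to en.encode() instead of frame, freeing it with av_frame_free() after the write, and calling sws_freeContext(sws) during cleanup.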
