FFmpeg中一个线程获取视频流一个线程执行scale测试代码

https://blog.csdn.net/fengbingchun/article/details/94712986 中介绍过如果usb视频流编码类型为rawvideo则无需进行解码,可直接通过av_read_frame获取视频流然后再通过sws_scale进行像素格式转换,当在高分辨率情况下,有时达不到实时显示,会有卡顿,经分析,性能瓶颈在av_read_frame和sws_scale,由于在https://blog.csdn.net/fengbingchun/article/details/94712986中将此两个函数放在一个线程中串行执行因此导致卡顿。这里将两个函数放在两个独立的线程中并行执行,一个线程负责执行av_read_frame,另一个线程负责执行sws_scale,性能得到大幅提升。

测试代码如下:

common.hpp:

#ifndef FBC_FFMPEG_TEST_COMMON_HPP_
#define FBC_FFMPEG_TEST_COMMON_HPP_

#include <chrono>
#include <queue>
#include <mutex>
#include <condition_variable>

class Timer {
public:
	// Current wall-clock time, expressed as milliseconds since the Unix epoch.
	static long long getNowTime() {
		const auto since_epoch = std::chrono::system_clock::now().time_since_epoch();
		return std::chrono::duration_cast<std::chrono::milliseconds>(since_epoch).count();
	}
};

// A raw byte buffer handed between the packet and scale threads.
// The struct does not own 'data'; allocation/release is managed by PacketScaleQueue.
struct Buffer {
	unsigned char* data;  // start of the storage
	unsigned int length;  // usable size of 'data' in bytes
};

class BufferQueue {
public:
	BufferQueue() = default;
	~BufferQueue() {}

	void push(Buffer& buffer) {
		std::unique_lock<std::mutex> lck(mtx);
		queue.push(buffer);
		cv.notify_all();
	}

	void pop(Buffer& buffer) {
		std::unique_lock<std::mutex> lck(mtx);
		while (queue.empty()) {
			cv.wait(lck);
		}
		buffer = queue.front();
		queue.pop();
	}

	unsigned int size() {
		return queue.size();
	}

private:
	std::queue<Buffer> queue;
	std::mutex mtx;
	std::condition_variable cv;
};

// Owns a pool of reusable byte buffers shuttled between two queues:
// 'packet_queue' holds free buffers, 'scale_queue' holds buffers filled with
// packet data waiting to be scaled. Buffers are recycled, never reallocated.
class PacketScaleQueue {
public:
	PacketScaleQueue() = default;

	// Release every buffer still parked in either queue.
	~PacketScaleQueue() {
		Buffer buffer;

		while (getPacketSize() > 0) {
			popPacket(buffer);
			delete[] buffer.data;
		}

		while (getScaleSize() > 0) {
			popScale(buffer);
			delete[] buffer.data;
		}
	}

	// Pre-allocate 'buffer_num' buffers of 'buffer_size' bytes each and park them
	// in the free (packet) queue.
	void init(unsigned int buffer_num = 16, unsigned int buffer_size = 1024 * 1024 * 4) {
		for (unsigned int i = 0; i < buffer_num; ++i) {
			// Fix: 'length' must record the buffer capacity (buffer_size); the
			// original stored buffer_num here, so consumers saw a wrong size.
			Buffer buffer = { new unsigned char[buffer_size], buffer_size };
			pushPacket(buffer);
		}
	}

	void pushPacket(Buffer& buffer) { packet_queue.push(buffer); }
	void popPacket(Buffer& buffer) { packet_queue.pop(buffer); }
	unsigned int getPacketSize() { return packet_queue.size(); }

	void pushScale(Buffer& buffer) { scale_queue.push(buffer); }
	void popScale(Buffer& buffer) { scale_queue.pop(buffer); }
	unsigned int getScaleSize() { return scale_queue.size(); }

private:
	BufferQueue packet_queue, scale_queue;
};

#endif // FBC_FFMPEG_TEST_COMMON_HPP_

test_ffmpeg_decode_show.cpp:

#include "funset.hpp"
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <memory>
#include <fstream>
#include <thread>
#include "common.hpp"

#ifdef __cplusplus
extern "C" {
#endif

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/mem.h>
#include <libavutil/imgutils.h>

#ifdef __cplusplus
}
#endif

#include <opencv2/opencv.hpp>

namespace {

bool packet_scale_flag = true;

// Reader thread: pulls raw packets from the capture device and hands each
// video payload to the scale thread via the shared buffer pool.
// Runs until packet_scale_flag is cleared by the scale thread.
void get_packet(AVFormatContext* format_ctx, int video_stream_index, PacketScaleQueue& packet_scale)
{
	while (packet_scale_flag) {
		AVPacket packet;
		int ret = av_read_frame(format_ctx, &packet);
		if (ret < 0) {
			fprintf(stderr, "##### fail to av_read_frame: %d\n", ret);
			continue;
		}

		if (packet.stream_index == video_stream_index && packet.size > 0) {
			Buffer buffer;
			packet_scale.popPacket(buffer); // blocks until a free buffer is recycled
			memcpy(buffer.data, packet.data, packet.size);
			packet_scale.pushScale(buffer);
		}
		// Fix: unref every successfully read packet. The original only unref'd
		// video packets, leaking packets from other streams (or with size <= 0).
		av_packet_unref(&packet);
	}
}

// Scale/display thread: converts YUYV422 frames produced by get_packet to
// BGR24 via sws_scale and shows them with OpenCV. Pressing ESC clears
// packet_scale_flag, which also stops the reader thread.
void get_scale(AVCodecContext* codec_ctx, PacketScaleQueue& packet_scale)
{
	SwsContext* sws_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_BGR24, 0, nullptr, nullptr, nullptr);
	if (!sws_ctx) {
		fprintf(stderr, "##### fail to sws_getContext\n");
		// Fix: the original fell through and dereferenced the null context.
		packet_scale_flag = false; // also ask the reader thread to stop
		return;
	}

	uint8_t *bgr_data[4], *yuyv422_data[4];
	int bgr_linesize[4], yuyv422_linesize[4];
	// Fix: check allocation results; the original ignored them.
	int ret = av_image_alloc(bgr_data, bgr_linesize, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_BGR24, 1);
	if (ret < 0) {
		fprintf(stderr, "##### fail to av_image_alloc: %d\n", ret);
		sws_freeContext(sws_ctx);
		packet_scale_flag = false;
		return;
	}
	ret = av_image_alloc(yuyv422_data, yuyv422_linesize, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUYV422, 1);
	if (ret < 0) {
		fprintf(stderr, "##### fail to av_image_alloc: %d\n", ret);
		av_freep(&bgr_data[0]);
		sws_freeContext(sws_ctx);
		packet_scale_flag = false;
		return;
	}

	cv::Mat mat(codec_ctx->height, codec_ctx->width, CV_8UC3);
	const char* winname = "usb video";
	cv::namedWindow(winname);

	while (packet_scale_flag) {
		Buffer buffer;
		packet_scale.popScale(buffer); // blocks until the reader delivers a frame
		const uint8_t *srcSlice[1];
		srcSlice[0] = buffer.data;
		sws_scale(sws_ctx, srcSlice, yuyv422_linesize, 0, codec_ctx->height, bgr_data, bgr_linesize);
		packet_scale.pushPacket(buffer); // recycle the buffer for the reader

		mat.data = bgr_data[0];
		cv::imshow(winname, mat);

		int key = cv::waitKey(10);
		if (key == 27) { // ESC: stop both threads
			packet_scale_flag = false;
			break;
		}
	}

	cv::destroyWindow(winname);
	sws_freeContext(sws_ctx);
	av_freep(&bgr_data[0]);
	av_freep(&yuyv422_data[0]);
}

} // namespace

int test_ffmpeg_stream_show_two_thread()
{
	avdevice_register_all();

#ifdef _MSC_VER
	const char* input_format_name = "vfwcap";
	const char* url = "";
#else
	const char* input_format_name = "video4linux2";
	const char* url = "/dev/video0";
#endif

	AVInputFormat* input_fmt = av_find_input_format(input_format_name);
	AVFormatContext* format_ctx = avformat_alloc_context();

	int ret = avformat_open_input(&format_ctx, url, input_fmt, nullptr);
	if (ret != 0) {
		fprintf(stderr, "fail to open url: %s, return value: %d\n", url, ret);
		return -1;
	}

	ret = avformat_find_stream_info(format_ctx, nullptr);
	if (ret < 0) {
		fprintf(stderr, "fail to get stream information: %d\n", ret);
		return -1;
	}

	int video_stream_index = -1;
	for (unsigned int i = 0; i < format_ctx->nb_streams; ++i) {
		const AVStream* stream = format_ctx->streams[i];
		if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
			video_stream_index = i;
			fprintf(stdout, "type of the encoded data: %d, dimensions of the video frame in pixels: width: %d, height: %d, pixel format: %d\n",
				stream->codecpar->codec_id, stream->codecpar->width, stream->codecpar->height, stream->codecpar->format);
		}
	}

	if (video_stream_index == -1) {
		fprintf(stderr, "no video stream\n");
		return -1;
	}

	AVCodecParameters* codecpar = format_ctx->streams[video_stream_index]->codecpar;
	const AVCodec* codec = avcodec_find_decoder(codecpar->codec_id);
	if (!codec) {
		fprintf(stderr, "fail to avcodec_find_decoder\n");
		return -1;
	}

	if (codecpar->codec_id != AV_CODEC_ID_RAWVIDEO) {
		fprintf(stderr, "this test code only support rawvideo encode: %d\n", codecpar->codec_id);
		return -1;
	}

	AVCodecContext* codec_ctx = avcodec_alloc_context3(codec);
	if (!codec_ctx) {
		fprintf(stderr, "fail to avcodec_alloc_context3\n");
		return -1;
	}

	codec_ctx->pix_fmt = AVPixelFormat(codecpar->format);
	codec_ctx->height = codecpar->height;
	codec_ctx->width = codecpar->width;
	codec_ctx->thread_count = 16;
	ret = avcodec_open2(codec_ctx, codec, nullptr);
	if (ret != 0) {
		fprintf(stderr, "fail to avcodec_open2: %d\n", ret);
		return -1;
	}

	PacketScaleQueue packet_scale;
	packet_scale.init(16, 1024*1024*4);

	std::thread thread_packet(get_packet, format_ctx, video_stream_index, std::ref(packet_scale));
	std::thread thread_scale(get_scale, codec_ctx, std::ref(packet_scale));

	thread_packet.join();
	thread_scale.join();

	avformat_close_input(&format_ctx);

	fprintf(stdout, "test finish\n");
	return 0;
}

执行结果如下:

GitHub: https://github.com/fengbingchun/OpenCV_Test

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值