Capturing a DirectShow camera with FFmpeg and rendering with SDL

Basics

FFmpeg requires the device subsystem to be registered before any capture device can be opened:

avdevice_register_all();
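Incidentally, once avdevice_register_all() has been called, FFmpeg itself can enumerate DirectShow devices, so CCameraDS is not strictly required. A minimal sketch (FFmpeg prints the device names to its log, and the open call is expected to fail):

// Sketch: ask the dshow input to list devices. The names appear in the
// FFmpeg log output (log level must be at least AV_LOG_INFO); the open
// itself fails by design, so its return value is ignored here.
AVFormatContext *ctx = avformat_alloc_context();
AVDictionary *opts = NULL;
av_dict_set(&opts, "list_devices", "true", 0);
AVInputFormat *dshow = av_find_input_format("dshow");
avformat_open_input(&ctx, "video=dummy", dshow, &opts);
av_dict_free(&opts);
avformat_close_input(&ctx);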

We use the common CCameraDS helper to obtain a camera name; here the first camera is used. Since SDL does the rendering, a timer is also started that periodically pushes an event telling SDL to refresh:

SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER);

So both the video and timer subsystems are initialized for this rendering scheme.

渲染流程

Rendering is done in BGR24; the SDL window is created with the OpenGL flag. The flow is:
1 Create the window.
2 Create the refresh thread.
3 The thread periodically pushes an update event.
4 The main loop catches the event, fetches one BGR24 frame, and redraws.

This could be restructured: the refresh thread fetches a frame itself and pushes it to the main thread, which then encodes and sends it while also rendering. This article is only a sample and does not do that, but a sketch of the variant follows below.
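A minimal sketch of that variant, assuming the TFFCap grabber shown later and the same user events; the frame pointer travels in the event's user.data1 field (a real implementation would copy or double-buffer the frame, since QueryFrame reuses one internal buffer):

int grab_thread(void *opaque)
{
	TFFCap *cap = (TFFCap *)opaque;  // passed via SDL_CreateThread's data argument
	while (!thread_exit)
	{
		uint8_t *frame = cap->QueryFrame();  // BGR24 buffer owned by TFFCap
		if (frame)
		{
			SDL_Event ev;
			ev.type = SFM_REFRESH_EVENT;
			ev.user.data1 = frame;  // main loop reads ev.user.data1 instead of calling QueryFrame
			SDL_PushEvent(&ev);
		}
		SDL_Delay(1000 / g_fps);
	}
	return 0;
}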

int main()
{
	avdevice_register_all();
	int w = 1280;
	int h = 720;
	g_fps = 10;
	std::wstring cname;
	CCameraDS::CameraName(0, cname);
	std::string name = UnicodeToUTF8(cname);
	TFFCap cap;
		
	int ret = cap.OpenCameraRGB(name.c_str(), 1280, 720);
	if (ret != 0)
		return -1;

	int					screen_w, screen_h;
	SDL_Window			*screen;
	SDL_Renderer		*sdlRenderer;
	SDL_Texture			*sdlTexture;
	SDL_Rect			sdlRect;
	SDL_Thread			*video_tid;
	SDL_Event			event;
	
	SDL_Init(SDL_INIT_VIDEO |  SDL_INIT_TIMER);
	screen_w = w;
	screen_h = h;
	screen = SDL_CreateWindow("FF", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h, SDL_WINDOW_OPENGL);

	if (!screen)
	{
		std::cout << "SDL: could not create window - exiting: " << SDL_GetError() << std::endl;
		return -1;
	}
	sdlRenderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
	sdlTexture = SDL_CreateTexture(sdlRenderer,
		SDL_PIXELFORMAT_BGR24,       // matches the BGR24 output of TFFCap
		SDL_TEXTUREACCESS_STREAMING,
		screen_w, screen_h);
	sdlRect.x = 0;
	sdlRect.y = 0;
	sdlRect.w = screen_w;
	sdlRect.h = screen_h;
	video_tid = SDL_CreateThread(sfp_refresh_thread, NULL, NULL);
	for (;;)
	{
		SDL_WaitEvent(&event);
		if (event.type == SFM_REFRESH_EVENT)
		{
			uint8_t *frame = cap.QueryFrame();

			if (frame != NULL)
			{
				SDL_UpdateTexture(sdlTexture, NULL, frame, w * 3); // pitch = width * 3 bytes (BGR24)

				SDL_RenderClear(sdlRenderer);
				SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, NULL);
				SDL_RenderPresent(sdlRenderer);
			}
		}
		else if (event.type == SDL_KEYDOWN)
		{
			if (event.key.keysym.sym == SDLK_SPACE)
				thread_pause = !thread_pause;
		}
		else if (event.type == SDL_QUIT)
		{
			thread_exit = 1;
		}
		else if (event.type == SFM_BREAK_EVENT)
		{
			break;
		}
	}
	SDL_WaitThread(video_tid, NULL);
	cap.UnInit();
	SDL_Quit();
	return 0;
}
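For reference, on Windows/MSVC the sample links against the usual FFmpeg and SDL2 import libraries; the names below assume standard prebuilt FFmpeg and SDL2 packages:

// Typical MSVC link pragmas for this sample (adjust to your build).
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "SDL2.lib")
#pragma comment(lib, "SDL2main.lib")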

The SDL user events and the refresh thread are as follows:

// The custom user events referenced in main(); the exact values are one
// common choice, any distinct offsets from SDL_USEREVENT work.
#define SFM_REFRESH_EVENT (SDL_USEREVENT + 1)
#define SFM_BREAK_EVENT   (SDL_USEREVENT + 2)

int thread_exit = 0;
int thread_pause = 0;

int g_fps = 30;
int sfp_refresh_thread(void *opaque) 
{
	thread_exit = 0;
	thread_pause = 0;

	while (!thread_exit) 
	{
		if (!thread_pause) 
		{
			SDL_Event event;
			event.type = SFM_REFRESH_EVENT;
			SDL_PushEvent(&event);
		}
		SDL_Delay(1000/g_fps);
	}
	thread_exit = 0;
	thread_pause = 0;
	//Break
	SDL_Event event;
	event.type = SFM_BREAK_EVENT;
	SDL_PushEvent(&event);
	return 0;
}

Every 1000/g_fps milliseconds the thread pushes a refresh event; the main loop then fetches a frame and renders it, and the same spot could of course also encode and send the frame. The signalling could just as well be done with standard C++ mutexes and condition variables instead of SDL events; the principle is exactly the same, as sketched below.
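A plain C++ pacing loop could look roughly like this; it is only a sketch of the idea, with the grab/render work happening on each timeout and the stop flag waking the thread early:

#include <chrono>
#include <condition_variable>
#include <mutex>

std::mutex g_mtx;
std::condition_variable g_cv;
bool g_stop = false; // set under g_mtx, then g_cv.notify_one() to stop

void pace_loop(int fps)
{
	std::unique_lock<std::mutex> lk(g_mtx);
	while (!g_stop)
	{
		// wait_for returns false on timeout (predicate still false):
		// that is the once-per-frame tick where the work happens.
		if (!g_cv.wait_for(lk, std::chrono::milliseconds(1000 / fps),
			[] { return g_stop; }))
		{
			// grab a frame here and render or encode it
		}
	}
}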

Finally, here is the TFFCap implementation.

TFFCap declaration (FFCap.h)

// FFCap.h -- headers the declaration depends on
#include <string>
extern "C" {
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
}
using std::string;

class TFFCap
{
public:
	int m_fps = 10;

	AVFrame * m_pFrame = NULL;
	AVFrame * m_pFrameDst = NULL;

	string m_name;

	uint8_t * out_buffer = NULL;

	int m_videoIndex = -1;
	struct SwsContext * m_img_convert_ctx = NULL;
	//int _pixelW;
	//int _pixelH;


private:
	AVFormatContext * m_pFormatCtx = NULL;
	AVCodecContext  * m_pCodecCtx = NULL;
	AVCodec         * m_pCodec = NULL;
	AVPacket        * m_pkt = NULL;
public:
	void Init()
	{
		avdevice_register_all();
	}
	int OpenCameraRGB(const char * utf8videoname, 
		int nWidth, int nHeight);

	uint8_t *QueryFrame();

	int m_w = 0;
	int m_h = 0;
//	int QueryFrame(uint8_t *buf, int *len);

	void UnInit();
	TFFCap();
	~TFFCap();
};

TFFCap implementation

#include "FFCap.h"
#include <stdio.h>
#include <sstream>
using namespace std;



TFFCap::TFFCap()
{
}


TFFCap::~TFFCap()
{
}


static AVDictionary *GetOptions(int videosize, int &fps)
{
	AVDictionary *options = NULL;
	switch (videosize)
	{
	case 1:
	{
		fps = 10;
		av_dict_set(&options, "video_size", "1280x720", 0);
		//const char * fname = av_get_pix_fmt_name((AVPixelFormat)13);
		//av_dict_set(&options, "pixel_format", fname, 0);
		av_dict_set_int(&options, "framerate", 10, 0);
		av_dict_set_int(&options, "rtbufsize", 2764800 / 10, 0);
		av_dict_set(&options, "start_time_realtime", 0, 0);
		//av_dict_set(&options, "mjpeg", 0, 0);
	}
	break;
	case 2:
		fps = 20;
		av_dict_set_int(&options, "framerate", 30, 0);
		av_dict_set(&options, "video_size", "640x480", 0);
		av_dict_set_int(&options, "rtbufsize", 640 * 480 * 3 / 15, 0);
		break;
	case 3:
		fps = 20;
		av_dict_set_int(&options, "framerate", 30, 0);
		av_dict_set(&options, "video_size", "320x240", 0);
		av_dict_set_int(&options, "rtbufsize", 76800 / 10, 0);
		break;
	case 4:
		fps = 20;
		av_dict_set_int(&options, "framerate", 30, 0);
		av_dict_set(&options, "video_size", "176x144", 0);
		//av_dict_set_int(&options, "rtbufsize", 76800 / 10, 0);
		break;
	}
	av_dict_set(&options, "start_time_realtime", 0, 0);
	return options;
}
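Note that GetOptions is never actually called below; OpenCameraRGB inlines the same switch on the width instead. If you prefer the helper, the intended usage would look roughly like this (the device name string is a placeholder):

int fps = 0;
AVDictionary *opts = GetOptions(1, fps); // preset 1 => 1280x720 @ 10 fps
AVInputFormat *ifmt = av_find_input_format("dshow");
AVFormatContext *fmt = avformat_alloc_context();
if (avformat_open_input(&fmt, "video=My Camera", ifmt, &opts) != 0)
	return -1;
av_dict_free(&opts); // entries the demuxer did not consume remain here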



int TFFCap::OpenCameraRGB(const char * utf8videoname,
	int w, int h)
{
	av_log_set_level(AV_LOG_FATAL);

	AVInputFormat *ifmt = nullptr;
	AVDictionary *options = NULL;


	string oldname = m_name;
	m_name = "video=";
	m_name += utf8videoname;

	if (oldname.compare(m_name) != 0)
	{
		// different device: tear down any previously opened contexts
		// (both calls are safe no-ops on NULL and reset the pointers)
		avcodec_free_context(&m_pCodecCtx);
		avformat_close_input(&m_pFormatCtx);
	}
	else
	{
		if (m_w == w && m_h == h)
			return 0; // same device and size: already open
		avcodec_free_context(&m_pCodecCtx);
		avformat_close_input(&m_pFormatCtx);
	}

	if (m_pFormatCtx == NULL)
	{
		m_pFormatCtx = avformat_alloc_context();
		ifmt = av_find_input_format("dshow");
	}
	m_pFormatCtx->flags |= AVFMT_FLAG_NOBUFFER;
	av_dict_set(&options, "start_time_realtime", 0, 0);
	switch (w)
	{
	case 1920:
		break;
	case 1280:
		m_fps = 10;
		av_dict_set(&options, "video_size", "1280x720", 0);
		// one BGR24 1280x720 frame is 1280*720*3 = 2764800 bytes
		av_dict_set_int(&options, "rtbufsize", 2764800 / 10, 0);
		break;
	case 640:
		m_fps = 20;
		av_dict_set_int(&options, "framerate", 30, 0);
		av_dict_set(&options, "video_size", "640x480", 0);
		av_dict_set_int(&options, "rtbufsize", 640 * 480 * 3 / 15, 0);
		break;
	case 320:
		m_fps = 20;
		av_dict_set_int(&options, "framerate", 30, 0);
		av_dict_set(&options, "video_size", "320x240", 0);
		av_dict_set_int(&options, "rtbufsize", 76800 / 10, 0);
		break;
	case 176:
		m_fps = 20;
		av_dict_set_int(&options, "framerate", 30, 0);
		av_dict_set(&options, "video_size", "176x144", 0);
		av_dict_set_int(&options, "rtbufsize", 76800 / 10, 0);
		break;
	}
	if (avformat_open_input(&m_pFormatCtx, m_name.c_str(), ifmt, &options) != 0)
	{
		return -1;
	}
	if (avformat_find_stream_info(m_pFormatCtx, NULL) < 0)
	{
		//std::cout << "avformat find stream info failed." << std::endl;
		return -1;
	}


	for (uint32_t i = 0; i < m_pFormatCtx->nb_streams; i++)
	{
		if (m_pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			//AVPixelFormat avf = (AVPixelFormat)m_pFormatCtx->streams[i]->codecpar->format;
			//const char* strfmt = av_get_pix_fmt_name(avf);
			//w = m_pFormatCtx->streams[i]->codecpar->width;
			//h = m_pFormatCtx->streams[i]->codecpar->height;
			m_videoIndex = i;
			break;
		}
	}
	if (-1 == m_videoIndex)
	{
		return -1;
	}
	if(m_pCodecCtx == NULL)
		m_pCodecCtx = avcodec_alloc_context3(NULL);
	if (m_pCodecCtx == NULL)
	{
		return -1;
	}
	avcodec_parameters_to_context(m_pCodecCtx, m_pFormatCtx->streams[m_videoIndex]->codecpar);
	m_pCodec = avcodec_find_decoder(m_pCodecCtx->codec_id);
	if (m_pCodec == NULL)
	{
		//std::cout << "AVCodec not found." << std::endl;
		return -1;
	}
	if (avcodec_open2(m_pCodecCtx, m_pCodec, NULL) < 0)
	{
		//std::cout << "codec open failed." << std::endl;
		return -1;
	}
	if(m_pFrame == NULL)
		m_pFrame = av_frame_alloc();
	if(m_pFrameDst == NULL)
		m_pFrameDst = av_frame_alloc();
	if(m_pkt == NULL)
		m_pkt = av_packet_alloc();
	AVPixelFormat avpf = AV_PIX_FMT_BGR24;
	if (m_w != w || m_h != h)
	{
		// size changed: release the old buffer so it is reallocated below.
		// The buffer came from av_malloc, so it must be freed with av_free;
		// av_freep also resets the pointer to NULL.
		av_freep(&out_buffer);
	}
	if (out_buffer == NULL)
	{
		out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(avpf,
			w, h, 1));
		av_image_fill_arrays(m_pFrameDst->data, m_pFrameDst->linesize, out_buffer,
			avpf, w, h, 1);
	}
	m_img_convert_ctx =
		sws_getCachedContext(m_img_convert_ctx,
			m_pCodecCtx->width, m_pCodecCtx->height, m_pCodecCtx->pix_fmt,
			w, h, avpf, SWS_POINT, NULL, NULL, NULL);
	
	m_w = w;
	m_h = h;
	return 0;
}



uint8_t *TFFCap::QueryFrame(/*int &len*/)
{
	if (m_pFormatCtx == NULL)
		return NULL;
	AVPacket packet;
	av_init_packet(&packet);

	int got_picture = 0;
	if (av_read_frame(m_pFormatCtx, &packet) >= 0)
	{
		if (packet.stream_index == m_videoIndex)
		{
			if (avcodec_send_packet(m_pCodecCtx, &packet) == 0)
				if (avcodec_receive_frame(m_pCodecCtx, m_pFrame) == 0)
				{
					got_picture = 1;
					sws_scale(m_img_convert_ctx,
						(const uint8_t* const*)m_pFrame->data,
						m_pFrame->linesize, 0,
						m_pCodecCtx->height,
						m_pFrameDst->data, m_pFrameDst->linesize);
				}
		}
		// unref unconditionally so packets from other streams do not leak
		av_packet_unref(&packet);
	}


	// return the frame converted in this call, or NULL if none was decoded
	if (got_picture)
		return out_buffer;
	return NULL;
}
void TFFCap::UnInit()
{
	av_frame_free(&m_pFrameDst);
	av_frame_free(&m_pFrame);
	av_packet_free(&m_pkt);

	if (m_img_convert_ctx != nullptr)
	{
		sws_freeContext(m_img_convert_ctx);
		m_img_convert_ctx = nullptr;
	}
	av_freep(&out_buffer);

	avcodec_free_context(&m_pCodecCtx);
	avformat_close_input(&m_pFormatCtx);
}

Result

(Screenshot: live camera frames rendered in the SDL window.)

Parts of the code could still be tightened up; it is meant only as a sample showing that the approach works.
