ffmpeg 播放音视频,time_base解决音频同步问题,SDL渲染画面

概要

        csdn上的资料,让我受益匪浅。我也想借此机会,把一些之前写过的项目分享出去,让这些存放在电脑上的资料可以分享给更多的朋友,发挥更大的价值。当然,项目中可能还存在一些bug,希望有疑问的朋友提出来,咱们一起解决。加油,撸起袖子加油干!

         安霸行车记录仪是一款携带Gps信息的行车记录仪。本文基于C++语言开发一套MFC框架的应用工具,解析安霸行车记录仪录制的mp4文件,获取音视频数据进行播放,并解析到gps数据显示到百度地图上,通过回放可以直观看到当前车辆所处的位置,同时,可以快速地浏览车辆是否按指定的路线行驶,有助于事后分析车辆的实际行驶路线。

 

 ffmpeg 调用流程

分配空间及初始化

av_mallocz();

av_register_all();

avformat_alloc_context();

打开文件

avformat_open_input();

查找解码器

avcodec_find_decoder();

打开解码器,分别有视频解码器AVMEDIA_TYPE_VIDEO,音频解码器AVMEDIA_TYPE_AUDIO,字幕解码器AVMEDIA_TYPE_SUBTITLE

avcodec_open2();

另外还有可以通过以下接口获取字幕解码器

avcodec_decode_subtitle2();

读取一帧数据

av_frame_alloc();

av_init_packet();

av_read_frame();

跳转到指定位置

av_seek_frame();

释放空间

av_frame_free();

av_free();

关闭

avcodec_close();

avformat_close_input();

视频的渲染使用SDL

具体的代码如下,这里只是逻辑上的代码,UI界面如有需要请联系本人,电话号码18824182332(微信同号),谢谢!!

/*
========================================================================
File name:        ztplayerDll.h
Module:           Public API of the ztplayer DLL — an ffmpeg + SDL based
                  player for Ambarella dash-cam MP4 files with a GPS
                  subtitle track.
Author:           Zhongtang Studio (zt) 18824182332
Create Time:    2016/12/10 10:41:00
Modify By:
Modify Date:
========================================================================
*/
#ifndef __ZONTTANG_ZTPLAYERDLL_H__
#define __ZONTTANG_ZTPLAYERDLL_H__

// This translation unit builds the DLL, so the API is exported;
// consumers that do not define ZONGTANG_H_DLL_EXPORTS get dllimport.
#define ZONGTANG_H_DLL_EXPORTS  
#ifdef ZONGTANG_H_DLL_EXPORTS  
#define ZONGTANGDLL_API __declspec(dllexport)   
#else  
#define ZONGTANGDLL_API __declspec(dllimport)   
#endif

// Needed so the C ffmpeg headers define INT64_C etc. under C++.
#define __STDC_CONSTANT_MACROS

#include "const.h"

// Invoked for every rendered packet — used as a playback-progress callback.
typedef void(*FrameCallBack)(const AVPacket* packet);
// Invoked once when playback reaches the end of the file.
typedef void(*FrameEndCallBack)();

// Invoked once when the background GPS-track analysis has finished.
typedef void(*AnalysisGpsEndCallBack)();

// Allocate the global player state and register ffmpeg; returns it via *p.
ZONGTANGDLL_API void initSDK(VideoState** p);
// Open a media file; on success `duration` holds the length in AV_TIME_BASE units.
ZONGTANGDLL_API int openFile(char filepath[], int64_t& duration);
ZONGTANGDLL_API int setFrameCallback(FrameCallBack _callback);
ZONGTANGDLL_API int setFrameEndCallback(FrameEndCallBack _callback);
// Open decoders for the video/audio/subtitle streams (video is mandatory).
ZONGTANGDLL_API int initCodec();
// Attach SDL rendering to an existing window handle (call after initCodec).
ZONGTANGDLL_API int setWindownHandle(HWND handle);
// Start the demux / video / refresh threads.
ZONGTANGDLL_API int play();
// Seek to `timestamp` (video stream time_base units).
ZONGTANGDLL_API int seek(int64_t timestamp);
// true = pause, false = resume.
ZONGTANGDLL_API int pause(bool enable);
// Returns true while playing, false while paused.
ZONGTANGDLL_API bool getState();
// Tear down threads, codecs and SDL resources.
ZONGTANGDLL_API int desSDK();

ZONGTANGDLL_API int setVolumeEnable(bool enable);
ZONGTANGDLL_API int setVolume(double value);

// Scan the file's subtitle track for GPS fixes (runs on a worker thread).
ZONGTANGDLL_API int analysisGps();
ZONGTANGDLL_API int setAnalysisGpsEndCallback(AnalysisGpsEndCallBack _callback);

// Save the current frame as an image under `Dir`; returns the name via `filename`.
ZONGTANGDLL_API int saveFrame(char** filename, LPTSTR Dir);

#endif
/*
========================================================================
File name:        ztplayerDll.cpp
Module:
Author:            中唐工作室(zt)18824182332
Create Time:    2016/12/10 10:41:00
Modify By:
Modify Date:
========================================================================
*/
#pragma once
#include "stdafx.h"
#include "tools.h"
#include "mapUtils.h"

// Global playback state shared by the demux, video, audio and refresh threads.
bool mPlaying = false;	// false = paused, true = playing

VideoState *global_video_state;	// the single player instance (set by initSDK)
FrameCallBack frameCallBack;	// progress callback, may be NULL
FrameEndCallBack frameEndCallBack;	// end-of-file callback, may be NULL
AnalysisGpsEndCallBack analysisGpsEndCallBack;	// GPS-analysis-done callback, may be NULL
PictureHandle* global_picture_handle;	// SDL renderer/texture + converted YUV frame
BOOL m_Release;	// set TRUE by desSDK() to make all threads bail out

#pragma region 音频模块

/**
 * Decode packets from the audio queue into PCM ready for SDL.
 *
 * Pulls one AVPacket at a time, decodes it (a packet may contain several
 * frames), resamples via libswresample whenever the decoded format differs
 * from the SDL target format, and advances is->audio_clock — the master
 * clock used for A/V sync.
 *
 * Returns the number of bytes placed in is->audio_buf (pts written to
 * *pts_ptr), or a negative value on quit/error.
 */
int audio_decode_frame(VideoState *is, double *pts_ptr) {
	int len1, len2, decoded_data_size, n;
	AVPacket *pkt = &is->audio_pkt;
	int got_frame = 0;
	int64_t dec_channel_layout;
	int wanted_nb_samples, resampled_data_size;

	double pts = 0;

	while (1) {
		while (is->audio_pkt_size > 0) {
			if (!is->audio_frame) {
				if (!(is->audio_frame = av_frame_alloc())) {
					return AVERROR(ENOMEM);
				}
			}
			else
				av_frame_unref(is->audio_frame);

			if (is->audio_st == NULL)
			{
				break;
			}
			if (m_Release)
			{
				break;
			}
			/*
			 * An audio AVPacket may hold several AVFrames, and some decoders
			 * only return the first one per call — keep decoding until the
			 * packet is fully consumed.
			 */
			len1 = avcodec_decode_audio4(is->audio_st->codec, is->audio_frame, &got_frame, pkt);
			if (len1 < 0) {
				is->audio_pkt_size = 0;
				printf("break\n");
				break;
			}


			is->audio_pkt_data += len1;
			is->audio_pkt_size -= len1;

			if (got_frame <= 0)
				continue;
			// At this point we hold a fully decoded AVFrame.
			decoded_data_size = av_samples_get_buffer_size(NULL,
				is->audio_frame->channels, is->audio_frame->nb_samples,
				(AVSampleFormat)is->audio_frame->format, 1);

			// Channel layout of this frame (e.g. stereo); fall back to the
			// default layout when the reported one is inconsistent.
			dec_channel_layout =
				(is->audio_frame->channel_layout
				&& is->audio_frame->channels
				== av_get_channel_layout_nb_channels(
				is->audio_frame->channel_layout)) ?
				is->audio_frame->channel_layout :
				av_get_default_channel_layout(
				is->audio_frame->channels);

			// Samples per channel in this frame.
			wanted_nb_samples = is->audio_frame->nb_samples;
			/*
			 * Compare the frame against the format negotiated with SDL
			 * (AV_SAMPLE_FMT_S16, target layout/rate). If anything differs
			 * we must swr_convert the frame before SDL can play it.
			 */
			if (is->audio_frame->format != is->audio_src_fmt
				|| dec_channel_layout != is->audio_src_channel_layout
				|| is->audio_frame->sample_rate != is->audio_src_freq
				|| (wanted_nb_samples != is->audio_frame->nb_samples
				&& !is->swr_ctx)) {
				if (is->swr_ctx)
					swr_free(&is->swr_ctx);
				is->swr_ctx = swr_alloc_set_opts(NULL,
					is->audio_tgt_channel_layout, is->audio_tgt_fmt,
					is->audio_tgt_freq, dec_channel_layout,
					(AVSampleFormat)is->audio_frame->format, is->audio_frame->sample_rate,
					0, NULL);
				if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
					fprintf(stderr, "swr_init() failed\n");
					break;
				}
				is->audio_src_channel_layout = dec_channel_layout;
				is->audio_src_channels = is->audio_st->codec->channels;
				is->audio_src_freq = is->audio_st->codec->sample_rate;
				is->audio_src_fmt = is->audio_st->codec->sample_fmt;
			}

			// With a valid swr_ctx, resample into audio_buf2.
			if (is->swr_ctx) {
				const uint8_t **in =
					(const uint8_t **)is->audio_frame->extended_data;
				uint8_t *out[] = { is->audio_buf2 };
				if (wanted_nb_samples != is->audio_frame->nb_samples) {
					fprintf(stdout, "swr_set_compensation \n");
					if (swr_set_compensation(is->swr_ctx,
						(wanted_nb_samples - is->audio_frame->nb_samples)
						* is->audio_tgt_freq
						/ is->audio_frame->sample_rate,
						wanted_nb_samples * is->audio_tgt_freq
						/ is->audio_frame->sample_rate) < 0) {
						fprintf(stderr, "swr_set_compensation() failed\n");
						break;
					}
				}

				/*
				 * Convert the frame into the SDL format. Skipping this step
				 * is why some older examples play certain files and not
				 * others (e.g. sources that already happen to be S16).
				 * swr_convert returns the number of samples per channel.
				 */
				len2 = swr_convert(is->swr_ctx, out,
					sizeof(is->audio_buf2) / is->audio_tgt_channels
					/ av_get_bytes_per_sample(is->audio_tgt_fmt),
					in, is->audio_frame->nb_samples);
				if (len2 < 0) {
					fprintf(stderr, "swr_convert() failed\n");
					break;
				}
				if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
					fprintf(stderr, "warning: audio buffer is probably too small\n");
					swr_init(is->swr_ctx);
				}
				is->audio_buf = is->audio_buf2;

				// samples/channel x channels x bytes/sample
				resampled_data_size = len2 * is->audio_tgt_channels
					* av_get_bytes_per_sample(is->audio_tgt_fmt);
			}
			else {
				resampled_data_size = decoded_data_size;
				is->audio_buf = is->audio_frame->data[0];
			}

			pts = is->audio_clock;
			*pts_ptr = pts;
			n = 2 * is->audio_st->codec->channels;	// 2 bytes/sample (S16)
			is->audio_clock += (double)resampled_data_size / (double)(n * is->audio_st->codec->sample_rate);
			return resampled_data_size;
		}

		if (pkt->data)
			av_free_packet(pkt);
		if (is->quit)
			return -1;

		if (!mPlaying)
		{
			// Paused: feed silence so the SDL callback keeps running.
			if (is->audio_buf != NULL)
			{
				is->audio_buf_size = 1024;
				memset(is->audio_buf, 0, is->audio_buf_size);
			}
			SDL_Delay(10);
			continue;
		}
		if (tools::packet_queue_get(&is->audioq, pkt, is->decodeState) < 0)
			return -1;
		if (is->audio_clock < 0)
		{
			// Fix: this was the no-op statement `connect;` (evaluating the
			// winsock function pointer); the intent was to drop the packet.
			continue;
		}
		is->audio_pkt_data = pkt->data;
		is->audio_pkt_size = pkt->size;

		// A packet pts restarts the audio clock at its exact position.
		if (pkt->pts != AV_NOPTS_VALUE)
		{
			is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
		}
	}
}

/**
 * SDL audio callback: fill `stream` with `len` bytes of PCM.
 *
 * Refills is->audio_buf via audio_decode_frame() whenever the previous
 * buffer is exhausted; on decode failure it outputs silence so the device
 * keeps running.
 */
void audio_callback(void *userdata, Uint8 *stream, int len) {
	VideoState *is = (VideoState *)userdata;
	int len1, audio_data_size = 0;
	double pts;

	while (len > 0)
	{
		if (global_video_state->quit)
		{
			break;
		}
		if (m_Release)
		{
			return;
		}
		if (is->audio_buf_index >= is->audio_buf_size) {
			if (is->audioq.size > 0)
			{
				audio_data_size = audio_decode_frame(is, &pts);
			}
			if (audio_data_size < 0) {
				// Fix: this message used to print unconditionally with a
				// stray argument and no matching format specifier.
				fprintf(stderr, "audio_decode_frame failed: %d\n", audio_data_size);
				/* silence */
				is->audio_buf_size = 1024;
				memset(is->audio_buf, 0, is->audio_buf_size);
			}
			else {
				is->audio_buf_size = audio_data_size;
			}
			is->audio_buf_index = 0;
		}
		else
		{
			// Copy as much of the decoded buffer as SDL still wants.
			len1 = is->audio_buf_size - is->audio_buf_index;
			if (len1 > len) {
				len1 = len;
			}

			memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
			len -= len1;
			stream += len1;
			is->audio_buf_index += len1;
		}
	}
}
/**
 * Open the SDL audio device for stream `stream_index`.
 *
 * Negotiates channel count with SDL (retrying with the fallback table when
 * SDL rejects a layout), records the agreed source/target sample format in
 * `is`, and unpauses the device for audio streams.
 * Returns 0 on success, -1 on failure.
 */
int stream_component_open(VideoState *is, unsigned int stream_index) {
	global_video_state = is;
	AVFormatContext *ic = global_video_state->pFormatCtx;
	AVCodecContext *codecCtx;
	SDL_AudioSpec wanted_spec, spec;
	int64_t wanted_channel_layout = 0;
	int wanted_nb_channels;
	// Fallback channel counts to retry when SDL rejects the requested one.
	const int next_nb_channels[] = { 0, 0, 1, 6, 2, 6, 4, 6 };

	// Fix: `stream_index < 0` was always false (unsigned parameter).
	if (stream_index >= ic->nb_streams) {
		return -1;
	}

	codecCtx = ic->streams[stream_index]->codec;
	wanted_nb_channels = codecCtx->channels;
	if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
		wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
		wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
	}

	wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
	wanted_spec.freq = codecCtx->sample_rate;
	if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
		fprintf(stderr, "Invalid sample rate or channel count!\n");
		return -1;
	}
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = is;

	while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
		wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
		if (!wanted_spec.channels) {
			// Fix: "to tyu" typo in the original message.
			fprintf(stderr, "No more channel combinations to try, audio open failed\n");
			return -1;
		}
		wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
	}

	if (spec.format != AUDIO_S16SYS) {
		fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
		return -1;
	}
	if (spec.channels != wanted_spec.channels) {
		wanted_channel_layout = av_get_default_channel_layout(spec.channels);
		if (!wanted_channel_layout) {
			fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
			return -1;
		}
	}

	// Record what SDL actually gave us; audio_decode_frame resamples to this.
	is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
	is->audio_src_freq = is->audio_tgt_freq = spec.freq;
	is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
	is->audio_src_channels = is->audio_tgt_channels = spec.channels;

	ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
	switch (codecCtx->codec_type) {
	case AVMEDIA_TYPE_AUDIO:
		SDL_PauseAudio(0);	// start the callback pump
		break;
	default:
		break;
	}
	return 0;
}


#pragma endregion

uint64_t global_video_pkt_pts = AV_NOPTS_VALUE;

/**
 * Keep the video clock in step with decoded frames.
 *
 * When the frame carries a usable pts, the clock is reset to it; otherwise
 * the frame inherits the current clock. The clock then advances by one
 * frame duration (extended for repeated fields). Returns the pts to use
 * for this frame.
 */
double synchronize_video(VideoState *is, AVFrame *src_frame, double pts)
{
	if (pts != 0) {
		is->video_clock = pts;	// trust the frame's own timestamp
	} else {
		pts = is->video_clock;	// no timestamp: extrapolate from the clock
	}
	is->video_dts = src_frame->pkt_dts;

	// One nominal frame duration, plus half a duration per repeated field.
	double frame_duration = av_q2d(is->video_st->codec->time_base);
	frame_duration += src_frame->repeat_pict * (frame_duration * 0.5);
	is->video_clock += frame_duration;

	return pts;
}


void alloc_picture(void *userdata)
{
	VideoState *is = (VideoState *)userdata;
	VideoPicture *vp;

	vp = &is->pictq[is->pictq_windex];

	vp->width = is->video_st->codec->width;
	vp->height = is->video_st->codec->height;

	SDL_LockMutex(is->pictq_mutex);
	vp->allocated = 1;
	SDL_CondSignal(is->pictq_cond);
	SDL_UnlockMutex(is->pictq_mutex);
}


/**
 * Convert a decoded frame to YUV420P and render it through SDL.
 *
 * The SwsContext is created lazily on first use and cached for the process
 * lifetime (it is never freed — NOTE(review): assumes a single, constant
 * video resolution per process).
 */
static void display_picture(AVPacket *packet, AVFrame* pFrame)
{
	if (global_video_state->quit)
	{
		return;
	}
	if (global_picture_handle == NULL)
	{
		return;
	}
	AVCodecContext *	pCodecCtx = global_video_state->video_st->codec;
	AVFrame *pFrameYUV = global_picture_handle->pFrameYUV;
	global_picture_handle->pFrame = pFrame;	// kept for the snapshot (saveFrame) feature
	static struct SwsContext *img_convert_ctx;

	if (img_convert_ctx == NULL)
	{
		img_convert_ctx = sws_getContext(global_video_state->video_st->codec->width, global_video_state->video_st->codec->height,
			global_video_state->video_st->codec->pix_fmt,
			global_video_state->video_st->codec->width, global_video_state->video_st->codec->height,
			PIX_FMT_YUV420P,
			SWS_BICUBIC, NULL, NULL, NULL);
		if (img_convert_ctx == NULL)
		{
			// Fix: message used "/n" instead of a newline.
			fprintf(stderr, "Cannot initialize the conversion context\n");
			exit(1);
		}
	}
	if (pFrame == NULL || pFrameYUV == NULL)
	{
		return;
	}
	sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
		pFrameYUV->data, pFrameYUV->linesize);

	// (A duplicate NULL re-check that followed sws_scale was removed: both
	// pointers were validated above and cannot change in between.)
	SDL_UpdateYUVTexture(global_picture_handle->sdlTexture, &global_picture_handle->srcRect,
		pFrameYUV->data[0], pFrameYUV->linesize[0],
		pFrameYUV->data[1], pFrameYUV->linesize[1],
		pFrameYUV->data[2], pFrameYUV->linesize[2]);

	SDL_RenderClear(global_picture_handle->sdlRenderer);
	SDL_RenderCopy(global_picture_handle->sdlRenderer, global_picture_handle->sdlTexture, &global_picture_handle->srcRect, &global_picture_handle->sdlRect);
	SDL_RenderPresent(global_picture_handle->sdlRenderer);
}

double first_subtitle_last_pts = 0;	// video pts when the last subtitle was shown
double first_subtitle_delay = 0;	// display duration (seconds) of that subtitle

/**
 * Decode and deliver the next subtitle once the previous one has expired.
 *
 * Timing is driven off the video clock (frame_last_pts). A sudden pts jump
 * (seek/"drag") bypasses the expiry check so subtitles resynchronize.
 */
void display_subtitle(VideoState *is)
{
	AVPacket pkt1, *packet = &pkt1;
	int frameFinished = 0;

	if (global_video_state->quit)
	{
		return;
	}
	// Fix: without a subtitle stream the subtitleq was never initialized,
	// so packet_queue_get would touch a NULL mutex.
	if (is->subtitle_st == NULL)
	{
		return;
	}

	double delay = is->frame_last_pts - first_subtitle_last_pts;

	bool drag = false;	// set after a seek so subtitles resync immediately
	if (delay <= 0 || delay >= 1.0)
	{
		delay = is->frame_last_delay;
		drag = true;
	}

	// Previous subtitle is still on screen: nothing to do.
	if ((is->frame_last_pts < first_subtitle_last_pts + first_subtitle_delay) && !drag)
	{
		return;
	}

	if (tools::packet_queue_get(&is->subtitleq, packet, 0) > 0)
	{
		avcodec_decode_subtitle2(is->subtitle_st->codec, is->pSubtitle, &frameFinished, packet);

		first_subtitle_last_pts = is->frame_last_pts;
		double duration_ms = is->pSubtitle->end_display_time - is->pSubtitle->start_display_time;
		first_subtitle_delay = duration_ms / 1000.0;
		if (frameFinished > 0)
		{
			// Fix: guard the callback like queue_picture() does.
			if (frameCallBack != NULL)
			{
				frameCallBack(packet);
			}
		}
	}
}


/**
 * Insert a decoded frame into the picture queue and render it right away.
 *
 * Blocks while the queue is full. Returns -1 when the player is quitting,
 * 0 otherwise. Also fires the progress callback with the source packet.
 */
int queue_picture(VideoState *is, AVFrame *pFrame, double pts, AVPacket* pkt)
{
	// Wait for a free slot, unless quit is requested meanwhile.
	SDL_LockMutex(is->pictq_mutex);
	while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit)
	{
		SDL_CondWait(is->pictq_cond, is->pictq_mutex);
	}
	SDL_UnlockMutex(is->pictq_mutex);

	if (global_video_state->quit)
		return -1;

	VideoPicture *slot = &is->pictq[is->pictq_windex];
	slot->pkt = pkt;
	slot->pFrame = pFrame;

	display_picture(pkt, pFrame);

	// Report playback progress to the host application.
	if (frameCallBack != NULL)
	{
		frameCallBack(pkt);
	}

	slot->pts = pts;
	is->pictq_windex = (is->pictq_windex + 1) % VIDEO_PICTURE_QUEUE_SIZE;

	SDL_LockMutex(is->pictq_mutex);
	is->pictq_size++;
	SDL_UnlockMutex(is->pictq_mutex);
	return 0;
}


/**
 * Video decoding thread.
 *
 * Pulls packets from the video queue, decodes them, derives each frame's
 * pts (decoder-saved opaque pts first, then packet dts, else 0) and hands
 * finished frames to queue_picture() for rendering.
 */
int video_thread(void *arg)
{
	VideoState *is = (VideoState *)arg;
	AVPacket pkt1, *packet = &pkt1;
	int len1, frameFinished;
	AVFrame *pFrame;
	double pts;

#if Debug
	pFrame = avcodec_alloc_frame();
#else 
	pFrame = av_frame_alloc();
#endif

	for (;;)
	{
		if (global_video_state->quit)
		{
			break;
		}
		if (m_Release)
		{
			break;
		}
		if (!mPlaying)
		{
			// Paused: idle without consuming packets.
			SDL_Delay(10);
			continue;
		}

		if (tools::packet_queue_get(&is->videoq, packet, is->decodeState) < 0)
		{
			break;
		}
		pts = 0;
		global_video_pkt_pts = packet->pts;
		len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, packet);
		if (len1 < 0)
		{
			// Fix: decode errors were silently ignored; skip the bad packet.
			fprintf(stderr, "avcodec_decode_video2 failed\n");
			av_free_packet(packet);
			continue;
		}
		if (packet->dts == AV_NOPTS_VALUE
			&& pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE)
		{
			pts = *(uint64_t *)pFrame->opaque;
		}
		else if (packet->dts != AV_NOPTS_VALUE)
		{
			pts = packet->dts;
		}
		else
		{
			pts = 0;
		}
		pts *= av_q2d(is->video_st->time_base);	// stream units -> seconds
		if (frameFinished)
		{
			pts = synchronize_video(is, pFrame, pts);
			if (queue_picture(is, pFrame, pts, packet) < 0)
			{
				break;
			}
		}
		av_free_packet(packet);
	}
	av_free(pFrame);
	return 0;
}


/**
 * SDL timer callback: post a FF_REFRESH_EVENT carrying the VideoState.
 * Returning 0 makes the timer one-shot; video_refresh_timer re-arms it.
 */
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
	SDL_Event refresh_event;
	refresh_event.type = FF_REFRESH_EVENT;
	refresh_event.user.data1 = opaque;
	SDL_PushEvent(&refresh_event);
	return 0;	// one-shot
}

// Id of the most recently armed refresh timer (removed on quit).
SDL_TimerID _timeId = 0;
// Arm a one-shot SDL timer that fires FF_REFRESH_EVENT after `delay` ms.
static void schedule_refresh(VideoState *is, int delay)
{
	_timeId = SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/**
 * Heart of A/V sync, driven by FF_REFRESH_EVENT.
 *
 * Computes the delay until the next frame should be shown by comparing the
 * frame's pts against the audio clock (the master clock), re-arms the
 * refresh timer with that delay, shows subtitles, and advances the picture
 * queue read index. Fires the end-of-file callback once playback position
 * reaches the file duration.
 */
static void video_refresh_timer(void *userdata)
{
	VideoState *is = (VideoState *)userdata;
	VideoPicture *vp;
	double actual_delay, delay, sync_threshold, ref_clock, diff;

	if (is->pFormatCtx == NULL)
	{
		return;
	}
	if (is->video_st == NULL)
	{
		return;
	}
	if (global_video_state->quit)
	{
		// Stop re-arming the refresh timer on shutdown.
		if (_timeId != 0)
		{
			SDL_RemoveTimer(_timeId);
		}
		return;
	}
	// End-of-file detection: compare the current packet position (seconds)
	// against the total duration (seconds).
	double allTime = is->pFormatCtx->duration* av_q2d(is->video_st->time_base);
	int64_t pos = global_video_pkt_pts* av_q2d(is->video_st->codec->pkt_timebase);
	int64_t timestamp = is->pFormatCtx->duration / AV_TIME_BASE;
	bool end = pos >= timestamp;
	if (end)
	{
		// Playback finished: notify the host application.
		if (frameEndCallBack != NULL)
		{
			printf(" acount: %d  vcount: %d   atime:%f vtime:  %f\n",
				global_video_state->audioq.nb_packets, global_video_state->videoq.nb_packets,
				global_video_state->audio_clock, global_video_state->video_clock);
			frameEndCallBack();
		}
	}

	if (is->video_st) {
		if (is->pictq_size == 0) {
			// Nothing decoded yet: poll again shortly.
			schedule_refresh(is, 10);
		}
		else {

			vp = &is->pictq[is->pictq_rindex];
			// Nominal inter-frame delay; fall back to the last good value
			// on pts discontinuities (e.g. after a seek).
			delay = vp->pts - is->frame_last_pts;
			if (delay <= 0 || delay >= 1.0)
			{
				delay = is->frame_last_delay;
			}
			is->frame_last_delay = delay;
			is->frame_last_pts = vp->pts;

			// Sync against the audio clock: drop the delay when video lags,
			// double it when video is ahead.
			ref_clock = tools::get_audio_clock(is);
			diff = vp->pts - ref_clock;

			sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
			if (fabs(diff) < AV_NOSYNC_THRESHOLD)
			{
				if (diff <= -sync_threshold)
				{
					delay = 0;
				}
				else if (diff >= sync_threshold)
				{
					delay = 2 * delay;
				}
			}
			// Convert the target wall-clock time into an actual timer delay,
			// never below 10 ms so the event loop keeps breathing.
			is->frame_timer += delay;
			actual_delay = is->frame_timer - (tools::av_gettime() / 1000000.0);
			if (actual_delay < 0.010)
			{
				actual_delay = 0.010;
			}
			schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
			display_subtitle(is);

			// Advance the ring-buffer read index and release one slot.
			if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
			{
				is->pictq_rindex = 0;
			}
			SDL_LockMutex(is->pictq_mutex);
			is->pictq_size--;
			SDL_CondSignal(is->pictq_cond);
			SDL_UnlockMutex(is->pictq_mutex);
		}
	}
	else {
		schedule_refresh(is, 100);
	}
}


/**
 * Find the first stream of media type `type`, open its decoder and set up
 * the matching per-stream state (packet queues, timers, frame buffers).
 *
 * Returns the stream index on success, -1 when no such stream exists or
 * the decoder cannot be found/opened. The codec context and codec are
 * returned through the out-parameters.
 */
int getCodeByType(AVMediaType type, AVCodecContext**pCodecCtx, AVCodec**pCodec)
{
	int index = -1;
	for (unsigned int i = 0; i < global_video_state->pFormatCtx->nb_streams; i++)
	{
		if (global_video_state->pFormatCtx->streams[i]->codec->codec_type == type){
			index = (int)i;
			break;
		}
	}
	if (index == -1)
	{
		return -1;
	}
	*pCodecCtx = global_video_state->pFormatCtx->streams[index]->codec;
	*pCodec = avcodec_find_decoder((*pCodecCtx)->codec_id);
	// Fix: the original tested `pCodec` (the out-parameter, never NULL)
	// instead of the decoder it points to.
	if (*pCodec == NULL){
		printf("Codec not found.\n");
		return -1;
	}
	// Open the decoder.
	if (avcodec_open2(*pCodecCtx, *pCodec, NULL) < 0){
		printf("Could not open codec.\n");
		return -1;
	}
	if (type == AVMEDIA_TYPE_VIDEO)
	{
		global_video_state->video_st = global_video_state->pFormatCtx->streams[index];
		global_video_state->videoStream = index;

		tools::packet_queue_init(&global_video_state->videoq);

		tools::packet_queue_flush(&global_video_state->videoq);

		global_video_state->frame_timer = (double)tools::av_gettime() / 1000000.0;
		global_video_state->frame_last_delay = 40e-3;	// ~25 fps default

		int width = global_video_state->video_st->codec->width;
		int height = global_video_state->video_st->codec->height;
		AVPixelFormat pix_fmt = global_video_state->video_st->codec->pix_fmt;

		// Allocate the render handle and the YUV420P conversion target.
		global_picture_handle = (PictureHandle *)av_malloc(sizeof(PictureHandle));
		global_picture_handle->pFrame = av_frame_alloc();
		global_picture_handle->pFrameYUV = av_frame_alloc();

		uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, width, height));
		avpicture_fill((AVPicture *)global_picture_handle->pFrameYUV, out_buffer, PIX_FMT_YUV420P, width, height);

	}

	if (type == AVMEDIA_TYPE_AUDIO)
	{
		global_video_state->audioStream = index;
		global_video_state->audio_st = global_video_state->pFormatCtx->streams[index];
		global_video_state->audio_buf_size = 0;
		global_video_state->audio_buf_index = 0;
		memset(&global_video_state->audio_pkt, 0, sizeof(global_video_state->audio_pkt));
		tools::packet_queue_init(&global_video_state->audioq);

		tools::packet_queue_flush(&global_video_state->audioq);

	}
	if (type == AVMEDIA_TYPE_SUBTITLE)
	{
		global_video_state->subtitle_st = global_video_state->pFormatCtx->streams[index];
		global_video_state->subtitleStream = index;
		global_video_state->pSubtitle = (AVSubtitle *)av_malloc(sizeof(AVSubtitle));
		tools::packet_queue_init(&global_video_state->subtitleq);
		tools::packet_queue_flush(&global_video_state->subtitleq);

	}
	return index;
}

/**
 * Demux thread: reads packets from the file and routes them into the
 * video/audio/subtitle queues (ownership of each packet transfers to the
 * queue, so packets are NOT freed here). Throttles when queues are full or
 * playback is paused; sets decodeState=0 at end of file so consumers can
 * drain and stop. Lingers until quit so the thread handle stays valid.
 */
static int decode_thread(void *arg)
{
	VideoState *is = (VideoState *)arg;
	int ret = 0;
	AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	av_init_packet(packet);
	while (true){
		if (global_video_state->quit)
			break;

		// Back-pressure: don't read ahead past the queue limits.
		if (is->audioq.size > MAX_AUDIOQ_SIZE ||
			is->videoq.size > MAX_VIDEOQ_SIZE) {
			SDL_Delay(10);
			continue;
		}
		if (!mPlaying)
		{
			SDL_Delay(10);
			continue;
		}
		ret = av_read_frame(is->pFormatCtx, packet);
		if (ret < 0) {
			if (ret == AVERROR_EOF || url_feof(is->pFormatCtx->pb)) {
				printf(" acount: %d  vcount: %d   atime:%f vtime:  %f\n",
					is->audioq.nb_packets, is->videoq.nb_packets,
					is->audio_clock, is->video_clock);
				is->decodeState = 0;	// tells consumers the file is exhausted
				break;
			}
			if (is->pFormatCtx->pb && is->pFormatCtx->pb->error) {
				break;
			}
			continue;	// transient read error: retry
		}

		// Route the packet to the queue for its stream.
		if (packet->stream_index == is->videoStream)
		{
			tools::packet_queue_put(&is->videoq, (AVPacket*)packet);
		}
		if (packet->stream_index == is->audioStream)
		{
			tools::packet_queue_put(&is->audioq, (AVPacket*)packet);
		}
		if (packet->stream_index == is->subtitleStream)
		{
			tools::packet_queue_put(&is->subtitleq, (AVPacket*)packet);
		}
	}
	printf("decode_thread finish");
	// Keep the thread alive until quit so teardown order stays simple.
	while (!is->quit)
	{
		SDL_Delay(10);
	}
	return 0;
}

LatLng lastGps;

static int analysis_thread(void *arg)
{
	SDL_Delay(500);
	VideoState *is = (VideoState *)arg;
	int ret = 0;
	int frameFinished = 0;
	AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	av_init_packet(packet);
	lastGps = LatLng();
	AVSubtitle *pSubtitle = (AVSubtitle *)av_malloc(sizeof(AVSubtitle));
	while (true){
		if (global_video_state->quit)
			break;
		if (m_Release)
		{
			break;
		}
		ret = av_read_frame(is->pFormatCtx, packet);
		if (ret < 0) {
			if (ret == AVERROR_EOF || url_feof(is->pFormatCtx->pb)) {
				printf(" acount: %d  vcount: %d   atime:%f vtime:  %f\n",
					is->audioq.nb_packets, is->videoq.nb_packets,
					is->audio_clock, is->video_clock);

				break;
			}
			if (is->pFormatCtx->pb && is->pFormatCtx->pb->error) {
				break;
			}
			continue;
		}
		if (is->quit)
			break;
		if (packet->stream_index == is->subtitleStream)
		{
			int len1 = avcodec_decode_subtitle2(is->subtitle_st->codec, pSubtitle, &frameFinished, (AVPacket*)packet);
			if (len1 < 0)
			{
				continue;
			}
			if (frameFinished>0)
			{
				AVSubtitleRect*  rect = *(pSubtitle->rects);
				char * title = rect->ass;
				std::vector<std::string> list = tools::split(title, ",");

				string Time = "";
				if (list.size() > 1)
				{
					Time = list.at(1);
				}
				string Lat = "0";
				if (list.size() > 7)
				{
					Lat = list.at(7);
				}

				string Log = "0";
				if (list.size() > 9)
				{
					Log = list.at(9);
				}

				string Speed = "";
				if (list.size() > 11)
				{
					Speed = list.at(11);
				}

				string Date = "";
				if (list.size() > 12)
				{
					Date = list.at(12);
				}
				double log = atof(Log.c_str());
				double lat = atof(Lat.c_str());
				if (log == 0 || lat == 0)
				{
					continue;
				}

				if (lastGps.lat != 0 && lastGps.lng != 0)
				{
					double tmpdistance1 = mapUtils::getDisance(lat, log, lastGps.lat, lastGps.lng);

					double speed1 = atof(Speed.c_str());
					speed1 = speed1*1.852;
					double time1 = 10.0 / 100;
					double speed2 = (tmpdistance1 / time1)* 3.6;

					//发生漂移
					if (speed2 > speed1)
					{
						continue;
					}
				}
				lastGps = LatLng();
				lastGps.lat = lat;
				lastGps.lng = log;
				LatLng latlnt = mapUtils::gpsToBaidu(atof(Log.c_str()), atof(Lat.c_str()));
				MapInfo mapInfo = MapInfo();
				mapInfo.lat = latlnt.lat;
				mapInfo.lng = latlnt.lng;
				is->gpsList.push_back(mapInfo);
			}
		}
	}
	av_seek_frame(is->pFormatCtx, is->videoStream, 0, AVSEEK_FLAG_BACKWARD);
	printf("analysis_thread finish");

	//解析完成
	if (analysisGpsEndCallBack != NULL)
	{
		analysisGpsEndCallBack();
	}

	return 0;
}



/**
 * Allocate the global VideoState, register all ffmpeg formats/codecs and
 * create the demuxer context. The new state is handed back through `p`.
 */
ZONGTANGDLL_API void initSDK(VideoState** p)
{
	global_video_state = (VideoState *)av_mallocz(sizeof(VideoState));

	// No streams selected yet; indices are resolved later by initCodec().
	global_video_state->videoStream = -1;
	global_video_state->audioStream = -1;
	global_video_state->subtitleStream = -1;
	/*global_video_state->quit = 0;*/
	*p = global_video_state;

	// Register every available file format and codec with libavformat.
	av_register_all();
	//avformat_network_init();
	// Allocate the demuxer context used by openFile().
	global_video_state->pFormatCtx = avformat_alloc_context();
	m_Release = FALSE;
}


/**
 * Open a media file and probe its streams. On success `duration` receives
 * the total length in AV_TIME_BASE units and a formatted duration is
 * printed. Returns 0 on success, -1 on failure.
 */
ZONGTANGDLL_API int openFile(char filepath[], int64_t& duration)
{
	if (avformat_open_input(&global_video_state->pFormatCtx, filepath, NULL, NULL) != 0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(global_video_state->pFormatCtx, NULL) < 0){
		printf("Couldn't find stream information.\n");
		return -1;
	}
	// Dump the stream layout for diagnostics.
	av_dump_format(global_video_state->pFormatCtx, 0, filepath, 0);

	if (global_video_state->pFormatCtx->duration != AV_NOPTS_VALUE){
		duration = global_video_state->pFormatCtx->duration;
		int total_secs = (int)(duration / AV_TIME_BASE);
		int us = (int)(duration % AV_TIME_BASE);
		int hours = total_secs / 3600;
		int mins = (total_secs / 60) % 60;
		int secs = total_secs % 60;
		printf("%02d:%02d:%02d.%02d\n", hours, mins, secs, (100 * us) / AV_TIME_BASE);
	}
	return 0;
}


// Register the per-frame progress callback (pass NULL to disable). Always 0.
ZONGTANGDLL_API int setFrameCallback(FrameCallBack _callback)
{
	frameCallBack = _callback;
	return 0;
}

// Register the end-of-playback callback (pass NULL to disable). Always 0.
ZONGTANGDLL_API int setFrameEndCallback(FrameEndCallBack _callback)
{
	frameEndCallBack = _callback;
	return 0;
}

/**
 * Find and open decoders for the video, subtitle and audio streams.
 * Video is mandatory (failure returns -1); subtitle and audio are
 * optional. When an audio stream exists the SDL audio device is opened.
 */
ZONGTANGDLL_API int initCodec()
{
	AVCodecContext *pCodecCtx, *sCodecCtx, *aCodecCtx;
	AVCodec *pCodec, *sCodec, *aCodec;

	int videoindex = getCodeByType(AVMEDIA_TYPE_VIDEO, &pCodecCtx, &pCodec);
	if (videoindex == -1){
		printf("Couldn't getCodeByType.\n");
		return -1;
	}

	// Subtitles carry the GPS track but are optional.
	int subtitleindex = getCodeByType(AVMEDIA_TYPE_SUBTITLE, &sCodecCtx, &sCodec);
	(void)subtitleindex;

	int audioindex = getCodeByType(AVMEDIA_TYPE_AUDIO, &aCodecCtx, &aCodec);
	if (audioindex >= 0) {
		stream_component_open(global_video_state, audioindex);
	}
	return 0;
}

/**
 * Attach SDL rendering to an existing (MFC) window handle.
 *
 * Creates an SDL window wrapper, a renderer and a streaming IYUV texture
 * sized to the video, and sets up source/destination rectangles so the
 * video scales to the window. Must be called after initCodec().
 * Returns 0 on success, -1 on any SDL failure.
 */
ZONGTANGDLL_API int setWindownHandle(HWND handle)
{
	if (global_picture_handle == NULL)
	{
		return -1;
	}
	if (global_video_state->video_st == NULL)
	{
		return -1;
	}
	if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		printf("Could not initialize SDL - %s\n", SDL_GetError());
		return -1;
	}
	SDL_Window *screen;
	screen = SDL_CreateWindowFrom((void *)(handle));

	if (!screen) {
		printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
		return -1;
	}
	int iWidth = 0;
	int iHeight = 0;
	SDL_GetWindowSize(screen, &iWidth, &iHeight);

	global_picture_handle->sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
	// Fix: renderer creation failure was not checked.
	if (!global_picture_handle->sdlRenderer) {
		printf("SDL: could not create renderer:%s\n", SDL_GetError());
		return -1;
	}
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	int width = global_video_state->video_st->codec->width;
	int height = global_video_state->video_st->codec->height;
	global_picture_handle->sdlTexture = SDL_CreateTexture(global_picture_handle->sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, width, height);
	// Fix: texture creation failure was not checked.
	if (!global_picture_handle->sdlTexture) {
		printf("SDL: could not create texture:%s\n", SDL_GetError());
		return -1;
	}

	// Destination rect = window size (video is scaled into it).
	global_picture_handle->sdlRect.x = 0;
	global_picture_handle->sdlRect.y = 0;
	global_picture_handle->sdlRect.w = iWidth;
	global_picture_handle->sdlRect.h = iHeight;

	// Source rect = native video size.
	global_picture_handle->srcRect.x = 0;
	global_picture_handle->srcRect.y = 0;
	global_picture_handle->srcRect.w = width;
	global_picture_handle->srcRect.h = height;
	return 0;
}

/**
 * Event-pump ("watchdog") thread: waits for SDL events and dispatches them
 * until the player is asked to quit. FF_REFRESH_EVENT drives the A/V-sync
 * refresh; FF_QUIT_EVENT / SDL_QUIT raise the global quit flag.
 */
static int  refresh_thread(void *arg)
{
	SDL_Event event;
	while (global_video_state != NULL && !global_video_state->quit)
	{
		SDL_WaitEvent(&event);
		if (event.type == FF_QUIT_EVENT || event.type == SDL_QUIT)
		{
			global_video_state->quit = 1;
		}
		else if (event.type == FF_ALLOC_EVENT)
		{
			//alloc_picture(event.user.data1);
		}
		else if (event.type == FF_REFRESH_EVENT)
		{
			video_refresh_timer(event.user.data1);
		}
	}
	return 0;
}

/**
 * Start playback: create the picture-queue mutex/cond, arm the first
 * refresh timer, and spawn the demux, video-decode and event-pump threads.
 * Returns 0 on success, -1 when any thread fails to start (the global
 * state is freed in that case).
 * NOTE(review): on failure av_free leaves global_video_state dangling and
 * already-started sibling threads may still reference it — confirm callers
 * treat -1 as fatal and do not reuse the SDK without initSDK().
 */
ZONGTANGDLL_API int play()
{
	mPlaying = true;
	global_video_state->quit = 0;
	global_video_state->pictq_mutex = SDL_CreateMutex();
	global_video_state->pictq_cond = SDL_CreateCond();

	// First refresh in 40 ms; video_refresh_timer re-arms itself after that.
	schedule_refresh(global_video_state, 40);
	global_video_state->decodeState = 1;	// reset decode state to "running"
	global_video_state->parse_tid = SDL_CreateThread(decode_thread, "myThread", global_video_state);
	if (!global_video_state->parse_tid)
	{
		global_video_state->quit = 1;
		av_free(global_video_state);
		return -1;
	}

	global_video_state->video_tid = SDL_CreateThread(video_thread, "videoThread", global_video_state);
	if (!global_video_state->video_tid)
	{
		global_video_state->quit = 1;
		av_free(global_video_state);
		return -1;
	}
	SDL_Thread *refresh_tid;
	refresh_tid = SDL_CreateThread(refresh_thread, "myRefreshThread", NULL);
	if (!refresh_tid)
	{
		global_video_state->quit = 1;
		av_free(global_video_state);
		return -1;
	}
	return 0;
}

/**
 * Seek to `timestamp` (video-stream time_base units). All packet queues
 * are flushed first so stale frames are not rendered after the jump.
 * Returns 0 on success, -1 on failure.
 */
ZONGTANGDLL_API int seek(int64_t timestamp)
{
	int ret = 0;
	//ret = pause(true);

	// Drop everything already queued; playback resumes at the new position.
	tools::packet_queue_flush(&global_video_state->audioq);
	tools::packet_queue_flush(&global_video_state->videoq);
	tools::packet_queue_flush(&global_video_state->subtitleq);
	if (ret < 0)
	{
		return -1;
	}

	if (av_seek_frame(global_video_state->pFormatCtx, global_video_state->videoStream, timestamp, AVSEEK_FLAG_BACKWARD) < 0){
		printf("Could not seek_frame.\n");
		return -1;
	}
	/*ret = pause(false);
	if (ret < 0)
	{
	return -1;
	}*/

	return 0;
}

//true 为暂停 ,false 为继续
ZONGTANGDLL_API int pause(bool enable)
{
	mPlaying = !enable;
	if (global_video_state == NULL)
	{
		return -1;
	}
	if (global_video_state->quit)
	{
		return -1;
	}
	SDL_AudioStatus status = SDL_GetAudioStatus();
	if (enable)
	{
		PacketQueue q = global_video_state->audioq;
		SDL_CondSignal(q.cond);
		SDL_PauseAudio(1);
		printf("pause SDL_PauseAudio\n");
	}
	else
	{
		PacketQueue q = global_video_state->audioq;
		/*SDL_PauseAudio(SDL_AUDIO_PLAYING);*/
		SDL_CondSignal(q.cond);
		SDL_PauseAudio(0);
		printf("pause SDL_AUDIO_PLAYING\n");
	}
	return 0;
}

// Report whether playback is currently running (false while paused/stopped).
ZONGTANGDLL_API bool getState()
{
	return mPlaying ? true : false;
}

/*
 * Tear down the player: signal all worker threads to quit, close the audio
 * device, release the decoded frame, flush every packet queue and close the
 * codecs and the demuxer.
 * Returns 0 on success, -1 when there is nothing to destroy or an
 * exception escaped a cleanup call.
 *
 * NOTE(review): SDL_Delay(500) is a crude grace period for the worker
 * threads to observe the quit flag; joining them with SDL_WaitThread on the
 * stored thread handles would be deterministic — left as-is to avoid
 * changing shutdown ordering.
 */
ZONGTANGDLL_API int desSDK()
{
	try{
		if (global_video_state == NULL)
		{
			return -1;
		}
		global_video_state->quit = 1;	// ask every worker thread to stop
		m_Release = TRUE;
		// Wake a consumer possibly blocked on the audio queue so it can exit.
		// (Fix: the original copied the whole PacketQueue by value just to
		// reach its condition variable.)
		SDL_CondSignal(global_video_state->audioq.cond);
		SDL_CloseAudio();
		SDL_Delay(500);

		if (global_picture_handle != NULL)
		{
			av_frame_free(&global_picture_handle->pFrameYUV);
			global_picture_handle->sdlRenderer = NULL;
			global_picture_handle->pSubtitle = NULL;
		}

		// global_video_state is non-NULL here (guarded above); the original
		// re-checked it redundantly.
		tools::packet_queue_flush(&global_video_state->audioq);
		tools::packet_queue_flush(&global_video_state->videoq);
		tools::packet_queue_flush(&global_video_state->subtitleq);

		if (global_video_state->video_st != NULL)
		{
			avcodec_close(global_video_state->video_st->codec);
			global_video_state->video_st = NULL;
		}
		if (global_video_state->audio_st != NULL)
		{
			avcodec_close(global_video_state->audio_st->codec);
			global_video_state->audio_st = NULL;
		}
		if (global_video_state->subtitle_st != NULL)
		{
			avcodec_close(global_video_state->subtitle_st->codec);
			global_video_state->subtitle_st = NULL;
		}
		avformat_close_input(&global_video_state->pFormatCtx);
	}
	catch (...)
	{
		printf("Exception : \n");
		return -1;
	}

	return 0;
}

// Mute/unmute toggle — not implemented yet; always reports success.
ZONGTANGDLL_API int setVolumeEnable(bool enable)
{
	(void)enable;	// unused until volume control is implemented
	return 0;
}

// Volume level setter — not implemented yet; always reports success.
ZONGTANGDLL_API int setVolume(double value)
{
	(void)value;	// unused until volume control is implemented
	return 0;
}

/*
 * Spawn a background thread (analysis_thread) that scans the file and fills
 * gpsList with the GPS records embedded in the recorder's MP4.
 * Returns 0 when the thread was started, -1 otherwise.
 *
 * Fix: the original av_free'd global_video_state on thread-creation
 * failure, leaving a dangling global pointer for every later API call;
 * we now only raise the quit flag and leave teardown to desSDK().
 */
ZONGTANGDLL_API int analysisGps()
{
	if (global_video_state == NULL)
	{
		return -1;
	}
	global_video_state->gpsList.clear();	// discard results of a previous run
	global_video_state->parse_tid = SDL_CreateThread(analysis_thread, "myAnalysisThread", global_video_state);
	if (!global_video_state->parse_tid)
	{
		global_video_state->quit = 1;
		return -1;
	}
	SDL_Delay(100);	// give the thread a moment to start — TODO: replace with proper synchronization
	return 0;
}

// Register the callback invoked when GPS analysis completes (presumably
// fired from analysis_thread — confirm in its implementation). Passing
// NULL clears the callback. Always returns 0.
ZONGTANGDLL_API int setAnalysisGpsEndCallback(AnalysisGpsEndCallBack _callback)
{
	analysisGpsEndCallBack = _callback;
	return 0;
}



/*
 * Encode one decoded video frame to <Dir>\<iIndex>.jpg via FFmpeg's mjpeg
 * muxer. On success *filename receives a heap-allocated copy of the output
 * path (caller releases it with delete[]). Returns 0 on success, -1 on any
 * failure.
 *
 * Fix: every early-return path in the original leaked pFormatCtx, the
 * opened AVIOContext, the opened encoder and/or the packet. All exits now
 * funnel through a single cleanup label. avformat_write_header() is also
 * checked now.
 */
int WriteJPEG(AVFrame* pFrame, int width, int height, int iIndex, char**filename, LPTSTR Dir)
{
	// Everything is declared up front so the cleanup gotos do not jump
	// over initializations (ill-formed in C++).
	char out_file[MAX_PATH] = { 0 };
	char acInputFileName[200] = { '\0' };
	AVFormatContext* pFormatCtx = NULL;
	AVStream* pAVStream = NULL;
	AVCodecContext* pCodecCtx = NULL;
	AVCodec* pCodec = NULL;
	AVPacket pkt;
	int got_picture = 0;
	int codec_opened = 0;
	int y_size = 0;
	int ret = 0;
	int rc = -1;
	char* res = NULL;

	// Build "<Dir>\<iIndex>.jpg" in the local code page.
	WideCharToMultiByte(CP_OEMCP, NULL, Dir, -1, acInputFileName, 200, NULL, FALSE);
	if (sprintf_s(out_file, sizeof(out_file), "%s\\%d.jpg", acInputFileName, iIndex) < 0) {
		printf("Couldn't open output file.");
		return -1;
	}

	pFormatCtx = avformat_alloc_context();
	if (pFormatCtx == NULL) {
		return -1;
	}
	pFormatCtx->oformat = av_guess_format("mjpeg", NULL, NULL);

	// Open the output file for writing.
	if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0) {
		printf("Couldn't open output file.");
		goto cleanup;
	}

	pAVStream = avformat_new_stream(pFormatCtx, 0);
	if (pAVStream == NULL) {
		goto cleanup;
	}

	// Configure the single mjpeg video stream.
	pCodecCtx = pAVStream->codec;
	pCodecCtx->codec_id = pFormatCtx->oformat->video_codec;
	pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
	pCodecCtx->pix_fmt = PIX_FMT_YUVJ420P;	// full-range YUV420 for JPEG
	pCodecCtx->width = width;
	pCodecCtx->height = height;
	pCodecCtx->time_base.num = 1;
	pCodecCtx->time_base.den = 25;

	av_dump_format(pFormatCtx, 0, out_file, 1);

	pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
	if (!pCodec) {
		printf("Codec not found.");
		goto cleanup;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		printf("Could not open codec.");
		goto cleanup;
	}
	codec_opened = 1;

	if (avformat_write_header(pFormatCtx, NULL) < 0) {
		goto cleanup;
	}

	// Over-allocate the packet; mjpeg output is far smaller than raw YUV.
	y_size = pCodecCtx->width * pCodecCtx->height;
	av_new_packet(&pkt, y_size * 3);

	ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_picture);
	if (ret < 0) {
		printf("Encode Error.\n");
		av_free_packet(&pkt);
		goto cleanup;
	}
	if (got_picture == 1) {
		av_write_frame(pFormatCtx, &pkt);
	}
	av_free_packet(&pkt);

	av_write_trailer(pFormatCtx);
	printf("Encode Successful.\n");

	// Hand the output path back to the caller (ownership transfers).
	res = new char[strlen(out_file) + 1];
	strcpy_s(res, strlen(out_file) + 1, out_file);
	*filename = res;
	rc = 0;

cleanup:
	if (codec_opened) {
		avcodec_close(pAVStream->codec);
	}
	if (pFormatCtx->pb) {
		avio_close(pFormatCtx->pb);
	}
	avformat_free_context(pFormatCtx);
	return rc;
}


/*
 * Snapshot the most recently decoded frame to a JPEG in Dir; on success
 * *filename receives the heap-allocated output path (see WriteJPEG for
 * ownership). The current video dts is used as the file-name index.
 * Returns 0 on success, -1 on failure.
 *
 * Fix: the original ignored WriteJPEG's return value and reported success
 * even when encoding failed; the result is propagated now.
 */
ZONGTANGDLL_API int saveFrame(char** filename, LPTSTR Dir)
{
	try{
		if (global_picture_handle == NULL
			|| global_video_state == NULL
			|| global_video_state->video_st == NULL)
		{
			return -1;
		}

		AVFrame *pFrame = global_picture_handle->pFrame;
		AVCodecContext *pCodecCtx = global_video_state->video_st->codec;
		int index = global_video_state->video_dts;	// dts doubles as snapshot index

		if (WriteJPEG(pFrame, pCodecCtx->width, pCodecCtx->height, index, filename, Dir) < 0)
		{
			return -1;
		}
	}
	catch (...)
	{
		printf("Exception : \n");
		return -1;
	}
	return 0;
}

CPP 依赖

OpenCV 3.4 以上版本

项目地址:

暂无

后续全部代码将开放到git代码管理上,如您需要全部代码作为学习,也可以联系本人,电话号码18824182332(微信同号)。

  • 3
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

乘风偷月

如果觉得有用,请打赏一杯奶茶

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值