简单原始的FFMpeg播放器

这个代码很久之前写的,当时是为了学习FFMpeg和SDL。写得很简单,仅仅处理视频帧解码,用SDL_DisplayYUVOverlay显示视频数据,没有同步处理和音频处理,所以一个视频文件会很快地逐帧显示而过。


// rin.c
// A small sample program that shows how to use libavformat and libavcodec to
// play a media file.
//
// Use
//
// gcc rin.c -lavutil -lavformat -lavcodec -lswscale `sdl-config --cflags --libs`
//
// to build (assuming sdl, libavformat and libavcodec are correctly installed 
// on your system).
//
// Run using
//
// ./a.out air.mpg
//

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

#include <SDL.h>
#include <SDL_thread.h>

// Fixed-width integer types come from the standard header; the old
// "#define uint8_t unsigned char" style macros would silently rewrite the
// system typedefs and can conflict with the FFmpeg/SDL headers above.
#include <stdint.h>

// Requested SDL audio buffer size in samples (per channel).
#define SDL_AUDIO_BUFFER_SIZE 1024

// Thread-safe FIFO of demuxed packets (producer: demux loop, consumer:
// audio callback). All fields are protected by `mutex`.
typedef struct PacketQueue {
	AVPacketList *first_pkt, *last_pkt;	// singly linked list head / tail
	int n_packets;		// number of packets currently queued
	int size;		// total bytes accounted: payloads + list nodes
	int abort_request;	// set by packet_queue_abort() to unblock waiters
	SDL_mutex *mutex;	// guards every field of this struct
	SDL_cond *cond;		// signalled on put and on abort
} PacketQueue_t;

// All player state bundled in one place; a single global instance (g_RSet)
// is shared by every function in this sample.
typedef struct ResourcesSet {
	char *filename;			// input path (points into argv)
	AVFormatContext *ic;		// demuxer context for the opened file
	AVCodecContext *videoDec, *audioDec;	// stream codec contexts (owned by ic)
	AVCodec *videoCodec, *audioCodec;	// decoders found for the streams
	AVFrame *frame;			// reusable decode target frame
	int videoStream, audioStream;	// stream indices, -1 if absent
	struct SwsContext *imgConvert;	// scaler: decoded pix_fmt -> YUV420P

	PacketQueue_t audioQ;		// audio packet queue (currently unused)

	SDL_Surface *screen;		// SDL display surface
	SDL_Overlay *bmp;		// YV12 overlay the frames are blitted into
} ResourcesSet_t;

static ResourcesSet_t g_RSet;

/* ------------------------- packet queue ------------------------- */

void packet_queue_init(PacketQueue_t *q)
{
	/* Start from an all-zero queue, then create the SDL sync primitives. */
	memset(q, 0, sizeof(*q));
	q->mutex = SDL_CreateMutex();
	q->cond = SDL_CreateCond();
}

void packet_queue_flush(PacketQueue_t *q)
{
	/* Drop every queued packet and reset the queue to its empty state. */
	AVPacketList *node;

	SDL_LockMutex(q->mutex);
	node = q->first_pkt;
	while (node != NULL) {
		AVPacketList *next = node->next;
		av_free_packet(&node->pkt);
		av_freep(&node);
		node = next;
	}
	q->first_pkt = NULL;
	q->last_pkt = NULL;
	q->n_packets = 0;
	q->size = 0;
	SDL_UnlockMutex(q->mutex);
}

void packet_queue_deinit(PacketQueue_t *q)
{
	/* Release all queued packets, then the SDL synchronization objects. */
	packet_queue_flush(q);
	SDL_DestroyMutex(q->mutex);
	SDL_DestroyCond(q->cond);
}

// Mark the queue as aborted and wake any consumer blocked in
// packet_queue_get() so it can observe the flag and return -1.
void packet_queue_abort(PacketQueue_t *q)
{
	SDL_LockMutex(q->mutex);
	q->abort_request = 1;
	SDL_CondSignal(q->cond);
	SDL_UnlockMutex(q->mutex);
}

// Append a packet to the tail of the queue and wake one waiting consumer.
// Returns 0 on success, -1 on failure (duplication error, out of memory,
// or the queue has been aborted); on failure the caller keeps ownership
// of `pkt`.
int packet_queue_put(PacketQueue_t *q, AVPacket *pkt)
{
	AVPacketList *entry;

	// Make the packet own its data: demuxer packets may reference
	// internal buffers that are recycled by the next av_read_frame().
	if (av_dup_packet(pkt) < 0)
		return -1;

	entry = av_malloc(sizeof(AVPacketList));
	if (!entry)
		return -1;
	entry->pkt = *pkt;
	entry->next = NULL;

	SDL_LockMutex(q->mutex);

	// BUG FIX: honor a pending abort instead of appending to a queue
	// that is being torn down (same check ffplay's packet_queue_put does).
	if (q->abort_request) {
		SDL_UnlockMutex(q->mutex);
		av_free(entry);
		return -1;
	}

	if (!q->last_pkt)
		q->first_pkt = entry;
	else
		q->last_pkt->next = entry;
	q->last_pkt = entry;
	q->n_packets++;
	q->size += entry->pkt.size + sizeof(*entry);
	/* XXX: should duplicate packet data in DV case */

	// Wake one consumer blocked in packet_queue_get()
	SDL_CondSignal(q->cond);
	SDL_UnlockMutex(q->mutex);
	return 0;
}

// Pop the head packet into *pkt. Returns 1 when a packet was delivered,
// 0 when the queue is empty and `block` is false, -1 when aborted.
// With `block` true the call sleeps on the condition variable until a
// producer queues a packet or packet_queue_abort() is called.
int packet_queue_get(PacketQueue_t *q, AVPacket *pkt, int block)
{
	AVPacketList *entry;
	int ret;

	SDL_LockMutex(q->mutex);

	while (1) {
		if (q->abort_request) {
			/* Queue is shutting down: report failure. */
			ret = -1;
			break;
		}

		entry = q->first_pkt;
		if (entry != NULL) {
			/* Unlink the head node and hand its packet to the caller. */
			q->first_pkt = entry->next;
			if (q->first_pkt == NULL)
				q->last_pkt = NULL;
			q->n_packets--;
			q->size -= entry->pkt.size + sizeof(*entry);
			*pkt = entry->pkt;
			av_free(entry);
			ret = 1;
			break;
		}

		if (!block) {
			/* Non-blocking mode and nothing queued. */
			ret = 0;
			break;
		}

		/* Wait until a producer signals new data (or an abort). */
		SDL_CondWait(q->cond, q->mutex);
	}

	SDL_UnlockMutex(q->mutex);
	return ret;
}

/* ----------------------------- video ---------------------------- */

// Create the SDL window surface and a YV12 overlay matching the decoded
// video dimensions. Returns 0 on success, -1 on failure.
int video_display_open()
{
	int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;

	g_RSet.screen = SDL_SetVideoMode(g_RSet.videoDec->width,
	                                 g_RSet.videoDec->height, 0, flags);
	if (!g_RSet.screen) {
		// BUG FIX: the original messages ended in a literal "/n"
		// instead of the "\n" newline escape.
		fprintf(stderr, "SDL: could not set video mode - exiting\n");
		return -1;
	}

	g_RSet.bmp = SDL_CreateYUVOverlay(g_RSet.videoDec->width,
	                                  g_RSet.videoDec->height,
	                                  SDL_YV12_OVERLAY, g_RSet.screen);
	if (!g_RSet.bmp) {
		fprintf(stderr, "SDL: could not create YUV Overlay - exiting\n");
		return -1;
	}

	return 0;
}

// Convert the most recently decoded frame (g_RSet.frame) to YV12 and
// display it through the SDL overlay, covering the whole window.
void video_image_display()
{
	AVPicture pict;
	SDL_Rect rect;

	SDL_LockYUVOverlay(g_RSet.bmp);

	// SDL's YV12 overlay stores its planes in Y, V, U order while
	// AVPicture expects Y, U, V — hence the swapped [1]/[2] indices.
	pict.data[0] = g_RSet.bmp->pixels[0];
	pict.data[1] = g_RSet.bmp->pixels[2];
	pict.data[2] = g_RSet.bmp->pixels[1];
	pict.linesize[0] = g_RSet.bmp->pitches[0];
	pict.linesize[1] = g_RSet.bmp->pitches[2];
	pict.linesize[2] = g_RSet.bmp->pitches[1];

	// BUG FIX: sws_scale() takes plane-pointer and stride arrays, not
	// AVPicture*; the old (AVPicture*) casts only compiled by accident.
	sws_scale(g_RSet.imgConvert,
		(const uint8_t * const *)g_RSet.frame->data, g_RSet.frame->linesize,
		0, g_RSet.videoDec->height,
		pict.data, pict.linesize);

	SDL_UnlockYUVOverlay(g_RSet.bmp);

	rect.x = 0;
	rect.y = 0;
	rect.w = g_RSet.videoDec->width;
	rect.h = g_RSet.videoDec->height;
	SDL_DisplayYUVOverlay(g_RSet.bmp, &rect);
}

// Pull packets from the demuxer and decode/display every complete video
// frame. There is no timing control: frames are shown as fast as they
// decode (this sample has no A/V sync). Returns when the file ends.
void decode_video_frame()
{
	AVPacket packet;
	int frameFinished;
	// (removed unused counter `i` from the original)

	while (av_read_frame(g_RSet.ic, &packet) >= 0) {
		// Only feed packets belonging to the selected video stream
		if (packet.stream_index == g_RSet.videoStream) {
			// A frame may span several packets; frameFinished is
			// set only once a whole picture is available.
			avcodec_decode_video(g_RSet.videoDec, g_RSet.frame,
				&frameFinished, packet.data, packet.size);
			if (frameFinished)
				video_image_display();
		}

		// Release the packet buffer allocated by av_read_frame
		av_free_packet(&packet);
	}
}

/* ----------------------------- audio ---------------------------- */

// SDL audio callback. Audio decoding is not implemented in this sample;
// BUG FIX: the original left `stream` untouched, so SDL would play
// whatever stale data was in the buffer — fill it with silence instead.
void audio_callback(void *userdata, Uint8 *stream, int len)
{
	(void)userdata;		// AVCodecContext*, unused until audio is implemented
	memset(stream, 0, len);
}

// Open the SDL audio device with parameters taken from the audio codec
// context. Returns 0 on success, -1 on failure. (Currently unused: the
// call in main() is commented out.)
int audio_display_open()
{
	SDL_AudioSpec wanted_spec, spec;

	wanted_spec.freq = g_RSet.audioDec->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = g_RSet.audioDec->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = g_RSet.audioDec;

	if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		// BUG FIX: "/n" was a literal slash-n, not a newline escape.
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}
	return 0;
}

// Entry point: open the media file given as argv[1], find the first video
// and audio streams, open their decoders, create the SDL display, and run
// the decode/display loop. Returns 0 on success, 1 on any failure.
int main(int argc, char *argv[])
{
	int i, flags;
	int ret = 1;	// exit status: set to 0 only on a fully successful run

	memset(&g_RSet, 0x0, sizeof(g_RSet));

	// BUG FIX: argv[1] was dereferenced without checking argc.
	if (argc < 2) {
		fprintf(stderr, "Usage: %s <mediafile>\n", argv[0]);
		return 1;
	}

	// Register all formats and codecs
	avcodec_register_all();
	//avdevice_register_all();
	av_register_all();

	// Init SDL (video, audio and timer subsystems)
	flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
	if (SDL_Init(flags)) {
		// BUG FIX: every "/n" in the original messages was a literal
		// slash-n instead of the "\n" newline escape; "Cannt" typos
		// are also corrected below.
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		goto exit_err;
	}

	g_RSet.filename = argv[1];
	// Open the input file
	if (av_open_input_file(&g_RSet.ic, g_RSet.filename, NULL, 0, NULL) != 0) {
		fprintf(stderr, "Cannot open input file\n");
		goto exit_err;
	}

	// Retrieve stream information
	if (av_find_stream_info(g_RSet.ic) < 0) {
		fprintf(stderr, "Cannot find stream info\n");
		goto exit_err;
	}

	// Dump information about the file onto standard error
	dump_format(g_RSet.ic, 0, g_RSet.filename, 0);

	// Find the first video stream
	g_RSet.videoStream = -1;
	for (i = 0; i < g_RSet.ic->nb_streams; i++) {
		if (g_RSet.ic->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
			g_RSet.videoStream = i;
			break;
		}
	}
	if (g_RSet.videoStream == -1) {
		// Not fatal by itself: an audio-only file is still accepted below.
		fprintf(stderr, "No video stream\n");
	} else {
		// The stream owns this codec context; we must not free it.
		g_RSet.videoDec = g_RSet.ic->streams[g_RSet.videoStream]->codec;

		// Find the decoder for the video stream
		g_RSet.videoCodec = avcodec_find_decoder(g_RSet.videoDec->codec_id);
		if (g_RSet.videoCodec == NULL) {
			fprintf(stderr, "Found no video codec\n");
			goto exit_err;
		}

		// Open video codec
		if (avcodec_open(g_RSet.videoDec, g_RSet.videoCodec) < 0) {
			fprintf(stderr, "Cannot open avcodec for videoCodec\n");
			// BUG FIX: don't avcodec_close() a context that never opened
			g_RSet.videoDec = NULL;
			goto exit_err;
		}
	}

	// Find the first audio stream
	g_RSet.audioStream = -1;
	for (i = 0; i < g_RSet.ic->nb_streams; i++) {
		if (g_RSet.ic->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
			g_RSet.audioStream = i;
			break;
		}
	}
	if (g_RSet.audioStream == -1) {
		// Not fatal: video-only files are fine.
		fprintf(stderr, "No audio stream\n");
	} else {
		// Get a pointer to the codec context for the audio stream
		g_RSet.audioDec = g_RSet.ic->streams[g_RSet.audioStream]->codec;

		// Find the decoder for the audio stream
		g_RSet.audioCodec = avcodec_find_decoder(g_RSet.audioDec->codec_id);
		if (g_RSet.audioCodec == NULL) {
			fprintf(stderr, "Found no audio codec\n");
			goto exit_err;
		}

		// Open audio codec
		if (avcodec_open(g_RSet.audioDec, g_RSet.audioCodec) < 0) {
			fprintf(stderr, "Cannot open avcodec for audioCodec\n");
			g_RSet.audioDec = NULL;	// see video branch above
			goto exit_err;
		}
	}

	// Neither video nor audio stream: nothing to play
	if (g_RSet.audioStream == -1 && g_RSet.videoStream == -1) {
		goto exit_err;
	}

	if (g_RSet.videoStream != -1) {
		// BUG FIX: this whole section dereferenced videoDec even for
		// audio-only files (NULL pointer crash); it is video-only now.

		// Allocate the decode target frame
		g_RSet.frame = avcodec_alloc_frame();
		if (!g_RSet.frame) {
			fprintf(stderr, "Cannot alloc frame buffer\n");
			goto exit_err;
		}

		// Scaler converting decoded frames to YUV420P for the overlay
		g_RSet.imgConvert = sws_getContext(g_RSet.videoDec->width, g_RSet.videoDec->height,
			g_RSet.videoDec->pix_fmt, g_RSet.videoDec->width, g_RSet.videoDec->height,
			PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
		if (!g_RSet.imgConvert) {
			fprintf(stderr, "Cannot alloc imgConvert buffer\n");
			goto exit_err;
		}

		// Open the SDL window and overlay
		// BUG FIX: the return value was previously ignored
		if (video_display_open() < 0)
			goto exit_err;
	}
	//audio_display_open();

	// Init the (currently unused) audio packet queue
	packet_queue_init(&g_RSet.audioQ);
	SDL_PauseAudio(0);

	// Decode video frames and display them; there is no A/V sync, so
	// playback runs as fast as decoding allows.
	if (g_RSet.videoStream != -1)
		decode_video_frame();

	ret = 0;

exit_err:
	// Free the decode frame
	if (g_RSet.frame)
		av_free(g_RSet.frame);
	// Close the codecs (the contexts themselves belong to the format ctx)
	if (g_RSet.videoDec)
		avcodec_close(g_RSet.videoDec);
	if (g_RSet.audioDec)
		avcodec_close(g_RSet.audioDec);
	if (g_RSet.imgConvert)
		sws_freeContext(g_RSet.imgConvert);
	// Close the input file
	if (g_RSet.ic)
		av_close_input_file(g_RSet.ic);
	// Tear down the packet queue only if it was initialized
	if (g_RSet.audioQ.mutex)
		packet_queue_deinit(&g_RSet.audioQ);
	SDL_Quit();	// BUG FIX: SDL was never shut down

	return ret;
}

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值