00001 /*
00002 * FFplay : Simple Media Player based on the ffmpeg libraries
00003 * Copyright (c) 2003 Fabrice Bellard
00004 *
00005 * This file is part of FFmpeg.
00006 *
00007 * FFmpeg is free software; you can redistribute it and/or
00008 * modify it under the terms of the GNU Lesser General Public
00009 * License as published by the Free Software Foundation; either
00010 * version 2.1 of the License, or (at your option) any later version.
00011 *
00012 * FFmpeg is distributed in the hope that it will be useful,
00013 * but WITHOUT ANY WARRANTY; without even the implied warranty of
00014 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
00015 * Lesser General Public License for more details.
00016 *
00017 * You should have received a copy of the GNU Lesser General Public
00018 * License along with FFmpeg; if not, write to the Free Software
00019 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00020 */
00021
00022 #include <math.h>
00023 #include <limits.h>
00024 #include "libavutil/avstring.h"
00025 #include "libavformat/avformat.h"
00026 #include "libavformat/rtsp.h"
00027 #include "libavdevice/avdevice.h"
00028 #include "libswscale/swscale.h"
00029 #include "libavcodec/audioconvert.h"
00030 #include "libavcodec/opt.h"
00031
00032 #include "cmdutils.h"
00033
00034 #include <SDL.h>
00035 #include <SDL_thread.h>
00036
00037 #ifdef __MINGW32__
00038 #undef main /* We don't want SDL to override our main() */
00039 #endif
00040
00041 #undef exit
00042
00043 const char program_name[] = "FFplay";
00044 const int program_birth_year = 2003;
00045
00046 //#define DEBUG_SYNC
00047
00048 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
00049 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
00050 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
00051
00052 /* SDL audio buffer size, in samples. Should be small to have precise
00053 A/V sync as SDL does not have hardware buffer fullness info. */
00054 #define SDL_AUDIO_BUFFER_SIZE 1024
00055
00056 /* no AV sync correction is done if below the AV sync threshold */
00057 #define AV_SYNC_THRESHOLD 0.01
00058 /* no AV correction is done if too big error */
00059 #define AV_NOSYNC_THRESHOLD 10.0
00060
00061 /* maximum audio speed change to get correct sync */
00062 #define SAMPLE_CORRECTION_PERCENT_MAX 10
00063
00064 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
00065 #define AUDIO_DIFF_AVG_NB 20
00066
00067 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
00068 #define SAMPLE_ARRAY_SIZE (2*65536)
00069
00070 static int sws_flags = SWS_BICUBIC;
00071
/* Thread-safe FIFO of demuxed packets, shared between the demux thread
   (producer) and a decoder thread (consumer).  See packet_queue_put()
   and packet_queue_get(). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;  /* singly linked list head / tail */
    int nb_packets;
    int size;            /* accumulated pkt.size + node overhead, in bytes */
    int abort_request;   /* when set, packet_queue_get() returns < 0 */
    SDL_mutex *mutex;    /* protects all fields above */
    SDL_cond *cond;      /* signalled on put and on abort */
} PacketQueue;
00080
00081 #define VIDEO_PICTURE_QUEUE_SIZE 1
00082 #define SUBPICTURE_QUEUE_SIZE 4
00083
/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;          /* presentation timestamp, in seconds */
    SDL_Overlay *bmp;    /* YUV overlay holding the frame pixels */
    int width, height; /* source height & width */
    int allocated;       /* nonzero once bmp has been allocated (allocation happens elsewhere) */
} VideoPicture;
00090
/* One decoded subtitle queued for blending onto the video. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;      /* decoded subtitle rectangles; freed by free_subpicture() */
} SubPicture;
00095
/* Which clock drives A/V synchronisation (see get_master_clock()). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
00101
/* Aggregate player state for one opened file: demuxer context, per-stream
   decoding state, packet queues, decoded picture/subtitle queues and the
   clock data used for A/V synchronisation.  A single instance is reached
   through the global cur_stream. */
typedef struct VideoState {
    SDL_Thread *parse_tid;          /* demux/read thread */
    SDL_Thread *video_tid;          /* video decode thread */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;                   /* a seek is pending (set by stream_seek()) */
    int seek_flags;                 /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;               /* AV_SYNC_* master clock selection */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;             /* audio clock base; see get_audio_clock() */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;
    int audio_pkt_size;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];   /* ring buffer for the waveform display */
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;       /* subtitle decode thread */
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];   /* decoded subtitle ring buffer */
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;             /* target wall-clock time of the next frame */
    double frame_last_pts;          /* pts of the previously shown frame */
    double frame_last_delay;        /* last valid inter-frame delay */
    double video_clock;
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;       /* pts of the displayed frame */
    int64_t video_current_pts_time; /* av_gettime() when video_current_pts was set */
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];  /* decoded picture ring buffer */
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display area inside the SDL surface */
} VideoState;
00176
00177 static void show_help(void);
00178 static int audio_write_get_buf_size(VideoState *is);
00179
/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int wanted_subtitle_stream= -1;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;          /* the single playing stream */
static int64_t audio_callback_time;     /* time of the last audio callback (set elsewhere in the file) */

/* sentinel packet: packet_queue_put() queues it without duplicating it */
static AVPacket flush_pkt;

/* SDL user-event codes used to communicate with the main event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;             /* the SDL output surface; NULL until video_open() */
00228
00229 /* packet queue handling */
00230 static void packet_queue_init(PacketQueue *q)
00231 {
00232 memset(q, 0, sizeof(PacketQueue));
00233 q->mutex = SDL_CreateMutex();
00234 q->cond = SDL_CreateCond();
00235 }
00236
00237 static void packet_queue_flush(PacketQueue *q)
00238 {
00239 AVPacketList *pkt, *pkt1;
00240
00241 SDL_LockMutex(q->mutex);
00242 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
00243 pkt1 = pkt->next;
00244 av_free_packet(&pkt->pkt);
00245 av_freep(&pkt);
00246 }
00247 q->last_pkt = NULL;
00248 q->first_pkt = NULL;
00249 q->nb_packets = 0;
00250 q->size = 0;
00251 SDL_UnlockMutex(q->mutex);
00252 }
00253
/* Destroy a packet queue: drop any remaining packets and release the
   mutex/condvar created by packet_queue_init(). */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
00260
00261 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
00262 {
00263 AVPacketList *pkt1;
00264
00265 /* duplicate the packet */
00266 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
00267 return -1;
00268
00269 pkt1 = av_malloc(sizeof(AVPacketList));
00270 if (!pkt1)
00271 return -1;
00272 pkt1->pkt = *pkt;
00273 pkt1->next = NULL;
00274
00275
00276 SDL_LockMutex(q->mutex);
00277
00278 if (!q->last_pkt)
00279
00280 q->first_pkt = pkt1;
00281 else
00282 q->last_pkt->next = pkt1;
00283 q->last_pkt = pkt1;
00284 q->nb_packets++;
00285 q->size += pkt1->pkt.size + sizeof(*pkt1);
00286 /* XXX: should duplicate packet data in DV case */
00287 SDL_CondSignal(q->cond);
00288
00289 SDL_UnlockMutex(q->mutex);
00290 return 0;
00291 }
00292
/* Make all subsequent packet_queue_get() calls fail and wake any thread
   currently blocked in one (used during teardown / stream switching). */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
00303
00304 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
00305 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
00306 {
00307 AVPacketList *pkt1;
00308 int ret;
00309
00310 SDL_LockMutex(q->mutex);
00311
00312 for(;;) {
00313 if (q->abort_request) {
00314 ret = -1;
00315 break;
00316 }
00317
00318 pkt1 = q->first_pkt;
00319 if (pkt1) {
00320 q->first_pkt = pkt1->next;
00321 if (!q->first_pkt)
00322 q->last_pkt = NULL;
00323 q->nb_packets--;
00324 q->size -= pkt1->pkt.size + sizeof(*pkt1);
00325 *pkt = pkt1->pkt;
00326 av_free(pkt1);
00327 ret = 1;
00328 break;
00329 } else if (!block) {
00330 ret = 0;
00331 break;
00332 } else {
00333 SDL_CondWait(q->cond, q->mutex);
00334 }
00335 }
00336 SDL_UnlockMutex(q->mutex);
00337 return ret;
00338 }
00339
00340 static inline void fill_rectangle(SDL_Surface *screen,
00341 int x, int y, int w, int h, int color)
00342 {
00343 SDL_Rect rect;
00344 rect.x = x;
00345 rect.y = y;
00346 rect.w = w;
00347 rect.h = h;
00348 SDL_FillRect(screen, &rect, color);
00349 }
00350
#if 0
/* Disabled: draws only the border of a rectangle by filling the four
   strips of the display area that surround (x, y, w, h) with 'color'.
   Kept for reference; its only call site in video_image_display() is
   also commented out. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
00388
00389
00390
/* Fixed-point RGB -> YCbCr conversion helpers with SCALEBITS fractional
   bits.  The 219/255 and 224/255 factors and the +16 luma offset match
   the CCIR/BT.601 limited-range scaling. */
#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

/* luma from RGB */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

/* Cb from RGB; 'shift' carries extra fraction bits in the inputs */
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* Cr from RGB; 'shift' carries extra fraction bits in the inputs */
#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
00406
/* Alpha-blend 'newp' over 'oldp' with alpha 'a' (0..255).  'oldp' may
   carry 's' extra fraction bits (used for pre-accumulated chroma); the
   result is scaled back down by the (255 << s) divisor.
   Fix: all macro arguments are now parenthesized so that expression
   arguments expand with the intended precedence (CERT PRE01-C). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp) << (s)) * (255 - (a)) + (newp) * (a)) / (255 << (s)))
00409
/* Unpack an ARGB pixel stored as one 32-bit word at 's'. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit pixel at 's' and unpack it
   as AYUV components. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components back into one 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
00435
00436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
00437 {
00438 int wrap, wrap3, width2, skip2;
00439 int y, u, v, a, u1, v1, a1, w, h;
00440 uint8_t *lum, *cb, *cr;
00441 const uint8_t *p;
00442 const uint32_t *pal;
00443 int dstx, dsty, dstw, dsth;
00444
00445 dstw = av_clip(rect->w, 0, imgw);
00446 dsth = av_clip(rect->h, 0, imgh);
00447 dstx = av_clip(rect->x, 0, imgw - dstw);
00448 dsty = av_clip(rect->y, 0, imgh - dsth);
00449 lum = dst->data[0] + dsty * dst->linesize[0];
00450 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
00451 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
00452
00453 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
00454 skip2 = dstx >> 1;
00455 wrap = dst->linesize[0];
00456 wrap3 = rect->pict.linesize[0];
00457 p = rect->pict.data[0];
00458 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
00459
00460 if (dsty & 1) {
00461 lum += dstx;
00462 cb += skip2;
00463 cr += skip2;
00464
00465 if (dstx & 1) {
00466 YUVA_IN(y, u, v, a, p, pal);
00467 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00468 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00469 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00470 cb++;
00471 cr++;
00472 lum++;
00473 p += BPP;
00474 }
00475 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00476 YUVA_IN(y, u, v, a, p, pal);
00477 u1 = u;
00478 v1 = v;
00479 a1 = a;
00480 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00481
00482 YUVA_IN(y, u, v, a, p + BPP, pal);
00483 u1 += u;
00484 v1 += v;
00485 a1 += a;
00486 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00487 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00488 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00489 cb++;
00490 cr++;
00491 p += 2 * BPP;
00492 lum += 2;
00493 }
00494 if (w) {
00495 YUVA_IN(y, u, v, a, p, pal);
00496 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00497 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00498 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00499 p++;
00500 lum++;
00501 }
00502 p += wrap3 - dstw * BPP;
00503 lum += wrap - dstw - dstx;
00504 cb += dst->linesize[1] - width2 - skip2;
00505 cr += dst->linesize[2] - width2 - skip2;
00506 }
00507 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
00508 lum += dstx;
00509 cb += skip2;
00510 cr += skip2;
00511
00512 if (dstx & 1) {
00513 YUVA_IN(y, u, v, a, p, pal);
00514 u1 = u;
00515 v1 = v;
00516 a1 = a;
00517 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00518 p += wrap3;
00519 lum += wrap;
00520 YUVA_IN(y, u, v, a, p, pal);
00521 u1 += u;
00522 v1 += v;
00523 a1 += a;
00524 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00525 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00526 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00527 cb++;
00528 cr++;
00529 p += -wrap3 + BPP;
00530 lum += -wrap + 1;
00531 }
00532 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00533 YUVA_IN(y, u, v, a, p, pal);
00534 u1 = u;
00535 v1 = v;
00536 a1 = a;
00537 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00538
00539 YUVA_IN(y, u, v, a, p + BPP, pal);
00540 u1 += u;
00541 v1 += v;
00542 a1 += a;
00543 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00544 p += wrap3;
00545 lum += wrap;
00546
00547 YUVA_IN(y, u, v, a, p, pal);
00548 u1 += u;
00549 v1 += v;
00550 a1 += a;
00551 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00552
00553 YUVA_IN(y, u, v, a, p + BPP, pal);
00554 u1 += u;
00555 v1 += v;
00556 a1 += a;
00557 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00558
00559 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
00560 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
00561
00562 cb++;
00563 cr++;
00564 p += -wrap3 + 2 * BPP;
00565 lum += -wrap + 2;
00566 }
00567 if (w) {
00568 YUVA_IN(y, u, v, a, p, pal);
00569 u1 = u;
00570 v1 = v;
00571 a1 = a;
00572 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00573 p += wrap3;
00574 lum += wrap;
00575 YUVA_IN(y, u, v, a, p, pal);
00576 u1 += u;
00577 v1 += v;
00578 a1 += a;
00579 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00580 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00581 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00582 cb++;
00583 cr++;
00584 p += -wrap3 + BPP;
00585 lum += -wrap + 1;
00586 }
00587 p += wrap3 + (wrap3 - dstw * BPP);
00588 lum += wrap + (wrap - dstw - dstx);
00589 cb += dst->linesize[1] - width2 - skip2;
00590 cr += dst->linesize[2] - width2 - skip2;
00591 }
00592 /* handle odd height */
00593 if (h) {
00594 lum += dstx;
00595 cb += skip2;
00596 cr += skip2;
00597
00598 if (dstx & 1) {
00599 YUVA_IN(y, u, v, a, p, pal);
00600 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00601 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00602 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00603 cb++;
00604 cr++;
00605 lum++;
00606 p += BPP;
00607 }
00608 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00609 YUVA_IN(y, u, v, a, p, pal);
00610 u1 = u;
00611 v1 = v;
00612 a1 = a;
00613 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00614
00615 YUVA_IN(y, u, v, a, p + BPP, pal);
00616 u1 += u;
00617 v1 += v;
00618 a1 += a;
00619 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00620 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
00621 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
00622 cb++;
00623 cr++;
00624 p += 2 * BPP;
00625 lum += 2;
00626 }
00627 if (w) {
00628 YUVA_IN(y, u, v, a, p, pal);
00629 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00630 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00631 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00632 }
00633 }
00634 }
00635
00636 static void free_subpicture(SubPicture *sp)
00637 {
00638 int i;
00639
00640 for (i = 0; i < sp->sub.num_rects; i++)
00641 {
00642 av_freep(&sp->sub.rects[i]->pict.data[0]);
00643 av_freep(&sp->sub.rects[i]->pict.data[1]);
00644 av_freep(&sp->sub.rects[i]);
00645 }
00646
00647 av_free(sp->sub.rects);
00648
00649 memset(&sp->sub, 0, sizeof(AVSubtitle));
00650 }
00651
/* Display the current video picture: compute the display aspect ratio,
   blend any active subtitle into the overlay, and show the overlay
   letterboxed inside the window. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        /* sample aspect ratio: prefer the stream value, fall back to the
           codec value, then to square pixels */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert the sample aspect ratio into a display aspect ratio */
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note the U/V plane swap between the AVPicture
                       layout and the overlay's plane order (indices 2/1) */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the display aspect ratio inside the window,
           keeping even dimensions */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
00765
/* Mathematical modulo: unlike C's %, the result is always in [0, b)
   for b > 0, even when a is negative. */
static inline int compute_mod(int a, int b)
{
    int m = a % b;

    return m < 0 ? m + b : m;
}
00774
00775 static void video_audio_display(VideoState *s)
00776 {
00777 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
00778 int ch, channels, h, h2, bgcolor, fgcolor;
00779 int16_t time_diff;
00780
00781 /* compute display index : center on currently output samples */
00782 channels = s->audio_st->codec->channels;
00783 nb_display_channels = channels;
00784 if (!s->paused) {
00785 n = 2 * channels;
00786 delay = audio_write_get_buf_size(s);
00787 delay /= n;
00788
00789 /* to be more precise, we take into account the time spent since
00790 the last buffer computation */
00791 if (audio_callback_time) {
00792 time_diff = av_gettime() - audio_callback_time;
00793 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
00794 }
00795
00796 delay -= s->width / 2;
00797 if (delay < s->width)
00798 delay = s->width;
00799
00800 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
00801
00802 h= INT_MIN;
00803 for(i=0; i<1000; i+=channels){
00804 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
00805 int a= s->sample_array[idx];
00806 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
00807 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
00808 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
00809 int score= a-d;
00810 if(h<score && (b^c)<0){
00811 h= score;
00812 i_start= idx;
00813 }
00814 }
00815
00816 s->last_i_start = i_start;
00817 } else {
00818 i_start = s->last_i_start;
00819 }
00820
00821 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
00822 fill_rectangle(screen,
00823 s->xleft, s->ytop, s->width, s->height,
00824 bgcolor);
00825
00826 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
00827
00828 /* total height for one channel */
00829 h = s->height / nb_display_channels;
00830 /* graph height / 2 */
00831 h2 = (h * 9) / 20;
00832 for(ch = 0;ch < nb_display_channels; ch++) {
00833 i = i_start + ch;
00834 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
00835 for(x = 0; x < s->width; x++) {
00836 y = (s->sample_array[i] * h2) >> 15;
00837 if (y < 0) {
00838 y = -y;
00839 ys = y1 - y;
00840 } else {
00841 ys = y1;
00842 }
00843 fill_rectangle(screen,
00844 s->xleft + x, ys, 1, y,
00845 fgcolor);
00846 i += channels;
00847 if (i >= SAMPLE_ARRAY_SIZE)
00848 i -= SAMPLE_ARRAY_SIZE;
00849 }
00850 }
00851
00852 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
00853
00854 for(ch = 1;ch < nb_display_channels; ch++) {
00855 y = s->ytop + ch * h;
00856 fill_rectangle(screen,
00857 s->xleft, y, s->width, 1,
00858 fgcolor);
00859 }
00860 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
00861 }
00862
/* (Re)create the SDL output surface.  Size preference: detected
   fullscreen resolution > user-requested size > codec frame size >
   640x480 fallback.  Returns 0 on success, -1 if the video mode could
   not be set. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    /* remember the size actually granted by SDL */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
00900
00901 /* display the current picture, if any */
00902 static void video_display(VideoState *is)
00903 {
00904 if(!screen)
00905 video_open(cur_stream);
00906 if (is->audio_st && is->show_audio)
00907 video_audio_display(is);
00908 else if (is->video_st)
00909 video_image_display(is);
00910 }
00911
00912 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
00913 {
00914 SDL_Event event;
00915 event.type = FF_REFRESH_EVENT;
00916 event.user.data1 = opaque;
00917 SDL_PushEvent(&event);
00918 return 0; /* 0 means stop timer */
00919 }
00920
00921 /* schedule a video refresh in 'delay' ms */
00922 static void schedule_refresh(VideoState *is, int delay)
00923 {
00924 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
00925 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
00926 }
00927
00928 /* get the current audio clock value */
00929 static double get_audio_clock(VideoState *is)
00930 {
00931 double pts;
00932 int hw_buf_size, bytes_per_sec;
00933 pts = is->audio_clock;
00934 hw_buf_size = audio_write_get_buf_size(is);
00935 bytes_per_sec = 0;
00936 if (is->audio_st) {
00937 bytes_per_sec = is->audio_st->codec->sample_rate *
00938 2 * is->audio_st->codec->channels;
00939 }
00940 if (bytes_per_sec)
00941 pts -= (double)hw_buf_size / bytes_per_sec;
00942 return pts;
00943 }
00944
00945 /* get the current video clock value */
00946 static double get_video_clock(VideoState *is)
00947 {
00948 double delta;
00949 if (is->paused) {
00950 delta = 0;
00951 } else {
00952 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
00953 }
00954 return is->video_current_pts + delta;
00955 }
00956
00957 /* get the current external clock value */
00958 static double get_external_clock(VideoState *is)
00959 {
00960 int64_t ti;
00961 ti = av_gettime();
00962 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
00963 }
00964
00965 /* get the current master clock value */
00966 static double get_master_clock(VideoState *is)
00967 {
00968 double val;
00969
00970 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
00971 if (is->video_st)
00972 val = get_video_clock(is);
00973 else
00974 val = get_audio_clock(is);
00975 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
00976 if (is->audio_st)
00977 val = get_audio_clock(is);
00978 else
00979 val = get_video_clock(is);
00980 } else {
00981 val = get_external_clock(is);
00982 }
00983 return val;
00984 }
00985
00986 /* seek in the stream */
00987 static void stream_seek(VideoState *is, int64_t pos, int rel)
00988 {
00989 if (!is->seek_req) {
00990 is->seek_pos = pos;
00991 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
00992 if (seek_by_bytes)
00993 is->seek_flags |= AVSEEK_FLAG_BYTE;
00994 is->seek_req = 1;
00995 }
00996 }
00997
00998 /* pause or resume the video */
00999 static void stream_pause(VideoState *is)
01000 {
01001 is->paused = !is->paused;
01002 if (!is->paused) {
01003 is->video_current_pts = get_video_clock(is);
01004 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
01005 }
01006 }
01007
/* Compute the delay (seconds) to wait before displaying the frame whose
   timestamp is frame_current_pts, nudging video toward the master clock
   when video is the slave.  The return value is used as the timer for
   the next refresh (see video_refresh_timer()). */
static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    /* compute nominal delay */
    /* frame_last_pts holds the previous frame's pts; subtracting it from
       the current frame's pts gives an estimated inter-frame delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    /* remember this frame's pts for the next call */
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        /* reference time from the master clock */
        ref_clock = get_master_clock(is);
        /* how far this frame's pts is ahead (+) or behind (-) the master */
        diff = frame_current_pts - ref_clock;

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
           delay, actual_delay, frame_current_pts, -diff);
#endif

    /* actual_delay is used as the timer interval for the next refresh */
    return actual_delay;
}
01058
/* called by the refresh timer to display each frame; also reschedules
   itself (via schedule_refresh) and prints periodic status */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait: poll again very soon */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* launch timer for next picture; delay is converted from
               seconds to rounded milliseconds */
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream was switched: drop every queued
                       subpicture and wake the blocked subtitle thread */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        /* sp2 = next queued subpicture, if any */
                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the current subpicture once its display
                           window has ended, or once the next one is due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                            || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        /* no stream opened yet: just keep the timer alive */
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print at most twice a second */
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
01178
01179 /* allocate a picture (needs to do that in main thread to avoid
01180 potential locking problems */
01181 static void alloc_picture(void *opaque)
01182 {
01183 VideoState *is = opaque;
01184 VideoPicture *vp;
01185
01186 vp = &is->pictq[is->pictq_windex];
01187
01188 if (vp->bmp)
01189 SDL_FreeYUVOverlay(vp->bmp);
01190
01191 #if 0
01192 /* XXX: use generic function */
01193 /* XXX: disable overlay if no hardware acceleration or if RGB format */
01194 switch(is->video_st->codec->pix_fmt) {
01195 case PIX_FMT_YUV420P:
01196 case PIX_FMT_YUV422P:
01197 case PIX_FMT_YUV444P:
01198 case PIX_FMT_YUYV422:
01199 case PIX_FMT_YUV410P:
01200 case PIX_FMT_YUV411P:
01201 is_yuv = 1;
01202 break;
01203 default:
01204 is_yuv = 0;
01205 break;
01206 }
01207 #endif
01208 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
01209 is->video_st->codec->height,
01210 SDL_YV12_OVERLAY,
01211 screen);
01212 vp->width = is->video_st->codec->width;
01213 vp->height = is->video_st->codec->height;
01214
01215 SDL_LockMutex(is->pictq_mutex);
01216 vp->allocated = 1;
01217 SDL_CondSignal(is->pictq_cond);
01218 SDL_UnlockMutex(is->pictq_mutex);
01219 }
01220
/* Convert the decoded frame to YV12 and append it to the picture queue
   with the given pts (seconds).  Blocks while the queue is full or the
   overlay is being (re)allocated by the main thread.  Returns 0 on
   success, -1 on abort. */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;
    static struct SwsContext *img_convert_ctx;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems: ask it via FF_ALLOC_EVENT */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated (alloc_picture() signals) */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        /* SDL's YV12 overlay stores planes in Y,V,U order while
           PIX_FMT_YUV420P is Y,U,V — hence the swapped [1]/[2] indices */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        /* cached context is reused while the parameters stay the same */
        img_convert_ctx = sws_getCachedContext(img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
01309
/* Update the video clock from the frame's pts (or extrapolate the pts
   from the clock when the frame has none) and queue the picture.
   Returns queue_picture()'s result (0 on success, -1 on abort). */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        /* no pts: fall back to the extrapolated clock value */
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly: each repeat adds half a frame duration */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}
01348
/* Video decoder thread: pulls packets from videoq, decodes them and
   hands finished frames (with a pts in seconds) to output_picture2(). */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        /* a flush packet (queued after a seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any.  reordered_opaque carries the packet pts
           through the decoder's frame reordering. */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);

        /* pick the best timestamp: reordered pts when reliable, else dts */
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        /* convert from stream time_base units to seconds */
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        /* frame-step mode: pause again after emitting one frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}
01400
/* Subtitle decoder thread: pulls packets from subtitleq, decodes them,
   converts bitmap-subtitle palettes from RGBA to YUVA and queues the
   result for video_refresh_timer() to display/retire. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* flush packet after a seek: reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* block until there is room in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                       &sp->sub, &got_subtitle,
                                       pkt->data, pkt->size);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap subtitles with a palette we must
           convert to YUVA before blending onto the YUV overlay */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    /* palette entry lives in pict.data[1]; rewrite it in place */
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
01475
01476 /* copy samples for viewing in editor window */
01477 static void update_sample_display(VideoState *is, short *samples, int samples_size)
01478 {
01479 int size, len, channels;
01480
01481 channels = is->audio_st->codec->channels;
01482
01483 size = samples_size / sizeof(short);
01484 while (size > 0) {
01485 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
01486 if (len > size)
01487 len = size;
01488 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
01489 samples += len;
01490 is->sample_array_index += len;
01491 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
01492 is->sample_array_index = 0;
01493 size -= len;
01494 }
01495 }
01496
01497 /* return the new audio buffer size (samples can be added or deleted
01498 to get better sync if video or external master clock) */
01499 static int synchronize_audio(VideoState *is, short *samples,
01500 int samples_size1, double pts)
01501 {
01502 int n, samples_size;
01503 double ref_clock;
01504
01505 n = 2 * is->audio_st->codec->channels;
01506 samples_size = samples_size1;
01507
01508 /* if not master, then we try to remove or add samples to correct the clock */
01509 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
01510 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01511 double diff, avg_diff;
01512 int wanted_size, min_size, max_size, nb_samples;
01513
01514 ref_clock = get_master_clock(is);
01515 diff = get_audio_clock(is) - ref_clock;
01516
01517 if (diff < AV_NOSYNC_THRESHOLD) {
01518 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
01519 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
01520 /* not enough measures to have a correct estimate */
01521 is->audio_diff_avg_count++;
01522 } else {
01523 /* estimate the A-V difference */
01524 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
01525
01526 if (fabs(avg_diff) >= is->audio_diff_threshold) {
01527 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
01528 nb_samples = samples_size / n;
01529
01530 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01531 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01532 if (wanted_size < min_size)
01533 wanted_size = min_size;
01534 else if (wanted_size > max_size)
01535 wanted_size = max_size;
01536
01537 /* add or remove samples to correction the synchro */
01538 if (wanted_size < samples_size) {
01539 /* remove samples */
01540 samples_size = wanted_size;
01541 } else if (wanted_size > samples_size) {
01542 uint8_t *samples_end, *q;
01543 int nb;
01544
01545 /* add samples */
01546 nb = (samples_size - wanted_size);
01547 samples_end = (uint8_t *)samples + samples_size - n;
01548 q = samples_end + n;
01549 while (nb > 0) {
01550 memcpy(q, samples_end, n);
01551 q += n;
01552 nb -= n;
01553 }
01554 samples_size = wanted_size;
01555 }
01556 }
01557 #if 0
01558 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
01559 diff, avg_diff, samples_size - samples_size1,
01560 is->audio_clock, is->video_clock, is->audio_diff_threshold);
01561 #endif
01562 }
01563 } else {
01564 /* too big difference : may be initial PTS errors, so
01565 reset A-V filter */
01566 is->audio_diff_avg_count = 0;
01567 is->audio_diff_cum = 0;
01568 }
01569 }
01570
01571 return samples_size;
01572 }
01573
/* decode one audio frame and return its uncompressed size in bytes;
   advances is->audio_clock and stores the frame's pts (seconds) in
   *pts_ptr.  Points is->audio_buf at the ready data.  Returns -1 on
   pause/abort or when no more packets are available. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio2(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            /* advance past the consumed bytes of the packet */
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter when the decoder's sample format
               differs from the S16 format we feed to SDL */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (S16, 2 bytes/sample) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it from the running audio clock */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the clock by the duration of the decoded data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* flush packet after a seek: reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
01675
/* get the current audio output buffer size, in bytes still pending
   delivery to SDL. With SDL, we cannot have a precise information, so
   this only counts the unconsumed part of audio_buf. */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
01682
01683
/* prepare a new audio buffer: SDL audio callback, runs on SDL's audio
   thread.  Must fill exactly `len` bytes of `stream`; decodes as many
   frames as needed and outputs silence on decode failure. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* timestamp of this callback, used elsewhere for clock estimation */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill audio_buf once the previous contents are consumed */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* may shrink/grow the buffer to track the master clock */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy out as much as fits in the SDL buffer */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
01719
/* open a given stream. Return 0 if OK: finds and opens the decoder,
   applies command-line codec options, opens SDL audio output for audio
   streams, and starts the decoder thread for video/subtitle streams. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output: request at most stereo from the decoder */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    /* apply the global command-line decoding options */
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;

    set_context_opts(enc, avctx_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output: open the SDL audio device */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    /* re-enable demuxing of this stream (disabled in decode_thread) */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        /* seed the frame timing state used by compute_frame_delay() */
        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
01823
/* Close one stream component: abort its packet queue, join its decoder
   thread (or close SDL audio), free the queue and reset the VideoState
   bookkeeping for that stream. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the callback, after which no thread touches audioq */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases (it may be waiting in queue_picture) */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases (it may be waiting for queue room) */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* tell the demuxer to drop packets of this stream again */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
01894
01895 static void dump_stream_info(const AVFormatContext *s)
01896 {
01897 AVMetadataTag *tag = NULL;
01898 while ((tag=av_metadata_get(s->metadata,"",tag,AV_METADATA_IGNORE_SUFFIX)))
01899 fprintf(stderr, "%s: %s\n", tag->key, tag->value);
01900 }
01901
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable; it lets the
   decode_interrupt_cb() below observe the active player's abort flag */
static VideoState *global_video_state;
01905
01906 static int decode_interrupt_cb(void)
01907 {
01908 return (global_video_state && global_video_state->abort_request);
01909 }
01910
01911 /* this thread gets the stream from the disk or the network */
01912 static int decode_thread(void *arg)
01913 {
01914 VideoState *is = arg;
01915 AVFormatContext *ic;
01916 int err, i, ret, video_index, audio_index, subtitle_index;
01917 AVPacket pkt1, *pkt = &pkt1;
01918 AVFormatParameters params, *ap = ¶ms;
01919
01920 video_index = -1;
01921 audio_index = -1;
01922 subtitle_index = -1;
01923 is->video_stream = -1;
01924 is->audio_stream = -1;
01925 is->subtitle_stream = -1;
01926
01927 global_video_state = is;
01928 url_set_interrupt_cb(decode_interrupt_cb);
01929
01930 memset(ap, 0, sizeof(*ap));
01931
01932 ap->width = frame_width;
01933 ap->height= frame_height;
01934 ap->time_base= (AVRational){1, 25};
01935 ap->pix_fmt = frame_pix_fmt;
01936
01937 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
01938 if (err < 0) {
01939 print_error(is->filename, err);
01940 ret = -1;
01941 goto fail;
01942 }
01943 is->ic = ic;
01944
01945 if(genpts)
01946 ic->flags |= AVFMT_FLAG_GENPTS;
01947
01948 err = av_find_stream_info(ic);
01949 if (err < 0) {
01950 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
01951 ret = -1;
01952 goto fail;
01953 }
01954 if(ic->pb)
01955 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
01956
01957 /* if seeking requested, we execute it */
01958 if (start_time != AV_NOPTS_VALUE) {
01959 int64_t timestamp;
01960
01961 timestamp = start_time;
01962 /* add the stream start time */
01963 if (ic->start_time != AV_NOPTS_VALUE)
01964 timestamp += ic->start_time;
01965 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
01966 if (ret < 0) {
01967 fprintf(stderr, "%s: could not seek to position %0.3f\n",
01968 is->filename, (double)timestamp / AV_TIME_BASE);
01969 }
01970 }
01971
01972 for(i = 0; i < ic->nb_streams; i++) {
01973 AVCodecContext *enc = ic->streams[i]->codec;
01974 ic->streams[i]->discard = AVDISCARD_ALL;
01975 switch(enc->codec_type) {
01976 case CODEC_TYPE_AUDIO:
01977 if (wanted_audio_stream-- >= 0 && !audio_disable)
01978 audio_index = i;
01979 break;
01980 case CODEC_TYPE_VIDEO:
01981 if (wanted_video_stream-- >= 0 && !video_disable)
01982 video_index = i;
01983 break;
01984 case CODEC_TYPE_SUBTITLE:
01985 if (wanted_subtitle_stream-- >= 0 && !video_disable)
01986 subtitle_index = i;
01987 break;
01988 default:
01989 break;
01990 }
01991 }
01992 if (show_status) {
01993 dump_format(ic, 0, is->filename, 0);
01994 dump_stream_info(ic);
01995 }
01996
01997 /* open the streams */
01998 if (audio_index >= 0) {
01999 stream_component_open(is, audio_index);
02000 }
02001
02002 if (video_index >= 0) {
02003 stream_component_open(is, video_index);
02004 } else {
02005 if (!display_disable)
02006 is->show_audio = 1;
02007 }
02008
02009 if (subtitle_index >= 0) {
02010 stream_component_open(is, subtitle_index);
02011 }
02012
02013 if (is->video_stream < 0 && is->audio_stream < 0) {
02014 fprintf(stderr, "%s: could not open codecs\n", is->filename);
02015 ret = -1;
02016 goto fail;
02017 }
02018
02019 for(;;) {
02020 if (is->abort_request)
02021 break;
02022 if (is->paused != is->last_paused) {
02023 is->last_paused = is->paused;
02024 if (is->paused)
02025 av_read_pause(ic);
02026 else
02027 av_read_play(ic);
02028 }
02029 #if CONFIG_RTSP_DEMUXER
02030 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
02031 /* wait 10 ms to avoid trying to get another packet */
02032 /* XXX: horrible */
02033 SDL_Delay(10);
02034 continue;
02035 }
02036 #endif
02037 if (is->seek_req) {
02038 int stream_index= -1;
02039 int64_t seek_target= is->seek_pos;
02040
02041 if (is-> video_stream >= 0) stream_index= is-> video_stream;
02042 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
02043 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
02044
02045 if(stream_index>=0){
02046 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
02047 }
02048
02049 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
02050 if (ret < 0) {
02051 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
02052 }else{
02053 if (is->audio_stream >= 0) {
02054 packet_queue_flush(&is->audioq);
02055 packet_queue_put(&is->audioq, &flush_pkt);
02056 }
02057 if (is->subtitle_stream >= 0) {
02058 packet_queue_flush(&is->subtitleq);
02059 packet_queue_put(&is->subtitleq, &flush_pkt);
02060 }
02061 if (is->video_stream >= 0) {
02062 packet_queue_flush(&is->videoq);
02063 packet_queue_put(&is->videoq, &flush_pkt);
02064 }
02065 }
02066 is->seek_req = 0;
02067 }
02068
02069 /* if the queue are full, no need to read more */
02070 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
02071 is->videoq.size > MAX_VIDEOQ_SIZE ||
02072 is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
02073 /* wait 10 ms */
02074 SDL_Delay(10);
02075 continue;
02076 }
02077 if(url_feof(ic->pb)) {
02078 av_init_packet(pkt);
02079 pkt->data=NULL;
02080 pkt->size=0;
02081 pkt->stream_index= is->video_stream;
02082 packet_queue_put(&is->videoq, pkt);
02083 continue;
02084 }
02085 ret = av_read_frame(ic, pkt);
02086 if (ret < 0) {
02087 if (ret != AVERROR_EOF && url_ferror(ic->pb) == 0) {
02088 SDL_Delay(100); /* wait for user event */
02089 continue;
02090 } else
02091 break;
02092 }
02093 if (pkt->stream_index == is->audio_stream) {
02094 packet_queue_put(&is->audioq, pkt);
02095 } else if (pkt->stream_index == is->video_stream) {
02096 packet_queue_put(&is->videoq, pkt);
02097 } else if (pkt->stream_index == is->subtitle_stream) {
02098 packet_queue_put(&is->subtitleq, pkt);
02099 } else {
02100 av_free_packet(pkt);
02101 }
02102 }
02103 /* wait until the end */
02104 while (!is->abort_request) {
02105 SDL_Delay(100);
02106 }
02107
02108 ret = 0;
02109 fail:
02110 /* disable interrupting */
02111 global_video_state = NULL;
02112
02113 /* close each stream */
02114 if (is->audio_stream >= 0)
02115 stream_component_close(is, is->audio_stream);
02116 if (is->video_stream >= 0)
02117 stream_component_close(is, is->video_stream);
02118 if (is->subtitle_stream >= 0)
02119 stream_component_close(is, is->subtitle_stream);
02120 if (is->ic) {
02121 av_close_input_file(is->ic);
02122 is->ic = NULL; /* safety */
02123 }
02124 url_set_interrupt_cb(NULL);
02125
02126 if (ret != 0) {
02127 SDL_Event event;
02128
02129 event.type = FF_QUIT_EVENT;
02130 event.user.data1 = is;
02131 SDL_PushEvent(&event);
02132 }
02133 return 0;
02134 }
02135
02136 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
02137 {
02138 VideoState *is;
02139
02140 is = av_mallocz(sizeof(VideoState));
02141 if (!is)
02142 return NULL;
02143 av_strlcpy(is->filename, filename, sizeof(is->filename));
02144 is->iformat = iformat;
02145 is->ytop = 0;
02146 is->xleft = 0;
02147
02148 /* start video display */
02149 is->pictq_mutex = SDL_CreateMutex();
02150 is->pictq_cond = SDL_CreateCond();
02151
02152 is->subpq_mutex = SDL_CreateMutex();
02153 is->subpq_cond = SDL_CreateCond();
02154
02155 /* add the refresh timer to draw the picture */
02156 schedule_refresh(is, 40);
02157
02158 is->av_sync_type = av_sync_type;
02159 is->parse_tid = SDL_CreateThread(decode_thread, is);
02160 if (!is->parse_tid) {
02161 av_free(is);
02162 return NULL;
02163 }
02164 return is;
02165 }
02166
02167 static void stream_close(VideoState *is)
02168 {
02169 VideoPicture *vp;
02170 int i;
02171 /* XXX: use a special url_shutdown call to abort parse cleanly */
02172 is->abort_request = 1;
02173 SDL_WaitThread(is->parse_tid, NULL);
02174
02175 /* free all pictures */
02176 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
02177 vp = &is->pictq[i];
02178 if (vp->bmp) {
02179 SDL_FreeYUVOverlay(vp->bmp);
02180 vp->bmp = NULL;
02181 }
02182 }
02183 SDL_DestroyMutex(is->pictq_mutex);
02184 SDL_DestroyCond(is->pictq_cond);
02185 SDL_DestroyMutex(is->subpq_mutex);
02186 SDL_DestroyCond(is->subpq_cond);
02187 }
02188
02189 static void stream_cycle_channel(VideoState *is, int codec_type)
02190 {
02191 AVFormatContext *ic = is->ic;
02192 int start_index, stream_index;
02193 AVStream *st;
02194
02195 if (codec_type == CODEC_TYPE_VIDEO)
02196 start_index = is->video_stream;
02197 else if (codec_type == CODEC_TYPE_AUDIO)
02198 start_index = is->audio_stream;
02199 else
02200 start_index = is->subtitle_stream;
02201 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
02202 return;
02203 stream_index = start_index;
02204 for(;;) {
02205 if (++stream_index >= is->ic->nb_streams)
02206 {
02207 if (codec_type == CODEC_TYPE_SUBTITLE)
02208 {
02209 stream_index = -1;
02210 goto the_end;
02211 } else
02212 stream_index = 0;
02213 }
02214 if (stream_index == start_index)
02215 return;
02216 st = ic->streams[stream_index];
02217 if (st->codec->codec_type == codec_type) {
02218 /* check that parameters are OK */
02219 switch(codec_type) {
02220 case CODEC_TYPE_AUDIO:
02221 if (st->codec->sample_rate != 0 &&
02222 st->codec->channels != 0)
02223 goto the_end;
02224 break;
02225 case CODEC_TYPE_VIDEO:
02226 case CODEC_TYPE_SUBTITLE:
02227 goto the_end;
02228 default:
02229 break;
02230 }
02231 }
02232 }
02233 the_end:
02234 stream_component_close(is, start_index);
02235 stream_component_open(is, stream_index);
02236 }
02237
02238
02239 static void toggle_full_screen(void)
02240 {
02241 is_full_screen = !is_full_screen;
02242 if (!fs_screen_width) {
02243 /* use default SDL method */
02244 // SDL_WM_ToggleFullScreen(screen);
02245 }
02246 video_open(cur_stream);
02247 }
02248
02249 static void toggle_pause(void)
02250 {
02251 if (cur_stream)
02252 stream_pause(cur_stream);
02253 step = 0;
02254 }
02255
02256 static void step_to_next_frame(void)
02257 {
02258 if (cur_stream) {
02259 /* if the stream is paused unpause it, then step */
02260 if (cur_stream->paused)
02261 stream_pause(cur_stream);
02262 }
02263 step = 1;
02264 }
02265
02266 static void do_exit(void)
02267 {
02268 if (cur_stream) {
02269 stream_close(cur_stream);
02270 cur_stream = NULL;
02271 }
02272 if (show_status)
02273 printf("\n");
02274 SDL_Quit();
02275 exit(0);
02276 }
02277
02278 static void toggle_audio_display(void)
02279 {
02280 if (cur_stream) {
02281 cur_stream->show_audio = !cur_stream->show_audio;
02282 }
02283 }
02284
02285 /* handle an event sent by the GUI */
02286 static void event_loop(void)
02287 {
02288 SDL_Event event;
02289 double incr, pos, frac;
02290
02291 for(;;) {
02292 SDL_WaitEvent(&event);
02293 switch(event.type) {
02294 case SDL_KEYDOWN:
02295 switch(event.key.keysym.sym) {
02296 case SDLK_ESCAPE:
02297 case SDLK_q:
02298 do_exit();
02299 break;
02300 case SDLK_f:
02301 toggle_full_screen();
02302 break;
02303 case SDLK_p:
02304 case SDLK_SPACE:
02305 toggle_pause();
02306 break;
02307 case SDLK_s: //S: Step to next frame
02308 step_to_next_frame();
02309 break;
02310 case SDLK_a:
02311 if (cur_stream)
02312 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
02313 break;
02314 case SDLK_v:
02315 if (cur_stream)
02316 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
02317 break;
02318 case SDLK_t:
02319 if (cur_stream)
02320 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
02321 break;
02322 case SDLK_w:
02323 toggle_audio_display();
02324 break;
02325 case SDLK_LEFT:
02326 incr = -10.0;
02327 goto do_seek;
02328 case SDLK_RIGHT:
02329 incr = 10.0;
02330 goto do_seek;
02331 case SDLK_UP:
02332 incr = 60.0;
02333 goto do_seek;
02334 case SDLK_DOWN:
02335 incr = -60.0;
02336 do_seek:
02337 if (cur_stream) {
02338 if (seek_by_bytes) {
02339 pos = url_ftell(cur_stream->ic->pb);
02340 if (cur_stream->ic->bit_rate)
02341 incr *= cur_stream->ic->bit_rate / 60.0;
02342 else
02343 incr *= 180000.0;
02344 pos += incr;
02345 stream_seek(cur_stream, pos, incr);
02346 } else {
02347 pos = get_master_clock(cur_stream);
02348 pos += incr;
02349 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
02350 }
02351 }
02352 break;
02353 default:
02354 break;
02355 }
02356 break;
02357 case SDL_MOUSEBUTTONDOWN:
02358 if (cur_stream) {
02359 int ns, hh, mm, ss;
02360 int tns, thh, tmm, tss;
02361 tns = cur_stream->ic->duration/1000000LL;
02362 thh = tns/3600;
02363 tmm = (tns%3600)/60;
02364 tss = (tns%60);
02365 frac = (double)event.button.x/(double)cur_stream->width;
02366 ns = frac*tns;
02367 hh = ns/3600;
02368 mm = (ns%3600)/60;
02369 ss = (ns%60);
02370 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
02371 hh, mm, ss, thh, tmm, tss);
02372 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
02373 }
02374 break;
02375 case SDL_VIDEORESIZE:
02376 if (cur_stream) {
02377 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
02378 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
02379 screen_width = cur_stream->width = event.resize.w;
02380 screen_height= cur_stream->height= event.resize.h;
02381 }
02382 break;
02383 case SDL_QUIT:
02384 case FF_QUIT_EVENT:
02385 do_exit();
02386 break;
02387 case FF_ALLOC_EVENT:
02388 video_open(event.user.data1);
02389 alloc_picture(event.user.data1);
02390 break;
02391 case FF_REFRESH_EVENT:
02392 video_refresh_timer(event.user.data1);
02393 break;
02394 default:
02395 break;
02396 }
02397 }
02398 }
02399
02400 static void opt_frame_size(const char *arg)
02401 {
02402 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
02403 fprintf(stderr, "Incorrect frame size\n");
02404 exit(1);
02405 }
02406 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
02407 fprintf(stderr, "Frame size must be a multiple of 2\n");
02408 exit(1);
02409 }
02410 }
02411
02412 static int opt_width(const char *opt, const char *arg)
02413 {
02414 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02415 return 0;
02416 }
02417
02418 static int opt_height(const char *opt, const char *arg)
02419 {
02420 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02421 return 0;
02422 }
02423
02424 static void opt_format(const char *arg)
02425 {
02426 file_iformat = av_find_input_format(arg);
02427 if (!file_iformat) {
02428 fprintf(stderr, "Unknown input format: %s\n", arg);
02429 exit(1);
02430 }
02431 }
02432
02433 static void opt_frame_pix_fmt(const char *arg)
02434 {
02435 frame_pix_fmt = avcodec_get_pix_fmt(arg);
02436 }
02437
02438 static int opt_sync(const char *opt, const char *arg)
02439 {
02440 if (!strcmp(arg, "audio"))
02441 av_sync_type = AV_SYNC_AUDIO_MASTER;
02442 else if (!strcmp(arg, "video"))
02443 av_sync_type = AV_SYNC_VIDEO_MASTER;
02444 else if (!strcmp(arg, "ext"))
02445 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
02446 else {
02447 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
02448 exit(1);
02449 }
02450 return 0;
02451 }
02452
02453 static int opt_seek(const char *opt, const char *arg)
02454 {
02455 start_time = parse_time_or_die(opt, arg, 1);
02456 return 0;
02457 }
02458
02459 static int opt_debug(const char *opt, const char *arg)
02460 {
02461 av_log_set_level(99);
02462 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
02463 return 0;
02464 }
02465
02466 static int opt_vismv(const char *opt, const char *arg)
02467 {
02468 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
02469 return 0;
02470 }
02471
02472 static int opt_thread_count(const char *opt, const char *arg)
02473 {
02474 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
02475 #if !HAVE_THREADS
02476 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
02477 #endif
02478 return 0;
02479 }
02480
02481 static const OptionDef options[] = {
02482 { "h", OPT_EXIT, {(void*)show_help}, "show help" },
02483 { "version", OPT_EXIT, {(void*)show_version}, "show version" },
02484 { "L", OPT_EXIT, {(void*)show_license}, "show license" },
02485 { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
02486 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
02487 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
02488 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
02489 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
02490 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
02491 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
02492 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
02493 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
02494 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
02495 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
02496 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
02497 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
02498 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
02499 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
02500 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
02501 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
02502 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
02503 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
02504 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
02505 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
02506 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
02507 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
02508 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
02509 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
02510 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
02511 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
02512 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
02513 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
02514 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
02515 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
02516 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
02517 { NULL, },
02518 };
02519
02520 static void show_help(void)
02521 {
02522 printf("usage: ffplay [options] input_file\n"
02523 "Simple media player\n");
02524 printf("\n");
02525 show_help_options(options, "Main options:\n",
02526 OPT_EXPERT, 0);
02527 show_help_options(options, "\nAdvanced options:\n",
02528 OPT_EXPERT, OPT_EXPERT);
02529 printf("\nWhile playing:\n"
02530 "q, ESC quit\n"
02531 "f toggle full screen\n"
02532 "p, SPC pause\n"
02533 "a cycle audio channel\n"
02534 "v cycle video channel\n"
02535 "t cycle subtitle channel\n"
02536 "w show audio waves\n"
02537 "left/right seek backward/forward 10 seconds\n"
02538 "down/up seek backward/forward 1 minute\n"
02539 "mouse click seek to percentage in file corresponding to fraction of width\n"
02540 );
02541 }
02542
02543 static void opt_input_file(const char *filename)
02544 {
02545 if (!strcmp(filename, "-"))
02546 filename = "pipe:";
02547 input_filename = filename;
02548 }
02549
02550 /* Called from the main */
02551 int main(int argc, char **argv)
02552 {
02553 int flags, i;
02554
02555 /* register all codecs, demux and protocols */
02556 avcodec_register_all();
02557 avdevice_register_all();
02558 av_register_all();
02559
02560 for(i=0; i<CODEC_TYPE_NB; i++){
02561 avctx_opts[i]= avcodec_alloc_context2(i);
02562 }
02563 avformat_opts = avformat_alloc_context();
02564 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
02565
02566 show_banner();
02567
02568 parse_options(argc, argv, options, opt_input_file);
02569
02570 if (!input_filename) {
02571 fprintf(stderr, "An input file must be specified\n");
02572 exit(1);
02573 }
02574
02575 if (display_disable) {
02576 video_disable = 1;
02577 }
02578 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
02579 #if !defined(__MINGW32__) && !defined(__APPLE__)
02580 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
02581 #endif
02582 if (SDL_Init (flags)) {
02583 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
02584 exit(1);
02585 }
02586
02587 if (!display_disable) {
02588 #if HAVE_SDL_VIDEO_SIZE
02589 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
02590 fs_screen_width = vi->current_w;
02591 fs_screen_height = vi->current_h;
02592 #endif
02593 }
02594
02595 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
02596 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
02597 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
02598 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
02599
02600 av_init_packet(&flush_pkt);
02601 flush_pkt.data= "FLUSH";
02602
02603 cur_stream = stream_open(input_filename, file_iformat);
02604
02605 event_loop();
02606
02607 /* never returns */
02608
02609 return 0;
02610 }
/*
 * The ffplay flow overview follows below (tutorial05.c).
 * Notes on ffplay audio/video synchronization:
 * http://hi.baidu.com/hainei_/item/053360e28a0f90216cabb811
 */
// tutorial05.c
// A pedagogical video player that really works!
//
// This tutorial was written by Stephen Dranger (dranger@gmail.com).
//
// Code based on FFplay, Copyright (c) 2003 Fabrice Bellard,
// and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
// Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
//
// Use the Makefile to build all the samples.
//
// Run using
// tutorial05 myvideofile.mpg
//
// to play the video.
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libswscale/swscale.h>
#include <libavutil/avstring.h>
#include <libavutil/time.h>
#include <SDL.h>
#include <SDL_thread.h>
#ifdef __MINGW32__
#undef main /* Prevents SDL from overriding main() */
#endif
#include <stdio.h>
#include <math.h>
#define SDL_AUDIO_BUFFER_SIZE 1024
#define MAX_AUDIO_FRAME_SIZE 192000
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define AV_SYNC_THRESHOLD 0.01
#define AV_NOSYNC_THRESHOLD 10.0
#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
#define VIDEO_PICTURE_QUEUE_SIZE 1
/* Thread-safe FIFO of demuxed AVPackets, shared between the demuxer
 * thread (producer) and the audio/video decoder threads (consumers). */
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt; /* singly linked list head/tail */
int nb_packets; /* number of packets currently queued */
int size; /* sum of queued packet payload sizes, in bytes */
SDL_mutex *mutex; /* guards all fields above */
SDL_cond *cond; /* signaled whenever a packet is appended */
} PacketQueue;
/* One decoded picture waiting in the display queue. */
typedef struct VideoPicture {
SDL_Overlay *bmp; /* YUV overlay holding the converted frame (NULL until allocated) */
int width, height; /* source height & width */
int allocated; /* set by the main thread once the overlay exists */
double pts; /* presentation timestamp of this picture, in seconds */
} VideoPicture;
/* The "Big Struct": all per-file playback state, shared between the
 * demuxer, video decoder, audio callback and main (display) threads. */
typedef struct VideoState {
AVFormatContext *pFormatCtx; /* demuxer context for the open file */
int videoStream, audioStream; /* selected stream indices (-1 = none) */
double audio_clock; /* audio PTS at the end of the decoded data, seconds */
AVStream *audio_st; /* audio stream being decoded */
PacketQueue audioq; /* demuxed audio packets */
AVFrame audio_frame; /* scratch frame for avcodec_decode_audio4 */
uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2]; /* decoded PCM staging buffer */
unsigned int audio_buf_size; /* valid bytes in audio_buf */
unsigned int audio_buf_index; /* bytes of audio_buf already sent to SDL */
AVPacket audio_pkt; /* packet currently being drained by the decoder */
uint8_t *audio_pkt_data; /* read cursor into audio_pkt */
int audio_pkt_size; /* bytes of audio_pkt not yet decoded */
int audio_hw_buf_size; /* size of SDL's hardware audio buffer */
double frame_timer; /* wall-clock time the next frame should be shown */
double frame_last_pts; /* PTS of the last displayed frame */
double frame_last_delay; /* delay used for the last frame, seconds */
double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
AVStream *video_st; /* video stream being decoded */
PacketQueue videoq; /* demuxed video packets */
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; /* ring buffer of decoded pictures */
int pictq_size, pictq_rindex, pictq_windex; /* ring fill count, read and write indices */
SDL_mutex *pictq_mutex; /* guards pictq_size and VideoPicture.allocated */
SDL_cond *pictq_cond; /* signaled on picture consumed / overlay allocated */
SDL_Thread *parse_tid; /* demuxer ("parse") thread */
SDL_Thread *video_tid; /* video decoder thread */
char filename[1024]; /* input file name (NUL-terminated) */
int quit; /* set to 1 to make all threads exit */
AVIOContext *io_context; /* I/O context opened with the interrupt callback */
struct SwsContext *sws_ctx; /* converter from decoder pix_fmt to YUV420P */
} VideoState;
SDL_Surface *screen; /* the single SDL video surface, created in main() */
/* Since we only have one decoding thread, the Big Struct
can be global in case we need it. */
VideoState *global_video_state; /* read by packet_queue_get()/decode_interrupt_cb() */
/* Zero a queue and create its mutex and condition variable. */
void packet_queue_init(PacketQueue *q) {
  memset(q, 0, sizeof(*q));
  q->mutex = SDL_CreateMutex();
  q->cond = SDL_CreateCond();
}
/* Append a packet to the queue and wake one waiting consumer.  The
 * packet payload is duplicated first so the caller's AVPacket may be
 * reused.  Returns 0 on success, -1 on dup/allocation failure. */
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
  AVPacketList *node;

  if (av_dup_packet(pkt) < 0) {
    return -1;
  }
  node = av_malloc(sizeof(AVPacketList));
  if (!node)
    return -1;
  node->pkt = *pkt;
  node->next = NULL;

  SDL_LockMutex(q->mutex);
  if (q->last_pkt)
    q->last_pkt->next = node;  /* append to non-empty list */
  else
    q->first_pkt = node;       /* list was empty */
  q->last_pkt = node;
  q->nb_packets++;
  q->size += node->pkt.size;
  SDL_CondSignal(q->cond);
  SDL_UnlockMutex(q->mutex);
  return 0;
}
/* Pop the oldest packet into *pkt.  With block != 0 the call sleeps on
 * the queue condvar until a packet arrives or the player quits.
 * Returns 1 when a packet was delivered, 0 when non-blocking and
 * empty, -1 when global_video_state->quit is set. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
  AVPacketList *node;
  int ret = 0;

  SDL_LockMutex(q->mutex);
  for (;;) {
    if (global_video_state->quit) {
      ret = -1;
      break;
    }
    node = q->first_pkt;
    if (node) {
      /* unlink the head node and hand its packet to the caller */
      q->first_pkt = node->next;
      if (!q->first_pkt)
        q->last_pkt = NULL;
      q->nb_packets--;
      q->size -= node->pkt.size;
      *pkt = node->pkt;
      av_free(node);
      ret = 1;
      break;
    }
    if (!block) {
      ret = 0;
      break;
    }
    /* empty queue: wait for packet_queue_put() to signal */
    SDL_CondWait(q->cond, q->mutex);
  }
  SDL_UnlockMutex(q->mutex);
  return ret;
}
/* Estimate the audio playback position in seconds: the clock recorded
 * by the decoder minus the time represented by decoded-but-not-yet-
 * played bytes still sitting in audio_buf.
 * Fix: the original computed n = is->audio_st->codec->channels * 2
 * BEFORE the if(is->audio_st) null check, dereferencing a possibly
 * NULL stream; the dereference now happens only inside the guard. */
double get_audio_clock(VideoState *is) {
  double pts;
  int hw_buf_size, bytes_per_sec, n;
  pts = is->audio_clock; /* maintained in the audio thread */
  hw_buf_size = is->audio_buf_size - is->audio_buf_index;
  bytes_per_sec = 0;
  if(is->audio_st) {
    /* 2 bytes per sample per channel (S16 output) */
    n = is->audio_st->codec->channels * 2;
    bytes_per_sec = is->audio_st->codec->sample_rate * n;
  }
  if(bytes_per_sec) {
    /* subtract the latency of the unplayed tail of audio_buf */
    pts -= (double)hw_buf_size / bytes_per_sec;
  }
  return pts;
}
/* Decode audio packets until one chunk of PCM lands in is->audio_buf.
 * Stores the stream time (seconds) of the START of that data in
 * *pts_ptr, then advances is->audio_clock past it.  Returns the number
 * of bytes placed in audio_buf, or -1 on quit / queue shutdown.
 * Runs on the SDL audio-callback thread. */
int audio_decode_frame(VideoState *is, double *pts_ptr) {
int len1, data_size = 0, n;
AVPacket *pkt = &is->audio_pkt;
double pts;
for(;;) {
/* drain the current packet; one packet may yield several frames */
while(is->audio_pkt_size > 0) {
int got_frame;
len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
if(len1 < 0) {
/* if error, skip frame */
is->audio_pkt_size = 0;
break;
}
if (got_frame)
{
data_size =
av_samples_get_buffer_size
(
NULL,
is->audio_st->codec->channels,
is->audio_frame.nb_samples,
is->audio_st->codec->sample_fmt,
1
);
/* NOTE(review): only data[0] is copied; for planar sample formats
 * the remaining channel planes would be dropped -- this assumes a
 * packed/interleaved sample_fmt.  TODO confirm for codecs in use. */
memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
}
/* advance the read cursor within the packet */
is->audio_pkt_data += len1;
is->audio_pkt_size -= len1;
if(data_size <= 0) {
/* No data yet, get more frames */
continue;
}
/* report the clock as of the start of this data, then advance it by
 * the duration of the returned samples (2 bytes/sample/channel) */
pts = is->audio_clock;
*pts_ptr = pts;
n = 2 * is->audio_st->codec->channels;
is->audio_clock += (double)data_size /
(double)(n * is->audio_st->codec->sample_rate);
/* We have data, return it and come back for more later */
return data_size;
}
if(pkt->data)
av_free_packet(pkt);
if(is->quit) {
return -1;
}
/* next packet */
if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
return -1;
}
is->audio_pkt_data = pkt->data;
is->audio_pkt_size = pkt->size;
/* if update, update the audio clock w/pts */
if(pkt->pts != AV_NOPTS_VALUE) {
is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
}
}
}
/* SDL audio callback: fill 'stream' with exactly 'len' bytes of PCM,
 * pulling new chunks via audio_decode_frame() and substituting
 * silence when decoding fails. */
void audio_callback(void *userdata, Uint8 *stream, int len) {
  VideoState *is = (VideoState *)userdata;
  double pts;

  while (len > 0) {
    int chunk;

    if (is->audio_buf_index >= is->audio_buf_size) {
      /* staging buffer exhausted: decode the next chunk */
      int decoded = audio_decode_frame(is, &pts);
      if (decoded < 0) {
        /* If error, output silence */
        is->audio_buf_size = 1024;
        memset(is->audio_buf, 0, is->audio_buf_size);
      } else {
        is->audio_buf_size = decoded;
      }
      is->audio_buf_index = 0;
    }
    /* copy as much of the staged data as SDL still wants */
    chunk = is->audio_buf_size - is->audio_buf_index;
    if (chunk > len)
      chunk = len;
    memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, chunk);
    len -= chunk;
    stream += chunk;
    is->audio_buf_index += chunk;
  }
}
/* SDL timer callback: post an FF_REFRESH_EVENT carrying the
 * VideoState so the main thread redraws.  Returning 0 cancels the
 * timer; rescheduling is done explicitly via schedule_refresh(). */
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {
  SDL_Event ev;

  ev.type = FF_REFRESH_EVENT;
  ev.user.data1 = opaque;
  SDL_PushEvent(&ev);
  return 0; /* 0 means stop timer */
}
/* schedule a video refresh in 'delay' ms */
/* Arms a one-shot SDL timer whose callback posts FF_REFRESH_EVENT. */
static void schedule_refresh(VideoState *is, int delay) {
  SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}
/* Blit the picture at the read index to the screen, centered and
 * scaled to preserve the source aspect ratio. */
void video_display(VideoState *is) {
  SDL_Rect rect;
  VideoPicture *vp = &is->pictq[is->pictq_rindex];
  float aspect_ratio;
  int disp_w, disp_h;

  if (!vp->bmp)
    return;

  /* a sample_aspect_ratio numerator of 0 means "unknown" */
  if (is->video_st->codec->sample_aspect_ratio.num == 0) {
    aspect_ratio = 0;
  } else {
    aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *
                   is->video_st->codec->width / is->video_st->codec->height;
  }
  if (aspect_ratio <= 0.0) {
    /* fall back to the raw pixel aspect */
    aspect_ratio = (float)is->video_st->codec->width /
                   (float)is->video_st->codec->height;
  }

  /* fit to the screen height first, then clamp to the screen width;
   * the "& -3" mask clears bit 1 of the size (historical rounding
   * kept from the original ffplay code) */
  disp_h = screen->h;
  disp_w = ((int)rint(disp_h * aspect_ratio)) & -3;
  if (disp_w > screen->w) {
    disp_w = screen->w;
    disp_h = ((int)rint(disp_w / aspect_ratio)) & -3;
  }
  rect.x = (screen->w - disp_w) / 2;
  rect.y = (screen->h - disp_h) / 2;
  rect.w = disp_w;
  rect.h = disp_h;
  SDL_DisplayYUVOverlay(vp->bmp, &rect);
}
/* FF_REFRESH_EVENT handler (main thread): display the next queued
 * picture, derive the delay to the following frame from its PTS,
 * nudge that delay toward the audio clock, and re-arm the refresh
 * timer.  With no pictures queued it just polls again shortly. */
void video_refresh_timer(void *userdata) {
VideoState *is = (VideoState *)userdata;
VideoPicture *vp;
double actual_delay, delay, sync_threshold, ref_clock, diff;
if(is->video_st) {
if(is->pictq_size == 0) {
/* nothing decoded yet: poll again in 1 ms */
schedule_refresh(is, 1);
} else {
vp = &is->pictq[is->pictq_rindex];
delay = vp->pts - is->frame_last_pts; /* the pts from last time */
if(delay <= 0 || delay >= 1.0) {
/* if incorrect delay, use previous one */
delay = is->frame_last_delay;
}
/* save for next time */
is->frame_last_delay = delay;
is->frame_last_pts = vp->pts;
/* update delay to sync to audio */
ref_clock = get_audio_clock(is);
diff = vp->pts - ref_clock;
/* Skip or repeat the frame. Take delay into account
FFPlay still doesn't "know if this is the best guess." */
sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
/* video behind audio: show ASAP; video ahead: double the delay */
if(diff <= -sync_threshold) {
delay = 0;
} else if(diff >= sync_threshold) {
delay = 2 * delay;
}
}
is->frame_timer += delay;
/* compute the REAL delay relative to wall-clock time */
actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
if(actual_delay < 0.010) {
/* Really it should skip the picture instead */
actual_delay = 0.010;
}
schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
/* show the picture! */
video_display(is);
/* update queue for next picture! */
if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
is->pictq_rindex = 0;
}
/* release the slot and wake the video thread waiting in queue_picture */
SDL_LockMutex(is->pictq_mutex);
is->pictq_size--;
SDL_CondSignal(is->pictq_cond);
SDL_UnlockMutex(is->pictq_mutex);
}
} else {
/* no video stream yet: check back later */
schedule_refresh(is, 100);
}
}
/* FF_ALLOC_EVENT handler (main thread): (re)create the YUV overlay
 * for the current write slot at the codec's frame size, then signal
 * the video thread blocked in queue_picture(). */
void alloc_picture(void *userdata) {
  VideoState *is = (VideoState *)userdata;
  VideoPicture *vp = &is->pictq[is->pictq_windex];

  if (vp->bmp) {
    /* an overlay already exists: drop it and build one at the new size */
    SDL_FreeYUVOverlay(vp->bmp);
  }
  /* Allocate a place to put our YUV image on that screen */
  vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                 is->video_st->codec->height,
                                 SDL_YV12_OVERLAY,
                                 screen);
  vp->width = is->video_st->codec->width;
  vp->height = is->video_st->codec->height;

  SDL_LockMutex(is->pictq_mutex);
  vp->allocated = 1;
  SDL_CondSignal(is->pictq_cond);
  SDL_UnlockMutex(is->pictq_mutex);
}
/* Convert a decoded frame to YUV and place it in the picture ring
 * buffer (video thread).  Blocks while the ring is full, and hands
 * overlay (re)allocation to the main thread via FF_ALLOC_EVENT.
 * Returns 0 on success, -1 when the player is quitting. */
int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {
VideoPicture *vp;
AVPicture pict;
/* wait until we have space for a new pic */
SDL_LockMutex(is->pictq_mutex);
while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
!is->quit) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
SDL_UnlockMutex(is->pictq_mutex);
if(is->quit)
return -1;
// windex is set to 0 initially
vp = &is->pictq[is->pictq_windex];
/* allocate or resize the buffer! */
if(!vp->bmp ||
vp->width != is->video_st->codec->width ||
vp->height != is->video_st->codec->height) {
SDL_Event event;
vp->allocated = 0;
/* we have to do it in the main thread */
event.type = FF_ALLOC_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
/* wait until we have a picture allocated */
SDL_LockMutex(is->pictq_mutex);
while(!vp->allocated && !is->quit) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
SDL_UnlockMutex(is->pictq_mutex);
if(is->quit) {
return -1;
}
}
/* We have a place to put our picture on the queue */
/* If we are skipping a frame, do we set this to null
but still return vp->allocated = 1? */
if(vp->bmp) {
SDL_LockYUVOverlay(vp->bmp);
/* point pict at the queue; note U and V planes are swapped because
 * SDL's YV12 stores V before U */
pict.data[0] = vp->bmp->pixels[0];
pict.data[1] = vp->bmp->pixels[2];
pict.data[2] = vp->bmp->pixels[1];
pict.linesize[0] = vp->bmp->pitches[0];
pict.linesize[1] = vp->bmp->pitches[2];
pict.linesize[2] = vp->bmp->pitches[1];
// Convert the image into YUV format that SDL uses
sws_scale
(
is->sws_ctx,
(uint8_t const * const *)pFrame->data,
pFrame->linesize,
0,
is->video_st->codec->height,
pict.data,
pict.linesize
);
SDL_UnlockYUVOverlay(vp->bmp);
vp->pts = pts;
/* now we inform our display thread that we have a pic ready */
if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
is->pictq_windex = 0;
}
SDL_LockMutex(is->pictq_mutex);
is->pictq_size++;
SDL_UnlockMutex(is->pictq_mutex);
}
return 0;
}
/* Reconcile a frame's PTS with the internal video clock: a frame that
 * carries a PTS resets the clock; a frame without one inherits the
 * predicted clock value.  The clock then advances by one frame
 * duration, plus half a duration per repeated field.
 * Returns the PTS assigned to the frame, in seconds. */
double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {
  double frame_delay;

  if (pts != 0) {
    /* explicit pts becomes the new video clock */
    is->video_clock = pts;
  } else {
    /* no pts supplied: use the predicted clock value */
    pts = is->video_clock;
  }

  /* advance the clock past this frame */
  frame_delay = av_q2d(is->video_st->codec->time_base);
  /* repeated fields extend the display time by half a frame each */
  frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
  is->video_clock += frame_delay;

  return pts;
}
/* PTS of the packet currently being fed to the video decoder; stamped
 * into each frame's opaque field by our_get_buffer() below so
 * video_thread() can recover the frame's true PTS. */
uint64_t global_video_pkt_pts = AV_NOPTS_VALUE;
/* These are called whenever we allocate a frame
 * buffer. We use this to store the global_pts in
 * a frame at the time it is allocated.
 */
/* Custom get_buffer: after the default allocation, attach the PTS of
 * the packet being decoded (global_video_pkt_pts) to the frame via
 * pic->opaque, so video_thread() can recover the frame's PTS.
 * Fix: the av_malloc() result is now checked -- on OOM the frame
 * simply carries no opaque PTS (video_thread already guards against a
 * NULL opaque) instead of crashing on a NULL store. */
int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) {
  int ret = avcodec_default_get_buffer(c, pic);
  uint64_t *pts = av_malloc(sizeof(uint64_t));
  if (pts) {
    *pts = global_video_pkt_pts;
    pic->opaque = pts;
  }
  return ret;
}
/* Custom release_buffer: free the PTS stamp attached by
 * our_get_buffer(), then release the frame normally. */
void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) {
  if (pic)
    av_freep(&pic->opaque);
  avcodec_default_release_buffer(c, pic);
}
/* Video decoder thread: pull packets from videoq, decode them, derive
 * each frame's PTS (opaque stamp from our_get_buffer, falling back to
 * the packet DTS), and push finished frames into the picture queue.
 * Exits when the packet queue shuts down or queue_picture reports quit. */
int video_thread(void *arg) {
VideoState *is = (VideoState *)arg;
AVPacket pkt1, *packet = &pkt1;
int frameFinished;
AVFrame *pFrame;
double pts;
pFrame = av_frame_alloc();
for(;;) {
if(packet_queue_get(&is->videoq, packet, 1) < 0) {
// means we quit getting packets
break;
}
pts = 0;
// Save global pts to be stored in pFrame in first call
global_video_pkt_pts = packet->pts;
// Decode video frame
avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
packet);
/* prefer the PTS stamped at buffer-allocation time (stored as a
 * uint64_t behind pFrame->opaque), else the packet DTS, else 0 */
if(packet->dts == AV_NOPTS_VALUE
&& pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
pts = *(uint64_t *)pFrame->opaque;
} else if(packet->dts != AV_NOPTS_VALUE) {
pts = packet->dts;
} else {
pts = 0;
}
/* convert from stream time_base units to seconds */
pts *= av_q2d(is->video_st->time_base);
// Did we get a video frame?
if(frameFinished) {
pts = synchronize_video(is, pFrame, pts);
if(queue_picture(is, pFrame, pts) < 0) {
break;
}
}
av_free_packet(packet);
}
av_free(pFrame);
return 0;
}
/* Open the decoder for the given stream index and wire it into the
 * player: SDL audio device + packet queue for audio streams, decoder
 * thread + swscale context for video streams.
 * Returns 0 on success, -1 on any failure. */
int stream_component_open(VideoState *is, int stream_index) {
AVFormatContext *pFormatCtx = is->pFormatCtx;
AVCodecContext *codecCtx = NULL;
AVCodec *codec = NULL;
AVDictionary *optionsDict = NULL;
SDL_AudioSpec wanted_spec, spec;
if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
return -1;
}
// Get a pointer to the codec context for the video stream
codecCtx = pFormatCtx->streams[stream_index]->codec;
if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
// Set audio settings from codec info
wanted_spec.freq = codecCtx->sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = codecCtx->channels;
wanted_spec.silence = 0;
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
wanted_spec.callback = audio_callback;
wanted_spec.userdata = is;
if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
return -1;
}
/* remember the size SDL actually granted */
is->audio_hw_buf_size = spec.size;
}
codec = avcodec_find_decoder(codecCtx->codec_id);
if(!codec || (avcodec_open2(codecCtx, codec, &optionsDict) < 0)) {
fprintf(stderr, "Unsupported codec!\n");
return -1;
}
switch(codecCtx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
is->audioStream = stream_index;
is->audio_st = pFormatCtx->streams[stream_index];
is->audio_buf_size = 0;
is->audio_buf_index = 0;
memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
packet_queue_init(&is->audioq);
/* audio starts flowing as soon as the device is unpaused */
SDL_PauseAudio(0);
break;
case AVMEDIA_TYPE_VIDEO:
is->videoStream = stream_index;
is->video_st = pFormatCtx->streams[stream_index];
is->frame_timer = (double)av_gettime() / 1000000.0;
is->frame_last_delay = 40e-3;
packet_queue_init(&is->videoq);
is->video_tid = SDL_CreateThread(video_thread, is);
/* converter from the decoder's pix_fmt to YUV420P for the overlay */
is->sws_ctx =
sws_getContext
(
is->video_st->codec->width,
is->video_st->codec->height,
is->video_st->codec->pix_fmt,
is->video_st->codec->width,
is->video_st->codec->height,
PIX_FMT_YUV420P,
SWS_BILINEAR,
NULL,
NULL,
NULL
);
/* NOTE(review): our_get_buffer has the old get_buffer signature
 * (ctx, frame) but is assigned to get_buffer2, whose callback also
 * takes a flags argument -- signature mismatch; likewise
 * release_buffer belongs to the old API.  Verify against the
 * ffmpeg version this is built with. */
codecCtx->get_buffer2 = our_get_buffer;
codecCtx->release_buffer = our_release_buffer;
break;
default:
break;
}
return 0;
}
/* ffmpeg I/O interrupt callback: returns nonzero once quit is set so
 * blocking I/O calls abort promptly. */
int decode_interrupt_cb(void *opaque) {
  return global_video_state && global_video_state->quit;
}
/* Demuxer thread: open the input file, pick the first audio and video
 * streams, open their components, then loop reading packets into the
 * audio/video queues until EOF, error or quit.  Posts FF_QUIT_EVENT
 * before exiting so the main loop can shut down. */
int decode_thread(void *arg) {
VideoState *is = (VideoState *)arg;
AVFormatContext *pFormatCtx = NULL;
AVPacket pkt1, *packet = &pkt1;
AVDictionary *io_dict = NULL;
AVIOInterruptCB callback;
int video_index = -1;
int audio_index = -1;
int i;
is->videoStream=-1;
is->audioStream=-1;
global_video_state = is;
// will interrupt blocking functions if we quit!
callback.callback = decode_interrupt_cb;
callback.opaque = is;
/* NOTE(review): is->io_context opened here is never attached to
 * pFormatCtx and never closed -- it appears unused/leaked, and the
 * interrupt callback is therefore not active for the demuxer's own
 * I/O.  Verify whether this open is needed at all. */
if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict))
{
fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
return -1;
}
// Open video file
if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0)
return -1; // Couldn't open file
is->pFormatCtx = pFormatCtx;
// Retrieve stream information
if(avformat_find_stream_info(pFormatCtx, NULL)<0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, is->filename, 0);
// Find the first video stream
for(i=0; i<pFormatCtx->nb_streams; i++) {
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
video_index < 0) {
video_index=i;
}
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
audio_index < 0) {
audio_index=i;
}
}
if(audio_index >= 0) {
stream_component_open(is, audio_index);
}
if(video_index >= 0) {
stream_component_open(is, video_index);
}
/* this player requires BOTH an audio and a video stream */
if(is->videoStream < 0 || is->audioStream < 0) {
fprintf(stderr, "%s: could not open codecs\n", is->filename);
goto fail;
}
// main decode loop
for(;;) {
if(is->quit) {
break;
}
// seek stuff goes here
/* throttle when the consumers are far behind */
if(is->audioq.size > MAX_AUDIOQ_SIZE ||
is->videoq.size > MAX_VIDEOQ_SIZE) {
SDL_Delay(10);
continue;
}
if(av_read_frame(is->pFormatCtx, packet) < 0) {
if(is->pFormatCtx->pb->error == 0) {
SDL_Delay(100); /* no error; wait for user input */
continue;
} else {
break;
}
}
// Is this a packet from the video stream?
if(packet->stream_index == is->videoStream) {
packet_queue_put(&is->videoq, packet);
} else if(packet->stream_index == is->audioStream) {
packet_queue_put(&is->audioq, packet);
} else {
/* packet from a stream we don't play: discard it */
av_free_packet(packet);
}
}
/* all done - wait for it */
while(!is->quit) {
SDL_Delay(100);
}
fail:
{
SDL_Event event;
event.type = FF_QUIT_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
}
return 0;
}
/*
 * Program entry point.
 *
 * Usage: <prog> <file>
 *
 * Initializes SDL (video/audio/timer), creates the display surface,
 * allocates the shared VideoState, kicks off the first refresh timer and
 * the demuxer thread, then runs the SDL event loop until SDL_QUIT or
 * FF_QUIT_EVENT arrives, at which point it signals the queue condition
 * variables (so blocked decoder threads wake up) and exits.
 */
int main(int argc, char *argv[]) {
    SDL_Event event;
    VideoState *is;

    /* Validate arguments BEFORE allocating, so a usage error leaks nothing. */
    if (argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }

    is = av_mallocz(sizeof(VideoState));
    if (!is) {  /* was unchecked: NULL deref on OOM */
        fprintf(stderr, "Could not allocate VideoState\n");
        exit(1);
    }

    /* Register all formats and codecs. */
    av_register_all();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    /* Make a screen to put our video on.
       Darwin's SDL backend needs an explicit 24-bit depth. */
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
    screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    /* NOTE(review): 1024 presumably matches sizeof(is->filename) — the
       field is declared elsewhere; confirm the sizes agree. */
    av_strlcpy(is->filename, argv[1], 1024);

    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    /* Schedule the first video refresh (~25 fps), then start demuxing. */
    schedule_refresh(is, 40);

    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* was leaking the mutex and condition variable created above */
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->pictq_mutex);
        av_free(is);
        return -1;
    }

    for (;;) {
        SDL_WaitEvent(&event);
        switch (event.type) {
        case FF_QUIT_EVENT:
        case SDL_QUIT:
            is->quit = 1;
            /*
             * If the video has finished playing, then both the picture and
             * audio queues are waiting for more data. Make them stop
             * waiting and terminate normally.
             */
            SDL_CondSignal(is->audioq.cond);
            SDL_CondSignal(is->videoq.cond);
            SDL_Quit();
            exit(0);
            break;
        case FF_ALLOC_EVENT:
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
    return 0; /* not reached: the loop exits via exit(0) */
}