FFmpeg: ffmpeg.c Source File

00001 /*

00002 * Copyright (c) 2000-2003 Fabrice Bellard

00003 *

00004 * This file is part of FFmpeg.

00005 *

00006 * FFmpeg is free software; you can redistribute it and/or

00007 * modify it under the terms of the GNU Lesser General Public

00008 * License as published by the Free Software Foundation; either

00009 * version 2.1 of the License, or (at your option) any later version.

00010 *

00011 * FFmpeg is distributed in the hope that it will be useful,

00012 * but WITHOUT ANY WARRANTY; without even the implied warranty of

00013 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU

00014 * Lesser General Public License for more details.

00015 *

00016 * You should have received a copy of the GNU Lesser General Public

00017 * License along with FFmpeg; if not, write to the Free Software

00018 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

00019 */

00020

00026 #include "config.h"

00027 #include <ctype.h>

00028 #include <string.h>

00029 #include <math.h>

00030 #include <stdlib.h>

00031 #include <errno.h>

00032 #include <limits.h>

00033 #if HAVE_ISATTY

00034 #if HAVE_IO_H

00035 #include <io.h>

00036 #endif

00037 #if HAVE_UNISTD_H

00038 #include <unistd.h>

00039 #endif

00040 #endif


00063 // not public API

00064


00071 #if HAVE_SYS_RESOURCE_H

00072 #include <sys/time.h>

00073 #include <sys/types.h>

00074 #include <sys/resource.h>

00075 #elif HAVE_GETPROCESSTIMES

00076 #include <windows.h>

00077 #endif

00078 #if HAVE_GETPROCESSMEMORYINFO

00079 #include <windows.h>

00080 #include <psapi.h>

00081 #endif

00082

00083 #if HAVE_SYS_SELECT_H

00084 #include <sys/select.h>

00085 #endif

00086

00087 #if HAVE_TERMIOS_H

00088 #include <fcntl.h>

00089 #include <sys/ioctl.h>

00090 #include <sys/time.h>

00091 #include <termios.h>

00092 #elif HAVE_KBHIT

00093 #include <conio.h>

00094 #endif

00095

00096 #if HAVE_PTHREADS

00097 #include <pthread.h>

00098 #endif

00099

00100 #include <time.h>

00101

00102 #include "ffmpeg.h"

00103 #include "cmdutils.h"

00104

00105 #include "libavutil/avassert.h"

00106

00107 const char program_name[] = "ffmpeg";

00108 const int program_birth_year = 2000;

00109

00110 static FILE *vstats_file;

00111

00112 static void do_video_stats(OutputStream *ost, int frame_size);

00113 static int64_t getutime(void);

00114

00115 static int run_as_daemon = 0;

00116 static int64_t video_size = 0;

00117 static int64_t audio_size = 0;

00118 static int64_t subtitle_size = 0;

00119 static int64_t extra_size = 0;

00120 static int nb_frames_dup = 0;

00121 static int nb_frames_drop = 0;

00122

00123 static int64_t current_time;

00124 AVIOContext *progress_avio = NULL;

00125

00126 static uint8_t *subtitle_out;

00127

00128 #if HAVE_PTHREADS

00129 /* signal to input threads that they should exit; set by the main thread */

00130 static int transcoding_finished;

00131 #endif

00132

00133 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"

00134

00135 InputStream **input_streams = NULL;

00136 int nb_input_streams = 0;

00137 InputFile **input_files = NULL;

00138 int nb_input_files = 0;

00139

00140 OutputStream **output_streams = NULL;

00141 int nb_output_streams = 0;

00142 OutputFile **output_files = NULL;

00143 int nb_output_files = 0;

00144

00145 FilterGraph **filtergraphs;

00146 int nb_filtergraphs;

00147

00148 #if HAVE_TERMIOS_H

00149

00150 /* init terminal so that we can grab keys */

00151 static struct termios oldtty;

00152 static int restore_tty;

00153 #endif

00154

00155

00156 /* sub2video hack:

00157 Convert subtitles to video with alpha to insert them in filter graphs.

00158 This is a temporary solution until libavfilter gets real subtitles support.

00159 */

00160

00161

00162

00163 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,

00164 AVSubtitleRect *r)

00165 {

00166 uint32_t *pal, *dst2;

00167 uint8_t *src, *src2;

00168 int x, y;

00169

00170 if (r->type != SUBTITLE_BITMAP) {

00171 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");

00172 return;

00173 }

00174 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {

00175 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");

00176 return;

00177 }

00178

00179 dst += r->y * dst_linesize + r->x * 4;

00180 src = r->pict.data[0];

00181 pal = (uint32_t *)r->pict.data[1];

00182 for (y = 0; y < r->h; y++) {

00183 dst2 = (uint32_t *)dst;

00184 src2 = src;

00185 for (x = 0; x < r->w; x++)

00186 *(dst2++) = pal[*(src2++)];

00187 dst += dst_linesize;

00188 src += r->pict.linesize[0];

00189 }

00190 }

00191

00192 static void sub2video_push_ref(InputStream *ist, int64_t pts)

00193 {

00194 AVFilterBufferRef *ref = ist->sub2video.ref;

00195 int i;

00196

00197 ist->sub2video.last_pts = ref->pts = pts;

00198 for (i = 0; i < ist->nb_filters; i++)

00199 av_buffersrc_add_ref(ist->filters[i]->filter,

00200 avfilter_ref_buffer(ref, ~0),

00201 AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |

00202 AV_BUFFERSRC_FLAG_NO_COPY |

00203 AV_BUFFERSRC_FLAG_PUSH);

00204 }

00205

00206 static void sub2video_update(InputStream *ist, AVSubtitle *sub)

00207 {

00208 int w = ist->sub2video.w, h = ist->sub2video.h;

00209 AVFilterBufferRef *ref = ist->sub2video.ref;

00210 uint8_t *dst;

00211 int dst_linesize;

00212 int num_rects, i;

00213 int64_t pts, end_pts;

00214

00215 if (!ref)

00216 return;

00217 if (sub) {

00218 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000,

00219 AV_TIME_BASE_Q, ist->st->time_base);

00220 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000,

00221 AV_TIME_BASE_Q, ist->st->time_base);

00222 num_rects = sub->num_rects;

00223 } else {

00224 pts = ist->sub2video.end_pts;

00225 end_pts = INT64_MAX;

00226 num_rects = 0;

00227 }

00228 dst = ref->data [0];

00229 dst_linesize = ref->linesize[0];

00230 memset(dst, 0, h * dst_linesize);

00231 for (i = 0; i < num_rects; i++)

00232 sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);

00233 sub2video_push_ref(ist, pts);

00234 ist->sub2video.end_pts = end_pts;

00235 }

00236

00237 static void sub2video_heartbeat(InputStream *ist, int64_t pts)

00238 {

00239 InputFile *infile = input_files[ist->file_index];

00240 int i, j, nb_reqs;

00241 int64_t pts2;

00242

00243 /* When a frame is read from a file, examine all sub2video streams in

00244 the same file and send the sub2video frame again. Otherwise, decoded

00245 video frames could be accumulating in the filter graph while a filter

00246 (possibly overlay) is desperately waiting for a subtitle frame. */

00247 for (i = 0; i < infile->nb_streams; i++) {

00248 InputStream *ist2 = input_streams[infile->ist_index + i];

00249 if (!ist2->sub2video.ref)

00250 continue;

00251 /* subtitles seem to be usually muxed ahead of other streams;

00252 if not, subtracting a larger time here is necessary */

00253 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;

00254 /* do not send the heartbeat frame if the subtitle is already ahead */

00255 if (pts2 <= ist2->sub2video.last_pts)

00256 continue;

00257 if (pts2 >= ist2->sub2video.end_pts)

00258 sub2video_update(ist2, NULL);

00259 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)

00260 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);

00261 if (nb_reqs)

00262 sub2video_push_ref(ist2, pts2);

00263 }

00264 }

00265

00266 static void sub2video_flush(InputStream *ist)

00267 {

00268 int i;

00269

00270 for (i = 0; i < ist->nb_filters; i++)

00271 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);

00272 }

00273

00274 /* end of sub2video hack */

00275

00276 void term_exit(void)

00277 {

00278 av_log(NULL, AV_LOG_QUIET, "%s", "");

00279 #if HAVE_TERMIOS_H

00280 if(restore_tty)

00281 tcsetattr (0, TCSANOW, &oldtty);

00282 #endif

00283 }

00284

00285 static volatile int received_sigterm = 0;

00286 static volatile int received_nb_signals = 0;

00287

00288 static void

00289 sigterm_handler(int sig)

00290 {

00291 received_sigterm = sig;

00292 received_nb_signals++;

00293 term_exit();

00294 if(received_nb_signals > 3)

00295 exit(123);

00296 }

00297

00298 void term_init(void)

00299 {

00300 #if HAVE_TERMIOS_H

00301 if(!run_as_daemon){

00302 struct termios tty;

00303 int istty = 1;

00304 #if HAVE_ISATTY

00305 istty = isatty(0) && isatty(2);

00306 #endif

00307 if (istty && tcgetattr (0, &tty) == 0) {

00308 oldtty = tty;

00309 restore_tty = 1;

00310 atexit(term_exit);

00311

00312 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP

00313 |INLCR|IGNCR|ICRNL|IXON);

00314 tty.c_oflag |= OPOST;

00315 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);

00316 tty.c_cflag &= ~(CSIZE|PARENB);

00317 tty.c_cflag |= CS8;

00318 tty.c_cc[VMIN] = 1;

00319 tty.c_cc[VTIME] = 0;

00320

00321 tcsetattr (0, TCSANOW, &tty);

00322 }

00323 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */

00324 }

00325 #endif

00326 avformat_network_deinit();

00327

00328 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */

00329 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */

00330 #ifdef SIGXCPU

00331 signal(SIGXCPU, sigterm_handler);

00332 #endif

00333 }

00334

00335 /* read a key without blocking */

00336 static int read_key(void)

00337 {

00338 unsigned char ch;

00339 #if HAVE_TERMIOS_H

00340 int n = 1;

00341 struct timeval tv;

00342 fd_set rfds;

00343

00344 FD_ZERO(&rfds);

00345 FD_SET(0, &rfds);

00346 tv.tv_sec = 0;

00347 tv.tv_usec = 0;

00348 n = select(1, &rfds, NULL, NULL, &tv);

00349 if (n > 0) {

00350 n = read(0, &ch, 1);

00351 if (n == 1)

00352 return ch;

00353

00354 return n;

00355 }

00356 #elif HAVE_KBHIT

00357 # if HAVE_PEEKNAMEDPIPE

00358 static int is_pipe;

00359 static HANDLE input_handle;

00360 DWORD dw, nchars;

00361 if(!input_handle){

00362 input_handle = GetStdHandle(STD_INPUT_HANDLE);

00363 is_pipe = !GetConsoleMode(input_handle, &dw);

00364 }

00365

00366 if (stdin->_cnt > 0) {

00367 read(0, &ch, 1);

00368 return ch;

00369 }

00370 if (is_pipe) {

00371 /* When running under a GUI, you will end here. */

00372 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL))

00373 return -1;

00374 //Read it

00375 if(nchars != 0) {

00376 read(0, &ch, 1);

00377 return ch;

00378 }else{

00379 return -1;

00380 }

00381 }

00382 # endif

00383 if(kbhit())

00384 return(getch());

00385 #endif

00386 return -1;

00387 }

00388

00389 static int decode_interrupt_cb(void *ctx)

00390 {

00391 return received_nb_signals > 1;

00392 }

00393

00394 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };

00395

00396 static void exit_program(void)

00397 {

00398 int i, j;

00399

00400 for (i = 0; i < nb_filtergraphs; i++) {

00401 avfilter_graph_free(&filtergraphs[i]->graph);

00402 for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {

00403 av_freep(&filtergraphs[i]->inputs[j]->name);

00404 av_freep(&filtergraphs[i]->inputs[j]);

00405 }

00406 av_freep(&filtergraphs[i]->inputs);

00407 for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {

00408 av_freep(&filtergraphs[i]->outputs[j]->name);

00409 av_freep(&filtergraphs[i]->outputs[j]);

00410 }

00411 av_freep(&filtergraphs[i]->outputs);

00412 av_freep(&filtergraphs[i]);

00413 }

00414 av_freep(&filtergraphs);

00415

00416 av_freep(&subtitle_out);

00417

00418 /* close files */

00419 for (i = 0; i < nb_output_files; i++) {

00420 AVFormatContext *s = output_files[i]->ctx;

00421 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)

00422 avio_close(s->pb);

00423 avformat_free_context(s);

00424 av_dict_free(&output_files[i]->opts);

00425 av_freep(&output_files[i]);

00426 }

00427 for (i = 0; i < nb_output_streams; i++) {

00428 AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;

00429 while (bsfc) {

00430 AVBitStreamFilterContext *next = bsfc->next;

00431 av_bitstream_filter_close(bsfc);

00432 bsfc = next;

00433 }

00434 output_streams[i]->bitstream_filters = NULL;

00435 avcodec_free_frame(&output_streams[i]->filtered_frame);

00436

00437 av_freep(&output_streams[i]->forced_keyframes);

00438 av_freep(&output_streams[i]->avfilter);

00439 av_freep(&output_streams[i]->logfile_prefix);

00440 av_freep(&output_streams[i]);

00441 }

00442 for (i = 0; i < nb_input_files; i++) {

00443 avformat_close_input(&input_files[i]->ctx);

00444 av_freep(&input_files[i]);

00445 }

00446 for (i = 0; i < nb_input_streams; i++) {

00447 avcodec_free_frame(&input_streams[i]->decoded_frame);

00448 av_dict_free(&input_streams[i]->opts);

00449 free_buffer_pool(&input_streams[i]->buffer_pool);

00450 avfilter_unref_bufferp(&input_streams[i]->sub2video.ref);

00451 av_freep(&input_streams[i]->filters);

00452 av_freep(&input_streams[i]);

00453 }

00454

00455 if (vstats_file)

00456 fclose(vstats_file);

00457 av_free(vstats_filename);

00458

00459 av_freep(&input_streams);

00460 av_freep(&input_files);

00461 av_freep(&output_streams);

00462 av_freep(&output_files);

00463

00464 uninit_opts();

00465

00466 avfilter_uninit();

00467 avformat_network_deinit();

00468

00469 if (received_sigterm) {

00470 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",

00471 (int) received_sigterm);

00472 exit (255);

00473 }

00474 }

00475

00476 void assert_avoptions(AVDictionary *m)

00477 {

00478 AVDictionaryEntry *t;

00479 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {

00480 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);

00481 exit(1);

00482 }

00483 }

00484

00485 static void abort_codec_experimental(AVCodec *c, int encoder)

00486 {

00487 exit(1);

00488 }

00489

00490 static void update_benchmark(const char *fmt, ...)

00491 {

00492 if (do_benchmark_all) {

00493 int64_t t = getutime();

00494 va_list va;

00495 char buf[1024];

00496

00497 if (fmt) {

00498 va_start(va, fmt);

00499 vsnprintf(buf, sizeof(buf), fmt, va);

00500 va_end(va);

00501 printf("bench: %8"PRIu64" %s \n", t - current_time, buf);

00502 }

00503 current_time = t;

00504 }

00505 }

00506

00507 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)

00508 {

00509 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;

00510 AVCodecContext *avctx = ost->st->codec;

00511 int ret;

00512

00513 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||

00514 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))

00515 pkt->pts = pkt->dts = AV_NOPTS_VALUE;

00516

00517 if ((avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) && pkt->dts != AV_NOPTS_VALUE) {

00518 int64_t max = ost->st->cur_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);

00519 if (ost->st->cur_dts && ost->st->cur_dts != AV_NOPTS_VALUE && max > pkt->dts) {

00520 av_log(s, max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG,

00521 "st:%d PTS: %"PRId64" DTS: %"PRId64" < %"PRId64" invalid, clipping\n", pkt->stream_index, pkt->pts, pkt->dts, max);

00522 if(pkt->pts >= pkt->dts)

00523 pkt->pts = FFMAX(pkt->pts, max);

00524 pkt->dts = max;

00525 }

00526 }
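/* Annotation: muxers require a non-decreasing dts per stream, and the block above
   enforces that by clipping a late packet to cur_dts (+1 unless the muxer sets
   AVFMT_TS_NONSTRICT). E.g. with cur_dts = 1000 on a strict muxer, a packet that
   arrives with dts = 990 is rewritten to dts = 1001, and its pts is raised to at
   least 1001 provided it was not already below the dts. */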

00527

00528 /*

00529 * Audio encoders may split the packets -- #frames in != #packets out.

00530 * But there is no reordering, so we can limit the number of output packets

00531 * by simply dropping them here.

00532 * Counting encoded video frames needs to be done separately because of

00533 * reordering, see do_video_out()

00534 */

00535 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {

00536 if (ost->frame_number >= ost->max_frames) {

00537 av_free_packet(pkt);

00538 return;

00539 }

00540 ost->frame_number++;

00541 }

00542

00543 while (bsfc) {

00544 AVPacket new_pkt = *pkt;

00545 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,

00546 &new_pkt.data, &new_pkt.size,

00547 pkt->data, pkt->size,

00548 pkt->flags & AV_PKT_FLAG_KEY);

00549 if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {

00550 uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow

00551 if(t) {

00552 memcpy(t, new_pkt.data, new_pkt.size);

00553 memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

00554 new_pkt.data = t;

00555 a = 1;

00556 } else

00557 a = AVERROR(ENOMEM);

00558 }

00559 if (a > 0) {

00560 av_free_packet(pkt);

00561 new_pkt.destruct = av_destruct_packet;

00562 } else if (a < 0) {

00563 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",

00564 bsfc->filter->name, pkt->stream_index,

00565 avctx->codec ? avctx->codec->name : "copy");

00566 print_error("", a);

00567 if (exit_on_error)

00568 exit(1);

00569 }

00570 *pkt = new_pkt;

00571

00572 bsfc = bsfc->next;

00573 }

00574

00575 pkt->stream_index = ost->index;

00576

00577 if (debug_ts) {

00578 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "

00579 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",

00580 av_get_media_type_string(ost->st->codec->codec_type),

00581 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),

00582 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),

00583 pkt->size

00584 );

00585 }

00586

00587 ret = av_interleaved_write_frame(s, pkt);

00588 if (ret < 0) {

00589 print_error("av_interleaved_write_frame()", ret);

00590 exit(1);

00591 }

00592 }

00593

00594 static void close_output_stream(OutputStream *ost)

00595 {

00596 OutputFile *of = output_files[ost->file_index];

00597

00598 ost->finished = 1;

00599 if (of->shortest) {

00600 int i;

00601 for (i = 0; i < of->ctx->nb_streams; i++)

00602 output_streams[of->ost_index + i]->finished = 1;

00603 }

00604 }

00605

00606 static int check_recording_time(OutputStream *ost)

00607 {

00608 OutputFile *of = output_files[ost->file_index];

00609

00610 if (of->recording_time != INT64_MAX &&

00611 av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,

00612 AV_TIME_BASE_Q) >= 0) {

00613 close_output_stream(ost);

00614 return 0;

00615 }

00616 return 1;

00617 }
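/* Annotation: of->recording_time comes from -t and is expressed in AV_TIME_BASE
   (microsecond) units, while sync_opts - first_pts is in the encoder time base;
   av_compare_ts() compares across the two bases. E.g. with -t 10 and an audio
   time base of 1/48000, the stream is closed once 480000 samples are encoded. */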

00618

00619 static void do_audio_out(AVFormatContext *s, OutputStream *ost,

00620 AVFrame *frame)

00621 {

00622 AVCodecContext *enc = ost->st->codec;

00623 AVPacket pkt;

00624 int got_packet = 0;

00625

00626 av_init_packet(&pkt);

00627 pkt.data = NULL;

00628 pkt.size = 0;

00629

00630 if (!check_recording_time(ost))

00631 return;

00632

00633 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)

00634 frame->pts = ost->sync_opts;

00635 ost->sync_opts = frame->pts + frame->nb_samples;

00636

00637 av_assert0(pkt.size || !pkt.data);

00638 update_benchmark(NULL);

00639 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {

00640 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");

00641 exit(1);

00642 }

00643 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

00644

00645 if (got_packet) {

00646 if (pkt.pts != AV_NOPTS_VALUE)

00647 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);

00648 if (pkt.dts != AV_NOPTS_VALUE)

00649 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);

00650 if (pkt.duration > 0)

00651 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);

00652

00653 if (debug_ts) {

00654 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "

00655 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",

00656 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),

00657 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

00658 }

00659

00660 audio_size += pkt.size;

00661 write_frame(s, &pkt, ost);

00662

00663 av_free_packet(&pkt);

00664 }

00665 }
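/* Annotation: for audio encoders the time base is 1/sample_rate, so sync_opts
   above advances by nb_samples per frame; e.g. a 1152-sample MP2 frame moves the
   next expected pts forward by 1152 ticks. */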

00666

00667 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)

00668 {

00669 AVCodecContext *dec;

00670 AVPicture *picture2;

00671 AVPicture picture_tmp;

00672 uint8_t *buf = 0;

00673

00674 dec = ist->st->codec;

00675

00676 /* deinterlace : must be done before any resize */

00677 if (do_deinterlace) {

00678 int size;

00679

00680 /* create temporary picture */

00681 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);

00682 if (size < 0)

00683 return;

00684 buf = av_malloc(size);

00685 if (!buf)

00686 return;

00687

00688 picture2 = &picture_tmp;

00689 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);

00690

00691 if (avpicture_deinterlace(picture2, picture,

00692 dec->pix_fmt, dec->width, dec->height) < 0) {

00693 /* if error, do not deinterlace */

00694 av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");

00695 av_free(buf);

00696 buf = NULL;

00697 picture2 = picture;

00698 }

00699 } else {

00700 picture2 = picture;

00701 }

00702

00703 if (picture != picture2)

00704 *picture = *picture2;

00705 *bufp = buf;

00706 }

00707

00708 static void do_subtitle_out(AVFormatContext *s,

00709 OutputStream *ost,

00710 InputStream *ist,

00711 AVSubtitle *sub)

00712 {

00713 int subtitle_out_max_size = 1024 * 1024;

00714 int subtitle_out_size, nb, i;

00715 AVCodecContext *enc;

00716 AVPacket pkt;

00717 int64_t pts;

00718

00719 if (sub->pts == AV_NOPTS_VALUE) {

00720 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");

00721 if (exit_on_error)

00722 exit(1);

00723 return;

00724 }

00725

00726 enc = ost->st->codec;

00727

00728 if (!subtitle_out) {

00729 subtitle_out = av_malloc(subtitle_out_max_size);

00730 }

00731

00732 /* Note: DVB subtitle need one packet to draw them and one other

00733 packet to clear them */

00734 /* XXX: signal it in the codec context ? */

00735 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)

00736 nb = 2;

00737 else

00738 nb = 1;

00739

00740 /* shift timestamp to honor -ss and make check_recording_time() work with -t */

00741 pts = sub->pts - output_files[ost->file_index]->start_time;

00742 for (i = 0; i < nb; i++) {

00743 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);

00744 if (!check_recording_time(ost))

00745 return;

00746

00747 sub->pts = pts;

00748 // start_display_time is required to be 0

00749 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);

00750 sub->end_display_time -= sub->start_display_time;

00751 sub->start_display_time = 0;

00752 if (i == 1)

00753 sub->num_rects = 0;

00754 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,

00755 subtitle_out_max_size, sub);

00756 if (subtitle_out_size < 0) {

00757 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");

00758 exit(1);

00759 }

00760

00761 av_init_packet(&pkt);

00762 pkt.data = subtitle_out;

00763 pkt.size = subtitle_out_size;

00764 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);

00765 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);

00766 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {

00767 /* XXX: the pts correction is handled here. Maybe handling

00768 it in the codec would be better */

00769 if (i == 0)

00770 pkt.pts += 90 * sub->start_display_time;

00771 else

00772 pkt.pts += 90 * sub->end_display_time;

00773 }
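/* Annotation: start_display_time and end_display_time are in milliseconds, while
   the correction above assumes the 90 kHz MPEG-TS stream clock, hence the factor
   90 (1 ms = 90 ticks); a 500 ms display offset adds 45000 ticks to pkt.pts. */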

00774 subtitle_size += pkt.size;

00775 write_frame(s, &pkt, ost);

00776 }

00777 }

00778

00779 static void do_video_out(AVFormatContext *s,

00780 OutputStream *ost,

00781 AVFrame *in_picture)

00782 {

00783 int ret, format_video_sync;

00784 AVPacket pkt;

00785 AVCodecContext *enc = ost->st->codec;

00786 int nb_frames, i;

00787 double sync_ipts, delta;

00788 double duration = 0;

00789 int frame_size = 0;

00790 InputStream *ist = NULL;

00791

00792 if (ost->source_index >= 0)

00793 ist = input_streams[ost->source_index];

00794

00795 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)

00796 duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));

00797

00798 sync_ipts = in_picture->pts;

00799 delta = sync_ipts - ost->sync_opts + duration;

00800

00801 /* by default, we output a single frame */

00802 nb_frames = 1;

00803

00804 format_video_sync = video_sync_method;

00805 if (format_video_sync == VSYNC_AUTO)

00806 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;

00807

00808 switch (format_video_sync) {

00809 case VSYNC_CFR:

00810 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c

00811 if (delta < -1.1)

00812 nb_frames = 0;

00813 else if (delta > 1.1)

00814 nb_frames = lrintf(delta);

00815 break;

00816 case VSYNC_VFR:

00817 if (delta <= -0.6)

00818 nb_frames = 0;

00819 else if (delta > 0.6)

00820 ost->sync_opts = lrint(sync_ipts);

00821 break;

00822 case VSYNC_DROP:

00823 case VSYNC_PASSTHROUGH:

00824 ost->sync_opts = lrint(sync_ipts);

00825 break;

00826 default:

00827 av_assert0(0);

00828 }
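/* Annotation: delta measures, in encoder time-base ticks, how far the input pts is
   ahead of the next output slot. Worked example for CFR output: delta = 2.6 gives
   nb_frames = lrintf(2.6) = 3, i.e. two duplicate frames are inserted; delta = -1.5
   gives nb_frames = 0 and the frame is dropped. */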

00829

00830 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);

00831 if (nb_frames == 0) {

00832 nb_frames_drop++;

00833 av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");

00834 return;

00835 } else if (nb_frames > 1) {

00836 if (nb_frames > dts_error_threshold * 30) {

00837 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);

00838 nb_frames_drop++;

00839 return;

00840 }

00841 nb_frames_dup += nb_frames - 1;

00842 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);

00843 }

00844

00845 /* duplicates frame if needed */

00846 for (i = 0; i < nb_frames; i++) {

00847 av_init_packet(&pkt);

00848 pkt.data = NULL;

00849 pkt.size = 0;

00850

00851 in_picture->pts = ost->sync_opts;

00852

00853 if (!check_recording_time(ost))

00854 return;

00855

00856 if (s->oformat->flags & AVFMT_RAWPICTURE &&

00857 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {

00858 /* raw pictures are written as AVPicture structure to

00859 avoid any copies. We support temporarily the older

00860 method. */

00861 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;

00862 enc->coded_frame->top_field_first = in_picture->top_field_first;

00863 if (enc->coded_frame->interlaced_frame)

00864 enc->field_order = enc->coded_frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;

00865 else

00866 enc->field_order = AV_FIELD_PROGRESSIVE;

00867 pkt.data = (uint8_t *)in_picture;

00868 pkt.size = sizeof(AVPicture);

00869 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);

00870 pkt.flags |= AV_PKT_FLAG_KEY;

00871

00872 video_size += pkt.size;

00873 write_frame(s, &pkt, ost);

00874 } else {

00875 int got_packet;

00876 AVFrame big_picture;

00877

00878 big_picture = *in_picture;

00879 /* better than nothing: use input picture interlaced

00880 settings */

00881 big_picture.interlaced_frame = in_picture->interlaced_frame;

00882 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {

00883 if (ost->top_field_first == -1)

00884 big_picture.top_field_first = in_picture->top_field_first;

00885 else

00886 big_picture.top_field_first = !!ost->top_field_first;

00887 }

00888

00889 if (big_picture.interlaced_frame) {

00890 if (enc->codec->id == AV_CODEC_ID_MJPEG)

00891 enc->field_order = big_picture.top_field_first ? AV_FIELD_TT:AV_FIELD_BB;

00892 else

00893 enc->field_order = big_picture.top_field_first ? AV_FIELD_TB:AV_FIELD_BT;

00894 } else

00895 enc->field_order = AV_FIELD_PROGRESSIVE;

00896

00897 big_picture.quality = ost->st->codec->global_quality;

00898 if (!enc->me_threshold)

00899 big_picture.pict_type = 0;

00900 if (ost->forced_kf_index < ost->forced_kf_count &&

00901 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {

00902 big_picture.pict_type = AV_PICTURE_TYPE_I;

00903 ost->forced_kf_index++;

00904 }

00905 update_benchmark(NULL);

00906 ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);

00907 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);

00908 if (ret < 0) {

00909 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");

00910 exit(1);

00911 }

00912

00913 if (got_packet) {

00914 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))

00915 pkt.pts = ost->sync_opts;

00916

00917 if (pkt.pts != AV_NOPTS_VALUE)

00918 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);

00919 if (pkt.dts != AV_NOPTS_VALUE)

00920 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);

00921

00922 if (debug_ts) {

00923 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "

00924 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",

00925 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),

00926 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

00927 }

00928

00929 frame_size = pkt.size;

00930 video_size += pkt.size;

00931 write_frame(s, &pkt, ost);

00932 av_free_packet(&pkt);

00933

00934 /* if two pass, output log */

00935 if (ost->logfile && enc->stats_out) {

00936 fprintf(ost->logfile, "%s", enc->stats_out);

00937 }

00938 }

00939 }

00940 ost->sync_opts++;

00941 /*

00942 * For video, number of frames in == number of packets out.

00943 * But there may be reordering, so we can't throw away frames on encoder

00944 * flush, we need to limit them here, before they go into encoder.

00945 */

00946 ost->frame_number++;

00947 }

00948

00949 if (vstats_filename && frame_size)

00950 do_video_stats(ost, frame_size);

00951 }

00952

00953 static double psnr(double d)

00954 {

00955 return -10.0 * log(d) / log(10.0);

00956 }
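/* Annotation: -10.0 * log(d) / log(10.0) is -10*log10(d). E.g. a normalized squared
   error of d = 1e-4 (error / (width*height*255*255)) yields a PSNR of 40.0 dB. */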

00957

00958 static void do_video_stats(OutputStream *ost, int frame_size)

00959 {

00960 AVCodecContext *enc;

00961 int frame_number;

00962 double ti1, bitrate, avg_bitrate;

00963

00964 /* this is executed just the first time do_video_stats is called */

00965 if (!vstats_file) {

00966 vstats_file = fopen(vstats_filename, "w");

00967 if (!vstats_file) {

00968 perror("fopen");

00969 exit(1);

00970 }

00971 }

00972

00973 enc = ost->st->codec;

00974 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {

00975 frame_number = ost->st->nb_frames;

00976 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);

00977 if (enc->flags&CODEC_FLAG_PSNR)

00978 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

00979

00980 fprintf(vstats_file,"f_size= %6d ", frame_size);

00981 /* compute pts value */

00982 ti1 = ost->st->pts.val * av_q2d(enc->time_base);

00983 if (ti1 < 0.01)

00984 ti1 = 0.01;

00985

00986 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;

00987 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;

00988 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",

00989 (double)video_size / 1024, ti1, bitrate, avg_bitrate);

00990 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));

00991 }

00992 }
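/* Annotation: the per-frame bitrate above treats each frame as lasting one time-base
   tick: a 5000-byte frame with a 1/25 time base gives 5000*8 / 0.04 / 1000 =
   1000 kbit/s, while avg_bitrate divides the running video_size by the stream pts. */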

00993

01000 static int reap_filters(void)

01001 {

01002 AVFilterBufferRef *picref;

01003 AVFrame *filtered_frame = NULL;

01004 int i;

01005 int64_t frame_pts;

01006

01007 /* Reap all buffers present in the buffer sinks */

01008 for (i = 0; i < nb_output_streams; i++) {

01009 OutputStream *ost = output_streams[i];

01010 OutputFile *of = output_files[ost->file_index];

01011 int ret = 0;

01012

01013 if (!ost->filter)

01014 continue;

01015

01016 if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {

01017 return AVERROR(ENOMEM);

01018 } else

01019 avcodec_get_frame_defaults(ost->filtered_frame);

01020 filtered_frame = ost->filtered_frame;

01021

01022 while (1) {

01023 ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,

01024 AV_BUFFERSINK_FLAG_NO_REQUEST);

01025 if (ret < 0) {

01026 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {

01027 char buf[256];

01028 av_strerror(ret, buf, sizeof(buf));

01029 av_log(NULL, AV_LOG_WARNING,

01030 "Error in av_buffersink_get_buffer_ref(): %s\n", buf);

01031 }

01032 break;

01033 }

01034 frame_pts = AV_NOPTS_VALUE;

01035 if (picref->pts != AV_NOPTS_VALUE) {

01036 filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,

01037 ost->filter->filter->inputs[0]->time_base,

01038 ost->st->codec->time_base) -

01039 av_rescale_q(of->start_time,

01040 AV_TIME_BASE_Q,

01041 ost->st->codec->time_base);

01042

01043 if (of->start_time && filtered_frame->pts < 0) {

01044 avfilter_unref_buffer(picref);

01045 continue;

01046 }

01047 }

01048 //if (ost->source_index >= 0)

01049 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

01050

01051

01052 switch (ost->filter->filter->inputs[0]->type) {

01053 case AVMEDIA_TYPE_VIDEO:

01054 avfilter_copy_buf_props(filtered_frame, picref);

01055 filtered_frame->pts = frame_pts;

01056 if (!ost->frame_aspect_ratio)

01057 ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;

01058

01059 do_video_out(of->ctx, ost, filtered_frame);

01060 break;

01061 case AVMEDIA_TYPE_AUDIO:

01062 avfilter_copy_buf_props(filtered_frame, picref);

01063 filtered_frame->pts = frame_pts;

01064 do_audio_out(of->ctx, ost, filtered_frame);

01065 break;

01066 default:

01067 // TODO support subtitle filters

01068 av_assert0(0);

01069 }

01070

01071 avfilter_unref_buffer(picref);

01072 }

01073 }

01074

01075 return 0;

01076 }
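/* Annotation: AV_BUFFERSINK_FLAG_NO_REQUEST only drains frames already queued in each
   buffersink; once none are left the call returns AVERROR(EAGAIN), the warning is
   skipped, and the inner loop moves on to the next output stream. */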

01077

01078 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)

01079 {

01080 char buf[1024];

01081 AVBPrint buf_script;

01082 OutputStream *ost;

01083 AVFormatContext *oc;

01084 int64_t total_size;

01085 AVCodecContext *enc;

01086 int frame_number, vid, i;

01087 double bitrate;

01088 int64_t pts = INT64_MIN;

01089 static int64_t last_time = -1;

01090 static int qp_histogram[52];

01091 int hours, mins, secs, us;

01092

01093 if (!print_stats && !is_last_report && !progress_avio)

01094 return;

01095

01096 if (!is_last_report) {

01097 if (last_time == -1) {

01098 last_time = cur_time;

01099 return;

01100 }

01101 if ((cur_time - last_time) < 500000)

01102 return;

01103 last_time = cur_time;

01104 }

01105

01106

01107 oc = output_files[0]->ctx;

01108

01109 total_size = avio_size(oc->pb);

01110 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too

01111 total_size = avio_tell(oc->pb);

01112 if (total_size < 0) {

01113 char errbuf[128];

01114 av_strerror(total_size, errbuf, sizeof(errbuf));

01115 av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "

01116 "avio_tell() failed: %s\n", errbuf);

01117 total_size = 0;

01118 }

01119

01120 buf[0] = '\0';

01121 vid = 0;

01122 av_bprint_init(&buf_script, 0, 1);

01123 for (i = 0; i < nb_output_streams; i++) {

01124 float q = -1;

01125 ost = output_streams[i];

01126 enc = ost->st->codec;

01127 if (!ost->stream_copy && enc->coded_frame)

01128 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;

01129 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {

01130 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);

01131 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",

01132 ost->file_index, ost->index, q);

01133 }

01134 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {

01135 float fps, t = (cur_time-timer_start) / 1000000.0;

01136

01137 frame_number = ost->frame_number;

01138 fps = t > 1 ? frame_number / t : 0;

01139 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",

01140 frame_number, fps < 9.95, fps, q);

01141 av_bprintf(&buf_script, "frame=%d\n", frame_number);

01142 av_bprintf(&buf_script, "fps=%.1f\n", fps);

01143 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",

01144 ost->file_index, ost->index, q);

01145 if (is_last_report)

01146 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");

01147 if (qp_hist) {

01148 int j;

01149 int qp = lrintf(q);

01150 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))

01151 qp_histogram[qp]++;

01152 for (j = 0; j < 32; j++)

01153 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));

01154 }

01155 if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {

01156 int j;

01157 double error, error_sum = 0;

01158 double scale, scale_sum = 0;

01159 double p;

01160 char type[3] = { 'Y','U','V' };

01161 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");

01162 for (j = 0; j < 3; j++) {

01163 if (is_last_report) {

01164 error = enc->error[j];

01165 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;

01166 } else {

01167 error = enc->coded_frame->error[j];

01168 scale = enc->width * enc->height * 255.0 * 255.0;

01169 }

01170 if (j)

01171 scale /= 4;

01172 error_sum += error;

01173 scale_sum += scale;

01174 p = psnr(error / scale);

01175 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);

01176 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",

01177 ost->file_index, ost->index, type[j] | 32, p);

01178 }

01179 p = psnr(error_sum / scale_sum);

01180 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));

01181 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",

01182 ost->file_index, ost->index, p);

01183 }

01184 vid = 1;

01185 }

01186 /* compute min output value */

01187 if ((is_last_report || !ost->finished) && ost->st->pts.val != AV_NOPTS_VALUE)

01188 pts = FFMAX(pts, av_rescale_q(ost->st->pts.val,

01189 ost->st->time_base, AV_TIME_BASE_Q));

01190 }

01191

01192 secs = pts / AV_TIME_BASE;

01193 us = pts % AV_TIME_BASE;

01194 mins = secs / 60;

01195 secs %= 60;

01196 hours = mins / 60;

01197 mins %= 60;

01198

01199 bitrate = pts ? total_size * 8 / (pts / 1000.0) : 0;
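/* Annotation: pts is in AV_TIME_BASE (microsecond) units, so pts/1000.0 is
   milliseconds and bytes*8 / ms yields kbit/s; e.g. 1,000,000 bytes after 8 s gives
   8,000,000 bits / 8000 ms = 1000 kbit/s. */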

01200

01201 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),

01202 "size=%8.0fkB time=", total_size / 1024.0);

01203 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),

01204 "%02d:%02d:%02d.%02d ", hours, mins, secs,

01205 (100 * us) / AV_TIME_BASE);

01206 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),

01207 "bitrate=%6.1fkbits/s", bitrate);

01208 av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);

01209 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);

01210 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",

01211 hours, mins, secs, us);

01212

01213 if (nb_frames_dup || nb_frames_drop)

01214 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",

01215 nb_frames_dup, nb_frames_drop);

01216 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);

01217 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

01218

01219 if (print_stats || is_last_report) {

01220 av_log(NULL, AV_LOG_INFO, "%s \r", buf);

01221

01222 fflush(stderr);

01223 }

01224

01225 if (progress_avio) {

01226 av_bprintf(&buf_script, "progress=%s\n",

01227 is_last_report ? "end" : "continue");

01228 avio_write(progress_avio, buf_script.str,

01229 FFMIN(buf_script.len, buf_script.size - 1));

01230 avio_flush(progress_avio);

01231 av_bprint_finalize(&buf_script, NULL);

01232 if (is_last_report) {

01233 avio_close(progress_avio);

01234 progress_avio = NULL;

01235 }

01236 }

01237

01238 if (is_last_report) {

01239 int64_t raw= audio_size + video_size + subtitle_size + extra_size;

01240 av_log(NULL, AV_LOG_INFO, "\n");

01241 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0f global headers:%1.0fkB muxing overhead %f%%\n",

01242 video_size / 1024.0,

01243 audio_size / 1024.0,

01244 subtitle_size / 1024.0,

01245 extra_size / 1024.0,

01246 100.0 * (total_size - raw) / raw

01247 );

01248 if(video_size + audio_size + subtitle_size + extra_size == 0){

01249 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");

01250 }

01251 }

01252 }

01253

01254 static void flush_encoders(void)

01255 {

01256 int i, ret;

01257

01258 for (i = 0; i < nb_output_streams; i++) {

01259 OutputStream *ost = output_streams[i];

01260 AVCodecContext *enc = ost->st->codec;

01261 AVFormatContext *os = output_files[ost->file_index]->ctx;

01262 int stop_encoding = 0;

01263

01264 if (!ost->encoding_needed)

01265 continue;

01266

01267 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)

01268 continue;

01269 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)

01270 continue;

01271

01272 for (;;) {

01273 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;

01274 const char *desc;

01275 int64_t *size;

01276

01277 switch (ost->st->codec->codec_type) {

01278 case AVMEDIA_TYPE_AUDIO:

01279 encode = avcodec_encode_audio2;

01280 desc = "Audio";

01281 size = &audio_size;

01282 break;

01283 case AVMEDIA_TYPE_VIDEO:

01284 encode = avcodec_encode_video2;

01285 desc = "Video";

01286 size = &video_size;

01287 break;

01288 default:

01289 stop_encoding = 1;

01290 }

01291

01292 if (encode) {

01293 AVPacket pkt;

01294 int got_packet;

01295 av_init_packet(&pkt);

01296 pkt.data = NULL;

01297 pkt.size = 0;

01298

01299 update_benchmark(NULL);

01300 ret = encode(enc, &pkt, NULL, &got_packet);

01301 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);

01302 if (ret < 0) {

01303 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);

01304 exit(1);

01305 }

01306 *size += pkt.size;

01307 if (ost->logfile && enc->stats_out) {

01308 fprintf(ost->logfile, "%s", enc->stats_out);

01309 }

01310 if (!got_packet) {

01311 stop_encoding = 1;

01312 break;

01313 }

01314 if (pkt.pts != AV_NOPTS_VALUE)

01315 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);

01316 if (pkt.dts != AV_NOPTS_VALUE)

01317 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);

01318 if (pkt.duration > 0)

01319 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);

01320 write_frame(os, &pkt, ost);

01321 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {

01322 do_video_stats(ost, pkt.size);

01323 }

01324 }

01325

01326 if (stop_encoding)

01327 break;

01328 }

01329 }

01330 }

01331

01332 /*

01333 * Check whether a packet from ist should be written into ost at this time

01334 */

01335 static int check_output_constraints(InputStream *ist, OutputStream *ost)

01336 {

01337 OutputFile *of = output_files[ost->file_index];

01338 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

01339

01340 if (ost->source_index != ist_index)

01341 return 0;

01342

01343 if (of->start_time && ist->pts < of->start_time)

01344 return 0;

01345

01346 return 1;

01347 }

01348

01349 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)

01350 {

01351 OutputFile *of = output_files[ost->file_index];

01352 int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);

01353 AVPicture pict;

01354 AVPacket opkt;

01355

01356 av_init_packet(&opkt);

01357

01358 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&

01359 !ost->copy_initial_nonkeyframes)

01360 return;

01361

01362 if (!ost->frame_number && ist->pts < of->start_time &&

01363 !ost->copy_prior_start)

01364 return;

01365

01366 if (of->recording_time != INT64_MAX &&

01367 ist->pts >= of->recording_time + of->start_time) {

01368 close_output_stream(ost);

01369 return;

01370 }

01371

01372 /* force the input stream PTS */

01373 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)

01374 audio_size += pkt->size;

01375 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {

01376 video_size += pkt->size;

01377 ost->sync_opts++;

01378 } else if (ost->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {

01379 subtitle_size += pkt->size;

01380 }

01381

01382 if (pkt->pts != AV_NOPTS_VALUE)

01383 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;

01384 else

01385 opkt.pts = AV_NOPTS_VALUE;

01386

01387 if (pkt->dts == AV_NOPTS_VALUE)

01388 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);

01389 else

01390 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);

01391 opkt.dts -= ost_tb_start_time;

01392

01393 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {

01394 int duration = av_get_audio_frame_duration(ist->st->codec, pkt->size);

01395 if(!duration)

01396 duration = ist->st->codec->frame_size;

01397 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,

01398 (AVRational){1, ist->st->codec->sample_rate}, duration, &ist->filter_in_rescale_delta_last,

01399 ost->st->time_base) - ost_tb_start_time;

01400 }

01401

01402 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);

01403 opkt.flags = pkt->flags;

01404

01405 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters

01406 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264

01407 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO

01408 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO

01409 && ost->st->codec->codec_id != AV_CODEC_ID_VC1

01410 ) {

01411 if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))

01412 opkt.destruct = av_destruct_packet;

01413 } else {

01414 opkt.data = pkt->data;

01415 opkt.size = pkt->size;

01416 }

01417

01418 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {

01419 /* store AVPicture in AVPacket, as expected by the output format */

01420 avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);

01421 opkt.data = (uint8_t *)&pict;

01422 opkt.size = sizeof(AVPicture);

01423 opkt.flags |= AV_PKT_FLAG_KEY;

01424 }

01425

01426 write_frame(of->ctx, &opkt, ost);

01427 ost->st->codec->frame_number++;

01428 }

01429

01430 static void rate_emu_sleep(InputStream *ist)

01431 {

01432 if (input_files[ist->file_index]->rate_emu) {

01433 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);

01434 int64_t now = av_gettime() - ist->start;

01435 if (pts > now)

01436 av_usleep(pts - now);

01437 }

01438 }
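/* Annotation: with -re (rate_emu) input consumption is throttled to real time.
   ist->dts is already in AV_TIME_BASE (microsecond) units, so the av_rescale() here
   is effectively a no-op; if the stream clock reads 5 s but only 4 s of wall time
   have passed since ist->start, the thread sleeps for roughly 1,000,000 us. */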

01439

01440 int guess_input_channel_layout(InputStream *ist)

01441 {

01442 AVCodecContext *dec = ist->st->codec;

01443

01444 if (!dec->channel_layout) {

01445 char layout_name[256];

01446

01447 dec->channel_layout = av_get_default_channel_layout(dec->channels);

01448 if (!dec->channel_layout)

01449 return 0;

01450 av_get_channel_layout_string(layout_name, sizeof(layout_name),

01451 dec->channels, dec->channel_layout);

01452 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "

01453 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);

01454 }

01455 return 1;

01456 }
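/* Annotation: e.g. a 2-channel input with no layout set gets
   av_get_default_channel_layout(2) == AV_CH_LAYOUT_STEREO, and the warning reports
   the guessed layout as "stereo". */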

01457

01458 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)

01459 {

01460 AVFrame *decoded_frame;

01461 AVCodecContext *avctx = ist->st->codec;

01462 int i, ret, resample_changed;

01463 AVRational decoded_frame_tb;

01464

01465 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))

01466 return AVERROR(ENOMEM);

01467 decoded_frame = ist->decoded_frame;

01468

01469 update_benchmark(NULL);

01470 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);

01471 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);

01472

01473 if (ret >= 0 && avctx->sample_rate <= 0) {

01474 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);

01475 ret = AVERROR_INVALIDDATA;

01476 }

01477

01478 if (!*got_output || ret < 0) {

01479 if (!pkt->size) {

01480 for (i = 0; i < ist->nb_filters; i++)

01481 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);

01482 }

01483 return ret;

01484 }

01485

01486 #if 1

01487 /* increment next_dts to use for the case where the input stream does not

01488 have timestamps or there are multiple frames in the packet */

01489 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /

01490 avctx->sample_rate;

01491 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /

01492 avctx->sample_rate;

01493 #endif
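/* Annotation: e.g. a 1024-sample frame at 44100 Hz advances next_pts/next_dts by
   1024 * 1000000 / 44100 = 23219 microseconds (integer division). */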

01494

01495 rate_emu_sleep(ist);

01496

01497 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||

01498 ist->resample_channels != avctx->channels ||

01499 ist->resample_channel_layout != decoded_frame->channel_layout ||

01500 ist->resample_sample_rate != decoded_frame->sample_rate;

01501 if (resample_changed) {

01502 char layout1[64], layout2[64];

01503

01504 if (!guess_input_channel_layout(ist)) {

01505 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "

01506 "layout for Input Stream #%d.%d\n", ist->file_index,

01507 ist->st->index);

01508 exit(1);

01509 }

01510 decoded_frame->channel_layout = avctx->channel_layout;

01511

01512 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,

01513 ist->resample_channel_layout);

01514 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,

01515 decoded_frame->channel_layout);

01516

01517 av_log(NULL, AV_LOG_INFO,

01518 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",

01519 ist->file_index, ist->st->index,

01520 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),

01521 ist->resample_channels, layout1,

01522 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),

01523 avctx->channels, layout2);

01524

01525 ist->resample_sample_fmt = decoded_frame->format;

01526 ist->resample_sample_rate = decoded_frame->sample_rate;

01527 ist->resample_channel_layout = decoded_frame->channel_layout;

01528 ist->resample_channels = avctx->channels;

01529

01530 for (i = 0; i < nb_filtergraphs; i++)

01531 if (ist_in_filtergraph(filtergraphs[i], ist)) {

01532 FilterGraph *fg = filtergraphs[i];

01533 int j;

01534 if (configure_filtergraph(fg) < 0) {

01535 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");

01536 exit(1);

01537 }

01538 for (j = 0; j < fg->nb_outputs; j++) {

01539 OutputStream *ost = fg->outputs[j]->ost;

01540 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&

01541 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))

01542 av_buffersink_set_frame_size(ost->filter->filter,

01543 ost->st->codec->frame_size);

01544 }

01545 }

01546 }

01547

01548 /* if the decoder provides a pts, use it instead of the last packet pts.

01549 the decoder could be delaying output by a packet or more. */

01550 if (decoded_frame->pts != AV_NOPTS_VALUE) {

01551 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);

01552 decoded_frame_tb = avctx->time_base;

01553 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {

01554 decoded_frame->pts = decoded_frame->pkt_pts;

01555 pkt->pts = AV_NOPTS_VALUE;

01556 decoded_frame_tb = ist->st->time_base;

01557 } else if (pkt->pts != AV_NOPTS_VALUE) {

01558 decoded_frame->pts = pkt->pts;

01559 pkt->pts = AV_NOPTS_VALUE;

01560 decoded_frame_tb = ist->st->time_base;

01561 }else {

01562 decoded_frame->pts = ist->dts;

01563 decoded_frame_tb = AV_TIME_BASE_Q;

01564 }

01565 if (decoded_frame->pts != AV_NOPTS_VALUE)

01566 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,

01567 (AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,

01568 (AVRational){1, ist->st->codec->sample_rate});

01569 for (i = 0; i < ist->nb_filters; i++)

01570 av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame,

01571 AV_BUFFERSRC_FLAG_PUSH);

01572

01573 decoded_frame->pts = AV_NOPTS_VALUE;

01574

01575 return ret;

01576 }

01577

01578 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)

01579 {

01580 AVFrame *decoded_frame;

01581 void *buffer_to_free = NULL;

01582 int i, ret = 0, resample_changed;

01583 int64_t best_effort_timestamp;

01584 AVRational *frame_sample_aspect;

01585

01586 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))

01587 return AVERROR(ENOMEM);

01588 decoded_frame = ist->decoded_frame;

01589 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);

01590

01591 update_benchmark(NULL);

01592 ret = avcodec_decode_video2(ist->st->codec,

01593 decoded_frame, got_output, pkt);

01594 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);

01595 if (!*got_output || ret < 0) {

01596 if (!pkt->size) {

01597 for (i = 0; i < ist->nb_filters; i++)

01598 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);

01599 }

01600 return ret;

01601 }

01602

01603 if(ist->top_field_first>=0)

01604 decoded_frame->top_field_first = ist->top_field_first;

01605

01606 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);

01607 if(best_effort_timestamp != AV_NOPTS_VALUE)

01608 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

01609

01610 if (debug_ts) {

01611 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "

01612 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d \n",

01613 ist->st->index, av_ts2str(decoded_frame->pts),

01614 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),

01615 best_effort_timestamp,

01616 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),

01617 decoded_frame->key_frame, decoded_frame->pict_type);

01618 }

01619

01620 pkt->size = 0;

01621 pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);

01622

01623 rate_emu_sleep(ist);

01624

01625 if (ist->st->sample_aspect_ratio.num)

01626 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

01627

01628 resample_changed = ist->resample_width != decoded_frame->width ||

01629 ist->resample_height != decoded_frame->height ||

01630 ist->resample_pix_fmt != decoded_frame->format;

01631 if (resample_changed) {

01632 av_log(NULL, AV_LOG_INFO,

01633 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",

01634 ist->file_index, ist->st->index,

01635 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),

01636 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));

01637

01638 ist->resample_width = decoded_frame->width;

01639 ist->resample_height = decoded_frame->height;

01640 ist->resample_pix_fmt = decoded_frame->format;

01641

01642 for (i = 0; i < nb_filtergraphs; i++) {

01643 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&

01644 configure_filtergraph(filtergraphs[i]) < 0) {

01645 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");

01646 exit(1);

01647 }

01648 }

01649 }

01650

01651 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");

01652 for (i = 0; i < ist->nb_filters; i++) {

01653 int changed = ist->st->codec->width != ist->filters[i]->filter->outputs[0]->w

01654 || ist->st->codec->height != ist->filters[i]->filter->outputs[0]->h

01655 || ist->st->codec->pix_fmt != ist->filters[i]->filter->outputs[0]->format;

01656

01657 if (!frame_sample_aspect->num)

01658 *frame_sample_aspect = ist->st->sample_aspect_ratio;

01659 if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {

01660 FrameBuffer *buf = decoded_frame->opaque;

01661 AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(

01662 decoded_frame->data, decoded_frame->linesize,

01663 AV_PERM_READ | AV_PERM_PRESERVE,

01664 ist->st->codec->width, ist->st->codec->height,

01665 ist->st->codec->pix_fmt);

01666

01667 avfilter_copy_frame_props(fb, decoded_frame);

01668 fb->buf->priv = buf;

01669 fb->buf->free = filter_release_buffer;

01670

01671 av_assert0(buf->refcount>0);

01672 buf->refcount++;

01673 av_buffersrc_add_ref(ist->filters[i]->filter, fb,

01674 AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |

01675 AV_BUFFERSRC_FLAG_NO_COPY |

01676 AV_BUFFERSRC_FLAG_PUSH);

01677 } else

01678 if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) {

01679 av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");

01680 exit(1);

01681 }

01682

01683 }

01684

01685 av_free(buffer_to_free);

01686 return ret;

01687 }

01688

01689 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)

01690 {

01691 AVSubtitle subtitle;

01692 int i, ret = avcodec_decode_subtitle2(ist->st->codec,

01693 &subtitle, got_output, pkt);

01694 if (ret < 0 || !*got_output) {

01695 if (!pkt->size)

01696 sub2video_flush(ist);

01697 return ret;

01698 }

01699

01700 if (ist->fix_sub_duration) {

01701 if (ist->prev_sub.got_output) {

01702 int end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,

01703 1000, AV_TIME_BASE);

01704 if (end < ist->prev_sub.subtitle.end_display_time) {

01705 av_log(ist->st->codec, AV_LOG_DEBUG,

01706 "Subtitle duration reduced from %d to %d\n",

01707 ist->prev_sub.subtitle.end_display_time, end);

01708 ist->prev_sub.subtitle.end_display_time = end;

01709 }

01710 }

01711 FFSWAP(int, *got_output, ist->prev_sub.got_output);

01712 FFSWAP(int, ret, ist->prev_sub.ret);

01713 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);

01714 }

01715

01716 sub2video_update(ist, &subtitle);

01717

01718 if (!*got_output || !subtitle.num_rects)

01719 return ret;

01720

01721 rate_emu_sleep(ist);

01722

01723 for (i = 0; i < nb_output_streams; i++) {

01724 OutputStream *ost = output_streams[i];

01725

01726 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)

01727 continue;

01728

01729 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);

01730 }

01731

01732 avsubtitle_free(&subtitle);

01733 return ret;

01734 }

01735

01736 /* pkt = NULL means EOF (needed to flush decoder buffers) */

01737 static int output_packet(InputStream *ist, const AVPacket *pkt)

01738 {

01739 int ret = 0, i;

01740 int got_output;

01741

01742 AVPacket avpkt;

01743 if (!ist->saw_first_ts) {

01744 ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;

01745 ist->pts = 0;

01746 if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {

01747 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);

01748 ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong

01749 }

01750 ist->saw_first_ts = 1;

01751 }

01752

01753 if (ist->next_dts == AV_NOPTS_VALUE)

01754 ist->next_dts = ist->dts;

01755 if (ist->next_pts == AV_NOPTS_VALUE)

01756 ist->next_pts = ist->pts;

01757

01758 if (pkt == NULL) {

01759 /* EOF handling */

01760 av_init_packet(&avpkt);

01761 avpkt.data = NULL;

01762 avpkt.size = 0;

01763 goto handle_eof;

01764 } else {

01765 avpkt = *pkt;

01766 }

01767

01768 if (pkt->dts != AV_NOPTS_VALUE) {

01769 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);

01770 if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)

01771 ist->next_pts = ist->pts = ist->dts;

01772 }

01773

01774 // while we have more to decode or while the decoder did output something on EOF

01775 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {

01776 int duration;

01777 handle_eof:

01778

01779 ist->pts = ist->next_pts;

01780 ist->dts = ist->next_dts;

01781

01782 if (avpkt.size && avpkt.size != pkt->size) {

01783 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,

01784 "Multiple frames in a packet from stream %d\n", pkt->stream_index);

01785 ist->showed_multi_packet_warning = 1;

01786 }

01787

01788 switch (ist->st->codec->codec_type) {

01789 case AVMEDIA_TYPE_AUDIO:

01790 ret = decode_audio (ist, &avpkt, &got_output);

01791 break;

01792 case AVMEDIA_TYPE_VIDEO:

01793 ret = decode_video (ist, &avpkt, &got_output);

01794 if (avpkt.duration) {

01795 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);

01796 } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {

01797 int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;

01798 duration = ((int64_t)AV_TIME_BASE *

01799 ist->st->codec->time_base.num * ticks) /

01800 ist->st->codec->time_base.den;

01801 } else

01802 duration = 0;

01803

01804 if(ist->dts != AV_NOPTS_VALUE && duration) {

01805 ist->next_dts += duration;

01806 }else

01807 ist->next_dts = AV_NOPTS_VALUE;

01808

01809 if (got_output)

01810 ist->next_pts += duration; //FIXME the duration is not correct in some cases

01811 break;

01812 case AVMEDIA_TYPE_SUBTITLE:

01813 ret = transcode_subtitles(ist, &avpkt, &got_output);

01814 break;

01815 default:

01816 return -1;

01817 }

01818

01819 if (ret < 0)

01820 return ret;

01821

01822 avpkt.dts=

01823 avpkt.pts= AV_NOPTS_VALUE;

01824

01825 // touch data and size only if not EOF

01826 if (pkt) {

01827 if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)

01828 ret = avpkt.size;

01829 avpkt.data += ret;

01830 avpkt.size -= ret;

01831 }

01832 if (!got_output) {

01833 continue;

01834 }

01835 }

01836

01837 /* handle stream copy */

01838 if (!ist->decoding_needed) {

01839 rate_emu_sleep(ist);

01840 ist->dts = ist->next_dts;

01841 switch (ist->st->codec->codec_type) {

01842 case AVMEDIA_TYPE_AUDIO:

01843 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /

01844 ist->st->codec->sample_rate;

01845 break;

01846 case AVMEDIA_TYPE_VIDEO:

01847 if (pkt->duration) {

01848 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);

01849 } else if(ist->st->codec->time_base.num != 0) {

01850 int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;

01851 ist->next_dts += ((int64_t)AV_TIME_BASE *

01852 ist->st->codec->time_base.num * ticks) /

01853 ist->st->codec->time_base.den;

01854 }

01855 break;

01856 }

01857 ist->pts = ist->dts;

01858 ist->next_pts = ist->next_dts;

01859 }

01860 for (i = 0; pkt && i < nb_output_streams; i++) {

01861 OutputStream *ost = output_streams[i];

01862

01863 if (!check_output_constraints(ist, ost) || ost->encoding_needed)

01864 continue;

01865

01866 do_streamcopy(ist, ost, pkt);

01867 }

01868

01869 return 0;

01870 }
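
/* Note on the decode loop above: one AVPacket may carry several frames, so the
 * number of bytes the decoder reports as consumed is used to advance avpkt.data
 * and shrink avpkt.size until the packet is exhausted (for non-audio streams
 * the whole packet is treated as consumed in one go).  Calling
 * output_packet(ist, NULL) submits an empty packet so the decoder can flush
 * the frames it still has buffered at EOF. */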

01871

01872 static void print_sdp(void)

01873 {

01874 char sdp[2048];

01875 int i;

01876 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);

01877

01878 if (!avc)

01879 exit(1);

01880 for (i = 0; i < nb_output_files; i++)

01881 avc[i] = output_files[i]->ctx;

01882

01883 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));

01884 printf("SDP:\n%s\n", sdp);

01885 fflush(stdout);

01886 av_freep(&avc);

01887 }

01888

01889 static int init_input_stream(int ist_index, char *error, int error_len)

01890 {

01891 int ret;

01892 InputStream *ist = input_streams[ist_index];

01893

01894 if (ist->decoding_needed) {

01895 AVCodec *codec = ist->dec;

01896 if (!codec) {

01897 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",

01898 avcodec_get_name(ist->st->codec->codec_id), ist->file_index, ist->st->index);

01899 return AVERROR(EINVAL);

01900 }

01901

01902 ist->dr1 = (codec->capabilities & CODEC_CAP_DR1) && !do_deinterlace;

01903 if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {

01904 ist->st->codec->get_buffer = codec_get_buffer;

01905 ist->st->codec->release_buffer = codec_release_buffer;

01906 ist->st->codec->opaque = &ist->buffer_pool;

01907 }

01908

01909 if (!av_dict_get(ist->opts, "threads", NULL, 0))

01910 av_dict_set(&ist->opts, "threads", "auto", 0);

01911 if ((ret = avcodec_open2(ist->st->codec, codec, &ist->opts)) < 0) {

01912 if (ret == AVERROR_EXPERIMENTAL)

01913 abort_codec_experimental(codec, 0);

01914 snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",

01915 ist->file_index, ist->st->index);

01916 return ret;

01917 }

01918 assert_avoptions(ist->opts);

01919 }

01920

01921 ist->next_pts = AV_NOPTS_VALUE;

01922 ist->next_dts = AV_NOPTS_VALUE;

01923 ist->is_start = 1;

01924

01925 return 0;

01926 }

01927

01928 static InputStream *get_input_stream(OutputStream *ost)

01929 {

01930 if (ost->source_index >= 0)

01931 return input_streams[ost->source_index];

01932 return NULL;

01933 }

01934

01935 static void parse_forced_key_frames(char *kf, OutputStream *ost,

01936 AVCodecContext *avctx)

01937 {

01938 char *p;

01939 int n = 1, i;

01940 int64_t t;

01941

01942 for (p = kf; *p; p++)

01943 if (*p == ',')

01944 n++;

01945 ost->forced_kf_count = n;

01946 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);

01947 if (!ost->forced_kf_pts) {

01948 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");

01949 exit(1);

01950 }

01951

01952 p = kf;

01953 for (i = 0; i < n; i++) {

01954 char *next = strchr(p, ',');

01955

01956 if (next)

01957 *next++ = 0;

01958

01959 t = parse_time_or_die("force_key_frames", p, 1);

01960 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

01961

01962 p = next;

01963 }

01964 }
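
/* Example (illustrative): "-force_key_frames 0:00:10,0:00:20" is split on ','
 * into two entries; each entry is parsed by parse_time_or_die() into an
 * AV_TIME_BASE_Q timestamp and rescaled to the encoder time base before being
 * stored in ost->forced_kf_pts[]. */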

01965

01966 static void report_new_stream(int input_index, AVPacket *pkt)

01967 {

01968 InputFile *file = input_files[input_index];

01969 AVStream *st = file->ctx->streams[pkt->stream_index];

01970

01971 if (pkt->stream_index < file->nb_streams_warn)

01972 return;

01973 av_log(file->ctx, AV_LOG_WARNING,

01974 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",

01975 av_get_media_type_string(st->codec->codec_type),

01976 input_index, pkt->stream_index,

01977 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));

01978 file->nb_streams_warn = pkt->stream_index + 1;

01979 }

01980

01981 static int transcode_init(void)

01982 {

01983 int ret = 0, i, j, k;

01984 AVFormatContext *oc;

01985 AVCodecContext *codec;

01986 OutputStream *ost;

01987 InputStream *ist;

01988 char error[1024];

01989 int want_sdp = 1;

01990

01991 /* init framerate emulation */

01992 for (i = 0; i < nb_input_files; i++) {

01993 InputFile *ifile = input_files[i];

01994 if (ifile->rate_emu)

01995 for (j = 0; j < ifile->nb_streams; j++)

01996 input_streams[j + ifile->ist_index]->start = av_gettime();

01997 }

01998

01999 /* output stream init */

02000 for (i = 0; i < nb_output_files; i++) {

02001 oc = output_files[i]->ctx;

02002 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {

02003 av_dump_format(oc, i, oc->filename, 1);

02004 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);

02005 return AVERROR(EINVAL);

02006 }

02007 }

02008

02009 /* init complex filtergraphs */

02010 for (i = 0; i < nb_filtergraphs; i++)

02011 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)

02012 return ret;

02013

02014 /* for each output stream, we compute the right encoding parameters */

02015 for (i = 0; i < nb_output_streams; i++) {

02016 AVCodecContext *icodec = NULL;

02017 ost = output_streams[i];

02018 oc = output_files[ost->file_index]->ctx;

02019 ist = get_input_stream(ost);

02020

02021 if (ost->attachment_filename)

02022 continue;

02023

02024 codec = ost->st->codec;

02025

02026 if (ist) {

02027 icodec = ist->st->codec;

02028

02029 ost->st->disposition = ist->st->disposition;

02030 codec->bits_per_raw_sample = icodec->bits_per_raw_sample;

02031 codec->chroma_sample_location = icodec->chroma_sample_location;

02032 }

02033

02034 if (ost->stream_copy) {

02035 uint64_t extra_size;

02036

02037 av_assert0(ist && !ost->filter);

02038

02039 extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;

02040

02041 if (extra_size > INT_MAX) {

02042 return AVERROR(EINVAL);

02043 }

02044

02045 /* if stream_copy is selected, no need to decode or encode */

02046 codec->codec_id = icodec->codec_id;

02047 codec->codec_type = icodec->codec_type;

02048

02049 if (!codec->codec_tag) {

02050 if (!oc->oformat->codec_tag ||

02051 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||

02052 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)

02053 codec->codec_tag = icodec->codec_tag;

02054 }

02055

02056 codec->bit_rate = icodec->bit_rate;

02057 codec->rc_max_rate = icodec->rc_max_rate;

02058 codec->rc_buffer_size = icodec->rc_buffer_size;

02059 codec->field_order = icodec->field_order;

02060 codec->extradata = av_mallocz(extra_size);

02061 if (!codec->extradata) {

02062 return AVERROR(ENOMEM);

02063 }

02064 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);

02065 codec->extradata_size= icodec->extradata_size;

02066 codec->bits_per_coded_sample = icodec->bits_per_coded_sample;

02067

02068 codec->time_base = ist->st->time_base;

02069 /*

02070 * AVI is a special case here because it supports variable fps but

02071 * having the fps and timebase differ significantly adds quite some

02072 * overhead

02073 */

02074 if(!strcmp(oc->oformat->name, "avi")) {

02075 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)

02076 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)

02077 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(icodec->time_base)

02078 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(icodec->time_base) < 1.0/500

02079 || copy_tb==2){

02080 codec->time_base.num = ist->st->r_frame_rate.den;

02081 codec->time_base.den = 2*ist->st->r_frame_rate.num;

02082 codec->ticks_per_frame = 2;

02083 } else if ( copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base)

02084 && av_q2d(ist->st->time_base) < 1.0/500

02085 || copy_tb==0){

02086 codec->time_base = icodec->time_base;

02087 codec->time_base.num *= icodec->ticks_per_frame;

02088 codec->time_base.den *= 2;

02089 codec->ticks_per_frame = 2;

02090 }

02091 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)

02092 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")

02093 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")

02094 && strcmp(oc->oformat->name, "f4v")

02095 ) {

02096 if( copy_tb<0 && icodec->time_base.den

02097 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base)

02098 && av_q2d(ist->st->time_base) < 1.0/500

02099 || copy_tb==0){

02100 codec->time_base = icodec->time_base;

02101 codec->time_base.num *= icodec->ticks_per_frame;

02102 }

02103 }

02104

02105 if(ost->frame_rate.num)

02106 codec->time_base = av_inv_q(ost->frame_rate);

02107

02108 av_reduce(&codec->time_base.num, &codec->time_base.den,

02109 codec->time_base.num, codec->time_base.den, INT_MAX);

02110

02111 switch (codec->codec_type) {

02112 case AVMEDIA_TYPE_AUDIO:

02113 if (audio_volume != 256) {

02114 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");

02115 exit(1);

02116 }

02117 codec->channel_layout = icodec->channel_layout;

02118 codec->sample_rate = icodec->sample_rate;

02119 codec->channels = icodec->channels;

02120 codec->frame_size = icodec->frame_size;

02121 codec->audio_service_type = icodec->audio_service_type;

02122 codec->block_align = icodec->block_align;

02123 if((codec->block_align == 1 || codec->block_align == 1152) && codec->codec_id == AV_CODEC_ID_MP3)

02124 codec->block_align= 0;

02125 if(codec->codec_id == AV_CODEC_ID_AC3)

02126 codec->block_align= 0;

02127 break;

02128 case AVMEDIA_TYPE_VIDEO:

02129 codec->pix_fmt = icodec->pix_fmt;

02130 codec->width = icodec->width;

02131 codec->height = icodec->height;

02132 codec->has_b_frames = icodec->has_b_frames;

02133 if (!codec->sample_aspect_ratio.num) {

02134 codec->sample_aspect_ratio =

02135 ost->st->sample_aspect_ratio =

02136 ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :

02137 ist->st->codec->sample_aspect_ratio.num ?

02138 ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};

02139 }

02140 ost->st->avg_frame_rate = ist->st->avg_frame_rate;

02141 break;

02142 case AVMEDIA_TYPE_SUBTITLE:

02143 codec->width = icodec->width;

02144 codec->height = icodec->height;

02145 break;

02146 case AVMEDIA_TYPE_DATA:

02147 case AVMEDIA_TYPE_ATTACHMENT:

02148 break;

02149 default:

02150 abort();

02151 }

02152 } else {

02153 if (!ost->enc)

02154 ost->enc = avcodec_find_encoder(codec->codec_id);

02155 if (!ost->enc) {

02156 /* should only happen when a default codec is not present. */

02157 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",

02158 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);

02159 ret = AVERROR(EINVAL);

02160 goto dump_format;

02161 }

02162

02163 if (ist)

02164 ist->decoding_needed++;

02165 ost->encoding_needed = 1;

02166

02167 if (!ost->filter &&

02168 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||

02169 codec->codec_type == AVMEDIA_TYPE_AUDIO)) {

02170 FilterGraph *fg;

02171 fg = init_simple_filtergraph(ist, ost);

02172 if (configure_filtergraph(fg)) {

02173 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");

02174 exit(1);

02175 }

02176 }

02177

02178 if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {

02179 if (ost->filter && !ost->frame_rate.num)

02180 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);

02181 if (ist && !ost->frame_rate.num)

02182 ost->frame_rate = ist->framerate;

02183 if (ist && !ost->frame_rate.num)

02184 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};

02185 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};

02186 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {

02187 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);

02188 ost->frame_rate = ost->enc->supported_framerates[idx];

02189 }

02190 }

02191

02192 switch (codec->codec_type) {

02193 case AVMEDIA_TYPE_AUDIO:

02194 codec->sample_fmt = ost->filter->filter->inputs[0]->format;

02195 codec->sample_rate = ost->filter->filter->inputs[0]->sample_rate;

02196 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;

02197 codec->channels = av_get_channel_layout_nb_channels(codec->channel_layout);

02198 codec->time_base = (AVRational){ 1, codec->sample_rate };

02199 break;

02200 case AVMEDIA_TYPE_VIDEO:

02201 codec->time_base = av_inv_q(ost->frame_rate);

02202 if (ost->filter && !(codec->time_base.num && codec->time_base.den))

02203 codec->time_base = ost->filter->filter->inputs[0]->time_base;

02204 if ( av_q2d(codec->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH

02205 && (video_sync_method == VSYNC_CFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){

02206 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"

02207 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");

02208 }

02209 for (j = 0; j < ost->forced_kf_count; j++)

02210 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],

02211 AV_TIME_BASE_Q,

02212 codec->time_base);

02213

02214 codec->width = ost->filter->filter->inputs[0]->w;

02215 codec->height = ost->filter->filter->inputs[0]->h;

02216 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =

02217 ost->frame_aspect_ratio ? // overridden by the -aspect cli option

02218 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :

02219 ost->filter->filter->inputs[0]->sample_aspect_ratio;

02220 codec->pix_fmt = ost->filter->filter->inputs[0]->format;

02221

02222 if (!icodec ||

02223 codec->width != icodec->width ||

02224 codec->height != icodec->height ||

02225 codec->pix_fmt != icodec->pix_fmt) {

02226 codec->bits_per_raw_sample = frame_bits_per_raw_sample;

02227 }

02228

02229 if (ost->forced_keyframes)

02230 parse_forced_key_frames(ost->forced_keyframes, ost,

02231 ost->st->codec);

02232 break;

02233 case AVMEDIA_TYPE_SUBTITLE:

02234 codec->time_base = (AVRational){1, 1000};

02235 if (!codec->width) {

02236 codec->width = input_streams[ost->source_index]->st->codec->width;

02237 codec->height = input_streams[ost->source_index]->st->codec->height;

02238 }

02239 break;

02240 default:

02241 abort();

02242 break;

02243 }

02244 /* two pass mode */

02245 if (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {

02246 char logfilename[1024];

02247 FILE *f;

02248

02249 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",

02250 ost->logfile_prefix ? ost->logfile_prefix :

02251 DEFAULT_PASS_LOGFILENAME_PREFIX,

02252 i);

02253 if (!strcmp(ost->enc->name, "libx264")) {

02254 av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);

02255 } else {

02256 if (codec->flags & CODEC_FLAG_PASS2) {

02257 char *logbuffer;

02258 size_t logbuffer_size;

02259 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {

02260 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",

02261 logfilename);

02262 exit(1);

02263 }

02264 codec->stats_in = logbuffer;

02265 }

02266 if (codec->flags & CODEC_FLAG_PASS1) {

02267 f = fopen(logfilename, "wb");

02268 if (!f) {

02269 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",

02270 logfilename, strerror(errno));

02271 exit(1);

02272 }

02273 ost->logfile = f;

02274 }

02275 }

02276 }

02277 }

02278 }

02279

02280 /* open each encoder */

02281 for (i = 0; i < nb_output_streams; i++) {

02282 ost = output_streams[i];

02283 if (ost->encoding_needed) {

02284 AVCodec *codec = ost->enc;

02285 AVCodecContext *dec = NULL;

02286

02287 if ((ist = get_input_stream(ost)))

02288 dec = ist->st->codec;

02289 if (dec && dec->subtitle_header) {

02290 /* ASS code assumes this buffer is null terminated so add extra byte. */

02291 ost->st->codec->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);

02292 if (!ost->st->codec->subtitle_header) {

02293 ret = AVERROR(ENOMEM);

02294 goto dump_format;

02295 }

02296 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);

02297 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;

02298 }

02299 if (!av_dict_get(ost->opts, "threads", NULL, 0))

02300 av_dict_set(&ost->opts, "threads", "auto", 0);

02301 if ((ret = avcodec_open2(ost->st->codec, codec, &ost->opts)) < 0) {

02302 if (ret == AVERROR_EXPERIMENTAL)

02303 abort_codec_experimental(codec, 1);

02304 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",

02305 ost->file_index, ost->index);

02306 goto dump_format;

02307 }

02308 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&

02309 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))

02310 av_buffersink_set_frame_size(ost->filter->filter,

02311 ost->st->codec->frame_size);

02312 assert_avoptions(ost->opts);

02313 if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)

02314 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."

02315 " It takes bits/s as argument, not kbits/s\n");

02316 extra_size += ost->st->codec->extradata_size;

02317

02318 if (ost->st->codec->me_threshold)

02319 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;

02320 }

02321 }

02322

02323 /* init input streams */

02324 for (i = 0; i < nb_input_streams; i++)

02325 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)

02326 goto dump_format;

02327

02328 /* discard unused programs */

02329 for (i = 0; i < nb_input_files; i++) {

02330 InputFile *ifile = input_files[i];

02331 for (j = 0; j < ifile->ctx->nb_programs; j++) {

02332 AVProgram *p = ifile->ctx->programs[j];

02333 int discard = AVDISCARD_ALL;

02334

02335 for (k = 0; k < p->nb_stream_indexes; k++)

02336 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {

02337 discard = AVDISCARD_DEFAULT;

02338 break;

02339 }

02340 p->discard = discard;

02341 }

02342 }

02343

02344 /* open files and write file headers */

02345 for (i = 0; i < nb_output_files; i++) {

02346 oc = output_files[i]->ctx;

02347 oc->interrupt_callback = int_cb;

02348 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {

02349 char errbuf[128];

02350 const char *errbuf_ptr = errbuf;

02351 if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)

02352 errbuf_ptr = strerror(AVUNERROR(ret));

02353 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?): %s", i, errbuf_ptr);

02354 ret = AVERROR(EINVAL);

02355 goto dump_format;

02356 }

02357 // assert_avoptions(output_files[i]->opts);

02358 if (strcmp(oc->oformat->name, "rtp")) {

02359 want_sdp = 0;

02360 }

02361 }

02362

02363 dump_format:

02364 /* dump the file output parameters - cannot be done before in case

02365 of stream copy */

02366 for (i = 0; i < nb_output_files; i++) {

02367 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);

02368 }

02369

02370 /* dump the stream mapping */

02371 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");

02372 for (i = 0; i < nb_input_streams; i++) {

02373 ist = input_streams[i];

02374

02375 for (j = 0; j < ist->nb_filters; j++) {

02376 if (ist->filters[j]->graph->graph_desc) {

02377 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",

02378 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",

02379 ist->filters[j]->name);

02380 if (nb_filtergraphs > 1)

02381 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);

02382 av_log(NULL, AV_LOG_INFO, "\n");

02383 }

02384 }

02385 }

02386

02387 for (i = 0; i < nb_output_streams; i++) {

02388 ost = output_streams[i];

02389

02390 if (ost->attachment_filename) {

02391 /* an attached file */

02392 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",

02393 ost->attachment_filename, ost->file_index, ost->index);

02394 continue;

02395 }

02396

02397 if (ost->filter && ost->filter->graph->graph_desc) {

02398 /* output from a complex graph */

02399 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);

02400 if (nb_filtergraphs > 1)

02401 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

02402

02403 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,

02404 ost->index, ost->enc ? ost->enc->name : "?");

02405 continue;

02406 }

02407

02408 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",

02409 input_streams[ost->source_index]->file_index,

02410 input_streams[ost->source_index]->st->index,

02411 ost->file_index,

02412 ost->index);

02413 if (ost->sync_ist != input_streams[ost->source_index])

02414 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",

02415 ost->sync_ist->file_index,

02416 ost->sync_ist->st->index);

02417 if (ost->stream_copy)

02418 av_log(NULL, AV_LOG_INFO, " (copy)");

02419 else

02420 av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?

02421 input_streams[ost->source_index]->dec->name : "?",

02422 ost->enc ? ost->enc->name : "?");

02423 av_log(NULL, AV_LOG_INFO, "\n");

02424 }

02425

02426 if (ret) {

02427 av_log(NULL, AV_LOG_ERROR, "%s\n", error);

02428 return ret;

02429 }

02430

02431 if (want_sdp) {

02432 print_sdp();

02433 }

02434

02435 return 0;

02436 }
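
/* transcode_init() runs in this order: start times for -re rate emulation,
 * configuration of complex filtergraphs, per-output-stream setup (codec
 * parameters are copied from the input for stream copy, otherwise a simple
 * filtergraph and an encoder are set up), opening of all encoders and
 * decoders, discarding of unused programs, and finally writing each output
 * header before the stream mapping is logged. */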

02437

02438 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */

02439 static int need_output(void)

02440 {

02441 int i;

02442

02443 for (i = 0; i < nb_output_streams; i++) {

02444 OutputStream *ost = output_streams[i];

02445 OutputFile *of = output_files[ost->file_index];

02446 AVFormatContext *os = output_files[ost->file_index]->ctx;

02447

02448 if (ost->finished ||

02449 (os->pb && avio_tell(os->pb) >= of->limit_filesize))

02450 continue;

02451 if (ost->frame_number >= ost->max_frames) {

02452 int j;

02453 for (j = 0; j < of->ctx->nb_streams; j++)

02454 close_output_stream(output_streams[of->ost_index + j]);

02455 continue;

02456 }

02457

02458 return 1;

02459 }

02460

02461 return 0;

02462 }

02463

02469 static OutputStream *choose_output(void)

02470 {

02471 int i;

02472 int64_t opts_min = INT64_MAX;

02473 OutputStream *ost_min = NULL;

02474

02475 for (i = 0; i < nb_output_streams; i++) {

02476 OutputStream *ost = output_streams[i];

02477 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,

02478 AV_TIME_BASE_Q);

02479 if (!ost->unavailable && !ost->finished && opts < opts_min) {

02480 opts_min = opts;

02481 ost_min = ost;

02482 }

02483 }

02484 return ost_min;

02485 }
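
/* choose_output() picks the output stream whose last muxed DTS, rescaled to
 * AV_TIME_BASE_Q, is the smallest among streams that are neither finished nor
 * temporarily unavailable, so the main loop always services the stream that is
 * furthest behind and keeps the outputs interleaved in time. */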

02486

02487 static int check_keyboard_interaction(int64_t cur_time)

02488 {

02489 int i, ret, key;

02490 static int64_t last_time;

02491 if (received_nb_signals)

02492 return AVERROR_EXIT;

02493 /* read_key() returns 0 on EOF */

02494 if(cur_time - last_time >= 100000 && !run_as_daemon){

02495 key = read_key();

02496 last_time = cur_time;

02497 }else

02498 key = -1;

02499 if (key == 'q')

02500 return AVERROR_EXIT;

02501 if (key == '+') av_log_set_level(av_log_get_level()+10);

02502 if (key == '-') av_log_set_level(av_log_get_level()-10);

02503 if (key == 's') qp_hist ^= 1;

02504 if (key == 'h'){

02505 if (do_hex_dump){

02506 do_hex_dump = do_pkt_dump = 0;

02507 } else if(do_pkt_dump){

02508 do_hex_dump = 1;

02509 } else

02510 do_pkt_dump = 1;

02511 av_log_set_level(AV_LOG_DEBUG);

02512 }

02513 if (key == 'c' || key == 'C'){

02514 char buf[4096], target[64], command[256], arg[256] = {0};

02515 double time;

02516 int k, n = 0;

02517 fprintf(stderr, "\nEnter command: [ ]\n");

02518 i = 0;

02519 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)

02520 if (k > 0)

02521 buf[i++] = k;

02522 buf[i] = 0;

02523 if (k > 0 &&

02524 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {

02525 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",

02526 target, time, command, arg);

02527 for (i = 0; i < nb_filtergraphs; i++) {

02528 FilterGraph *fg = filtergraphs[i];

02529 if (fg->graph) {

02530 if (time < 0) {

02531 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),

02532 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);

02533 fprintf(stderr, "Command reply for stream %d: ret:%d res:%s\n", i, ret, buf);

02534 } else {

02535 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);

02536 }

02537 }

02538 }

02539 } else {

02540 av_log(NULL, AV_LOG_ERROR,

02541 "Parse error, at least 3 arguments were expected, "

02542 "only %d given in string '%s'\n", n, buf);

02543 }

02544 }

02545 if (key == 'd' || key == 'D'){

02546 int debug=0;

02547 if(key == 'D') {

02548 debug = input_streams[0]->st->codec->debug<<1;

02549 if(!debug) debug = 1;

02550 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash

02551 debug += debug;

02552 }else

02553 if(scanf("%d", &debug)!=1)

02554 fprintf(stderr,"error parsing debug value\n");

02555 for(i=0;i<nb_input_streams;i++) {

02556 input_streams[i]->st->codec->debug = debug;

02557 }

02558 for(i=0;i<nb_output_streams;i++) {

02559 OutputStream *ost = output_streams[i];

02560 ost->st->codec->debug = debug;

02561 }

02562 if(debug) av_log_set_level(AV_LOG_DEBUG);

02563 fprintf(stderr,"debug=%d\n", debug);

02564 }

02565 if (key == '?'){

02566 fprintf(stderr, "key function\n"

02567 "? show this help\n"

02568 "+ increase verbosity\n"

02569 "- decrease verbosity\n"

02570 "c Send command to filtergraph\n"

02571 "D cycle through available debug modes\n"

02572 "h dump packets/hex press to cycle through the 3 states\n"

02573 "q quit\n"

02574 "s Show QP histogram\n"

02575 );

02576 }

02577 return 0;

02578 }

02579

02580 #if HAVE_PTHREADS

02581 static void *input_thread(void *arg)

02582 {

02583 InputFile *f = arg;

02584 int ret = 0;

02585

02586 while (!transcoding_finished && ret >= 0) {

02587 AVPacket pkt;

02588 ret = av_read_frame(f->ctx, &pkt);

02589

02590 if (ret == AVERROR(EAGAIN)) {

02591 av_usleep(10000);

02592 ret = 0;

02593 continue;

02594 } else if (ret < 0)

02595 break;

02596

02597 pthread_mutex_lock(&f->fifo_lock);

02598 while (!av_fifo_space(f->fifo))

02599 pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);

02600

02601 av_dup_packet(&pkt);

02602 av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);

02603

02604 pthread_mutex_unlock(&f->fifo_lock);

02605 }

02606

02607 f->finished = 1;

02608 return NULL;

02609 }

02610

02611 static void free_input_threads(void)

02612 {

02613 int i;

02614

02615 if (nb_input_files == 1)

02616 return;

02617

02618 transcoding_finished = 1;

02619

02620 for (i = 0; i < nb_input_files; i++) {

02621 InputFile *f = input_files[i];

02622 AVPacket pkt;

02623

02624 if (!f->fifo || f->joined)

02625 continue;

02626

02627 pthread_mutex_lock(&f->fifo_lock);

02628 while (av_fifo_size(f->fifo)) {

02629 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);

02630 av_free_packet(&pkt);

02631 }

02632 pthread_cond_signal(&f->fifo_cond);

02633 pthread_mutex_unlock(&f->fifo_lock);

02634

02635 pthread_join(f->thread, NULL);

02636 f->joined = 1;

02637

02638 while (av_fifo_size(f->fifo)) {

02639 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);

02640 av_free_packet(&pkt);

02641 }

02642 av_fifo_free(f->fifo);

02643 }

02644 }

02645

02646 static int init_input_threads(void)

02647 {

02648 int i, ret;

02649

02650 if (nb_input_files == 1)

02651 return 0;

02652

02653 for (i = 0; i < nb_input_files; i++) {

02654 InputFile *f = input_files[i];

02655

02656 if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))

02657 return AVERROR(ENOMEM);

02658

02659 pthread_mutex_init(&f->fifo_lock, NULL);

02660 pthread_cond_init (&f->fifo_cond, NULL);

02661

02662 if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))

02663 return AVERROR(ret);

02664 }

02665 return 0;

02666 }

02667

02668 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)

02669 {

02670 int ret = 0;

02671

02672 pthread_mutex_lock(&f->fifo_lock);

02673

02674 if (av_fifo_size(f->fifo)) {

02675 av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);

02676 pthread_cond_signal(&f->fifo_cond);

02677 } else {

02678 if (f->finished)

02679 ret = AVERROR_EOF;

02680 else

02681 ret = AVERROR(EAGAIN);

02682 }

02683

02684 pthread_mutex_unlock(&f->fifo_lock);

02685

02686 return ret;

02687 }

02688 #endif
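
/* Threaded demuxing (only used when there is more than one input file): each
 * input_thread() duplicates the packets returned by av_read_frame() with
 * av_dup_packet() and queues them into f->fifo under f->fifo_lock, waiting on
 * f->fifo_cond when the 8-packet FIFO is full; get_input_packet_mt() pops a
 * packet from the same FIFO and signals the condition so the reader thread
 * can continue. */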

02689

02690 static int get_input_packet(InputFile *f, AVPacket *pkt)

02691 {

02692 #if HAVE_PTHREADS

02693 if (nb_input_files > 1)

02694 return get_input_packet_mt(f, pkt);

02695 #endif

02696 return av_read_frame(f->ctx, pkt);

02697 }

02698

02699 static int got_eagain(void)

02700 {

02701 int i;

02702 for (i = 0; i < nb_output_streams; i++)

02703 if (output_streams[i]->unavailable)

02704 return 1;

02705 return 0;

02706 }

02707

02708 static void reset_eagain(void)

02709 {

02710 int i;

02711 for (i = 0; i < nb_input_files; i++)

02712 input_files[i]->eagain = 0;

02713 for (i = 0; i < nb_output_streams; i++)

02714 output_streams[i]->unavailable = 0;

02715 }

02716

02717 /*

02718 * Return

02719 * - 0 -- one packet was read and processed

02720 * - AVERROR(EAGAIN) -- no packets were available for selected file,

02721 * this function should be called again

02722 * - AVERROR_EOF -- this function should not be called again

02723 */

02724 static int process_input(int file_index)

02725 {

02726 InputFile *ifile = input_files[file_index];

02727 AVFormatContext *is;

02728 InputStream *ist;

02729 AVPacket pkt;

02730 int ret, i, j;

02731

02732 is = ifile->ctx;

02733 ret = get_input_packet(ifile, &pkt);

02734

02735 if (ret == AVERROR(EAGAIN)) {

02736 ifile->eagain = 1;

02737 return ret;

02738 }

02739 if (ret < 0) {

02740 if (ret != AVERROR_EOF) {

02741 print_error(is->filename, ret);

02742 if (exit_on_error)

02743 exit(1);

02744 }

02745 ifile->eof_reached = 1;

02746

02747 for (i = 0; i < ifile->nb_streams; i++) {

02748 ist = input_streams[ifile->ist_index + i];

02749 if (ist->decoding_needed)

02750 output_packet(ist, NULL);

02751

02752 /* mark all outputs that don't go through lavfi as finished */

02753 for (j = 0; j < nb_output_streams; j++) {

02754 OutputStream *ost = output_streams[j];

02755

02756 if (ost->source_index == ifile->ist_index + i &&

02757 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))

02758 close_output_stream(ost);

02759 }

02760 }

02761

02762 return AVERROR(EAGAIN);

02763 }

02764

02765 reset_eagain();

02766

02767 if (do_pkt_dump) {

02768 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,

02769 is->streams[pkt.stream_index]);

02770 }

02771 /* the following test is needed in case new streams appear

02772 dynamically in the stream: we ignore them */

02773 if (pkt.stream_index >= ifile->nb_streams) {

02774 report_new_stream(file_index, &pkt);

02775 goto discard_packet;

02776 }

02777

02778 ist = input_streams[ifile->ist_index + pkt.stream_index];

02779 if (ist->discard)

02780 goto discard_packet;

02781

02782 if (debug_ts) {

02783 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "

02784 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",

02785 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),

02786 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),

02787 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),

02788 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),

02789 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),

02790 av_ts2str(input_files[ist->file_index]->ts_offset),

02791 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));

02792 }

02793

02794 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){

02795 int64_t stime, stime2;

02796 // Correcting starttime based on the enabled streams

02797 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.

02798 // so we instead do it here as part of discontinuity handling

02799 if ( ist->next_dts == AV_NOPTS_VALUE

02800 && ifile->ts_offset == -is->start_time

02801 && (is->iformat->flags & AVFMT_TS_DISCONT)) {

02802 int64_t new_start_time = INT64_MAX;

02803 for (i=0; i<is->nb_streams; i++) {

02804 AVStream *st = is->streams[i];

02805 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)

02806 continue;

02807 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));

02808 }

02809 if (new_start_time > is->start_time) {

02810 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);

02811 ifile->ts_offset = -new_start_time;

02812 }

02813 }

02814

02815 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);

02816 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);

02817 ist->wrap_correction_done = 1;

02818

02819 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {

02820 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;

02821 ist->wrap_correction_done = 0;

02822 }

02823 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {

02824 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;

02825 ist->wrap_correction_done = 0;

02826 }

02827 }

02828

02829 if (pkt.dts != AV_NOPTS_VALUE)

02830 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

02831 if (pkt.pts != AV_NOPTS_VALUE)

02832 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

02833

02834 if (pkt.pts != AV_NOPTS_VALUE)

02835 pkt.pts *= ist->ts_scale;

02836 if (pkt.dts != AV_NOPTS_VALUE)

02837 pkt.dts *= ist->ts_scale;

02838

02839 if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&

02840 !copy_ts) {

02841 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

02842 int64_t delta = pkt_dts - ist->next_dts;

02843 if (is->iformat->flags & AVFMT_TS_DISCONT) {

02844 if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||

02845 (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&

02846 ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||

02847 pkt_dts+1<ist->pts){

02848 ifile->ts_offset -= delta;

02849 av_log(NULL, AV_LOG_DEBUG,

02850 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",

02851 delta, ifile->ts_offset);

02852 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);

02853 if (pkt.pts != AV_NOPTS_VALUE)

02854 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);

02855 }

02856 } else {

02857 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||

02858 (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)

02859 ) {

02860 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);

02861 pkt.dts = AV_NOPTS_VALUE;

02862 }

02863 if (pkt.pts != AV_NOPTS_VALUE){

02864 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);

02865 delta = pkt_pts - ist->next_dts;

02866 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||

02867 (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)

02868 ) {

02869 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);

02870 pkt.pts = AV_NOPTS_VALUE;

02871 }

02872 }

02873 }

02874 }

02875

02876 if (debug_ts) {

02877 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",

02878 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),

02879 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),

02880 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),

02881 av_ts2str(input_files[ist->file_index]->ts_offset),

02882 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));

02883 }

02884

02885 sub2video_heartbeat(ist, pkt.pts);

02886

02887 ret = output_packet(ist, &pkt);

02888 if (ret < 0) {

02889 char buf[128];

02890 av_strerror(ret, buf, sizeof(buf));

02891 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",

02892 ist->file_index, ist->st->index, buf);

02893 if (exit_on_error)

02894 exit(1);

02895 }

02896

02897 discard_packet:

02898 av_free_packet(&pkt);

02899

02900 return 0;

02901 }
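
/* Timestamp handling in process_input(), in order: optional start-time
 * correction and pts_wrap_bits wrap-around compensation, the per-file
 * ts_offset, scaling by ist->ts_scale, then discontinuity handling: for
 * AVFMT_TS_DISCONT formats the file offset is adjusted when the DTS jump
 * exceeds dts_delta_threshold seconds, while for other formats DTS/PTS values
 * further than dts_error_threshold seconds from the prediction are dropped
 * (replaced with AV_NOPTS_VALUE). */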

02902

02910 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)

02911 {

02912 int i, ret;

02913 int nb_requests, nb_requests_max = 0;

02914 InputFilter *ifilter;

02915 InputStream *ist;

02916

02917 *best_ist = NULL;

02918 ret = avfilter_graph_request_oldest(graph->graph);

02919 if (ret >= 0)

02920 return reap_filters();

02921

02922 if (ret == AVERROR_EOF) {

02923 ret = reap_filters();

02924 for (i = 0; i < graph->nb_outputs; i++)

02925 close_output_stream(graph->outputs[i]->ost);

02926 return ret;

02927 }

02928 if (ret != AVERROR(EAGAIN))

02929 return ret;

02930

02931 for (i = 0; i < graph->nb_inputs; i++) {

02932 ifilter = graph->inputs[i];

02933 ist = ifilter->ist;

02934 if (input_files[ist->file_index]->eagain ||

02935 input_files[ist->file_index]->eof_reached)

02936 continue;

02937 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);

02938 if (nb_requests > nb_requests_max) {

02939 nb_requests_max = nb_requests;

02940 *best_ist = ist;

02941 }

02942 }

02943

02944 if (!*best_ist)

02945 for (i = 0; i < graph->nb_outputs; i++)

02946 graph->outputs[i]->ost->unavailable = 1;

02947

02948 return 0;

02949 }

02950

02956 static int transcode_step(void)

02957 {

02958 OutputStream *ost;

02959 InputStream *ist;

02960 int ret;

02961

02962 ost = choose_output();

02963 if (!ost) {

02964 if (got_eagain()) {

02965 reset_eagain();

02966 av_usleep(10000);

02967 return 0;

02968 }

02969 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");

02970 return AVERROR_EOF;

02971 }

02972

02973 if (ost->filter) {

02974 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)

02975 return ret;

02976 if (!ist)

02977 return 0;

02978 } else {

02979 av_assert0(ost->source_index >= 0);

02980 ist = input_streams[ost->source_index];

02981 }

02982

02983 ret = process_input(ist->file_index);

02984 if (ret == AVERROR(EAGAIN)) {

02985 if (input_files[ist->file_index]->eagain)

02986 ost->unavailable = 1;

02987 return 0;

02988 }

02989 if (ret < 0)

02990 return ret == AVERROR_EOF ? 0 : ret;

02991

02992 return reap_filters();

02993 }
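
/* One transcode_step(): choose_output() selects the most-lagging output
 * stream, transcode_from_filter() asks its filtergraph for a frame (or the
 * source input stream is used directly), process_input() demuxes and decodes
 * one packet from that input, and reap_filters() then drains whatever the
 * filtergraphs produced. */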

02994

02995 /*

02996 * The following code is the main loop of the file converter

02997 */

02998 static int transcode(void)

02999 {

03000 int ret, i;

03001 AVFormatContext *os;

03002 OutputStream *ost;

03003 InputStream *ist;

03004 int64_t timer_start;

03005

03006 ret = transcode_init();

03007 if (ret < 0)

03008 goto fail;

03009

03010 if (stdin_interaction) {

03011 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");

03012 }

03013

03014 timer_start = av_gettime();

03015

03016 #if HAVE_PTHREADS

03017 if ((ret = init_input_threads()) < 0)

03018 goto fail;

03019 #endif

03020

03021 while (!received_sigterm) {

03022 int64_t cur_time= av_gettime();

03023

03024 /* if 'q' pressed, exits */

03025 if (stdin_interaction)

03026 if (check_keyboard_interaction(cur_time) < 0)

03027 break;

03028

03029 /* check if there's any stream where output is still needed */

03030 if (!need_output()) {

03031 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");

03032 break;

03033 }

03034

03035 ret = transcode_step();

03036 if (ret < 0) {

03037 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))

03038 continue;

03039

03040 av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");

03041 break;

03042 }

03043

03044 /* dump report by using the output first video and audio streams */

03045 print_report(0, timer_start, cur_time);

03046 }

03047 #if HAVE_PTHREADS

03048 free_input_threads();

03049 #endif

03050

03051 /* at the end of stream, we must flush the decoder buffers */

03052 for (i = 0; i < nb_input_streams; i++) {

03053 ist = input_streams[i];

03054 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {

03055 output_packet(ist, NULL);

03056 }

03057 }

03058 flush_encoders();

03059

03060 term_exit();

03061

03062 /* write the trailer if needed and close file */

03063 for (i = 0; i < nb_output_files; i++) {

03064 os = output_files[i]->ctx;

03065 av_write_trailer(os);

03066 }

03067

03068 /* dump report by using the first video and audio streams */

03069 print_report(1, timer_start, av_gettime());

03070

03071 /* close each encoder */

03072 for (i = 0; i < nb_output_streams; i++) {

03073 ost = output_streams[i];

03074 if (ost->encoding_needed) {

03075 av_freep(&ost->st->codec->stats_in);

03076 avcodec_close(ost->st->codec);

03077 }

03078 }

03079

03080 /* close each decoder */

03081 for (i = 0; i < nb_input_streams; i++) {

03082 ist = input_streams[i];

03083 if (ist->decoding_needed) {

03084 avcodec_close(ist->st->codec);

03085 }

03086 }

03087

03088 /* finished ! */

03089 ret = 0;

03090

03091 fail:

03092 #if HAVE_PTHREADS

03093 free_input_threads();

03094 #endif

03095

03096 if (output_streams) {

03097 for (i = 0; i < nb_output_streams; i++) {

03098 ost = output_streams[i];

03099 if (ost) {

03100 if (ost->stream_copy)

03101 av_freep(&ost->st->codec->extradata);

03102 if (ost->logfile) {

03103 fclose(ost->logfile);

03104 ost->logfile = NULL;

03105 }

03106 av_freep(&ost->st->codec->subtitle_header);

03107 av_free(ost->forced_kf_pts);

03108 av_dict_free(&ost->opts);

03109 }

03110 }

03111 }

03112 return ret;

03113 }

03114

03115

03116 static int64_t getutime(void)

03117 {

03118 #if HAVE_GETRUSAGE

03119 struct rusage rusage;

03120

03121 getrusage(RUSAGE_SELF, &rusage);

03122 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;

03123 #elif HAVE_GETPROCESSTIMES

03124 HANDLE proc;

03125 FILETIME c, e, k, u;

03126 proc = GetCurrentProcess();

03127 GetProcessTimes(proc, &c, &e, &k, &u);

03128 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;

03129 #else

03130 return av_gettime();

03131 #endif

03132 }

03133

03134 static int64_t getmaxrss(void)

03135 {

03136 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS

03137 struct rusage rusage;

03138 getrusage(RUSAGE_SELF, &rusage);

03139 return (int64_t)rusage.ru_maxrss * 1024;

03140 #elif HAVE_GETPROCESSMEMORYINFO

03141 HANDLE proc;

03142 PROCESS_MEMORY_COUNTERS memcounters;

03143 proc = GetCurrentProcess();

03144 memcounters.cb = sizeof(memcounters);

03145 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));

03146 return memcounters.PeakPagefileUsage;

03147 #else

03148 return 0;

03149 #endif

03150 }

03151

03152 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)

03153 {

03154 }

03155

03156 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)

03157 {

03158 int idx = locate_option(argc, argv, options, "cpuflags");

03159 if (idx && argv[idx + 1])

03160 opt_cpuflags(NULL, "cpuflags", argv[idx + 1]);

03161 }

03162

03163 int main(int argc, char **argv)

03164 {

03165 OptionsContext o = { 0 };

03166 int64_t ti;

03167

03168 atexit(exit_program);

03169

03170 reset_options(&o, 0);

03171

03172 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

03173

03174 av_log_set_flags(AV_LOG_SKIP_REPEATED);

03175 parse_loglevel(argc, argv, options);

03176

03177 if(argc>1 && !strcmp(argv[1], "-d")){

03178 run_as_daemon=1;

03179 av_log_set_callback(log_callback_null);

03180 argc--;

03181 argv++;

03182 }

03183

03184 avcodec_register_all();

03185 #if CONFIG_AVDEVICE

03186 avdevice_register_all();

03187 #endif

03188 avfilter_register_all();

03189 av_register_all();

03190 avformat_network_init();

03191

03192 show_banner(argc, argv, options);

03193

03194 term_init();

03195

03196 parse_cpuflags(argc, argv, options);

03197

03198 /* parse options */

03199 parse_options(&o, argc, argv, options, opt_output_file);

03200

03201 if (nb_output_files <= 0 && nb_input_files == 0) {

03202 show_usage();

03203 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);

03204 exit(1);

03205 }

03206

03207 /* file converter / grab */

03208 if (nb_output_files <= 0) {

03209 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");

03210 exit(1);

03211 }

03212

03213 // if (nb_input_files == 0) {

03214 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");

03215 // exit(1);

03216 // }

03217

03218 current_time = ti = getutime();

03219 if (transcode() < 0)

03220 exit(1);

03221 ti = getutime() - ti;

03222 if (do_benchmark) {

03223 int maxrss = getmaxrss() / 1024;

03224 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);

03225 }

03226

03227 exit(0);

03228 return 0;

03229 }
