1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #if HAVE_ISATTY
34 #if HAVE_IO_H
35 #include <io.h>
36 #endif
37 #if HAVE_UNISTD_H
38 #include <unistd.h>
39 #endif
40 #endif
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/colorspace.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavformat/os_support.h"
62
63 #include "libavformat/ffm.h" // not public API
64
65 # include "libavfilter/avcodec.h"
66 # include "libavfilter/avfilter.h"
67 # include "libavfilter/avfiltergraph.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
70
71 #if HAVE_SYS_RESOURCE_H
72 #include <sys/time.h>
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
76 #include <windows.h>
77 #endif
78 #if HAVE_GETPROCESSMEMORYINFO
79 #include <windows.h>
80 #include <psapi.h>
81 #endif
82
83 #if HAVE_SYS_SELECT_H
84 #include <sys/select.h>
85 #endif
86
87 #if HAVE_TERMIOS_H
88 #include <fcntl.h>
89 #include <sys/ioctl.h>
90 #include <sys/time.h>
91 #include <termios.h>
92 #elif HAVE_KBHIT
93 #include <conio.h>
94 #endif
95
96 #if HAVE_PTHREADS
97 #include <pthread.h>
98 #endif
99
100 #include <time.h>
101
102 #include "ffmpeg.h"
103 #include "cmdutils.h"
104
105 #include "libavutil/avassert.h"
106
107 const char program_name[] = "ffmpeg";
108 const int program_birth_year = 2000;
109
110 static FILE *vstats_file;
111
112 const char *const forced_keyframes_const_names[] = {
113     "n",
114     "n_forced",
115     "prev_forced_n",
116     "prev_forced_t",
117     "t",
118     NULL
119 };
120
121 static void do_video_stats(OutputStream *ost, int frame_size);
122 static int64_t getutime(void);
123
124 static int run_as_daemon  = 0;
125 static int64_t video_size = 0;
126 static int64_t audio_size = 0;
127 static int64_t subtitle_size = 0;
128 static int64_t extra_size = 0;
129 static int nb_frames_dup = 0;
130 static int nb_frames_drop = 0;
131
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
134
135 static uint8_t *subtitle_out;
136
137 #if HAVE_PTHREADS
138 /* signal to input threads that they should exit; set by the main thread */
139 static int transcoding_finished;
140 #endif
141
142 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
143
144 InputStream **input_streams = NULL;
145 int        nb_input_streams = 0;
146 InputFile   **input_files   = NULL;
147 int        nb_input_files   = 0;
148
149 OutputStream **output_streams = NULL;
150 int         nb_output_streams = 0;
151 OutputFile   **output_files   = NULL;
152 int         nb_output_files   = 0;
153
154 FilterGraph **filtergraphs;
155 int        nb_filtergraphs;
156
157 #if HAVE_TERMIOS_H
158
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
162 #endif
163
164
165 /* sub2video hack:
166    Convert subtitles to video with alpha so they can be inserted into filter graphs.
167    This is a temporary solution until libavfilter gets real subtitle support.
168  */
169
170
171
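    /* Copy one bitmap subtitle rectangle onto the sub2video canvas: each PAL8
       source byte is expanded through the rectangle's palette into a 32-bit
       pixel, after checking that the rectangle fits inside the w x h canvas. */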
172 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
173                                 AVSubtitleRect *r)
174 {
175     uint32_t *pal, *dst2;
176     uint8_t *src, *src2;
177     int x, y;
178
179     if (r->type != SUBTITLE_BITMAP) {
180         av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
181         return;
182     }
183     if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
184         av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
185         return;
186     }
187
188     dst += r->y * dst_linesize + r->x * 4;
189     src = r->pict.data[0];
190     pal = (uint32_t *)r->pict.data[1];
191     for (y = 0; y < r->h; y++) {
192         dst2 = (uint32_t *)dst;
193         src2 = src;
194         for (x = 0; x < r->w; x++)
195             *(dst2++) = pal[*(src2++)];
196         dst += dst_linesize;
197         src += r->pict.linesize[0];
198     }
199 }
200
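    /* Stamp the cached sub2video buffer with the given pts and push one
       reference to it into every filter fed by this input stream. */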
201 static void sub2video_push_ref(InputStream *ist, int64_t pts)
202 {
203     AVFilterBufferRef *ref = ist->sub2video.ref;
204     int i;
205
206     ist->sub2video.last_pts = ref->pts = pts;
207     for (i = 0; i < ist->nb_filters; i++)
208         av_buffersrc_add_ref(ist->filters[i]->filter,
209                              avfilter_ref_buffer(ref, ~0),
210                              AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
211                              AV_BUFFERSRC_FLAG_NO_COPY |
212                              AV_BUFFERSRC_FLAG_PUSH);
213 }
214
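    /* Render an AVSubtitle onto the sub2video canvas and push it into the
       filters: clear the canvas, blit every rectangle, and send the frame
       with the subtitle's start time.  A NULL sub just pushes a blank
       canvas starting at the previous subtitle's end time. */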
215 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
216 {
217     int w = ist->sub2video.w, h = ist->sub2video.h;
218     AVFilterBufferRef *ref = ist->sub2video.ref;
219     uint8_t *dst;
220     int     dst_linesize;
221     int num_rects, i;
222     int64_t pts, end_pts;
223
224     if (!ref)
225         return;
226     if (sub) {
227         pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000,
228                                  AV_TIME_BASE_Q, ist->st->time_base);
229         end_pts   = av_rescale_q(sub->pts + sub->end_display_time   * 1000,
230                                  AV_TIME_BASE_Q, ist->st->time_base);
231         num_rects = sub->num_rects;
232     } else {
233         pts       = ist->sub2video.end_pts;
234         end_pts   = INT64_MAX;
235         num_rects = 0;
236     }
237     dst          = ref->data    [0];
238     dst_linesize = ref->linesize[0];
239     memset(dst, 0, h * dst_linesize);
240     for (i = 0; i < num_rects; i++)
241         sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
242     sub2video_push_ref(ist, pts);
243     ist->sub2video.end_pts = end_pts;
244 }
245
246 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
247 {
248     InputFile *infile = input_files[ist->file_index];
249     int i, j, nb_reqs;
250     int64_t pts2;
251
252     /* When a frame is read from a file, examine all sub2video streams in
253        the same file and send the sub2video frame again. Otherwise, decoded
254        video frames could be accumulating in the filter graph while a filter
255        (possibly overlay) is desperately waiting for a subtitle frame. */
256     for (i = 0; i < infile->nb_streams; i++) {
257         InputStream *ist2 = input_streams[infile->ist_index + i];
258         if (!ist2->sub2video.ref)
259             continue;
260         /* subtitles usually seem to be muxed ahead of other streams;
261            if not, subtracting a larger time here is necessary */
262         pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
263         /* do not send the heartbeat frame if the subtitle is already ahead */
264         if (pts2 <= ist2->sub2video.last_pts)
265             continue;
266         if (pts2 >= ist2->sub2video.end_pts)
267             sub2video_update(ist2, NULL);
268         for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
269             nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
270         if (nb_reqs)
271             sub2video_push_ref(ist2, pts2);
272     }
273 }
274
275 static void sub2video_flush(InputStream *ist)
276 {
277     int i;
278
279     for (i = 0; i < ist->nb_filters; i++)
280         av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
281 }
282
283 /* end of sub2video hack */
284
285 void term_exit(void)
286 {
287     av_log(NULL, AV_LOG_QUIET, "%s", "");
288 #if HAVE_TERMIOS_H
289     if(restore_tty)
290         tcsetattr (0, TCSANOW, &oldtty);
291 #endif
292 }
293
294 static volatile int received_sigterm = 0;
295 static volatile int received_nb_signals = 0;
296
297 static void
298 sigterm_handler(int sig)
299 {
300     received_sigterm = sig;
301     received_nb_signals++;
302     term_exit();
303     if(received_nb_signals > 3)
304         exit(123);
305 }
306
307 void term_init(void)
308 {
309 #if HAVE_TERMIOS_H
310     if(!run_as_daemon){
311         struct termios tty;
312         int istty = 1;
313 #if HAVE_ISATTY
314         istty = isatty(0) && isatty(2);
315 #endif
316         if (istty && tcgetattr (0, &tty) == 0) {
317             oldtty = tty;
318             restore_tty = 1;
319             atexit(term_exit);
320
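                /* switch the terminal to a raw-like mode: no canonical input,
                   no echo and no flow control, so read_key() can see single
                   keypresses immediately */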
321             tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
322                              |INLCR|IGNCR|ICRNL|IXON);
323             tty.c_oflag |= OPOST;
324             tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
325             tty.c_cflag &= ~(CSIZE|PARENB);
326             tty.c_cflag |= CS8;
327             tty.c_cc[VMIN] = 1;
328             tty.c_cc[VTIME] = 0;
329
330             tcsetattr (0, TCSANOW, &tty);
331         }
332         signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
333     }
334 #endif
336
337     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
338     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
339 #ifdef SIGXCPU
340     signal(SIGXCPU, sigterm_handler);
341 #endif
342 }
343
344 /* read a key without blocking */
345 static int read_key(void)
346 {
347     unsigned char ch;
348 #if HAVE_TERMIOS_H
349     int n = 1;
350     struct timeval tv;
351     fd_set rfds;
352
353     FD_ZERO(&rfds);
354     FD_SET(0, &rfds);
355     tv.tv_sec = 0;
356     tv.tv_usec = 0;
357     n = select(1, &rfds, NULL, NULL, &tv);
358     if (n > 0) {
359         n = read(0, &ch, 1);
360         if (n == 1)
361             return ch;
362
363         return n;
364     }
365 #elif HAVE_KBHIT
366 #    if HAVE_PEEKNAMEDPIPE
367     static int is_pipe;
368     static HANDLE input_handle;
369     DWORD dw, nchars;
370     if(!input_handle){
371         input_handle = GetStdHandle(STD_INPUT_HANDLE);
372         is_pipe = !GetConsoleMode(input_handle, &dw);
373     }
374
375     if (stdin->_cnt > 0) {
376         read(0, &ch, 1);
377         return ch;
378     }
379     if (is_pipe) {
380         /* When running under a GUI, you will end up here. */
381         if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
382             // input pipe may have been closed by the program that ran ffmpeg
383             return -1;
384         }
385         //Read it
386         if(nchars != 0) {
387             read(0, &ch, 1);
388             return ch;
389         }else{
390             return -1;
391         }
392     }
393 #    endif
394     if(kbhit())
395         return(getch());
396 #endif
397     return -1;
398 }
399
400 static int decode_interrupt_cb(void *ctx)
401 {
402     return received_nb_signals > 1;
403 }
404
405 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
406
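    /* Cleanup performed at program exit: free the filter graphs, close and
       free all output files and streams, close the input files and free
       per-stream data, then release option, filter and network state. */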
407 static void exit_program(void)
408 {
409     int i, j;
410
411     for (i = 0; i < nb_filtergraphs; i++) {
412         avfilter_graph_free(&filtergraphs[i]->graph);
413         for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
414             av_freep(&filtergraphs[i]->inputs[j]->name);
415             av_freep(&filtergraphs[i]->inputs[j]);
416         }
417         av_freep(&filtergraphs[i]->inputs);
418         for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
419             av_freep(&filtergraphs[i]->outputs[j]->name);
420             av_freep(&filtergraphs[i]->outputs[j]);
421         }
422         av_freep(&filtergraphs[i]->outputs);
423         av_freep(&filtergraphs[i]);
424     }
425     av_freep(&filtergraphs);
426
427     av_freep(&subtitle_out);
428
429     /* close files */
430     for (i = 0; i < nb_output_files; i++) {
431         AVFormatContext *s = output_files[i]->ctx;
432         if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
433             avio_close(s->pb);
434         avformat_free_context(s);
435         av_dict_free(&output_files[i]->opts);
436         av_freep(&output_files[i]);
437     }
438     for (i = 0; i < nb_output_streams; i++) {
439         AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
440         while (bsfc) {
441             AVBitStreamFilterContext *next = bsfc->next;
442             av_bitstream_filter_close(bsfc);
443             bsfc = next;
444         }
445         output_streams[i]->bitstream_filters = NULL;
446         avcodec_free_frame(&output_streams[i]->filtered_frame);
447
448         av_freep(&output_streams[i]->forced_keyframes);
449         av_expr_free(output_streams[i]->forced_keyframes_pexpr);
450         av_freep(&output_streams[i]->avfilter);
451         av_freep(&output_streams[i]->logfile_prefix);
452         av_freep(&output_streams[i]);
453     }
454     for (i = 0; i < nb_input_files; i++) {
455         avformat_close_input(&input_files[i]->ctx);
456         av_freep(&input_files[i]);
457     }
458     for (i = 0; i < nb_input_streams; i++) {
459         avcodec_free_frame(&input_streams[i]->decoded_frame);
460         av_dict_free(&input_streams[i]->opts);
461         free_buffer_pool(&input_streams[i]->buffer_pool);
462         avsubtitle_free(&input_streams[i]->prev_sub.subtitle);
463         avfilter_unref_bufferp(&input_streams[i]->sub2video.ref);
464         av_freep(&input_streams[i]->filters);
465         av_freep(&input_streams[i]);
466     }
467
468     if (vstats_file)
469         fclose(vstats_file);
470     av_free(vstats_filename);
471
472     av_freep(&input_streams);
473     av_freep(&input_files);
474     av_freep(&output_streams);
475     av_freep(&output_files);
476
477     uninit_opts();
478
479     avfilter_uninit();
480     avformat_network_deinit();
481
482     if (received_sigterm) {
483         av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
484                (int) received_sigterm);
485     }
486 }
487
488 void assert_avoptions(AVDictionary *m)
489 {
490     AVDictionaryEntry *t;
491     if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
492         av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
493         exit(1);
494     }
495 }
496
497 static void abort_codec_experimental(AVCodec *c, int encoder)
498 {
499     exit(1);
500 }
501
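    /* With -benchmark_all, print the time elapsed (according to getutime())
       since the previous checkpoint, tagged with the formatted label; a NULL
       fmt only resets the checkpoint. */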
502 static void update_benchmark(const char *fmt, ...)
503 {
504     if (do_benchmark_all) {
505         int64_t t = getutime();
506         va_list va;
507         char buf[1024];
508
509         if (fmt) {
510             va_start(va, fmt);
511             vsnprintf(buf, sizeof(buf), fmt, va);
512             va_end(va);
513             printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
514         }
515         current_time = t;
516     }
517 }
518
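    /* Final step before muxing: run the stream's bitstream filters on the
       packet, enforce increasing DTS when the muxer requires it, and pass the
       packet to av_interleaved_write_frame(); the -frames limit is enforced
       here for everything except encoded video (which is counted in
       do_video_out()). */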
519 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
520 {
521     AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
522     AVCodecContext          *avctx = ost->st->codec;
523     int ret;
524
525     if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
526         (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
527         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
528
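        /* if the muxer requires (strictly) increasing DTS, clamp this packet's
           DTS (and its PTS, when that keeps pts >= dts) to just after the
           stream's current DTS instead of letting the muxer reject it */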
529     if ((avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) && pkt->dts != AV_NOPTS_VALUE) {
530         int64_t max = ost->st->cur_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
531         if (ost->st->cur_dts && ost->st->cur_dts != AV_NOPTS_VALUE &&  max > pkt->dts) {
532             av_log(s, max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG,
533                    "st:%d PTS: %"PRId64" DTS: %"PRId64" < %"PRId64" invalid, clipping\n", pkt->stream_index, pkt->pts, pkt->dts, max);
534             if(pkt->pts >= pkt->dts)
535                 pkt->pts = FFMAX(pkt->pts, max);
536             pkt->dts = max;
537         }
538     }
539
540     /*
541      * Audio encoders may split the packets --  #frames in != #packets out.
542      * But there is no reordering, so we can limit the number of output packets
543      * by simply dropping them here.
544      * Counting encoded video frames needs to be done separately because of
545      * reordering, see do_video_out()
546      */
547     if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
548         if (ost->frame_number >= ost->max_frames) {
549             av_free_packet(pkt);
550             return;
551         }
552         ost->frame_number++;
553     }
554
555     while (bsfc) {
556         AVPacket new_pkt = *pkt;
557         int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
558                                            &new_pkt.data, &new_pkt.size,
559                                            pkt->data, pkt->size,
560                                            pkt->flags & AV_PKT_FLAG_KEY);
561         if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
562             uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); // the new packet should be a subset of the old one, so this cannot overflow
563             if(t) {
564                 memcpy(t, new_pkt.data, new_pkt.size);
565                 memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
566                 new_pkt.data = t;
567                 a = 1;
568             } else
569                 a = AVERROR(ENOMEM);
570         }
571         if (a > 0) {
572             av_free_packet(pkt);
573             new_pkt.destruct = av_destruct_packet;
574         } else if (a < 0) {
575             av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
576                    bsfc->filter->name, pkt->stream_index,
577                    avctx->codec ? avctx->codec->name : "copy");
578             print_error("", a);
579             if (exit_on_error)
580                 exit(1);
581         }
582         *pkt = new_pkt;
583
584         bsfc = bsfc->next;
585     }
586
587     pkt->stream_index = ost->index;
588
589     if (debug_ts) {
590         av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
591                 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
592                 av_get_media_type_string(ost->st->codec->codec_type),
593                 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
594                 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
595                 pkt->size
596               );
597     }
598
599     ret = av_interleaved_write_frame(s, pkt);
600     if (ret < 0) {
601         print_error("av_interleaved_write_frame()", ret);
602         exit(1);
603     }
604 }
605
606 static void close_output_stream(OutputStream *ost)
607 {
608     OutputFile *of = output_files[ost->file_index];
609
610     ost->finished = 1;
611     if (of->shortest) {
612         int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, AV_TIME_BASE_Q);
613         of->recording_time = FFMIN(of->recording_time, end);
614     }
615 }
616
617 static int check_recording_time(OutputStream *ost)
618 {
619     OutputFile *of = output_files[ost->file_index];
620
621     if (of->recording_time != INT64_MAX &&
622         av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
623                       AV_TIME_BASE_Q) >= 0) {
624         close_output_stream(ost);
625         return 0;
626     }
627     return 1;
628 }
629
630 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
631                          AVFrame *frame)
632 {
633     AVCodecContext *enc = ost->st->codec;
634     AVPacket pkt;
635     int got_packet = 0;
636
637     av_init_packet(&pkt);
638     pkt.data = NULL;
639     pkt.size = 0;
640
641     if (!check_recording_time(ost))
642         return;
643
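        /* sync_opts tracks the audio position in samples: use it as the frame
           pts when the frame has none (or audio_sync_method < 0), then advance
           it by this frame's sample count */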
644     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
645         frame->pts = ost->sync_opts;
646     ost->sync_opts = frame->pts + frame->nb_samples;
647
648     av_assert0(pkt.size || !pkt.data);
649     update_benchmark(NULL);
650     if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
651         av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
652         exit(1);
653     }
654     update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
655
656     if (got_packet) {
657         if (pkt.pts != AV_NOPTS_VALUE)
658             pkt.pts      = av_rescale_q(pkt.pts,      enc->time_base, ost->st->time_base);
659         if (pkt.dts != AV_NOPTS_VALUE)
660             pkt.dts      = av_rescale_q(pkt.dts,      enc->time_base, ost->st->time_base);
661         if (pkt.duration > 0)
662             pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
663
664         if (debug_ts) {
665             av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
666                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
667                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
668                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
669         }
670
671         audio_size += pkt.size;
672         write_frame(s, &pkt, ost);
673
674         av_free_packet(&pkt);
675     }
676 }
677
678 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
679 {
680     AVCodecContext *dec;
681     AVPicture *picture2;
682     AVPicture picture_tmp;
683     uint8_t *buf = 0;
684
685     dec = ist->st->codec;
686
687     /* deinterlace: must be done before any resize */
688     if (do_deinterlace) {
689         int size;
690
691         /* create temporary picture */
692         size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
693         if (size < 0)
694             return;
695         buf  = av_malloc(size);
696         if (!buf)
697             return;
698
699         picture2 = &picture_tmp;
700         avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
701
702         if (avpicture_deinterlace(picture2, picture,
703                                  dec->pix_fmt, dec->width, dec->height) < 0) {
704             /* if error, do not deinterlace */
705             av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
706             av_free(buf);
707             buf = NULL;
708             picture2 = picture;
709         }
710     } else {
711         picture2 = picture;
712     }
713
714     if (picture != picture2)
715         *picture = *picture2;
716     *bufp = buf;
717 }
718
719 static void do_subtitle_out(AVFormatContext *s,
720                             OutputStream *ost,
721                             InputStream *ist,
722                             AVSubtitle *sub)
723 {
724     int subtitle_out_max_size = 1024 * 1024;
725     int subtitle_out_size, nb, i;
726     AVCodecContext *enc;
727     AVPacket pkt;
728     int64_t pts;
729
730     if (sub->pts == AV_NOPTS_VALUE) {
731         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
732         if (exit_on_error)
733             exit(1);
734         return;
735     }
736
737     enc = ost->st->codec;
738
739     if (!subtitle_out) {
740         subtitle_out = av_malloc(subtitle_out_max_size);
741     }
742
743     /* Note: DVB subtitles need one packet to draw them and another
744        packet to clear them */
745     /* XXX: signal it in the codec context ? */
746     if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
747         nb = 2;
748     else
749         nb = 1;
750
751     /* shift timestamp to honor -ss and make check_recording_time() work with -t */
752     pts = sub->pts - output_files[ost->file_index]->start_time;
753     for (i = 0; i < nb; i++) {
754         ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
755         if (!check_recording_time(ost))
756             return;
757
758         sub->pts = pts;
759         // start_display_time is required to be 0
760         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
761         sub->end_display_time  -= sub->start_display_time;
762         sub->start_display_time = 0;
763         if (i == 1)
764             sub->num_rects = 0;
765         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
766                                                     subtitle_out_max_size, sub);
767         if (subtitle_out_size < 0) {
768             av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
769             exit(1);
770         }
771
772         av_init_packet(&pkt);
773         pkt.data = subtitle_out;
774         pkt.size = subtitle_out_size;
775         pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
776         pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
777         if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
778             /* XXX: the pts correction is handled here. Maybe handling
779                it in the codec would be better */
780             if (i == 0)
781                 pkt.pts += 90 * sub->start_display_time;
782             else
783                 pkt.pts += 90 * sub->end_display_time;
784         }
785         subtitle_size += pkt.size;
786         write_frame(s, &pkt, ost);
787     }
788 }
789
790 static void do_video_out(AVFormatContext *s,
791                          OutputStream *ost,
792                          AVFrame *in_picture)
793 {
794     int ret, format_video_sync;
795     AVPacket pkt;
796     AVCodecContext *enc = ost->st->codec;
797     int nb_frames, i;
798     double sync_ipts, delta;
799     double duration = 0;
800     int frame_size = 0;
801     InputStream *ist = NULL;
802
803     if (ost->source_index >= 0)
804         ist = input_streams[ost->source_index];
805
806     if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
807         duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));
808
809     sync_ipts = in_picture->pts;
810     delta = sync_ipts - ost->sync_opts + duration;
811
812     /* by default, we output a single frame */
813     nb_frames = 1;
814
815     format_video_sync = video_sync_method;
816     if (format_video_sync == VSYNC_AUTO)
817         format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
818
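        /* pick the number of copies of this picture to emit: CFR drops
           (delta < -1.1) or duplicates (delta > 1.1) frames to keep the output
           rate constant, VFR only drops when frames bunch up, and
           passthrough/drop simply follow the input timestamp */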
819     switch (format_video_sync) {
820     case VSYNC_CFR:
821         // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
822         if (delta < -1.1)
823             nb_frames = 0;
824         else if (delta > 1.1)
825             nb_frames = lrintf(delta);
826         break;
827     case VSYNC_VFR:
828         if (delta <= -0.6)
829             nb_frames = 0;
830         else if (delta > 0.6)
831             ost->sync_opts = lrint(sync_ipts);
832         break;
833     case VSYNC_DROP:
834     case VSYNC_PASSTHROUGH:
835         ost->sync_opts = lrint(sync_ipts);
836         break;
837     default:
838         av_assert0(0);
839     }
840
841     nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
842     if (nb_frames == 0) {
843         nb_frames_drop++;
844         av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
845         return;
846     } else if (nb_frames > 1) {
847         if (nb_frames > dts_error_threshold * 30) {
848             av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
849             nb_frames_drop++;
850             return;
851         }
852         nb_frames_dup += nb_frames - 1;
853         av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
854     }
855
856   /* duplicate the frame if needed */
857   for (i = 0; i < nb_frames; i++) {
858     av_init_packet(&pkt);
859     pkt.data = NULL;
860     pkt.size = 0;
861
862     in_picture->pts = ost->sync_opts;
863
864     if (!check_recording_time(ost))
865         return;
866
867     if (s->oformat->flags & AVFMT_RAWPICTURE &&
868         enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
869         /* raw pictures are written as an AVPicture structure to
870            avoid any copies; we temporarily support the older
871            method. */
872         enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
873         enc->coded_frame->top_field_first  = in_picture->top_field_first;
874         if (enc->coded_frame->interlaced_frame)
875             enc->field_order = enc->coded_frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
876         else
877             enc->field_order = AV_FIELD_PROGRESSIVE;
878         pkt.data   = (uint8_t *)in_picture;
879         pkt.size   =  sizeof(AVPicture);
880         pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
881         pkt.flags |= AV_PKT_FLAG_KEY;
882
883         video_size += pkt.size;
884         write_frame(s, &pkt, ost);
885     } else {
886         int got_packet, forced_keyframe = 0;
887         AVFrame big_picture;
888         double pts_time;
889
890         big_picture = *in_picture;
891         /* better than nothing: use input picture interlaced
892            settings */
893         big_picture.interlaced_frame = in_picture->interlaced_frame;
894         if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
895             if (ost->top_field_first == -1)
896                 big_picture.top_field_first = in_picture->top_field_first;
897             else
898                 big_picture.top_field_first = !!ost->top_field_first;
899         }
900
901         if (big_picture.interlaced_frame) {
902             if (enc->codec->id == AV_CODEC_ID_MJPEG)
903                 enc->field_order = big_picture.top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
904             else
905                 enc->field_order = big_picture.top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
906         } else
907             enc->field_order = AV_FIELD_PROGRESSIVE;
908
909         big_picture.quality = ost->st->codec->global_quality;
910         if (!enc->me_threshold)
911             big_picture.pict_type = 0;
912
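            /* force a keyframe either when the next -force_key_frames
               timestamp has been reached or when the forced-keyframes
               expression evaluates to non-zero for this frame */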
913         pts_time = big_picture.pts != AV_NOPTS_VALUE ?
914             big_picture.pts * av_q2d(enc->time_base) : NAN;
915         if (ost->forced_kf_index < ost->forced_kf_count &&
916             big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
917             ost->forced_kf_index++;
918             forced_keyframe = 1;
919         } else if (ost->forced_keyframes_pexpr) {
920             double res;
921             ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
922             res = av_expr_eval(ost->forced_keyframes_pexpr,
923                                ost->forced_keyframes_expr_const_values, NULL);
924             av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
925                     ost->forced_keyframes_expr_const_values[FKF_N],
926                     ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
927                     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
928                     ost->forced_keyframes_expr_const_values[FKF_T],
929                     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
930                     res);
931             if (res) {
932                 forced_keyframe = 1;
933                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
934                     ost->forced_keyframes_expr_const_values[FKF_N];
935                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
936                     ost->forced_keyframes_expr_const_values[FKF_T];
937                 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
938             }
939
940             ost->forced_keyframes_expr_const_values[FKF_N] += 1;
941         }
942         if (forced_keyframe) {
943             big_picture.pict_type = AV_PICTURE_TYPE_I;
944             av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
945         }
946
947         update_benchmark(NULL);
948         ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
949         update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
950         if (ret < 0) {
951             av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
952             exit(1);
953         }
954
955         if (got_packet) {
956             if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
957                 pkt.pts = ost->sync_opts;
958
959             if (pkt.pts != AV_NOPTS_VALUE)
960                 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
961             if (pkt.dts != AV_NOPTS_VALUE)
962                 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
963
964             if (debug_ts) {
965                 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
966                     "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
967                     av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
968                     av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
969             }
970
971             frame_size = pkt.size;
972             video_size += pkt.size;
973             write_frame(s, &pkt, ost);
974             av_free_packet(&pkt);
975
976             /* if two pass, output log */
977             if (ost->logfile && enc->stats_out) {
978                 fprintf(ost->logfile, "%s", enc->stats_out);
979             }
980         }
981     }
982     ost->sync_opts++;
983     /*
984      * For video, number of frames in == number of packets out.
985      * But there may be reordering, so we can't throw away frames on encoder
986      * flush; we need to limit them here, before they go into the encoder.
987      */
988     ost->frame_number++;
989   }
990
991     if (vstats_filename && frame_size)
992         do_video_stats(ost, frame_size);
993 }
994
995 static double psnr(double d)
996 {
997     return -10.0 * log(d) / log(10.0);
998 }
999
1000 static void do_video_stats(OutputStream *ost, int frame_size)
1001 {
1002     AVCodecContext *enc;
1003     int frame_number;
1004     double ti1, bitrate, avg_bitrate;
1005
1006     /* this is executed just the first time do_video_stats is called */
1007     if (!vstats_file) {
1008         vstats_file = fopen(vstats_filename, "w");
1009         if (!vstats_file) {
1010             perror("fopen");
1011             exit(1);
1012         }
1013     }
1014
1015     enc = ost->st->codec;
1016     if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1017         frame_number = ost->st->nb_frames;
1018         fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1019         if (enc->flags&CODEC_FLAG_PSNR)
1020             fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1021
1022         fprintf(vstats_file,"f_size= %6d ", frame_size);
1023         /* compute pts value */
1024         ti1 = ost->st->pts.val * av_q2d(enc->time_base);
1025         if (ti1 < 0.01)
1026             ti1 = 0.01;
1027
1028         bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1029         avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1030         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1031                (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1032         fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1033     }
1034 }
1035
1036 /**
1037  * Get and encode new output from any of the filtergraphs, without causing
1038  * activity (i.e. without requesting new frames from the filter inputs).
1039  *
1040  * @return  0 for success, <0 for severe errors
1041  */
1042 static int reap_filters(void)
1043 {
1044     AVFilterBufferRef *picref;
1045     AVFrame *filtered_frame = NULL;
1046     int i;
1047     int64_t frame_pts;
1048
1049     /* Reap all buffers present in the buffer sinks */
1050     for (i = 0; i < nb_output_streams; i++) {
1051         OutputStream *ost = output_streams[i];
1052         OutputFile    *of = output_files[ost->file_index];
1053         int ret = 0;
1054
1055         if (!ost->filter)
1056             continue;
1057
1058         if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1059             return AVERROR(ENOMEM);
1060         } else
1061             avcodec_get_frame_defaults(ost->filtered_frame);
1062         filtered_frame = ost->filtered_frame;
1063
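             /* drain every frame already buffered in this sink without
                requesting more (AV_BUFFERSINK_FLAG_NO_REQUEST), rescale its
                pts to the encoder time base (minus the output start time),
                and send it to the audio or video encoder */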
1064         while (1) {
1065             ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
1066                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
1067             if (ret < 0) {
1068                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1069                     char buf[256];
1070                     av_strerror(ret, buf, sizeof(buf));
1071                     av_log(NULL, AV_LOG_WARNING,
1072                            "Error in av_buffersink_get_buffer_ref(): %s\n", buf);
1073                 }
1074                 break;
1075             }
1076             frame_pts = AV_NOPTS_VALUE;
1077             if (picref->pts != AV_NOPTS_VALUE) {
1078                 filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,
1079                                                 ost->filter->filter->inputs[0]->time_base,
1080                                                 ost->st->codec->time_base) -
1081                                     av_rescale_q(of->start_time,
1082                                                 AV_TIME_BASE_Q,
1083                                                 ost->st->codec->time_base);
1084
1085                 if (of->start_time && filtered_frame->pts < 0) {
1086                     avfilter_unref_buffer(picref);
1087                     continue;
1088                 }
1089             }
1090             //if (ost->source_index >= 0)
1091             //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1092
1093
1094             switch (ost->filter->filter->inputs[0]->type) {
1095             case AVMEDIA_TYPE_VIDEO:
1096                 avfilter_copy_buf_props(filtered_frame, picref);
1097                 filtered_frame->pts = frame_pts;
1098                 if (!ost->frame_aspect_ratio)
1099                     ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
1100
1101                 do_video_out(of->ctx, ost, filtered_frame);
1102                 break;
1103             case AVMEDIA_TYPE_AUDIO:
1104                 avfilter_copy_buf_props(filtered_frame, picref);
1105                 filtered_frame->pts = frame_pts;
1106                 if (!(ost->st->codec->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
1107                     ost->st->codec->channels != av_frame_get_channels(filtered_frame)) {
1108                     av_log(NULL, AV_LOG_ERROR,
1109                            "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1110                     break;
1111                 }
1112                 do_audio_out(of->ctx, ost, filtered_frame);
1113                 break;
1114             default:
1115                 // TODO support subtitle filters
1116                 av_assert0(0);
1117             }
1118
1119             avfilter_unref_buffer(picref);
1120         }
1121     }
1122
1123     return 0;
1124 }
1125
1126 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1127 {
1128     char buf[1024];
1129     AVBPrint buf_script;
1130     OutputStream *ost;
1131     AVFormatContext *oc;
1132     int64_t total_size;
1133     AVCodecContext *enc;
1134     int frame_number, vid, i;
1135     double bitrate;
1136     int64_t pts = INT64_MIN;
1137     static int64_t last_time = -1;
1138     static int qp_histogram[52];
1139     int hours, mins, secs, us;
1140
1141     if (!print_stats && !is_last_report && !progress_avio)
1142         return;
1143
1144     if (!is_last_report) {
1145         if (last_time == -1) {
1146             last_time = cur_time;
1147             return;
1148         }
1149         if ((cur_time - last_time) < 500000)
1150             return;
1151         last_time = cur_time;
1152     }
1153
1154
1155     oc = output_files[0]->ctx;
1156
1157     total_size = avio_size(oc->pb);
1158     if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1159         total_size = avio_tell(oc->pb);
1160
1161     buf[0] = '\0';
1162     vid = 0;
1163     av_bprint_init(&buf_script, 0, 1);
1164     for (i = 0; i < nb_output_streams; i++) {
1165         float q = -1;
1166         ost = output_streams[i];
1167         enc = ost->st->codec;
1168         if (!ost->stream_copy && enc->coded_frame)
1169             q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1170         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1171             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1172             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1173                        ost->file_index, ost->index, q);
1174         }
1175         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1176             float fps, t = (cur_time-timer_start) / 1000000.0;
1177
1178             frame_number = ost->frame_number;
1179             fps = t > 1 ? frame_number / t : 0;
1180             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1181                      frame_number, fps < 9.95, fps, q);
1182             av_bprintf(&buf_script, "frame=%d\n", frame_number);
1183             av_bprintf(&buf_script, "fps=%.1f\n", fps);
1184             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1185                        ost->file_index, ost->index, q);
1186             if (is_last_report)
1187                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1188             if (qp_hist) {
1189                 int j;
1190                 int qp = lrintf(q);
1191                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1192                     qp_histogram[qp]++;
1193                 for (j = 0; j < 32; j++)
1194                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1195             }
1196             if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
1197                 int j;
1198                 double error, error_sum = 0;
1199                 double scale, scale_sum = 0;
1200                 double p;
1201                 char type[3] = { 'Y','U','V' };
1202                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1203                 for (j = 0; j < 3; j++) {
1204                     if (is_last_report) {
1205                         error = enc->error[j];
1206                         scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1207                     } else {
1208                         error = enc->coded_frame->error[j];
1209                         scale = enc->width * enc->height * 255.0 * 255.0;
1210                     }
1211                     if (j)
1212                         scale /= 4;
1213                     error_sum += error;
1214                     scale_sum += scale;
1215                     p = psnr(error / scale);
1216                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1217                     av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1218                                ost->file_index, ost->index, type[j] | 32, p);
1219                 }
1220                 p = psnr(error_sum / scale_sum);
1221                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1222                 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1223                            ost->file_index, ost->index, p);
1224             }
1225             vid = 1;
1226         }
1227         /* compute the highest output timestamp reached so far (used for the time= / bitrate report) */
1228         if ((is_last_report || !ost->finished) && ost->st->pts.val != AV_NOPTS_VALUE)
1229             pts = FFMAX(pts, av_rescale_q(ost->st->pts.val,
1230                                           ost->st->time_base, AV_TIME_BASE_Q));
1231     }
1232
1233     secs = pts / AV_TIME_BASE;
1234     us = pts % AV_TIME_BASE;
1235     mins = secs / 60;
1236     secs %= 60;
1237     hours = mins / 60;
1238     mins %= 60;
1239
1240     bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1241
1242     if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1243                                  "size=N/A time=");
1244     else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1245                                  "size=%8.0fkB time=", total_size / 1024.0);
1246     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1247              "%02d:%02d:%02d.%02d ", hours, mins, secs,
1248              (100 * us) / AV_TIME_BASE);
1249     if (bitrate < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1250                               "bitrate=N/A");
1251     else             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1252                               "bitrate=%6.1fkbits/s", bitrate);
1253     if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1254     else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1255     av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1256     av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1257                hours, mins, secs, us);
1258
1259     if (nb_frames_dup || nb_frames_drop)
1260         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1261                 nb_frames_dup, nb_frames_drop);
1262     av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1263     av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1264
1265     if (print_stats || is_last_report) {
1266     av_log(NULL, AV_LOG_INFO, "%s    \r", buf);
1267
1268     fflush(stderr);
1269     }
1270
1271     if (progress_avio) {
1272         av_bprintf(&buf_script, "progress=%s\n",
1273                    is_last_report ? "end" : "continue");
1274         avio_write(progress_avio, buf_script.str,
1275                    FFMIN(buf_script.len, buf_script.size - 1));
1276         avio_flush(progress_avio);
1277         av_bprint_finalize(&buf_script, NULL);
1278         if (is_last_report) {
1279             avio_close(progress_avio);
1280             progress_avio = NULL;
1281         }
1282     }
1283
1284     if (is_last_report) {
1285         int64_t raw= audio_size + video_size + subtitle_size + extra_size;
1286         av_log(NULL, AV_LOG_INFO, "\n");
1287         av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1288                video_size / 1024.0,
1289                audio_size / 1024.0,
1290                subtitle_size / 1024.0,
1291                extra_size / 1024.0,
1292                100.0 * (total_size - raw) / raw
1293         );
1294         if(video_size + audio_size + subtitle_size + extra_size == 0){
1295             av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
1296         }
1297     }
1298 }
1299
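     /* Drain the encoders at end of stream: feed each used encoder NULL frames
        until it stops returning packets, rescale the resulting timestamps and
        mux the delayed packets. */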
1300 static void flush_encoders(void)
1301 {
1302     int i, ret;
1303
1304     for (i = 0; i < nb_output_streams; i++) {
1305         OutputStream   *ost = output_streams[i];
1306         AVCodecContext *enc = ost->st->codec;
1307         AVFormatContext *os = output_files[ost->file_index]->ctx;
1308         int stop_encoding = 0;
1309
1310         if (!ost->encoding_needed)
1311             continue;
1312
1313         if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1314             continue;
1315         if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1316             continue;
1317
1318         for (;;) {
1319             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1320             const char *desc;
1321             int64_t *size;
1322
1323             switch (ost->st->codec->codec_type) {
1324             case AVMEDIA_TYPE_AUDIO:
1325                 encode = avcodec_encode_audio2;
1326                 desc   = "Audio";
1327                 size   = &audio_size;
1328                 break;
1329             case AVMEDIA_TYPE_VIDEO:
1330                 encode = avcodec_encode_video2;
1331                 desc   = "Video";
1332                 size   = &video_size;
1333                 break;
1334             default:
1335                 stop_encoding = 1;
1336             }
1337
1338             if (encode) {
1339                 AVPacket pkt;
1340                 int got_packet;
1341                 av_init_packet(&pkt);
1342                 pkt.data = NULL;
1343                 pkt.size = 0;
1344
1345                 update_benchmark(NULL);
1346                 ret = encode(enc, &pkt, NULL, &got_packet);
1347                 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1348                 if (ret < 0) {
1349                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1350                     exit(1);
1351                 }
1352                 *size += pkt.size;
1353                 if (ost->logfile && enc->stats_out) {
1354                     fprintf(ost->logfile, "%s", enc->stats_out);
1355                 }
1356                 if (!got_packet) {
1357                     stop_encoding = 1;
1358                     break;
1359                 }
1360                 if (pkt.pts != AV_NOPTS_VALUE)
1361                     pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1362                 if (pkt.dts != AV_NOPTS_VALUE)
1363                     pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1364                 if (pkt.duration > 0)
1365                     pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1366                 write_frame(os, &pkt, ost);
1367                 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1368                     do_video_stats(ost, pkt.size);
1369                 }
1370             }
1371
1372             if (stop_encoding)
1373                 break;
1374         }
1375     }
1376 }
1377
1378 /*
1379  * Check whether a packet from ist should be written into ost at this time
1380  */
1381 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1382 {
1383     OutputFile *of = output_files[ost->file_index];
1384     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
1385
1386     if (ost->source_index != ist_index)
1387         return 0;
1388
1389     if (of->start_time && ist->pts < of->start_time)
1390         return 0;
1391
1392     return 1;
1393 }
1394
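     /* Codec-copy path: drop packets before the start time or before the first
        keyframe when requested, rescale the packet timestamps into the output
        stream's time base, update the size statistics, and mux the packet
        without re-encoding. */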
1395 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1396 {
1397     OutputFile *of = output_files[ost->file_index];
1398     int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
1399     AVPicture pict;
1400     AVPacket opkt;
1401
1402     av_init_packet(&opkt);
1403
1404     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1405         !ost->copy_initial_nonkeyframes)
1406         return;
1407
1408     if (!ost->frame_number && ist->pts < of->start_time &&
1409         !ost->copy_prior_start)
1410         return;
1411
1412     if (of->recording_time != INT64_MAX &&
1413         ist->pts >= of->recording_time + of->start_time) {
1414         close_output_stream(ost);
1415         return;
1416     }
1417
1418     /* force the input stream PTS */
1419     if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1420         audio_size += pkt->size;
1421     else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1422         video_size += pkt->size;
1423         ost->sync_opts++;
1424     } else if (ost->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
1425         subtitle_size += pkt->size;
1426     }
1427
1428     if (pkt->pts != AV_NOPTS_VALUE)
1429         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1430     else
1431         opkt.pts = AV_NOPTS_VALUE;
1432
1433     if (pkt->dts == AV_NOPTS_VALUE)
1434         opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1435     else
1436         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1437     opkt.dts -= ost_tb_start_time;
1438
1439     if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1440         int duration = av_get_audio_frame_duration(ist->st->codec, pkt->size);
1441         if(!duration)
1442             duration = ist->st->codec->frame_size;
1443         opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1444                                                (AVRational){1, ist->st->codec->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1445                                                ost->st->time_base) - ost_tb_start_time;
1446     }
1447
1448     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1449     opkt.flags    = pkt->flags;
1450
1451     // FIXME: remove the following 2 lines; they should be replaced by the bitstream filters
1452     if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
1453        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1454        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1455        && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1456        ) {
1457         if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
1458             opkt.destruct = av_destruct_packet;
1459     } else {
1460         opkt.data = pkt->data;
1461         opkt.size = pkt->size;
1462     }
1463
1464     if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1465         /* store AVPicture in AVPacket, as expected by the output format */
1466         avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1467         opkt.data = (uint8_t *)&pict;
1468         opkt.size = sizeof(AVPicture);
1469         opkt.flags |= AV_PKT_FLAG_KEY;
1470     }
1471
1472     write_frame(of->ctx, &opkt, ost);
1473     ost->st->codec->frame_number++;
1474 }
1475
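     /* -re support: sleep until the stream's DTS (converted to microseconds)
        has been reached in real time, so input is not consumed faster than
        real time. */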
1476 static void rate_emu_sleep(InputStream *ist)
1477 {
1478     if (input_files[ist->file_index]->rate_emu) {
1479         int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
1480         int64_t now = av_gettime() - ist->start;
1481         if (pts > now)
1482             av_usleep(pts - now);
1483     }
1484 }
1485
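     /* If the decoder did not report a channel layout, guess a default one from
      * the channel count (unless it exceeds -guess_layout_max). Returns 1 on
      * success, 0 if no layout could be guessed. */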
1486 int guess_input_channel_layout(InputStream *ist)
1487 {
1488     AVCodecContext *dec = ist->st->codec;
1489
1490     if (!dec->channel_layout) {
1491         char layout_name[256];
1492
1493         if (dec->channels > ist->guess_layout_max)
1494             return 0;
1495         dec->channel_layout = av_get_default_channel_layout(dec->channels);
1496         if (!dec->channel_layout)
1497             return 0;
1498         av_get_channel_layout_string(layout_name, sizeof(layout_name),
1499                                      dec->channels, dec->channel_layout);
1500         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1501                "#%d.%d: %s\n", ist->file_index, ist->st->index, layout_name);
1502     }
1503     return 1;
1504 }
1505
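     /* Decode one audio packet: validate the sample rate, derive frame timestamps,
      * reconfigure the affected filtergraphs if the sample format/rate/channels/
      * layout changed, and push the decoded frame into the input filters. */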
1506 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1507 {
1508     AVFrame *decoded_frame;
1509     AVCodecContext *avctx = ist->st->codec;
1510     int i, ret, resample_changed;
1511     AVRational decoded_frame_tb;
1512
1513     if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1514         return AVERROR(ENOMEM);
1515     decoded_frame = ist->decoded_frame;
1516
1517     update_benchmark(NULL);
1518     ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1519     update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1520
1521     if (ret >= 0 && avctx->sample_rate <= 0) {
1522         av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1523         ret = AVERROR_INVALIDDATA;
1524     }
1525
1526     if (!*got_output || ret < 0) {
1527         if (!pkt->size) {
1528             for (i = 0; i < ist->nb_filters; i++)
1529                 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
1530         }
1531         return ret;
1532     }
1533
1534 #if 1
1535     /* increment next_pts and next_dts for the case where the input stream does not
1536        have timestamps or there are multiple frames in the packet */
1537     ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1538                      avctx->sample_rate;
1539     ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1540                      avctx->sample_rate;
1541 #endif
1542
1543     rate_emu_sleep(ist);
1544
1545     resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
1546                        ist->resample_channels       != avctx->channels               ||
1547                        ist->resample_channel_layout != decoded_frame->channel_layout ||
1548                        ist->resample_sample_rate    != decoded_frame->sample_rate;
1549     if (resample_changed) {
1550         char layout1[64], layout2[64];
1551
1552         if (!guess_input_channel_layout(ist)) {
1553             av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1554                    "layout for Input Stream #%d.%d\n", ist->file_index,
1555                    ist->st->index);
1556             exit(1);
1557         }
1558         decoded_frame->channel_layout = avctx->channel_layout;
1559
1560         av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1561                                      ist->resample_channel_layout);
1562         av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1563                                      decoded_frame->channel_layout);
1564
1565         av_log(NULL, AV_LOG_INFO,
1566                "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1567                ist->file_index, ist->st->index,
1568                ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
1569                ist->resample_channels, layout1,
1570                decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1571                avctx->channels, layout2);
1572
1573         ist->resample_sample_fmt     = decoded_frame->format;
1574         ist->resample_sample_rate    = decoded_frame->sample_rate;
1575         ist->resample_channel_layout = decoded_frame->channel_layout;
1576         ist->resample_channels       = avctx->channels;
1577
1578         for (i = 0; i < nb_filtergraphs; i++)
1579             if (ist_in_filtergraph(filtergraphs[i], ist)) {
1580                 FilterGraph *fg = filtergraphs[i];
1581                 int j;
1582                 if (configure_filtergraph(fg) < 0) {
1583                     av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1584                     exit(1);
1585                 }
1586                 for (j = 0; j < fg->nb_outputs; j++) {
1587                     OutputStream *ost = fg->outputs[j]->ost;
1588                     if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1589                         !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
1590                         av_buffersink_set_frame_size(ost->filter->filter,
1591                                                      ost->st->codec->frame_size);
1592                 }
1593             }
1594     }
1595
1596     /* if the decoder provides a pts, use it instead of the last packet pts.
1597        the decoder could be delaying output by a packet or more. */
1598     if (decoded_frame->pts != AV_NOPTS_VALUE) {
1599         ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
1600         decoded_frame_tb   = avctx->time_base;
1601     } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
1602         decoded_frame->pts = decoded_frame->pkt_pts;
1603         pkt->pts           = AV_NOPTS_VALUE;
1604         decoded_frame_tb   = ist->st->time_base;
1605     } else if (pkt->pts != AV_NOPTS_VALUE) {
1606         decoded_frame->pts = pkt->pts;
1607         pkt->pts           = AV_NOPTS_VALUE;
1608         decoded_frame_tb   = ist->st->time_base;
1609     }else {
1610         decoded_frame->pts = ist->dts;
1611         decoded_frame_tb   = AV_TIME_BASE_Q;
1612     }
1613     if (decoded_frame->pts != AV_NOPTS_VALUE)
1614         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
1615                                               (AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
1616                                               (AVRational){1, ist->st->codec->sample_rate});
1617     for (i = 0; i < ist->nb_filters; i++)
1618         av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame,
1619                                AV_BUFFERSRC_FLAG_PUSH);
1620
1621     decoded_frame->pts = AV_NOPTS_VALUE;
1622
1623     return ret;
1624 }
1625
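     /* Decode one video packet: pick the best-effort timestamp, reconfigure the
      * filtergraphs if the frame size or pixel format changed, and feed the frame
      * to the input filters (reusing DR1 user buffers without a copy when possible). */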
1626 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1627 {
1628     AVFrame *decoded_frame;
1629     void *buffer_to_free = NULL;
1630     int i, ret = 0, resample_changed;
1631     int64_t best_effort_timestamp;
1632     AVRational *frame_sample_aspect;
1633
1634     if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1635         return AVERROR(ENOMEM);
1636     decoded_frame = ist->decoded_frame;
1637     pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
1638
1639     update_benchmark(NULL);
1640     ret = avcodec_decode_video2(ist->st->codec,
1641                                 decoded_frame, got_output, pkt);
1642     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
1643     if (!*got_output || ret < 0) {
1644         if (!pkt->size) {
1645             for (i = 0; i < ist->nb_filters; i++)
1646                 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
1647         }
1648         return ret;
1649     }
1650
1651     if(ist->top_field_first>=0)
1652         decoded_frame->top_field_first = ist->top_field_first;
1653
1654     best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
1655     if(best_effort_timestamp != AV_NOPTS_VALUE)
1656         ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
1657
1658     if (debug_ts) {
1659         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
1660                 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d \n",
1661                 ist->st->index, av_ts2str(decoded_frame->pts),
1662                 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
1663                 best_effort_timestamp,
1664                 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
1665                 decoded_frame->key_frame, decoded_frame->pict_type);
1666     }
1667
1668     pkt->size = 0;
1669     pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
1670
1671     rate_emu_sleep(ist);
1672
1673     if (ist->st->sample_aspect_ratio.num)
1674         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1675
1676     resample_changed = ist->resample_width   != decoded_frame->width  ||
1677                        ist->resample_height  != decoded_frame->height ||
1678                        ist->resample_pix_fmt != decoded_frame->format;
1679     if (resample_changed) {
1680         av_log(NULL, AV_LOG_INFO,
1681                "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1682                ist->file_index, ist->st->index,
1683                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
1684                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1685
1686         ist->resample_width   = decoded_frame->width;
1687         ist->resample_height  = decoded_frame->height;
1688         ist->resample_pix_fmt = decoded_frame->format;
1689
1690         for (i = 0; i < nb_filtergraphs; i++) {
1691             if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
1692                 configure_filtergraph(filtergraphs[i]) < 0) {
1693                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1694                 exit(1);
1695             }
1696         }
1697     }
1698
1699     frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
1700     for (i = 0; i < ist->nb_filters; i++) {
1701         int changed =      ist->st->codec->width   != ist->filters[i]->filter->outputs[0]->w
1702                         || ist->st->codec->height  != ist->filters[i]->filter->outputs[0]->h
1703                         || ist->st->codec->pix_fmt != ist->filters[i]->filter->outputs[0]->format;
1704
1705         if (!frame_sample_aspect->num)
1706             *frame_sample_aspect = ist->st->sample_aspect_ratio;
1707         if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
1708             FrameBuffer      *buf = decoded_frame->opaque;
1709             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1710                                         decoded_frame->data, decoded_frame->linesize,
1711                                         AV_PERM_READ | AV_PERM_PRESERVE,
1712                                         ist->st->codec->width, ist->st->codec->height,
1713                                         ist->st->codec->pix_fmt);
1714
1715             avfilter_copy_frame_props(fb, decoded_frame);
1716             fb->buf->priv           = buf;
1717             fb->buf->free           = filter_release_buffer;
1718
1719             av_assert0(buf->refcount>0);
1720             buf->refcount++;
1721             av_buffersrc_add_ref(ist->filters[i]->filter, fb,
1722                                  AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
1723                                  AV_BUFFERSRC_FLAG_NO_COPY |
1724                                  AV_BUFFERSRC_FLAG_PUSH);
1725         } else
1726         if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) {
1727             av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
1728             exit(1);
1729         }
1730
1731     }
1732
1733     av_free(buffer_to_free);
1734     return ret;
1735 }
1736
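     /* Decode one subtitle packet, optionally clip the previous subtitle's duration
      * (-fix_sub_duration), update the sub2video state, and send the subtitle to
      * every output stream that encodes subtitles from this input. */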
1737 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1738 {
1739     AVSubtitle subtitle;
1740     int i, ret = avcodec_decode_subtitle2(ist->st->codec,
1741                                           &subtitle, got_output, pkt);
1742     if (ret < 0 || !*got_output) {
1743         if (!pkt->size)
1744             sub2video_flush(ist);
1745         return ret;
1746     }
1747
1748     if (ist->fix_sub_duration) {
1749         if (ist->prev_sub.got_output) {
1750             int end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
1751                                  1000, AV_TIME_BASE);
1752             if (end < ist->prev_sub.subtitle.end_display_time) {
1753                 av_log(ist->st->codec, AV_LOG_DEBUG,
1754                        "Subtitle duration reduced from %d to %d\n",
1755                        ist->prev_sub.subtitle.end_display_time, end);
1756                 ist->prev_sub.subtitle.end_display_time = end;
1757             }
1758         }
1759         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
1760         FFSWAP(int,        ret,         ist->prev_sub.ret);
1761         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
1762     }
1763
1764     sub2video_update(ist, &subtitle);
1765
1766     if (!*got_output || !subtitle.num_rects)
1767         return ret;
1768
1769     rate_emu_sleep(ist);
1770
1771     for (i = 0; i < nb_output_streams; i++) {
1772         OutputStream *ost = output_streams[i];
1773
1774         if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1775             continue;
1776
1777         do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
1778     }
1779
1780     avsubtitle_free(&subtitle);
1781     return ret;
1782 }
1783
1784 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1785 static int output_packet(InputStream *ist, const AVPacket *pkt)
1786 {
1787     int ret = 0, i;
1788     int got_output;
1789
1790     AVPacket avpkt;
1791     if (!ist->saw_first_ts) {
1792         ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1793         ist->pts = 0;
1794         if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
1795             ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
1796             ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
1797         }
1798         ist->saw_first_ts = 1;
1799     }
1800
1801     if (ist->next_dts == AV_NOPTS_VALUE)
1802         ist->next_dts = ist->dts;
1803     if (ist->next_pts == AV_NOPTS_VALUE)
1804         ist->next_pts = ist->pts;
1805
1806     if (pkt == NULL) {
1807         /* EOF handling */
1808         av_init_packet(&avpkt);
1809         avpkt.data = NULL;
1810         avpkt.size = 0;
1811         goto handle_eof;
1812     } else {
1813         avpkt = *pkt;
1814     }
1815
1816     if (pkt->dts != AV_NOPTS_VALUE) {
1817         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1818         if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
1819             ist->next_pts = ist->pts = ist->dts;
1820     }
1821
1822     // while we have more to decode or while the decoder did output something on EOF
1823     while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1824         int duration;
1825     handle_eof:
1826
1827         ist->pts = ist->next_pts;
1828         ist->dts = ist->next_dts;
1829
1830         if (avpkt.size && avpkt.size != pkt->size) {
1831             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
1832                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1833             ist->showed_multi_packet_warning = 1;
1834         }
1835
1836         switch (ist->st->codec->codec_type) {
1837         case AVMEDIA_TYPE_AUDIO:
1838             ret = decode_audio    (ist, &avpkt, &got_output);
1839             break;
1840         case AVMEDIA_TYPE_VIDEO:
1841             ret = decode_video    (ist, &avpkt, &got_output);
1842             if (avpkt.duration) {
1843                 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1844             } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
1845                 int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
1846                 duration = ((int64_t)AV_TIME_BASE *
1847                                 ist->st->codec->time_base.num * ticks) /
1848                                 ist->st->codec->time_base.den;
1849             } else
1850                 duration = 0;
1851
1852             if(ist->dts != AV_NOPTS_VALUE && duration) {
1853                 ist->next_dts += duration;
1854             }else
1855                 ist->next_dts = AV_NOPTS_VALUE;
1856
1857             if (got_output)
1858                 ist->next_pts += duration; //FIXME the duration is not correct in some cases
1859             break;
1860         case AVMEDIA_TYPE_SUBTITLE:
1861             ret = transcode_subtitles(ist, &avpkt, &got_output);
1862             break;
1863         default:
1864             return -1;
1865         }
1866
1867         if (ret < 0)
1868             return ret;
1869
1870         avpkt.dts=
1871         avpkt.pts= AV_NOPTS_VALUE;
1872
1873         // touch data and size only if not EOF
1874         if (pkt) {
1875             if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
1876                 ret = avpkt.size;
1877             avpkt.data += ret;
1878             avpkt.size -= ret;
1879         }
1880         if (!got_output) {
1881             continue;
1882         }
1883     }
1884
1885     /* handle stream copy */
1886     if (!ist->decoding_needed) {
1887         rate_emu_sleep(ist);
1888         ist->dts = ist->next_dts;
1889         switch (ist->st->codec->codec_type) {
1890         case AVMEDIA_TYPE_AUDIO:
1891             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
1892                              ist->st->codec->sample_rate;
1893             break;
1894         case AVMEDIA_TYPE_VIDEO:
1895             if (pkt->duration) {
1896                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
1897             } else if(ist->st->codec->time_base.num != 0) {
1898                 int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
1899                 ist->next_dts += ((int64_t)AV_TIME_BASE *
1900                                   ist->st->codec->time_base.num * ticks) /
1901                                   ist->st->codec->time_base.den;
1902             }
1903             break;
1904         }
1905         ist->pts = ist->dts;
1906         ist->next_pts = ist->next_dts;
1907     }
1908     for (i = 0; pkt && i < nb_output_streams; i++) {
1909         OutputStream *ost = output_streams[i];
1910
1911         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1912             continue;
1913
1914         do_streamcopy(ist, ost, pkt);
1915     }
1916
1917     return 0;
1918 }
1919
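     /* Generate and print an SDP description covering all output files
      * (only used when every output goes to the RTP muxer). */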
1920 static void print_sdp(void)
1921 {
1922     char sdp[16384];
1923     int i;
1924     AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
1925
1926     if (!avc)
1927         exit(1);
1928     for (i = 0; i < nb_output_files; i++)
1929         avc[i] = output_files[i]->ctx;
1930
1931     av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1932     printf("SDP:\n%s\n", sdp);
1933     fflush(stdout);
1934     av_freep(&avc);
1935 }
1936
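     /* Open the decoder for an input stream if decoding is needed; sets up the
      * custom DR1 buffer callbacks for capable video decoders and defaults the
      * decoder to automatic threading unless a "threads" option was supplied. */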
1937 static int init_input_stream(int ist_index, char *error, int error_len)
1938 {
1939     int ret;
1940     InputStream *ist = input_streams[ist_index];
1941
1942     if (ist->decoding_needed) {
1943         AVCodec *codec = ist->dec;
1944         if (!codec) {
1945             snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
1946                     avcodec_get_name(ist->st->codec->codec_id), ist->file_index, ist->st->index);
1947             return AVERROR(EINVAL);
1948         }
1949
1950         ist->dr1 = (codec->capabilities & CODEC_CAP_DR1) && !do_deinterlace;
1951         if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {
1952             ist->st->codec->get_buffer     = codec_get_buffer;
1953             ist->st->codec->release_buffer = codec_release_buffer;
1954             ist->st->codec->opaque         = &ist->buffer_pool;
1955         }
1956
1957         if (!av_dict_get(ist->opts, "threads", NULL, 0))
1958             av_dict_set(&ist->opts, "threads", "auto", 0);
1959         if ((ret = avcodec_open2(ist->st->codec, codec, &ist->opts)) < 0) {
1960             if (ret == AVERROR_EXPERIMENTAL)
1961                 abort_codec_experimental(codec, 0);
1962             snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
1963                     ist->file_index, ist->st->index);
1964             return ret;
1965         }
1966         assert_avoptions(ist->opts);
1967     }
1968
1969     ist->next_pts = AV_NOPTS_VALUE;
1970     ist->next_dts = AV_NOPTS_VALUE;
1971     ist->is_start = 1;
1972
1973     return 0;
1974 }
1975
1976 static InputStream *get_input_stream(OutputStream *ost)
1977 {
1978     if (ost->source_index >= 0)
1979         return input_streams[ost->source_index];
1980     return NULL;
1981 }
1982
1983 static int compare_int64(const void *a, const void *b)
1984 {
1985     int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
1986     return va < vb ? -1 : va > vb ? +1 : 0;
1987 }
1988
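     /* Parse the -force_key_frames time list into a sorted array of PTS values in
      * the encoder time base. The "chapters[delta]" shorthand expands to one entry
      * per chapter of the output file, shifted by the optional delta
      * (e.g. -force_key_frames 0:05:00,chapters-0.1). */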
1989 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1990                                     AVCodecContext *avctx)
1991 {
1992     char *p;
1993     int n = 1, i, size, index = 0;
1994     int64_t t, *pts;
1995
1996     for (p = kf; *p; p++)
1997         if (*p == ',')
1998             n++;
1999     size = n;
2000     pts = av_malloc(sizeof(*pts) * size);
2001     if (!pts) {
2002         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2003         exit(1);
2004     }
2005
2006     p = kf;
2007     for (i = 0; i < n; i++) {
2008         char *next = strchr(p, ',');
2009
2010         if (next)
2011             *next++ = 0;
2012
2013         if (!memcmp(p, "chapters", 8)) {
2014
2015             AVFormatContext *avf = output_files[ost->file_index]->ctx;
2016             int j;
2017
2018             if (avf->nb_chapters > INT_MAX - size ||
2019                 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2020                                      sizeof(*pts)))) {
2021                 av_log(NULL, AV_LOG_FATAL,
2022                        "Could not allocate forced key frames array.\n");
2023                 exit(1);
2024             }
2025             t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2026             t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2027
2028             for (j = 0; j < avf->nb_chapters; j++) {
2029                 AVChapter *c = avf->chapters[j];
2030                 av_assert1(index < size);
2031                 pts[index++] = av_rescale_q(c->start, c->time_base,
2032                                             avctx->time_base) + t;
2033             }
2034
2035         } else {
2036
2037             t = parse_time_or_die("force_key_frames", p, 1);
2038             av_assert1(index < size);
2039             pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2040
2041         }
2042
2043         p = next;
2044     }
2045
2046     av_assert0(index == size);
2047     qsort(pts, size, sizeof(*pts), compare_int64);
2048     ost->forced_kf_count = size;
2049     ost->forced_kf_pts   = pts;
2050 }
2051
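     /* Warn (once per stream) when a packet arrives for an input stream that was
      * not present during the initial probing of the file. */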
2052 static void report_new_stream(int input_index, AVPacket *pkt)
2053 {
2054     InputFile *file = input_files[input_index];
2055     AVStream *st = file->ctx->streams[pkt->stream_index];
2056
2057     if (pkt->stream_index < file->nb_streams_warn)
2058         return;
2059     av_log(file->ctx, AV_LOG_WARNING,
2060            "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2061            av_get_media_type_string(st->codec->codec_type),
2062            input_index, pkt->stream_index,
2063            pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2064     file->nb_streams_warn = pkt->stream_index + 1;
2065 }
2066
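     /* Prepare everything for transcoding: derive output codec parameters (copied
      * from the input for stream copy, taken from the filtergraph for encoding),
      * open encoders and decoders, discard unused programs, write the output file
      * headers and print the resulting stream mapping. */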
2067 static int transcode_init(void)
2068 {
2069     int ret = 0, i, j, k;
2070     AVFormatContext *oc;
2071     AVCodecContext *codec;
2072     OutputStream *ost;
2073     InputStream *ist;
2074     char error[1024];
2075     int want_sdp = 1;
2076
2077     /* init framerate emulation */
2078     for (i = 0; i < nb_input_files; i++) {
2079         InputFile *ifile = input_files[i];
2080         if (ifile->rate_emu)
2081             for (j = 0; j < ifile->nb_streams; j++)
2082                 input_streams[j + ifile->ist_index]->start = av_gettime();
2083     }
2084
2085     /* output stream init */
2086     for (i = 0; i < nb_output_files; i++) {
2087         oc = output_files[i]->ctx;
2088         if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2089             av_dump_format(oc, i, oc->filename, 1);
2090             av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2091             return AVERROR(EINVAL);
2092         }
2093     }
2094
2095     /* init complex filtergraphs */
2096     for (i = 0; i < nb_filtergraphs; i++)
2097         if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2098             return ret;
2099
2100     /* for each output stream, we compute the right encoding parameters */
2101     for (i = 0; i < nb_output_streams; i++) {
2102         AVCodecContext *icodec = NULL;
2103         ost = output_streams[i];
2104         oc  = output_files[ost->file_index]->ctx;
2105         ist = get_input_stream(ost);
2106
2107         if (ost->attachment_filename)
2108             continue;
2109
2110         codec  = ost->st->codec;
2111
2112         if (ist) {
2113             icodec = ist->st->codec;
2114
2115             ost->st->disposition          = ist->st->disposition;
2116             codec->bits_per_raw_sample    = icodec->bits_per_raw_sample;
2117             codec->chroma_sample_location = icodec->chroma_sample_location;
2118         }
2119
2120         if (ost->stream_copy) {
2121             uint64_t extra_size;
2122
2123             av_assert0(ist && !ost->filter);
2124
2125             extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2126
2127             if (extra_size > INT_MAX) {
2128                 return AVERROR(EINVAL);
2129             }
2130
2131             /* if stream_copy is selected, no need to decode or encode */
2132             codec->codec_id   = icodec->codec_id;
2133             codec->codec_type = icodec->codec_type;
2134
2135             if (!codec->codec_tag) {
2136                 unsigned int codec_tag;
2137                 if (!oc->oformat->codec_tag ||
2138                      av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2139                      !av_codec_get_tag2(oc->oformat->codec_tag, icodec->codec_id, &codec_tag))
2140                     codec->codec_tag = icodec->codec_tag;
2141             }
2142
2143             codec->bit_rate       = icodec->bit_rate;
2144             codec->rc_max_rate    = icodec->rc_max_rate;
2145             codec->rc_buffer_size = icodec->rc_buffer_size;
2146             codec->field_order    = icodec->field_order;
2147             codec->extradata      = av_mallocz(extra_size);
2148             if (!codec->extradata) {
2149                 return AVERROR(ENOMEM);
2150             }
2151             memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2152             codec->extradata_size= icodec->extradata_size;
2153             codec->bits_per_coded_sample  = icodec->bits_per_coded_sample;
2154
2155             codec->time_base = ist->st->time_base;
2156             /*
2157              * AVI is a special case here because it supports variable fps, but
2158              * having the fps and timebase differ significantly adds quite some
2159              * overhead.
2160              */
2161             if(!strcmp(oc->oformat->name, "avi")) {
2162                 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2163                                && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2164                                && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(icodec->time_base)
2165                                && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(icodec->time_base) < 1.0/500
2166                      || copy_tb==2){
2167                     codec->time_base.num = ist->st->r_frame_rate.den;
2168                     codec->time_base.den = 2*ist->st->r_frame_rate.num;
2169                     codec->ticks_per_frame = 2;
2170                 } else if (   copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2171                                  && av_q2d(ist->st->time_base) < 1.0/500
2172                     || copy_tb==0){
2173                     codec->time_base = icodec->time_base;
2174                     codec->time_base.num *= icodec->ticks_per_frame;
2175                     codec->time_base.den *= 2;
2176                     codec->ticks_per_frame = 2;
2177                 }
2178             } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2179                       && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2180                       && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2181                       && strcmp(oc->oformat->name, "f4v")
2182             ) {
2183                 if(   copy_tb<0 && icodec->time_base.den
2184                                 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base)
2185                                 && av_q2d(ist->st->time_base) < 1.0/500
2186                    || copy_tb==0){
2187                     codec->time_base = icodec->time_base;
2188                     codec->time_base.num *= icodec->ticks_per_frame;
2189                 }
2190             }
2191             if (   codec->codec_tag == AV_RL32("tmcd")
2192                 && icodec->time_base.num < icodec->time_base.den
2193                 && icodec->time_base.num > 0
2194                 && 121LL*icodec->time_base.num > icodec->time_base.den) {
2195                 codec->time_base = icodec->time_base;
2196             }
2197
2198             if(ost->frame_rate.num)
2199                 codec->time_base = av_inv_q(ost->frame_rate);
2200
2201             av_reduce(&codec->time_base.num, &codec->time_base.den,
2202                         codec->time_base.num, codec->time_base.den, INT_MAX);
2203
2204             switch (codec->codec_type) {
2205             case AVMEDIA_TYPE_AUDIO:
2206                 if (audio_volume != 256) {
2207                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2208                     exit(1);
2209                 }
2210                 codec->channel_layout     = icodec->channel_layout;
2211                 codec->sample_rate        = icodec->sample_rate;
2212                 codec->channels           = icodec->channels;
2213                 codec->frame_size         = icodec->frame_size;
2214                 codec->audio_service_type = icodec->audio_service_type;
2215                 codec->block_align        = icodec->block_align;
2216                 if((codec->block_align == 1 || codec->block_align == 1152 || codec->block_align == 576) && codec->codec_id == AV_CODEC_ID_MP3)
2217                     codec->block_align= 0;
2218                 if(codec->codec_id == AV_CODEC_ID_AC3)
2219                     codec->block_align= 0;
2220                 break;
2221             case AVMEDIA_TYPE_VIDEO:
2222                 codec->pix_fmt            = icodec->pix_fmt;
2223                 codec->width              = icodec->width;
2224                 codec->height             = icodec->height;
2225                 codec->has_b_frames       = icodec->has_b_frames;
2226                 if (!codec->sample_aspect_ratio.num) {
2227                     codec->sample_aspect_ratio   =
2228                     ost->st->sample_aspect_ratio =
2229                         ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2230                         ist->st->codec->sample_aspect_ratio.num ?
2231                         ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2232                 }
2233                 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2234                 break;
2235             case AVMEDIA_TYPE_SUBTITLE:
2236                 codec->width  = icodec->width;
2237                 codec->height = icodec->height;
2238                 break;
2239             case AVMEDIA_TYPE_DATA:
2240             case AVMEDIA_TYPE_ATTACHMENT:
2241                 break;
2242             default:
2243                 abort();
2244             }
2245         } else {
2246             if (!ost->enc)
2247                 ost->enc = avcodec_find_encoder(codec->codec_id);
2248             if (!ost->enc) {
2249                 /* should only happen when a default codec is not present. */
2250                 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2251                          avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2252                 ret = AVERROR(EINVAL);
2253                 goto dump_format;
2254             }
2255
2256             if (ist)
2257                 ist->decoding_needed++;
2258             ost->encoding_needed = 1;
2259
2260             if (!ost->filter &&
2261                 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2262                  codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
2263                     FilterGraph *fg;
2264                     fg = init_simple_filtergraph(ist, ost);
2265                     if (configure_filtergraph(fg)) {
2266                         av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2267                         exit(1);
2268                     }
2269             }
2270
2271             if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2272                 if (ost->filter && !ost->frame_rate.num)
2273                     ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2274                 if (ist && !ost->frame_rate.num)
2275                     ost->frame_rate = ist->framerate;
2276                 if (ist && !ost->frame_rate.num)
2277                     ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
2278 //                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
2279                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2280                     int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2281                     ost->frame_rate = ost->enc->supported_framerates[idx];
2282                 }
2283             }
2284
2285             switch (codec->codec_type) {
2286             case AVMEDIA_TYPE_AUDIO:
2287                 codec->sample_fmt     = ost->filter->filter->inputs[0]->format;
2288                 codec->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
2289                 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2290                 codec->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
2291                 codec->time_base      = (AVRational){ 1, codec->sample_rate };
2292                 break;
2293             case AVMEDIA_TYPE_VIDEO:
2294                 codec->time_base = av_inv_q(ost->frame_rate);
2295                 if (ost->filter && !(codec->time_base.num && codec->time_base.den))
2296                     codec->time_base = ost->filter->filter->inputs[0]->time_base;
2297                 if (   av_q2d(codec->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
2298                    && (video_sync_method == VSYNC_CFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
2299                     av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
2300                                                "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
2301                 }
2302                 for (j = 0; j < ost->forced_kf_count; j++)
2303                     ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
2304                                                          AV_TIME_BASE_Q,
2305                                                          codec->time_base);
2306
2307                 codec->width  = ost->filter->filter->inputs[0]->w;
2308                 codec->height = ost->filter->filter->inputs[0]->h;
2309                 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2310                     ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2311                     av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2312                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
2313                 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
2314
2315                 if (!icodec ||
2316                     codec->width   != icodec->width  ||
2317                     codec->height  != icodec->height ||
2318                     codec->pix_fmt != icodec->pix_fmt) {
2319                     codec->bits_per_raw_sample = frame_bits_per_raw_sample;
2320                 }
2321
2322                 if (ost->forced_keyframes) {
2323                     if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
2324                         ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
2325                                             forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
2326                         if (ret < 0) {
2327                             av_log(NULL, AV_LOG_ERROR,
2328                                    "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
2329                             return ret;
2330                         }
2331                         ost->forced_keyframes_expr_const_values[FKF_N] = 0;
2332                         ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
2333                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
2334                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
2335                     } else {
2336                         parse_forced_key_frames(ost->forced_keyframes, ost, ost->st->codec);
2337                     }
2338                 }
2339                 break;
2340             case AVMEDIA_TYPE_SUBTITLE:
2341                 codec->time_base = (AVRational){1, 1000};
2342                 if (!codec->width) {
2343                     codec->width     = input_streams[ost->source_index]->st->codec->width;
2344                     codec->height    = input_streams[ost->source_index]->st->codec->height;
2345                 }
2346                 break;
2347             default:
2348                 abort();
2349                 break;
2350             }
2351             /* two pass mode */
2352             if (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
2353                 char logfilename[1024];
2354                 FILE *f;
2355
2356                 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2357                          ost->logfile_prefix ? ost->logfile_prefix :
2358                                                DEFAULT_PASS_LOGFILENAME_PREFIX,
2359                          i);
2360                 if (!strcmp(ost->enc->name, "libx264")) {
2361                     av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2362                 } else {
2363                     if (codec->flags & CODEC_FLAG_PASS2) {
2364                         char  *logbuffer;
2365                         size_t logbuffer_size;
2366                         if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2367                             av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2368                                    logfilename);
2369                             exit(1);
2370                         }
2371                         codec->stats_in = logbuffer;
2372                     }
2373                     if (codec->flags & CODEC_FLAG_PASS1) {
2374                         f = fopen(logfilename, "wb");
2375                         if (!f) {
2376                             av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2377                                 logfilename, strerror(errno));
2378                             exit(1);
2379                         }
2380                         ost->logfile = f;
2381                     }
2382                 }
2383             }
2384         }
2385     }
2386
2387     /* open each encoder */
2388     for (i = 0; i < nb_output_streams; i++) {
2389         ost = output_streams[i];
2390         if (ost->encoding_needed) {
2391             AVCodec      *codec = ost->enc;
2392             AVCodecContext *dec = NULL;
2393
2394             if ((ist = get_input_stream(ost)))
2395                 dec = ist->st->codec;
2396             if (dec && dec->subtitle_header) {
2397                 /* ASS code assumes this buffer is null terminated so add extra byte. */
2398                 ost->st->codec->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2399                 if (!ost->st->codec->subtitle_header) {
2400                     ret = AVERROR(ENOMEM);
2401                     goto dump_format;
2402                 }
2403                 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2404                 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2405             }
2406             if (!av_dict_get(ost->opts, "threads", NULL, 0))
2407                 av_dict_set(&ost->opts, "threads", "auto", 0);
2408             if ((ret = avcodec_open2(ost->st->codec, codec, &ost->opts)) < 0) {
2409                 if (ret == AVERROR_EXPERIMENTAL)
2410                     abort_codec_experimental(codec, 1);
2411                 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2412                         ost->file_index, ost->index);
2413                 goto dump_format;
2414             }
2415             if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2416                 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
2417                 av_buffersink_set_frame_size(ost->filter->filter,
2418                                              ost->st->codec->frame_size);
2419             assert_avoptions(ost->opts);
2420             if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2421                 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2422                                              " It takes bits/s as argument, not kbits/s\n");
2423             extra_size += ost->st->codec->extradata_size;
2424
2425             if (ost->st->codec->me_threshold)
2426                 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2427         }
2428     }
2429
2430     /* init input streams */
2431     for (i = 0; i < nb_input_streams; i++)
2432         if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2433             goto dump_format;
2434
2435     /* discard unused programs */
2436     for (i = 0; i < nb_input_files; i++) {
2437         InputFile *ifile = input_files[i];
2438         for (j = 0; j < ifile->ctx->nb_programs; j++) {
2439             AVProgram *p = ifile->ctx->programs[j];
2440             int discard  = AVDISCARD_ALL;
2441
2442             for (k = 0; k < p->nb_stream_indexes; k++)
2443                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2444                     discard = AVDISCARD_DEFAULT;
2445                     break;
2446                 }
2447             p->discard = discard;
2448         }
2449     }
2450
2451     /* open files and write file headers */
2452     for (i = 0; i < nb_output_files; i++) {
2453         oc = output_files[i]->ctx;
2454         oc->interrupt_callback = int_cb;
2455         if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
2456             char errbuf[128];
2457             const char *errbuf_ptr = errbuf;
2458             if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)
2459                 errbuf_ptr = strerror(AVUNERROR(ret));
2460             snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters?): %s", i, errbuf_ptr);
2461             ret = AVERROR(EINVAL);
2462             goto dump_format;
2463         }
2464 //         assert_avoptions(output_files[i]->opts);
2465         if (strcmp(oc->oformat->name, "rtp")) {
2466             want_sdp = 0;
2467         }
2468     }
2469
2470  dump_format:
2471     /* dump the file output parameters - cannot be done earlier in case
2472        of stream copy */
2473     for (i = 0; i < nb_output_files; i++) {
2474         av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2475     }
2476
2477     /* dump the stream mapping */
2478     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2479     for (i = 0; i < nb_input_streams; i++) {
2480         ist = input_streams[i];
2481
2482         for (j = 0; j < ist->nb_filters; j++) {
2483             if (ist->filters[j]->graph->graph_desc) {
2484                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
2485                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2486                        ist->filters[j]->name);
2487                 if (nb_filtergraphs > 1)
2488                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2489                 av_log(NULL, AV_LOG_INFO, "\n");
2490             }
2491         }
2492     }
2493
2494     for (i = 0; i < nb_output_streams; i++) {
2495         ost = output_streams[i];
2496
2497         if (ost->attachment_filename) {
2498             /* an attached file */
2499             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
2500                    ost->attachment_filename, ost->file_index, ost->index);
2501             continue;
2502         }
2503
2504         if (ost->filter && ost->filter->graph->graph_desc) {
2505             /* output from a complex graph */
2506             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
2507             if (nb_filtergraphs > 1)
2508                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2509
2510             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2511                    ost->index, ost->enc ? ost->enc->name : "?");
2512             continue;
2513         }
2514
2515         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
2516                input_streams[ost->source_index]->file_index,
2517                input_streams[ost->source_index]->st->index,
2518                ost->file_index,
2519                ost->index);
2520         if (ost->sync_ist != input_streams[ost->source_index])
2521             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2522                    ost->sync_ist->file_index,
2523                    ost->sync_ist->st->index);
2524         if (ost->stream_copy)
2525             av_log(NULL, AV_LOG_INFO, " (copy)");
2526         else
2527             av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2528                    input_streams[ost->source_index]->dec->name : "?",
2529                    ost->enc ? ost->enc->name : "?");
2530         av_log(NULL, AV_LOG_INFO, "\n");
2531     }
2532
2533     if (ret) {
2534         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2535         return ret;
2536     }
2537
2538     if (want_sdp) {
2539         print_sdp();
2540     }
2541
2542     return 0;
2543 }
2544
2545 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
2546 static int need_output(void)
2547 {
2548     int i;
2549
2550     for (i = 0; i < nb_output_streams; i++) {
2551         OutputStream *ost    = output_streams[i];
2552         OutputFile *of       = output_files[ost->file_index];
2553         AVFormatContext *os  = output_files[ost->file_index]->ctx;
2554
2555         if (ost->finished ||
2556             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2557             continue;
2558         if (ost->frame_number >= ost->max_frames) {
2559             int j;
2560             for (j = 0; j < of->ctx->nb_streams; j++)
2561                 close_output_stream(output_streams[of->ost_index + j]);
2562             continue;
2563         }
2564
2565         return 1;
2566     }
2567
2568     return 0;
2569 }
2570
2571 /**
2572  * Select the output stream to process.
2573  *
2574  * @return  selected output stream, or NULL if none available
2575  */
2576 static OutputStream *choose_output(void)
2577 {
2578     int i;
2579     int64_t opts_min = INT64_MAX;
2580     OutputStream *ost_min = NULL;
2581
2582     for (i = 0; i < nb_output_streams; i++) {
2583         OutputStream *ost = output_streams[i];
2584         int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
2585                                     AV_TIME_BASE_Q);
2586         if (!ost->unavailable && !ost->finished && opts < opts_min) {
2587             opts_min = opts;
2588             ost_min  = ost;
2589         }
2590     }
2591     return ost_min;
2592 }
2593
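     /* Poll the terminal (at most every 100ms) for interactive commands:
      * 'q' quits, '+'/'-' change verbosity, 'c'/'C' send a command to a
      * filtergraph, 'd'/'D' set or cycle codec debug flags, '?' shows the
      * full key list. */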
2594 static int check_keyboard_interaction(int64_t cur_time)
2595 {
2596     int i, ret, key;
2597     static int64_t last_time;
2598     if (received_nb_signals)
2599         return AVERROR_EXIT;
2600     /* read_key() returns 0 on EOF */
2601     if(cur_time - last_time >= 100000 && !run_as_daemon){
2602         key =  read_key();
2603         last_time = cur_time;
2604     }else
2605         key = -1;
2606     if (key == 'q')
2607         return AVERROR_EXIT;
2608     if (key == '+') av_log_set_level(av_log_get_level()+10);
2609     if (key == '-') av_log_set_level(av_log_get_level()-10);
2610     if (key == 's') qp_hist     ^= 1;
2611     if (key == 'h'){
2612         if (do_hex_dump){
2613             do_hex_dump = do_pkt_dump = 0;
2614         } else if(do_pkt_dump){
2615             do_hex_dump = 1;
2616         } else
2617             do_pkt_dump = 1;
2618         av_log_set_level(AV_LOG_DEBUG);
2619     }
2620     if (key == 'c' || key == 'C'){
2621         char buf[4096], target[64], command[256], arg[256] = {0};
2622         double time;
2623         int k, n = 0;
2624         fprintf(stderr, "\nEnter command: <target> <time> <command>[ <argument>]\n");
2625         i = 0;
2626         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
2627             if (k > 0)
2628                 buf[i++] = k;
2629         buf[i] = 0;
2630         if (k > 0 &&
2631             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
2632             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
2633                    target, time, command, arg);
2634             for (i = 0; i < nb_filtergraphs; i++) {
2635                 FilterGraph *fg = filtergraphs[i];
2636                 if (fg->graph) {
2637                     if (time < 0) {
2638                         ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
2639                                                           key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
2640                         fprintf(stderr, "Command reply for stream %d: ret:%d res:%s\n", i, ret, buf);
2641                     } else {
2642                         ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
2643                     }
2644                 }
2645             }
2646         } else {
2647             av_log(NULL, AV_LOG_ERROR,
2648                    "Parse error, at least 3 arguments were expected, "
2649                    "only %d given in string '%s'\n", n, buf);
2650         }
2651     }
2652     if (key == 'd' || key == 'D'){
2653         int debug=0;
2654         if(key == 'D') {
2655             debug = input_streams[0]->st->codec->debug<<1;
2656             if(!debug) debug = 1;
2657             while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
2658                 debug += debug;
2659         }else
2660             if(scanf("%d", &debug)!=1)
2661                 fprintf(stderr,"error parsing debug value\n");
2662         for(i=0;i<nb_input_streams;i++) {
2663             input_streams[i]->st->codec->debug = debug;
2664         }
2665         for(i=0;i<nb_output_streams;i++) {
2666             OutputStream *ost = output_streams[i];
2667             ost->st->codec->debug = debug;
2668         }
2669         if(debug) av_log_set_level(AV_LOG_DEBUG);
2670         fprintf(stderr,"debug=%d\n", debug);
2671     }
2672     if (key == '?'){
2673         fprintf(stderr, "key    function\n"
2674                         "?      show this help\n"
2675                         "+      increase verbosity\n"
2676                         "-      decrease verbosity\n"
2677                         "c      Send command to filtergraph\n"
2678                         "D      cycle through available debug modes\n"
2679                         "h      dump packets/hex; press again to cycle through the 3 states\n"
2680                         "q      quit\n"
2681                         "s      Show QP histogram\n"
2682         );
2683     }
2684     return 0;
2685 }
2686
2687 #if HAVE_PTHREADS
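     /* Reader thread used when there is more than one input file: reads packets
      * from its file and queues them in a FIFO for the main transcoding loop. */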
2688 static void *input_thread(void *arg)
2689 {
2690     InputFile *f = arg;
2691     int ret = 0;
2692
2693     while (!transcoding_finished && ret >= 0) {
2694         AVPacket pkt;
2695         ret = av_read_frame(f->ctx, &pkt);
2696
2697         if (ret == AVERROR(EAGAIN)) {
2698             av_usleep(10000);
2699             ret = 0;
2700             continue;
2701         } else if (ret < 0)
2702             break;
2703
2704         pthread_mutex_lock(&f->fifo_lock);
2705         while (!av_fifo_space(f->fifo))
2706             pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
2707
2708         av_dup_packet(&pkt);
2709         av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2710
2711         pthread_mutex_unlock(&f->fifo_lock);
2712     }
2713
2714     f->finished = 1;
2715     return NULL;
2716 }
2717
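     /* Stop the reader threads: drain their packet FIFOs, wake them up, join them
      * and free the FIFOs. No-op for a single input file. */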
2718 static void free_input_threads(void)
2719 {
2720     int i;
2721
2722     if (nb_input_files == 1)
2723         return;
2724
2725     transcoding_finished = 1;
2726
2727     for (i = 0; i < nb_input_files; i++) {
2728         InputFile *f = input_files[i];
2729         AVPacket pkt;
2730
2731         if (!f->fifo || f->joined)
2732             continue;
2733
2734         pthread_mutex_lock(&f->fifo_lock);
2735         while (av_fifo_size(f->fifo)) {
2736             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2737             av_free_packet(&pkt);
2738         }
2739         pthread_cond_signal(&f->fifo_cond);
2740         pthread_mutex_unlock(&f->fifo_lock);
2741
2742         pthread_join(f->thread, NULL);
2743         f->joined = 1;
2744
2745         while (av_fifo_size(f->fifo)) {
2746             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2747             av_free_packet(&pkt);
2748         }
2749         av_fifo_free(f->fifo);
2750     }
2751 }
2752
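/*
 * With more than one input file, allocate a FIFO holding up to 8 packets per
 * file and start one input_thread per file; with a single input no threads
 * are created and get_input_packet() reads from the demuxer directly.
 */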
2753 static int init_input_threads(void)
2754 {
2755     int i, ret;
2756
2757     if (nb_input_files == 1)
2758         return 0;
2759
2760     for (i = 0; i < nb_input_files; i++) {
2761         InputFile *f = input_files[i];
2762
2763         if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2764             return AVERROR(ENOMEM);
2765
2766         pthread_mutex_init(&f->fifo_lock, NULL);
2767         pthread_cond_init (&f->fifo_cond, NULL);
2768
2769         if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2770             return AVERROR(ret);
2771     }
2772     return 0;
2773 }
2774
2775 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2776 {
2777     int ret = 0;
2778
2779     pthread_mutex_lock(&f->fifo_lock);
2780
2781     if (av_fifo_size(f->fifo)) {
2782         av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2783         pthread_cond_signal(&f->fifo_cond);
2784     } else {
2785         if (f->finished)
2786             ret = AVERROR_EOF;
2787         else
2788             ret = AVERROR(EAGAIN);
2789     }
2790
2791     pthread_mutex_unlock(&f->fifo_lock);
2792
2793     return ret;
2794 }
2795 #endif
2796
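/* Fetch the next packet of this input file: from the per-file FIFO when the
 * input is demuxed in a separate thread, otherwise straight from the demuxer. */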
2797 static int get_input_packet(InputFile *f, AVPacket *pkt)
2798 {
2799 #if HAVE_PTHREADS
2800     if (nb_input_files > 1)
2801         return get_input_packet_mt(f, pkt);
2802 #endif
2803     return av_read_frame(f->ctx, pkt);
2804 }
2805
2806 static int got_eagain(void)
2807 {
2808     int i;
2809     for (i = 0; i < nb_output_streams; i++)
2810         if (output_streams[i]->unavailable)
2811             return 1;
2812     return 0;
2813 }
2814
2815 static void reset_eagain(void)
2816 {
2817     int i;
2818     for (i = 0; i < nb_input_files; i++)
2819         input_files[i]->eagain = 0;
2820     for (i = 0; i < nb_output_streams; i++)
2821         output_streams[i]->unavailable = 0;
2822 }
2823
2824 /*
2825  * Return
2826  * - 0 -- one packet was read and processed
2827  * - AVERROR(EAGAIN) -- no packets were available for selected file,
2828  *   this function should be called again
2829  * - AVERROR_EOF -- this function should not be called again
2830  */
2831 static int process_input(int file_index)
2832 {
2833     InputFile *ifile = input_files[file_index];
2834     AVFormatContext *is;
2835     InputStream *ist;
2836     AVPacket pkt;
2837     int ret, i, j;
2838
2839     is  = ifile->ctx;
2840     ret = get_input_packet(ifile, &pkt);
2841
2842     if (ret == AVERROR(EAGAIN)) {
2843         ifile->eagain = 1;
2844         return ret;
2845     }
2846     if (ret < 0) {
2847         if (ret != AVERROR_EOF) {
2848             print_error(is->filename, ret);
2849             if (exit_on_error)
2850                 exit(1);
2851         }
2852         ifile->eof_reached = 1;
2853
2854         for (i = 0; i < ifile->nb_streams; i++) {
2855             ist = input_streams[ifile->ist_index + i];
2856             if (ist->decoding_needed)
2857                 output_packet(ist, NULL);
2858
2859             /* mark all outputs that don't go through lavfi as finished */
2860             for (j = 0; j < nb_output_streams; j++) {
2861                 OutputStream *ost = output_streams[j];
2862
2863                 if (ost->source_index == ifile->ist_index + i &&
2864                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2865                     close_output_stream(ost);
2866             }
2867         }
2868
2869         return AVERROR(EAGAIN);
2870     }
2871
2872     reset_eagain();
2873
2874     if (do_pkt_dump) {
2875         av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2876                          is->streams[pkt.stream_index]);
2877     }
2878     /* the following test is needed in case new streams appear
2879        dynamically in the stream: we ignore them */
2880     if (pkt.stream_index >= ifile->nb_streams) {
2881         report_new_stream(file_index, &pkt);
2882         goto discard_packet;
2883     }
2884
2885     ist = input_streams[ifile->ist_index + pkt.stream_index];
2886     if (ist->discard)
2887         goto discard_packet;
2888
2889     if (debug_ts) {
2890         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
2891                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
2892                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),
2893                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
2894                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
2895                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
2896                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
2897                av_ts2str(input_files[ist->file_index]->ts_offset),
2898                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
2899     }
2900
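    /* Timestamp wrap correction: containers with pts_wrap_bits < 64 (e.g.
     * 33-bit MPEG-TS timestamps) wrap after 2^pts_wrap_bits ticks.  A dts/pts
     * lying more than half a wrap period above the start time is assumed to
     * be a pre-wrap value and has 2^pts_wrap_bits subtracted, bringing it
     * onto the same timeline as the start time. */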
2901     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
2902         int64_t stime, stime2;
2903         // Correct the start time based on the enabled streams
2904         // FIXME: ideally this should be done before the first use of the start time, but we do not know which streams are enabled at that point,
2905         //        so we do it here instead, as part of discontinuity handling
2906         if (   ist->next_dts == AV_NOPTS_VALUE
2907             && ifile->ts_offset == -is->start_time
2908             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
2909             int64_t new_start_time = INT64_MAX;
2910             for (i=0; i<is->nb_streams; i++) {
2911                 AVStream *st = is->streams[i];
2912                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
2913                     continue;
2914                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
2915             }
2916             if (new_start_time > is->start_time) {
2917                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
2918                 ifile->ts_offset = -new_start_time;
2919             }
2920         }
2921
2922         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
2923         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
2924         ist->wrap_correction_done = 1;
2925
2926         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
2927             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
2928             ist->wrap_correction_done = 0;
2929         }
2930         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
2931             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
2932             ist->wrap_correction_done = 0;
2933         }
2934     }
2935
2936     if (pkt.dts != AV_NOPTS_VALUE)
2937         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2938     if (pkt.pts != AV_NOPTS_VALUE)
2939         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2940
2941     if (pkt.pts != AV_NOPTS_VALUE)
2942         pkt.pts *= ist->ts_scale;
2943     if (pkt.dts != AV_NOPTS_VALUE)
2944         pkt.dts *= ist->ts_scale;
2945
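    /* Timestamp discontinuity handling (skipped with -copy_ts): for formats
     * flagged AVFMT_TS_DISCONT (e.g. MPEG-TS), a dts that jumps by more than
     * dts_delta_threshold seconds against the predicted next_dts shifts the
     * file's ts_offset so the output timeline stays continuous; for other
     * formats, timestamps off by more than dts_error_threshold seconds are
     * treated as bogus and dropped (set to AV_NOPTS_VALUE). */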
2946     if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2947         !copy_ts) {
2948         int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2949         int64_t delta   = pkt_dts - ist->next_dts;
2950         if (is->iformat->flags & AVFMT_TS_DISCONT) {
2951             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
2952                 (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
2953                  ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
2954                 pkt_dts + 1 < ist->pts) {
2955                 ifile->ts_offset -= delta;
2956                 av_log(NULL, AV_LOG_DEBUG,
2957                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2958                        delta, ifile->ts_offset);
2959                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2960                 if (pkt.pts != AV_NOPTS_VALUE)
2961                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2962             }
2963         } else {
2964             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
2965                 (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)
2966                ) {
2967                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
2968                 pkt.dts = AV_NOPTS_VALUE;
2969             }
2970             if (pkt.pts != AV_NOPTS_VALUE){
2971                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
2972                 delta   = pkt_pts - ist->next_dts;
2973                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
2974                     (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)
2975                    ) {
2976                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
2977                     pkt.pts = AV_NOPTS_VALUE;
2978                 }
2979             }
2980         }
2981     }
2982
2983     if (debug_ts) {
2984         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
2985                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),
2986                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
2987                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
2988                av_ts2str(input_files[ist->file_index]->ts_offset),
2989                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
2990     }
2991
2992     sub2video_heartbeat(ist, pkt.pts);
2993
2994     ret = output_packet(ist, &pkt);
2995     if (ret < 0) {
2996         char buf[128];
2997         av_strerror(ret, buf, sizeof(buf));
2998         av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2999                 ist->file_index, ist->st->index, buf);
3000         if (exit_on_error)
3001             exit(1);
3002     }
3003
3004 discard_packet:
3005     av_free_packet(&pkt);
3006
3007     return 0;
3008 }
3009
3010 /**
3011  * Perform a step of transcoding for the specified filter graph.
3012  *
3013  * @param[in]  graph     filter graph to consider
3014  * @param[out] best_ist  input stream where a frame would allow to continue
3015  * @return  0 for success, <0 for error
3016  */
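/* The returned *best_ist is the graph input whose buffer source has reported
 * the most failed frame requests, i.e. the input most likely to unblock the
 * graph; when no input can make progress, the graph's outputs are marked as
 * unavailable for this round of the main loop. */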
3017 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3018 {
3019     int i, ret;
3020     int nb_requests, nb_requests_max = 0;
3021     InputFilter *ifilter;
3022     InputStream *ist;
3023
3024     *best_ist = NULL;
3025     ret = avfilter_graph_request_oldest(graph->graph);
3026     if (ret >= 0)
3027         return reap_filters();
3028
3029     if (ret == AVERROR_EOF) {
3030         ret = reap_filters();
3031         for (i = 0; i < graph->nb_outputs; i++)
3032             close_output_stream(graph->outputs[i]->ost);
3033         return ret;
3034     }
3035     if (ret != AVERROR(EAGAIN))
3036         return ret;
3037
3038     for (i = 0; i < graph->nb_inputs; i++) {
3039         ifilter = graph->inputs[i];
3040         ist = ifilter->ist;
3041         if (input_files[ist->file_index]->eagain ||
3042             input_files[ist->file_index]->eof_reached)
3043             continue;
3044         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3045         if (nb_requests > nb_requests_max) {
3046             nb_requests_max = nb_requests;
3047             *best_ist = ist;
3048         }
3049     }
3050
3051     if (!*best_ist)
3052         for (i = 0; i < graph->nb_outputs; i++)
3053             graph->outputs[i]->ost->unavailable = 1;
3054
3055     return 0;
3056 }
3057
3058 /**
3059  * Run a single step of transcoding.
3060  *
3061  * @return  0 for success, <0 for error
3062  */
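/* A step consists of: picking the output stream that most needs data, finding
 * an input stream that can feed it (through its filtergraph if it has one),
 * processing one packet from that input, and then draining whatever the
 * filtergraphs can already produce. */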
3063 static int transcode_step(void)
3064 {
3065     OutputStream *ost;
3066     InputStream  *ist;
3067     int ret;
3068
3069     ost = choose_output();
3070     if (!ost) {
3071         if (got_eagain()) {
3072             reset_eagain();
3073             av_usleep(10000);
3074             return 0;
3075         }
3076         av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3077         return AVERROR_EOF;
3078     }
3079
3080     if (ost->filter) {
3081         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3082             return ret;
3083         if (!ist)
3084             return 0;
3085     } else {
3086         av_assert0(ost->source_index >= 0);
3087         ist = input_streams[ost->source_index];
3088     }
3089
3090     ret = process_input(ist->file_index);
3091     if (ret == AVERROR(EAGAIN)) {
3092         if (input_files[ist->file_index]->eagain)
3093             ost->unavailable = 1;
3094         return 0;
3095     }
3096     if (ret < 0)
3097         return ret == AVERROR_EOF ? 0 : ret;
3098
3099     return reap_filters();
3100 }
3101
3102 /*
3103  * The following code is the main loop of the file converter
3104  */
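/* transcode() initialises everything, then keeps calling transcode_step()
 * until no output needs more data, a fatal error occurs or the user quits;
 * finally it flushes decoders and encoders, writes the trailers and closes
 * the codecs. */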
3105 static int transcode(void)
3106 {
3107     int ret, i;
3108     AVFormatContext *os;
3109     OutputStream *ost;
3110     InputStream *ist;
3111     int64_t timer_start;
3112
3113     ret = transcode_init();
3114     if (ret < 0)
3115         goto fail;
3116
3117     if (stdin_interaction) {
3118         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3119     }
3120
3121     timer_start = av_gettime();
3122
3123 #if HAVE_PTHREADS
3124     if ((ret = init_input_threads()) < 0)
3125         goto fail;
3126 #endif
3127
3128     while (!received_sigterm) {
3129         int64_t cur_time= av_gettime();
3130
3131         /* if 'q' was pressed, exit the loop */
3132         if (stdin_interaction)
3133             if (check_keyboard_interaction(cur_time) < 0)
3134                 break;
3135
3136         /* check if there's any stream where output is still needed */
3137         if (!need_output()) {
3138             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3139             break;
3140         }
3141
3142         ret = transcode_step();
3143         if (ret < 0) {
3144             if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
3145                 continue;
3146
3147             av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
3148             break;
3149         }
3150
3151         /* dump report by using the first output video and audio streams */
3152         print_report(0, timer_start, cur_time);
3153     }
3154 #if HAVE_PTHREADS
3155     free_input_threads();
3156 #endif
3157
3158     /* at the end of the streams, we must flush the decoder buffers */
3159     for (i = 0; i < nb_input_streams; i++) {
3160         ist = input_streams[i];
3161         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3162             output_packet(ist, NULL);
3163         }
3164     }
3165     flush_encoders();
3166
3167     term_exit();
3168
3169     /* write the trailer if needed and close the file */
3170     for (i = 0; i < nb_output_files; i++) {
3171         os = output_files[i]->ctx;
3172         av_write_trailer(os);
3173     }
3174
3175     /* dump report by using the first video and audio streams */
3176     print_report(1, timer_start, av_gettime());
3177
3178     /* close each encoder */
3179     for (i = 0; i < nb_output_streams; i++) {
3180         ost = output_streams[i];
3181         if (ost->encoding_needed) {
3182             av_freep(&ost->st->codec->stats_in);
3183             avcodec_close(ost->st->codec);
3184         }
3185     }
3186
3187     /* close each decoder */
3188     for (i = 0; i < nb_input_streams; i++) {
3189         ist = input_streams[i];
3190         if (ist->decoding_needed) {
3191             avcodec_close(ist->st->codec);
3192         }
3193     }
3194
3195     /* finished! */
3196     ret = 0;
3197
3198  fail:
3199 #if HAVE_PTHREADS
3200     free_input_threads();
3201 #endif
3202
3203     if (output_streams) {
3204         for (i = 0; i < nb_output_streams; i++) {
3205             ost = output_streams[i];
3206             if (ost) {
3207                 if (ost->stream_copy)
3208                     av_freep(&ost->st->codec->extradata);
3209                 if (ost->logfile) {
3210                     fclose(ost->logfile);
3211                     ost->logfile = NULL;
3212                 }
3213                 av_freep(&ost->st->codec->subtitle_header);
3214                 av_free(ost->forced_kf_pts);
3215                 av_dict_free(&ost->opts);
3216                 av_dict_free(&ost->swr_opts);
3217                 av_dict_free(&ost->resample_opts);
3218             }
3219         }
3220     }
3221     return ret;
3222 }
3223
3224
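/* User CPU time consumed by this process, in microseconds; falls back to
 * wall-clock time where no process-usage API is available. */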
3225 static int64_t getutime(void)
3226 {
3227 #if HAVE_GETRUSAGE
3228     struct rusage rusage;
3229
3230     getrusage(RUSAGE_SELF, &rusage);
3231     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
3232 #elif HAVE_GETPROCESSTIMES
3233     HANDLE proc;
3234     FILETIME c, e, k, u;
3235     proc = GetCurrentProcess();
3236     GetProcessTimes(proc, &c, &e, &k, &u);
3237     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
3238 #else
3239     return av_gettime();
3240 #endif
3241 }
3242
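/* Peak memory use of this process, in bytes: maximum resident set size where
 * getrusage() reports it, peak pagefile usage on Windows, 0 otherwise. */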
3243 static int64_t getmaxrss(void)
3244 {
3245 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
3246     struct rusage rusage;
3247     getrusage(RUSAGE_SELF, &rusage);
3248     return (int64_t)rusage.ru_maxrss * 1024;
3249 #elif HAVE_GETPROCESSMEMORYINFO
3250     HANDLE proc;
3251     PROCESS_MEMORY_COUNTERS memcounters;
3252     proc = GetCurrentProcess();
3253     memcounters.cb = sizeof(memcounters);
3254     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
3255     return memcounters.PeakPagefileUsage;
3256 #else
3257     return 0;
3258 #endif
3259 }
3260
3261 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
3262 {
3263 }
3264
3265 int main(int argc, char **argv)
3266 {
3267     int ret;
3268     int64_t ti;
3269
3270     atexit(exit_program);
3271
3272     setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
3273
3274     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3275     parse_loglevel(argc, argv, options);
3276
3277     if(argc>1 && !strcmp(argv[1], "-d")){
3278         run_as_daemon=1;
3279         av_log_set_callback(log_callback_null);
3280         argc--;
3281         argv++;
3282     }
3283
3284     avcodec_register_all();
3285 #if CONFIG_AVDEVICE
3286     avdevice_register_all();
3287 #endif
3288     avfilter_register_all();
3289     av_register_all();
3290     avformat_network_init();
3291
3292     show_banner(argc, argv, options);
3293
3294     term_init();
3295
3296     /* parse options and open all input/output files */
3297     ret = ffmpeg_parse_options(argc, argv);
3298     if (ret < 0)
3299         exit(1);
3300
3301     if (nb_output_files <= 0 && nb_input_files == 0) {
3302         show_usage();
3303         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3304         exit(1);
3305     }
3306
3307     /* file converter / grab */
3308     if (nb_output_files <= 0) {
3309         av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
3310         exit(1);
3311     }
3312
3313 //     if (nb_input_files == 0) {
3314 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
3315 //         exit(1);
3316 //     }
3317
3318     current_time = ti = getutime();
3319     if (transcode() < 0)
3320         exit(1);
3321     ti = getutime() - ti;
3322     if (do_benchmark) {
3323         int maxrss = getmaxrss() / 1024;
3324         printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
3325     }
3326
3327     exit(received_nb_signals ? 255 : 0);
3328     return 0;
3329 }