1 /*
2  * avconv main
3  * Copyright (c) 2000-2011 The libav developers.
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <ctype.h>
24 #include <string.h>
25 #include <math.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <signal.h>
29 #include <limits.h>
30 #include "libavformat/avformat.h"
31 #include "libavdevice/avdevice.h"
32 #include "libswscale/swscale.h"
33 #include "libavresample/avresample.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/audioconvert.h"
36 #include "libavutil/parseutils.h"
37 #include "libavutil/samplefmt.h"
38 #include "libavutil/colorspace.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/intreadwrite.h"
41 #include "libavutil/dict.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/pixdesc.h"
44 #include "libavutil/avstring.h"
45 #include "libavutil/libm.h"
46 #include "libavutil/imgutils.h"
47 #include "libavutil/time.h"
48 #include "libavformat/os_support.h"
49
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54
55 #if HAVE_SYS_RESOURCE_H
56 #include <sys/types.h>
57 #include <sys/resource.h>
58 #elif HAVE_GETPROCESSTIMES
59 #include <windows.h>
60 #endif
61 #if HAVE_GETPROCESSMEMORYINFO
62 #include <windows.h>
63 #include <psapi.h>
64 #endif
65
66 #if HAVE_SYS_SELECT_H
67 #include <sys/select.h>
68 #endif
69
70 #if HAVE_PTHREADS
71 #include <pthread.h>
72 #endif
73
74 #include <time.h>
75
76 #include "avconv.h"
77 #include "cmdutils.h"
78
79 #include "libavutil/avassert.h"
80
81 const char program_name[] = "avconv";
82 const int program_birth_year = 2000;
83
84 static FILE *vstats_file;
85
86 static int64_t video_size = 0;
87 static int64_t audio_size = 0;
88 static int64_t extra_size = 0;
89 static int nb_frames_dup = 0;
90 static int nb_frames_drop = 0;
91
92
93
94 #if HAVE_PTHREADS
95 /* signal to input threads that they should exit; set by the main thread */
96 static int transcoding_finished;
97 #endif
98
99 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
100
101 InputStream **input_streams = NULL;
102 int        nb_input_streams = 0;
103 InputFile   **input_files   = NULL;
104 int        nb_input_files   = 0;
105
106 OutputStream **output_streams = NULL;
107 int         nb_output_streams = 0;
108 OutputFile   **output_files   = NULL;
109 int         nb_output_files   = 0;
110
111 FilterGraph **filtergraphs;
112 int        nb_filtergraphs;
113
114 static void term_exit(void)
115 {
116     av_log(NULL, AV_LOG_QUIET, "");
117 }
118
119 static volatile int received_sigterm = 0;
120 static volatile int received_nb_signals = 0;
121
122 static void
123 sigterm_handler(int sig)
124 {
125     received_sigterm = sig;
126     received_nb_signals++;
127     term_exit();
128 }
129
130 static void term_init(void)
131 {
132     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
133     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
134 #ifdef SIGXCPU
135     signal(SIGXCPU, sigterm_handler);
136 #endif
137 }
138
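/* Interrupt callback for blocking I/O: abort once more than one
 * termination signal has been received. */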
139 static int decode_interrupt_cb(void *ctx)
140 {
141     return received_nb_signals > 1;
142 }
143
144 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
145
146 static void exit_program(void)
147 {
148     int i, j;
149
150     for (i = 0; i < nb_filtergraphs; i++) {
151         avfilter_graph_free(&filtergraphs[i]->graph);
152         for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
153             av_freep(&filtergraphs[i]->inputs[j]->name);
154             av_freep(&filtergraphs[i]->inputs[j]);
155         }
156         av_freep(&filtergraphs[i]->inputs);
157         for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
158             av_freep(&filtergraphs[i]->outputs[j]->name);
159             av_freep(&filtergraphs[i]->outputs[j]);
160         }
161         av_freep(&filtergraphs[i]->outputs);
162         av_freep(&filtergraphs[i]);
163     }
164     av_freep(&filtergraphs);
165
166     /* close files */
167     for (i = 0; i < nb_output_files; i++) {
168         AVFormatContext *s = output_files[i]->ctx;
169         if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
170             avio_close(s->pb);
171         avformat_free_context(s);
172         av_dict_free(&output_files[i]->opts);
173         av_freep(&output_files[i]);
174     }
175     for (i = 0; i < nb_output_streams; i++) {
176         AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
177         while (bsfc) {
178             AVBitStreamFilterContext *next = bsfc->next;
179             av_bitstream_filter_close(bsfc);
180             bsfc = next;
181         }
182         output_streams[i]->bitstream_filters = NULL;
183         avcodec_free_frame(&output_streams[i]->filtered_frame);
184
185         av_freep(&output_streams[i]->forced_keyframes);
186         av_freep(&output_streams[i]->avfilter);
187         av_freep(&output_streams[i]->logfile_prefix);
188         av_freep(&output_streams[i]);
189     }
190     for (i = 0; i < nb_input_files; i++) {
191         avformat_close_input(&input_files[i]->ctx);
192         av_freep(&input_files[i]);
193     }
194     for (i = 0; i < nb_input_streams; i++) {
195         avcodec_free_frame(&input_streams[i]->decoded_frame);
196         av_dict_free(&input_streams[i]->opts);
197         free_buffer_pool(&input_streams[i]->buffer_pool);
198         av_freep(&input_streams[i]->filters);
199         av_freep(&input_streams[i]);
200     }
201
202     if (vstats_file)
203         fclose(vstats_file);
204     av_free(vstats_filename);
205
206     av_freep(&input_streams);
207     av_freep(&input_files);
208     av_freep(&output_streams);
209     av_freep(&output_files);
210
211     uninit_opts();
212
213     avfilter_uninit();
214     avformat_network_deinit();
215
216     if (received_sigterm) {
217         av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
218                (int) received_sigterm);
219         exit (255);
220     }
221 }
222
223 void assert_avoptions(AVDictionary *m)
224 {
225     AVDictionaryEntry *t;
226     if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
227         av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
228         exit(1);
229     }
230 }
231
232 static void assert_codec_experimental(AVCodecContext *c, int encoder)
233 {
234     const char *codec_string = encoder ? "encoder" : "decoder";
235     AVCodec *codec;
236     if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
237         c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
238         av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
239                 "results.\nAdd '-strict experimental' if you want to use it.\n",
240                 codec_string, c->codec->name);
241         codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
242         if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
243             av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
244                    codec_string, codec->name);
245         exit(1);
246     }
247 }
248
249 /**
250  * Update the requested input sample format based on the output sample format.
251  * This is currently only used to request float output from decoders which
252  * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
253  * Ideally this will be removed in the future when decoders do not do format
254  * conversion and only output in their native format.
255  */
256 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
257                               AVCodecContext *enc)
258 {
259     /* if sample formats match or a decoder sample format has already been
260        requested, just return */
261     if (enc->sample_fmt == dec->sample_fmt ||
262         dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
263         return;
264
265     /* if the decoder supports more than one output format */
266     if (dec_codec && dec_codec->sample_fmts &&
267         dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
268         dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
269         const enum AVSampleFormat *p;
270         int min_dec = INT_MAX, min_inc = INT_MAX;
271         enum AVSampleFormat dec_fmt = AV_SAMPLE_FMT_NONE;
272         enum AVSampleFormat inc_fmt = AV_SAMPLE_FMT_NONE;
273
274         /* look for the encoder's sample format among those supported by the decoder */
275         for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
276             if (*p == enc->sample_fmt) {
277                 dec->request_sample_fmt = *p;
278                 return;
279             } else {
280                 enum AVSampleFormat dfmt = av_get_packed_sample_fmt(*p);
281                 enum AVSampleFormat efmt = av_get_packed_sample_fmt(enc->sample_fmt);
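                /* score how far this decoder format is from the encoder format:
                 * the distance between the packed formats dominates (weight 32),
                 * with an extra penalty of 1 if the planar/packed layout differs */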
282                 int fmt_diff = 32 * abs(dfmt - efmt);
283                 if (av_sample_fmt_is_planar(*p) !=
284                     av_sample_fmt_is_planar(enc->sample_fmt))
285                     fmt_diff++;
286                 if (dfmt == efmt) {
287                     min_inc = fmt_diff;
288                     inc_fmt = *p;
289                 } else if (dfmt > efmt) {
290                     if (fmt_diff < min_inc) {
291                         min_inc = fmt_diff;
292                         inc_fmt = *p;
293                     }
294                 } else {
295                     if (fmt_diff < min_dec) {
296                         min_dec = fmt_diff;
297                         dec_fmt = *p;
298                     }
299                 }
300             }
301         }
302
303         /* if none matches exactly, request the format closest in quality */
304         dec->request_sample_fmt = min_inc != INT_MAX ? inc_fmt : dec_fmt;
305     }
306 }
307
308 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
309 {
310     AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
311     AVCodecContext          *avctx = ost->st->codec;
312     int ret;
313
314     /*
315      * Audio encoders may split the packets --  #frames in != #packets out.
316      * But there is no reordering, so we can limit the number of output packets
317      * by simply dropping them here.
318      * Counting encoded video frames needs to be done separately because of
319      * reordering, see do_video_out()
320      */
321     if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
322         if (ost->frame_number >= ost->max_frames) {
323             av_free_packet(pkt);
324             return;
325         }
326         ost->frame_number++;
327     }
328
329     while (bsfc) {
330         AVPacket new_pkt = *pkt;
331         int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
332                                            &new_pkt.data, &new_pkt.size,
333                                            pkt->data, pkt->size,
334                                            pkt->flags & AV_PKT_FLAG_KEY);
335         if (a > 0) {
336             av_free_packet(pkt);
337             new_pkt.destruct = av_destruct_packet;
338         } else if (a < 0) {
339             av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
340                    bsfc->filter->name, pkt->stream_index,
341                    avctx->codec ? avctx->codec->name : "copy");
342             print_error("", a);
343             if (exit_on_error)
344                 exit(1);
345         }
346         *pkt = new_pkt;
347
348         bsfc = bsfc->next;
349     }
350
351     pkt->stream_index = ost->index;
352     ret = av_interleaved_write_frame(s, pkt);
353     if (ret < 0) {
354         print_error("av_interleaved_write_frame()", ret);
355         exit(1);
356     }
357 }
358
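/* Return 1 while encoding should continue; once the output file's requested
 * recording time (-t) is reached, mark the stream as finished and return 0. */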
359 static int check_recording_time(OutputStream *ost)
360 {
361     OutputFile *of = output_files[ost->file_index];
362
363     if (of->recording_time != INT64_MAX &&
364         av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
365                       AV_TIME_BASE_Q) >= 0) {
366         ost->finished = 1;
367         return 0;
368     }
369     return 1;
370 }
371
372 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
373                          AVFrame *frame)
374 {
375     AVCodecContext *enc = ost->st->codec;
376     AVPacket pkt;
377     int got_packet = 0;
378
379     av_init_packet(&pkt);
380     pkt.data = NULL;
381     pkt.size = 0;
382
383     if (!check_recording_time(ost))
384         return;
385
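    /* if the frame carries no pts (or audio_sync_method is negative), use the
     * running count instead; sync_opts then points at the pts expected for the
     * next frame, i.e. the current pts plus this frame's sample count */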
386     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
387         frame->pts = ost->sync_opts;
388     ost->sync_opts = frame->pts + frame->nb_samples;
389
390     if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
391         av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
392         exit(1);
393     }
394
395     if (got_packet) {
396         if (pkt.pts != AV_NOPTS_VALUE)
397             pkt.pts      = av_rescale_q(pkt.pts,      enc->time_base, ost->st->time_base);
398         if (pkt.dts != AV_NOPTS_VALUE)
399             pkt.dts      = av_rescale_q(pkt.dts,      enc->time_base, ost->st->time_base);
400         if (pkt.duration > 0)
401             pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
402
403         write_frame(s, &pkt, ost);
404
405         audio_size += pkt.size;
406     }
407 }
408
409 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
410 {
411     AVCodecContext *dec;
412     AVPicture *picture2;
413     AVPicture picture_tmp;
414     uint8_t *buf = 0;
415
416     dec = ist->st->codec;
417
418     /* deinterlace: must be done before any resize */
419     if (do_deinterlace) {
420         int size;
421
422         /* create temporary picture */
423         size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
424         buf  = av_malloc(size);
425         if (!buf)
426             return;
427
428         picture2 = &picture_tmp;
429         avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
430
431         if (avpicture_deinterlace(picture2, picture,
432                                  dec->pix_fmt, dec->width, dec->height) < 0) {
433             /* if error, do not deinterlace */
434             av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
435             av_free(buf);
436             buf = NULL;
437             picture2 = picture;
438         }
439     } else {
440         picture2 = picture;
441     }
442
443     if (picture != picture2)
444         *picture = *picture2;
445     *bufp = buf;
446 }
447
448 static void do_subtitle_out(AVFormatContext *s,
449                             OutputStream *ost,
450                             InputStream *ist,
451                             AVSubtitle *sub,
452                             int64_t pts)
453 {
454     static uint8_t *subtitle_out = NULL;
455     int subtitle_out_max_size = 1024 * 1024;
456     int subtitle_out_size, nb, i;
457     AVCodecContext *enc;
458     AVPacket pkt;
459
460     if (pts == AV_NOPTS_VALUE) {
461         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
462         if (exit_on_error)
463             exit(1);
464         return;
465     }
466
467     enc = ost->st->codec;
468
469     if (!subtitle_out) {
470         subtitle_out = av_malloc(subtitle_out_max_size);
471     }
472
473     /* Note: DVB subtitles need one packet to draw them and another
474        packet to clear them. */
475     /* XXX: signal it in the codec context? */
476     if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
477         nb = 2;
478     else
479         nb = 1;
480
481     for (i = 0; i < nb; i++) {
482         ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
483         if (!check_recording_time(ost))
484             return;
485
486         sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
487         // start_display_time is required to be 0
488         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
489         sub->end_display_time  -= sub->start_display_time;
490         sub->start_display_time = 0;
491         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
492                                                     subtitle_out_max_size, sub);
493         if (subtitle_out_size < 0) {
494             av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
495             exit(1);
496         }
497
498         av_init_packet(&pkt);
499         pkt.data = subtitle_out;
500         pkt.size = subtitle_out_size;
501         pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
502         if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
503             /* XXX: the pts correction is handled here. Maybe handling
504                it in the codec would be better */
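            /* display times are in milliseconds; multiplying by 90 converts
               them to the 90 kHz time base assumed for DVB subtitle streams */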
505             if (i == 0)
506                 pkt.pts += 90 * sub->start_display_time;
507             else
508                 pkt.pts += 90 * sub->end_display_time;
509         }
510         write_frame(s, &pkt, ost);
511     }
512 }
513
514 static void do_video_out(AVFormatContext *s,
515                          OutputStream *ost,
516                          AVFrame *in_picture,
517                          int *frame_size, float quality)
518 {
519     int ret, format_video_sync;
520     AVPacket pkt;
521     AVCodecContext *enc = ost->st->codec;
522
523     *frame_size = 0;
524
525     format_video_sync = video_sync_method;
526     if (format_video_sync == VSYNC_AUTO)
527         format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
528                             (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
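    /* unless timestamps are passed through untouched, drop a frame whose pts
       is behind the next expected output pts (it would go backwards in time) */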
529     if (format_video_sync != VSYNC_PASSTHROUGH &&
530         ost->frame_number &&
531         in_picture->pts != AV_NOPTS_VALUE &&
532         in_picture->pts < ost->sync_opts) {
533         nb_frames_drop++;
534         av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
535         return;
536     }
537
538     if (in_picture->pts == AV_NOPTS_VALUE)
539         in_picture->pts = ost->sync_opts;
540     ost->sync_opts = in_picture->pts;
541
542
543     if (!ost->frame_number)
544         ost->first_pts = in_picture->pts;
545
546     av_init_packet(&pkt);
547     pkt.data = NULL;
548     pkt.size = 0;
549
550     if (!check_recording_time(ost) ||
551         ost->frame_number >= ost->max_frames)
552         return;
553
554     if (s->oformat->flags & AVFMT_RAWPICTURE &&
555         enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
556         /* raw pictures are written as an AVPicture structure to
557            avoid any copies. We temporarily support the older
558            method. */
559         enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
560         enc->coded_frame->top_field_first  = in_picture->top_field_first;
561         pkt.data   = (uint8_t *)in_picture;
562         pkt.size   =  sizeof(AVPicture);
563         pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
564         pkt.flags |= AV_PKT_FLAG_KEY;
565
566         write_frame(s, &pkt, ost);
567     } else {
568         int got_packet;
569         AVFrame big_picture;
570
571         big_picture = *in_picture;
572         /* better than nothing: use input picture interlaced
573            settings */
574         big_picture.interlaced_frame = in_picture->interlaced_frame;
575         if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
576             if (ost->top_field_first == -1)
577                 big_picture.top_field_first = in_picture->top_field_first;
578             else
579                 big_picture.top_field_first = !!ost->top_field_first;
580         }
581
582         /* handle same_quant here. This is not correct because it may
583            not be a global option */
584         big_picture.quality = quality;
585         if (!enc->me_threshold)
586             big_picture.pict_type = 0;
587         if (ost->forced_kf_index < ost->forced_kf_count &&
588             big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
589             big_picture.pict_type = AV_PICTURE_TYPE_I;
590             ost->forced_kf_index++;
591         }
592         ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
593         if (ret < 0) {
594             av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
595             exit(1);
596         }
597
598         if (got_packet) {
599             if (pkt.pts != AV_NOPTS_VALUE)
600                 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
601             if (pkt.dts != AV_NOPTS_VALUE)
602                 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
603
604             write_frame(s, &pkt, ost);
605             *frame_size = pkt.size;
606             video_size += pkt.size;
607
608             /* if two pass, output log */
609             if (ost->logfile && enc->stats_out) {
610                 fprintf(ost->logfile, "%s", enc->stats_out);
611             }
612         }
613     }
614     ost->sync_opts++;
615     /*
616      * For video, number of frames in == number of packets out.
617      * But there may be reordering, so we can't throw away frames on encoder
618      * flush; we need to limit them here, before they go into the encoder.
619      */
620     ost->frame_number++;
621 }
622
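/* Convert a mean squared error that has been normalized to the peak signal
 * energy into a PSNR value in dB. */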
623 static double psnr(double d)
624 {
625     return -10.0 * log(d) / log(10.0);
626 }
627
628 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
629                            int frame_size)
630 {
631     AVCodecContext *enc;
632     int frame_number;
633     double ti1, bitrate, avg_bitrate;
634
635     /* this is executed just the first time do_video_stats is called */
636     if (!vstats_file) {
637         vstats_file = fopen(vstats_filename, "w");
638         if (!vstats_file) {
639             perror("fopen");
640             exit(1);
641         }
642     }
643
644     enc = ost->st->codec;
645     if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
646         frame_number = ost->frame_number;
647         fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
648         if (enc->flags&CODEC_FLAG_PSNR)
649             fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
650
651         fprintf(vstats_file,"f_size= %6d ", frame_size);
652         /* compute pts value */
653         ti1 = ost->sync_opts * av_q2d(enc->time_base);
654         if (ti1 < 0.01)
655             ti1 = 0.01;
656
657         bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
658         avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
659         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
660                (double)video_size / 1024, ti1, bitrate, avg_bitrate);
661         fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
662     }
663 }
664
665 /**
666  * Read one frame for lavfi output for ost and encode it.
667  */
668 static int poll_filter(OutputStream *ost)
669 {
670     OutputFile    *of = output_files[ost->file_index];
671     AVFilterBufferRef *picref;
672     AVFrame *filtered_frame = NULL;
673     int frame_size, ret;
674
675     if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
676         return AVERROR(ENOMEM);
677     } else
678         avcodec_get_frame_defaults(ost->filtered_frame);
679     filtered_frame = ost->filtered_frame;
680
681     if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
682         !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
683         ret = av_buffersink_read_samples(ost->filter->filter, &picref,
684                                          ost->st->codec->frame_size);
685     else
686         ret = av_buffersink_read(ost->filter->filter, &picref);
687
688     if (ret < 0)
689         return ret;
690
691     avfilter_copy_buf_props(filtered_frame, picref);
692     if (picref->pts != AV_NOPTS_VALUE) {
693         filtered_frame->pts = av_rescale_q(picref->pts,
694                                            ost->filter->filter->inputs[0]->time_base,
695                                            ost->st->codec->time_base) -
696                               av_rescale_q(of->start_time,
697                                            AV_TIME_BASE_Q,
698                                            ost->st->codec->time_base);
699
700         if (of->start_time && filtered_frame->pts < 0) {
701             avfilter_unref_buffer(picref);
702             return 0;
703         }
704     }
705
706     switch (ost->filter->filter->inputs[0]->type) {
707     case AVMEDIA_TYPE_VIDEO:
708         if (!ost->frame_aspect_ratio)
709             ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
710
711         do_video_out(of->ctx, ost, filtered_frame, &frame_size,
712                      same_quant ? ost->last_quality :
713                                   ost->st->codec->global_quality);
714         if (vstats_filename && frame_size)
715             do_video_stats(of->ctx, ost, frame_size);
716         break;
717     case AVMEDIA_TYPE_AUDIO:
718         do_audio_out(of->ctx, ost, filtered_frame);
719         break;
720     default:
721         // TODO support subtitle filters
722         av_assert0(0);
723     }
724
725     avfilter_unref_buffer(picref);
726
727     return 0;
728 }
729
730 /**
731  * Read as many frames as possible from lavfi and encode them.
732  *
733  * Always read from the active stream with the lowest timestamp. If no frames
734  * are available for it, return EAGAIN and wait for more input. This way we
735  * can use lavfi sources that generate an unlimited number of frames without memory
736  * usage exploding.
737  */
738 static int poll_filters(void)
739 {
740     int i, j, ret = 0;
741
742     while (ret >= 0 && !received_sigterm) {
743         OutputStream *ost = NULL;
744         int64_t min_pts = INT64_MAX;
745
746         /* choose output stream with the lowest timestamp */
747         for (i = 0; i < nb_output_streams; i++) {
748             int64_t pts = output_streams[i]->sync_opts;
749
750             if (!output_streams[i]->filter || output_streams[i]->finished)
751                 continue;
752
753             pts = av_rescale_q(pts, output_streams[i]->st->codec->time_base,
754                                AV_TIME_BASE_Q);
755             if (pts < min_pts) {
756                 min_pts = pts;
757                 ost = output_streams[i];
758             }
759         }
760
761         if (!ost)
762             break;
763
764         ret = poll_filter(ost);
765
766         if (ret == AVERROR_EOF) {
767             OutputFile *of = output_files[ost->file_index];
768
769             ost->finished = 1;
770
771             if (of->shortest) {
772                 for (j = 0; j < of->ctx->nb_streams; j++)
773                     output_streams[of->ost_index + j]->finished = 1;
774             }
775
776             ret = 0;
777         } else if (ret == AVERROR(EAGAIN))
778             return 0;
779     }
780
781     return ret;
782 }
783
784 static void print_report(int is_last_report, int64_t timer_start)
785 {
786     char buf[1024];
787     OutputStream *ost;
788     AVFormatContext *oc;
789     int64_t total_size;
790     AVCodecContext *enc;
791     int frame_number, vid, i;
792     double bitrate, ti1, pts;
793     static int64_t last_time = -1;
794     static int qp_histogram[52];
795
796     if (!print_stats && !is_last_report)
797         return;
798
799     if (!is_last_report) {
800         int64_t cur_time;
801         /* display the report every 0.5 seconds */
802         cur_time = av_gettime();
803         if (last_time == -1) {
804             last_time = cur_time;
805             return;
806         }
807         if ((cur_time - last_time) < 500000)
808             return;
809         last_time = cur_time;
810     }
811
812
813     oc = output_files[0]->ctx;
814
815     total_size = avio_size(oc->pb);
816     if (total_size < 0) // FIXME improve avio_size() so it works with non-seekable output too
817         total_size = avio_tell(oc->pb);
818
819     buf[0] = '\0';
820     ti1 = 1e10;
821     vid = 0;
822     for (i = 0; i < nb_output_streams; i++) {
823         float q = -1;
824         ost = output_streams[i];
825         enc = ost->st->codec;
826         if (!ost->stream_copy && enc->coded_frame)
827             q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
828         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
829             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
830         }
831         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
832             float t = (av_gettime() - timer_start) / 1000000.0;
833
834             frame_number = ost->frame_number;
835             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
836                      frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
837             if (is_last_report)
838                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
839             if (qp_hist) {
840                 int j;
841                 int qp = lrintf(q);
842                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
843                     qp_histogram[qp]++;
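                /* print the QP histogram as one hex digit per bin, log2-compressed
                   so that large counts still fit in a single character */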
844                 for (j = 0; j < 32; j++)
845                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
846             }
847             if (enc->flags&CODEC_FLAG_PSNR) {
848                 int j;
849                 double error, error_sum = 0;
850                 double scale, scale_sum = 0;
851                 char type[3] = { 'Y','U','V' };
852                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
853                 for (j = 0; j < 3; j++) {
854                     if (is_last_report) {
855                         error = enc->error[j];
856                         scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
857                     } else {
858                         error = enc->coded_frame->error[j];
859                         scale = enc->width * enc->height * 255.0 * 255.0;
860                     }
861                     if (j)
862                         scale /= 4;
863                     error_sum += error;
864                     scale_sum += scale;
865                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
866                 }
867                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
868             }
869             vid = 1;
870         }
871         /* keep track of the smallest positive output pts; it is used below as the elapsed time */
872         pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
873         if ((pts < ti1) && (pts > 0))
874             ti1 = pts;
875     }
876     if (ti1 < 0.01)
877         ti1 = 0.01;
878
879     bitrate = (double)(total_size * 8) / ti1 / 1000.0;
880
881     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
882             "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
883             (double)total_size / 1024, ti1, bitrate);
884
885     if (nb_frames_dup || nb_frames_drop)
886         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
887                 nb_frames_dup, nb_frames_drop);
888
889     av_log(NULL, AV_LOG_INFO, "%s    \r", buf);
890
891     fflush(stderr);
892
893     if (is_last_report) {
894         int64_t raw= audio_size + video_size + extra_size;
895         av_log(NULL, AV_LOG_INFO, "\n");
896         av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
897                video_size / 1024.0,
898                audio_size / 1024.0,
899                extra_size / 1024.0,
900                100.0 * (total_size - raw) / raw
901         );
902     }
903 }
904
905 static void flush_encoders(void)
906 {
907     int i, ret;
908
909     for (i = 0; i < nb_output_streams; i++) {
910         OutputStream   *ost = output_streams[i];
911         AVCodecContext *enc = ost->st->codec;
912         AVFormatContext *os = output_files[ost->file_index]->ctx;
913         int stop_encoding = 0;
914
915         if (!ost->encoding_needed)
916             continue;
917
918         if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
919             continue;
920         if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
921             continue;
922
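        /* feed NULL frames to the encoder until it stops returning packets,
           draining anything it still has buffered */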
923         for (;;) {
924             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
925             const char *desc;
926             int64_t *size;
927
928             switch (ost->st->codec->codec_type) {
929             case AVMEDIA_TYPE_AUDIO:
930                 encode = avcodec_encode_audio2;
931                 desc   = "Audio";
932                 size   = &audio_size;
933                 break;
934             case AVMEDIA_TYPE_VIDEO:
935                 encode = avcodec_encode_video2;
936                 desc   = "Video";
937                 size   = &video_size;
938                 break;
939             default:
940                 stop_encoding = 1;
941             }
942
943             if (encode) {
944                 AVPacket pkt;
945                 int got_packet;
946                 av_init_packet(&pkt);
947                 pkt.data = NULL;
948                 pkt.size = 0;
949
950                 ret = encode(enc, &pkt, NULL, &got_packet);
951                 if (ret < 0) {
952                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
953                     exit(1);
954                 }
955                 *size += ret;
956                 if (ost->logfile && enc->stats_out) {
957                     fprintf(ost->logfile, "%s", enc->stats_out);
958                 }
959                 if (!got_packet) {
960                     stop_encoding = 1;
961                     break;
962                 }
963                 if (pkt.pts != AV_NOPTS_VALUE)
964                     pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
965                 if (pkt.dts != AV_NOPTS_VALUE)
966                     pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
967                 write_frame(os, &pkt, ost);
968             }
969
970             if (stop_encoding)
971                 break;
972         }
973     }
974 }
975
976 /*
977  * Check whether a packet from ist should be written into ost at this time
978  */
979 static int check_output_constraints(InputStream *ist, OutputStream *ost)
980 {
981     OutputFile *of = output_files[ost->file_index];
982     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
983
984     if (ost->source_index != ist_index)
985         return 0;
986
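    /* skip packets that precede the requested output start time */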
987     if (of->start_time && ist->last_dts < of->start_time)
988         return 0;
989
990     return 1;
991 }
992
993 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
994 {
995     OutputFile *of = output_files[ost->file_index];
996     int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
997     AVPacket opkt;
998
999     av_init_packet(&opkt);
1000
1001     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1002         !ost->copy_initial_nonkeyframes)
1003         return;
1004
1005     if (of->recording_time != INT64_MAX &&
1006         ist->last_dts >= of->recording_time + of->start_time) {
1007         ost->finished = 1;
1008         return;
1009     }
1010
1011     /* force the input stream PTS */
1012     if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1013         audio_size += pkt->size;
1014     else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1015         video_size += pkt->size;
1016         ost->sync_opts++;
1017     }
1018
1019     if (pkt->pts != AV_NOPTS_VALUE)
1020         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1021     else
1022         opkt.pts = AV_NOPTS_VALUE;
1023
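    /* if the input packet has no dts, synthesize one from the last dts seen
       on the input stream */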
1024     if (pkt->dts == AV_NOPTS_VALUE)
1025         opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1026     else
1027         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1028     opkt.dts -= ost_tb_start_time;
1029
1030     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1031     opkt.flags    = pkt->flags;
1032
1033     // FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
1034     if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
1035        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1036        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1037        && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1038        ) {
1039         if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
1040             opkt.destruct = av_destruct_packet;
1041     } else {
1042         opkt.data = pkt->data;
1043         opkt.size = pkt->size;
1044     }
1045
1046     write_frame(of->ctx, &opkt, ost);
1047     ost->st->codec->frame_number++;
1048     av_free_packet(&opkt);
1049 }
1050
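/* With -re (rate emulation), throttle reading so that input is consumed in
 * realtime: sleep until the wall clock catches up with the stream's last dts. */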
1051 static void rate_emu_sleep(InputStream *ist)
1052 {
1053     if (input_files[ist->file_index]->rate_emu) {
1054         int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
1055         int64_t now = av_gettime() - ist->start;
1056         if (pts > now)
1057             av_usleep(pts - now);
1058     }
1059 }
1060
1061 int guess_input_channel_layout(InputStream *ist)
1062 {
1063     AVCodecContext *dec = ist->st->codec;
1064
1065     if (!dec->channel_layout) {
1066         char layout_name[256];
1067
1068         dec->channel_layout = av_get_default_channel_layout(dec->channels);
1069         if (!dec->channel_layout)
1070             return 0;
1071         av_get_channel_layout_string(layout_name, sizeof(layout_name),
1072                                      dec->channels, dec->channel_layout);
1073         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1074                "#%d.%d: %s\n", ist->file_index, ist->st->index, layout_name);
1075     }
1076     return 1;
1077 }
1078
1079 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1080 {
1081     AVFrame *decoded_frame;
1082     AVCodecContext *avctx = ist->st->codec;
1083     int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
1084     int i, ret, resample_changed;
1085
1086     if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1087         return AVERROR(ENOMEM);
1088     else
1089         avcodec_get_frame_defaults(ist->decoded_frame);
1090     decoded_frame = ist->decoded_frame;
1091
1092     ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1093     if (!*got_output || ret < 0) {
1094         if (!pkt->size) {
1095             for (i = 0; i < ist->nb_filters; i++)
1096                 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
1097         }
1098         return ret;
1099     }
1100
1101     /* if the decoder provides a pts, use it instead of the last packet pts.
1102        the decoder could be delaying output by a packet or more. */
1103     if (decoded_frame->pts != AV_NOPTS_VALUE)
1104         ist->next_dts = decoded_frame->pts;
1105     else if (pkt->pts != AV_NOPTS_VALUE) {
1106         decoded_frame->pts = pkt->pts;
1107         pkt->pts           = AV_NOPTS_VALUE;
1108     }
1109
1110     // preprocess audio (volume)
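    // audio_volume is a fixed-point gain where 256 means unity, hence the
    // (x * audio_volume + 128) >> 8 scaling below (and / 256. for float/double)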
1111     if (audio_volume != 256) {
1112         int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
1113         void *samples = decoded_frame->data[0];
1114         switch (avctx->sample_fmt) {
1115         case AV_SAMPLE_FMT_U8:
1116         {
1117             uint8_t *volp = samples;
1118             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1119                 int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
1120                 *volp++ = av_clip_uint8(v);
1121             }
1122             break;
1123         }
1124         case AV_SAMPLE_FMT_S16:
1125         {
1126             int16_t *volp = samples;
1127             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1128                 int v = ((*volp) * audio_volume + 128) >> 8;
1129                 *volp++ = av_clip_int16(v);
1130             }
1131             break;
1132         }
1133         case AV_SAMPLE_FMT_S32:
1134         {
1135             int32_t *volp = samples;
1136             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1137                 int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
1138                 *volp++ = av_clipl_int32(v);
1139             }
1140             break;
1141         }
1142         case AV_SAMPLE_FMT_FLT:
1143         {
1144             float *volp = samples;
1145             float scale = audio_volume / 256.f;
1146             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1147                 *volp++ *= scale;
1148             }
1149             break;
1150         }
1151         case AV_SAMPLE_FMT_DBL:
1152         {
1153             double *volp = samples;
1154             double scale = audio_volume / 256.;
1155             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1156                 *volp++ *= scale;
1157             }
1158             break;
1159         }
1160         default:
1161             av_log(NULL, AV_LOG_FATAL,
1162                    "Audio volume adjustment on sample format %s is not supported.\n",
1163                    av_get_sample_fmt_name(ist->st->codec->sample_fmt));
1164             exit(1);
1165         }
1166     }
1167
1168     rate_emu_sleep(ist);
1169
1170     resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
1171                        ist->resample_channels       != avctx->channels               ||
1172                        ist->resample_channel_layout != decoded_frame->channel_layout ||
1173                        ist->resample_sample_rate    != decoded_frame->sample_rate;
1174     if (resample_changed) {
1175         char layout1[64], layout2[64];
1176
1177         if (!guess_input_channel_layout(ist)) {
1178             av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1179                    "layout for Input Stream #%d.%d\n", ist->file_index,
1180                    ist->st->index);
1181             exit(1);
1182         }
1183         decoded_frame->channel_layout = avctx->channel_layout;
1184
1185         av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1186                                      ist->resample_channel_layout);
1187         av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1188                                      decoded_frame->channel_layout);
1189
1190         av_log(NULL, AV_LOG_INFO,
1191                "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1192                ist->file_index, ist->st->index,
1193                ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
1194                ist->resample_channels, layout1,
1195                decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1196                avctx->channels, layout2);
1197
1198         ist->resample_sample_fmt     = decoded_frame->format;
1199         ist->resample_sample_rate    = decoded_frame->sample_rate;
1200         ist->resample_channel_layout = decoded_frame->channel_layout;
1201         ist->resample_channels       = avctx->channels;
1202
1203         for (i = 0; i < nb_filtergraphs; i++)
1204             if (ist_in_filtergraph(filtergraphs[i], ist) &&
1205                 configure_filtergraph(filtergraphs[i]) < 0) {
1206                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1207                 exit(1);
1208             }
1209     }
1210
1211     if (decoded_frame->pts != AV_NOPTS_VALUE)
1212         decoded_frame->pts = av_rescale_q(decoded_frame->pts,
1213                                           ist->st->time_base,
1214                                           (AVRational){1, ist->st->codec->sample_rate});
1215     for (i = 0; i < ist->nb_filters; i++)
1216         av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
1217
1218     return ret;
1219 }
1220
1221 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1222 {
1223     AVFrame *decoded_frame;
1224     void *buffer_to_free = NULL;
1225     int i, ret = 0, resample_changed;
1226     float quality;
1227
1228     if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1229         return AVERROR(ENOMEM);
1230     else
1231         avcodec_get_frame_defaults(ist->decoded_frame);
1232     decoded_frame = ist->decoded_frame;
1233
1234     ret = avcodec_decode_video2(ist->st->codec,
1235                                 decoded_frame, got_output, pkt);
1236     if (!*got_output || ret < 0) {
1237         if (!pkt->size) {
1238             for (i = 0; i < ist->nb_filters; i++)
1239                 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
1240         }
1241         return ret;
1242     }
1243
1244     quality = same_quant ? decoded_frame->quality : 0;
1245     decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
1246                                            decoded_frame->pkt_dts);
1247     pkt->size = 0;
1248     pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
1249
1250     rate_emu_sleep(ist);
1251
1252     if (ist->st->sample_aspect_ratio.num)
1253         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1254
1255     resample_changed = ist->resample_width   != decoded_frame->width  ||
1256                        ist->resample_height  != decoded_frame->height ||
1257                        ist->resample_pix_fmt != decoded_frame->format;
1258     if (resample_changed) {
1259         av_log(NULL, AV_LOG_INFO,
1260                "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1261                ist->file_index, ist->st->index,
1262                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
1263                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1264
1265         ret = poll_filters();
1266         if (ret < 0 && (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)))
1267             av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
1268
1269         ist->resample_width   = decoded_frame->width;
1270         ist->resample_height  = decoded_frame->height;
1271         ist->resample_pix_fmt = decoded_frame->format;
1272
1273         for (i = 0; i < nb_filtergraphs; i++)
1274             if (ist_in_filtergraph(filtergraphs[i], ist) &&
1275                 configure_filtergraph(filtergraphs[i]) < 0) {
1276                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1277                 exit(1);
1278             }
1279     }
1280
1281     for (i = 0; i < ist->nb_filters; i++) {
1282         // XXX what an ugly hack
1283         if (ist->filters[i]->graph->nb_outputs == 1)
1284             ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
1285
1286         if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
1287             FrameBuffer      *buf = decoded_frame->opaque;
1288             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1289                                         decoded_frame->data, decoded_frame->linesize,
1290                                         AV_PERM_READ | AV_PERM_PRESERVE,
1291                                         ist->st->codec->width, ist->st->codec->height,
1292                                         ist->st->codec->pix_fmt);
1293
1294             avfilter_copy_frame_props(fb, decoded_frame);
1295             fb->buf->priv           = buf;
1296             fb->buf->free           = filter_release_buffer;
1297
1298             buf->refcount++;
1299             av_buffersrc_buffer(ist->filters[i]->filter, fb);
1300         } else
1301             av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
1302     }
1303
1304     av_free(buffer_to_free);
1305     return ret;
1306 }
1307
1308 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1309 {
1310     AVSubtitle subtitle;
1311     int i, ret = avcodec_decode_subtitle2(ist->st->codec,
1312                                           &subtitle, got_output, pkt);
1313     if (ret < 0)
1314         return ret;
1315     if (!*got_output)
1316         return ret;
1317
1318     rate_emu_sleep(ist);
1319
1320     for (i = 0; i < nb_output_streams; i++) {
1321         OutputStream *ost = output_streams[i];
1322
1323         if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1324             continue;
1325
1326         do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
1327     }
1328
1329     avsubtitle_free(&subtitle);
1330     return ret;
1331 }
1332
1333 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1334 static int output_packet(InputStream *ist, const AVPacket *pkt)
1335 {
1336     int i;
1337     int got_output;
1338     AVPacket avpkt;
1339
1340     if (ist->next_dts == AV_NOPTS_VALUE)
1341         ist->next_dts = ist->last_dts;
1342
1343     if (pkt == NULL) {
1344         /* EOF handling */
1345         av_init_packet(&avpkt);
1346         avpkt.data = NULL;
1347         avpkt.size = 0;
1348         goto handle_eof;
1349     } else {
1350         avpkt = *pkt;
1351     }
1352
1353     if (pkt->dts != AV_NOPTS_VALUE)
1354         ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1355
1356     // while we have more to decode or while the decoder did output something on EOF
1357     while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1358         int ret = 0;
1359     handle_eof:
1360
1361         ist->last_dts = ist->next_dts;
1362
1363         if (avpkt.size && avpkt.size != pkt->size) {
1364             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
1365                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1366             ist->showed_multi_packet_warning = 1;
1367         }
1368
1369         switch (ist->st->codec->codec_type) {
1370         case AVMEDIA_TYPE_AUDIO:
1371             ret = decode_audio    (ist, &avpkt, &got_output);
1372             break;
1373         case AVMEDIA_TYPE_VIDEO:
1374             ret = decode_video    (ist, &avpkt, &got_output);
1375             if (avpkt.duration)
1376                 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1377             else if (ist->st->avg_frame_rate.num)
1378                 ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
1379                                               AV_TIME_BASE_Q);
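            /* as a last resort, derive the frame duration from the codec time
               base, using the parser's repeat_pict (when available) to account
               for repeated fields */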
1380             else if (ist->st->codec->time_base.num != 0) {
1381                 int ticks      = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1382                                                    ist->st->codec->ticks_per_frame;
1383                 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
1384             }
1385             break;
1386         case AVMEDIA_TYPE_SUBTITLE:
1387             ret = transcode_subtitles(ist, &avpkt, &got_output);
1388             break;
1389         default:
1390             return -1;
1391         }
1392
1393         if (ret < 0)
1394             return ret;
1395         // touch data and size only if not EOF
1396         if (pkt) {
1397             avpkt.data += ret;
1398             avpkt.size -= ret;
1399         }
1400         if (!got_output) {
1401             continue;
1402         }
1403     }
1404
1405     /* handle stream copy */
1406     if (!ist->decoding_needed) {
1407         rate_emu_sleep(ist);
1408         ist->last_dts = ist->next_dts;
1409         switch (ist->st->codec->codec_type) {
1410         case AVMEDIA_TYPE_AUDIO:
1411             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
1412                              ist->st->codec->sample_rate;
1413             break;
1414         case AVMEDIA_TYPE_VIDEO:
1415             if (ist->st->codec->time_base.num != 0) {
1416                 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
1417                 ist->next_dts += ((int64_t)AV_TIME_BASE *
1418                                   ist->st->codec->time_base.num * ticks) /
1419                                   ist->st->codec->time_base.den;
1420             }
1421             break;
1422         }
1423     }
1424     for (i = 0; pkt && i < nb_output_streams; i++) {
1425         OutputStream *ost = output_streams[i];
1426
1427         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1428             continue;
1429
1430         do_streamcopy(ist, ost, pkt);
1431     }
1432
1433     return 0;
1434 }
1435
1436 static void print_sdp(void)
1437 {
1438     char sdp[2048];
1439     int i;
1440     AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
1441
1442     if (!avc)
1443         exit(1);
1444     for (i = 0; i < nb_output_files; i++)
1445         avc[i] = output_files[i]->ctx;
1446
1447     av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1448     printf("SDP:\n%s\n", sdp);
1449     fflush(stdout);
1450     av_freep(&avc);
1451 }
1452
1453 static int init_input_stream(int ist_index, char *error, int error_len)
1454 {
1455     int i;
1456     InputStream *ist = input_streams[ist_index];
1457     if (ist->decoding_needed) {
1458         AVCodec *codec = ist->dec;
1459         if (!codec) {
1460             snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
1461                     ist->st->codec->codec_id, ist->file_index, ist->st->index);
1462             return AVERROR(EINVAL);
1463         }
1464
1465         /* update requested sample format for the decoder based on the
1466            corresponding encoder sample format */
1467         for (i = 0; i < nb_output_streams; i++) {
1468             OutputStream *ost = output_streams[i];
1469             if (ost->source_index == ist_index) {
1470                 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
1471                 break;
1472             }
1473         }
1474
1475         if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
1476             ist->st->codec->get_buffer     = codec_get_buffer;
1477             ist->st->codec->release_buffer = codec_release_buffer;
1478             ist->st->codec->opaque         = &ist->buffer_pool;
1479         }
1480
1481         if (!av_dict_get(ist->opts, "threads", NULL, 0))
1482             av_dict_set(&ist->opts, "threads", "auto", 0);
1483         if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
1484             snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
1485                     ist->file_index, ist->st->index);
1486             return AVERROR(EINVAL);
1487         }
1488         assert_codec_experimental(ist->st->codec, 0);
1489         assert_avoptions(ist->opts);
1490     }
1491
1492     ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1493     ist->next_dts = AV_NOPTS_VALUE;
1494     init_pts_correction(&ist->pts_ctx);
1495     ist->is_start = 1;
1496
1497     return 0;
1498 }
1499
1500 static InputStream *get_input_stream(OutputStream *ost)
1501 {
1502     if (ost->source_index >= 0)
1503         return input_streams[ost->source_index];
1504
1505     if (ost->filter) {
1506         FilterGraph *fg = ost->filter->graph;
1507         int i;
1508
1509         for (i = 0; i < fg->nb_inputs; i++)
1510             if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
1511                 return fg->inputs[i]->ist;
1512     }
1513
1514     return NULL;
1515 }
1516
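/*
 * Parse the comma-separated list of timestamps given to -force_key_frames
 * (e.g. something like "0:00:05,0:00:10,0:00:15"), rescale each one to the
 * encoder time base and store the result in ost->forced_kf_pts.  Note that
 * the string in 'kf' is modified in place while splitting.
 */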
1517 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1518                                     AVCodecContext *avctx)
1519 {
1520     char *p;
1521     int n = 1, i;
1522     int64_t t;
1523
1524     for (p = kf; *p; p++)
1525         if (*p == ',')
1526             n++;
1527     ost->forced_kf_count = n;
1528     ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1529     if (!ost->forced_kf_pts) {
1530         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1531         exit(1);
1532     }
1533
1534     p = kf;
1535     for (i = 0; i < n; i++) {
1536         char *next = strchr(p, ',');
1537
1538         if (next)
1539             *next++ = 0;
1540
1541         t = parse_time_or_die("force_key_frames", p, 1);
1542         ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
1543
1544         p = next;
1545     }
1546 }
1547
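/*
 * One-time setup before the main transcoding loop: derive the encoding
 * parameters of every output stream (copied from the input for stream copy,
 * taken from the configured filtergraph otherwise), open all encoders and
 * decoders, discard unused programs and write the output file headers.
 * Returns 0 on success or a negative AVERROR code on failure.
 */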
1548 static int transcode_init(void)
1549 {
1550     int ret = 0, i, j, k;
1551     AVFormatContext *oc;
1552     AVCodecContext *codec;
1553     OutputStream *ost;
1554     InputStream *ist;
1555     char error[1024];
1556     int want_sdp = 1;
1557
1558     /* init framerate emulation */
1559     for (i = 0; i < nb_input_files; i++) {
1560         InputFile *ifile = input_files[i];
1561         if (ifile->rate_emu)
1562             for (j = 0; j < ifile->nb_streams; j++)
1563                 input_streams[j + ifile->ist_index]->start = av_gettime();
1564     }
1565
1566     /* output stream init */
1567     for (i = 0; i < nb_output_files; i++) {
1568         oc = output_files[i]->ctx;
1569         if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
1570             av_dump_format(oc, i, oc->filename, 1);
1571             av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
1572             return AVERROR(EINVAL);
1573         }
1574     }
1575
1576     /* init complex filtergraphs */
1577     for (i = 0; i < nb_filtergraphs; i++)
1578         if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
1579             return ret;
1580
1581     /* for each output stream, we compute the right encoding parameters */
1582     for (i = 0; i < nb_output_streams; i++) {
1583         AVCodecContext *icodec = NULL;
1584         ost = output_streams[i];
1585         oc  = output_files[ost->file_index]->ctx;
1586         ist = get_input_stream(ost);
1587
1588         if (ost->attachment_filename)
1589             continue;
1590
1591         codec  = ost->st->codec;
1592
1593         if (ist) {
1594             icodec = ist->st->codec;
1595
1596             ost->st->disposition          = ist->st->disposition;
1597             codec->bits_per_raw_sample    = icodec->bits_per_raw_sample;
1598             codec->chroma_sample_location = icodec->chroma_sample_location;
1599         }
1600
1601         if (ost->stream_copy) {
1602             uint64_t extra_size;
1603
1604             av_assert0(ist && !ost->filter);
1605
1606             extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
1607
1608             if (extra_size > INT_MAX) {
1609                 return AVERROR(EINVAL);
1610             }
1611
1612             /* if stream_copy is selected, no need to decode or encode */
1613             codec->codec_id   = icodec->codec_id;
1614             codec->codec_type = icodec->codec_type;
1615
1616             if (!codec->codec_tag) {
1617                 if (!oc->oformat->codec_tag ||
1618                      av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
1619                      av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
1620                     codec->codec_tag = icodec->codec_tag;
1621             }
1622
1623             codec->bit_rate       = icodec->bit_rate;
1624             codec->rc_max_rate    = icodec->rc_max_rate;
1625             codec->rc_buffer_size = icodec->rc_buffer_size;
1626             codec->field_order    = icodec->field_order;
1627             codec->extradata      = av_mallocz(extra_size);
1628             if (!codec->extradata) {
1629                 return AVERROR(ENOMEM);
1630             }
1631             memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
1632             codec->extradata_size = icodec->extradata_size;
1633             if (!copy_tb) {
1634                 codec->time_base      = icodec->time_base;
1635                 codec->time_base.num *= icodec->ticks_per_frame;
1636                 av_reduce(&codec->time_base.num, &codec->time_base.den,
1637                           codec->time_base.num, codec->time_base.den, INT_MAX);
1638             } else
1639                 codec->time_base = ist->st->time_base;
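            /* e.g. an interlaced MPEG-2 input with codec time_base 1/50 and
             * ticks_per_frame 2 gives 2/50 in the !copy_tb branch above,
             * which av_reduce() turns into a frame-level 1/25 time base */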
1640
1641             switch (codec->codec_type) {
1642             case AVMEDIA_TYPE_AUDIO:
1643                 if (audio_volume != 256) {
1644                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
1645                     exit(1);
1646                 }
1647                 codec->channel_layout     = icodec->channel_layout;
1648                 codec->sample_rate        = icodec->sample_rate;
1649                 codec->channels           = icodec->channels;
1650                 codec->frame_size         = icodec->frame_size;
1651                 codec->audio_service_type = icodec->audio_service_type;
1652                 codec->block_align        = icodec->block_align;
1653                 break;
1654             case AVMEDIA_TYPE_VIDEO:
1655                 codec->pix_fmt            = icodec->pix_fmt;
1656                 codec->width              = icodec->width;
1657                 codec->height             = icodec->height;
1658                 codec->has_b_frames       = icodec->has_b_frames;
1659                 if (!codec->sample_aspect_ratio.num) {
1660                     codec->sample_aspect_ratio   =
1661                     ost->st->sample_aspect_ratio =
1662                         ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
1663                         ist->st->codec->sample_aspect_ratio.num ?
1664                         ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
1665                 }
1666                 break;
1667             case AVMEDIA_TYPE_SUBTITLE:
1668                 codec->width  = icodec->width;
1669                 codec->height = icodec->height;
1670                 break;
1671             case AVMEDIA_TYPE_DATA:
1672             case AVMEDIA_TYPE_ATTACHMENT:
1673                 break;
1674             default:
1675                 abort();
1676             }
1677         } else {
1678             if (!ost->enc) {
1679                 /* should only happen when a default codec is not present. */
1680                 snprintf(error, sizeof(error), "Automatic encoder selection "
1681                          "failed for output stream #%d:%d. Default encoder for "
1682                          "format %s is probably disabled. Please choose an "
1683                          "encoder manually.\n", ost->file_index, ost->index,
1684                          oc->oformat->name);
1685                 ret = AVERROR(EINVAL);
1686                 goto dump_format;
1687             }
1688
1689             if (ist)
1690                 ist->decoding_needed = 1;
1691             ost->encoding_needed = 1;
1692
1693             /*
1694              * We want CFR output if and only if one of those is true:
1695              * 1) user specified output framerate with -r
1696              * 2) user specified -vsync cfr
1697              * 3) output format is CFR and the user didn't force vsync to
1698              *    something other than CFR
1699              *
1700              * in such a case, set ost->frame_rate
1701              */
1702             if (codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1703                 !ost->frame_rate.num && ist &&
1704                 (video_sync_method ==  VSYNC_CFR ||
1705                  (video_sync_method ==  VSYNC_AUTO &&
1706                   !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
1707                 ost->frame_rate = ist->framerate.num ? ist->framerate :
1708                                   ist->st->avg_frame_rate.num ?
1709                                   ist->st->avg_frame_rate :
1710                                   (AVRational){25, 1};
1711
1712                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
1713                     int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
1714                     ost->frame_rate = ost->enc->supported_framerates[idx];
1715                 }
1716             }
1717
1718             if (!ost->filter &&
1719                 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
1720                  codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
1721                     FilterGraph *fg;
1722                     fg = init_simple_filtergraph(ist, ost);
1723                     if (configure_filtergraph(fg)) {
1724                         av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
1725                         exit(1);
1726                     }
1727             }
1728
1729             switch (codec->codec_type) {
1730             case AVMEDIA_TYPE_AUDIO:
1731                 codec->sample_fmt     = ost->filter->filter->inputs[0]->format;
1732                 codec->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
1733                 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
1734                 codec->channels       = av_get_channel_layout_nb_channels(codec->channel_layout);
1735                 codec->time_base      = (AVRational){ 1, codec->sample_rate };
1736                 break;
1737             case AVMEDIA_TYPE_VIDEO:
1738                 codec->time_base = ost->filter->filter->inputs[0]->time_base;
1739
1740                 codec->width  = ost->filter->filter->inputs[0]->w;
1741                 codec->height = ost->filter->filter->inputs[0]->h;
1742                 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
1743                     ost->frame_aspect_ratio ? // overridden by the -aspect cli option
1744                     av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
1745                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
1746                 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
1747
1748                 if (icodec &&
1749                     (codec->width   != icodec->width  ||
1750                      codec->height  != icodec->height ||
1751                      codec->pix_fmt != icodec->pix_fmt)) {
1752                     codec->bits_per_raw_sample = 0;
1753                 }
1754
1755                 if (ost->forced_keyframes)
1756                     parse_forced_key_frames(ost->forced_keyframes, ost,
1757                                             ost->st->codec);
1758                 break;
1759             case AVMEDIA_TYPE_SUBTITLE:
1760                 codec->time_base = (AVRational){1, 1000};
1761                 break;
1762             default:
1763                 abort();
1764                 break;
1765             }
1766             /* two pass mode */
1767             if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1768                 char logfilename[1024];
1769                 FILE *f;
1770
1771                 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1772                          ost->logfile_prefix ? ost->logfile_prefix :
1773                                                DEFAULT_PASS_LOGFILENAME_PREFIX,
1774                          i);
1775                 if (!strcmp(ost->enc->name, "libx264")) {
1776                     av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
1777                 } else {
1778                     if (codec->flags & CODEC_FLAG_PASS1) {
1779                         f = fopen(logfilename, "wb");
1780                         if (!f) {
1781                             av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
1782                                    logfilename, strerror(errno));
1783                             exit(1);
1784                         }
1785                         ost->logfile = f;
1786                     } else {
1787                         char  *logbuffer;
1788                         size_t logbuffer_size;
1789                         if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
1790                             av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
1791                                    logfilename);
1792                             exit(1);
1793                         }
1794                         codec->stats_in = logbuffer;
1795                     }
1796                 }
1797             }
1798         }
1799     }
1800
1801     /* open each encoder */
1802     for (i = 0; i < nb_output_streams; i++) {
1803         ost = output_streams[i];
1804         if (ost->encoding_needed) {
1805             AVCodec      *codec = ost->enc;
1806             AVCodecContext *dec = NULL;
1807
1808             if ((ist = get_input_stream(ost)))
1809                 dec = ist->st->codec;
1810             if (dec && dec->subtitle_header) {
1811                 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
1812                 if (!ost->st->codec->subtitle_header) {
1813                     ret = AVERROR(ENOMEM);
1814                     goto dump_format;
1815                 }
1816                 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
1817                 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
1818             }
1819             if (!av_dict_get(ost->opts, "threads", NULL, 0))
1820                 av_dict_set(&ost->opts, "threads", "auto", 0);
1821             if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
1822                 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
1823                         ost->file_index, ost->index);
1824                 ret = AVERROR(EINVAL);
1825                 goto dump_format;
1826             }
1827             assert_codec_experimental(ost->st->codec, 1);
1828             assert_avoptions(ost->opts);
1829             if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
1830                 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low. "
1831                                              "It takes bits/s as argument, not kbits/s\n");
1832             extra_size += ost->st->codec->extradata_size;
1833
1834             if (ost->st->codec->me_threshold)
1835                 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
1836         }
1837     }
1838
1839     /* init input streams */
1840     for (i = 0; i < nb_input_streams; i++)
1841         if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
1842             goto dump_format;
1843
1844     /* discard unused programs */
1845     for (i = 0; i < nb_input_files; i++) {
1846         InputFile *ifile = input_files[i];
1847         for (j = 0; j < ifile->ctx->nb_programs; j++) {
1848             AVProgram *p = ifile->ctx->programs[j];
1849             int discard  = AVDISCARD_ALL;
1850
1851             for (k = 0; k < p->nb_stream_indexes; k++)
1852                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
1853                     discard = AVDISCARD_DEFAULT;
1854                     break;
1855                 }
1856             p->discard = discard;
1857         }
1858     }
1859
1860     /* open files and write file headers */
1861     for (i = 0; i < nb_output_files; i++) {
1862         oc = output_files[i]->ctx;
1863         oc->interrupt_callback = int_cb;
1864         if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
1865             char errbuf[128];
1866             const char *errbuf_ptr = errbuf;
1867             if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)
1868                 errbuf_ptr = strerror(AVUNERROR(ret));
1869             snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters?): %s", i, errbuf_ptr);
1870             ret = AVERROR(EINVAL);
1871             goto dump_format;
1872         }
1873         assert_avoptions(output_files[i]->opts);
1874         if (strcmp(oc->oformat->name, "rtp")) {
1875             want_sdp = 0;
1876         }
1877     }
1878
1879  dump_format:
1880     /* dump the output file parameters - this cannot be done earlier,
1881        since in the stream-copy case they are only known at this point */
1882     for (i = 0; i < nb_output_files; i++) {
1883         av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
1884     }
1885
1886     /* dump the stream mapping */
1887     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
1888     for (i = 0; i < nb_input_streams; i++) {
1889         ist = input_streams[i];
1890
1891         for (j = 0; j < ist->nb_filters; j++) {
1892             if (ist->filters[j]->graph->graph_desc) {
1893                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
1894                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
1895                        ist->filters[j]->name);
1896                 if (nb_filtergraphs > 1)
1897                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
1898                 av_log(NULL, AV_LOG_INFO, "\n");
1899             }
1900         }
1901     }
1902
1903     for (i = 0; i < nb_output_streams; i++) {
1904         ost = output_streams[i];
1905
1906         if (ost->attachment_filename) {
1907             /* an attached file */
1908             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
1909                    ost->attachment_filename, ost->file_index, ost->index);
1910             continue;
1911         }
1912
1913         if (ost->filter && ost->filter->graph->graph_desc) {
1914             /* output from a complex graph */
1915             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
1916             if (nb_filtergraphs > 1)
1917                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
1918
1919             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
1920                    ost->index, ost->enc ? ost->enc->name : "?");
1921             continue;
1922         }
1923
1924         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
1925                input_streams[ost->source_index]->file_index,
1926                input_streams[ost->source_index]->st->index,
1927                ost->file_index,
1928                ost->index);
1929         if (ost->sync_ist != input_streams[ost->source_index])
1930             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
1931                    ost->sync_ist->file_index,
1932                    ost->sync_ist->st->index);
1933         if (ost->stream_copy)
1934             av_log(NULL, AV_LOG_INFO, " (copy)");
1935         else
1936             av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
1937                    input_streams[ost->source_index]->dec->name : "?",
1938                    ost->enc ? ost->enc->name : "?");
1939         av_log(NULL, AV_LOG_INFO, "\n");
1940     }
1941
1942     if (ret) {
1943         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
1944         return ret;
1945     }
1946
1947     if (want_sdp) {
1948         print_sdp();
1949     }
1950
1951     return 0;
1952 }
1953
1954 /**
1955  * @return 1 if there are still streams where more output is wanted,
1956  *         0 otherwise
1957  */
1958 static int need_output(void)
1959 {
1960     int i;
1961
1962     for (i = 0; i < nb_output_streams; i++) {
1963         OutputStream *ost    = output_streams[i];
1964         OutputFile *of       = output_files[ost->file_index];
1965         AVFormatContext *os  = output_files[ost->file_index]->ctx;
1966
1967         if (ost->finished ||
1968             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
1969             continue;
1970         if (ost->frame_number >= ost->max_frames) {
1971             int j;
1972             for (j = 0; j < of->ctx->nb_streams; j++)
1973                 output_streams[of->ost_index + j]->finished = 1;
1974             continue;
1975         }
1976
1977         return 1;
1978     }
1979
1980     return 0;
1981 }
1982
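/*
 * Pick the input file to read from next: among the files that are neither in
 * EAGAIN state nor at EOF, choose the one whose stream has the lowest last
 * decoded DTS, which keeps the inputs roughly interleaved in time.
 * Returns NULL when no input file is currently readable.
 */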
1983 static InputFile *select_input_file(void)
1984 {
1985     InputFile *ifile = NULL;
1986     int64_t ipts_min = INT64_MAX;
1987     int i;
1988
1989     for (i = 0; i < nb_input_streams; i++) {
1990         InputStream *ist = input_streams[i];
1991         int64_t ipts     = ist->last_dts;
1992
1993         if (ist->discard || input_files[ist->file_index]->eagain)
1994             continue;
1995         if (!input_files[ist->file_index]->eof_reached) {
1996             if (ipts < ipts_min) {
1997                 ipts_min = ipts;
1998                 ifile    = input_files[ist->file_index];
1999             }
2000         }
2001     }
2002
2003     return ifile;
2004 }
2005
2006 #if HAVE_PTHREADS
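/*
 * Threaded demuxing: with more than one input file, each file gets its own
 * reader thread that pushes packets into a small FIFO (sized for 8 AVPackets)
 * while the main thread drains it through get_input_packet_mt(); the
 * fifo_cond/fifo_lock pair blocks the reader whenever the FIFO is full.
 */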
2007 static void *input_thread(void *arg)
2008 {
2009     InputFile *f = arg;
2010     int ret = 0;
2011
2012     while (!transcoding_finished && ret >= 0) {
2013         AVPacket pkt;
2014         ret = av_read_frame(f->ctx, &pkt);
2015
2016         if (ret == AVERROR(EAGAIN)) {
2017             av_usleep(10000);
2018             ret = 0;
2019             continue;
2020         } else if (ret < 0)
2021             break;
2022
2023         pthread_mutex_lock(&f->fifo_lock);
2024         while (!av_fifo_space(f->fifo))
2025             pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
2026
2027         av_dup_packet(&pkt);
2028         av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2029
2030         pthread_mutex_unlock(&f->fifo_lock);
2031     }
2032
2033     f->finished = 1;
2034     return NULL;
2035 }
2036
2037 static void free_input_threads(void)
2038 {
2039     int i;
2040
2041     if (nb_input_files == 1)
2042         return;
2043
2044     transcoding_finished = 1;
2045
2046     for (i = 0; i < nb_input_files; i++) {
2047         InputFile *f = input_files[i];
2048         AVPacket pkt;
2049
2050         if (!f->fifo || f->joined)
2051             continue;
2052
2053         pthread_mutex_lock(&f->fifo_lock);
2054         while (av_fifo_size(f->fifo)) {
2055             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2056             av_free_packet(&pkt);
2057         }
2058         pthread_cond_signal(&f->fifo_cond);
2059         pthread_mutex_unlock(&f->fifo_lock);
2060
2061         pthread_join(f->thread, NULL);
2062         f->joined = 1;
2063
2064         while (av_fifo_size(f->fifo)) {
2065             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2066             av_free_packet(&pkt);
2067         }
2068         av_fifo_free(f->fifo);
2069     }
2070 }
2071
2072 static int init_input_threads(void)
2073 {
2074     int i, ret;
2075
2076     if (nb_input_files == 1)
2077         return 0;
2078
2079     for (i = 0; i < nb_input_files; i++) {
2080         InputFile *f = input_files[i];
2081
2082         if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2083             return AVERROR(ENOMEM);
2084
2085         pthread_mutex_init(&f->fifo_lock, NULL);
2086         pthread_cond_init (&f->fifo_cond, NULL);
2087
2088         if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2089             return AVERROR(ret);
2090     }
2091     return 0;
2092 }
2093
2094 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2095 {
2096     int ret = 0;
2097
2098     pthread_mutex_lock(&f->fifo_lock);
2099
2100     if (av_fifo_size(f->fifo)) {
2101         av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2102         pthread_cond_signal(&f->fifo_cond);
2103     } else {
2104         if (f->finished)
2105             ret = AVERROR_EOF;
2106         else
2107             ret = AVERROR(EAGAIN);
2108     }
2109
2110     pthread_mutex_unlock(&f->fifo_lock);
2111
2112     return ret;
2113 }
2114 #endif
2115
2116 static int get_input_packet(InputFile *f, AVPacket *pkt)
2117 {
2118 #if HAVE_PTHREADS
2119     if (nb_input_files > 1)
2120         return get_input_packet_mt(f, pkt);
2121 #endif
2122     return av_read_frame(f->ctx, pkt);
2123 }
2124
2125 static int got_eagain(void)
2126 {
2127     int i;
2128     for (i = 0; i < nb_input_files; i++)
2129         if (input_files[i]->eagain)
2130             return 1;
2131     return 0;
2132 }
2133
2134 static void reset_eagain(void)
2135 {
2136     int i;
2137     for (i = 0; i < nb_input_files; i++)
2138         input_files[i]->eagain = 0;
2139 }
2140
2141 /**
2142  * Read one packet from an input file and send it for
2143  * - decoding -> lavfi (audio/video)
2144  * - decoding -> encoding -> muxing (subtitles)
2145  * - muxing (streamcopy)
2146  *
2147  * @return
2148  * - 0 -- one packet was read and processed
2149  * - AVERROR(EAGAIN) -- no packets were available for the selected file;
2150  *   this function should be called again
2151  * - AVERROR_EOF -- this function should not be called again
2152  */
2153 static int process_input(void)
2154 {
2155     InputFile *ifile;
2156     AVFormatContext *is;
2157     InputStream *ist;
2158     AVPacket pkt;
2159     int ret, i, j;
2160
2161     /* select the stream that we must read now */
2162     ifile = select_input_file();
2163     /* if none, the inputs are finished (or temporarily unavailable) */
2164     if (!ifile) {
2165         if (got_eagain()) {
2166             reset_eagain();
2167             av_usleep(10000);
2168             return AVERROR(EAGAIN);
2169         }
2170         av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
2171         return AVERROR_EOF;
2172     }
2173
2174     is  = ifile->ctx;
2175     ret = get_input_packet(ifile, &pkt);
2176
2177     if (ret == AVERROR(EAGAIN)) {
2178         ifile->eagain = 1;
2179         return ret;
2180     }
2181     if (ret < 0) {
2182         if (ret != AVERROR_EOF) {
2183             print_error(is->filename, ret);
2184             if (exit_on_error)
2185                 exit(1);
2186         }
2187         ifile->eof_reached = 1;
2188
2189         for (i = 0; i < ifile->nb_streams; i++) {
2190             ist = input_streams[ifile->ist_index + i];
2191             if (ist->decoding_needed)
2192                 output_packet(ist, NULL);
2193
2194             /* mark all outputs that don't go through lavfi as finished */
2195             for (j = 0; j < nb_output_streams; j++) {
2196                 OutputStream *ost = output_streams[j];
2197
2198                 if (ost->source_index == ifile->ist_index + i &&
2199                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2200                     ost->finished = 1;
2201             }
2202         }
2203
2204         return AVERROR(EAGAIN);
2205     }
2206
2207     reset_eagain();
2208
2209     if (do_pkt_dump) {
2210         av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2211                          is->streams[pkt.stream_index]);
2212     }
2213     /* the following test is needed in case new streams appear
2214        dynamically in the stream: we ignore them */
2215     if (pkt.stream_index >= ifile->nb_streams)
2216         goto discard_packet;
2217
2218     ist = input_streams[ifile->ist_index + pkt.stream_index];
2219     if (ist->discard)
2220         goto discard_packet;
2221
2222     if (pkt.dts != AV_NOPTS_VALUE)
2223         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2224     if (pkt.pts != AV_NOPTS_VALUE)
2225         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2226
2227     if (pkt.pts != AV_NOPTS_VALUE)
2228         pkt.pts *= ist->ts_scale;
2229     if (pkt.dts != AV_NOPTS_VALUE)
2230         pkt.dts *= ist->ts_scale;
2231
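    /* e.g. if the predicted next_dts is at 10 s but the packet dts jumps to
     * 1000 s, delta is roughly 990 s, far above dts_delta_threshold, so
     * ts_offset is shifted by -delta and the packet timestamps are pulled
     * back to keep the stream continuous */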
2232     if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2233         (is->iformat->flags & AVFMT_TS_DISCONT)) {
2234         int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2235         int64_t delta   = pkt_dts - ist->next_dts;
2236
2237         if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2238             ifile->ts_offset -= delta;
2239             av_log(NULL, AV_LOG_DEBUG,
2240                    "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2241                    delta, ifile->ts_offset);
2242             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2243             if (pkt.pts != AV_NOPTS_VALUE)
2244                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2245         }
2246     }
2247
2248     ret = output_packet(ist, &pkt);
2249     if (ret < 0) {
2250         av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
2251                ist->file_index, ist->st->index);
2252         if (exit_on_error)
2253             exit(1);
2254     }
2255
2256 discard_packet:
2257     av_free_packet(&pkt);
2258
2259     return 0;
2260 }
2261
2262 /*
2263  * The following code is the main loop of the file converter
2264  */
2265 static int transcode(void)
2266 {
2267     int ret, i, need_input = 1;
2268     AVFormatContext *os;
2269     OutputStream *ost;
2270     InputStream *ist;
2271     int64_t timer_start;
2272
2273     ret = transcode_init();
2274     if (ret < 0)
2275         goto fail;
2276
2277     av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2278     term_init();
2279
2280     timer_start = av_gettime();
2281
2282 #if HAVE_PTHREADS
2283     if ((ret = init_input_threads()) < 0)
2284         goto fail;
2285 #endif
2286
2287     while (!received_sigterm) {
2288         /* check if there's any stream where output is still needed */
2289         if (!need_output()) {
2290             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2291             break;
2292         }
2293
2294         /* read and process one input packet if needed */
2295         if (need_input) {
2296             ret = process_input();
2297             if (ret == AVERROR_EOF)
2298                 need_input = 0;
2299         }
2300
2301         ret = poll_filters();
2302         if (ret < 0) {
2303             if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
2304                 continue;
2305
2306             av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
2307             break;
2308         }
2309
2310         /* dump report by using the first output video and audio streams */
2311         print_report(0, timer_start);
2312     }
2313 #if HAVE_PTHREADS
2314     free_input_threads();
2315 #endif
2316
2317     /* at the end of stream, we must flush the decoder buffers */
2318     for (i = 0; i < nb_input_streams; i++) {
2319         ist = input_streams[i];
2320         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
2321             output_packet(ist, NULL);
2322         }
2323     }
2324     poll_filters();
2325     flush_encoders();
2326
2327     term_exit();
2328
2329     /* write the trailer if needed and close file */
2330     for (i = 0; i < nb_output_files; i++) {
2331         os = output_files[i]->ctx;
2332         av_write_trailer(os);
2333     }
2334
2335     /* dump report by using the first video and audio streams */
2336     print_report(1, timer_start);
2337
2338     /* close each encoder */
2339     for (i = 0; i < nb_output_streams; i++) {
2340         ost = output_streams[i];
2341         if (ost->encoding_needed) {
2342             av_freep(&ost->st->codec->stats_in);
2343             avcodec_close(ost->st->codec);
2344         }
2345     }
2346
2347     /* close each decoder */
2348     for (i = 0; i < nb_input_streams; i++) {
2349         ist = input_streams[i];
2350         if (ist->decoding_needed) {
2351             avcodec_close(ist->st->codec);
2352         }
2353     }
2354
2355     /* finished ! */
2356     ret = 0;
2357
2358  fail:
2359 #if HAVE_PTHREADS
2360     free_input_threads();
2361 #endif
2362
2363     if (output_streams) {
2364         for (i = 0; i < nb_output_streams; i++) {
2365             ost = output_streams[i];
2366             if (ost) {
2367                 if (ost->stream_copy)
2368                     av_freep(&ost->st->codec->extradata);
2369                 if (ost->logfile) {
2370                     fclose(ost->logfile);
2371                     ost->logfile = NULL;
2372                 }
2373                 av_freep(&ost->st->codec->subtitle_header);
2374                 av_free(ost->forced_kf_pts);
2375                 av_dict_free(&ost->opts);
2376             }
2377         }
2378     }
2379     return ret;
2380 }
2381
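/*
 * Return the user CPU time consumed by this process in microseconds, falling
 * back to wall-clock time when no platform API is available; only used for
 * the -benchmark report printed from main().
 */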
2382 static int64_t getutime(void)
2383 {
2384 #if HAVE_GETRUSAGE
2385     struct rusage rusage;
2386
2387     getrusage(RUSAGE_SELF, &rusage);
2388     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
2389 #elif HAVE_GETPROCESSTIMES
2390     HANDLE proc;
2391     FILETIME c, e, k, u;
2392     proc = GetCurrentProcess();
2393     GetProcessTimes(proc, &c, &e, &k, &u);
2394     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
2395 #else
2396     return av_gettime();
2397 #endif
2398 }
2399
2400 static int64_t getmaxrss(void)
2401 {
2402 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
2403     struct rusage rusage;
2404     getrusage(RUSAGE_SELF, &rusage);
2405     return (int64_t)rusage.ru_maxrss * 1024;
2406 #elif HAVE_GETPROCESSMEMORYINFO
2407     HANDLE proc;
2408     PROCESS_MEMORY_COUNTERS memcounters;
2409     proc = GetCurrentProcess();
2410     memcounters.cb = sizeof(memcounters);
2411     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
2412     return memcounters.PeakPagefileUsage;
2413 #else
2414     return 0;
2415 #endif
2416 }
2417
2418 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
2419 {
2420     int idx = locate_option(argc, argv, options, "cpuflags");
2421     if (idx && argv[idx + 1])
2422         opt_cpuflags(NULL, "cpuflags", argv[idx + 1]);
2423 }
2424
2425 int main(int argc, char **argv)
2426 {
2427     OptionsContext o = { 0 };
2428     int64_t ti;
2429
2430     atexit(exit_program);
2431
2432     reset_options(&o);
2433
2434     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2435     parse_loglevel(argc, argv, options);
2436
2437     avcodec_register_all();
2438 #if CONFIG_AVDEVICE
2439     avdevice_register_all();
2440 #endif
2441     avfilter_register_all();
2442     av_register_all();
2443     avformat_network_init();
2444
2445     show_banner();
2446
2447     parse_cpuflags(argc, argv, options);
2448
2449     /* parse options */
2450     parse_options(&o, argc, argv, options, opt_output_file);
2451
2452     if (nb_output_files <= 0 && nb_input_files == 0) {
2453         show_usage();
2454         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2455         exit(1);
2456     }
2457
2458     /* file converter / grab */
2459     if (nb_output_files <= 0) {
2460         fprintf(stderr, "At least one output file must be specified\n");
2461         exit(1);
2462     }
2463
2464     ti = getutime();
2465     if (transcode() < 0)
2466         exit(1);
2467     ti = getutime() - ti;
2468     if (do_benchmark) {
2469         int maxrss = getmaxrss() / 1024;
2470         printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
2471     }
2472
2473     exit(0);
2474     return 0;
2475 }