1 /*
2  * avconv main
3  * Copyright (c) 2000-2011 The libav developers.
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <ctype.h>
24 #include <string.h>
25 #include <math.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <signal.h>
29 #include <limits.h>
30 #include "libavformat/avformat.h"
31 #include "libavdevice/avdevice.h"
32 #include "libswscale/swscale.h"
33 #include "libavresample/avresample.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/audioconvert.h"
36 #include "libavutil/parseutils.h"
37 #include "libavutil/samplefmt.h"
38 #include "libavutil/colorspace.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/intreadwrite.h"
41 #include "libavutil/dict.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/pixdesc.h"
44 #include "libavutil/avstring.h"
45 #include "libavutil/libm.h"
46 #include "libavutil/imgutils.h"
47 #include "libavutil/time.h"
48 #include "libavformat/os_support.h"
49
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54
55 #if HAVE_SYS_RESOURCE_H
56 #include <sys/types.h>
57 #include <sys/resource.h>
58 #elif HAVE_GETPROCESSTIMES
59 #include <windows.h>
60 #endif
61 #if HAVE_GETPROCESSMEMORYINFO
62 #include <windows.h>
63 #include <psapi.h>
64 #endif
65
66 #if HAVE_SYS_SELECT_H
67 #include <sys/select.h>
68 #endif
69
70 #if HAVE_PTHREADS
71 #include <pthread.h>
72 #endif
73
74 #include <time.h>
75
76 #include "avconv.h"
77 #include "cmdutils.h"
78
79 #include "libavutil/avassert.h"
80
81 const char program_name[] = "avconv";
82 const int program_birth_year = 2000;
83
84 static FILE *vstats_file;
85
86 static int64_t video_size = 0;
87 static int64_t audio_size = 0;
88 static int64_t extra_size = 0;
89 static int nb_frames_dup = 0;
90 static int nb_frames_drop = 0;
91
92
93
94 #if HAVE_PTHREADS
95 /* signal to input threads that they should exit; set by the main thread */
96 static int transcoding_finished;
97 #endif
98
99 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
100
101 InputStream **input_streams = NULL;
102 int        nb_input_streams = 0;
103 InputFile   **input_files   = NULL;
104 int        nb_input_files   = 0;
105
106 OutputStream **output_streams = NULL;
107 int         nb_output_streams = 0;
108 OutputFile   **output_files   = NULL;
109 int         nb_output_files   = 0;
110
111 FilterGraph **filtergraphs;
112 int        nb_filtergraphs;
113
114 static void term_exit(void)
115 {
116     av_log(NULL, AV_LOG_QUIET, "");
117 }
118
119 static volatile int received_sigterm = 0;
120 static volatile int received_nb_signals = 0;
121
122 static void
123 sigterm_handler(int sig)
124 {
125     received_sigterm = sig;
126     received_nb_signals++;
127     term_exit();
128 }
129
130 static void term_init(void)
131 {
132     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
133     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
134 #ifdef SIGXCPU
135     signal(SIGXCPU, sigterm_handler);
136 #endif
137 }
138
139 static int decode_interrupt_cb(void *ctx)
140 {
141     return received_nb_signals > 1;
142 }
143
144 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
145
146 void exit_program(int ret)
147 {
148     int i, j;
149
150     for (i = 0; i < nb_filtergraphs; i++) {
151         avfilter_graph_free(&filtergraphs[i]->graph);
152         for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
153             av_freep(&filtergraphs[i]->inputs[j]->name);
154             av_freep(&filtergraphs[i]->inputs[j]);
155         }
156         av_freep(&filtergraphs[i]->inputs);
157         for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
158             av_freep(&filtergraphs[i]->outputs[j]->name);
159             av_freep(&filtergraphs[i]->outputs[j]);
160         }
161         av_freep(&filtergraphs[i]->outputs);
162         av_freep(&filtergraphs[i]);
163     }
164     av_freep(&filtergraphs);
165
166     /* close files */
167     for (i = 0; i < nb_output_files; i++) {
168         AVFormatContext *s = output_files[i]->ctx;
169         if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
170             avio_close(s->pb);
171         avformat_free_context(s);
172         av_dict_free(&output_files[i]->opts);
173         av_freep(&output_files[i]);
174     }
175     for (i = 0; i < nb_output_streams; i++) {
176         AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
177         while (bsfc) {
178             AVBitStreamFilterContext *next = bsfc->next;
179             av_bitstream_filter_close(bsfc);
180             bsfc = next;
181         }
182         output_streams[i]->bitstream_filters = NULL;
183
184         av_freep(&output_streams[i]->forced_keyframes);
185         av_freep(&output_streams[i]->avfilter);
186         av_freep(&output_streams[i]->filtered_frame);
187         av_freep(&output_streams[i]);
188     }
189     for (i = 0; i < nb_input_files; i++) {
190         avformat_close_input(&input_files[i]->ctx);
191         av_freep(&input_files[i]);
192     }
193     for (i = 0; i < nb_input_streams; i++) {
194         av_freep(&input_streams[i]->decoded_frame);
195         av_dict_free(&input_streams[i]->opts);
196         free_buffer_pool(&input_streams[i]->buffer_pool);
197         av_freep(&input_streams[i]->filters);
198         av_freep(&input_streams[i]);
199     }
200
201     if (vstats_file)
202         fclose(vstats_file);
203     av_free(vstats_filename);
204
205     av_freep(&input_streams);
206     av_freep(&input_files);
207     av_freep(&output_streams);
208     av_freep(&output_files);
209
210     uninit_opts();
211
212     avfilter_uninit();
213     avformat_network_deinit();
214
215     if (received_sigterm) {
216         av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
217                (int) received_sigterm);
218         exit (255);
219     }
220
221     exit(ret);
222 }
223
224 void assert_avoptions(AVDictionary *m)
225 {
226     AVDictionaryEntry *t;
227     if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
228         av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
229         exit_program(1);
230     }
231 }
232
233 static void assert_codec_experimental(AVCodecContext *c, int encoder)
234 {
235     const char *codec_string = encoder ? "encoder" : "decoder";
236     AVCodec *codec;
237     if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
238         c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
239         av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
240                 "results.\nAdd '-strict experimental' if you want to use it.\n",
241                 codec_string, c->codec->name);
242         codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
243         if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
244             av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
245                    codec_string, codec->name);
246         exit_program(1);
247     }
248 }
249
250 /**
251  * Update the requested input sample format based on the output sample format.
252  * This is currently only used to request float output from decoders which
253  * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
254  * Ideally this will be removed in the future when decoders do not do format
255  * conversion and only output in their native format.
256  */
257 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
258                               AVCodecContext *enc)
259 {
260     /* if sample formats match or a decoder sample format has already been
261        requested, just return */
262     if (enc->sample_fmt == dec->sample_fmt ||
263         dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
264         return;
265
266     /* if decoder supports more than one output format */
267     if (dec_codec && dec_codec->sample_fmts &&
268         dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
269         dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
270         const enum AVSampleFormat *p;
271         int min_dec = INT_MAX, min_inc = INT_MAX;
272
273         /* find a matching sample format in the encoder */
274         for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
275             if (*p == enc->sample_fmt) {
276                 dec->request_sample_fmt = *p;
277                 return;
278             } else if (*p > enc->sample_fmt) {
279                 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
280             } else
281                 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
282         }
283
284         /* if none match, provide the one that matches quality closest */
285         dec->request_sample_fmt = min_inc != INT_MAX ? enc->sample_fmt + min_inc :
286                                                         enc->sample_fmt - min_dec;
287     }
288 }
289
290 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
291 {
292     AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
293     AVCodecContext          *avctx = ost->st->codec;
294     int ret;
295
296     /*
297      * Audio encoders may split the packets -- #frames in != #packets out.
298      * But there is no reordering, so we can limit the number of output packets
299      * by simply dropping them here.
300      * Counting encoded video frames needs to be done separately because of
301      * reordering, see do_video_out()
302      */
303     if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
304         if (ost->frame_number >= ost->max_frames) {
305             av_free_packet(pkt);
306             return;
307         }
308         ost->frame_number++;
309     }
310
311     while (bsfc) {
312         AVPacket new_pkt = *pkt;
313         int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
314                                            &new_pkt.data, &new_pkt.size,
315                                            pkt->data, pkt->size,
316                                            pkt->flags & AV_PKT_FLAG_KEY);
317         if (a > 0) {
318             av_free_packet(pkt);
319             new_pkt.destruct = av_destruct_packet;
320         } else if (a < 0) {
321             av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
322                    bsfc->filter->name, pkt->stream_index,
323                    avctx->codec ? avctx->codec->name : "copy");
324             print_error("", a);
325             if (exit_on_error)
326                 exit_program(1);
327         }
328         *pkt = new_pkt;
329
330         bsfc = bsfc->next;
331     }
332
333     pkt->stream_index = ost->index;
334     ret = av_interleaved_write_frame(s, pkt);
335     if (ret < 0) {
336         print_error("av_interleaved_write_frame()", ret);
337         exit_program(1);
338     }
339 }
340
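    /* Return 0 and mark ost as past its recording time (-t) once sync_opts reaches the limit; return 1 otherwise. */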
341 static int check_recording_time(OutputStream *ost)
342 {
343     OutputFile *of = output_files[ost->file_index];
344
345     if (of->recording_time != INT64_MAX &&
346         av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
347                       AV_TIME_BASE_Q) >= 0) {
348         ost->is_past_recording_time = 1;
349         return 0;
350     }
351     return 1;
352 }
353
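    /* Encode one audio frame and, if the encoder produced a packet, rescale its timestamps and write it out. */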
354 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
355                          AVFrame *frame)
356 {
357     AVCodecContext *enc = ost->st->codec;
358     AVPacket pkt;
359     int got_packet = 0;
360
361     av_init_packet(&pkt);
362     pkt.data = NULL;
363     pkt.size = 0;
364
365     if (!check_recording_time(ost))
366         return;
367
368     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
369         frame->pts = ost->sync_opts;
370     ost->sync_opts = frame->pts + frame->nb_samples;
371
372     if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
373         av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
374         exit_program(1);
375     }
376
377     if (got_packet) {
378         if (pkt.pts != AV_NOPTS_VALUE)
379             pkt.pts      = av_rescale_q(pkt.pts,      enc->time_base, ost->st->time_base);
380         if (pkt.dts != AV_NOPTS_VALUE)
381             pkt.dts      = av_rescale_q(pkt.dts,      enc->time_base, ost->st->time_base);
382         if (pkt.duration > 0)
383             pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
384
385         write_frame(s, &pkt, ost);
386
387         audio_size += pkt.size;
388     }
389 }
390
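    /* Optionally deinterlace the decoded picture in place; *bufp receives a temporary buffer for the caller to free. */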
391 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
392 {
393     AVCodecContext *dec;
394     AVPicture *picture2;
395     AVPicture picture_tmp;
396     uint8_t *buf = 0;
397
398     dec = ist->st->codec;
399
400     /* deinterlace: must be done before any resize */
401     if (do_deinterlace) {
402         int size;
403
404         /* create temporary picture */
405         size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
406         buf  = av_malloc(size);
407         if (!buf)
408             return;
409
410         picture2 = &picture_tmp;
411         avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
412
413         if (avpicture_deinterlace(picture2, picture,
414                                  dec->pix_fmt, dec->width, dec->height) < 0) {
415             /* if error, do not deinterlace */
416             av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
417             av_free(buf);
418             buf = NULL;
419             picture2 = picture;
420         }
421     } else {
422         picture2 = picture;
423     }
424
425     if (picture != picture2)
426         *picture = *picture2;
427     *bufp = buf;
428 }
429
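    /* Encode one subtitle; DVB subtitles are written as two packets (one to draw, one to clear). */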
430 static void do_subtitle_out(AVFormatContext *s,
431                             OutputStream *ost,
432                             InputStream *ist,
433                             AVSubtitle *sub,
434                             int64_t pts)
435 {
436     static uint8_t *subtitle_out = NULL;
437     int subtitle_out_max_size = 1024 * 1024;
438     int subtitle_out_size, nb, i;
439     AVCodecContext *enc;
440     AVPacket pkt;
441
442     if (pts == AV_NOPTS_VALUE) {
443         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
444         if (exit_on_error)
445             exit_program(1);
446         return;
447     }
448
449     enc = ost->st->codec;
450
451     if (!subtitle_out) {
452         subtitle_out = av_malloc(subtitle_out_max_size);
453     }
454
455     /* Note: DVB subtitles need one packet to draw them and another
456        packet to clear them */
457     /* XXX: signal it in the codec context ? */
458     if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
459         nb = 2;
460     else
461         nb = 1;
462
463     for (i = 0; i < nb; i++) {
464         ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
465         if (!check_recording_time(ost))
466             return;
467
468         sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
469         // start_display_time is required to be 0
470         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
471         sub->end_display_time  -= sub->start_display_time;
472         sub->start_display_time = 0;
473         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
474                                                     subtitle_out_max_size, sub);
475         if (subtitle_out_size < 0) {
476             av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
477             exit_program(1);
478         }
479
480         av_init_packet(&pkt);
481         pkt.data = subtitle_out;
482         pkt.size = subtitle_out_size;
483         pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
484         if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
485             /* XXX: the pts correction is handled here. Maybe handling
486                it in the codec would be better */
487             if (i == 0)
488                 pkt.pts += 90 * sub->start_display_time;
489             else
490                 pkt.pts += 90 * sub->end_display_time;
491         }
492         write_frame(s, &pkt, ost);
493     }
494 }
495
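    /* Encode one video frame (or pass a raw picture through for AVFMT_RAWPICTURE) and write the packet out. */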
496 static void do_video_out(AVFormatContext *s,
497                          OutputStream *ost,
498                          AVFrame *in_picture,
499                          int *frame_size, float quality)
500 {
501     int ret, format_video_sync;
502     AVPacket pkt;
503     AVCodecContext *enc = ost->st->codec;
504
505     *frame_size = 0;
506
507     format_video_sync = video_sync_method;
508     if (format_video_sync == VSYNC_AUTO)
509         format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
510                             (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
511     if (format_video_sync != VSYNC_PASSTHROUGH &&
512         ost->frame_number &&
513         in_picture->pts != AV_NOPTS_VALUE &&
514         in_picture->pts < ost->sync_opts) {
515         nb_frames_drop++;
516         av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
517         return;
518     }
519
520     if (in_picture->pts == AV_NOPTS_VALUE)
521         in_picture->pts = ost->sync_opts;
522     ost->sync_opts = in_picture->pts;
523
524
525     if (!ost->frame_number)
526         ost->first_pts = in_picture->pts;
527
528     av_init_packet(&pkt);
529     pkt.data = NULL;
530     pkt.size = 0;
531
532     if (!check_recording_time(ost) ||
533         ost->frame_number >= ost->max_frames)
534         return;
535
536     if (s->oformat->flags & AVFMT_RAWPICTURE &&
537         enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
538         /* raw pictures are written as an AVPicture structure to
539            avoid any copies. We temporarily support the older
540            method. */
541         enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
542         enc->coded_frame->top_field_first  = in_picture->top_field_first;
543         pkt.data   = (uint8_t *)in_picture;
544         pkt.size   =  sizeof(AVPicture);
545         pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
546         pkt.flags |= AV_PKT_FLAG_KEY;
547
548         write_frame(s, &pkt, ost);
549     } else {
550         int got_packet;
551         AVFrame big_picture;
552
553         big_picture = *in_picture;
554         /* better than nothing: use input picture interlaced
555            settings */
556         big_picture.interlaced_frame = in_picture->interlaced_frame;
557         if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
558             if (ost->top_field_first == -1)
559                 big_picture.top_field_first = in_picture->top_field_first;
560             else
561                 big_picture.top_field_first = !!ost->top_field_first;
562         }
563
564         /* same_quant is handled here. This is not correct because it
565            may not be a global option */
566         big_picture.quality = quality;
567         if (!enc->me_threshold)
568             big_picture.pict_type = 0;
569         if (ost->forced_kf_index < ost->forced_kf_count &&
570             big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
571             big_picture.pict_type = AV_PICTURE_TYPE_I;
572             ost->forced_kf_index++;
573         }
574         ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
575         if (ret < 0) {
576             av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
577             exit_program(1);
578         }
579
580         if (got_packet) {
581             if (pkt.pts != AV_NOPTS_VALUE)
582                 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
583             if (pkt.dts != AV_NOPTS_VALUE)
584                 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
585
586             write_frame(s, &pkt, ost);
587             *frame_size = pkt.size;
588             video_size += pkt.size;
589
590             /* if two pass, output log */
591             if (ost->logfile && enc->stats_out) {
592                 fprintf(ost->logfile, "%s", enc->stats_out);
593             }
594         }
595     }
596     ost->sync_opts++;
597     /*
598      * For video, number of frames in == number of packets out.
599      * But there may be reordering, so we can't throw away frames on encoder
600      * flush, we need to limit them here, before they go into encoder.
601      */
602     ost->frame_number++;
603 }
604
605 static double psnr(double d)
606 {
607     return -10.0 * log(d) / log(10.0);
608 }
609
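    /* Append per-frame statistics (quantizer, PSNR, frame size, bitrate) to the -vstats file. */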
610 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
611                            int frame_size)
612 {
613     AVCodecContext *enc;
614     int frame_number;
615     double ti1, bitrate, avg_bitrate;
616
617     /* this is executed just the first time do_video_stats is called */
618     if (!vstats_file) {
619         vstats_file = fopen(vstats_filename, "w");
620         if (!vstats_file) {
621             perror("fopen");
622             exit_program(1);
623         }
624     }
625
626     enc = ost->st->codec;
627     if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
628         frame_number = ost->frame_number;
629         fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
630         if (enc->flags&CODEC_FLAG_PSNR)
631             fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
632
633         fprintf(vstats_file,"f_size= %6d ", frame_size);
634         /* compute pts value */
635         ti1 = ost->sync_opts * av_q2d(enc->time_base);
636         if (ti1 < 0.01)
637             ti1 = 0.01;
638
639         bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
640         avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
641         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
642                (double)video_size / 1024, ti1, bitrate, avg_bitrate);
643         fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
644     }
645 }
646
647 /**
648  * Read one frame of lavfi output for ost and encode it.
649  */
650 static int poll_filter(OutputStream *ost)
651 {
652     OutputFile    *of = output_files[ost->file_index];
653     AVFilterBufferRef *picref;
654     AVFrame *filtered_frame = NULL;
655     int frame_size, ret;
656
657     if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
658         return AVERROR(ENOMEM);
659     } else
660         avcodec_get_frame_defaults(ost->filtered_frame);
661     filtered_frame = ost->filtered_frame;
662
663     if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
664         !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
665         ret = av_buffersink_read_samples(ost->filter->filter, &picref,
666                                          ost->st->codec->frame_size);
667     else
668         ret = av_buffersink_read(ost->filter->filter, &picref);
669
670     if (ret < 0)
671         return ret;
672
673     avfilter_copy_buf_props(filtered_frame, picref);
674     if (picref->pts != AV_NOPTS_VALUE) {
675         filtered_frame->pts = av_rescale_q(picref->pts,
676                                            ost->filter->filter->inputs[0]->time_base,
677                                            ost->st->codec->time_base) -
678                               av_rescale_q(of->start_time,
679                                            AV_TIME_BASE_Q,
680                                            ost->st->codec->time_base);
681
682         if (of->start_time && filtered_frame->pts < 0) {
683             avfilter_unref_buffer(picref);
684             return 0;
685         }
686     }
687
688     switch (ost->filter->filter->inputs[0]->type) {
689     case AVMEDIA_TYPE_VIDEO:
690         if (!ost->frame_aspect_ratio)
691             ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
692
693         do_video_out(of->ctx, ost, filtered_frame, &frame_size,
694                      same_quant ? ost->last_quality :
695                                   ost->st->codec->global_quality);
696         if (vstats_filename && frame_size)
697             do_video_stats(of->ctx, ost, frame_size);
698         break;
699     case AVMEDIA_TYPE_AUDIO:
700         do_audio_out(of->ctx, ost, filtered_frame);
701         break;
702     default:
703         // TODO support subtitle filters
704         av_assert0(0);
705     }
706
707     avfilter_unref_buffer(picref);
708
709     return 0;
710 }
711
712 /**
713  * Read as many frames as possible from lavfi and encode them.
714  *
715  * Always read from the active stream with the lowest timestamp. If no frames
716  * are available for it, return EAGAIN and wait for more input. This way we
717  * can use lavfi sources that generate an unlimited number of frames without
718  * memory usage exploding.
719  */
720 static int poll_filters(void)
721 {
722     int i, ret = 0;
723
724     while (ret >= 0 && !received_sigterm) {
725         OutputStream *ost = NULL;
726         int64_t min_pts = INT64_MAX;
727
728         /* choose output stream with the lowest timestamp */
729         for (i = 0; i < nb_output_streams; i++) {
730             int64_t pts = output_streams[i]->sync_opts;
731
732             if (!output_streams[i]->filter ||
733                 output_streams[i]->is_past_recording_time)
734                 continue;
735
736             pts = av_rescale_q(pts, output_streams[i]->st->codec->time_base,
737                                AV_TIME_BASE_Q);
738             if (pts < min_pts) {
739                 min_pts = pts;
740                 ost = output_streams[i];
741             }
742         }
743
744         if (!ost)
745             break;
746
747         ret = poll_filter(ost);
748
749         if (ret == AVERROR_EOF) {
750             ost->is_past_recording_time = 1;
751
752             if (opt_shortest)
753                 return ret;
754
755             ret = 0;
756         } else if (ret == AVERROR(EAGAIN))
757             return 0;
758     }
759
760     return ret;
761 }
762
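    /* Print the periodic progress line; on the last report also print the final size summary. */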
763 static void print_report(int is_last_report, int64_t timer_start)
764 {
765     char buf[1024];
766     OutputStream *ost;
767     AVFormatContext *oc;
768     int64_t total_size;
769     AVCodecContext *enc;
770     int frame_number, vid, i;
771     double bitrate, ti1, pts;
772     static int64_t last_time = -1;
773     static int qp_histogram[52];
774
775     if (!print_stats && !is_last_report)
776         return;
777
778     if (!is_last_report) {
779         int64_t cur_time;
780         /* display the report every 0.5 seconds */
781         cur_time = av_gettime();
782         if (last_time == -1) {
783             last_time = cur_time;
784             return;
785         }
786         if ((cur_time - last_time) < 500000)
787             return;
788         last_time = cur_time;
789     }
790
791
792     oc = output_files[0]->ctx;
793
794     total_size = avio_size(oc->pb);
795     if (total_size < 0) // FIXME improve avio_size() so it works with non-seekable output too
796         total_size = avio_tell(oc->pb);
797
798     buf[0] = '\0';
799     ti1 = 1e10;
800     vid = 0;
801     for (i = 0; i < nb_output_streams; i++) {
802         float q = -1;
803         ost = output_streams[i];
804         enc = ost->st->codec;
805         if (!ost->stream_copy && enc->coded_frame)
806             q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
807         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
808             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
809         }
810         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
811             float t = (av_gettime() - timer_start) / 1000000.0;
812
813             frame_number = ost->frame_number;
814             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
815                      frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
816             if (is_last_report)
817                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
818             if (qp_hist) {
819                 int j;
820                 int qp = lrintf(q);
821                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
822                     qp_histogram[qp]++;
823                 for (j = 0; j < 32; j++)
824                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
825             }
826             if (enc->flags&CODEC_FLAG_PSNR) {
827                 int j;
828                 double error, error_sum = 0;
829                 double scale, scale_sum = 0;
830                 char type[3] = { 'Y','U','V' };
831                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
832                 for (j = 0; j < 3; j++) {
833                     if (is_last_report) {
834                         error = enc->error[j];
835                         scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
836                     } else {
837                         error = enc->coded_frame->error[j];
838                         scale = enc->width * enc->height * 255.0 * 255.0;
839                     }
840                     if (j)
841                         scale /= 4;
842                     error_sum += error;
843                     scale_sum += scale;
844                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
845                 }
846                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
847             }
848             vid = 1;
849         }
850         /* compute min output value */
851         pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
852         if ((pts < ti1) && (pts > 0))
853             ti1 = pts;
854     }
855     if (ti1 < 0.01)
856         ti1 = 0.01;
857
858     bitrate = (double)(total_size * 8) / ti1 / 1000.0;
859
860     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
861             "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
862             (double)total_size / 1024, ti1, bitrate);
863
864     if (nb_frames_dup || nb_frames_drop)
865         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
866                 nb_frames_dup, nb_frames_drop);
867
868     av_log(NULL, AV_LOG_INFO, "%s    \r", buf);
869
870     fflush(stderr);
871
872     if (is_last_report) {
873         int64_t raw = audio_size + video_size + extra_size;
874         av_log(NULL, AV_LOG_INFO, "\n");
875         av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
876                video_size / 1024.0,
877                audio_size / 1024.0,
878                extra_size / 1024.0,
879                100.0 * (total_size - raw) / raw
880         );
881     }
882 }
883
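    /* Drain delayed packets from every encoder at end of stream and write them to the muxers. */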
884 static void flush_encoders(void)
885 {
886     int i, ret;
887
888     for (i = 0; i < nb_output_streams; i++) {
889         OutputStream   *ost = output_streams[i];
890         AVCodecContext *enc = ost->st->codec;
891         AVFormatContext *os = output_files[ost->file_index]->ctx;
892         int stop_encoding = 0;
893
894         if (!ost->encoding_needed)
895             continue;
896
897         if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
898             continue;
899         if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
900             continue;
901
902         for (;;) {
903             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
904             const char *desc;
905             int64_t *size;
906
907             switch (ost->st->codec->codec_type) {
908             case AVMEDIA_TYPE_AUDIO:
909                 encode = avcodec_encode_audio2;
910                 desc   = "Audio";
911                 size   = &audio_size;
912                 break;
913             case AVMEDIA_TYPE_VIDEO:
914                 encode = avcodec_encode_video2;
915                 desc   = "Video";
916                 size   = &video_size;
917                 break;
918             default:
919                 stop_encoding = 1;
920             }
921
922             if (encode) {
923                 AVPacket pkt;
924                 int got_packet;
925                 av_init_packet(&pkt);
926                 pkt.data = NULL;
927                 pkt.size = 0;
928
929                 ret = encode(enc, &pkt, NULL, &got_packet);
930                 if (ret < 0) {
931                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
932                     exit_program(1);
933                 }
934                 *size += ret;
935                 if (ost->logfile && enc->stats_out) {
936                     fprintf(ost->logfile, "%s", enc->stats_out);
937                 }
938                 if (!got_packet) {
939                     stop_encoding = 1;
940                     break;
941                 }
942                 if (pkt.pts != AV_NOPTS_VALUE)
943                     pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
944                 if (pkt.dts != AV_NOPTS_VALUE)
945                     pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
946                 write_frame(os, &pkt, ost);
947             }
948
949             if (stop_encoding)
950                 break;
951         }
952     }
953 }
954
955 /*
956  * Check whether a packet from ist should be written into ost at this time
957  */
958 static int check_output_constraints(InputStream *ist, OutputStream *ost)
959 {
960     OutputFile *of = output_files[ost->file_index];
961     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
962
963     if (ost->source_index != ist_index)
964         return 0;
965
966     if (of->start_time && ist->last_dts < of->start_time)
967         return 0;
968
969     return 1;
970 }
971
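    /* Remux a packet from ist into ost without re-encoding, rescaling timestamps to the output time base. */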
972 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
973 {
974     OutputFile *of = output_files[ost->file_index];
975     int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
976     AVPacket opkt;
977
978     av_init_packet(&opkt);
979
980     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
981         !ost->copy_initial_nonkeyframes)
982         return;
983
984     if (of->recording_time != INT64_MAX &&
985         ist->last_dts >= of->recording_time + of->start_time) {
986         ost->is_past_recording_time = 1;
987         return;
988     }
989
990     /* force the input stream PTS */
991     if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
992         audio_size += pkt->size;
993     else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
994         video_size += pkt->size;
995         ost->sync_opts++;
996     }
997
998     if (pkt->pts != AV_NOPTS_VALUE)
999         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1000     else
1001         opkt.pts = AV_NOPTS_VALUE;
1002
1003     if (pkt->dts == AV_NOPTS_VALUE)
1004         opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1005     else
1006         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1007     opkt.dts -= ost_tb_start_time;
1008
1009     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1010     opkt.flags    = pkt->flags;
1011
1012     // FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
1013     if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
1014        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1015        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1016        && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1017        ) {
1018         if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
1019             opkt.destruct = av_destruct_packet;
1020     } else {
1021         opkt.data = pkt->data;
1022         opkt.size = pkt->size;
1023     }
1024
1025     write_frame(of->ctx, &opkt, ost);
1026     ost->st->codec->frame_number++;
1027     av_free_packet(&opkt);
1028 }
1029
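     /* Rate emulation (-re): sleep until the stream's last DTS catches up with wall-clock time. */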
1030 static void rate_emu_sleep(InputStream *ist)
1031 {
1032     if (input_files[ist->file_index]->rate_emu) {
1033         int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
1034         int64_t now = av_gettime() - ist->start;
1035         if (pts > now)
1036             av_usleep(pts - now);
1037     }
1038 }
1039
1040 int guess_input_channel_layout(InputStream *ist)
1041 {
1042     AVCodecContext *dec = ist->st->codec;
1043
1044     if (!dec->channel_layout) {
1045         char layout_name[256];
1046
1047         dec->channel_layout = av_get_default_channel_layout(dec->channels);
1048         if (!dec->channel_layout)
1049             return 0;
1050         av_get_channel_layout_string(layout_name, sizeof(layout_name),
1051                                      dec->channels, dec->channel_layout);
1052         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1053                "#%d.%d: %s\n", ist->file_index, ist->st->index, layout_name);
1054     }
1055     return 1;
1056 }
1057
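     /* Decode one audio packet, apply -vol volume scaling if requested, and feed the frame to the input filters. */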
1058 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1059 {
1060     AVFrame *decoded_frame;
1061     AVCodecContext *avctx = ist->st->codec;
1062     int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
1063     int i, ret, resample_changed;
1064
1065     if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1066         return AVERROR(ENOMEM);
1067     else
1068         avcodec_get_frame_defaults(ist->decoded_frame);
1069     decoded_frame = ist->decoded_frame;
1070
1071     ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1072     if (ret < 0) {
1073         return ret;
1074     }
1075
1076     if (!*got_output) {
1077         /* no audio frame */
1078         if (!pkt->size)
1079             for (i = 0; i < ist->nb_filters; i++)
1080                 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
1081         return ret;
1082     }
1083
1084     /* If the decoder provides a pts, use it instead of the last packet pts.
1085        The decoder could be delaying output by a packet or more. */
1086     if (decoded_frame->pts != AV_NOPTS_VALUE)
1087         ist->next_dts = decoded_frame->pts;
1088     else if (pkt->pts != AV_NOPTS_VALUE) {
1089         decoded_frame->pts = pkt->pts;
1090         pkt->pts           = AV_NOPTS_VALUE;
1091     }
1092
1093     // preprocess audio (volume)
1094     if (audio_volume != 256) {
1095         int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
1096         void *samples = decoded_frame->data[0];
1097         switch (avctx->sample_fmt) {
1098         case AV_SAMPLE_FMT_U8:
1099         {
1100             uint8_t *volp = samples;
1101             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1102                 int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
1103                 *volp++ = av_clip_uint8(v);
1104             }
1105             break;
1106         }
1107         case AV_SAMPLE_FMT_S16:
1108         {
1109             int16_t *volp = samples;
1110             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1111                 int v = ((*volp) * audio_volume + 128) >> 8;
1112                 *volp++ = av_clip_int16(v);
1113             }
1114             break;
1115         }
1116         case AV_SAMPLE_FMT_S32:
1117         {
1118             int32_t *volp = samples;
1119             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1120                 int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
1121                 *volp++ = av_clipl_int32(v);
1122             }
1123             break;
1124         }
1125         case AV_SAMPLE_FMT_FLT:
1126         {
1127             float *volp = samples;
1128             float scale = audio_volume / 256.f;
1129             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1130                 *volp++ *= scale;
1131             }
1132             break;
1133         }
1134         case AV_SAMPLE_FMT_DBL:
1135         {
1136             double *volp = samples;
1137             double scale = audio_volume / 256.;
1138             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1139                 *volp++ *= scale;
1140             }
1141             break;
1142         }
1143         default:
1144             av_log(NULL, AV_LOG_FATAL,
1145                    "Audio volume adjustment on sample format %s is not supported.\n",
1146                    av_get_sample_fmt_name(ist->st->codec->sample_fmt));
1147             exit_program(1);
1148         }
1149     }
1150
1151     rate_emu_sleep(ist);
1152
1153     resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
1154                        ist->resample_channels       != avctx->channels               ||
1155                        ist->resample_channel_layout != decoded_frame->channel_layout ||
1156                        ist->resample_sample_rate    != decoded_frame->sample_rate;
1157     if (resample_changed) {
1158         char layout1[64], layout2[64];
1159
1160         if (!guess_input_channel_layout(ist)) {
1161             av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1162                    "layout for Input Stream #%d.%d\n", ist->file_index,
1163                    ist->st->index);
1164             exit_program(1);
1165         }
1166         decoded_frame->channel_layout = avctx->channel_layout;
1167
1168         av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1169                                      ist->resample_channel_layout);
1170         av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1171                                      decoded_frame->channel_layout);
1172
1173         av_log(NULL, AV_LOG_INFO,
1174                "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1175                ist->file_index, ist->st->index,
1176                ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
1177                ist->resample_channels, layout1,
1178                decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1179                avctx->channels, layout2);
1180
1181         ist->resample_sample_fmt     = decoded_frame->format;
1182         ist->resample_sample_rate    = decoded_frame->sample_rate;
1183         ist->resample_channel_layout = decoded_frame->channel_layout;
1184         ist->resample_channels       = avctx->channels;
1185
1186         for (i = 0; i < nb_filtergraphs; i++)
1187             if (ist_in_filtergraph(filtergraphs[i], ist) &&
1188                 configure_filtergraph(filtergraphs[i]) < 0) {
1189                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1190                 exit_program(1);
1191             }
1192     }
1193
1194     if (decoded_frame->pts != AV_NOPTS_VALUE)
1195         decoded_frame->pts = av_rescale_q(decoded_frame->pts,
1196                                           ist->st->time_base,
1197                                           (AVRational){1, ist->st->codec->sample_rate});
1198     for (i = 0; i < ist->nb_filters; i++)
1199         av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
1200
1201     return ret;
1202 }
1203
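     /* Decode one video packet, reconfigure the filtergraphs if the frame properties changed, and feed the frame to the input filters. */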
1204 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1205 {
1206     AVFrame *decoded_frame;
1207     void *buffer_to_free = NULL;
1208     int i, ret = 0, resample_changed;
1209     float quality;
1210
1211     if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1212         return AVERROR(ENOMEM);
1213     else
1214         avcodec_get_frame_defaults(ist->decoded_frame);
1215     decoded_frame = ist->decoded_frame;
1216
1217     ret = avcodec_decode_video2(ist->st->codec,
1218                                 decoded_frame, got_output, pkt);
1219     if (ret < 0)
1220         return ret;
1221
1222     quality = same_quant ? decoded_frame->quality : 0;
1223     if (!*got_output) {
1224         /* no picture yet */
1225         if (!pkt->size)
1226             for (i = 0; i < ist->nb_filters; i++)
1227                 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
1228         return ret;
1229     }
1230     decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
1231                                            decoded_frame->pkt_dts);
1232     pkt->size = 0;
1233     pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
1234
1235     rate_emu_sleep(ist);
1236
1237     if (ist->st->sample_aspect_ratio.num)
1238         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1239
1240     resample_changed = ist->resample_width   != decoded_frame->width  ||
1241                        ist->resample_height  != decoded_frame->height ||
1242                        ist->resample_pix_fmt != decoded_frame->format;
1243     if (resample_changed) {
1244         av_log(NULL, AV_LOG_INFO,
1245                "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1246                ist->file_index, ist->st->index,
1247                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
1248                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1249
1250         ist->resample_width   = decoded_frame->width;
1251         ist->resample_height  = decoded_frame->height;
1252         ist->resample_pix_fmt = decoded_frame->format;
1253
1254         for (i = 0; i < nb_filtergraphs; i++)
1255             if (ist_in_filtergraph(filtergraphs[i], ist) &&
1256                 configure_filtergraph(filtergraphs[i]) < 0) {
1257                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1258                 exit_program(1);
1259             }
1260     }
1261
1262     for (i = 0; i < ist->nb_filters; i++) {
1263         // XXX what an ugly hack
1264         if (ist->filters[i]->graph->nb_outputs == 1)
1265             ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
1266
1267         if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
1268             FrameBuffer      *buf = decoded_frame->opaque;
1269             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1270                                         decoded_frame->data, decoded_frame->linesize,
1271                                         AV_PERM_READ | AV_PERM_PRESERVE,
1272                                         ist->st->codec->width, ist->st->codec->height,
1273                                         ist->st->codec->pix_fmt);
1274
1275             avfilter_copy_frame_props(fb, decoded_frame);
1276             fb->buf->priv           = buf;
1277             fb->buf->free           = filter_release_buffer;
1278
1279             buf->refcount++;
1280             av_buffersrc_buffer(ist->filters[i]->filter, fb);
1281         } else
1282             av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
1283     }
1284
1285     av_free(buffer_to_free);
1286     return ret;
1287 }
1288
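     /* Decode one subtitle packet and encode it into each output stream that is fed by this input and needs encoding. */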
1289 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1290 {
1291     AVSubtitle subtitle;
1292     int i, ret = avcodec_decode_subtitle2(ist->st->codec,
1293                                           &subtitle, got_output, pkt);
1294     if (ret < 0)
1295         return ret;
1296     if (!*got_output)
1297         return ret;
1298
1299     rate_emu_sleep(ist);
1300
1301     for (i = 0; i < nb_output_streams; i++) {
1302         OutputStream *ost = output_streams[i];
1303
1304         if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1305             continue;
1306
1307         do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
1308     }
1309
1310     avsubtitle_free(&subtitle);
1311     return ret;
1312 }
1313
1314 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1315 static int output_packet(InputStream *ist, const AVPacket *pkt)
1316 {
1317     int i;
1318     int got_output;
1319     AVPacket avpkt;
1320
1321     if (ist->next_dts == AV_NOPTS_VALUE)
1322         ist->next_dts = ist->last_dts;
1323
1324     if (pkt == NULL) {
1325         /* EOF handling */
1326         av_init_packet(&avpkt);
1327         avpkt.data = NULL;
1328         avpkt.size = 0;
1329         goto handle_eof;
1330     } else {
1331         avpkt = *pkt;
1332     }
1333
1334     if (pkt->dts != AV_NOPTS_VALUE)
1335         ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1336
1337     // while we have more to decode or while the decoder did output something on EOF
1338     while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1339         int ret = 0;
1340     handle_eof:
1341
1342         ist->last_dts = ist->next_dts;
1343
1344         if (avpkt.size && avpkt.size != pkt->size) {
1345             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
1346                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1347             ist->showed_multi_packet_warning = 1;
1348         }
1349
1350         switch (ist->st->codec->codec_type) {
1351         case AVMEDIA_TYPE_AUDIO:
1352             ret = decode_audio    (ist, &avpkt, &got_output);
1353             break;
1354         case AVMEDIA_TYPE_VIDEO:
1355             ret = decode_video    (ist, &avpkt, &got_output);
1356             if (avpkt.duration)
1357                 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1358             else if (ist->st->avg_frame_rate.num)
1359                 ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
1360                                               AV_TIME_BASE_Q);
1361             else if (ist->st->codec->time_base.num != 0) {
1362                 int ticks      = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1363                                                    ist->st->codec->ticks_per_frame;
1364                 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
1365             }
1366             break;
1367         case AVMEDIA_TYPE_SUBTITLE:
1368             ret = transcode_subtitles(ist, &avpkt, &got_output);
1369             break;
1370         default:
1371             return -1;
1372         }
1373
1374         if (ret < 0)
1375             return ret;
1376         // touch data and size only if not EOF
1377         if (pkt) {
1378             avpkt.data += ret;
1379             avpkt.size -= ret;
1380         }
1381         if (!got_output) {
1382             continue;
1383         }
1384     }
1385
1386     /* handle stream copy */
1387     if (!ist->decoding_needed) {
1388         rate_emu_sleep(ist);
1389         ist->last_dts = ist->next_dts;
1390         switch (ist->st->codec->codec_type) {
1391         case AVMEDIA_TYPE_AUDIO:
1392             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
1393                              ist->st->codec->sample_rate;
1394             break;
1395         case AVMEDIA_TYPE_VIDEO:
1396             if (ist->st->codec->time_base.num != 0) {
1397                 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
1398                 ist->next_dts += ((int64_t)AV_TIME_BASE *
1399                                   ist->st->codec->time_base.num * ticks) /
1400                                   ist->st->codec->time_base.den;
1401             }
1402             break;
1403         }
1404     }
1405     for (i = 0; pkt && i < nb_output_streams; i++) {
1406         OutputStream *ost = output_streams[i];
1407
1408         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1409             continue;
1410
1411         do_streamcopy(ist, ost, pkt);
1412     }
1413
1414     return 0;
1415 }
1416
1417 static void print_sdp(void)
1418 {
1419     char sdp[2048];
1420     int i;
1421     AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
1422
1423     if (!avc)
1424         exit_program(1);
1425     for (i = 0; i < nb_output_files; i++)
1426         avc[i] = output_files[i]->ctx;
1427
1428     av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1429     printf("SDP:\n%s\n", sdp);
1430     fflush(stdout);
1431     av_freep(&avc);
1432 }
1433
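     /* Open the decoder for an input stream (when decoding is needed) and initialize its timestamp state. */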
1434 static int init_input_stream(int ist_index, char *error, int error_len)
1435 {
1436     int i;
1437     InputStream *ist = input_streams[ist_index];
1438     if (ist->decoding_needed) {
1439         AVCodec *codec = ist->dec;
1440         if (!codec) {
1441             snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
1442                     ist->st->codec->codec_id, ist->file_index, ist->st->index);
1443             return AVERROR(EINVAL);
1444         }
1445
1446         /* update requested sample format for the decoder based on the
1447            corresponding encoder sample format */
1448         for (i = 0; i < nb_output_streams; i++) {
1449             OutputStream *ost = output_streams[i];
1450             if (ost->source_index == ist_index) {
1451                 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
1452                 break;
1453             }
1454         }
1455
1456         if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
1457             ist->st->codec->get_buffer     = codec_get_buffer;
1458             ist->st->codec->release_buffer = codec_release_buffer;
1459             ist->st->codec->opaque         = &ist->buffer_pool;
1460         }
1461
1462         if (!av_dict_get(ist->opts, "threads", NULL, 0))
1463             av_dict_set(&ist->opts, "threads", "auto", 0);
1464         if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
1465             snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
1466                     ist->file_index, ist->st->index);
1467             return AVERROR(EINVAL);
1468         }
1469         assert_codec_experimental(ist->st->codec, 0);
1470         assert_avoptions(ist->opts);
1471     }
1472
1473     ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1474     ist->next_dts = AV_NOPTS_VALUE;
1475     init_pts_correction(&ist->pts_ctx);
1476     ist->is_start = 1;
1477
1478     return 0;
1479 }
1480
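     /* Return the input stream feeding ost, either directly or via the first matching input of its filtergraph. */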
1481 static InputStream *get_input_stream(OutputStream *ost)
1482 {
1483     if (ost->source_index >= 0)
1484         return input_streams[ost->source_index];
1485
1486     if (ost->filter) {
1487         FilterGraph *fg = ost->filter->graph;
1488         int i;
1489
1490         for (i = 0; i < fg->nb_inputs; i++)
1491             if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
1492                 return fg->inputs[i]->ist;
1493     }
1494
1495     return NULL;
1496 }
1497
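     /**
      * Parse the comma-separated list of forced key frame times in kf and
      * store them in ost->forced_kf_pts, rescaled to the encoder time base.
      */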
1498 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1499                                     AVCodecContext *avctx)
1500 {
1501     char *p;
1502     int n = 1, i;
1503     int64_t t;
1504
1505     for (p = kf; *p; p++)
1506         if (*p == ',')
1507             n++;
1508     ost->forced_kf_count = n;
1509     ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1510     if (!ost->forced_kf_pts) {
1511         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1512         exit_program(1);
1513     }
1514
1515     p = kf;
1516     for (i = 0; i < n; i++) {
1517         char *next = strchr(p, ',');
1518
1519         if (next)
1520             *next++ = 0;
1521
1522         t = parse_time_or_die("force_key_frames", p, 1);
1523         ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
1524
1525         p = next;
1526     }
1527 }
1528
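     /**
      * Prepare the transcoding pipeline: derive the output stream parameters
      * (stream copy or encoding), open all decoders and encoders, write the
      * output file headers and print the stream mapping.
      *
      * @return 0 on success, a negative AVERROR code otherwise
      */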
1529 static int transcode_init(void)
1530 {
1531     int ret = 0, i, j, k;
1532     AVFormatContext *oc;
1533     AVCodecContext *codec, *icodec;
1534     OutputStream *ost;
1535     InputStream *ist;
1536     char error[1024];
1537     int want_sdp = 1;
1538
1539     /* init framerate emulation */
1540     for (i = 0; i < nb_input_files; i++) {
1541         InputFile *ifile = input_files[i];
1542         if (ifile->rate_emu)
1543             for (j = 0; j < ifile->nb_streams; j++)
1544                 input_streams[j + ifile->ist_index]->start = av_gettime();
1545     }
1546
1547     /* output stream init */
1548     for (i = 0; i < nb_output_files; i++) {
1549         oc = output_files[i]->ctx;
1550         if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
1551             av_dump_format(oc, i, oc->filename, 1);
1552             av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
1553             return AVERROR(EINVAL);
1554         }
1555     }
1556
1557     /* init complex filtergraphs */
1558     for (i = 0; i < nb_filtergraphs; i++)
1559         if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
1560             return ret;
1561
1562     /* for each output stream, we compute the right encoding parameters */
1563     for (i = 0; i < nb_output_streams; i++) {
1564         ost = output_streams[i];
1565         oc  = output_files[ost->file_index]->ctx;
1566         ist = get_input_stream(ost);
1567
1568         if (ost->attachment_filename)
1569             continue;
1570
1571         codec  = ost->st->codec;
1572
1573         if (ist) {
1574             icodec = ist->st->codec;
1575
1576             ost->st->disposition          = ist->st->disposition;
1577             codec->bits_per_raw_sample    = icodec->bits_per_raw_sample;
1578             codec->chroma_sample_location = icodec->chroma_sample_location;
1579         }
1580
1581         if (ost->stream_copy) {
1582             uint64_t extra_size;
1583
1584             av_assert0(ist && !ost->filter);
1585
1586             extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
1587
1588             if (extra_size > INT_MAX) {
1589                 return AVERROR(EINVAL);
1590             }
1591
1592             /* if stream_copy is selected, no need to decode or encode */
1593             codec->codec_id   = icodec->codec_id;
1594             codec->codec_type = icodec->codec_type;
1595
1596             if (!codec->codec_tag) {
1597                 if (!oc->oformat->codec_tag ||
1598                      av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
1599                      av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
1600                     codec->codec_tag = icodec->codec_tag;
1601             }
1602
1603             codec->bit_rate       = icodec->bit_rate;
1604             codec->rc_max_rate    = icodec->rc_max_rate;
1605             codec->rc_buffer_size = icodec->rc_buffer_size;
1606             codec->field_order    = icodec->field_order;
1607             codec->extradata      = av_mallocz(extra_size);
1608             if (!codec->extradata) {
1609                 return AVERROR(ENOMEM);
1610             }
1611             memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
1612             codec->extradata_size = icodec->extradata_size;
1613             if (!copy_tb) {
1614                 codec->time_base      = icodec->time_base;
1615                 codec->time_base.num *= icodec->ticks_per_frame;
1616                 av_reduce(&codec->time_base.num, &codec->time_base.den,
1617                           codec->time_base.num, codec->time_base.den, INT_MAX);
1618             } else
1619                 codec->time_base = ist->st->time_base;
1620
1621             switch (codec->codec_type) {
1622             case AVMEDIA_TYPE_AUDIO:
1623                 if (audio_volume != 256) {
1624                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
1625                     exit_program(1);
1626                 }
1627                 codec->channel_layout     = icodec->channel_layout;
1628                 codec->sample_rate        = icodec->sample_rate;
1629                 codec->channels           = icodec->channels;
1630                 codec->frame_size         = icodec->frame_size;
1631                 codec->audio_service_type = icodec->audio_service_type;
1632                 codec->block_align        = icodec->block_align;
1633                 break;
1634             case AVMEDIA_TYPE_VIDEO:
1635                 codec->pix_fmt            = icodec->pix_fmt;
1636                 codec->width              = icodec->width;
1637                 codec->height             = icodec->height;
1638                 codec->has_b_frames       = icodec->has_b_frames;
1639                 if (!codec->sample_aspect_ratio.num) {
1640                     codec->sample_aspect_ratio   =
1641                     ost->st->sample_aspect_ratio =
1642                         ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
1643                         ist->st->codec->sample_aspect_ratio.num ?
1644                         ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
1645                 }
1646                 break;
1647             case AVMEDIA_TYPE_SUBTITLE:
1648                 codec->width  = icodec->width;
1649                 codec->height = icodec->height;
1650                 break;
1651             case AVMEDIA_TYPE_DATA:
1652             case AVMEDIA_TYPE_ATTACHMENT:
1653                 break;
1654             default:
1655                 abort();
1656             }
1657         } else {
1658             if (!ost->enc) {
1659                 /* should only happen when a default codec is not present. */
1660                 snprintf(error, sizeof(error), "Automatic encoder selection "
1661                          "failed for output stream #%d:%d. Default encoder for "
1662                          "format %s is probably disabled. Please choose an "
1663                          "encoder manually.\n", ost->file_index, ost->index,
1664                          oc->oformat->name);
1665                 ret = AVERROR(EINVAL);
1666                 goto dump_format;
1667             }
1668
1669             if (ist)
1670                 ist->decoding_needed = 1;
1671             ost->encoding_needed = 1;
1672
1673             /*
1674              * We want CFR output if and only if one of those is true:
1675              * 1) user specified output framerate with -r
1676              * 2) user specified -vsync cfr
1677              * 3) output format is CFR and the user didn't force vsync to
1678              *    something other than CFR
1679              *
1680              * in such a case, set ost->frame_rate
1681              */
1682             if (codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1683                 !ost->frame_rate.num && ist &&
1684                 (video_sync_method ==  VSYNC_CFR ||
1685                  (video_sync_method ==  VSYNC_AUTO &&
1686                   !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
1687                 ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
1688                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
1689                     int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
1690                     ost->frame_rate = ost->enc->supported_framerates[idx];
1691                 }
1692             }
1693
1694             if (!ost->filter &&
1695                 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
1696                  codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
1697                     FilterGraph *fg;
1698                     fg = init_simple_filtergraph(ist, ost);
1699                     if (configure_filtergraph(fg)) {
1700                         av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
1701                         exit_program(1);
1702                     }
1703             }
1704
1705             switch (codec->codec_type) {
1706             case AVMEDIA_TYPE_AUDIO:
1707                 codec->sample_fmt     = ost->filter->filter->inputs[0]->format;
1708                 codec->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
1709                 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
1710                 codec->channels       = av_get_channel_layout_nb_channels(codec->channel_layout);
1711                 codec->time_base      = (AVRational){ 1, codec->sample_rate };
1712                 break;
1713             case AVMEDIA_TYPE_VIDEO:
1714                 codec->time_base = ost->filter->filter->inputs[0]->time_base;
1715
1716                 codec->width  = ost->filter->filter->inputs[0]->w;
1717                 codec->height = ost->filter->filter->inputs[0]->h;
1718                 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
1719                     ost->frame_aspect_ratio ? // overridden by the -aspect cli option
1720                     av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
1721                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
1722                 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
1723
1724                 if (codec->width   != icodec->width  ||
1725                     codec->height  != icodec->height ||
1726                     codec->pix_fmt != icodec->pix_fmt) {
1727                     codec->bits_per_raw_sample = 0;
1728                 }
1729
1730                 if (ost->forced_keyframes)
1731                     parse_forced_key_frames(ost->forced_keyframes, ost,
1732                                             ost->st->codec);
1733                 break;
1734             case AVMEDIA_TYPE_SUBTITLE:
1735                 codec->time_base = (AVRational){1, 1000};
1736                 break;
1737             default:
1738                 abort();
1739                 break;
1740             }
1741             /* two pass mode */
1742             if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1743                 char logfilename[1024];
1744                 FILE *f;
1745
1746                 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1747                          pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
1748                          i);
1749                 if (!strcmp(ost->enc->name, "libx264")) {
1750                     av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
1751                 } else {
1752                     if (codec->flags & CODEC_FLAG_PASS1) {
1753                         f = fopen(logfilename, "wb");
1754                         if (!f) {
1755                             av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
1756                                    logfilename, strerror(errno));
1757                             exit_program(1);
1758                         }
1759                         ost->logfile = f;
1760                     } else {
1761                         char  *logbuffer;
1762                         size_t logbuffer_size;
1763                         if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
1764                             av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
1765                                    logfilename);
1766                             exit_program(1);
1767                         }
1768                         codec->stats_in = logbuffer;
1769                     }
1770                 }
1771             }
1772         }
1773     }
1774
1775     /* open each encoder */
1776     for (i = 0; i < nb_output_streams; i++) {
1777         ost = output_streams[i];
1778         if (ost->encoding_needed) {
1779             AVCodec      *codec = ost->enc;
1780             AVCodecContext *dec = NULL;
1781
1782             if ((ist = get_input_stream(ost)))
1783                 dec = ist->st->codec;
1784             if (dec && dec->subtitle_header) {
1785                 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
1786                 if (!ost->st->codec->subtitle_header) {
1787                     ret = AVERROR(ENOMEM);
1788                     goto dump_format;
1789                 }
1790                 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
1791                 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
1792             }
1793             if (!av_dict_get(ost->opts, "threads", NULL, 0))
1794                 av_dict_set(&ost->opts, "threads", "auto", 0);
1795             if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
1796                 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
1797                         ost->file_index, ost->index);
1798                 ret = AVERROR(EINVAL);
1799                 goto dump_format;
1800             }
1801             assert_codec_experimental(ost->st->codec, 1);
1802             assert_avoptions(ost->opts);
1803             if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
1804                 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low. "
1805                                              "It takes bits/s as argument, not kbits/s\n");
1806             extra_size += ost->st->codec->extradata_size;
1807
1808             if (ost->st->codec->me_threshold)
1809                 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
1810         }
1811     }
1812
1813     /* init input streams */
1814     for (i = 0; i < nb_input_streams; i++)
1815         if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
1816             goto dump_format;
1817
1818     /* discard unused programs */
1819     for (i = 0; i < nb_input_files; i++) {
1820         InputFile *ifile = input_files[i];
1821         for (j = 0; j < ifile->ctx->nb_programs; j++) {
1822             AVProgram *p = ifile->ctx->programs[j];
1823             int discard  = AVDISCARD_ALL;
1824
1825             for (k = 0; k < p->nb_stream_indexes; k++)
1826                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
1827                     discard = AVDISCARD_DEFAULT;
1828                     break;
1829                 }
1830             p->discard = discard;
1831         }
1832     }
1833
1834     /* open files and write file headers */
1835     for (i = 0; i < nb_output_files; i++) {
1836         oc = output_files[i]->ctx;
1837         oc->interrupt_callback = int_cb;
1838         if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
1839             char errbuf[128];
1840             const char *errbuf_ptr = errbuf;
1841             if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)
1842                 errbuf_ptr = strerror(AVUNERROR(ret));
1843             snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters?): %s", i, errbuf_ptr);
1844             ret = AVERROR(EINVAL);
1845             goto dump_format;
1846         }
1847         assert_avoptions(output_files[i]->opts);
1848         if (strcmp(oc->oformat->name, "rtp")) {
1849             want_sdp = 0;
1850         }
1851     }
1852
1853  dump_format:
1854     /* dump the output file parameters - this cannot be done earlier,
1855        since in the stream copy case they are only known at this point */
1856     for (i = 0; i < nb_output_files; i++) {
1857         av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
1858     }
1859
1860     /* dump the stream mapping */
1861     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
1862     for (i = 0; i < nb_input_streams; i++) {
1863         ist = input_streams[i];
1864
1865         for (j = 0; j < ist->nb_filters; j++) {
1866             if (ist->filters[j]->graph->graph_desc) {
1867                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
1868                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
1869                        ist->filters[j]->name);
1870                 if (nb_filtergraphs > 1)
1871                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
1872                 av_log(NULL, AV_LOG_INFO, "\n");
1873             }
1874         }
1875     }
1876
1877     for (i = 0; i < nb_output_streams; i++) {
1878         ost = output_streams[i];
1879
1880         if (ost->attachment_filename) {
1881             /* an attached file */
1882             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
1883                    ost->attachment_filename, ost->file_index, ost->index);
1884             continue;
1885         }
1886
1887         if (ost->filter && ost->filter->graph->graph_desc) {
1888             /* output from a complex graph */
1889             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
1890             if (nb_filtergraphs > 1)
1891                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
1892
1893             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
1894                    ost->index, ost->enc ? ost->enc->name : "?");
1895             continue;
1896         }
1897
1898         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
1899                input_streams[ost->source_index]->file_index,
1900                input_streams[ost->source_index]->st->index,
1901                ost->file_index,
1902                ost->index);
1903         if (ost->sync_ist != input_streams[ost->source_index])
1904             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
1905                    ost->sync_ist->file_index,
1906                    ost->sync_ist->st->index);
1907         if (ost->stream_copy)
1908             av_log(NULL, AV_LOG_INFO, " (copy)");
1909         else
1910             av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
1911                    input_streams[ost->source_index]->dec->name : "?",
1912                    ost->enc ? ost->enc->name : "?");
1913         av_log(NULL, AV_LOG_INFO, "\n");
1914     }
1915
1916     if (ret) {
1917         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
1918         return ret;
1919     }
1920
1921     if (want_sdp) {
1922         print_sdp();
1923     }
1924
1925     return 0;
1926 }
1927
1928 /**
1929  * @return 1 if there are still streams where more output is wanted,
1930  *         0 otherwise
1931  */
1932 static int need_output(void)
1933 {
1934     int i;
1935
1936     for (i = 0; i < nb_output_streams; i++) {
1937         OutputStream *ost    = output_streams[i];
1938         OutputFile *of       = output_files[ost->file_index];
1939         AVFormatContext *os  = output_files[ost->file_index]->ctx;
1940
1941         if (ost->is_past_recording_time ||
1942             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
1943             continue;
1944         if (ost->frame_number >= ost->max_frames) {
1945             int j;
1946             for (j = 0; j < of->ctx->nb_streams; j++)
1947                 output_streams[of->ost_index + j]->is_past_recording_time = 1;
1948             continue;
1949         }
1950
1951         return 1;
1952     }
1953
1954     return 0;
1955 }
1956
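     /**
      * Pick the input file to read the next packet from: the file whose
      * active stream has the lowest last decoded timestamp.
      *
      * @return the file index, or -1 if no input is currently readable
      */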
1957 static int select_input_file(void)
1958 {
1959     int64_t ipts_min = INT64_MAX;
1960     int i, file_index = -1;
1961
1962     for (i = 0; i < nb_input_streams; i++) {
1963         InputStream *ist = input_streams[i];
1964         int64_t ipts     = ist->last_dts;
1965
1966         if (ist->discard || input_files[ist->file_index]->eagain)
1967             continue;
1968         if (!input_files[ist->file_index]->eof_reached) {
1969             if (ipts < ipts_min) {
1970                 ipts_min = ipts;
1971                 file_index = ist->file_index;
1972             }
1973         }
1974     }
1975
1976     return file_index;
1977 }
1978
1979 #if HAVE_PTHREADS
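     /**
      * Demuxing thread, one per input file: read packets and queue them in
      * the file's FIFO until EOF or until the main thread sets
      * transcoding_finished.
      */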
1980 static void *input_thread(void *arg)
1981 {
1982     InputFile *f = arg;
1983     int ret = 0;
1984
1985     while (!transcoding_finished && ret >= 0) {
1986         AVPacket pkt;
1987         ret = av_read_frame(f->ctx, &pkt);
1988
1989         if (ret == AVERROR(EAGAIN)) {
1990             av_usleep(10000);
1991             ret = 0;
1992             continue;
1993         } else if (ret < 0)
1994             break;
1995
1996         pthread_mutex_lock(&f->fifo_lock);
1997         while (!av_fifo_space(f->fifo))
1998             pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
1999
2000         av_dup_packet(&pkt);
2001         av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2002
2003         pthread_mutex_unlock(&f->fifo_lock);
2004     }
2005
2006     f->finished = 1;
2007     return NULL;
2008 }
2009
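     /**
      * Ask all demuxing threads to finish, drain their packet FIFOs, join
      * the threads and free the FIFOs.
      */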
2010 static void free_input_threads(void)
2011 {
2012     int i;
2013
2014     if (nb_input_files == 1)
2015         return;
2016
2017     transcoding_finished = 1;
2018
2019     for (i = 0; i < nb_input_files; i++) {
2020         InputFile *f = input_files[i];
2021         AVPacket pkt;
2022
2023         if (!f->fifo || f->joined)
2024             continue;
2025
2026         pthread_mutex_lock(&f->fifo_lock);
2027         while (av_fifo_size(f->fifo)) {
2028             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2029             av_free_packet(&pkt);
2030         }
2031         pthread_cond_signal(&f->fifo_cond);
2032         pthread_mutex_unlock(&f->fifo_lock);
2033
2034         pthread_join(f->thread, NULL);
2035         f->joined = 1;
2036
2037         while (av_fifo_size(f->fifo)) {
2038             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2039             av_free_packet(&pkt);
2040         }
2041         av_fifo_free(f->fifo);
2042     }
2043 }
2044
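     /**
      * Start one demuxing thread and packet FIFO per input file; with a
      * single input file, demuxing stays in the main thread.
      */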
2045 static int init_input_threads(void)
2046 {
2047     int i, ret;
2048
2049     if (nb_input_files == 1)
2050         return 0;
2051
2052     for (i = 0; i < nb_input_files; i++) {
2053         InputFile *f = input_files[i];
2054
2055         if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2056             return AVERROR(ENOMEM);
2057
2058         pthread_mutex_init(&f->fifo_lock, NULL);
2059         pthread_cond_init (&f->fifo_cond, NULL);
2060
2061         if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2062             return AVERROR(ret);
2063     }
2064     return 0;
2065 }
2066
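     /**
      * Fetch the next queued packet for an input file from its FIFO.
      *
      * @return 0 on success, AVERROR(EAGAIN) if the FIFO is currently empty,
      *         AVERROR_EOF if the demuxing thread has finished
      */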
2067 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2068 {
2069     int ret = 0;
2070
2071     pthread_mutex_lock(&f->fifo_lock);
2072
2073     if (av_fifo_size(f->fifo)) {
2074         av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2075         pthread_cond_signal(&f->fifo_cond);
2076     } else {
2077         if (f->finished)
2078             ret = AVERROR_EOF;
2079         else
2080             ret = AVERROR(EAGAIN);
2081     }
2082
2083     pthread_mutex_unlock(&f->fifo_lock);
2084
2085     return ret;
2086 }
2087 #endif
2088
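     /* read the next packet, through the demuxing thread if one is running */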
2089 static int get_input_packet(InputFile *f, AVPacket *pkt)
2090 {
2091 #if HAVE_PTHREADS
2092     if (nb_input_files > 1)
2093         return get_input_packet_mt(f, pkt);
2094 #endif
2095     return av_read_frame(f->ctx, pkt);
2096 }
2097
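     /* return 1 if any input file is temporarily unavailable (EAGAIN) */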
2098 static int got_eagain(void)
2099 {
2100     int i;
2101     for (i = 0; i < nb_input_files; i++)
2102         if (input_files[i]->eagain)
2103             return 1;
2104     return 0;
2105 }
2106
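     /* clear the EAGAIN flag on all input files before the next read */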
2107 static void reset_eagain(void)
2108 {
2109     int i;
2110     for (i = 0; i < nb_input_files; i++)
2111         input_files[i]->eagain = 0;
2112 }
2113
2114 /*
2115  * The following code is the main loop of the file converter
2116  */
2117 static int transcode(void)
2118 {
2119     int ret, i;
2120     AVFormatContext *is, *os;
2121     OutputStream *ost;
2122     InputStream *ist;
2123     int64_t timer_start;
2124
2125     ret = transcode_init();
2126     if (ret < 0)
2127         goto fail;
2128
2129     av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2130     term_init();
2131
2132     timer_start = av_gettime();
2133
2134 #if HAVE_PTHREADS
2135     if ((ret = init_input_threads()) < 0)
2136         goto fail;
2137 #endif
2138
2139     for (; received_sigterm == 0;) {
2140         int file_index, ist_index;
2141         AVPacket pkt;
2142
2143         /* check if there's any stream where output is still needed */
2144         if (!need_output()) {
2145             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2146             break;
2147         }
2148
2149         /* select the input file to read the next packet from */
2150         file_index = select_input_file();
2151         /* if none, all input is finished */
2152         if (file_index < 0) {
2153             if (got_eagain()) {
2154                 reset_eagain();
2155                 av_usleep(10000);
2156                 continue;
2157             }
2158             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
2159             break;
2160         }
2161
2162         is  = input_files[file_index]->ctx;
2163         ret = get_input_packet(input_files[file_index], &pkt);
2164
2165         if (ret == AVERROR(EAGAIN)) {
2166             input_files[file_index]->eagain = 1;
2167             continue;
2168         }
2169         if (ret < 0) {
2170             if (ret != AVERROR_EOF) {
2171                 print_error(is->filename, ret);
2172                 if (exit_on_error)
2173                     exit_program(1);
2174             }
2175             input_files[file_index]->eof_reached = 1;
2176
2177             for (i = 0; i < input_files[file_index]->nb_streams; i++) {
2178                 ist = input_streams[input_files[file_index]->ist_index + i];
2179                 if (ist->decoding_needed)
2180                     output_packet(ist, NULL);
2181             }
2182
2183             if (opt_shortest)
2184                 break;
2185             else
2186                 continue;
2187         }
2188
2189         reset_eagain();
2190
2191         if (do_pkt_dump) {
2192             av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2193                              is->streams[pkt.stream_index]);
2194         }
2195         /* the following test is needed in case new streams appear
2196            dynamically in the stream: we ignore them */
2197         if (pkt.stream_index >= input_files[file_index]->nb_streams)
2198             goto discard_packet;
2199         ist_index = input_files[file_index]->ist_index + pkt.stream_index;
2200         ist = input_streams[ist_index];
2201         if (ist->discard)
2202             goto discard_packet;
2203
2204         if (pkt.dts != AV_NOPTS_VALUE)
2205             pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2206         if (pkt.pts != AV_NOPTS_VALUE)
2207             pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2208
2209         if (pkt.pts != AV_NOPTS_VALUE)
2210             pkt.pts *= ist->ts_scale;
2211         if (pkt.dts != AV_NOPTS_VALUE)
2212             pkt.dts *= ist->ts_scale;
2213
2214         if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
2215             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
2216             int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2217             int64_t delta   = pkt_dts - ist->next_dts;
2218             if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2219                 input_files[ist->file_index]->ts_offset -= delta;
2220                 av_log(NULL, AV_LOG_DEBUG,
2221                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2222                        delta, input_files[ist->file_index]->ts_offset);
2223                 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2224                 if (pkt.pts != AV_NOPTS_VALUE)
2225                     pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2226             }
2227         }
2228
2229         if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
2230             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
2231                    ist->file_index, ist->st->index);
2232             if (exit_on_error)
2233                 exit_program(1);
2234             av_free_packet(&pkt);
2235             continue;
2236         }
2237
2238     discard_packet:
2239         av_free_packet(&pkt);
2240
2241         /* dump report by using the first video and audio output streams */
2242         print_report(0, timer_start);
2243     }
2244 #if HAVE_PTHREADS
2245     free_input_threads();
2246 #endif
2247
2248     /* at the end of stream, we must flush the decoder buffers */
2249     for (i = 0; i < nb_input_streams; i++) {
2250         ist = input_streams[i];
2251         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
2252             output_packet(ist, NULL);
2253         }
2254     }
2255     poll_filters();
2256     flush_encoders();
2257
2258     term_exit();
2259
2260     /* write the trailer if needed and close file */
2261     for (i = 0; i < nb_output_files; i++) {
2262         os = output_files[i]->ctx;
2263         av_write_trailer(os);
2264     }
2265
2266     /* dump report by using the first video and audio streams */
2267     print_report(1, timer_start);
2268
2269     /* close each encoder */
2270     for (i = 0; i < nb_output_streams; i++) {
2271         ost = output_streams[i];
2272         if (ost->encoding_needed) {
2273             av_freep(&ost->st->codec->stats_in);
2274             avcodec_close(ost->st->codec);
2275         }
2276     }
2277
2278     /* close each decoder */
2279     for (i = 0; i < nb_input_streams; i++) {
2280         ist = input_streams[i];
2281         if (ist->decoding_needed) {
2282             avcodec_close(ist->st->codec);
2283         }
2284     }
2285
2286     /* finished! */
2287     ret = 0;
2288
2289  fail:
2290 #if HAVE_PTHREADS
2291     free_input_threads();
2292 #endif
2293
2294     if (output_streams) {
2295         for (i = 0; i < nb_output_streams; i++) {
2296             ost = output_streams[i];
2297             if (ost) {
2298                 if (ost->stream_copy)
2299                     av_freep(&ost->st->codec->extradata);
2300                 if (ost->logfile) {
2301                     fclose(ost->logfile);
2302                     ost->logfile = NULL;
2303                 }
2304                 av_freep(&ost->st->codec->subtitle_header);
2305                 av_free(ost->forced_kf_pts);
2306                 av_dict_free(&ost->opts);
2307             }
2308         }
2309     }
2310     return ret;
2311 }
2312
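     /* user CPU time consumed by the process, in microseconds (falls back to
      * wall-clock time if no suitable API is available) */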
2313 static int64_t getutime(void)
2314 {
2315 #if HAVE_GETRUSAGE
2316     struct rusage rusage;
2317
2318     getrusage(RUSAGE_SELF, &rusage);
2319     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
2320 #elif HAVE_GETPROCESSTIMES
2321     HANDLE proc;
2322     FILETIME c, e, k, u;
2323     proc = GetCurrentProcess();
2324     GetProcessTimes(proc, &c, &e, &k, &u);
2325     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
2326 #else
2327     return av_gettime();
2328 #endif
2329 }
2330
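     /* peak memory usage of the process in bytes, or 0 if it cannot be
      * determined */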
2331 static int64_t getmaxrss(void)
2332 {
2333 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
2334     struct rusage rusage;
2335     getrusage(RUSAGE_SELF, &rusage);
2336     return (int64_t)rusage.ru_maxrss * 1024;
2337 #elif HAVE_GETPROCESSMEMORYINFO
2338     HANDLE proc;
2339     PROCESS_MEMORY_COUNTERS memcounters;
2340     proc = GetCurrentProcess();
2341     memcounters.cb = sizeof(memcounters);
2342     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
2343     return memcounters.PeakPagefileUsage;
2344 #else
2345     return 0;
2346 #endif
2347 }
2348
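     /* apply -cpuflags before the rest of the command line is parsed */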
2349 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
2350 {
2351     int idx = locate_option(argc, argv, options, "cpuflags");
2352     if (idx && argv[idx + 1])
2353         opt_cpuflags("cpuflags", argv[idx + 1]);
2354 }
2355
2356 int main(int argc, char **argv)
2357 {
2358     OptionsContext o = { 0 };
2359     int64_t ti;
2360
2361     reset_options(&o);
2362
2363     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2364     parse_loglevel(argc, argv, options);
2365
2366     avcodec_register_all();
2367 #if CONFIG_AVDEVICE
2368     avdevice_register_all();
2369 #endif
2370     avfilter_register_all();
2371     av_register_all();
2372     avformat_network_init();
2373
2374     show_banner();
2375
2376     parse_cpuflags(argc, argv, options);
2377
2378     /* parse options */
2379     parse_options(&o, argc, argv, options, opt_output_file);
2380
2381     if (nb_output_files <= 0 && nb_input_files == 0) {
2382         show_usage();
2383         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2384         exit_program(1);
2385     }
2386
2387     /* file converter / grab */
2388     if (nb_output_files <= 0) {
2389         fprintf(stderr, "At least one output file must be specified\n");
2390         exit_program(1);
2391     }
2392
2393     if (nb_input_files == 0) {
2394         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
2395         exit_program(1);
2396     }
2397
2398     ti = getutime();
2399     if (transcode() < 0)
2400         exit_program(1);
2401     ti = getutime() - ti;
2402     if (do_benchmark) {
2403         int maxrss = getmaxrss() / 1024;
2404         printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
2405     }
2406
2407     exit_program(0);
2408     return 0;
2409 }