3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavresample/avresample.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/vsrc_buffer.h"
56 #if HAVE_SYS_RESOURCE_H
57 #include <sys/types.h>
59 #include <sys/resource.h>
60 #elif HAVE_GETPROCESSTIMES
63 #if HAVE_GETPROCESSMEMORYINFO
69 #include <sys/select.h>
76 #include "libavutil/avassert.h"
/* Video sync mode: pass frames/timestamps through without duplication or
 * dropping.  NOTE(review): the companion VSYNC_* modes (e.g. VSYNC_AUTO used
 * below) are defined on lines not visible in this chunk. */
79 #define VSYNC_PASSTHROUGH 0
/* Program identity strings consumed by the shared cmdutils help/version code. */
83 const char program_name[] = "avconv";
84 const int program_birth_year = 2000;
86 /* select an input stream for an output stream */
87 typedef struct StreamMap {
88 int disabled; /** 1 if this mapping is disabled by a negative map */
92 int sync_stream_index;
93 char *linklabel; /** name of an output link, for mapping lavfi outputs */
97 * select an input file for an output file
99 typedef struct MetadataMap {
100 int file; ///< file index
101 char type; ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
102 int index; ///< stream/chapter/program number
/* Global state set from the command line.  These are file-scope (static)
 * flags/values shared by the option parser and the transcoding loop. */
105 static const OptionDef options[];
107 static int video_discard = 0;
108 static int same_quant = 0;
109 static int do_deinterlace = 0;
110 static int intra_dc_precision = 8;
111 static int qp_hist = 0;
113 static int file_overwrite = 0;
114 static int do_benchmark = 0;
115 static int do_hex_dump = 0;
116 static int do_pkt_dump = 0;
117 static int do_pass = 0;
118 static char *pass_logfilename_prefix = NULL;
119 static int video_sync_method = VSYNC_AUTO;
/* audio_sync_method > 0 enables compensation via the "asyncts" filter; see
 * configure_audio_filters() below.  < 0 forces pts passthrough in do_audio_out(). */
120 static int audio_sync_method = 0;
121 static float audio_drift_threshold = 0.1;
122 static int copy_ts = 0;
123 static int copy_tb = 1;
124 static int opt_shortest = 0;
125 static char *vstats_filename;
126 static FILE *vstats_file;
/* 256 == unity gain (fixed-point volume scale). */
128 static int audio_volume = 256;
130 static int exit_on_error = 0;
131 static int using_stdin = 0;
/* Byte counters accumulated while muxing, for the final statistics printout. */
132 static int64_t video_size = 0;
133 static int64_t audio_size = 0;
134 static int64_t extra_size = 0;
135 static int nb_frames_dup = 0;
136 static int nb_frames_drop = 0;
137 static int input_sync;
139 static float dts_delta_threshold = 10;
141 static int print_stats = 1;
143 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
/* One input pad of a filtergraph: connects a decoding InputStream to the
 * graph's source buffer filter. */
145 typedef struct InputFilter {
146 AVFilterContext *filter;
147 struct InputStream *ist;
148 struct FilterGraph *graph;
/* One output pad of a filtergraph: connects a buffersink to an OutputStream. */
151 typedef struct OutputFilter {
152 AVFilterContext *filter;
153 struct OutputStream *ost;
154 struct FilterGraph *graph;
156 /* temporary storage until stream maps are processed */
157 AVFilterInOut *out_tmp;
/* A lavfi graph, either "simple" (1 in, 1 out, per-stream) or "complex"
 * (described by graph_desc, arbitrary pads). */
160 typedef struct FilterGraph {
162 const char *graph_desc;
164 AVFilterGraph *graph;
166 InputFilter **inputs;
168 OutputFilter **outputs;
/* Reference-counted video frame buffer handed to decoders through the
 * custom get_buffer callback; recycled through InputStream.buffer_pool. */
172 typedef struct FrameBuffer {
178 enum PixelFormat pix_fmt;
181 struct InputStream *ist;
182 struct FrameBuffer *next;
/* Per-input-stream state: decoder, timestamp tracking, resample parameters
 * and the list of filtergraph inputs this stream feeds. */
185 typedef struct InputStream {
188 int discard; /* true if stream data should be discarded */
189 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
191 AVFrame *decoded_frame;
193 int64_t start; /* time when read started */
194 /* predicted dts of the next packet read for this stream or (when there are
195 * several frames in a packet) of the next frame in current packet */
197 /* dts of the last packet read for this stream */
199 PtsCorrectionContext pts_ctx;
201 int is_start; /* is 1 at the start and after a discontinuity */
202 int showed_multi_packet_warning;
/* Last-seen decoder parameters, used to detect mid-stream format changes. */
207 int resample_pix_fmt;
209 int resample_sample_fmt;
210 int resample_sample_rate;
211 int resample_channels;
212 uint64_t resample_channel_layout;
214 /* a pool of free buffers for decoded data */
215 FrameBuffer *buffer_pool;
217 /* decoded data from this stream goes into all those filters
218 * currently video only */
219 InputFilter **filters;
/* Per-input-file state: the demuxer context and the slice of the global
 * input_streams[] array belonging to this file. */
223 typedef struct InputFile {
224 AVFormatContext *ctx;
225 int eof_reached; /* true if eof reached */
226 int ist_index; /* index of first stream in ist_table */
227 int buffer_size; /* current total buffer size */
229 int nb_streams; /* number of stream that avconv is aware of; may be different
230 from ctx.nb_streams if new streams appear during av_read_frame() */
/* Per-output-stream state: encoder, sync bookkeeping, bitstream filters and
 * the filtergraph output that feeds this stream. */
234 typedef struct OutputStream {
235 int file_index; /* file index */
236 int index; /* stream index in the output file */
237 int source_index; /* InputStream index */
238 AVStream *st; /* stream in the output file */
239 int encoding_needed; /* true if encoding needed for this stream */
241 /* input pts and corresponding output pts
243 // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
244 struct InputStream *sync_ist; /* input stream to sync against */
245 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
246 /* pts of the first frame encoded for this stream, used for limiting
249 AVBitStreamFilterContext *bitstream_filters;
252 AVFrame *filtered_frame;
255 AVRational frame_rate;
259 float frame_aspect_ratio;
262 /* forced key frames */
263 int64_t *forced_kf_pts;
269 OutputFilter *filter;
/* Set by check_recording_time() once -t/recording_time is exceeded. */
274 int is_past_recording_time;
276 const char *attachment_filename;
277 int copy_initial_nonkeyframes;
/* Single requested pixel format terminated by a sentinel; consumed by the
 * choose_pix_fmts machinery.  NOTE(review): sentinel init is outside this view. */
279 enum PixelFormat pix_fmts[2];
/* Per-output-file state: the muxer context and the slice of the global
 * output_streams[] array belonging to this file. */
283 typedef struct OutputFile {
284 AVFormatContext *ctx;
286 int ost_index; /* index of the first stream in output_streams */
287 int64_t recording_time; /* desired length of the resulting file in microseconds */
288 int64_t start_time; /* start time in microseconds */
289 uint64_t limit_filesize;
/* Global tables of all streams/files/filtergraphs, grown with grow_array()
 * as inputs/outputs are opened. */
292 static InputStream **input_streams = NULL;
293 static int nb_input_streams = 0;
294 static InputFile **input_files = NULL;
295 static int nb_input_files = 0;
297 static OutputStream **output_streams = NULL;
298 static int nb_output_streams = 0;
299 static OutputFile **output_files = NULL;
300 static int nb_output_files = 0;
302 static FilterGraph **filtergraphs;
/* Accumulates all per-file options while the command line is parsed; reset
 * between files by reset_options().  SpecifierOpt arrays pair a value with a
 * stream specifier string; each has a matching nb_* counter (several counters
 * are on lines not visible in this chunk). */
305 typedef struct OptionsContext {
306 /* input/output options */
310 SpecifierOpt *codec_names;
312 SpecifierOpt *audio_channels;
313 int nb_audio_channels;
314 SpecifierOpt *audio_sample_rate;
315 int nb_audio_sample_rate;
316 SpecifierOpt *frame_rates;
318 SpecifierOpt *frame_sizes;
320 SpecifierOpt *frame_pix_fmts;
321 int nb_frame_pix_fmts;
/* input options */
324 int64_t input_ts_offset;
327 SpecifierOpt *ts_scale;
329 SpecifierOpt *dump_attachment;
330 int nb_dump_attachment;
/* output options */
333 StreamMap *stream_maps;
335 /* first item specifies output metadata, second is input */
336 MetadataMap (*meta_data_maps)[2];
337 int nb_meta_data_maps;
338 int metadata_global_manual;
339 int metadata_streams_manual;
340 int metadata_chapters_manual;
341 const char **attachments;
344 int chapters_input_file;
346 int64_t recording_time;
347 uint64_t limit_filesize;
353 int subtitle_disable;
356 /* indexed by output file stream index */
360 SpecifierOpt *metadata;
362 SpecifierOpt *max_frames;
364 SpecifierOpt *bitstream_filters;
365 int nb_bitstream_filters;
366 SpecifierOpt *codec_tags;
368 SpecifierOpt *sample_fmts;
370 SpecifierOpt *qscale;
372 SpecifierOpt *forced_key_frames;
373 int nb_forced_key_frames;
374 SpecifierOpt *force_fps;
376 SpecifierOpt *frame_aspect_ratios;
377 int nb_frame_aspect_ratios;
378 SpecifierOpt *rc_overrides;
380 SpecifierOpt *intra_matrices;
381 int nb_intra_matrices;
382 SpecifierOpt *inter_matrices;
383 int nb_inter_matrices;
384 SpecifierOpt *top_field_first;
385 int nb_top_field_first;
386 SpecifierOpt *metadata_map;
388 SpecifierOpt *presets;
390 SpecifierOpt *copy_initial_nonkeyframes;
391 int nb_copy_initial_nonkeyframes;
392 SpecifierOpt *filters;
/* Scan a SpecifierOpt array in OptionsContext *o and assign to outvar the
 * value whose stream specifier matches the given stream (last match wins).
 * NOTE(review): the macro's surrounding braces/error handling are on lines
 * not visible in this chunk. */
396 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
399 for (i = 0; i < o->nb_ ## name; i++) {\
400 char *spec = o->name[i].specifier;\
401 if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
402 outvar = o->name[i].u.type;\
/* Free everything the option parser allocated into *o, then re-initialize it
 * to defaults for the next input/output file on the command line. */
408 static void reset_options(OptionsContext *o)
410 const OptionDef *po = options;
413 /* all OPT_SPEC and OPT_STRING can be freed in generic way */
/* Walk the options[] table; OPT_OFFSET/OPT_SPEC entries point into *o. */
415 void *dst = (uint8_t*)o + po->u.off;
417 if (po->flags & OPT_SPEC) {
/* SpecifierOpt layout: pointer immediately followed by its count field. */
418 SpecifierOpt **so = dst;
419 int i, *count = (int*)(so + 1);
420 for (i = 0; i < *count; i++) {
421 av_freep(&(*so)[i].specifier);
422 if (po->flags & OPT_STRING)
423 av_freep(&(*so)[i].u.str);
427 } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
432 for (i = 0; i < o->nb_stream_maps; i++)
433 av_freep(&o->stream_maps[i].linklabel);
434 av_freep(&o->stream_maps);
435 av_freep(&o->meta_data_maps);
436 av_freep(&o->streamid_map);
/* Wipe and restore the non-zero defaults. */
438 memset(o, 0, sizeof(*o));
440 o->mux_max_delay = 0.7;
441 o->recording_time = INT64_MAX;
442 o->limit_filesize = UINT64_MAX;
443 o->chapters_input_file = INT_MAX;
/* Allocate one pooled FrameBuffer matching codec context s (dimensions,
 * pixel format, edge padding) and return it via *pbuf.  Returns 0 or a
 * negative AVERROR.  Used by codec_get_buffer() below. */
449 static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
451 FrameBuffer *buf = av_mallocz(sizeof(*buf));
453 const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
454 int h_chroma_shift, v_chroma_shift;
455 int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
456 int w = s->width, h = s->height;
459 return AVERROR(ENOMEM);
/* Without EMU_EDGE the decoder writes outside the picture; pad dimensions. */
461 if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
466 avcodec_align_dimensions(s, &w, &h);
467 if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
468 s->pix_fmt, 32)) < 0) {
472 /* XXX this shouldn't be needed, but some tests break without this line
473 * those decoders are buggy and need to be fixed.
474 * the following tests fail:
475 * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
477 memset(buf->base[0], 128, ret);
/* Point data[] past the edge padding for each plane (chroma planes use the
 * sub-sampled shifts). */
479 avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
480 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
481 const int h_shift = i==0 ? 0 : h_chroma_shift;
482 const int v_shift = i==0 ? 0 : v_chroma_shift;
483 if (s->flags & CODEC_FLAG_EMU_EDGE)
484 buf->data[i] = buf->base[i];
486 buf->data[i] = buf->base[i] +
487 FFALIGN((buf->linesize[i]*edge >> v_shift) +
488 (pixel_size*edge >> h_shift), 32);
492 buf->pix_fmt = s->pix_fmt;
/* Release every FrameBuffer queued in the stream's free-buffer pool.
 * NOTE(review): loop construct and the per-buffer av_free() of the struct
 * itself are on lines not visible in this chunk. */
499 static void free_buffer_pool(InputStream *ist)
501 FrameBuffer *buf = ist->buffer_pool;
503 ist->buffer_pool = buf->next;
504 av_freep(&buf->base[0]);
506 buf = ist->buffer_pool;
/* Drop one reference from buf; when the count reaches zero, return the
 * buffer to the head of the stream's free pool instead of freeing it. */
510 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
512 av_assert0(buf->refcount);
514 if (!buf->refcount) {
515 buf->next = ist->buffer_pool;
516 ist->buffer_pool = buf;
/* Custom AVCodecContext.get_buffer callback: hand the decoder a buffer from
 * the per-stream pool, (re)allocating when the pool is empty or the video
 * parameters changed since the buffer was created. */
520 static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
522 InputStream *ist = s->opaque;
526 if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
/* Pop the head of the pool. */
529 buf = ist->buffer_pool;
530 ist->buffer_pool = buf->next;
/* Stale buffer (resolution/pixfmt change): free its planes and rebuild. */
532 if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
533 av_freep(&buf->base[0]);
535 if ((ret = alloc_buffer(ist, s, &buf)) < 0)
/* Fill in the AVFrame fields the decoder expects from get_buffer(). */
541 frame->type = FF_BUFFER_TYPE_USER;
542 frame->extended_data = frame->data;
543 frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
544 frame->width = buf->w;
545 frame->height = buf->h;
546 frame->format = buf->pix_fmt;
547 frame->sample_aspect_ratio = s->sample_aspect_ratio;
549 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
550 frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
551 frame->data[i] = buf->data[i];
552 frame->linesize[i] = buf->linesize[i];
/* Custom AVCodecContext.release_buffer callback: clear the frame's plane
 * pointers and drop our reference on the pooled buffer. */
558 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
560 InputStream *ist = s->opaque;
561 FrameBuffer *buf = frame->opaque;
564 for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
565 frame->data[i] = NULL;
567 unref_buffer(ist, buf);
/* Release callback used when a pooled buffer is wrapped in an
 * AVFilterBuffer and handed to lavfi; drops our reference on the pool. */
570 static void filter_release_buffer(AVFilterBuffer *fb)
572 FrameBuffer *buf = fb->priv;
574 unref_buffer(buf->ist, buf);
578 * Define a function for building a string containing a list of
/* Generates choose_<var>s(): if the encoder context already has a value,
 * return its name; otherwise serialize the encoder's supported_list into a
 * separator-joined string via a dynamic AVIO buffer.  Caller frees result. */
581 #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator) \
582 static char *choose_ ## var ## s(OutputStream *ost) \
584 if (ost->st->codec->var != none) { \
585 get_name(ost->st->codec->var); \
586 return av_strdup(name); \
587 } else if (ost->enc->supported_list) { \
589 AVIOContext *s = NULL; \
593 if (avio_open_dyn_buf(&s) < 0) \
596 for (p = ost->enc->supported_list; *p != none; p++) { \
598 avio_printf(s, "%s" separator, name); \
600 len = avio_close_dyn_buf(s, &ret); \
/* Helper macros bind "name" for the template above. */
607 #define GET_PIX_FMT_NAME(pix_fmt)\
608 const char *name = av_get_pix_fmt_name(pix_fmt);
610 DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
611 GET_PIX_FMT_NAME, ":")
613 #define GET_SAMPLE_FMT_NAME(sample_fmt)\
614 const char *name = av_get_sample_fmt_name(sample_fmt)
616 DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
617 AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")
619 #define GET_SAMPLE_RATE_NAME(rate)\
621 snprintf(name, sizeof(name), "%d", rate);
623 DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
624 GET_SAMPLE_RATE_NAME, ",")
626 #define GET_CH_LAYOUT_NAME(ch_layout)\
628 snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
630 DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
631 GET_CH_LAYOUT_NAME, ",")
/* Build the endpoints of a simple audio filtergraph: an "abuffer" source fed
 * from the input stream and an "abuffersink" output, with optional "aformat"
 * (format negotiation) and "asyncts" (a/v sync compensation) inserted.
 * Returns 0 or a negative AVERROR; *in_filter/*out_filter receive the pads
 * the caller should connect the user filter chain between. */
633 static int configure_audio_filters(FilterGraph *fg, AVFilterContext **in_filter,
634 AVFilterContext **out_filter)
636 InputStream *ist = fg->inputs[0]->ist;
637 OutputStream *ost = fg->outputs[0]->ost;
638 AVCodecContext *codec = ost->st->codec;
639 AVCodecContext *icodec = ist->st->codec;
640 char *sample_fmts, *sample_rates, *channel_layouts;
/* (Re)create the graph from scratch. */
644 avfilter_graph_free(&fg->graph);
645 if (!(fg->graph = avfilter_graph_alloc()))
646 return AVERROR(ENOMEM);
/* Source parameters come from the decoder/input stream. */
648 snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:"
649 "channel_layout=0x%"PRIx64, ist->st->time_base.num,
650 ist->st->time_base.den, icodec->sample_rate,
651 av_get_sample_fmt_name(icodec->sample_fmt), icodec->channel_layout);
652 ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
653 avfilter_get_by_name("abuffer"),
654 "src", args, NULL, fg->graph);
658 ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
659 avfilter_get_by_name("abuffersink"),
660 "out", NULL, NULL, fg->graph);
664 *in_filter = fg->inputs[0]->filter;
665 *out_filter = fg->outputs[0]->filter;
/* Derive a default layout so aformat can constrain the channel layout. */
667 if (codec->channels && !codec->channel_layout)
668 codec->channel_layout = av_get_default_channel_layout(codec->channels);
/* Constrain output to what the encoder supports (strings are malloc'ed by
 * the choose_* helpers generated above). */
670 sample_fmts = choose_sample_fmts(ost);
671 sample_rates = choose_sample_rates(ost);
672 channel_layouts = choose_channel_layouts(ost);
673 if (sample_fmts || sample_rates || channel_layouts) {
674 AVFilterContext *format;
679 len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
682 len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
685 len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
689 av_freep(&sample_fmts);
690 av_freep(&sample_rates);
691 av_freep(&channel_layouts);
693 ret = avfilter_graph_create_filter(&format,
694 avfilter_get_by_name("aformat"),
695 "aformat", args, NULL, fg->graph);
/* aformat sits directly before the sink. */
699 ret = avfilter_link(format, 0, fg->outputs[0]->filter, 0);
703 *out_filter = format;
/* Legacy -async option: emulate via the asyncts filter after the source. */
706 if (audio_sync_method > 0) {
707 AVFilterContext *async;
711 av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Used the "
712 "asyncts audio filter instead.\n");
714 if (audio_sync_method > 1)
715 len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
716 "max_comp=%d:", audio_sync_method);
717 snprintf(args + len, sizeof(args) - len, "min_delta=%f",
718 audio_drift_threshold);
720 ret = avfilter_graph_create_filter(&async,
721 avfilter_get_by_name("asyncts"),
722 "async", args, NULL, fg->graph);
726 ret = avfilter_link(*in_filter, 0, async, 0);
/* Build the endpoints of a simple video filtergraph: a "buffer" source with
 * the input stream's geometry/SAR and a "buffersink" output, with optional
 * "scale" (when the encoder size is set) and "format" (pixel format
 * constraint) filters inserted.  Returns 0 or a negative AVERROR. */
736 static int configure_video_filters(FilterGraph *fg, AVFilterContext **in_filter,
737 AVFilterContext **out_filter)
739 InputStream *ist = fg->inputs[0]->ist;
740 OutputStream *ost = fg->outputs[0]->ost;
741 AVFilterContext *filter;
742 AVCodecContext *codec = ost->st->codec;
744 AVRational sample_aspect_ratio;
/* Prefer the container-level SAR, fall back to the codec-level one. */
748 if (ist->st->sample_aspect_ratio.num) {
749 sample_aspect_ratio = ist->st->sample_aspect_ratio;
751 sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
/* buffer source args: w:h:pixfmt:tb_num:tb_den:sar_num:sar_den. */
753 snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
754 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
755 sample_aspect_ratio.num, sample_aspect_ratio.den);
757 ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
758 avfilter_get_by_name("buffer"),
759 "src", args, NULL, fg->graph);
762 ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
763 avfilter_get_by_name("buffersink"),
764 "out", NULL, NULL, fg->graph);
767 *in_filter = fg->inputs[0]->filter;
768 *out_filter = fg->outputs[0]->filter;
/* Explicit output size requested (-s): scale right after the source. */
770 if (codec->width || codec->height) {
771 snprintf(args, 255, "%d:%d:flags=0x%X",
774 (unsigned)ost->sws_flags);
775 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
776 NULL, args, NULL, fg->graph)) < 0)
778 if ((ret = avfilter_link(*in_filter, 0, filter, 0)) < 0)
/* Constrain pixel formats to what the encoder accepts. */
783 if ((pix_fmts = choose_pix_fmts(ost))) {
784 if ((ret = avfilter_graph_create_filter(&filter,
785 avfilter_get_by_name("format"),
786 "format", pix_fmts, NULL,
789 if ((ret = avfilter_link(filter, 0, *out_filter, 0)) < 0)
792 *out_filter = filter;
/* Propagate sws flags to any auto-inserted scalers in the graph. */
796 snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
797 fg->graph->scale_sws_opts = av_strdup(args);
/* Configure a 1-in/1-out ("simple") filtergraph for one output stream:
 * build the media-type-specific endpoints, then either parse the user's
 * -filter chain between them or link them directly, and finally run graph
 * configuration.  Returns 0 or a negative AVERROR. */
802 static int configure_simple_filtergraph(FilterGraph *fg)
804 OutputStream *ost = fg->outputs[0]->ost;
805 AVFilterContext *in_filter, *out_filter;
808 avfilter_graph_free(&fg->graph);
809 fg->graph = avfilter_graph_alloc();
811 switch (ost->st->codec->codec_type) {
812 case AVMEDIA_TYPE_VIDEO:
813 ret = configure_video_filters(fg, &in_filter, &out_filter);
815 case AVMEDIA_TYPE_AUDIO:
816 ret = configure_audio_filters(fg, &in_filter, &out_filter);
818 default: av_assert0(0);
/* User supplied a filter description: splice it between the endpoints via
 * the inout lists ("in" feeds the chain, the chain feeds "out"). */
824 AVFilterInOut *outputs = avfilter_inout_alloc();
825 AVFilterInOut *inputs = avfilter_inout_alloc();
827 outputs->name = av_strdup("in");
828 outputs->filter_ctx = in_filter;
829 outputs->pad_idx = 0;
830 outputs->next = NULL;
832 inputs->name = av_strdup("out");
833 inputs->filter_ctx = out_filter;
837 if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
/* No user chain: connect source directly to sink. */
840 if ((ret = avfilter_link(in_filter, 0, out_filter, 0)) < 0)
844 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
847 ost->filter = fg->outputs[0];
/* Allocate a simple FilterGraph tying one InputStream to one OutputStream,
 * register it in the global filtergraphs[] table and in ist->filters[].
 * Returns the new graph (error handling for failed allocs is on lines not
 * visible in this chunk). */
852 static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
854 FilterGraph *fg = av_mallocz(sizeof(*fg));
858 fg->index = nb_filtergraphs;
860 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
862 if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
864 fg->outputs[0]->ost = ost;
865 fg->outputs[0]->graph = fg;
867 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
869 if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
871 fg->inputs[0]->ist = ist;
872 fg->inputs[0]->graph = fg;
/* Let the input stream know it feeds this graph. */
874 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
875 &ist->nb_filters, ist->nb_filters + 1);
876 ist->filters[ist->nb_filters - 1] = fg->inputs[0];
878 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
879 &nb_filtergraphs, nb_filtergraphs + 1);
880 filtergraphs[nb_filtergraphs - 1] = fg;
/* Bind one unconnected input pad of a complex filtergraph to an input
 * stream: either resolve the pad's [file:stream] label, or pick the first
 * unused stream of the matching media type.  Fatal-logs on failure. */
885 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
888 enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
891 // TODO: support other filter types
892 if (type != AVMEDIA_TYPE_VIDEO) {
893 av_log(NULL, AV_LOG_FATAL, "Only video filters supported currently.\n");
/* Labeled pad: "file_idx[:stream_spec]". */
901 int file_idx = strtol(in->name, &p, 0);
903 if (file_idx < 0 || file_idx >= nb_input_files) {
904 av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n",
905 file_idx, fg->graph_desc);
908 s = input_files[file_idx]->ctx;
910 for (i = 0; i < s->nb_streams; i++) {
911 if (s->streams[i]->codec->codec_type != type)
913 if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
919 av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
920 "matches no streams.\n", p, fg->graph_desc);
923 ist = input_streams[input_files[file_idx]->ist_index + st->index];
925 /* find the first unused stream of corresponding type */
926 for (i = 0; i < nb_input_streams; i++) {
927 ist = input_streams[i];
928 if (ist->st->codec->codec_type == type && ist->discard)
931 if (i == nb_input_streams) {
932 av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
933 "unlabeled input pad %d on filter %s", in->pad_idx,
934 in->filter_ctx->name);
/* The chosen stream must now be demuxed and decoded. */
939 ist->decoding_needed = 1;
940 ist->st->discard = AVDISCARD_NONE;
/* Register the binding on both the graph and the input stream. */
942 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
943 &fg->nb_inputs, fg->nb_inputs + 1);
944 if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
946 fg->inputs[fg->nb_inputs - 1]->ist = ist;
947 fg->inputs[fg->nb_inputs - 1]->graph = fg;
949 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
950 &ist->nb_filters, ist->nb_filters + 1);
951 ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
/* Terminate one output pad of a complex filtergraph: create a buffersink,
 * optionally insert "scale" (when the encoder size is set) and "format"
 * (encoder pixel-format constraint) before it, and link the chain's last
 * filter into the sink.  Returns 0 or a negative AVERROR. */
954 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
957 AVCodecContext *codec = ofilter->ost->st->codec;
958 AVFilterContext *last_filter = out->filter_ctx;
959 int pad_idx = out->pad_idx;
963 ret = avfilter_graph_create_filter(&ofilter->filter,
964 avfilter_get_by_name("buffersink"),
965 "out", NULL, pix_fmts, fg->graph);
969 if (codec->width || codec->height) {
971 AVFilterContext *filter;
973 snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
976 (unsigned)ofilter->ost->sws_flags);
977 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
978 NULL, args, NULL, fg->graph)) < 0)
980 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
/* The scaler becomes the new chain tail. */
983 last_filter = filter;
987 if ((pix_fmts = choose_pix_fmts(ofilter->ost))) {
988 AVFilterContext *filter;
989 if ((ret = avfilter_graph_create_filter(&filter,
990 avfilter_get_by_name("format"),
991 "format", pix_fmts, NULL,
994 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
997 last_filter = filter;
/* Finally connect the chain tail to the sink. */
1002 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
/* Configure a complex (-filter_complex style) filtergraph: parse the
 * description, bind each unconnected input pad to an input stream via a
 * "buffer" source, and either finish the output pads immediately (when
 * stream mappings are known) or stash them in out_tmp for later.
 * Returns 0 or a negative AVERROR. */
1008 static int configure_complex_filter(FilterGraph *fg)
/* init is true only on the first configuration of this graph. */
1010 AVFilterInOut *inputs, *outputs, *cur;
1011 int ret, i, init = !fg->graph;
1013 avfilter_graph_free(&fg->graph);
1014 if (!(fg->graph = avfilter_graph_alloc()))
1015 return AVERROR(ENOMEM);
1017 if ((ret = avfilter_graph_parse2(fg->graph, fg->graph_desc, &inputs, &outputs)) < 0)
/* Only resolve stream bindings once; reconfigurations reuse fg->inputs. */
1020 for (cur = inputs; init && cur; cur = cur->next)
1021 init_input_filter(fg, cur);
1023 for (cur = inputs, i = 0; cur; cur = cur->next, i++) {
1024 InputFilter *ifilter = fg->inputs[i];
1025 InputStream *ist = ifilter->ist;
/* Same buffer-source argument layout as configure_video_filters(). */
1029 sar = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
1030 ist->st->codec->sample_aspect_ratio;
1031 snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
1032 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
1035 if ((ret = avfilter_graph_create_filter(&ifilter->filter,
1036 avfilter_get_by_name("buffer"), cur->name,
1037 args, NULL, fg->graph)) < 0)
1039 if ((ret = avfilter_link(ifilter->filter, 0,
1040 cur->filter_ctx, cur->pad_idx)) < 0)
1043 avfilter_inout_free(&inputs);
1046 /* we already know the mappings between lavfi outputs and output streams,
1047 * so we can finish the setup */
1048 for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1049 configure_output_filter(fg, fg->outputs[i], cur);
1050 avfilter_inout_free(&outputs);
1052 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1055 /* wait until output mappings are processed */
1056 for (cur = outputs; cur;) {
1057 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
1058 &fg->nb_outputs, fg->nb_outputs + 1);
1059 if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
1061 fg->outputs[fg->nb_outputs - 1]->graph = fg;
1062 fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
/* Detach cur from the list; it is now owned by the OutputFilter. */
1064 fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
/* Configure every complex filtergraph that has not been configured yet.
 * Returns 0, or the first negative AVERROR from configure_complex_filter(). */
1071 static int configure_complex_filters(void)
1075 for (i = 0; i < nb_filtergraphs; i++)
1076 if (!filtergraphs[i]->graph &&
1077 (ret = configure_complex_filter(filtergraphs[i])) < 0)
/* Dispatch to the right configuration routine: a non-NULL graph_desc marks
 * a complex graph, otherwise it is a per-stream simple graph. */
1082 static int configure_filtergraph(FilterGraph *fg)
1084 return fg->graph_desc ? configure_complex_filter(fg) :
1085 configure_simple_filtergraph(fg);
/* Return nonzero if input stream ist feeds any input pad of graph fg. */
1088 static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
1091 for (i = 0; i < fg->nb_inputs; i++)
1092 if (fg->inputs[i]->ist == ist)
/* Terminal cleanup hook run at exit; the empty QUIET log flushes any
 * pending status line. */
1097 static void term_exit(void)
1099 av_log(NULL, AV_LOG_QUIET, "");
/* Signal state shared with the main loop; volatile because it is written
 * from the async signal handler below. */
1102 static volatile int received_sigterm = 0;
1103 static volatile int received_nb_signals = 0;
/* Async-signal handler: record which signal arrived and how many times
 * (a second signal makes decode_interrupt_cb() abort blocking I/O). */
1106 sigterm_handler(int sig)
1108 received_sigterm = sig;
1109 received_nb_signals++;
/* Install the termination signal handlers so transcoding can shut down
 * cleanly on Ctrl-C / kill (SIGXCPU only where available). */
1113 static void term_init(void)
1115 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
1116 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
1118 signal(SIGXCPU, sigterm_handler);
/* AVIO interrupt callback: abort blocking I/O once a second signal has been
 * received (the first signal lets the current operation finish). */
1122 static int decode_interrupt_cb(void *ctx)
1124 return received_nb_signals > 1;
1127 static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown and process exit: free filtergraphs, close/free all output
 * and input files and streams, release the stats file and global tables,
 * deinit the network layer, then report any terminating signal. */
1129 void exit_program(int ret)
1133 for (i = 0; i < nb_filtergraphs; i++) {
1134 avfilter_graph_free(&filtergraphs[i]->graph);
1135 for (j = 0; j < filtergraphs[i]->nb_inputs; j++)
1136 av_freep(&filtergraphs[i]->inputs[j]);
1137 av_freep(&filtergraphs[i]->inputs);
1138 for (j = 0; j < filtergraphs[i]->nb_outputs; j++)
1139 av_freep(&filtergraphs[i]->outputs[j]);
1140 av_freep(&filtergraphs[i]->outputs);
1141 av_freep(&filtergraphs[i]);
1143 av_freep(&filtergraphs);
/* Close output files (avio_close for non-AVFMT_NOFILE muxers is on a line
 * not visible in this chunk). */
1146 for (i = 0; i < nb_output_files; i++) {
1147 AVFormatContext *s = output_files[i]->ctx;
1148 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
1150 avformat_free_context(s);
1151 av_dict_free(&output_files[i]->opts);
1152 av_freep(&output_files[i]);
/* Walk each stream's bitstream-filter chain before freeing the stream. */
1154 for (i = 0; i < nb_output_streams; i++) {
1155 AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
1157 AVBitStreamFilterContext *next = bsfc->next;
1158 av_bitstream_filter_close(bsfc);
1161 output_streams[i]->bitstream_filters = NULL;
1163 av_freep(&output_streams[i]->avfilter);
1164 av_freep(&output_streams[i]->filtered_frame);
1165 av_freep(&output_streams[i]);
1167 for (i = 0; i < nb_input_files; i++) {
1168 avformat_close_input(&input_files[i]->ctx);
1169 av_freep(&input_files[i]);
1171 for (i = 0; i < nb_input_streams; i++) {
1172 av_freep(&input_streams[i]->decoded_frame);
1173 av_dict_free(&input_streams[i]->opts);
1174 free_buffer_pool(input_streams[i]);
1175 av_freep(&input_streams[i]->filters);
1176 av_freep(&input_streams[i]);
1180 fclose(vstats_file);
1181 av_free(vstats_filename);
1183 av_freep(&input_streams);
1184 av_freep(&input_files);
1185 av_freep(&output_streams);
1186 av_freep(&output_files);
1191 avformat_network_deinit();
1193 if (received_sigterm) {
1194 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
1195 (int) received_sigterm);
/* Abort with a fatal log if any option in dictionary m was not consumed by
 * the codec/format open call (i.e. the user passed an unknown option). */
1202 static void assert_avoptions(AVDictionary *m)
1204 AVDictionaryEntry *t;
1205 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1206 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort (fatal log) when an experimental codec is used without
 * -strict experimental, suggesting a non-experimental alternative when the
 * default encoder/decoder for the same codec id is not experimental. */
1211 static void assert_codec_experimental(AVCodecContext *c, int encoder)
1213 const char *codec_string = encoder ? "encoder" : "decoder";
1215 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
1216 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1217 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
1218 "results.\nAdd '-strict experimental' if you want to use it.\n",
1219 codec_string, c->codec->name);
1220 codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
1221 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
1222 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
1223 codec_string, codec->name);
1229 * Update the requested input sample format based on the output sample format.
1230 * This is currently only used to request float output from decoders which
1231 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
1232 * Ideally this will be removed in the future when decoders do not do format
1233 * conversion and only output in their native format.
1235 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
1236 AVCodecContext *enc)
1238 /* if sample formats match or a decoder sample format has already been
1239 requested, just return */
1240 if (enc->sample_fmt == dec->sample_fmt ||
1241 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
1244 /* if decoder supports more than one output format */
1245 if (dec_codec && dec_codec->sample_fmts &&
1246 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
1247 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
1248 const enum AVSampleFormat *p;
/* min_dec/min_inc track the smallest distance below/above enc->sample_fmt.
 * NOTE(review): initialized to -1 here; the comparison against -1 appears
 * to rely on lines not visible in this chunk. */
1249 int min_dec = -1, min_inc = -1;
1251 /* find a matching sample format in the encoder */
1252 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
1253 if (*p == enc->sample_fmt) {
1254 dec->request_sample_fmt = *p;
1256 } else if (*p > enc->sample_fmt) {
1257 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
1259 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
1262 /* if none match, provide the one that matches quality closest */
1263 dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
1264 enc->sample_fmt - min_dec;
/* Convert an AV_TIME_BASE pts to seconds relative to the output file's
 * start time (used for sync decisions). */
1269 get_sync_ipts(const OutputStream *ost, int64_t pts)
1271 OutputFile *of = output_files[ost->file_index];
1272 return (double)(pts - of->start_time) / AV_TIME_BASE;
/* Run the stream's bitstream filters over pkt (if any) and send it to the
 * muxer with av_interleaved_write_frame(), enforcing -frames limits for
 * non-video streams here. */
1275 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
1277 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
1278 AVCodecContext *avctx = ost->st->codec;
1282 * Audio encoders may split the packets -- #frames in != #packets out.
1283 * But there is no reordering, so we can limit the number of output packets
1284 * by simply dropping them here.
1285 * Counting encoded video frames needs to be done separately because of
1286 * reordering, see do_video_out()
1288 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
1289 if (ost->frame_number >= ost->max_frames) {
1290 av_free_packet(pkt);
1293 ost->frame_number++;
/* Apply each bitstream filter in the chain; a positive return means the
 * filter allocated a new payload that replaces the original packet. */
1297 AVPacket new_pkt = *pkt;
1298 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
1299 &new_pkt.data, &new_pkt.size,
1300 pkt->data, pkt->size,
1301 pkt->flags & AV_PKT_FLAG_KEY);
1303 av_free_packet(pkt);
1304 new_pkt.destruct = av_destruct_packet;
1306 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
1307 bsfc->filter->name, pkt->stream_index,
1308 avctx->codec ? avctx->codec->name : "copy");
1318 pkt->stream_index = ost->index;
1319 ret = av_interleaved_write_frame(s, pkt);
1321 print_error("av_interleaved_write_frame()", ret);
/* Return whether the output stream is still inside the requested recording
 * time (-t). When the elapsed encoded duration reaches of->recording_time,
 * mark the stream as past its recording time.
 * NOTE(review): the return statements and braces are among the lines missing
 * from this listing. */
1326 static int check_recording_time(OutputStream *ost)
1328 OutputFile *of = output_files[ost->file_index];
1330 if (of->recording_time != INT64_MAX &&
1331 av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
1332 AV_TIME_BASE_Q) >= 0) {
1333 ost->is_past_recording_time = 1;
/* Encode one decoded audio frame with avcodec_encode_audio2() and mux the
 * resulting packet (timestamps rescaled from codec to stream time base).
 * sync_opts tracks the next expected pts in samples.
 * NOTE(review): listing has gaps (got_packet guard, frame parameter in the
 * signature, closing braces are among the missing original lines). */
1339 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
1342 AVCodecContext *enc = ost->st->codec;
1346 av_init_packet(&pkt);
1350 if (!check_recording_time(ost))
/* Without a valid decoder pts (or with negative audio_sync_method) fall back
 * to the running sample counter. */
1353 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1354 frame->pts = ost->sync_opts;
1355 ost->sync_opts = frame->pts + frame->nb_samples;
1357 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
1358 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1363 if (pkt.pts != AV_NOPTS_VALUE)
1364 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1365 if (pkt.dts != AV_NOPTS_VALUE)
1366 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1367 if (pkt.duration > 0)
1368 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1370 write_frame(s, &pkt, ost);
1372 audio_size += pkt.size;
/* Optionally deinterlace a decoded picture (when -deinterlace is set) into a
 * freshly allocated temporary AVPicture; on success the result is copied back
 * over *picture. *bufp presumably receives the temporary buffer so the caller
 * can free it — TODO confirm, the assignment line is missing from this listing.
 * NOTE(review): listing has gaps (malloc failure check, cleanup paths). */
1376 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1378 AVCodecContext *dec;
1379 AVPicture *picture2;
1380 AVPicture picture_tmp;
1383 dec = ist->st->codec;
1385 /* deinterlace : must be done before any resize */
1386 if (do_deinterlace) {
1389 /* create temporary picture */
1390 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1391 buf = av_malloc(size);
1395 picture2 = &picture_tmp;
1396 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1398 if (avpicture_deinterlace(picture2, picture,
1399 dec->pix_fmt, dec->width, dec->height) < 0) {
1400 /* if error, do not deinterlace */
1401 av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
1410 if (picture != picture2)
1411 *picture = *picture2;
/* Encode an AVSubtitle and mux the result. DVB subtitles are emitted twice:
 * one packet draws the subtitle, a second one clears it (hence the nb loop
 * and the pts adjustment by start/end display time, in 90 kHz units).
 * NOTE(review): listing has gaps (full parameter list, nb assignment for the
 * non-DVB case, malloc failure handling are among the missing lines). */
1415 static void do_subtitle_out(AVFormatContext *s,
1421 static uint8_t *subtitle_out = NULL;
1422 int subtitle_out_max_size = 1024 * 1024;
1423 int subtitle_out_size, nb, i;
1424 AVCodecContext *enc;
1427 if (pts == AV_NOPTS_VALUE) {
1428 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1434 enc = ost->st->codec;
/* Lazily allocated, reused across calls (static buffer). */
1436 if (!subtitle_out) {
1437 subtitle_out = av_malloc(subtitle_out_max_size);
1440 /* Note: DVB subtitle need one packet to draw them and one other
1441 packet to clear them */
1442 /* XXX: signal it in the codec context ? */
1443 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1448 for (i = 0; i < nb; i++) {
1449 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
1450 if (!check_recording_time(ost))
1453 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1454 // start_display_time is required to be 0
1455 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1456 sub->end_display_time -= sub->start_display_time;
1457 sub->start_display_time = 0;
1458 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1459 subtitle_out_max_size, sub);
1460 if (subtitle_out_size < 0) {
1461 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1465 av_init_packet(&pkt);
1466 pkt.data = subtitle_out;
1467 pkt.size = subtitle_out_size;
1468 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1469 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1470 /* XXX: the pts correction is handled here. Maybe handling
1471 it in the codec would be better */
/* First packet: shift by start_display_time; second: by end_display_time
 * (90 = 90 kHz ticks per millisecond). */
1473 pkt.pts += 90 * sub->start_display_time;
1475 pkt.pts += 90 * sub->end_display_time;
1477 write_frame(s, &pkt, ost);
/* Encode one video frame, applying the selected video sync method
 * (CFR: duplicate/drop frames to keep constant rate; VFR/passthrough: adjust
 * sync_opts from the input timestamp). Raw-picture outputs bypass the encoder
 * and mux the AVPicture struct directly.
 * NOTE(review): listing has gaps (nb_frames initialization, VSYNC_CFR/VFR
 * case labels, drop counter increment, got_packet guards, closing braces are
 * among the missing original lines). */
1481 static void do_video_out(AVFormatContext *s,
1483 AVFrame *in_picture,
1484 int *frame_size, float quality)
1486 int nb_frames, i, ret, format_video_sync;
1487 AVCodecContext *enc;
1488 double sync_ipts, delta;
1490 enc = ost->st->codec;
/* delta = how far the input timestamp is ahead of the output frame counter,
 * measured in encoder time-base units. */
1492 sync_ipts = get_sync_ipts(ost, in_picture->pts) / av_q2d(enc->time_base);
1493 delta = sync_ipts - ost->sync_opts;
1495 /* by default, we output a single frame */
1500 format_video_sync = video_sync_method;
1501 if (format_video_sync == VSYNC_AUTO)
1502 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
1503 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
1505 switch (format_video_sync) {
1507 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1510 else if (delta > 1.1)
1511 nb_frames = lrintf(delta);
1516 else if (delta > 0.6)
1517 ost->sync_opts = lrint(sync_ipts);
1519 case VSYNC_PASSTHROUGH:
1520 ost->sync_opts = lrint(sync_ipts);
1526 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1527 if (nb_frames == 0) {
1529 av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1531 } else if (nb_frames > 1) {
1532 nb_frames_dup += nb_frames - 1;
1533 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1536 if (!ost->frame_number)
1537 ost->first_pts = ost->sync_opts;
1539 /* duplicates frame if needed */
1540 for (i = 0; i < nb_frames; i++) {
1542 av_init_packet(&pkt);
1546 if (!check_recording_time(ost))
1549 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1550 enc->codec->id == CODEC_ID_RAWVIDEO) {
1551 /* raw pictures are written as AVPicture structure to
1552 avoid any copies. We support temporarily the older
1554 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1555 enc->coded_frame->top_field_first = in_picture->top_field_first;
1556 pkt.data = (uint8_t *)in_picture;
1557 pkt.size = sizeof(AVPicture);
1558 pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
1559 pkt.flags |= AV_PKT_FLAG_KEY;
1561 write_frame(s, &pkt, ost);
1564 AVFrame big_picture;
/* Shallow copy: big_picture is a per-iteration working view of the input
 * frame with per-output tweaks (interlacing, quality, pts). */
1566 big_picture = *in_picture;
1567 /* better than nothing: use input picture interlaced
1569 big_picture.interlaced_frame = in_picture->interlaced_frame;
1570 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1571 if (ost->top_field_first == -1)
1572 big_picture.top_field_first = in_picture->top_field_first;
1574 big_picture.top_field_first = !!ost->top_field_first;
1577 /* handles same_quant here. This is not correct because it may
1578 not be a global option */
1579 big_picture.quality = quality;
1580 if (!enc->me_threshold)
1581 big_picture.pict_type = 0;
1582 big_picture.pts = ost->sync_opts;
/* Forced keyframes (-force_key_frames): promote to an I-frame when the pts
 * reaches the next entry in the forced-keyframe list. */
1583 if (ost->forced_kf_index < ost->forced_kf_count &&
1584 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1585 big_picture.pict_type = AV_PICTURE_TYPE_I;
1586 ost->forced_kf_index++;
1588 ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
1590 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1595 if (pkt.pts != AV_NOPTS_VALUE)
1596 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1597 if (pkt.dts != AV_NOPTS_VALUE)
1598 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1600 write_frame(s, &pkt, ost);
1601 *frame_size = pkt.size;
1602 video_size += pkt.size;
1604 /* if two pass, output log */
1605 if (ost->logfile && enc->stats_out) {
1606 fprintf(ost->logfile, "%s", enc->stats_out);
1612 * For video, number of frames in == number of packets out.
1613 * But there may be reordering, so we can't throw away frames on encoder
1614 * flush, we need to limit them here, before they go into encoder.
1616 ost->frame_number++;
/* Convert a normalized mean-squared-error ratio d into a PSNR value in dB:
 * -10 * log10(d). Callers pass error/(w*h*255^2).
 * NOTE(review): braces are among the lines missing from this listing. */
1620 static double psnr(double d)
1622 return -10.0 * log(d) / log(10.0);
/* Append one line of per-frame video statistics (-vstats): frame number,
 * quantizer, optional PSNR, frame size, pts in seconds, instantaneous and
 * average bitrate, picture type.
 * NOTE(review): listing has gaps (frame_size parameter, fopen failure
 * handling, static vstats_file guard are among the missing lines). */
1625 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1628 AVCodecContext *enc;
1630 double ti1, bitrate, avg_bitrate;
1632 /* this is executed just the first time do_video_stats is called */
1634 vstats_file = fopen(vstats_filename, "w");
1641 enc = ost->st->codec;
1642 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1643 frame_number = ost->frame_number;
1644 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1645 if (enc->flags&CODEC_FLAG_PSNR)
1646 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1648 fprintf(vstats_file,"f_size= %6d ", frame_size);
1649 /* compute pts value */
1650 ti1 = ost->sync_opts * av_q2d(enc->time_base);
1654 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1655 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1656 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1657 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1658 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1662 /* check for new output on any of the filtergraphs */
/* Pull buffered frames from every output stream's filtergraph sink and feed
 * them to the appropriate encoder (do_video_out / do_audio_out). Video pts
 * are rescaled from the filter's time base; audio pts are additionally
 * offset by the output file's start time.
 * NOTE(review): listing has gaps (the inner while loop, EAGAIN/error
 * handling for av_buffersink_read, break statements are missing). */
1663 static int poll_filters(void)
1665 AVFilterBufferRef *picref;
1666 AVFrame *filtered_frame = NULL;
1669 for (i = 0; i < nb_output_streams; i++) {
1670 OutputStream *ost = output_streams[i];
1671 OutputFile *of = output_files[ost->file_index];
1674 if (!ost->filter || ost->is_past_recording_time)
1677 if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1678 return AVERROR(ENOMEM);
1680 avcodec_get_frame_defaults(ost->filtered_frame);
1681 filtered_frame = ost->filtered_frame;
/* Fixed-frame-size audio encoders must be fed exactly frame_size samples
 * per read; others take whatever the sink has ready. */
1684 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1685 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
1686 ret = av_buffersink_read_samples(ost->filter->filter, &picref,
1687 ost->st->codec->frame_size);
1689 ret = av_buffersink_read(ost->filter->filter, &picref);
1694 avfilter_copy_buf_props(filtered_frame, picref);
1695 if (ost->enc->type == AVMEDIA_TYPE_VIDEO)
1696 filtered_frame->pts = av_rescale_q(picref->pts,
1697 ost->filter->filter->inputs[0]->time_base,
1699 else if (picref->pts != AV_NOPTS_VALUE)
1700 filtered_frame->pts = av_rescale_q(picref->pts,
1701 ost->filter->filter->inputs[0]->time_base,
1702 ost->st->codec->time_base) -
1703 av_rescale_q(of->start_time,
1705 ost->st->codec->time_base);
/* Drop frames that precede the requested output start (-ss on output). */
1707 if (of->start_time && filtered_frame->pts < of->start_time)
1710 switch (ost->filter->filter->inputs[0]->type) {
1711 case AVMEDIA_TYPE_VIDEO:
1712 if (!ost->frame_aspect_ratio)
1713 ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
1715 do_video_out(of->ctx, ost, filtered_frame, &frame_size,
1716 same_quant ? ost->last_quality :
1717 ost->st->codec->global_quality);
1718 if (vstats_filename && frame_size)
1719 do_video_stats(of->ctx, ost, frame_size);
1721 case AVMEDIA_TYPE_AUDIO:
1722 do_audio_out(of->ctx, ost, filtered_frame);
1725 // TODO support subtitle filters
1729 avfilter_unref_buffer(picref);
/* Print the periodic one-line progress report (frame count, fps, quantizer,
 * size, time, bitrate, dup/drop counts), throttled to every 0.5 s unless
 * this is the final report, which also prints the per-type size summary.
 * NOTE(review): listing has gaps (buf declaration, the qp histogram branch
 * header, several loop/brace lines are missing). */
1735 static void print_report(int is_last_report, int64_t timer_start)
1739 AVFormatContext *oc;
1741 AVCodecContext *enc;
1742 int frame_number, vid, i;
1743 double bitrate, ti1, pts;
1744 static int64_t last_time = -1;
1745 static int qp_histogram[52];
1747 if (!print_stats && !is_last_report)
1750 if (!is_last_report) {
1752 /* display the report every 0.5 seconds */
1753 cur_time = av_gettime();
1754 if (last_time == -1) {
1755 last_time = cur_time;
1758 if ((cur_time - last_time) < 500000)
1760 last_time = cur_time;
1764 oc = output_files[0]->ctx;
1766 total_size = avio_size(oc->pb);
1767 if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
1768 total_size = avio_tell(oc->pb);
1773 for (i = 0; i < nb_output_streams; i++) {
1775 ost = output_streams[i];
1776 enc = ost->st->codec;
1777 if (!ost->stream_copy && enc->coded_frame)
1778 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
/* Only the first video stream contributes the frame/fps columns (vid flag). */
1779 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1780 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1782 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1783 float t = (av_gettime() - timer_start) / 1000000.0;
1785 frame_number = ost->frame_number;
1786 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1787 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
1789 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1793 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1795 for (j = 0; j < 32; j++)
1796 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
1798 if (enc->flags&CODEC_FLAG_PSNR) {
1800 double error, error_sum = 0;
1801 double scale, scale_sum = 0;
1802 char type[3] = { 'Y','U','V' };
1803 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1804 for (j = 0; j < 3; j++) {
/* Final report uses the accumulated error totals; interim reports use the
 * last coded frame's per-plane error. */
1805 if (is_last_report) {
1806 error = enc->error[j];
1807 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1809 error = enc->coded_frame->error[j];
1810 scale = enc->width * enc->height * 255.0 * 255.0;
1816 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1818 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1822 /* compute min output value */
1823 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1824 if ((pts < ti1) && (pts > 0))
1830 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1832 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1833 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1834 (double)total_size / 1024, ti1, bitrate);
1836 if (nb_frames_dup || nb_frames_drop)
1837 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1838 nb_frames_dup, nb_frames_drop);
/* '\r' keeps the interim report on a single updating console line. */
1840 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
1844 if (is_last_report) {
1845 int64_t raw= audio_size + video_size + extra_size;
1846 av_log(NULL, AV_LOG_INFO, "\n");
1847 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1848 video_size / 1024.0,
1849 audio_size / 1024.0,
1850 extra_size / 1024.0,
1851 100.0 * (total_size - raw) / raw
/* Drain every encoder's delayed packets at end of stream by calling the
 * encode function with a NULL frame until it stops producing output, muxing
 * each flushed packet. PCM-like audio (frame_size <= 1) and raw video are
 * skipped — they have no encoder delay.
 * NOTE(review): listing has gaps (the for(;;) drain loop header, desc
 * assignment, stop_encoding logic and break statements are missing). */
1856 static void flush_encoders(void)
1860 for (i = 0; i < nb_output_streams; i++) {
1861 OutputStream *ost = output_streams[i];
1862 AVCodecContext *enc = ost->st->codec;
1863 AVFormatContext *os = output_files[ost->file_index]->ctx;
1864 int stop_encoding = 0;
1866 if (!ost->encoding_needed)
1869 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1871 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
1875 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1879 switch (ost->st->codec->codec_type) {
1880 case AVMEDIA_TYPE_AUDIO:
1881 encode = avcodec_encode_audio2;
1885 case AVMEDIA_TYPE_VIDEO:
1886 encode = avcodec_encode_video2;
1897 av_init_packet(&pkt);
/* NULL frame signals the codec to flush its internal buffer. */
1901 ret = encode(enc, &pkt, NULL, &got_packet);
1903 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1907 if (ost->logfile && enc->stats_out) {
1908 fprintf(ost->logfile, "%s", enc->stats_out);
1914 if (pkt.pts != AV_NOPTS_VALUE)
1915 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1916 if (pkt.dts != AV_NOPTS_VALUE)
1917 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1918 write_frame(os, &pkt, ost);
1928 * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero when ost is fed by this exact input stream and the input
 * has reached the output file's start time.
 * NOTE(review): the return statements are among the lines missing from this
 * listing. */
1930 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1932 OutputFile *of = output_files[ost->file_index];
1933 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1935 if (ost->source_index != ist_index)
1938 if (of->start_time && ist->last_dts < of->start_time)
/* Copy one input packet to an output stream without re-encoding: rescale
 * pts/dts/duration into the output stream's time base (offset by the output
 * start time), optionally run the parser's bitstream conversion, and mux.
 * Skips leading non-keyframes unless copy_initial_nonkeyframes is set, and
 * stops once the recording time is exceeded.
 * NOTE(review): listing has gaps (opkt declaration, frame counter increment
 * for video, return statements are among the missing lines). */
1944 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1946 OutputFile *of = output_files[ost->file_index];
1947 int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
1950 av_init_packet(&opkt);
1952 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1953 !ost->copy_initial_nonkeyframes)
1956 if (of->recording_time != INT64_MAX &&
1957 ist->last_dts >= of->recording_time + of->start_time) {
1958 ost->is_past_recording_time = 1;
1962 /* force the input stream PTS */
1963 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1964 audio_size += pkt->size;
1965 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1966 video_size += pkt->size;
1970 if (pkt->pts != AV_NOPTS_VALUE)
1971 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1973 opkt.pts = AV_NOPTS_VALUE;
/* Missing dts: synthesize it from the tracked last_dts of the input. */
1975 if (pkt->dts == AV_NOPTS_VALUE)
1976 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1978 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1979 opkt.dts -= ost_tb_start_time;
1981 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1982 opkt.flags = pkt->flags;
1984 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1985 if ( ost->st->codec->codec_id != CODEC_ID_H264
1986 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
1987 && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
1988 && ost->st->codec->codec_id != CODEC_ID_VC1
1990 if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
1991 opkt.destruct = av_destruct_packet;
1993 opkt.data = pkt->data;
1994 opkt.size = pkt->size;
1997 write_frame(of->ctx, &opkt, ost);
1998 ost->st->codec->frame_number++;
1999 av_free_packet(&opkt);
/* When -re (rate emulation) is active for this input file, sleep until the
 * wall clock catches up with the stream's last dts, throttling reads to
 * real-time speed.
 * NOTE(review): the av_usleep/usleep call and braces are among the lines
 * missing from this listing. */
2002 static void rate_emu_sleep(InputStream *ist)
2004 if (input_files[ist->file_index]->rate_emu) {
2005 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2006 int64_t now = av_gettime() - ist->start;
/* If the decoder reported no channel layout, derive a default one from the
 * channel count and warn the user about the guess. Returns failure when no
 * default layout exists for that channel count.
 * NOTE(review): the return statements are among the lines missing from this
 * listing. */
2012 static int guess_input_channel_layout(InputStream *ist)
2014 AVCodecContext *dec = ist->st->codec;
2016 if (!dec->channel_layout) {
2017 char layout_name[256];
2019 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2020 if (!dec->channel_layout)
2022 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2023 dec->channels, dec->channel_layout);
2024 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2025 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet: run avcodec_decode_audio4(), apply the -vol
 * volume adjustment in-place per sample format, detect input format changes
 * (rate/format/channels/layout) and reconfigure affected filtergraphs, then
 * push the decoded frame into this stream's filter inputs.
 * On EOF flush (no output) a NULL buffer is sent to the filters instead.
 * NOTE(review): listing has gaps (decode error return, several closing
 * braces, loop bodies for the float/double volume cases are missing). */
2030 static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2032 AVFrame *decoded_frame;
2033 AVCodecContext *avctx = ist->st->codec;
2034 int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
2035 int i, ret, resample_changed;
2037 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2038 return AVERROR(ENOMEM);
2040 avcodec_get_frame_defaults(ist->decoded_frame);
2041 decoded_frame = ist->decoded_frame;
2043 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
2049 /* no audio frame */
2051 for (i = 0; i < ist->nb_filters; i++)
2052 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2056 /* if the decoder provides a pts, use it instead of the last packet pts.
2057 the decoder could be delaying output by a packet or more. */
2058 if (decoded_frame->pts != AV_NOPTS_VALUE)
2059 ist->next_dts = decoded_frame->pts;
2060 else if (pkt->pts != AV_NOPTS_VALUE) {
2061 decoded_frame->pts = pkt->pts;
2062 pkt->pts = AV_NOPTS_VALUE;
2065 // preprocess audio (volume)
/* -vol scaling: audio_volume is in 1/256 units (256 == unity gain); each
 * sample format gets its own clipped fixed/float-point path. */
2066 if (audio_volume != 256) {
2067 int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
2068 void *samples = decoded_frame->data[0];
2069 switch (avctx->sample_fmt) {
2070 case AV_SAMPLE_FMT_U8:
2072 uint8_t *volp = samples;
2073 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2074 int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
2075 *volp++ = av_clip_uint8(v);
2079 case AV_SAMPLE_FMT_S16:
2081 int16_t *volp = samples;
2082 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2083 int v = ((*volp) * audio_volume + 128) >> 8;
2084 *volp++ = av_clip_int16(v);
2088 case AV_SAMPLE_FMT_S32:
2090 int32_t *volp = samples;
2091 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2092 int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
2093 *volp++ = av_clipl_int32(v);
2097 case AV_SAMPLE_FMT_FLT:
2099 float *volp = samples;
2100 float scale = audio_volume / 256.f;
2101 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2106 case AV_SAMPLE_FMT_DBL:
2108 double *volp = samples;
2109 double scale = audio_volume / 256.;
2110 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2116 av_log(NULL, AV_LOG_FATAL,
2117 "Audio volume adjustment on sample format %s is not supported.\n",
2118 av_get_sample_fmt_name(ist->st->codec->sample_fmt));
2123 rate_emu_sleep(ist);
/* Reconfigure filters when any decoded audio property changed since the
 * last frame (sample format, channel count, layout, or sample rate). */
2125 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2126 ist->resample_channels != avctx->channels ||
2127 ist->resample_channel_layout != decoded_frame->channel_layout ||
2128 ist->resample_sample_rate != decoded_frame->sample_rate;
2129 if (resample_changed) {
2130 char layout1[64], layout2[64];
2132 if (!guess_input_channel_layout(ist)) {
2133 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2134 "layout for Input Stream #%d.%d\n", ist->file_index,
2138 decoded_frame->channel_layout = avctx->channel_layout;
2140 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2141 ist->resample_channel_layout);
2142 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2143 decoded_frame->channel_layout);
2145 av_log(NULL, AV_LOG_INFO,
2146 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2147 ist->file_index, ist->st->index,
2148 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2149 ist->resample_channels, layout1,
2150 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2151 avctx->channels, layout2);
2153 ist->resample_sample_fmt = decoded_frame->format;
2154 ist->resample_sample_rate = decoded_frame->sample_rate;
2155 ist->resample_channel_layout = decoded_frame->channel_layout;
2156 ist->resample_channels = avctx->channels;
2158 for (i = 0; i < nb_filtergraphs; i++)
2159 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2160 configure_filtergraph(filtergraphs[i]) < 0) {
2161 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2166 for (i = 0; i < ist->nb_filters; i++)
2167 av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
/* Decode one video packet: run avcodec_decode_video2(), reconstruct a pts
 * via guess_correct_pts(), optionally deinterlace, detect size/pixel-format
 * changes (reconfiguring affected filtergraphs), and push the frame to this
 * stream's filter inputs — zero-copy via an AVFilterBufferRef when the
 * decoder used direct rendering (DR1), otherwise by copying the frame.
 * NOTE(review): listing has gaps (quality declaration, decode error return,
 * several closing braces are among the missing original lines). */
2172 static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
2174 AVFrame *decoded_frame;
2175 void *buffer_to_free = NULL;
2176 int i, ret = 0, resample_changed;
2179 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2180 return AVERROR(ENOMEM);
2182 avcodec_get_frame_defaults(ist->decoded_frame);
2183 decoded_frame = ist->decoded_frame;
2184 pkt->pts = *pkt_pts;
2185 pkt->dts = ist->last_dts;
2186 *pkt_pts = AV_NOPTS_VALUE;
2188 ret = avcodec_decode_video2(ist->st->codec,
2189 decoded_frame, got_output, pkt);
2193 quality = same_quant ? decoded_frame->quality : 0;
2195 /* no picture yet */
2197 for (i = 0; i < ist->nb_filters; i++)
2198 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2201 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
2202 decoded_frame->pkt_dts);
2204 pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
2206 rate_emu_sleep(ist);
2208 if (ist->st->sample_aspect_ratio.num)
2209 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2211 resample_changed = ist->resample_width != decoded_frame->width ||
2212 ist->resample_height != decoded_frame->height ||
2213 ist->resample_pix_fmt != decoded_frame->format;
2214 if (resample_changed) {
2215 av_log(NULL, AV_LOG_INFO,
2216 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2217 ist->file_index, ist->st->index,
2218 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2219 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2221 ist->resample_width = decoded_frame->width;
2222 ist->resample_height = decoded_frame->height;
2223 ist->resample_pix_fmt = decoded_frame->format;
2225 for (i = 0; i < nb_filtergraphs; i++)
2226 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2227 configure_filtergraph(filtergraphs[i]) < 0) {
2228 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2233 for (i = 0; i < ist->nb_filters; i++) {
2234 // XXX what an ugly hack
2235 if (ist->filters[i]->graph->nb_outputs == 1)
2236 ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
/* DR1 path: wrap the decoder's buffer in a filter buffer ref so no pixel
 * copy is needed; filter_release_buffer returns it to the decoder pool. */
2238 if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
2239 FrameBuffer *buf = decoded_frame->opaque;
2240 AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
2241 decoded_frame->data, decoded_frame->linesize,
2242 AV_PERM_READ | AV_PERM_PRESERVE,
2243 ist->st->codec->width, ist->st->codec->height,
2244 ist->st->codec->pix_fmt);
2246 avfilter_copy_frame_props(fb, decoded_frame);
2247 fb->buf->priv = buf;
2248 fb->buf->free = filter_release_buffer;
2251 av_buffersrc_buffer(ist->filters[i]->filter, fb);
2253 av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
2256 av_free(buffer_to_free);
/* Decode one subtitle packet and send the resulting AVSubtitle directly to
 * every matching output stream's subtitle encoder (no filtering path), then
 * free it.
 * NOTE(review): the error/got_output early-return and final return are among
 * the lines missing from this listing. */
2260 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2262 AVSubtitle subtitle;
2263 int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2264 &subtitle, got_output, pkt);
2270 rate_emu_sleep(ist);
2272 for (i = 0; i < nb_output_streams; i++) {
2273 OutputStream *ost = output_streams[i];
2275 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2278 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
2281 avsubtitle_free(&subtitle);
2285 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Process one input packet: when decoding is needed, loop over the packet
 * (a packet may contain several frames) dispatching to the per-media-type
 * transcode_* helper and advancing next_dts; for stream copy, update the dts
 * bookkeeping from nominal frame durations and remux the packet to every
 * matching output stream.
 * NOTE(review): listing has gaps (avpkt setup from pkt, got_output handling,
 * the data/size advance after a partial decode, return statements). */
2286 static int output_packet(InputStream *ist, const AVPacket *pkt)
2290 int64_t pkt_pts = AV_NOPTS_VALUE;
2293 if (ist->next_dts == AV_NOPTS_VALUE)
2294 ist->next_dts = ist->last_dts;
2298 av_init_packet(&avpkt);
2306 if (pkt->dts != AV_NOPTS_VALUE)
2307 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2308 if (pkt->pts != AV_NOPTS_VALUE)
2309 pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2311 // while we have more to decode or while the decoder did output something on EOF
2312 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2316 ist->last_dts = ist->next_dts;
2318 if (avpkt.size && avpkt.size != pkt->size) {
2319 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2320 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2321 ist->showed_multi_packet_warning = 1;
2324 switch (ist->st->codec->codec_type) {
2325 case AVMEDIA_TYPE_AUDIO:
2326 ret = transcode_audio (ist, &avpkt, &got_output);
2328 case AVMEDIA_TYPE_VIDEO:
2329 ret = transcode_video (ist, &avpkt, &got_output, &pkt_pts);
/* Predict the next dts from, in order of preference: the packet duration,
 * the stream's real frame rate, or the codec time base * ticks_per_frame. */
2331 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2332 else if (ist->st->r_frame_rate.num)
2333 ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
2334 ist->st->r_frame_rate.num},
2336 else if (ist->st->codec->time_base.num != 0) {
2337 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
2338 ist->st->codec->ticks_per_frame;
2339 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
2342 case AVMEDIA_TYPE_SUBTITLE:
2343 ret = transcode_subtitles(ist, &avpkt, &got_output);
2351 // touch data and size only if not EOF
2361 /* handle stream copy */
2362 if (!ist->decoding_needed) {
2363 rate_emu_sleep(ist);
2364 ist->last_dts = ist->next_dts;
2365 switch (ist->st->codec->codec_type) {
2366 case AVMEDIA_TYPE_AUDIO:
2367 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2368 ist->st->codec->sample_rate;
2370 case AVMEDIA_TYPE_VIDEO:
2371 if (ist->st->codec->time_base.num != 0) {
2372 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2373 ist->next_dts += ((int64_t)AV_TIME_BASE *
2374 ist->st->codec->time_base.num * ticks) /
2375 ist->st->codec->time_base.den;
2380 for (i = 0; pkt && i < nb_output_streams; i++) {
2381 OutputStream *ost = output_streams[i];
2383 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2386 do_streamcopy(ist, ost, pkt);
/* Build and print an SDP session description covering all output files
 * (used for streaming, e.g. RTP outputs).
 * NOTE(review): the malloc NULL-check, sdp buffer declaration, and av_freep
 * are among the lines missing from this listing. */
2392 static void print_sdp(void)
2396 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
2400 for (i = 0; i < nb_output_files; i++)
2401 avc[i] = output_files[i]->ctx;
2403 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
2404 printf("SDP:\n%s\n", sdp);
/* Open the decoder for one input stream (when decoding is needed): propagate
 * the encoders' preferred sample formats back to the decoder, install the
 * custom DR1 buffer callbacks for video, default threads to "auto", then
 * initialize the dts tracking state. Writes a message into error/error_len
 * and returns a negative AVERROR on failure.
 * NOTE(review): listing has gaps (the codec NULL-check guarding the
 * "Decoder ... not found" message, final return are missing). */
2409 static int init_input_stream(int ist_index, char *error, int error_len)
2412 InputStream *ist = input_streams[ist_index];
2413 if (ist->decoding_needed) {
2414 AVCodec *codec = ist->dec;
2416 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2417 ist->st->codec->codec_id, ist->file_index, ist->st->index);
2418 return AVERROR(EINVAL);
2421 /* update requested sample format for the decoder based on the
2422 corresponding encoder sample format */
2423 for (i = 0; i < nb_output_streams; i++) {
2424 OutputStream *ost = output_streams[i];
2425 if (ost->source_index == ist_index) {
2426 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
2431 if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
2432 ist->st->codec->get_buffer = codec_get_buffer;
2433 ist->st->codec->release_buffer = codec_release_buffer;
2434 ist->st->codec->opaque = ist;
2437 if (!av_dict_get(ist->opts, "threads", NULL, 0))
2438 av_dict_set(&ist->opts, "threads", "auto", 0);
2439 if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2440 snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2441 ist->file_index, ist->st->index);
2442 return AVERROR(EINVAL);
2444 assert_codec_experimental(ist->st->codec, 0);
2445 assert_avoptions(ist->opts);
/* Start last_dts before zero by the B-frame delay so reordered streams line
 * up; next_dts stays unset until the first packet arrives. */
2448 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2449 ist->next_dts = AV_NOPTS_VALUE;
2450 init_pts_correction(&ist->pts_ctx);
/* Return the input stream feeding this output stream: the direct source when
 * one is set, otherwise the first input of the output's filtergraph whose
 * media type matches.
 * NOTE(review): the fallback return for the no-match case is among the lines
 * missing from this listing. */
2456 static InputStream *get_input_stream(OutputStream *ost)
2458 if (ost->source_index >= 0)
2459 return input_streams[ost->source_index];
2462 FilterGraph *fg = ost->filter->graph;
2465 for (i = 0; i < fg->nb_inputs; i++)
2466 if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
2467 return fg->inputs[i]->ist;
2468 return fg->inputs[i]->ist;
/* One-time setup before the transcode main loop:
 *  - start framerate emulation clocks,
 *  - derive each output stream's codec parameters (stream copy vs. encode),
 *  - configure filtergraphs, open encoders and decoders,
 *  - write output file headers and print the stream mapping.
 * Returns 0 on success, a negative AVERROR code on failure. */
2473 static int transcode_init(void)
2475 int ret = 0, i, j, k;
2476 AVFormatContext *oc;
2477 AVCodecContext *codec, *icodec;
2483 /* init framerate emulation */
2484 for (i = 0; i < nb_input_files; i++) {
2485 InputFile *ifile = input_files[i];
2486 if (ifile->rate_emu)
2487 for (j = 0; j < ifile->nb_streams; j++)
2488 input_streams[j + ifile->ist_index]->start = av_gettime();
2491 /* output stream init */
2492 for (i = 0; i < nb_output_files; i++) {
2493 oc = output_files[i]->ctx;
2494 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2495 av_dump_format(oc, i, oc->filename, 1);
2496 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2497 return AVERROR(EINVAL);
2501 /* init complex filtergraphs */
2502 for (i = 0; i < nb_filtergraphs; i++)
2503 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2506 /* for each output stream, we compute the right encoding parameters */
2507 for (i = 0; i < nb_output_streams; i++) {
2508 ost = output_streams[i];
2509 oc = output_files[ost->file_index]->ctx;
2510 ist = get_input_stream(ost);
2512 if (ost->attachment_filename)
2515 codec = ost->st->codec;
2518 icodec = ist->st->codec;
2520 ost->st->disposition = ist->st->disposition;
2521 codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2522 codec->chroma_sample_location = icodec->chroma_sample_location;
2525 if (ost->stream_copy) {
2526 uint64_t extra_size;
2528 av_assert0(ist && !ost->filter);
2530 extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
/* 64-bit sum guards against int overflow before the INT_MAX check below */
2532 if (extra_size > INT_MAX) {
2533 return AVERROR(EINVAL);
2536 /* if stream_copy is selected, no need to decode or encode */
2537 codec->codec_id = icodec->codec_id;
2538 codec->codec_type = icodec->codec_type;
2540 if (!codec->codec_tag) {
/* keep the input codec tag only when the muxer agrees it maps to the
 * same codec id (or has no tag table at all) */
2541 if (!oc->oformat->codec_tag ||
2542 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2543 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2544 codec->codec_tag = icodec->codec_tag;
2547 codec->bit_rate = icodec->bit_rate;
2548 codec->rc_max_rate = icodec->rc_max_rate;
2549 codec->rc_buffer_size = icodec->rc_buffer_size;
2550 codec->field_order = icodec->field_order;
2551 codec->extradata = av_mallocz(extra_size);
2552 if (!codec->extradata) {
2553 return AVERROR(ENOMEM);
2555 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2556 codec->extradata_size = icodec->extradata_size;
2558 codec->time_base = icodec->time_base;
2559 codec->time_base.num *= icodec->ticks_per_frame;
2560 av_reduce(&codec->time_base.num, &codec->time_base.den,
2561 codec->time_base.num, codec->time_base.den, INT_MAX);
2563 codec->time_base = ist->st->time_base;
2565 switch (codec->codec_type) {
2566 case AVMEDIA_TYPE_AUDIO:
2567 if (audio_volume != 256) {
/* volume scaling requires decoding, which stream copy skips */
2568 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2571 codec->channel_layout = icodec->channel_layout;
2572 codec->sample_rate = icodec->sample_rate;
2573 codec->channels = icodec->channels;
2574 codec->frame_size = icodec->frame_size;
2575 codec->audio_service_type = icodec->audio_service_type;
2576 codec->block_align = icodec->block_align;
2578 case AVMEDIA_TYPE_VIDEO:
2579 codec->pix_fmt = icodec->pix_fmt;
2580 codec->width = icodec->width;
2581 codec->height = icodec->height;
2582 codec->has_b_frames = icodec->has_b_frames;
2583 if (!codec->sample_aspect_ratio.num) {
/* prefer container-level SAR, then codec-level, then unknown (0/1) */
2584 codec->sample_aspect_ratio =
2585 ost->st->sample_aspect_ratio =
2586 ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2587 ist->st->codec->sample_aspect_ratio.num ?
2588 ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2591 case AVMEDIA_TYPE_SUBTITLE:
2592 codec->width = icodec->width;
2593 codec->height = icodec->height;
2595 case AVMEDIA_TYPE_DATA:
2596 case AVMEDIA_TYPE_ATTACHMENT:
2603 /* should only happen when a default codec is not present. */
2604 snprintf(error, sizeof(error), "Automatic encoder selection "
2605 "failed for output stream #%d:%d. Default encoder for "
2606 "format %s is probably disabled. Please choose an "
2607 "encoder manually.\n", ost->file_index, ost->index,
2609 ret = AVERROR(EINVAL);
/* transcoding path: both sides are active */
2614 ist->decoding_needed = 1;
2615 ost->encoding_needed = 1;
2618 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2619 codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
2621 fg = init_simple_filtergraph(ist, ost);
2622 if (configure_simple_filtergraph(fg)) {
2623 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* take encoder parameters from the configured filtergraph output */
2628 switch (codec->codec_type) {
2629 case AVMEDIA_TYPE_AUDIO:
2630 codec->sample_fmt = ost->filter->filter->inputs[0]->format;
2631 codec->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2632 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2633 codec->channels = av_get_channel_layout_nb_channels(codec->channel_layout);
2634 codec->time_base = (AVRational){ 1, codec->sample_rate };
2636 case AVMEDIA_TYPE_VIDEO:
2638 * We want CFR output if and only if one of those is true:
2639 * 1) user specified output framerate with -r
2640 * 2) user specified -vsync cfr
2641 * 3) output format is CFR and the user didn't force vsync to
2642 * something else than CFR
2644 * in such a case, set ost->frame_rate
2646 if (!ost->frame_rate.num && ist &&
2647 (video_sync_method == VSYNC_CFR ||
2648 (video_sync_method == VSYNC_AUTO &&
2649 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
2650 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
2651 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
/* snap to the nearest framerate the encoder supports */
2652 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2653 ost->frame_rate = ost->enc->supported_framerates[idx];
2656 if (ost->frame_rate.num) {
2657 codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
2658 video_sync_method = VSYNC_CFR;
2660 codec->time_base = ist->st->time_base;
2662 codec->time_base = ost->filter->filter->inputs[0]->time_base;
2664 codec->width = ost->filter->filter->inputs[0]->w;
2665 codec->height = ost->filter->filter->inputs[0]->h;
2666 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2667 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2668 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2669 ost->filter->filter->inputs[0]->sample_aspect_ratio;
2670 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
2672 if (codec->width != icodec->width ||
2673 codec->height != icodec->height ||
2674 codec->pix_fmt != icodec->pix_fmt) {
/* geometry/format changed => raw sample depth no longer meaningful */
2675 codec->bits_per_raw_sample = 0;
2679 case AVMEDIA_TYPE_SUBTITLE:
2680 codec->time_base = (AVRational){1, 1000};
/* two-pass encoding: set up the pass logfile */
2687 if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2688 char logfilename[1024];
2691 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2692 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
2694 if (!strcmp(ost->enc->name, "libx264")) {
/* libx264 handles its own stats file via the "stats" option */
2695 av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2697 if (codec->flags & CODEC_FLAG_PASS1) {
2698 f = fopen(logfilename, "wb");
2700 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2701 logfilename, strerror(errno));
2707 size_t logbuffer_size;
2708 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2709 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2713 codec->stats_in = logbuffer;
2720 /* open each encoder */
2721 for (i = 0; i < nb_output_streams; i++) {
2722 ost = output_streams[i];
2723 if (ost->encoding_needed) {
2724 AVCodec *codec = ost->enc;
2725 AVCodecContext *dec = NULL;
2727 if ((ist = get_input_stream(ost)))
2728 dec = ist->st->codec;
2729 if (dec && dec->subtitle_header) {
/* propagate the decoder's subtitle header (e.g. ASS styles) to the encoder */
2730 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2731 if (!ost->st->codec->subtitle_header) {
2732 ret = AVERROR(ENOMEM);
2735 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2736 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2738 if (!av_dict_get(ost->opts, "threads", NULL, 0))
2739 av_dict_set(&ost->opts, "threads", "auto", 0);
2740 if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2741 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2742 ost->file_index, ost->index);
2743 ret = AVERROR(EINVAL);
2746 assert_codec_experimental(ost->st->codec, 1);
2747 assert_avoptions(ost->opts);
2748 if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2749 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2750 "It takes bits/s as argument, not kbits/s\n");
2751 extra_size += ost->st->codec->extradata_size;
2753 if (ost->st->codec->me_threshold)
2754 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2758 /* init input streams */
2759 for (i = 0; i < nb_input_streams; i++)
2760 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2763 /* discard unused programs */
2764 for (i = 0; i < nb_input_files; i++) {
2765 InputFile *ifile = input_files[i];
2766 for (j = 0; j < ifile->ctx->nb_programs; j++) {
2767 AVProgram *p = ifile->ctx->programs[j];
2768 int discard = AVDISCARD_ALL;
/* keep a program only if at least one of its streams is used */
2770 for (k = 0; k < p->nb_stream_indexes; k++)
2771 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2772 discard = AVDISCARD_DEFAULT;
2775 p->discard = discard;
2779 /* open files and write file headers */
2780 for (i = 0; i < nb_output_files; i++) {
2781 oc = output_files[i]->ctx;
2782 oc->interrupt_callback = int_cb;
2783 if (avformat_write_header(oc, &output_files[i]->opts) < 0) {
2784 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2785 ret = AVERROR(EINVAL);
2788 assert_avoptions(output_files[i]->opts);
2789 if (strcmp(oc->oformat->name, "rtp")) {
2795 /* dump the file output parameters - cannot be done before in case
2797 for (i = 0; i < nb_output_files; i++) {
2798 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2801 /* dump the stream mapping */
2802 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2803 for (i = 0; i < nb_input_streams; i++) {
2804 ist = input_streams[i];
2806 for (j = 0; j < ist->nb_filters; j++) {
2807 AVFilterLink *link = ist->filters[j]->filter->outputs[0];
2808 if (ist->filters[j]->graph->graph_desc) {
2809 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2810 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2811 link->dst->filter->name);
2812 if (link->dst->input_count > 1)
2813 av_log(NULL, AV_LOG_INFO, ":%s", link->dstpad->name);
2814 if (nb_filtergraphs > 1)
2815 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2816 av_log(NULL, AV_LOG_INFO, "\n");
2821 for (i = 0; i < nb_output_streams; i++) {
2822 ost = output_streams[i];
2824 if (ost->attachment_filename) {
2825 /* an attached file */
2826 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2827 ost->attachment_filename, ost->file_index, ost->index);
2831 if (ost->filter && ost->filter->graph->graph_desc) {
2832 /* output from a complex graph */
2833 AVFilterLink *link = ost->filter->filter->inputs[0];
2834 av_log(NULL, AV_LOG_INFO, " %s", link->src->filter->name);
2835 if (link->src->output_count > 1)
2836 av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name);
2837 if (nb_filtergraphs > 1)
2838 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2840 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2841 ost->index, ost->enc ? ost->enc->name : "?");
/* plain input->output mapping line */
2845 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2846 input_streams[ost->source_index]->file_index,
2847 input_streams[ost->source_index]->st->index,
2850 if (ost->sync_ist != input_streams[ost->source_index])
2851 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2852 ost->sync_ist->file_index,
2853 ost->sync_ist->st->index);
2854 if (ost->stream_copy)
2855 av_log(NULL, AV_LOG_INFO, " (copy)");
2857 av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2858 input_streams[ost->source_index]->dec->name : "?",
2859 ost->enc ? ost->enc->name : "?");
2860 av_log(NULL, AV_LOG_INFO, "\n");
2864 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2876 * The following code is the main loop of the file converter
/* Main conversion loop: repeatedly pick the input file whose stream has the
 * smallest last-seen DTS, read one packet from it, fix up its timestamps
 * (ts_offset, ts_scale, discontinuity compensation) and feed it to
 * output_packet(). On EOF/termination, flush decoders, write trailers and
 * release per-stream resources. Returns 0 on success, negative on error. */
2878 static int transcode(void)
2881 AVFormatContext *is, *os;
2885 int no_packet_count = 0;
2886 int64_t timer_start;
/* no_packet[] flags files that returned EAGAIN on the last read */
2888 if (!(no_packet = av_mallocz(nb_input_files)))
2891 ret = transcode_init();
2895 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2898 timer_start = av_gettime();
2900 for (; received_sigterm == 0;) {
2901 int file_index, ist_index, past_recording_time = 1;
2905 ipts_min = INT64_MAX;
2907 /* check if there's any stream where output is still needed */
2908 for (i = 0; i < nb_output_streams; i++) {
2910 ost = output_streams[i];
2911 of = output_files[ost->file_index];
2912 os = output_files[ost->file_index]->ctx;
2913 if (ost->is_past_recording_time ||
2914 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2916 if (ost->frame_number > ost->max_frames) {
/* -frames limit hit: mark the whole output file finished */
2918 for (j = 0; j < of->ctx->nb_streams; j++)
2919 output_streams[of->ost_index + j]->is_past_recording_time = 1;
2922 past_recording_time = 0;
2924 if (past_recording_time)
2927 /* select the stream that we must read now by looking at the
2928 smallest output pts */
2930 for (i = 0; i < nb_input_streams; i++) {
2932 ist = input_streams[i];
2933 ipts = ist->last_dts;
2934 if (ist->discard || no_packet[ist->file_index])
2936 if (!input_files[ist->file_index]->eof_reached) {
2937 if (ipts < ipts_min) {
2939 file_index = ist->file_index;
2943 /* if none, if is finished */
2944 if (file_index < 0) {
2945 if (no_packet_count) {
/* all files were EAGAIN: clear flags and retry instead of quitting */
2946 no_packet_count = 0;
2947 memset(no_packet, 0, nb_input_files);
2954 /* read a frame from it and output it in the fifo */
2955 is = input_files[file_index]->ctx;
2956 ret = av_read_frame(is, &pkt);
2957 if (ret == AVERROR(EAGAIN)) {
2958 no_packet[file_index] = 1;
2963 input_files[file_index]->eof_reached = 1;
/* flush decoders of this file with a NULL packet */
2965 for (i = 0; i < input_files[file_index]->nb_streams; i++) {
2966 ist = input_streams[input_files[file_index]->ist_index + i];
2967 if (ist->decoding_needed)
2968 output_packet(ist, NULL);
2977 no_packet_count = 0;
2978 memset(no_packet, 0, nb_input_files);
2981 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2982 is->streams[pkt.stream_index]);
2984 /* the following test is needed in case new streams appear
2985 dynamically in stream : we ignore them */
2986 if (pkt.stream_index >= input_files[file_index]->nb_streams)
2987 goto discard_packet;
2988 ist_index = input_files[file_index]->ist_index + pkt.stream_index;
2989 ist = input_streams[ist_index];
2991 goto discard_packet;
/* apply the per-file timestamp offset (converted to stream time base) */
2993 if (pkt.dts != AV_NOPTS_VALUE)
2994 pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2995 if (pkt.pts != AV_NOPTS_VALUE)
2996 pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2998 if (pkt.pts != AV_NOPTS_VALUE)
2999 pkt.pts *= ist->ts_scale;
3000 if (pkt.dts != AV_NOPTS_VALUE)
3001 pkt.dts *= ist->ts_scale;
3003 //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
3005 // pkt.dts, input_files[ist->file_index].ts_offset,
3006 // ist->st->codec->codec_type);
/* discontinuity detection for formats with AVFMT_TS_DISCONT (e.g. MPEG-TS):
 * a large jump between the packet DTS and the predicted next DTS is folded
 * into the file's ts_offset so downstream timestamps stay monotonic */
3007 if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
3008 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3009 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3010 int64_t delta = pkt_dts - ist->next_dts;
3011 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
3012 input_files[ist->file_index]->ts_offset -= delta;
3013 av_log(NULL, AV_LOG_DEBUG,
3014 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3015 delta, input_files[ist->file_index]->ts_offset);
3016 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3017 if (pkt.pts != AV_NOPTS_VALUE)
3018 pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3022 // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
3023 if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
3024 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
3025 ist->file_index, ist->st->index);
3028 av_free_packet(&pkt);
3033 av_free_packet(&pkt);
3035 /* dump report by using the output first video and audio streams */
3036 print_report(0, timer_start);
3039 /* at the end of stream, we must flush the decoder buffers */
3040 for (i = 0; i < nb_input_streams; i++) {
3041 ist = input_streams[i];
3042 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3043 output_packet(ist, NULL);
3051 /* write the trailer if needed and close file */
3052 for (i = 0; i < nb_output_files; i++) {
3053 os = output_files[i]->ctx;
3054 av_write_trailer(os);
3057 /* dump report by using the first video and audio streams */
3058 print_report(1, timer_start);
3060 /* close each encoder */
3061 for (i = 0; i < nb_output_streams; i++) {
3062 ost = output_streams[i];
3063 if (ost->encoding_needed) {
3064 av_freep(&ost->st->codec->stats_in);
3065 avcodec_close(ost->st->codec);
3069 /* close each decoder */
3070 for (i = 0; i < nb_input_streams; i++) {
3071 ist = input_streams[i];
3072 if (ist->decoding_needed) {
3073 avcodec_close(ist->st->codec);
3081 av_freep(&no_packet);
/* per-output-stream cleanup (extradata, 2-pass logfile, opts) */
3083 if (output_streams) {
3084 for (i = 0; i < nb_output_streams; i++) {
3085 ost = output_streams[i];
3087 if (ost->stream_copy)
3088 av_freep(&ost->st->codec->extradata);
3090 fclose(ost->logfile);
3091 ost->logfile = NULL;
3093 av_freep(&ost->st->codec->subtitle_header);
3094 av_free(ost->forced_kf_pts);
3095 av_dict_free(&ost->opts);
/* Parse an aspect ratio argument: either "X:Y" (two integers) or a plain
 * decimal number. Logs a fatal error on an invalid specification. */
3102 static double parse_frame_aspect_ratio(const char *arg)
3109 p = strchr(arg, ':');
3111 x = strtol(arg, &end, 10);
3113 y = strtol(end + 1, &end, 10);
3115 ar = (double)x / (double)y;
3117 ar = strtod(arg, NULL);
3120 av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
/* -acodec: thin wrapper mapping the option to the generic "codec:a" path. */
3126 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
3128 return parse_option(o, "codec:a", arg, options);
/* -vcodec: thin wrapper mapping the option to the generic "codec:v" path. */
3131 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
3133 return parse_option(o, "codec:v", arg, options);
/* -scodec: thin wrapper mapping the option to the generic "codec:s" path. */
3136 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
3138 return parse_option(o, "codec:s", arg, options);
/* -dcodec: thin wrapper mapping the option to the generic "codec:d" path. */
3141 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
3143 return parse_option(o, "codec:d", arg, options);
/* -map: parse a stream mapping specifier and append the resulting
 * StreamMap entries to o->stream_maps. Supported forms (as visible here):
 *   "[label]"            - map a complex filtergraph output by link label,
 *   "file[:spec][,sync]" - map matching streams of an input file, with an
 *                          optional sync stream after ','.
 * A leading negative form disables previously defined matching maps. */
3146 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
3148 StreamMap *m = NULL;
3149 int i, negative = 0, file_idx;
3150 int sync_file_idx = -1, sync_stream_idx;
3158 map = av_strdup(arg);
3160 /* parse sync stream first, just pick first matching stream */
3161 if (sync = strchr(map, ',')) {
3163 sync_file_idx = strtol(sync + 1, &sync, 0);
3164 if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
3165 av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
3170 for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
3171 if (check_stream_specifier(input_files[sync_file_idx]->ctx,
3172 input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
3173 sync_stream_idx = i;
3176 if (i == input_files[sync_file_idx]->nb_streams) {
3177 av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
3178 "match any streams.\n", arg);
3184 if (map[0] == '[') {
3185 /* this mapping refers to lavfi output */
3186 const char *c = map + 1;
3187 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3188 &o->nb_stream_maps, o->nb_stream_maps + 1);
3189 m = &o->stream_maps[o->nb_stream_maps - 1];
3190 m->linklabel = av_get_token(&c, "]");
3191 if (!m->linklabel) {
3192 av_log(NULL, AV_LOG_ERROR, "Invalid output link label: %s.\n", map);
3196 file_idx = strtol(map, &p, 0);
3197 if (file_idx >= nb_input_files || file_idx < 0) {
3198 av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
3202 /* disable some already defined maps */
3203 for (i = 0; i < o->nb_stream_maps; i++) {
3204 m = &o->stream_maps[i];
3205 if (file_idx == m->file_index &&
3206 check_stream_specifier(input_files[m->file_index]->ctx,
3207 input_files[m->file_index]->ctx->streams[m->stream_index],
3208 *p == ':' ? p + 1 : p) > 0)
/* add one StreamMap per input stream matching the specifier */
3212 for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
3213 if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
3214 *p == ':' ? p + 1 : p) <= 0)
3216 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3217 &o->nb_stream_maps, o->nb_stream_maps + 1);
3218 m = &o->stream_maps[o->nb_stream_maps - 1];
3220 m->file_index = file_idx;
3221 m->stream_index = i;
3223 if (sync_file_idx >= 0) {
3224 m->sync_file_index = sync_file_idx;
3225 m->sync_stream_index = sync_stream_idx;
3227 m->sync_file_index = file_idx;
3228 m->sync_stream_index = i;
3234 av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
/* -attach: remember the attachment filename; the file is actually read
 * later when the output file is opened. */
3242 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
3244 o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
3245 &o->nb_attachments, o->nb_attachments + 1);
3246 o->attachments[o->nb_attachments - 1] = arg;
3251 * Parse a metadata specifier in arg.
3252 * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
3253 * @param index for type c/p, chapter/program index is written here
3254 * @param stream_spec for type s, the stream specifier is written here
/* See the doc comment above: splits a metadata specifier into its type
 * character plus, depending on the type, a stream specifier ('s') or a
 * numeric chapter/program index ('c'/'p'). Fatal on malformed input. */
3256 static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
3264 if (*(++arg) && *arg != ':') {
3265 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
3268 *stream_spec = *arg == ':' ? arg + 1 : "";
3272 if (*(++arg) == ':')
3273 *index = strtol(++arg, NULL, 0);
3276 av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
/* -map_metadata: copy a metadata dictionary from the input context 'ic' to
 * the output context 'oc'. Source/destination can each be global, a stream,
 * a chapter or a program (see parse_meta_type). Existing output entries are
 * preserved (AV_DICT_DONT_OVERWRITE). Also records which metadata classes
 * were mapped manually so automatic copying can be suppressed. */
3283 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3285 AVDictionary **meta_in = NULL;
3286 AVDictionary **meta_out;
3288 char type_in, type_out;
3289 const char *istream_spec = NULL, *ostream_spec = NULL;
3290 int idx_in = 0, idx_out = 0;
3292 parse_meta_type(inspec, &type_in, &idx_in, &istream_spec);
3293 parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
3295 if (type_in == 'g' || type_out == 'g')
3296 o->metadata_global_manual = 1;
3297 if (type_in == 's' || type_out == 's')
3298 o->metadata_streams_manual = 1;
3299 if (type_in == 'c' || type_out == 'c')
3300 o->metadata_chapters_manual = 1;
/* function-scope helper macros: bounds-check an index, then select the
 * metadata dictionary for the given type/context/index */
3302 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3303 if ((index) < 0 || (index) >= (nb_elems)) {\
3304 av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
3309 #define SET_DICT(type, meta, context, index)\
3312 meta = &context->metadata;\
3315 METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3316 meta = &context->chapters[index]->metadata;\
3319 METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3320 meta = &context->programs[index]->metadata;\
3324 SET_DICT(type_in, meta_in, ic, idx_in);
3325 SET_DICT(type_out, meta_out, oc, idx_out);
3327 /* for input streams choose first matching stream */
3328 if (type_in == 's') {
3329 for (i = 0; i < ic->nb_streams; i++) {
3330 if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3331 meta_in = &ic->streams[i]->metadata;
3337 av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
/* for output streams, copy into every matching stream */
3342 if (type_out == 's') {
3343 for (i = 0; i < oc->nb_streams; i++) {
3344 if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3345 meta_out = &oc->streams[i]->metadata;
3346 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3351 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
/* Look up an encoder (encoder != 0) or decoder by name; log a fatal error
 * if the codec is unknown or its media type does not match 'type'. */
3356 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3358 const char *codec_string = encoder ? "encoder" : "decoder";
3362 avcodec_find_encoder_by_name(name) :
3363 avcodec_find_decoder_by_name(name);
3365 av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
3368 if (codec->type != type) {
3369 av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
/* Pick the decoder for a stream: if the user forced one via per-stream
 * codec options, use it (and force the stream's codec_id to match);
 * otherwise fall back to the default decoder for the stream's codec_id. */
3375 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
3377 char *codec_name = NULL;
3379 MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3381 AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
3382 st->codec->codec_id = codec->id;
3385 return avcodec_find_decoder(st->codec->codec_id);
3389 * Add all the streams from the given input file to the global
3390 * list of input streams.
/* Create one InputStream per stream of the demuxed file 'ic', append them
 * to the global input_streams array, and snapshot per-type decoder
 * parameters (resolution/pixel format for video, sample format/rate/
 * channels for audio) used later to detect mid-stream parameter changes. */
3392 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
3396 for (i = 0; i < ic->nb_streams; i++) {
3397 AVStream *st = ic->streams[i];
3398 AVCodecContext *dec = st->codec;
3399 InputStream *ist = av_mallocz(sizeof(*ist));
3404 input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
3405 input_streams[nb_input_streams - 1] = ist;
3408 ist->file_index = nb_input_files;
/* discard everything by default; mapping re-enables what is used */
3410 st->discard = AVDISCARD_ALL;
3411 ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
3413 ist->ts_scale = 1.0;
3414 MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
3416 ist->dec = choose_decoder(o, ic, st);
3418 switch (dec->codec_type) {
3419 case AVMEDIA_TYPE_VIDEO:
3420 ist->resample_height = dec->height;
3421 ist->resample_width = dec->width;
3422 ist->resample_pix_fmt = dec->pix_fmt;
3425 case AVMEDIA_TYPE_AUDIO:
3426 guess_input_channel_layout(ist);
3428 ist->resample_sample_fmt = dec->sample_fmt;
3429 ist->resample_sample_rate = dec->sample_rate;
3430 ist->resample_channels = dec->channels;
3431 ist->resample_channel_layout = dec->channel_layout;
3434 case AVMEDIA_TYPE_DATA:
3435 case AVMEDIA_TYPE_SUBTITLE:
3436 case AVMEDIA_TYPE_ATTACHMENT:
3437 case AVMEDIA_TYPE_UNKNOWN:
/* If the target is a local file that already exists and -y was not given,
 * ask the user interactively (or bail out when stdin is unavailable).
 * The ':' checks skip protocol URLs, while still treating "C:..."-style
 * (drive-letter) and explicit "file:" paths as local files. */
3445 static void assert_file_overwrite(const char *filename)
3447 if (!file_overwrite &&
3448 (strchr(filename, ':') == NULL || filename[1] == ':' ||
3449 av_strstart(filename, "file:", NULL))) {
3450 if (avio_check(filename, 0) == 0) {
3452 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3454 if (!read_yesno()) {
3455 fprintf(stderr, "Not overwriting - exiting\n");
3460 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
/* -dump_attachment: write a stream's extradata (the attached file contents)
 * to 'filename'. An empty filename falls back to the stream's "filename"
 * metadata tag; it is a fatal error if neither is available. */
3467 static void dump_attachment(AVStream *st, const char *filename)
3470 AVIOContext *out = NULL;
3471 AVDictionaryEntry *e;
3473 if (!st->codec->extradata_size) {
3474 av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3475 nb_input_files - 1, st->index);
3478 if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3479 filename = e->value;
3481 av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
3482 "in stream #%d:%d.\n", nb_input_files - 1, st->index);
3486 assert_file_overwrite(filename);
3488 if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3489 av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
3494 avio_write(out, st->codec->extradata, st->codec->extradata_size);
/* Open one input file: resolve the (optionally forced) input format, apply
 * rawdev-style format options from the CLI (sample rate, channels, frame
 * rate/size, pixel format), probe stream parameters, perform the initial
 * seek for -ss, register the file's streams globally and handle
 * -dump_attachment requests. */
3499 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3501 AVFormatContext *ic;
3502 AVInputFormat *file_iformat = NULL;
3506 AVDictionary **opts;
3507 int orig_nb_streams; // number of streams before avformat_find_stream_info
3510 if (!(file_iformat = av_find_input_format(o->format))) {
3511 av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
3516 if (!strcmp(filename, "-"))
3519 using_stdin |= !strncmp(filename, "pipe:", 5) ||
3520 !strcmp(filename, "/dev/stdin");
3522 /* get default parameters from command line */
3523 ic = avformat_alloc_context();
3525 print_error(filename, AVERROR(ENOMEM));
3528 if (o->nb_audio_sample_rate) {
3529 snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3530 av_dict_set(&format_opts, "sample_rate", buf, 0);
3532 if (o->nb_audio_channels) {
3533 /* because we set audio_channels based on both the "ac" and
3534 * "channel_layout" options, we need to check that the specified
3535 * demuxer actually has the "channels" option before setting it */
3536 if (file_iformat && file_iformat->priv_class &&
3537 av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
3538 AV_OPT_SEARCH_FAKE_OBJ)) {
3539 snprintf(buf, sizeof(buf),  "%d",
3540 o->audio_channels[o->nb_audio_channels - 1].u.i);
3541 av_dict_set(&format_opts, "channels", buf, 0);
3544 if (o->nb_frame_rates) {
3545 av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3547 if (o->nb_frame_sizes) {
3548 av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3550 if (o->nb_frame_pix_fmts)
3551 av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3553 ic->flags |= AVFMT_FLAG_NONBLOCK;
3554 ic->interrupt_callback = int_cb;
3556 /* open the input file with generic libav function */
3557 err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3559 print_error(filename, err);
3562 assert_avoptions(format_opts);
3564 /* apply forced codec ids */
3565 for (i = 0; i < ic->nb_streams; i++)
3566 choose_decoder(o, ic, ic->streams[i]);
3568 /* Set AVCodecContext options for avformat_find_stream_info */
3569 opts = setup_find_stream_info_opts(ic, codec_opts);
3570 orig_nb_streams = ic->nb_streams;
3572 /* If not enough info to get the stream parameters, we decode the
3573 first frames to get it. (used in mpeg case for example) */
3574 ret = avformat_find_stream_info(ic, opts);
3576 av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3577 avformat_close_input(&ic);
3581 timestamp = o->start_time;
3582 /* add the stream start time */
3583 if (ic->start_time != AV_NOPTS_VALUE)
3584 timestamp += ic->start_time;
3586 /* if seeking requested, we execute it */
3587 if (o->start_time != 0) {
3588 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
3590 av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3591 filename, (double)timestamp / AV_TIME_BASE);
3595 /* update the current parameters so that they match the one of the input stream */
3596 add_input_streams(o, ic);
3598 /* dump the file content */
3599 av_dump_format(ic, nb_input_files, filename, 0);
3601 input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3602 if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
3605 input_files[nb_input_files - 1]->ctx        = ic;
3606 input_files[nb_input_files - 1]->ist_index  = nb_input_streams - ic->nb_streams;
3607 input_files[nb_input_files - 1]->ts_offset  = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3608 input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
3609 input_files[nb_input_files - 1]->rate_emu   = o->rate_emu;
3611 for (i = 0; i < o->nb_dump_attachment; i++) {
3614 for (j = 0; j < ic->nb_streams; j++) {
3615 AVStream *st = ic->streams[j];
3617 if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3618 dump_attachment(st, o->dump_attachment[i].u.str);
/* free the per-stream option dictionaries created for find_stream_info */
3622 for (i = 0; i < orig_nb_streams; i++)
3623 av_dict_free(&opts[i]);
/* -force_key_frames: parse a comma-separated list of timestamps into
 * ost->forced_kf_pts (rescaled to the encoder time base). The first loop
 * over 'kf' counts entries; the array is then filled in a second pass. */
3630 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3631 AVCodecContext *avctx)
3637 for (p = kf; *p; p++)
3640 ost->forced_kf_count = n;
3641 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3642 if (!ost->forced_kf_pts) {
3643 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3646 for (i = 0; i < n; i++) {
3647 p = i ? strchr(p, ',') + 1 : kf;
3648 t = parse_time_or_die("force_key_frames", p, 1);
3649 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Read one '\n'-terminated line from 's' into a freshly allocated buffer
 * (via a dynamic buffer). Caller owns the returned memory. */
3653 static uint8_t *get_line(AVIOContext *s)
3659 if (avio_open_dyn_buf(&line) < 0) {
3660 av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
3664 while ((c = avio_r8(s)) && c != '\n')
3667 avio_close_dyn_buf(line, &buf);
/* Locate and open an avpreset file, searching the AVCONV_DATADIR base dirs
 * (index 1 gets the "/.avconv" home-dir suffix). Tries the codec-specific
 * name "<codec>-<preset>.avpreset" first, then the plain "<preset>.avpreset".
 * Returns the avio_open2() result; on success *s is the opened context. */
3672 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3675 char filename[1000];
3676 const char *base[3] = { getenv("AVCONV_DATADIR"),
3681 for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
3685 snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3686 i != 1 ? "" : "/.avconv", codec_name, preset_name);
3687 ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3690 snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3691 i != 1 ? "" : "/.avconv", preset_name);
3692 ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/* Pick the encoder for an output stream: no per-stream codec option means
 * "guess from the output format"; "copy" enables stream copy; any other
 * name is resolved via find_codec_or_die and forces the codec_id. */
3698 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
3700 char *codec_name = NULL;
3702 MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
3704 ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3705 NULL, ost->st->codec->codec_type);
3706 ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3707 } else if (!strcmp(codec_name, "copy"))
3708 ost->stream_copy = 1;
3710 ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3711 ost->st->codec->codec_id = ost->enc->id;
/* Allocate a new output AVStream in 'oc' plus its OutputStream wrapper,
 * choose its encoder, load any -pre preset file into its options dict,
 * and apply the generic per-stream options (max frames, bitstream
 * filters, codec tag, qscale, global header flag, sws flags).
 * Returns the new OutputStream; fatal (exits) on any error. */
3715 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
3718 AVStream *st = avformat_new_stream(oc, NULL);
3719 int idx = oc->nb_streams - 1, ret = 0;
3720 char *bsf = NULL, *next, *codec_tag = NULL;
3721 AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3723 char *buf = NULL, *arg = NULL, *preset = NULL;
3724 AVIOContext *s = NULL;
3727 av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
/* apply a user-supplied -streamid mapping, if one covers this index */
3731 if (oc->nb_streams - 1 < o->nb_streamid_map)
3732 st->id = o->streamid_map[oc->nb_streams - 1];
3734 output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3735 nb_output_streams + 1);
3736 if (!(ost = av_mallocz(sizeof(*ost))))
3738 output_streams[nb_output_streams - 1] = ost;
3740 ost->file_index = nb_output_files;
3743 st->codec->codec_type = type;
3744 choose_encoder(o, oc, ost);
3746 ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3749 avcodec_get_context_defaults3(st->codec, ost->enc);
3750 st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
/* -pre: read "key=value" lines from the preset file into ost->opts;
 * blank lines and '#' comments are skipped */
3752 MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3753 if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
3756 if (!buf[0] || buf[0] == '#') {
3760 if (!(arg = strchr(buf, '='))) {
3761 av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
3765 av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3767 } while (!s->eof_reached);
3771 av_log(NULL, AV_LOG_FATAL,
3772 "Preset %s specified for stream %d:%d, but could not be opened.\n",
3773 preset, ost->file_index, ost->index);
3777 ost->max_frames = INT64_MAX;
3778 MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
/* -bsf: build the linked chain of bitstream filters, splitting on ','.
 * NOTE(review): assignment inside the condition below is intentional but
 * unparenthesized — `if ((next = strchr(bsf, ',')))` would silence -Wparentheses. */
3780 MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
3782 if (next = strchr(bsf, ','))
3784 if (!(bsfc = av_bitstream_filter_init(bsf))) {
3785 av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3789 bsfc_prev->next = bsfc;
3791 ost->bitstream_filters = bsfc;
/* -tag: numeric value if it parses fully, otherwise the raw 4CC bytes */
3797 MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3799 uint32_t tag = strtol(codec_tag, &next, 0);
3801 tag = AV_RL32(codec_tag);
3802 st->codec->codec_tag = tag;
/* -q/-qscale: fixed-quality mode via global_quality in lambda units */
3805 MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3806 if (qscale >= 0 || same_quant) {
3807 st->codec->flags |= CODEC_FLAG_QSCALE;
3808 st->codec->global_quality = FF_QP2LAMBDA * qscale;
3811 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
3812 st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
3814 av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3816 ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
/* Parse a comma-separated list of (64) quantizer matrix coefficients from
 * 'str' into 'dest'; fatal if the string ends before enough coefficients
 * have been read. */
3821 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3824 const char *p = str;
3831 av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
/* Create a video output stream and, unless it is a stream copy, apply all
 * per-stream video options: frame rate, size, aspect ratio, pixel format,
 * intra/inter quant matrices, rc_override list, two-pass flags, forced
 * key frames, field order and the -filter:v graph.  Fatal on bad values. */
3838 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
3842 AVCodecContext *video_enc;
3844 ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3846 video_enc = st->codec;
3848 if (!ost->stream_copy) {
3849 const char *p = NULL;
3850 char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
3851 char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
3852 char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
3855 MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3856 if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3857 av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3861 MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3862 if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3863 av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
3867 MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
3868 if (frame_aspect_ratio)
3869 ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
3871 MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
3872 if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
3873 av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
3876 st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
/* custom quantizer matrices (-intra_matrix / -inter_matrix), 64 coeffs each */
3878 MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
3880 if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
3881 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
3884 parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
3886 MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
3888 if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
3889 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
3892 parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
/* -rc_override: "start,end,q[/...]" entries; q>=0 is a fixed qscale,
 * q<0 encodes a percentage quality factor.
 * NOTE(review): av_realloc() result is assigned straight back to
 * video_enc->rc_override with no NULL check — leaks and crashes on OOM;
 * prefer a temporary pointer plus a check. */
3895 MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
3896 for (i = 0; p; i++) {
3898 int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
3900 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
3903 video_enc->rc_override =
3904 av_realloc(video_enc->rc_override,
3905 sizeof(RcOverride) * (i + 1));
3906 video_enc->rc_override[i].start_frame = start;
3907 video_enc->rc_override[i].end_frame = end;
3909 video_enc->rc_override[i].qscale = q;
3910 video_enc->rc_override[i].quality_factor = 1.0;
3913 video_enc->rc_override[i].qscale = 0;
3914 video_enc->rc_override[i].quality_factor = -q/100.0;
3919 video_enc->rc_override_count = i;
3920 if (!video_enc->rc_initial_buffer_occupancy)
3921 video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
3922 video_enc->intra_dc_precision = intra_dc_precision - 8;
/* two-pass encoding flags (-pass 1 / -pass 2) */
3927 video_enc->flags |= CODEC_FLAG_PASS1;
3929 video_enc->flags |= CODEC_FLAG_PASS2;
3933 MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
3934 if (forced_key_frames)
3935 parse_forced_key_frames(forced_key_frames, ost, video_enc);
3937 MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
/* -1 means "not specified"; autodetect field order later */
3939 ost->top_field_first = -1;
3940 MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
3942 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
3944 ost->avfilter = av_strdup(filters);
3946 MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
3952 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
3956 AVCodecContext *audio_enc;
3958 ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
3961 audio_enc = st->codec;
3962 audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
3964 if (!ost->stream_copy) {
3965 char *sample_fmt = NULL, *filters = NULL;;
3967 MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
3969 MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
3971 (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
3972 av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
3976 MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
3978 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
3980 ost->avfilter = av_strdup(filters);
/* Create a data output stream; encoding data streams is unsupported, so
 * anything other than stream copy is a fatal error. */
3986 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
3990 ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
3991 if (!ost->stream_copy) {
3992 av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
/* Create an attachment output stream; attachments are always stream-copied
 * (the caller may override stream_copy, as opt_output_file does for -attach). */
3999 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
4001 OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
4002 ost->stream_copy = 1;
/* Create a subtitle output stream; no subtitle-specific options are applied
 * here beyond fixing the codec type. */
4006 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
4010 AVCodecContext *subtitle_enc;
4012 ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
4014 subtitle_enc = st->codec;
4016 subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
4021 /* arg format is "output-stream-index:streamid-value". */
/* -streamid handler: records the requested container stream id for a given
 * output stream index in o->streamid_map (applied in new_output_stream()).
 * Fatal if the argument lacks the ':' separator or either part is not a
 * non-negative integer. */
4022 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
4028 av_strlcpy(idx_str, arg, sizeof(idx_str));
4029 p = strchr(idx_str, ':');
4031 av_log(NULL, AV_LOG_FATAL,
4032 "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
4037 idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
/* grow the map so indices may be given in any order, sparsely */
4038 o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
4039 o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
4043 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
4045 AVFormatContext *is = ifile->ctx;
4046 AVFormatContext *os = ofile->ctx;
4049 for (i = 0; i < is->nb_chapters; i++) {
4050 AVChapter *in_ch = is->chapters[i], *out_ch;
4051 int64_t ts_off = av_rescale_q(ofile->start_time - ifile->ts_offset,
4052 AV_TIME_BASE_Q, in_ch->time_base);
4053 int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
4054 av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
4057 if (in_ch->end < ts_off)
4059 if (rt != INT64_MAX && in_ch->start > rt + ts_off)
4062 out_ch = av_mallocz(sizeof(AVChapter));
4064 return AVERROR(ENOMEM);
4066 out_ch->id = in_ch->id;
4067 out_ch->time_base = in_ch->time_base;
4068 out_ch->start = FFMAX(0, in_ch->start - ts_off);
4069 out_ch->end = FFMIN(rt, in_ch->end - ts_off);
4072 av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
4075 os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
4077 return AVERROR(ENOMEM);
4078 os->chapters[os->nb_chapters - 1] = out_ch;
/* Bind a complex-filtergraph output pad to a brand-new output stream in
 * 'oc'.  Only video pads are supported; the new stream has no input source
 * (source_index = -1) and must not be a stream copy.  Fatal on any error. */
4083 static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
4084 AVFormatContext *oc)
4088 if (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type != AVMEDIA_TYPE_VIDEO) {
4089 av_log(NULL, AV_LOG_FATAL, "Only video filters are supported currently.\n");
4093 ost = new_video_stream(o, oc);
/* fed by the filtergraph, not by a demuxed input stream */
4094 ost->source_index = -1;
4095 ost->filter = ofilter;
4099 if (ost->stream_copy) {
4100 av_log(NULL, AV_LOG_ERROR, "Streamcopy requested for output stream %d:%d, "
4101 "which is fed from a complex filtergraph. Filtering and streamcopy "
4102 "cannot be used together.\n", ost->file_index, ost->index);
4106 if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
4107 av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
4110 avfilter_inout_free(&ofilter->out_tmp);
/* Open one output file: guess/validate the muxer, create its output streams
 * (from complex filtergraph labels, -map options, or automatic "best stream"
 * selection), attach -attach files, register the new OutputFile, open the
 * actual output I/O, and apply metadata/chapter mapping options.
 * Fatal (exits) on any error. */
4113 static void opt_output_file(void *optctx, const char *filename)
4115 OptionsContext *o = optctx;
4116 AVFormatContext *oc;
4118 AVOutputFormat *file_oformat;
/* complex filtergraphs must be configured before stream creation */
4122 if (configure_complex_filters() < 0) {
4123 av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
4127 if (!strcmp(filename, "-"))
4130 oc = avformat_alloc_context();
4132 print_error(filename, AVERROR(ENOMEM));
/* output format: explicit -f first, otherwise guess from the filename */
4137 file_oformat = av_guess_format(o->format, NULL, NULL);
4138 if (!file_oformat) {
4139 av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
4143 file_oformat = av_guess_format(NULL, filename, NULL);
4144 if (!file_oformat) {
4145 av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
4151 oc->oformat = file_oformat;
4152 oc->interrupt_callback = int_cb;
4153 av_strlcpy(oc->filename, filename, sizeof(oc->filename));
4155 /* create streams for all unlabeled output pads */
4156 for (i = 0; i < nb_filtergraphs; i++) {
4157 FilterGraph *fg = filtergraphs[i];
4158 for (j = 0; j < fg->nb_outputs; j++) {
4159 OutputFilter *ofilter = fg->outputs[j];
4161 if (!ofilter->out_tmp || ofilter->out_tmp->name)
/* an unlabeled pad of a given type suppresses automatic stream selection
 * for that type — the filter output becomes the stream instead */
4164 switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4165 case AVMEDIA_TYPE_VIDEO: o->video_disable = 1; break;
4166 case AVMEDIA_TYPE_AUDIO: o->audio_disable = 1; break;
4167 case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
4169 init_output_filter(ofilter, o, oc);
4173 if (!o->nb_stream_maps) {
4174 /* pick the "best" stream of each type */
4175 #define NEW_STREAM(type, index)\
4177 ost = new_ ## type ## _stream(o, oc);\
4178 ost->source_index = index;\
4179 ost->sync_ist = input_streams[index];\
4180 input_streams[index]->discard = 0;\
4181 input_streams[index]->st->discard = AVDISCARD_NONE;\
4184 /* video: highest resolution */
4185 if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
4186 int area = 0, idx = -1;
4187 for (i = 0; i < nb_input_streams; i++) {
4188 ist = input_streams[i];
4189 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
4190 ist->st->codec->width * ist->st->codec->height > area) {
4191 area = ist->st->codec->width * ist->st->codec->height;
4195 NEW_STREAM(video, idx);
4198 /* audio: most channels */
4199 if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
4200 int channels = 0, idx = -1;
4201 for (i = 0; i < nb_input_streams; i++) {
4202 ist = input_streams[i];
4203 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
4204 ist->st->codec->channels > channels) {
4205 channels = ist->st->codec->channels;
4209 NEW_STREAM(audio, idx);
4212 /* subtitles: pick first */
4213 if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
4214 for (i = 0; i < nb_input_streams; i++)
4215 if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
4216 NEW_STREAM(subtitle, i);
4220 /* do something with data? */
/* explicit -map options: each entry is either a filtergraph output label
 * or an input file:stream pair */
4222 for (i = 0; i < o->nb_stream_maps; i++) {
4223 StreamMap *map = &o->stream_maps[i];
4228 if (map->linklabel) {
4230 OutputFilter *ofilter = NULL;
4233 for (j = 0; j < nb_filtergraphs; j++) {
4234 fg = filtergraphs[j];
4235 for (k = 0; k < fg->nb_outputs; k++) {
4236 AVFilterInOut *out = fg->outputs[k]->out_tmp;
4237 if (out && !strcmp(out->name, map->linklabel)) {
4238 ofilter = fg->outputs[k];
4245 av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
4246 "in any defined filter graph.\n", map->linklabel);
4249 init_output_filter(ofilter, o, oc);
4251 ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
4252 switch (ist->st->codec->codec_type) {
4253 case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
4254 case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
4255 case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
4256 case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
4257 case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
4259 av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
4260 map->file_index, map->stream_index);
4264 ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
4265 ost->sync_ist = input_streams[input_files[map->sync_file_index]->ist_index +
4266 map->sync_stream_index];
4268 ist->st->discard = AVDISCARD_NONE;
4273 /* handle attached files */
4274 for (i = 0; i < o->nb_attachments; i++) {
4276 uint8_t *attachment;
4280 if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
4281 av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
4285 if ((len = avio_size(pb)) <= 0) {
4286 av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
4290 if (!(attachment = av_malloc(len))) {
4291 av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
4295 avio_read(pb, attachment, len);
/* the attachment stream carries the file contents as codec extradata */
4297 ost = new_attachment_stream(o, oc);
4298 ost->stream_copy = 0;
4299 ost->source_index = -1;
4300 ost->attachment_filename = o->attachments[i];
4301 ost->st->codec->extradata = attachment;
4302 ost->st->codec->extradata_size = len;
4304 p = strrchr(o->attachments[i], '/');
4305 av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
/* register the new OutputFile and remember where its streams start */
4309 output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
4310 if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
4313 output_files[nb_output_files - 1]->ctx = oc;
4314 output_files[nb_output_files - 1]->ost_index = nb_output_streams - oc->nb_streams;
4315 output_files[nb_output_files - 1]->recording_time = o->recording_time;
4316 if (o->recording_time != INT64_MAX)
4317 oc->duration = o->recording_time;
4318 output_files[nb_output_files - 1]->start_time = o->start_time;
4319 output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
4320 av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
4322 /* check filename in case of an image number is expected */
4323 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
4324 if (!av_filename_number_test(oc->filename)) {
4325 print_error(oc->filename, AVERROR(EINVAL));
4330 if (!(oc->oformat->flags & AVFMT_NOFILE)) {
4331 /* test if it already exists to avoid losing precious files */
4332 assert_file_overwrite(filename);
4335 if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
4336 &oc->interrupt_callback,
4337 &output_files[nb_output_files - 1]->opts)) < 0) {
4338 print_error(filename, err);
4343 if (o->mux_preload) {
4345 snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
4346 av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
4348 oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
4349 oc->flags |= AVFMT_FLAG_NONBLOCK;
/* -map_metadata: "infile[,spec]" pairs copied into this output */
4352 for (i = 0; i < o->nb_metadata_map; i++) {
4354 int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
4356 if (in_file_index < 0)
4358 if (in_file_index >= nb_input_files) {
4359 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4362 copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
/* -map_chapters: INT_MAX (the default) means "first input that has any" */
4366 if (o->chapters_input_file >= nb_input_files) {
4367 if (o->chapters_input_file == INT_MAX) {
4368 /* copy chapters from the first input file that has them*/
4369 o->chapters_input_file = -1;
4370 for (i = 0; i < nb_input_files; i++)
4371 if (input_files[i]->ctx->nb_chapters) {
4372 o->chapters_input_file = i;
4376 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4377 o->chapters_input_file);
4381 if (o->chapters_input_file >= 0)
4382 copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
4383 !o->metadata_chapters_manual);
4385 /* copy global metadata by default */
4386 if (!o->metadata_global_manual && nb_input_files)
4387 av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
4388 AV_DICT_DONT_OVERWRITE);
4389 if (!o->metadata_streams_manual)
4390 for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
4392 if (output_streams[i]->source_index < 0) /* this is true e.g. for attached files */
4394 ist = input_streams[output_streams[i]->source_index];
4395 av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4398 /* process manually set metadata */
4399 for (i = 0; i < o->nb_metadata; i++) {
4402 const char *stream_spec;
4403 int index = 0, j, ret;
4405 val = strchr(o->metadata[i].u.str, '=');
4407 av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4408 o->metadata[i].u.str);
4413 parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
4415 for (j = 0; j < oc->nb_streams; j++) {
4416 if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
4417 av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
/* NOTE(review): stray debug printf to stdout — looks like a leftover;
 * consider removing it or routing through av_log(). */
4421 printf("ret %d, stream_spec %s\n", ret, stream_spec);
4429 if (index < 0 || index >= oc->nb_chapters) {
4430 av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4433 m = &oc->chapters[index]->metadata;
4436 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
4439 av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
4446 /* same option as mencoder */
/* -pass handler: sets the global two-pass encoding pass number (1 or 2). */
4447 static int opt_pass(const char *opt, const char *arg)
4449 do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
/* Return the user CPU time consumed by this process, in microseconds,
 * using getrusage() or GetProcessTimes() where available and falling
 * back to wall-clock time otherwise. */
4453 static int64_t getutime(void)
4456 struct rusage rusage;
4458 getrusage(RUSAGE_SELF, &rusage);
4459 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4460 #elif HAVE_GETPROCESSTIMES
4462 FILETIME c, e, k, u;
4463 proc = GetCurrentProcess();
4464 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100 ns units; divide by 10 for microseconds */
4465 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4467 return av_gettime();
/* Return the peak memory usage of this process in bytes (for -benchmark),
 * or fall through (elided lines) where neither API is available. */
4471 static int64_t getmaxrss(void)
4473 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4474 struct rusage rusage;
4475 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux */
4476 return (int64_t)rusage.ru_maxrss * 1024;
4477 #elif HAVE_GETPROCESSMEMORYINFO
4479 PROCESS_MEMORY_COUNTERS memcounters;
4480 proc = GetCurrentProcess();
4481 memcounters.cb = sizeof(memcounters);
4482 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4483 return memcounters.PeakPagefileUsage;
/* -aq handler: alias for the audio-stream-qualified quality option -q:a. */
4489 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4491 return parse_option(o, "q:a", arg, options);
/* Print the one-line program banner and command-line synopsis. */
4494 static void show_usage(void)
4496 printf("Hyper fast Audio and Video encoder\n");
4497 printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
4501 static void show_help(void)
4503 int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
4504 av_log_set_callback(log_callback_help);
4506 show_help_options(options, "Main options:\n",
4507 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4508 show_help_options(options, "\nAdvanced options:\n",
4509 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4511 show_help_options(options, "\nVideo options:\n",
4512 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4514 show_help_options(options, "\nAdvanced Video options:\n",
4515 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4516 OPT_VIDEO | OPT_EXPERT);
4517 show_help_options(options, "\nAudio options:\n",
4518 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4520 show_help_options(options, "\nAdvanced Audio options:\n",
4521 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4522 OPT_AUDIO | OPT_EXPERT);
4523 show_help_options(options, "\nSubtitle options:\n",
4524 OPT_SUBTITLE | OPT_GRAB,
4526 show_help_options(options, "\nAudio/Video grab options:\n",
4530 show_help_children(avcodec_get_class(), flags);
4531 show_help_children(avformat_get_class(), flags);
4532 show_help_children(sws_get_class(), flags);
/* -target handler: configure a complete option preset for a standard
 * target ("vcd", "svcd", "dvd", "dv"/"dv50", optionally prefixed with
 * "pal-", "ntsc-" or "film-").  Without an explicit prefix the norm is
 * guessed from the frame rates of the already-opened input files. */
4535 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4537 enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
/* per-norm frame rates: PAL=25, NTSC=30000/1001, FILM=24000/1001 */
4538 static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
4540 if (!strncmp(arg, "pal-", 4)) {
4543 } else if (!strncmp(arg, "ntsc-", 5)) {
4546 } else if (!strncmp(arg, "film-", 5)) {
4550 /* Try to determine PAL/NTSC by peeking in the input files */
4551 if (nb_input_files) {
4553 for (j = 0; j < nb_input_files; j++) {
4554 for (i = 0; i < input_files[j]->nb_streams; i++) {
4555 AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
4556 if (c->codec_type != AVMEDIA_TYPE_VIDEO)
/* frame rate in millihertz, to compare against 25000/29970/23976 */
4558 fr = c->time_base.den * 1000 / c->time_base.num;
4562 } else if ((fr == 29970) || (fr == 23976)) {
4567 if (norm != UNKNOWN)
4571 if (norm != UNKNOWN)
4572 av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4575 if (norm == UNKNOWN) {
4576 av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4577 av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4578 av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
/* VCD: MPEG-1 video + MP2 audio at fixed VCD rates */
4582 if (!strcmp(arg, "vcd")) {
4583 opt_video_codec(o, "c:v", "mpeg1video");
4584 opt_audio_codec(o, "c:a", "mp2");
4585 parse_option(o, "f", "vcd", options);
4587 parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4588 parse_option(o, "r", frame_rates[norm], options);
4589 opt_default("g", norm == PAL ? "15" : "18");
4591 opt_default("b", "1150000");
4592 opt_default("maxrate", "1150000");
4593 opt_default("minrate", "1150000");
4594 opt_default("bufsize", "327680"); // 40*1024*8;
4596 opt_default("b:a", "224000");
4597 parse_option(o, "ar", "44100", options);
4598 parse_option(o, "ac", "2", options);
4600 opt_default("packetsize", "2324");
4601 opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4603 /* We have to offset the PTS, so that it is consistent with the SCR.
4604 SCR starts at 36000, but the first two packs contain only padding
4605 and the first pack from the other stream, respectively, may also have
4606 been written before.
4607 So the real data starts at SCR 36000+3*1200. */
4608 o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
/* SVCD: MPEG-2 video + MP2 audio */
4609 } else if (!strcmp(arg, "svcd")) {
4611 opt_video_codec(o, "c:v", "mpeg2video");
4612 opt_audio_codec(o, "c:a", "mp2");
4613 parse_option(o, "f", "svcd", options);
4615 parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4616 parse_option(o, "r", frame_rates[norm], options);
4617 opt_default("g", norm == PAL ? "15" : "18");
4619 opt_default("b", "2040000");
4620 opt_default("maxrate", "2516000");
4621 opt_default("minrate", "0"); // 1145000;
4622 opt_default("bufsize", "1835008"); // 224*1024*8;
4623 opt_default("flags", "+scan_offset");
4626 opt_default("b:a", "224000");
4627 parse_option(o, "ar", "44100", options);
4629 opt_default("packetsize", "2324");
/* DVD: MPEG-2 video + AC-3 audio */
4631 } else if (!strcmp(arg, "dvd")) {
4633 opt_video_codec(o, "c:v", "mpeg2video");
4634 opt_audio_codec(o, "c:a", "ac3");
4635 parse_option(o, "f", "dvd", options);
4637 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4638 parse_option(o, "r", frame_rates[norm], options);
4639 opt_default("g", norm == PAL ? "15" : "18");
4641 opt_default("b", "6000000");
4642 opt_default("maxrate", "9000000");
4643 opt_default("minrate", "0"); // 1500000;
4644 opt_default("bufsize", "1835008"); // 224*1024*8;
4646 opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4647 opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4649 opt_default("b:a", "448000");
4650 parse_option(o, "ar", "48000", options);
/* DV / DV50: raw DV container, pixel format varies with norm and variant */
4652 } else if (!strncmp(arg, "dv", 2)) {
4654 parse_option(o, "f", "dv", options);
4656 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4657 parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4658 norm == PAL ? "yuv420p" : "yuv411p", options);
4659 parse_option(o, "r", frame_rates[norm], options);
4661 parse_option(o, "ar", "48000", options);
4662 parse_option(o, "ac", "2", options);
4665 av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4666 return AVERROR(EINVAL);
/* -vstats_file handler: remember the filename for per-frame video
 * statistics, replacing (and freeing) any previous value. */
4671 static int opt_vstats_file(const char *opt, const char *arg)
4673 av_free (vstats_filename);
4674 vstats_filename = av_strdup (arg);
/* -vstats handler: enable video statistics with an auto-generated
 * "vstats_HHMMSS.log" filename based on the current local time. */
4678 static int opt_vstats(const char *opt, const char *arg)
4681 time_t today2 = time(NULL);
4682 struct tm *today = localtime(&today2);
4684 snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4686 return opt_vstats_file(opt, filename);
/* -vframes handler: alias for -frames:v. */
4689 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4691 return parse_option(o, "frames:v", arg, options);
/* -aframes handler: alias for -frames:a. */
4694 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4696 return parse_option(o, "frames:a", arg, options);
/* -dframes handler: alias for -frames:d. */
4699 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4701 return parse_option(o, "frames:d", arg, options);
/* -vtag handler: alias for -tag:v. */
4704 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4706 return parse_option(o, "tag:v", arg, options);
/* -atag handler: alias for -tag:a. */
4709 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4711 return parse_option(o, "tag:a", arg, options);
/* -stag handler: alias for -tag:s. */
4714 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4716 return parse_option(o, "tag:s", arg, options);
/* -vf handler: alias for -filter:v. */
4719 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4721 return parse_option(o, "filter:v", arg, options);
/* -af handler: alias for -filter:a. */
4724 static int opt_audio_filters(OptionsContext *o, const char *opt, const char *arg)
4726 return parse_option(o, "filter:a", arg, options);
/* -vsync handler: accept the symbolic names cfr/vfr/passthrough, or a
 * bare number in the legacy VSYNC_* numeric range as a fallback. */
4729 static int opt_vsync(const char *opt, const char *arg)
4731 if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
4732 else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
4733 else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
/* still VSYNC_AUTO: none of the names matched, parse as a number */
4735 if (video_sync_method == VSYNC_AUTO)
4736 video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
/* -deinterlace handler: deprecated; warns and enables the legacy
 * deinterlacing path (use -filter:v yadif instead). */
4740 static int opt_deinterlace(const char *opt, const char *arg)
4742 av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
/* -cpuflags handler: parse the flag string and force the CPU feature mask
 * used by the libraries (for testing/benchmarking). */
4747 static int opt_cpuflags(const char *opt, const char *arg)
4749 int flags = av_parse_cpu_flags(arg);
4754 av_set_cpu_flags_mask(flags);
/* Scan argv for -cpuflags before regular option parsing so the CPU mask is
 * already in force while the libraries initialize. */
4758 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
4760 int idx = locate_option(argc, argv, options, "cpuflags");
4761 if (idx && argv[idx + 1])
4762 opt_cpuflags("cpuflags", argv[idx + 1]);
/* -channel_layout handler: set the named layout via the generic option
 * machinery and additionally derive the matching "ac" (channel count)
 * option, preserving any ":stream_spec" suffix from 'opt'. */
4765 static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
4767 char layout_str[32];
4770 int ret, channels, ac_str_size;
4773 layout = av_get_channel_layout(arg);
4775 av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
4776 return AVERROR(EINVAL);
4778 snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
4779 ret = opt_default(opt, layout_str);
4783 /* set 'ac' option based on channel layout */
4784 channels = av_get_channel_layout_nb_channels(layout);
4785 snprintf(layout_str, sizeof(layout_str), "%d", channels);
/* carry over a ":stream_spec" suffix (e.g. ":a:0") onto the "ac" option */
4786 stream_str = strchr(opt, ':');
4787 ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
4788 ac_str = av_mallocz(ac_str_size);
4790 return AVERROR(ENOMEM);
4791 av_strlcpy(ac_str, "ac", 3);
4793 av_strlcat(ac_str, stream_str, ac_str_size);
4794 ret = parse_option(o, ac_str, layout_str, options);
/* -filter_complex handler: append a new FilterGraph holding the textual
 * graph description; it is parsed/configured later, when outputs exist. */
4800 static int opt_filter_complex(const char *opt, const char *arg)
4802 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
4803 &nb_filtergraphs, nb_filtergraphs + 1);
4804 if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
4805 return AVERROR(ENOMEM);
4806 filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
4807 filtergraphs[nb_filtergraphs - 1]->graph_desc = arg;
4811 #define OFFSET(x) offsetof(OptionsContext, x)
/*
 * Table of all command-line options understood by avconv.  Each entry maps
 * an option name to its type/scope flags and one of: a handler callback
 * (HAS_ARG / OPT_FUNC2), a global variable pointer, or an offset into
 * OptionsContext (OPT_OFFSET / OPT_SPEC).
 *
 * FIX(review): the "copyts" and "copytb" entries had been corrupted by an
 * HTML-entity mangling of "&copy_ts"/"&copy_tb" into "©_ts"/"©_tb" (the
 * "&copy" named entity); the correct address-of expressions are restored.
 */
4812 static const OptionDef options[] = {
4814 #include "cmdutils_common_opts.h"
4815 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4816 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4817 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4818 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4819 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4820 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4821 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4822 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4823 "outfile[,metadata]:infile[,metadata]" },
4824 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4825 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4826 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4827 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4828 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4829 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4830 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4831 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4832 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4833 "add timings for benchmarking" },
4834 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4835 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4836 "dump each input packet" },
4837 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4838 "when dumping packets, also dump the payload" },
4839 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4840 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4841 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4842 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4843 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4844 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)&copy_ts}, "copy timestamps" },
4845 { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)&copy_tb}, "copy input stream time base when stream copying" },
4846 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4847 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4848 { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4849 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4850 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4851 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4852 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4853 { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4854 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4855 { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
4856 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4857 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4858 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4859 { "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "set CPU flags mask", "mask" },
/* video options */
4862 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4863 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4864 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4865 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4866 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4867 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4868 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4869 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4870 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4871 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4872 "use same quantizer as source (implies VBR)" },
4873 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4874 { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
4875 { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
4876 "this option is deprecated, use the yadif filter instead" },
4877 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
4878 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
4879 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
4880 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
4881 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
4882 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
4883 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
4884 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
4885 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
4886 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
4887 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
4888 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
/* audio options */
4891 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
4892 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
4893 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
4894 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
4895 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
4896 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
4897 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
4898 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
4899 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
4900 { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
4901 { "af", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_filters}, "audio filters", "filter list" },
4903 /* subtitle options */
4904 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
4905 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
4906 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
/* grab options */
4909 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
/* muxer options */
4912 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
4913 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
4915 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
4917 /* data codec support */
4918 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
4920 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
4924 int main(int argc, char **argv)
4926 OptionsContext o = { 0 };
4931 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4932 parse_loglevel(argc, argv, options);
4934 avcodec_register_all();
4936 avdevice_register_all();
4938 avfilter_register_all();
4940 avformat_network_init();
4944 parse_cpuflags(argc, argv, options);
4947 parse_options(&o, argc, argv, options, opt_output_file);
4949 if (nb_output_files <= 0 && nb_input_files == 0) {
4951 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4955 /* file converter / grab */
4956 if (nb_output_files <= 0) {
4957 fprintf(stderr, "At least one output file must be specified\n");
4961 if (nb_input_files == 0) {
4962 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4967 if (transcode() < 0)
4969 ti = getutime() - ti;
4971 int maxrss = getmaxrss() / 1024;
4972 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);