3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavresample/avresample.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/vsrc_buffer.h"
56 #if HAVE_SYS_RESOURCE_H
57 #include <sys/types.h>
59 #include <sys/resource.h>
60 #elif HAVE_GETPROCESSTIMES
63 #if HAVE_GETPROCESSMEMORYINFO
69 #include <sys/select.h>
76 #include "libavutil/avassert.h"
/* Video sync methods (value of video_sync_method). VSYNC_AUTO is
 * referenced below, so the full set is defined here. */
#define VSYNC_AUTO       -1
#define VSYNC_PASSTHROUGH 0  /* pass frames through without dup/drop */
#define VSYNC_CFR         1  /* duplicate/drop to constant frame rate */
#define VSYNC_VFR         2  /* drop only, variable frame rate */

const char program_name[]     = "avconv";
const int  program_birth_year = 2000;
/* select an input stream for an output stream */
typedef struct StreamMap {
    int disabled;           /**< 1 if this mapping is disabled by a negative map */
    int file_index;         /**< input file index */
    int stream_index;       /**< stream index within the input file */
    int sync_file_index;    /**< file index of the sync stream */
    int sync_stream_index;  /**< stream index of the sync stream */
    char *linklabel;        /**< name of an output link, for mapping lavfi outputs */
} StreamMap;
/**
 * select an input file for an output file
 */
typedef struct MetadataMap {
    int  file;      ///< file index
    char type;      ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
    int  index;     ///< stream/chapter/program number
} MetadataMap;
105 static const OptionDef options[];
107 static int video_discard = 0;
108 static int same_quant = 0;
109 static int do_deinterlace = 0;
110 static int intra_dc_precision = 8;
111 static int qp_hist = 0;
113 static int file_overwrite = 0;
114 static int do_benchmark = 0;
115 static int do_hex_dump = 0;
116 static int do_pkt_dump = 0;
117 static int do_pass = 0;
118 static char *pass_logfilename_prefix = NULL;
119 static int video_sync_method = VSYNC_AUTO;
120 static int audio_sync_method = 0;
121 static float audio_drift_threshold = 0.1;
122 static int copy_ts = 0;
123 static int copy_tb = 1;
124 static int opt_shortest = 0;
125 static char *vstats_filename;
126 static FILE *vstats_file;
128 static int audio_volume = 256;
130 static int exit_on_error = 0;
131 static int using_stdin = 0;
132 static int64_t video_size = 0;
133 static int64_t audio_size = 0;
134 static int64_t extra_size = 0;
135 static int nb_frames_dup = 0;
136 static int nb_frames_drop = 0;
137 static int input_sync;
139 static float dts_delta_threshold = 10;
141 static int print_stats = 1;
143 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
145 typedef struct InputFilter {
146 AVFilterContext *filter;
147 struct InputStream *ist;
148 struct FilterGraph *graph;
151 typedef struct OutputFilter {
152 AVFilterContext *filter;
153 struct OutputStream *ost;
154 struct FilterGraph *graph;
156 /* temporary storage until stream maps are processed */
157 AVFilterInOut *out_tmp;
160 typedef struct FilterGraph {
162 const char *graph_desc;
164 AVFilterGraph *graph;
166 InputFilter **inputs;
168 OutputFilter **outputs;
172 typedef struct FrameBuffer {
178 enum PixelFormat pix_fmt;
181 struct InputStream *ist;
182 struct FrameBuffer *next;
185 typedef struct InputStream {
188 int discard; /* true if stream data should be discarded */
189 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
191 AVFrame *decoded_frame;
193 int64_t start; /* time when read started */
194 /* predicted dts of the next packet read for this stream or (when there are
195 * several frames in a packet) of the next frame in current packet */
197 /* dts of the last packet read for this stream */
199 PtsCorrectionContext pts_ctx;
201 int is_start; /* is 1 at the start and after a discontinuity */
202 int showed_multi_packet_warning;
207 int resample_pix_fmt;
209 int resample_sample_fmt;
210 int resample_sample_rate;
211 int resample_channels;
212 uint64_t resample_channel_layout;
214 /* a pool of free buffers for decoded data */
215 FrameBuffer *buffer_pool;
217 /* decoded data from this stream goes into all those filters
218 * currently video and audio only */
219 InputFilter **filters;
223 typedef struct InputFile {
224 AVFormatContext *ctx;
225 int eof_reached; /* true if eof reached */
226 int ist_index; /* index of first stream in ist_table */
227 int buffer_size; /* current total buffer size */
229 int nb_streams; /* number of stream that avconv is aware of; may be different
230 from ctx.nb_streams if new streams appear during av_read_frame() */
234 typedef struct OutputStream {
235 int file_index; /* file index */
236 int index; /* stream index in the output file */
237 int source_index; /* InputStream index */
238 AVStream *st; /* stream in the output file */
239 int encoding_needed; /* true if encoding needed for this stream */
241 /* input pts and corresponding output pts
243 // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
244 struct InputStream *sync_ist; /* input stream to sync against */
245 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
246 /* pts of the first frame encoded for this stream, used for limiting
249 AVBitStreamFilterContext *bitstream_filters;
252 AVFrame *filtered_frame;
255 AVRational frame_rate;
259 float frame_aspect_ratio;
262 /* forced key frames */
263 int64_t *forced_kf_pts;
269 OutputFilter *filter;
274 int is_past_recording_time;
276 const char *attachment_filename;
277 int copy_initial_nonkeyframes;
279 enum PixelFormat pix_fmts[2];
283 typedef struct OutputFile {
284 AVFormatContext *ctx;
286 int ost_index; /* index of the first stream in output_streams */
287 int64_t recording_time; /* desired length of the resulting file in microseconds */
288 int64_t start_time; /* start time in microseconds */
289 uint64_t limit_filesize;
292 static InputStream **input_streams = NULL;
293 static int nb_input_streams = 0;
294 static InputFile **input_files = NULL;
295 static int nb_input_files = 0;
297 static OutputStream **output_streams = NULL;
298 static int nb_output_streams = 0;
299 static OutputFile **output_files = NULL;
300 static int nb_output_files = 0;
302 static FilterGraph **filtergraphs;
305 typedef struct OptionsContext {
306 /* input/output options */
310 SpecifierOpt *codec_names;
312 SpecifierOpt *audio_channels;
313 int nb_audio_channels;
314 SpecifierOpt *audio_sample_rate;
315 int nb_audio_sample_rate;
316 SpecifierOpt *frame_rates;
318 SpecifierOpt *frame_sizes;
320 SpecifierOpt *frame_pix_fmts;
321 int nb_frame_pix_fmts;
324 int64_t input_ts_offset;
327 SpecifierOpt *ts_scale;
329 SpecifierOpt *dump_attachment;
330 int nb_dump_attachment;
333 StreamMap *stream_maps;
335 /* first item specifies output metadata, second is input */
336 MetadataMap (*meta_data_maps)[2];
337 int nb_meta_data_maps;
338 int metadata_global_manual;
339 int metadata_streams_manual;
340 int metadata_chapters_manual;
341 const char **attachments;
344 int chapters_input_file;
346 int64_t recording_time;
347 uint64_t limit_filesize;
353 int subtitle_disable;
356 /* indexed by output file stream index */
360 SpecifierOpt *metadata;
362 SpecifierOpt *max_frames;
364 SpecifierOpt *bitstream_filters;
365 int nb_bitstream_filters;
366 SpecifierOpt *codec_tags;
368 SpecifierOpt *sample_fmts;
370 SpecifierOpt *qscale;
372 SpecifierOpt *forced_key_frames;
373 int nb_forced_key_frames;
374 SpecifierOpt *force_fps;
376 SpecifierOpt *frame_aspect_ratios;
377 int nb_frame_aspect_ratios;
378 SpecifierOpt *rc_overrides;
380 SpecifierOpt *intra_matrices;
381 int nb_intra_matrices;
382 SpecifierOpt *inter_matrices;
383 int nb_inter_matrices;
384 SpecifierOpt *top_field_first;
385 int nb_top_field_first;
386 SpecifierOpt *metadata_map;
388 SpecifierOpt *presets;
390 SpecifierOpt *copy_initial_nonkeyframes;
391 int nb_copy_initial_nonkeyframes;
392 SpecifierOpt *filters;
/* Assign to outvar the value of the last per-stream option whose
 * specifier matches st; aborts on an invalid specifier. */
#define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
{\
    int i, ret;\
    for (i = 0; i < o->nb_ ## name; i++) {\
        char *spec = o->name[i].specifier;\
        if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
            outvar = o->name[i].u.type;\
        else if (ret < 0)\
            exit_program(1);\
    }\
}
408 static void reset_options(OptionsContext *o)
410 const OptionDef *po = options;
413 /* all OPT_SPEC and OPT_STRING can be freed in generic way */
415 void *dst = (uint8_t*)o + po->u.off;
417 if (po->flags & OPT_SPEC) {
418 SpecifierOpt **so = dst;
419 int i, *count = (int*)(so + 1);
420 for (i = 0; i < *count; i++) {
421 av_freep(&(*so)[i].specifier);
422 if (po->flags & OPT_STRING)
423 av_freep(&(*so)[i].u.str);
427 } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
432 for (i = 0; i < o->nb_stream_maps; i++)
433 av_freep(&o->stream_maps[i].linklabel);
434 av_freep(&o->stream_maps);
435 av_freep(&o->meta_data_maps);
436 av_freep(&o->streamid_map);
438 memset(o, 0, sizeof(*o));
440 o->mux_max_delay = 0.7;
441 o->recording_time = INT64_MAX;
442 o->limit_filesize = UINT64_MAX;
443 o->chapters_input_file = INT_MAX;
449 static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
451 FrameBuffer *buf = av_mallocz(sizeof(*buf));
453 const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
454 int h_chroma_shift, v_chroma_shift;
455 int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
456 int w = s->width, h = s->height;
459 return AVERROR(ENOMEM);
461 if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
466 avcodec_align_dimensions(s, &w, &h);
467 if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
468 s->pix_fmt, 32)) < 0) {
472 /* XXX this shouldn't be needed, but some tests break without this line
473 * those decoders are buggy and need to be fixed.
474 * the following tests fail:
475 * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
477 memset(buf->base[0], 128, ret);
479 avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
480 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
481 const int h_shift = i==0 ? 0 : h_chroma_shift;
482 const int v_shift = i==0 ? 0 : v_chroma_shift;
483 if (s->flags & CODEC_FLAG_EMU_EDGE)
484 buf->data[i] = buf->base[i];
486 buf->data[i] = buf->base[i] +
487 FFALIGN((buf->linesize[i]*edge >> v_shift) +
488 (pixel_size*edge >> h_shift), 32);
492 buf->pix_fmt = s->pix_fmt;
499 static void free_buffer_pool(InputStream *ist)
501 FrameBuffer *buf = ist->buffer_pool;
503 ist->buffer_pool = buf->next;
504 av_freep(&buf->base[0]);
506 buf = ist->buffer_pool;
510 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
512 av_assert0(buf->refcount);
514 if (!buf->refcount) {
515 buf->next = ist->buffer_pool;
516 ist->buffer_pool = buf;
520 static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
522 InputStream *ist = s->opaque;
526 if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
529 buf = ist->buffer_pool;
530 ist->buffer_pool = buf->next;
532 if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
533 av_freep(&buf->base[0]);
535 if ((ret = alloc_buffer(ist, s, &buf)) < 0)
541 frame->type = FF_BUFFER_TYPE_USER;
542 frame->extended_data = frame->data;
543 frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
544 frame->width = buf->w;
545 frame->height = buf->h;
546 frame->format = buf->pix_fmt;
547 frame->sample_aspect_ratio = s->sample_aspect_ratio;
549 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
550 frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
551 frame->data[i] = buf->data[i];
552 frame->linesize[i] = buf->linesize[i];
558 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
560 InputStream *ist = s->opaque;
561 FrameBuffer *buf = frame->opaque;
564 for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
565 frame->data[i] = NULL;
567 unref_buffer(ist, buf);
570 static void filter_release_buffer(AVFilterBuffer *fb)
572 FrameBuffer *buf = fb->priv;
574 unref_buffer(buf->ist, buf);
578 * Define a function for building a string containing a list of
581 #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator) \
582 static char *choose_ ## var ## s(OutputStream *ost) \
584 if (ost->st->codec->var != none) { \
585 get_name(ost->st->codec->var); \
586 return av_strdup(name); \
587 } else if (ost->enc->supported_list) { \
589 AVIOContext *s = NULL; \
593 if (avio_open_dyn_buf(&s) < 0) \
596 for (p = ost->enc->supported_list; *p != none; p++) { \
598 avio_printf(s, "%s" separator, name); \
600 len = avio_close_dyn_buf(s, &ret); \
607 #define GET_PIX_FMT_NAME(pix_fmt)\
608 const char *name = av_get_pix_fmt_name(pix_fmt);
610 DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
611 GET_PIX_FMT_NAME, ":")
613 #define GET_SAMPLE_FMT_NAME(sample_fmt)\
614 const char *name = av_get_sample_fmt_name(sample_fmt)
616 DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
617 AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")
619 #define GET_SAMPLE_RATE_NAME(rate)\
621 snprintf(name, sizeof(name), "%d", rate);
623 DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
624 GET_SAMPLE_RATE_NAME, ",")
626 #define GET_CH_LAYOUT_NAME(ch_layout)\
628 snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
630 DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
631 GET_CH_LAYOUT_NAME, ",")
633 static int configure_audio_filters(FilterGraph *fg, AVFilterContext **in_filter,
634 AVFilterContext **out_filter)
636 InputStream *ist = fg->inputs[0]->ist;
637 OutputStream *ost = fg->outputs[0]->ost;
638 AVCodecContext *codec = ost->st->codec;
639 AVCodecContext *icodec = ist->st->codec;
640 char *sample_fmts, *sample_rates, *channel_layouts;
644 avfilter_graph_free(&fg->graph);
645 if (!(fg->graph = avfilter_graph_alloc()))
646 return AVERROR(ENOMEM);
648 snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:"
649 "channel_layout=0x%"PRIx64, ist->st->time_base.num,
650 ist->st->time_base.den, icodec->sample_rate,
651 av_get_sample_fmt_name(icodec->sample_fmt), icodec->channel_layout);
652 ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
653 avfilter_get_by_name("abuffer"),
654 "src", args, NULL, fg->graph);
658 ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
659 avfilter_get_by_name("abuffersink"),
660 "out", NULL, NULL, fg->graph);
664 *in_filter = fg->inputs[0]->filter;
665 *out_filter = fg->outputs[0]->filter;
667 if (codec->channels && !codec->channel_layout)
668 codec->channel_layout = av_get_default_channel_layout(codec->channels);
670 sample_fmts = choose_sample_fmts(ost);
671 sample_rates = choose_sample_rates(ost);
672 channel_layouts = choose_channel_layouts(ost);
673 if (sample_fmts || sample_rates || channel_layouts) {
674 AVFilterContext *format;
679 len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
682 len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
685 len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
689 av_freep(&sample_fmts);
690 av_freep(&sample_rates);
691 av_freep(&channel_layouts);
693 ret = avfilter_graph_create_filter(&format,
694 avfilter_get_by_name("aformat"),
695 "aformat", args, NULL, fg->graph);
699 ret = avfilter_link(format, 0, fg->outputs[0]->filter, 0);
703 *out_filter = format;
706 if (audio_sync_method > 0) {
707 AVFilterContext *async;
711 av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Used the "
712 "asyncts audio filter instead.\n");
714 if (audio_sync_method > 1)
715 len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
716 "max_comp=%d:", audio_sync_method);
717 snprintf(args + len, sizeof(args) - len, "min_delta=%f",
718 audio_drift_threshold);
720 ret = avfilter_graph_create_filter(&async,
721 avfilter_get_by_name("asyncts"),
722 "async", args, NULL, fg->graph);
726 ret = avfilter_link(*in_filter, 0, async, 0);
736 static int configure_video_filters(FilterGraph *fg, AVFilterContext **in_filter,
737 AVFilterContext **out_filter)
739 InputStream *ist = fg->inputs[0]->ist;
740 OutputStream *ost = fg->outputs[0]->ost;
741 AVFilterContext *filter;
742 AVCodecContext *codec = ost->st->codec;
744 AVRational sample_aspect_ratio;
748 if (ist->st->sample_aspect_ratio.num) {
749 sample_aspect_ratio = ist->st->sample_aspect_ratio;
751 sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
753 snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
754 ist->st->codec->height, ist->st->codec->pix_fmt,
755 ist->st->time_base.num, ist->st->time_base.den,
756 sample_aspect_ratio.num, sample_aspect_ratio.den);
758 ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
759 avfilter_get_by_name("buffer"),
760 "src", args, NULL, fg->graph);
763 ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
764 avfilter_get_by_name("buffersink"),
765 "out", NULL, NULL, fg->graph);
768 *in_filter = fg->inputs[0]->filter;
769 *out_filter = fg->outputs[0]->filter;
771 if (codec->width || codec->height) {
772 snprintf(args, 255, "%d:%d:flags=0x%X",
775 (unsigned)ost->sws_flags);
776 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
777 NULL, args, NULL, fg->graph)) < 0)
779 if ((ret = avfilter_link(*in_filter, 0, filter, 0)) < 0)
784 if (ost->frame_rate.num) {
785 snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
786 ost->frame_rate.den);
787 ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("fps"),
788 "fps", args, NULL, fg->graph);
792 ret = avfilter_link(filter, 0, *out_filter, 0);
795 *out_filter = filter;
798 if ((pix_fmts = choose_pix_fmts(ost))) {
799 if ((ret = avfilter_graph_create_filter(&filter,
800 avfilter_get_by_name("format"),
801 "format", pix_fmts, NULL,
804 if ((ret = avfilter_link(filter, 0, *out_filter, 0)) < 0)
807 *out_filter = filter;
811 snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
812 fg->graph->scale_sws_opts = av_strdup(args);
817 static int configure_simple_filtergraph(FilterGraph *fg)
819 OutputStream *ost = fg->outputs[0]->ost;
820 AVFilterContext *in_filter, *out_filter;
823 avfilter_graph_free(&fg->graph);
824 fg->graph = avfilter_graph_alloc();
826 switch (ost->st->codec->codec_type) {
827 case AVMEDIA_TYPE_VIDEO:
828 ret = configure_video_filters(fg, &in_filter, &out_filter);
830 case AVMEDIA_TYPE_AUDIO:
831 ret = configure_audio_filters(fg, &in_filter, &out_filter);
833 default: av_assert0(0);
839 AVFilterInOut *outputs = avfilter_inout_alloc();
840 AVFilterInOut *inputs = avfilter_inout_alloc();
842 outputs->name = av_strdup("in");
843 outputs->filter_ctx = in_filter;
844 outputs->pad_idx = 0;
845 outputs->next = NULL;
847 inputs->name = av_strdup("out");
848 inputs->filter_ctx = out_filter;
852 if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
855 if ((ret = avfilter_link(in_filter, 0, out_filter, 0)) < 0)
859 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
862 ost->filter = fg->outputs[0];
867 static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
869 FilterGraph *fg = av_mallocz(sizeof(*fg));
873 fg->index = nb_filtergraphs;
875 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
877 if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
879 fg->outputs[0]->ost = ost;
880 fg->outputs[0]->graph = fg;
882 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
884 if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
886 fg->inputs[0]->ist = ist;
887 fg->inputs[0]->graph = fg;
889 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
890 &ist->nb_filters, ist->nb_filters + 1);
891 ist->filters[ist->nb_filters - 1] = fg->inputs[0];
893 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
894 &nb_filtergraphs, nb_filtergraphs + 1);
895 filtergraphs[nb_filtergraphs - 1] = fg;
900 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
903 enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
906 // TODO: support other filter types
907 if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
908 av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
917 int file_idx = strtol(in->name, &p, 0);
919 if (file_idx < 0 || file_idx >= nb_input_files) {
920 av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n",
921 file_idx, fg->graph_desc);
924 s = input_files[file_idx]->ctx;
926 for (i = 0; i < s->nb_streams; i++) {
927 if (s->streams[i]->codec->codec_type != type)
929 if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
935 av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
936 "matches no streams.\n", p, fg->graph_desc);
939 ist = input_streams[input_files[file_idx]->ist_index + st->index];
941 /* find the first unused stream of corresponding type */
942 for (i = 0; i < nb_input_streams; i++) {
943 ist = input_streams[i];
944 if (ist->st->codec->codec_type == type && ist->discard)
947 if (i == nb_input_streams) {
948 av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
949 "unlabeled input pad %d on filter %s", in->pad_idx,
950 in->filter_ctx->name);
955 ist->decoding_needed = 1;
956 ist->st->discard = AVDISCARD_NONE;
958 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
959 &fg->nb_inputs, fg->nb_inputs + 1);
960 if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
962 fg->inputs[fg->nb_inputs - 1]->ist = ist;
963 fg->inputs[fg->nb_inputs - 1]->graph = fg;
965 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
966 &ist->nb_filters, ist->nb_filters + 1);
967 ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
970 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
973 AVCodecContext *codec = ofilter->ost->st->codec;
974 AVFilterContext *last_filter = out->filter_ctx;
975 int pad_idx = out->pad_idx;
979 ret = avfilter_graph_create_filter(&ofilter->filter,
980 avfilter_get_by_name("buffersink"),
981 "out", NULL, pix_fmts, fg->graph);
985 if (codec->width || codec->height) {
987 AVFilterContext *filter;
989 snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
992 (unsigned)ofilter->ost->sws_flags);
993 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
994 NULL, args, NULL, fg->graph)) < 0)
996 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
999 last_filter = filter;
1003 if ((pix_fmts = choose_pix_fmts(ofilter->ost))) {
1004 AVFilterContext *filter;
1005 if ((ret = avfilter_graph_create_filter(&filter,
1006 avfilter_get_by_name("format"),
1007 "format", pix_fmts, NULL,
1010 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1013 last_filter = filter;
1015 av_freep(&pix_fmts);
1018 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1024 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
1026 OutputStream *ost = ofilter->ost;
1027 AVCodecContext *codec = ost->st->codec;
1028 AVFilterContext *last_filter = out->filter_ctx;
1029 int pad_idx = out->pad_idx;
1030 char *sample_fmts, *sample_rates, *channel_layouts;
1033 ret = avfilter_graph_create_filter(&ofilter->filter,
1034 avfilter_get_by_name("abuffersink"),
1035 "out", NULL, NULL, fg->graph);
1039 if (codec->channels && !codec->channel_layout)
1040 codec->channel_layout = av_get_default_channel_layout(codec->channels);
1042 sample_fmts = choose_sample_fmts(ost);
1043 sample_rates = choose_sample_rates(ost);
1044 channel_layouts = choose_channel_layouts(ost);
1045 if (sample_fmts || sample_rates || channel_layouts) {
1046 AVFilterContext *format;
1051 len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
1054 len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
1056 if (channel_layouts)
1057 len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
1061 av_freep(&sample_fmts);
1062 av_freep(&sample_rates);
1063 av_freep(&channel_layouts);
1065 ret = avfilter_graph_create_filter(&format,
1066 avfilter_get_by_name("aformat"),
1067 "aformat", args, NULL, fg->graph);
1071 ret = avfilter_link(last_filter, pad_idx, format, 0);
1075 last_filter = format;
1079 if (audio_sync_method > 0) {
1080 AVFilterContext *async;
1084 av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Used the "
1085 "asyncts audio filter instead.\n");
1087 if (audio_sync_method > 1)
1088 len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
1089 "max_comp=%d:", audio_sync_method);
1090 snprintf(args + len, sizeof(args) - len, "min_delta=%f",
1091 audio_drift_threshold);
1093 ret = avfilter_graph_create_filter(&async,
1094 avfilter_get_by_name("asyncts"),
1095 "async", args, NULL, fg->graph);
1099 ret = avfilter_link(last_filter, pad_idx, async, 0);
1103 last_filter = async;
1107 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1113 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
1115 switch (out->filter_ctx->output_pads[out->pad_idx].type) {
1116 case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
1117 case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
1118 default: av_assert0(0);
1122 static int configure_complex_filter(FilterGraph *fg)
1124 AVFilterInOut *inputs, *outputs, *cur;
1125 int ret, i, init = !fg->graph;
1127 avfilter_graph_free(&fg->graph);
1128 if (!(fg->graph = avfilter_graph_alloc()))
1129 return AVERROR(ENOMEM);
1131 if ((ret = avfilter_graph_parse2(fg->graph, fg->graph_desc, &inputs, &outputs)) < 0)
1134 for (cur = inputs; init && cur; cur = cur->next)
1135 init_input_filter(fg, cur);
1137 for (cur = inputs, i = 0; cur; cur = cur->next, i++) {
1138 InputFilter *ifilter = fg->inputs[i];
1139 InputStream *ist = ifilter->ist;
1144 switch (cur->filter_ctx->input_pads[cur->pad_idx].type) {
1145 case AVMEDIA_TYPE_VIDEO:
1146 sar = ist->st->sample_aspect_ratio.num ?
1147 ist->st->sample_aspect_ratio :
1148 ist->st->codec->sample_aspect_ratio;
1149 snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
1150 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
1152 filter = avfilter_get_by_name("buffer");
1154 case AVMEDIA_TYPE_AUDIO:
1155 snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:"
1156 "sample_fmt=%s:channel_layout=0x%"PRIx64,
1157 ist->st->time_base.num, ist->st->time_base.den,
1158 ist->st->codec->sample_rate,
1159 av_get_sample_fmt_name(ist->st->codec->sample_fmt),
1160 ist->st->codec->channel_layout);
1161 filter = avfilter_get_by_name("abuffer");
1167 if ((ret = avfilter_graph_create_filter(&ifilter->filter,
1169 args, NULL, fg->graph)) < 0)
1171 if ((ret = avfilter_link(ifilter->filter, 0,
1172 cur->filter_ctx, cur->pad_idx)) < 0)
1175 avfilter_inout_free(&inputs);
1178 /* we already know the mappings between lavfi outputs and output streams,
1179 * so we can finish the setup */
1180 for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1181 configure_output_filter(fg, fg->outputs[i], cur);
1182 avfilter_inout_free(&outputs);
1184 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1187 /* wait until output mappings are processed */
1188 for (cur = outputs; cur;) {
1189 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
1190 &fg->nb_outputs, fg->nb_outputs + 1);
1191 if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
1193 fg->outputs[fg->nb_outputs - 1]->graph = fg;
1194 fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
1196 fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
1203 static int configure_complex_filters(void)
1207 for (i = 0; i < nb_filtergraphs; i++)
1208 if (!filtergraphs[i]->graph &&
1209 (ret = configure_complex_filter(filtergraphs[i])) < 0)
1214 static int configure_filtergraph(FilterGraph *fg)
1216 return fg->graph_desc ? configure_complex_filter(fg) :
1217 configure_simple_filtergraph(fg);
1220 static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
1223 for (i = 0; i < fg->nb_inputs; i++)
1224 if (fg->inputs[i]->ist == ist)
1229 static void term_exit(void)
1231 av_log(NULL, AV_LOG_QUIET, "");
/* set asynchronously from the signal handler; polled by the main loop */
static volatile int received_sigterm   = 0;
static volatile int received_nb_signals = 0;

static void
sigterm_handler(int sig)
{
    received_sigterm = sig;
    received_nb_signals++;
    term_exit();
}
1245 static void term_init(void)
1247 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
1248 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
1250 signal(SIGXCPU, sigterm_handler);
1254 static int decode_interrupt_cb(void *ctx)
1256 return received_nb_signals > 1;
1259 static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
1261 void exit_program(int ret)
1265 for (i = 0; i < nb_filtergraphs; i++) {
1266 avfilter_graph_free(&filtergraphs[i]->graph);
1267 for (j = 0; j < filtergraphs[i]->nb_inputs; j++)
1268 av_freep(&filtergraphs[i]->inputs[j]);
1269 av_freep(&filtergraphs[i]->inputs);
1270 for (j = 0; j < filtergraphs[i]->nb_outputs; j++)
1271 av_freep(&filtergraphs[i]->outputs[j]);
1272 av_freep(&filtergraphs[i]->outputs);
1273 av_freep(&filtergraphs[i]);
1275 av_freep(&filtergraphs);
1278 for (i = 0; i < nb_output_files; i++) {
1279 AVFormatContext *s = output_files[i]->ctx;
1280 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
1282 avformat_free_context(s);
1283 av_dict_free(&output_files[i]->opts);
1284 av_freep(&output_files[i]);
1286 for (i = 0; i < nb_output_streams; i++) {
1287 AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
1289 AVBitStreamFilterContext *next = bsfc->next;
1290 av_bitstream_filter_close(bsfc);
1293 output_streams[i]->bitstream_filters = NULL;
1295 av_freep(&output_streams[i]->avfilter);
1296 av_freep(&output_streams[i]->filtered_frame);
1297 av_freep(&output_streams[i]);
1299 for (i = 0; i < nb_input_files; i++) {
1300 avformat_close_input(&input_files[i]->ctx);
1301 av_freep(&input_files[i]);
1303 for (i = 0; i < nb_input_streams; i++) {
1304 av_freep(&input_streams[i]->decoded_frame);
1305 av_dict_free(&input_streams[i]->opts);
1306 free_buffer_pool(input_streams[i]);
1307 av_freep(&input_streams[i]->filters);
1308 av_freep(&input_streams[i]);
1312 fclose(vstats_file);
1313 av_free(vstats_filename);
1315 av_freep(&input_streams);
1316 av_freep(&input_files);
1317 av_freep(&output_streams);
1318 av_freep(&output_files);
1323 avformat_network_deinit();
1325 if (received_sigterm) {
1326 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
1327 (int) received_sigterm);
1334 static void assert_avoptions(AVDictionary *m)
1336 AVDictionaryEntry *t;
1337 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1338 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Refuse to use an experimental codec unless the user opted in with
 * '-strict experimental'. 'encoder' selects the wording and which lookup
 * (avcodec_find_encoder/decoder) is used to suggest an alternative.
 * NOTE(review): the declaration of 'codec' is elided in this view. */
1343 static void assert_codec_experimental(AVCodecContext *c, int encoder)
1345 const char *codec_string = encoder ? "encoder" : "decoder";
1347 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
1348 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1349 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
1350 "results.\nAdd '-strict experimental' if you want to use it.\n",
1351 codec_string, c->codec->name);
/* suggest a non-experimental implementation of the same codec id, if any */
1352 codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
1353 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
1354 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
1355 codec_string, codec->name);
1361 * Update the requested input sample format based on the output sample format.
1362 * This is currently only used to request float output from decoders which
1363 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
1364 * Ideally this will be removed in the future when decoders do not do format
1365 * conversion and only output in their native format.
1367 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
1368 AVCodecContext *enc)
1370 /* if sample formats match or a decoder sample format has already been
1371 requested, just return */
1372 if (enc->sample_fmt == dec->sample_fmt ||
1373 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
1376 /* if decoder supports more than one output format */
1377 if (dec_codec && dec_codec->sample_fmts &&
1378 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
1379 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
1380 const enum AVSampleFormat *p;
1381 int min_dec = -1, min_inc = -1;
1383 /* find a matching sample format in the encoder */
1384 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
1385 if (*p == enc->sample_fmt) {
1386 dec->request_sample_fmt = *p;
1388 } else if (*p > enc->sample_fmt) {
1389 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
1391 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
1394 /* if none match, provide the one that matches quality closest */
1395 dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
1396 enc->sample_fmt - min_dec;
/* Run the stream's bitstream filters over 'pkt' (if any) and hand the
 * result to av_interleaved_write_frame() on muxer 's'. Also enforces
 * -frames limits for non-video streams (see comment below). */
1400 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
1402 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
1403 AVCodecContext *avctx = ost->st->codec;
/*
1407 * Audio encoders may split the packets -- #frames in != #packets out.
1408 * But there is no reordering, so we can limit the number of output packets
1409 * by simply dropping them here.
1410 * Counting encoded video frames needs to be done separately because of
1411 * reordering, see do_video_out()
*/
1413 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
1414 if (ost->frame_number >= ost->max_frames) {
/* past the -frames limit: drop the packet instead of writing it */
1415 av_free_packet(pkt);
1418 ost->frame_number++;
/* apply each bitstream filter in the chain; the filter may allocate a
 * new payload, in which case the old packet is released and the new
 * buffer is given packet ownership via av_destruct_packet */
1422 AVPacket new_pkt = *pkt;
1423 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
1424 &new_pkt.data, &new_pkt.size,
1425 pkt->data, pkt->size,
1426 pkt->flags & AV_PKT_FLAG_KEY);
1428 av_free_packet(pkt);
1429 new_pkt.destruct = av_destruct_packet;
1431 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
1432 bsfc->filter->name, pkt->stream_index,
1433 avctx->codec ? avctx->codec->name : "copy");
/* retarget the packet at the output stream and interleave-write it */
1443 pkt->stream_index = ost->index;
1444 ret = av_interleaved_write_frame(s, pkt);
1446 print_error("av_interleaved_write_frame()", ret);
/* Return whether 'ost' is still within its output file's -t recording time.
 * Compares the stream's elapsed pts (sync_opts - first_pts, in the encoder
 * time base) against of->recording_time (in AV_TIME_BASE units); on overrun
 * the stream is flagged past its recording time. */
1451 static int check_recording_time(OutputStream *ost)
1453 OutputFile *of = output_files[ost->file_index];
1455 if (of->recording_time != INT64_MAX &&
1456 av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
1457 AV_TIME_BASE_Q) >= 0) {
1458 ost->is_past_recording_time = 1;
/* Encode one audio frame for 'ost' and write the resulting packet(s).
 * Maintains ost->sync_opts as a running sample count and rescales packet
 * timestamps from the encoder time base to the stream time base. */
1464 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
1467 AVCodecContext *enc = ost->st->codec;
1471 av_init_packet(&pkt);
/* stop encoding once past the file's recording time */
1475 if (!check_recording_time(ost))
/* with no pts from the filter (or negative audio sync), continue the
 * running sample counter instead */
1478 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1479 frame->pts = ost->sync_opts;
1480 ost->sync_opts = frame->pts + frame->nb_samples;
1482 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
1483 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* rescale encoder-timebase timestamps to the muxer stream time base */
1488 if (pkt.pts != AV_NOPTS_VALUE)
1489 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1490 if (pkt.dts != AV_NOPTS_VALUE)
1491 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1492 if (pkt.duration > 0)
1493 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1495 write_frame(s, &pkt, ost);
/* global byte counter used by print_report() */
1497 audio_size += pkt.size;
/* Optionally deinterlace a decoded picture in-place (when -deinterlace is
 * set). On success *picture is replaced by the deinterlaced copy; the
 * temporary buffer ownership is returned to the caller via *bufp
 * (presumably — the *bufp assignment is elided in this view; confirm). */
1501 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1503 AVCodecContext *dec;
1504 AVPicture *picture2;
1505 AVPicture picture_tmp;
1508 dec = ist->st->codec;
1510 /* deinterlace : must be done before any resize */
1511 if (do_deinterlace) {
1514 /* create temporary picture */
1515 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1516 buf = av_malloc(size);
1520 picture2 = &picture_tmp;
1521 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1523 if (avpicture_deinterlace(picture2, picture,
1524 dec->pix_fmt, dec->width, dec->height) < 0) {
1525 /* if error, do not deinterlace */
1526 av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
/* shallow-copy the (possibly deinterlaced) plane pointers back */
1535 if (picture != picture2)
1536 *picture = *picture2;
/* Encode an AVSubtitle and write the resulting packet(s) to muxer 's'.
 * DVB subtitles are emitted twice ('nb' = 2): one packet draws the
 * subtitle, a later one clears it (see the XXX note below). */
1540 static void do_subtitle_out(AVFormatContext *s,
/* reused static encode buffer — allocated once, never freed (intentional) */
1546 static uint8_t *subtitle_out = NULL;
1547 int subtitle_out_max_size = 1024 * 1024;
1548 int subtitle_out_size, nb, i;
1549 AVCodecContext *enc;
1552 if (pts == AV_NOPTS_VALUE) {
1553 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1559 enc = ost->st->codec;
1561 if (!subtitle_out) {
1562 subtitle_out = av_malloc(subtitle_out_max_size);
1565 /* Note: DVB subtitle need one packet to draw them and one other
1566 packet to clear them */
1567 /* XXX: signal it in the codec context ? */
1568 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1573 for (i = 0; i < nb; i++) {
1574 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
1575 if (!check_recording_time(ost))
/* normalize timing: fold start_display_time into pts so the encoder
 * sees start_display_time == 0 */
1578 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1579 // start_display_time is required to be 0
1580 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1581 sub->end_display_time -= sub->start_display_time;
1582 sub->start_display_time = 0;
1583 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1584 subtitle_out_max_size, sub);
1585 if (subtitle_out_size < 0) {
1586 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1590 av_init_packet(&pkt);
1591 pkt.data = subtitle_out;
1592 pkt.size = subtitle_out_size;
1593 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1594 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1595 /* XXX: the pts correction is handled here. Maybe handling
1596 it in the codec would be better */
/* 90 kHz ticks per millisecond of display time */
1598 pkt.pts += 90 * sub->start_display_time;
1600 pkt.pts += 90 * sub->end_display_time;
1602 write_frame(s, &pkt, ost);
/* Encode one video frame for 'ost' (or pass it through raw for
 * AVFMT_RAWPICTURE muxers) and write the resulting packet.
 * Handles video sync (frame dropping), forced keyframes, same_quant,
 * interlacing flags and two-pass stats logging. */
1606 static void do_video_out(AVFormatContext *s,
1608 AVFrame *in_picture,
1609 int *frame_size, float quality)
1611 int ret, format_video_sync;
1613 AVCodecContext *enc = ost->st->codec;
/* resolve VSYNC_AUTO from the muxer's timestamp capabilities */
1617 format_video_sync = video_sync_method;
1618 if (format_video_sync == VSYNC_AUTO)
1619 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
1620 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
/* drop frames whose pts is behind the output clock (unless passthrough) */
1621 if (format_video_sync != VSYNC_PASSTHROUGH &&
1622 ost->frame_number &&
1623 in_picture->pts != AV_NOPTS_VALUE &&
1624 in_picture->pts < ost->sync_opts) {
1626 av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1630 if (in_picture->pts == AV_NOPTS_VALUE)
1631 in_picture->pts = ost->sync_opts;
1632 ost->sync_opts = in_picture->pts;
1635 if (!ost->frame_number)
1636 ost->first_pts = in_picture->pts;
1638 av_init_packet(&pkt);
/* honor both -t and -frames limits */
1642 if (!check_recording_time(ost) ||
1643 ost->frame_number >= ost->max_frames)
1646 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1647 enc->codec->id == CODEC_ID_RAWVIDEO) {
1648 /* raw pictures are written as AVPicture structure to
1649 avoid any copies. We support temporarily the older
*/
1651 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1652 enc->coded_frame->top_field_first = in_picture->top_field_first;
1653 pkt.data = (uint8_t *)in_picture;
1654 pkt.size = sizeof(AVPicture);
1655 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1656 pkt.flags |= AV_PKT_FLAG_KEY;
1658 write_frame(s, &pkt, ost);
1661 AVFrame big_picture;
/* shallow copy; encoder input must not modify the filter's frame */
1663 big_picture = *in_picture;
1664 /* better than nothing: use input picture interlaced
*/
1666 big_picture.interlaced_frame = in_picture->interlaced_frame;
1667 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1668 if (ost->top_field_first == -1)
1669 big_picture.top_field_first = in_picture->top_field_first;
1671 big_picture.top_field_first = !!ost->top_field_first;
1674 /* handles same_quant here. This is not correct because it may
1675 not be a global option */
1676 big_picture.quality = quality;
1677 if (!enc->me_threshold)
1678 big_picture.pict_type = 0;
/* force an I-frame at each -force_key_frames timestamp */
1679 if (ost->forced_kf_index < ost->forced_kf_count &&
1680 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1681 big_picture.pict_type = AV_PICTURE_TYPE_I;
1682 ost->forced_kf_index++;
1684 ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
1686 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* rescale encoder-timebase timestamps to the muxer stream time base */
1691 if (pkt.pts != AV_NOPTS_VALUE)
1692 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1693 if (pkt.dts != AV_NOPTS_VALUE)
1694 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1696 write_frame(s, &pkt, ost);
1697 *frame_size = pkt.size;
1698 video_size += pkt.size;
1700 /* if two pass, output log */
1701 if (ost->logfile && enc->stats_out) {
1702 fprintf(ost->logfile, "%s", enc->stats_out);
/*
1708 * For video, number of frames in == number of packets out.
1709 * But there may be reordering, so we can't throw away frames on encoder
1710 * flush, we need to limit them here, before they go into encoder.
*/
1712 ost->frame_number++;
1715 static double psnr(double d)
1717 return -10.0 * log(d) / log(10.0);
/* Append one line of per-frame video statistics (-vstats) for 'ost' to the
 * global vstats_file: frame number, quantizer, optional PSNR, frame size,
 * instantaneous and average bitrate, and picture type. */
1720 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1723 AVCodecContext *enc;
1725 double ti1, bitrate, avg_bitrate;
1727 /* this is executed just the first time do_video_stats is called */
1729 vstats_file = fopen(vstats_filename, "w");
1736 enc = ost->st->codec;
1737 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1738 frame_number = ost->frame_number;
/* quality is stored as lambda; convert back to a QP-like value */
1739 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1740 if (enc->flags&CODEC_FLAG_PSNR)
1741 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1743 fprintf(vstats_file,"f_size= %6d ", frame_size);
1744 /* compute pts value */
1745 ti1 = ost->sync_opts * av_q2d(enc->time_base);
/* per-frame bitrate vs. running average over elapsed output time */
1749 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1750 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1751 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1752 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1753 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1757 /* check for new output on any of the filtergraphs */
/* For every encoded output stream fed by a filtergraph, pull all currently
 * available frames from its buffersink, rescale their pts into the encoder
 * time base (offset by the output file's -ss start time), and dispatch to
 * do_video_out()/do_audio_out(). Returns 0 or a negative AVERROR. */
1758 static int poll_filters(void)
1760 AVFilterBufferRef *picref;
1761 AVFrame *filtered_frame = NULL;
1764 for (i = 0; i < nb_output_streams; i++) {
1765 OutputStream *ost = output_streams[i];
1766 OutputFile *of = output_files[ost->file_index];
/* only filter-fed streams that are still within their recording time */
1769 if (!ost->filter || ost->is_past_recording_time)
/* lazily allocate one reusable AVFrame per output stream */
1772 if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1773 return AVERROR(ENOMEM);
1775 avcodec_get_frame_defaults(ost->filtered_frame);
1776 filtered_frame = ost->filtered_frame;
/* fixed-frame-size audio encoders must be fed exact frame_size chunks */
1779 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1780 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
1781 ret = av_buffersink_read_samples(ost->filter->filter, &picref,
1782 ost->st->codec->frame_size);
1784 ret = av_buffersink_read(ost->filter->filter, &picref);
1789 avfilter_copy_buf_props(filtered_frame, picref);
/* filter-output time base -> encoder time base, minus -ss offset */
1790 if (picref->pts != AV_NOPTS_VALUE)
1791 filtered_frame->pts = av_rescale_q(picref->pts,
1792 ost->filter->filter->inputs[0]->time_base,
1793 ost->st->codec->time_base) -
1794 av_rescale_q(of->start_time,
1796 ost->st->codec->time_base);
/* drop frames that precede the requested start time */
1798 if (of->start_time && filtered_frame->pts < of->start_time) {
1799 avfilter_unref_buffer(picref);
1803 switch (ost->filter->filter->inputs[0]->type) {
1804 case AVMEDIA_TYPE_VIDEO:
1805 if (!ost->frame_aspect_ratio)
1806 ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
1808 do_video_out(of->ctx, ost, filtered_frame, &frame_size,
1809 same_quant ? ost->last_quality :
1810 ost->st->codec->global_quality);
1811 if (vstats_filename && frame_size)
1812 do_video_stats(of->ctx, ost, frame_size);
1814 case AVMEDIA_TYPE_AUDIO:
1815 do_audio_out(of->ctx, ost, filtered_frame);
1818 // TODO support subtitle filters
1822 avfilter_unref_buffer(picref);
/* Print the periodic (every ~0.5 s) or final progress line: frame count,
 * fps, quantizer, optional QP histogram and PSNR, total size, time and
 * bitrate. On the last report also prints the muxing-overhead summary. */
1828 static void print_report(int is_last_report, int64_t timer_start)
1832 AVFormatContext *oc;
1834 AVCodecContext *enc;
1835 int frame_number, vid, i;
1836 double bitrate, ti1, pts;
1837 static int64_t last_time = -1;
1838 static int qp_histogram[52];
1840 if (!print_stats && !is_last_report)
1843 if (!is_last_report) {
1845 /* display the report every 0.5 seconds */
1846 cur_time = av_gettime();
1847 if (last_time == -1) {
1848 last_time = cur_time;
1851 if ((cur_time - last_time) < 500000)
1853 last_time = cur_time;
/* progress is measured against the first output file only */
1857 oc = output_files[0]->ctx;
1859 total_size = avio_size(oc->pb);
1860 if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
1861 total_size = avio_tell(oc->pb);
1866 for (i = 0; i < nb_output_streams; i++) {
1868 ost = output_streams[i];
1869 enc = ost->st->codec;
1870 if (!ost->stream_copy && enc->coded_frame)
1871 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1872 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1873 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
/* detailed stats are printed for the first video stream only */
1875 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1876 float t = (av_gettime() - timer_start) / 1000000.0;
1878 frame_number = ost->frame_number;
1879 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1880 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
1882 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* accumulate and print a log2-scaled QP histogram (qp_hist) */
1886 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1888 for (j = 0; j < 32; j++)
1889 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
1891 if (enc->flags&CODEC_FLAG_PSNR) {
1893 double error, error_sum = 0;
1894 double scale, scale_sum = 0;
1895 char type[3] = { 'Y','U','V' };
1896 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1897 for (j = 0; j < 3; j++) {
/* last report: accumulated error over all frames; otherwise per-frame */
1898 if (is_last_report) {
1899 error = enc->error[j];
1900 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1902 error = enc->coded_frame->error[j];
1903 scale = enc->width * enc->height * 255.0 * 255.0;
1909 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1911 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1915 /* compute min output value */
1916 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1917 if ((pts < ti1) && (pts > 0))
1923 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1925 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1926 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1927 (double)total_size / 1024, ti1, bitrate);
1929 if (nb_frames_dup || nb_frames_drop)
1930 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1931 nb_frames_dup, nb_frames_drop);
/* '\r' keeps the periodic report on a single console line */
1933 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
1937 if (is_last_report) {
1938 int64_t raw= audio_size + video_size + extra_size;
1939 av_log(NULL, AV_LOG_INFO, "\n");
1940 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1941 video_size / 1024.0,
1942 audio_size / 1024.0,
1943 extra_size / 1024.0,
1944 100.0 * (total_size - raw) / raw
/* Drain all encoders at EOF: repeatedly call encode(NULL) for each stream
 * that was actually encoding, writing any delayed packets, until each
 * encoder signals it has nothing more to emit. Raw-picture video and
 * "pcm-ish" audio (frame_size <= 1) have no delayed data and are skipped. */
1949 static void flush_encoders(void)
1953 for (i = 0; i < nb_output_streams; i++) {
1954 OutputStream *ost = output_streams[i];
1955 AVCodecContext *enc = ost->st->codec;
1956 AVFormatContext *os = output_files[ost->file_index]->ctx;
1957 int stop_encoding = 0;
1959 if (!ost->encoding_needed)
1962 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1964 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
/* pick the media-appropriate flush entry point */
1968 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1972 switch (ost->st->codec->codec_type) {
1973 case AVMEDIA_TYPE_AUDIO:
1974 encode = avcodec_encode_audio2;
1978 case AVMEDIA_TYPE_VIDEO:
1979 encode = avcodec_encode_video2;
1990 av_init_packet(&pkt);
/* NULL frame asks the encoder for buffered (delayed) packets */
1994 ret = encode(enc, &pkt, NULL, &got_packet);
1996 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
2000 if (ost->logfile && enc->stats_out) {
2001 fprintf(ost->logfile, "%s", enc->stats_out);
2007 if (pkt.pts != AV_NOPTS_VALUE)
2008 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
2009 if (pkt.dts != AV_NOPTS_VALUE)
2010 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
2011 write_frame(os, &pkt, ost);
/*
2021 * Check whether a packet from ist should be written into ost at this time
*/
/* True only if ost is mapped to exactly this input stream and the input
 * has progressed past the output file's -ss start time. */
2023 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2025 OutputFile *of = output_files[ost->file_index];
2026 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2028 if (ost->source_index != ist_index)
2031 if (of->start_time && ist->last_dts < of->start_time)
/* Copy one input packet to 'ost' without re-encoding (-codec copy):
 * rescale timestamps into the output stream time base (offset by -ss),
 * enforce -t, optionally run the parser's header conversion, and mux. */
2037 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2039 OutputFile *of = output_files[ost->file_index];
2040 int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
2043 av_init_packet(&opkt);
/* by default, skip leading non-keyframes so decoding can start cleanly */
2045 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2046 !ost->copy_initial_nonkeyframes)
2049 if (of->recording_time != INT64_MAX &&
2050 ist->last_dts >= of->recording_time + of->start_time) {
2051 ost->is_past_recording_time = 1;
2055 /* force the input stream PTS */
2056 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
2057 audio_size += pkt->size;
2058 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2059 video_size += pkt->size;
2063 if (pkt->pts != AV_NOPTS_VALUE)
2064 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
2066 opkt.pts = AV_NOPTS_VALUE;
/* missing dts: synthesize one from the input's running dts estimate */
2068 if (pkt->dts == AV_NOPTS_VALUE)
2069 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
2071 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
2072 opkt.dts -= ost_tb_start_time;
2074 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
2075 opkt.flags = pkt->flags;
2077 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2078 if ( ost->st->codec->codec_id != CODEC_ID_H264
2079 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
2080 && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
2081 && ost->st->codec->codec_id != CODEC_ID_VC1
/* parser may rewrite headers (e.g. AVI -> raw); nonzero means it
 * allocated a new payload that opkt must own */
2083 if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
2084 opkt.destruct = av_destruct_packet;
2086 opkt.data = pkt->data;
2087 opkt.size = pkt->size;
2090 write_frame(of->ctx, &opkt, ost);
2091 ost->st->codec->frame_number++;
2092 av_free_packet(&opkt);
/* -re rate emulation: compare the stream's last dts (converted to
 * microseconds) with wall-clock time elapsed since the stream started.
 * The sleep call itself is elided in this view — presumably
 * usleep(pts - now) when ahead of real time; confirm. */
2095 static void rate_emu_sleep(InputStream *ist)
2097 if (input_files[ist->file_index]->rate_emu) {
2098 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2099 int64_t now = av_gettime() - ist->start;
/* If the decoder reported no channel layout, pick the default layout for
 * its channel count and warn the user about the guess. Return value
 * semantics: the failure path (default layout also unknown) is elided
 * here — presumably returns 0, with 1 on success; confirm. */
2105 static int guess_input_channel_layout(InputStream *ist)
2107 AVCodecContext *dec = ist->st->codec;
2109 if (!dec->channel_layout) {
2110 char layout_name[256];
2112 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2113 if (!dec->channel_layout)
2115 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2116 dec->channels, dec->channel_layout);
2117 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2118 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet into ist->decoded_frame, apply -vol volume
 * scaling in-place, detect sample-rate/format/channel-layout changes
 * (reconfiguring affected filtergraphs), and feed the frame into every
 * filtergraph input attached to this stream. */
2123 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2125 AVFrame *decoded_frame;
2126 AVCodecContext *avctx = ist->st->codec;
2127 int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
2128 int i, ret, resample_changed;
/* lazily allocate one reusable decode frame per input stream */
2130 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2131 return AVERROR(ENOMEM);
2133 avcodec_get_frame_defaults(ist->decoded_frame);
2134 decoded_frame = ist->decoded_frame;
2136 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
2142 /* no audio frame */
/* signal EOF to the attached filter sources */
2144 for (i = 0; i < ist->nb_filters; i++)
2145 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2149 /* if the decoder provides a pts, use it instead of the last packet pts.
2150 the decoder could be delaying output by a packet or more. */
2151 if (decoded_frame->pts != AV_NOPTS_VALUE)
2152 ist->next_dts = decoded_frame->pts;
2153 else if (pkt->pts != AV_NOPTS_VALUE) {
2154 decoded_frame->pts = pkt->pts;
2155 pkt->pts = AV_NOPTS_VALUE;
2158 // preprocess audio (volume)
/* 256 == unity gain; scale samples in-place per sample format */
2159 if (audio_volume != 256) {
2160 int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
2161 void *samples = decoded_frame->data[0];
2162 switch (avctx->sample_fmt) {
2163 case AV_SAMPLE_FMT_U8:
2165 uint8_t *volp = samples;
2166 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
/* U8 is offset-binary: re-center around 128 before scaling */
2167 int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
2168 *volp++ = av_clip_uint8(v);
2172 case AV_SAMPLE_FMT_S16:
2174 int16_t *volp = samples;
2175 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2176 int v = ((*volp) * audio_volume + 128) >> 8;
2177 *volp++ = av_clip_int16(v);
2181 case AV_SAMPLE_FMT_S32:
2183 int32_t *volp = samples;
2184 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
/* widen to 64 bits to avoid overflow before the >> 8 */
2185 int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
2186 *volp++ = av_clipl_int32(v);
2190 case AV_SAMPLE_FMT_FLT:
2192 float *volp = samples;
2193 float scale = audio_volume / 256.f;
2194 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2199 case AV_SAMPLE_FMT_DBL:
2201 double *volp = samples;
2202 double scale = audio_volume / 256.;
2203 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2209 av_log(NULL, AV_LOG_FATAL,
2210 "Audio volume adjustment on sample format %s is not supported.\n",
2211 av_get_sample_fmt_name(ist->st->codec->sample_fmt));
2216 rate_emu_sleep(ist);
/* detect mid-stream parameter changes that require filter reconfig */
2218 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2219 ist->resample_channels != avctx->channels ||
2220 ist->resample_channel_layout != decoded_frame->channel_layout ||
2221 ist->resample_sample_rate != decoded_frame->sample_rate;
2222 if (resample_changed) {
2223 char layout1[64], layout2[64];
2225 if (!guess_input_channel_layout(ist)) {
2226 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2227 "layout for Input Stream #%d.%d\n", ist->file_index,
2231 decoded_frame->channel_layout = avctx->channel_layout;
2233 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2234 ist->resample_channel_layout);
2235 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2236 decoded_frame->channel_layout);
2238 av_log(NULL, AV_LOG_INFO,
2239 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2240 ist->file_index, ist->st->index,
2241 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2242 ist->resample_channels, layout1,
2243 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2244 avctx->channels, layout2);
/* remember the new parameters, then rebuild every graph fed by ist */
2246 ist->resample_sample_fmt = decoded_frame->format;
2247 ist->resample_sample_rate = decoded_frame->sample_rate;
2248 ist->resample_channel_layout = decoded_frame->channel_layout;
2249 ist->resample_channels = avctx->channels;
2251 for (i = 0; i < nb_filtergraphs; i++)
2252 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2253 configure_filtergraph(filtergraphs[i]) < 0) {
2254 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2259 for (i = 0; i < ist->nb_filters; i++)
2260 av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
/* Decode one video packet into ist->decoded_frame, fix up its pts, run
 * optional deinterlacing, detect size/pixfmt changes (reconfiguring the
 * affected filtergraphs), and push the frame into every attached
 * filtergraph input — zero-copy via a buffer ref when DR1 is in use. */
2265 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2267 AVFrame *decoded_frame;
2268 void *buffer_to_free = NULL;
2269 int i, ret = 0, resample_changed;
2272 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2273 return AVERROR(ENOMEM);
2275 avcodec_get_frame_defaults(ist->decoded_frame);
2276 decoded_frame = ist->decoded_frame;
2278 ret = avcodec_decode_video2(ist->st->codec,
2279 decoded_frame, got_output, pkt);
/* remember the decoder's quantizer so -same_quant can reuse it */
2283 quality = same_quant ? decoded_frame->quality : 0;
2285 /* no picture yet */
/* signal EOF to the attached filter sources */
2287 for (i = 0; i < ist->nb_filters; i++)
2288 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
/* reconcile possibly-missing pkt_pts/pkt_dts into a best-guess pts */
2291 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
2292 decoded_frame->pkt_dts);
2294 pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
2296 rate_emu_sleep(ist);
/* container-level aspect ratio overrides the codec-level one */
2298 if (ist->st->sample_aspect_ratio.num)
2299 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2301 resample_changed = ist->resample_width != decoded_frame->width ||
2302 ist->resample_height != decoded_frame->height ||
2303 ist->resample_pix_fmt != decoded_frame->format;
2304 if (resample_changed) {
2305 av_log(NULL, AV_LOG_INFO,
2306 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2307 ist->file_index, ist->st->index,
2308 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2309 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2311 ist->resample_width = decoded_frame->width;
2312 ist->resample_height = decoded_frame->height;
2313 ist->resample_pix_fmt = decoded_frame->format;
2315 for (i = 0; i < nb_filtergraphs; i++)
2316 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2317 configure_filtergraph(filtergraphs[i]) < 0) {
2318 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2323 for (i = 0; i < ist->nb_filters; i++) {
2324 // XXX what an ugly hack
2325 if (ist->filters[i]->graph->nb_outputs == 1)
2326 ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
/* DR1 path: wrap the decoder's buffer in a filter buffer ref so the
 * frame enters the graph without a copy; the pool keeps it alive */
2328 if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
2329 FrameBuffer *buf = decoded_frame->opaque;
2330 AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
2331 decoded_frame->data, decoded_frame->linesize,
2332 AV_PERM_READ | AV_PERM_PRESERVE,
2333 ist->st->codec->width, ist->st->codec->height,
2334 ist->st->codec->pix_fmt);
2336 avfilter_copy_frame_props(fb, decoded_frame);
2337 fb->buf->priv = buf;
2338 fb->buf->free = filter_release_buffer;
2341 av_buffersrc_buffer(ist->filters[i]->filter, fb);
2343 av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
2346 av_free(buffer_to_free);
/* Decode one subtitle packet and re-encode it into every output stream
 * mapped from this input that needs encoding, then free the AVSubtitle. */
2350 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2352 AVSubtitle subtitle;
2353 int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2354 &subtitle, got_output, pkt);
2360 rate_emu_sleep(ist);
2362 for (i = 0; i < nb_output_streams; i++) {
2363 OutputStream *ost = output_streams[i];
2365 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2368 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
2371 avsubtitle_free(&subtitle);
2375 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher for an input stream: maintains the
 * last_dts/next_dts running timestamps, loops the decoder over the packet
 * (packets may contain several frames), and for non-decoded streams
 * advances timestamps manually and stream-copies to matching outputs. */
2376 static int output_packet(InputStream *ist, const AVPacket *pkt)
2382 if (ist->next_dts == AV_NOPTS_VALUE)
2383 ist->next_dts = ist->last_dts;
2387 av_init_packet(&avpkt);
2395 if (pkt->dts != AV_NOPTS_VALUE)
2396 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2398 // while we have more to decode or while the decoder did output something on EOF
2399 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2403 ist->last_dts = ist->next_dts;
2405 if (avpkt.size && avpkt.size != pkt->size) {
2406 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2407 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2408 ist->showed_multi_packet_warning = 1;
2411 switch (ist->st->codec->codec_type) {
2412 case AVMEDIA_TYPE_AUDIO:
2413 ret = decode_audio (ist, &avpkt, &got_output);
2415 case AVMEDIA_TYPE_VIDEO:
2416 ret = decode_video (ist, &avpkt, &got_output);
/* advance next_dts by the best available estimate of frame
 * duration: packet duration, then r_frame_rate, then the codec
 * time base scaled by repeat_pict/ticks_per_frame */
2418 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2419 else if (ist->st->r_frame_rate.num)
2420 ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
2421 ist->st->r_frame_rate.num},
2423 else if (ist->st->codec->time_base.num != 0) {
2424 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
2425 ist->st->codec->ticks_per_frame;
2426 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
2429 case AVMEDIA_TYPE_SUBTITLE:
2430 ret = transcode_subtitles(ist, &avpkt, &got_output);
2438 // touch data and size only if not EOF
2448 /* handle stream copy */
2449 if (!ist->decoding_needed) {
2450 rate_emu_sleep(ist);
2451 ist->last_dts = ist->next_dts;
/* without decoding, estimate the next dts from codec parameters */
2452 switch (ist->st->codec->codec_type) {
2453 case AVMEDIA_TYPE_AUDIO:
2454 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2455 ist->st->codec->sample_rate;
2457 case AVMEDIA_TYPE_VIDEO:
2458 if (ist->st->codec->time_base.num != 0) {
2459 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2460 ist->next_dts += ((int64_t)AV_TIME_BASE *
2461 ist->st->codec->time_base.num * ticks) /
2462 ist->st->codec->time_base.den;
/* fan the packet out to every copy-mode output fed by this stream */
2467 for (i = 0; pkt && i < nb_output_streams; i++) {
2468 OutputStream *ost = output_streams[i];
2470 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2473 do_streamcopy(ist, ost, pkt);
/* Print an SDP session description covering all output files to stdout
 * (used for streaming setups, -sdp_file style workflows). */
2479 static void print_sdp(void)
2483 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
2487 for (i = 0; i < nb_output_files; i++)
2488 avc[i] = output_files[i]->ctx;
2490 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
2491 printf("SDP:\n%s\n", sdp);
/* Open the decoder for input stream 'ist_index' (when decoding is needed):
 * propagate requested sample formats from mapped encoders, install the
 * DR1 buffer-pool callbacks for capable video decoders, open the codec
 * with the stream's options, and initialize the stream's timestamp state.
 * On failure, writes a message into 'error' and returns a negative code. */
2496 static int init_input_stream(int ist_index, char *error, int error_len)
2499 InputStream *ist = input_streams[ist_index];
2500 if (ist->decoding_needed) {
2501 AVCodec *codec = ist->dec;
2503 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2504 ist->st->codec->codec_id, ist->file_index, ist->st->index);
2505 return AVERROR(EINVAL);
2508 /* update requested sample format for the decoder based on the
2509 corresponding encoder sample format */
2510 for (i = 0; i < nb_output_streams; i++) {
2511 OutputStream *ost = output_streams[i];
2512 if (ost->source_index == ist_index) {
2513 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
/* direct-rendering: let the decoder write into our buffer pool */
2518 if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
2519 ist->st->codec->get_buffer = codec_get_buffer;
2520 ist->st->codec->release_buffer = codec_release_buffer;
2521 ist->st->codec->opaque = ist;
2524 if (!av_dict_get(ist->opts, "threads", NULL, 0))
2525 av_dict_set(&ist->opts, "threads", "auto", 0);
2526 if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2527 snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2528 ist->file_index, ist->st->index);
2529 return AVERROR(EINVAL);
2531 assert_codec_experimental(ist->st->codec, 0);
2532 assert_avoptions(ist->opts);
/* start last_dts before zero to account for B-frame decoder delay */
2535 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2536 ist->next_dts = AV_NOPTS_VALUE;
2537 init_pts_correction(&ist->pts_ctx);
/* Return the input stream feeding the given output stream: either its
 * directly mapped source, or — for outputs fed by a complex filtergraph —
 * the first graph input of the same media type.
 * NOTE(review): elided view; the fallthrough return (presumably NULL)
 * is not visible here. */
2543 static InputStream *get_input_stream(OutputStream *ost)
2545 if (ost->source_index >= 0)
2546 return input_streams[ost->source_index];
2549 FilterGraph *fg = ost->filter->graph;
2552 for (i = 0; i < fg->nb_inputs; i++)
2553 if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
2554 return fg->inputs[i]->ist;
/* transcode_init(): one-time setup before the main transcode loop.
 * For every output stream, derive encoding parameters (copying them from
 * the input in the stream-copy case, or from the filtergraph output when
 * encoding); open all encoders and decoders; discard unused programs;
 * write output file headers; and print the stream mapping.
 * Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): this source view is elided (original line numbers are
 * non-contiguous) — many braces, `break`s and error-path statements are
 * not visible here, so comments below describe only the visible lines. */
2560 static int transcode_init(void)
2562 int ret = 0, i, j, k;
2563 AVFormatContext *oc;
2564 AVCodecContext *codec, *icodec;
/* record a wallclock start time per stream for -re rate emulation */
2570 /* init framerate emulation */
2571 for (i = 0; i < nb_input_files; i++) {
2572 InputFile *ifile = input_files[i];
2573 if (ifile->rate_emu)
2574 for (j = 0; j < ifile->nb_streams; j++)
2575 input_streams[j + ifile->ist_index]->start = av_gettime();
2578 /* output stream init */
2579 for (i = 0; i < nb_output_files; i++) {
2580 oc = output_files[i]->ctx;
2581 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2582 av_dump_format(oc, i, oc->filename, 1);
2583 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2584 return AVERROR(EINVAL);
2588 /* init complex filtergraphs */
2589 for (i = 0; i < nb_filtergraphs; i++)
2590 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2593 /* for each output stream, we compute the right encoding parameters */
2594 for (i = 0; i < nb_output_streams; i++) {
2595 ost = output_streams[i];
2596 oc = output_files[ost->file_index]->ctx;
2597 ist = get_input_stream(ost);
2599 if (ost->attachment_filename)
2602 codec = ost->st->codec;
2605 icodec = ist->st->codec;
2607 ost->st->disposition = ist->st->disposition;
2608 codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2609 codec->chroma_sample_location = icodec->chroma_sample_location;
2612 if (ost->stream_copy) {
2613 uint64_t extra_size;
2615 av_assert0(ist && !ost->filter);
/* computed in 64 bits so the overflow check below is reliable */
2617 extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2619 if (extra_size > INT_MAX) {
2620 return AVERROR(EINVAL);
2623 /* if stream_copy is selected, no need to decode or encode */
2624 codec->codec_id = icodec->codec_id;
2625 codec->codec_type = icodec->codec_type;
/* keep the input codec tag only when the output container either has no
 * tag table or agrees with the (id, tag) pairing */
2627 if (!codec->codec_tag) {
2628 if (!oc->oformat->codec_tag ||
2629 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2630 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2631 codec->codec_tag = icodec->codec_tag;
2634 codec->bit_rate = icodec->bit_rate;
2635 codec->rc_max_rate = icodec->rc_max_rate;
2636 codec->rc_buffer_size = icodec->rc_buffer_size;
2637 codec->field_order = icodec->field_order;
2638 codec->extradata = av_mallocz(extra_size);
2639 if (!codec->extradata) {
2640 return AVERROR(ENOMEM);
/* NOTE(review): memcpy from icodec->extradata — presumably non-NULL
 * whenever extradata_size > 0; confirm for the size == 0 case. */
2642 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2643 codec->extradata_size = icodec->extradata_size;
/* fold ticks_per_frame into the copied time base (field- vs
 * frame-based codecs), then normalize */
2645 codec->time_base = icodec->time_base;
2646 codec->time_base.num *= icodec->ticks_per_frame;
2647 av_reduce(&codec->time_base.num, &codec->time_base.den,
2648 codec->time_base.num, codec->time_base.den, INT_MAX);
2650 codec->time_base = ist->st->time_base;
2652 switch (codec->codec_type) {
2653 case AVMEDIA_TYPE_AUDIO:
/* -vol needs decoded samples, impossible with stream copy */
2654 if (audio_volume != 256) {
2655 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2658 codec->channel_layout = icodec->channel_layout;
2659 codec->sample_rate = icodec->sample_rate;
2660 codec->channels = icodec->channels;
2661 codec->frame_size = icodec->frame_size;
2662 codec->audio_service_type = icodec->audio_service_type;
2663 codec->block_align = icodec->block_align;
2665 case AVMEDIA_TYPE_VIDEO:
2666 codec->pix_fmt = icodec->pix_fmt;
2667 codec->width = icodec->width;
2668 codec->height = icodec->height;
2669 codec->has_b_frames = icodec->has_b_frames;
/* prefer the stream-level aspect ratio, fall back to the codec-level
 * one, else leave it unset (0/1) */
2670 if (!codec->sample_aspect_ratio.num) {
2671 codec->sample_aspect_ratio =
2672 ost->st->sample_aspect_ratio =
2673 ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2674 ist->st->codec->sample_aspect_ratio.num ?
2675 ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2678 case AVMEDIA_TYPE_SUBTITLE:
2679 codec->width = icodec->width;
2680 codec->height = icodec->height;
2682 case AVMEDIA_TYPE_DATA:
2683 case AVMEDIA_TYPE_ATTACHMENT:
2690 /* should only happen when a default codec is not present. */
2691 snprintf(error, sizeof(error), "Automatic encoder selection "
2692 "failed for output stream #%d:%d. Default encoder for "
2693 "format %s is probably disabled. Please choose an "
2694 "encoder manually.\n", ost->file_index, ost->index,
2696 ret = AVERROR(EINVAL);
/* transcoding path: both a decoder and an encoder are required */
2701 ist->decoding_needed = 1;
2702 ost->encoding_needed = 1;
2705 * We want CFR output if and only if one of those is true:
2706 * 1) user specified output framerate with -r
2707 * 2) user specified -vsync cfr
2708 * 3) output format is CFR and the user didn't force vsync to
2709 * something else than CFR
2711 * in such a case, set ost->frame_rate
2713 if (codec->codec_type == AVMEDIA_TYPE_VIDEO &&
2714 !ost->frame_rate.num && ist &&
2715 (video_sync_method == VSYNC_CFR ||
2716 (video_sync_method == VSYNC_AUTO &&
2717 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
2718 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
/* snap to the closest rate the encoder supports, unless -force_fps */
2719 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2720 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2721 ost->frame_rate = ost->enc->supported_framerates[idx];
2726 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2727 codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
2729 fg = init_simple_filtergraph(ist, ost);
2730 if (configure_simple_filtergraph(fg)) {
2731 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* take the final encoding parameters from the filtergraph output pad */
2736 switch (codec->codec_type) {
2737 case AVMEDIA_TYPE_AUDIO:
2738 codec->sample_fmt = ost->filter->filter->inputs[0]->format;
2739 codec->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2740 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2741 codec->channels = av_get_channel_layout_nb_channels(codec->channel_layout);
2742 codec->time_base = (AVRational){ 1, codec->sample_rate };
2744 case AVMEDIA_TYPE_VIDEO:
2745 codec->time_base = ost->filter->filter->inputs[0]->time_base;
2747 codec->width = ost->filter->filter->inputs[0]->w;
2748 codec->height = ost->filter->filter->inputs[0]->h;
2749 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2750 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2751 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2752 ost->filter->filter->inputs[0]->sample_aspect_ratio;
2753 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
/* any geometry/format change invalidates the raw-sample-depth copy */
2755 if (codec->width != icodec->width ||
2756 codec->height != icodec->height ||
2757 codec->pix_fmt != icodec->pix_fmt) {
2758 codec->bits_per_raw_sample = 0;
2762 case AVMEDIA_TYPE_SUBTITLE:
2763 codec->time_base = (AVRational){1, 1000};
/* two-pass encoding: set up the per-stream stats log file */
2770 if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2771 char logfilename[1024];
2774 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2775 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
/* libx264 manages the stats file itself via its "stats" option */
2777 if (!strcmp(ost->enc->name, "libx264")) {
2778 av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2780 if (codec->flags & CODEC_FLAG_PASS1) {
2781 f = fopen(logfilename, "wb");
2783 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2784 logfilename, strerror(errno));
2790 size_t logbuffer_size;
2791 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2792 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
/* ownership of logbuffer passes to the codec context (stats_in) */
2796 codec->stats_in = logbuffer;
2803 /* open each encoder */
2804 for (i = 0; i < nb_output_streams; i++) {
2805 ost = output_streams[i];
2806 if (ost->encoding_needed) {
2807 AVCodec *codec = ost->enc;
2808 AVCodecContext *dec = NULL;
2810 if ((ist = get_input_stream(ost)))
2811 dec = ist->st->codec;
/* propagate the subtitle header (e.g. ASS styles) from decoder to encoder */
2812 if (dec && dec->subtitle_header) {
2813 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2814 if (!ost->st->codec->subtitle_header) {
2815 ret = AVERROR(ENOMEM);
2818 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2819 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2821 if (!av_dict_get(ost->opts, "threads", NULL, 0))
2822 av_dict_set(&ost->opts, "threads", "auto", 0);
2823 if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2824 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2825 ost->file_index, ost->index);
2826 ret = AVERROR(EINVAL);
2829 assert_codec_experimental(ost->st->codec, 1);
2830 assert_avoptions(ost->opts);
/* heuristic: a bitrate below 1000 almost certainly means the user
 * passed kbit/s where bit/s was expected */
2831 if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2832 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2833 "It takes bits/s as argument, not kbits/s\n");
2834 extra_size += ost->st->codec->extradata_size;
2836 if (ost->st->codec->me_threshold)
2837 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2841 /* init input streams */
2842 for (i = 0; i < nb_input_streams; i++)
2843 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2846 /* discard unused programs */
2847 for (i = 0; i < nb_input_files; i++) {
2848 InputFile *ifile = input_files[i];
2849 for (j = 0; j < ifile->ctx->nb_programs; j++) {
2850 AVProgram *p = ifile->ctx->programs[j];
2851 int discard = AVDISCARD_ALL;
/* keep a program only if at least one of its streams is in use */
2853 for (k = 0; k < p->nb_stream_indexes; k++)
2854 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2855 discard = AVDISCARD_DEFAULT;
2858 p->discard = discard;
2862 /* open files and write file headers */
2863 for (i = 0; i < nb_output_files; i++) {
2864 oc = output_files[i]->ctx;
2865 oc->interrupt_callback = int_cb;
2866 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
2868 const char *errbuf_ptr = errbuf;
2869 if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)
2870 errbuf_ptr = strerror(AVUNERROR(ret));
2871 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?): %s", i, errbuf_ptr);
2872 ret = AVERROR(EINVAL);
2875 assert_avoptions(output_files[i]->opts);
/* rtp output prints its SDP; other muxers skip this (elided branch) */
2876 if (strcmp(oc->oformat->name, "rtp")) {
2882 /* dump the file output parameters - cannot be done before in case
2884 for (i = 0; i < nb_output_files; i++) {
2885 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2888 /* dump the stream mapping */
2889 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2890 for (i = 0; i < nb_input_streams; i++) {
2891 ist = input_streams[i];
2893 for (j = 0; j < ist->nb_filters; j++) {
2894 AVFilterLink *link = ist->filters[j]->filter->outputs[0];
/* only complex (user-described) graphs are worth printing here */
2895 if (ist->filters[j]->graph->graph_desc) {
2896 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2897 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2898 link->dst->filter->name);
2899 if (link->dst->input_count > 1)
2900 av_log(NULL, AV_LOG_INFO, ":%s", link->dstpad->name);
2901 if (nb_filtergraphs > 1)
2902 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2903 av_log(NULL, AV_LOG_INFO, "\n");
2908 for (i = 0; i < nb_output_streams; i++) {
2909 ost = output_streams[i];
2911 if (ost->attachment_filename) {
2912 /* an attached file */
2913 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2914 ost->attachment_filename, ost->file_index, ost->index);
2918 if (ost->filter && ost->filter->graph->graph_desc) {
2919 /* output from a complex graph */
2920 AVFilterLink *link = ost->filter->filter->inputs[0];
2921 av_log(NULL, AV_LOG_INFO, " %s", link->src->filter->name);
2922 if (link->src->output_count > 1)
2923 av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name);
2924 if (nb_filtergraphs > 1)
2925 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2927 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2928 ost->index, ost->enc ? ost->enc->name : "?");
/* plain input->output mapping line */
2932 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2933 input_streams[ost->source_index]->file_index,
2934 input_streams[ost->source_index]->st->index,
2937 if (ost->sync_ist != input_streams[ost->source_index])
2938 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2939 ost->sync_ist->file_index,
2940 ost->sync_ist->st->index);
2941 if (ost->stream_copy)
2942 av_log(NULL, AV_LOG_INFO, " (copy)");
2944 av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2945 input_streams[ost->source_index]->dec->name : "?",
2946 ost->enc ? ost->enc->name : "?");
2947 av_log(NULL, AV_LOG_INFO, "\n");
2951 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2963 * The following code is the main loop of the file converter
/* transcode(): the main conversion loop. Repeatedly picks the input
 * stream with the smallest output DTS, reads one packet from its file,
 * applies timestamp offsets/scaling and discontinuity correction, then
 * decodes/filters/encodes it via output_packet()/poll_filters().
 * At EOF it flushes decoders, writes trailers and closes codecs.
 * Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): this source view is elided (original line numbers are
 * non-contiguous); error paths, labels and some braces are not visible. */
2965 static int transcode(void)
2968 AVFormatContext *is, *os;
2972 int no_packet_count = 0;
2973 int64_t timer_start;
/* no_packet[i] marks input file i as temporarily drained (EAGAIN) */
2975 if (!(no_packet = av_mallocz(nb_input_files)))
2978 ret = transcode_init();
2982 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2985 timer_start = av_gettime();
/* loop until SIGTERM/SIGINT or all outputs are past their limits */
2987 for (; received_sigterm == 0;) {
2988 int file_index, ist_index, past_recording_time = 1;
2992 ipts_min = INT64_MAX;
2994 /* check if there's any stream where output is still needed */
2995 for (i = 0; i < nb_output_streams; i++) {
2997 ost = output_streams[i];
2998 of = output_files[ost->file_index];
2999 os = output_files[ost->file_index]->ctx;
3000 if (ost->is_past_recording_time ||
3001 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: stop every stream of this output file */
3003 if (ost->frame_number > ost->max_frames) {
3005 for (j = 0; j < of->ctx->nb_streams; j++)
3006 output_streams[of->ost_index + j]->is_past_recording_time = 1;
3009 past_recording_time = 0;
3011 if (past_recording_time)
3014 /* select the stream that we must read now by looking at the
3015 smallest output pts */
3017 for (i = 0; i < nb_input_streams; i++) {
3019 ist = input_streams[i];
3020 ipts = ist->last_dts;
3021 if (ist->discard || no_packet[ist->file_index])
3023 if (!input_files[ist->file_index]->eof_reached) {
3024 if (ipts < ipts_min) {
3026 file_index = ist->file_index;
3030 /* if none, if is finished */
3031 if (file_index < 0) {
/* every non-EOF file returned EAGAIN: clear the flags and retry */
3032 if (no_packet_count) {
3033 no_packet_count = 0;
3034 memset(no_packet, 0, nb_input_files);
3041 /* read a frame from it and output it in the fifo */
3042 is = input_files[file_index]->ctx;
3043 ret = av_read_frame(is, &pkt);
3044 if (ret == AVERROR(EAGAIN)) {
3045 no_packet[file_index] = 1;
/* any other error is treated as end of that input file */
3050 input_files[file_index]->eof_reached = 1;
/* flush the decoders of that file with a NULL packet */
3052 for (i = 0; i < input_files[file_index]->nb_streams; i++) {
3053 ist = input_streams[input_files[file_index]->ist_index + i];
3054 if (ist->decoding_needed)
3055 output_packet(ist, NULL);
/* got a real packet: reset the EAGAIN bookkeeping */
3064 no_packet_count = 0;
3065 memset(no_packet, 0, nb_input_files);
3068 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3069 is->streams[pkt.stream_index]);
3071 /* the following test is needed in case new streams appear
3072 dynamically in stream : we ignore them */
3073 if (pkt.stream_index >= input_files[file_index]->nb_streams)
3074 goto discard_packet;
3075 ist_index = input_files[file_index]->ist_index + pkt.stream_index;
3076 ist = input_streams[ist_index];
3078 goto discard_packet;
/* shift timestamps by the per-file offset, then apply -itsscale */
3080 if (pkt.dts != AV_NOPTS_VALUE)
3081 pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3082 if (pkt.pts != AV_NOPTS_VALUE)
3083 pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3085 if (pkt.pts != AV_NOPTS_VALUE)
3086 pkt.pts *= ist->ts_scale;
3087 if (pkt.dts != AV_NOPTS_VALUE)
3088 pkt.dts *= ist->ts_scale;
3090 //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
3092 // pkt.dts, input_files[ist->file_index].ts_offset,
3093 // ist->st->codec->codec_type);
/* for formats prone to discontinuities (e.g. MPEG-TS wraps), fold a
 * large DTS jump back into the file's ts_offset */
3094 if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
3095 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3096 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3097 int64_t delta = pkt_dts - ist->next_dts;
3098 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
3099 input_files[ist->file_index]->ts_offset -= delta;
3100 av_log(NULL, AV_LOG_DEBUG,
3101 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3102 delta, input_files[ist->file_index]->ts_offset);
3103 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3104 if (pkt.pts != AV_NOPTS_VALUE)
3105 pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3109 // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
3110 if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
3111 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
3112 ist->file_index, ist->st->index);
3115 av_free_packet(&pkt);
3120 av_free_packet(&pkt);
3122 /* dump report by using the output first video and audio streams */
3123 print_report(0, timer_start);
3126 /* at the end of stream, we must flush the decoder buffers */
3127 for (i = 0; i < nb_input_streams; i++) {
3128 ist = input_streams[i];
3129 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3130 output_packet(ist, NULL);
3138 /* write the trailer if needed and close file */
3139 for (i = 0; i < nb_output_files; i++) {
3140 os = output_files[i]->ctx;
3141 av_write_trailer(os);
3144 /* dump report by using the first video and audio streams */
3145 print_report(1, timer_start);
3147 /* close each encoder */
3148 for (i = 0; i < nb_output_streams; i++) {
3149 ost = output_streams[i];
3150 if (ost->encoding_needed) {
3151 av_freep(&ost->st->codec->stats_in);
3152 avcodec_close(ost->st->codec);
3156 /* close each decoder */
3157 for (i = 0; i < nb_input_streams; i++) {
3158 ist = input_streams[i];
3159 if (ist->decoding_needed) {
3160 avcodec_close(ist->st->codec);
/* cleanup path (presumably reached on both success and failure) */
3168 av_freep(&no_packet);
3170 if (output_streams) {
3171 for (i = 0; i < nb_output_streams; i++) {
3172 ost = output_streams[i];
3174 if (ost->stream_copy)
3175 av_freep(&ost->st->codec->extradata);
3177 fclose(ost->logfile);
3178 ost->logfile = NULL;
3180 av_freep(&ost->st->codec->subtitle_header);
3181 av_free(ost->forced_kf_pts);
3182 av_dict_free(&ost->opts);
/* Parse an aspect-ratio argument: either "X:Y" (two integers) or a
 * plain floating-point value. Fatal error on malformed input.
 * NOTE(review): elided view — the validity checks between the parse and
 * the error message are not visible here. */
3189 static double parse_frame_aspect_ratio(const char *arg)
3196 p = strchr(arg, ':');
3198 x = strtol(arg, &end, 10);
3200 y = strtol(end + 1, &end, 10);
3202 ar = (double)x / (double)y;
3204 ar = strtod(arg, NULL);
3207 av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
/* -acodec: thin wrapper forwarding to the generic "codec:a" option. */
3213 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
3215 return parse_option(o, "codec:a", arg, options);
/* -vcodec: thin wrapper forwarding to the generic "codec:v" option. */
3218 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
3220 return parse_option(o, "codec:v", arg, options);
/* -scodec: thin wrapper forwarding to the generic "codec:s" option. */
3223 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
3225 return parse_option(o, "codec:s", arg, options);
/* -dcodec: thin wrapper forwarding to the generic "codec:d" option. */
3228 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
3230 return parse_option(o, "codec:d", arg, options);
/* -map: parse a stream-map specifier ("[label]" for a filtergraph
 * output, or "file[:spec][,syncfile[:syncspec]]") and append the
 * resulting StreamMap entries to o->stream_maps.
 * NOTE(review): elided view — several braces, the 'negative' handling
 * and the free(map) paths are not visible here. */
3233 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
3235 StreamMap *m = NULL;
3236 int i, negative = 0, file_idx;
3237 int sync_file_idx = -1, sync_stream_idx;
3245 map = av_strdup(arg);
3247 /* parse sync stream first, just pick first matching stream */
/* NOTE(review): intentional assignment in the condition; conventionally
 * written with double parentheses to silence -Wparentheses. */
3248 if (sync = strchr(map, ',')) {
3250 sync_file_idx = strtol(sync + 1, &sync, 0);
3251 if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
3252 av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
3257 for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
3258 if (check_stream_specifier(input_files[sync_file_idx]->ctx,
3259 input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
3260 sync_stream_idx = i;
3263 if (i == input_files[sync_file_idx]->nb_streams) {
3264 av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
3265 "match any streams.\n", arg);
3271 if (map[0] == '[') {
3272 /* this mapping refers to lavfi output */
3273 const char *c = map + 1;
3274 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3275 &o->nb_stream_maps, o->nb_stream_maps + 1);
3276 m = &o->stream_maps[o->nb_stream_maps - 1];
3277 m->linklabel = av_get_token(&c, "]");
3278 if (!m->linklabel) {
3279 av_log(NULL, AV_LOG_ERROR, "Invalid output link label: %s.\n", map);
3283 file_idx = strtol(map, &p, 0);
3284 if (file_idx >= nb_input_files || file_idx < 0) {
3285 av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
3289 /* disable some already defined maps */
3290 for (i = 0; i < o->nb_stream_maps; i++) {
3291 m = &o->stream_maps[i];
3292 if (file_idx == m->file_index &&
3293 check_stream_specifier(input_files[m->file_index]->ctx,
3294 input_files[m->file_index]->ctx->streams[m->stream_index],
3295 *p == ':' ? p + 1 : p) > 0)
/* add one map entry per input stream matching the specifier */
3299 for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
3300 if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
3301 *p == ':' ? p + 1 : p) <= 0)
3303 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3304 &o->nb_stream_maps, o->nb_stream_maps + 1);
3305 m = &o->stream_maps[o->nb_stream_maps - 1];
3307 m->file_index = file_idx;
3308 m->stream_index = i;
/* with no explicit sync stream, a stream syncs to itself */
3310 if (sync_file_idx >= 0) {
3311 m->sync_file_index = sync_file_idx;
3312 m->sync_stream_index = sync_stream_idx;
3314 m->sync_file_index = file_idx;
3315 m->sync_stream_index = i;
3321 av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
/* -attach: record the filename of a file to be attached to the output
 * (e.g. fonts in Matroska). The string is stored, not copied — it must
 * outlive the OptionsContext. */
3329 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
3331 o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
3332 &o->nb_attachments, o->nb_attachments + 1);
3333 o->attachments[o->nb_attachments - 1] = arg;
3338 * Parse a metadata specifier in arg.
3339 * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
3340 * @param index for type c/p, chapter/program index is written here
3341 * @param stream_spec for type s, the stream specifier is written here
/* Parse a metadata target specifier (see the doxygen comment above):
 * leading g/s/c/p selects the scope; for 's' the rest is a stream
 * specifier, for 'c'/'p' an optional ":index".
 * NOTE(review): elided view — the switch skeleton and the branch that
 * sets *type are not visible here. */
3343 static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
/* 's' must be followed by ':' and a stream specifier (may be empty) */
3351 if (*(++arg) && *arg != ':') {
3352 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
3355 *stream_spec = *arg == ':' ? arg + 1 : "";
/* 'c'/'p': optional ":index" selects a chapter/program */
3359 if (*(++arg) == ':')
3360 *index = strtol(++arg, NULL, 0);
3363 av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
/* -map_metadata: copy a metadata dictionary between two scopes
 * (global/stream/chapter/program) of an input and an output context,
 * without overwriting keys already present in the destination.
 * NOTE(review): elided view — macro skeletons (case labels, loop
 * breaks) and the error-path fragments are incomplete here. */
3370 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3372 AVDictionary **meta_in = NULL;
3373 AVDictionary **meta_out;
3375 char type_in, type_out;
3376 const char *istream_spec = NULL, *ostream_spec = NULL;
3377 int idx_in = 0, idx_out = 0;
3379 parse_meta_type(inspec, &type_in, &idx_in, &istream_spec);
3380 parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
/* a manual mapping suppresses the corresponding automatic copy */
3382 if (type_in == 'g' || type_out == 'g')
3383 o->metadata_global_manual = 1;
3384 if (type_in == 's' || type_out == 's')
3385 o->metadata_streams_manual = 1;
3386 if (type_in == 'c' || type_out == 'c')
3387 o->metadata_chapters_manual = 1;
/* bounds-check a chapter/program index before dereferencing it */
3389 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3390 if ((index) < 0 || (index) >= (nb_elems)) {\
3391 av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
/* resolve a (type, index) pair to the matching metadata dictionary */
3396 #define SET_DICT(type, meta, context, index)\
3399 meta = &context->metadata;\
3402 METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3403 meta = &context->chapters[index]->metadata;\
3406 METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3407 meta = &context->programs[index]->metadata;\
3411 SET_DICT(type_in, meta_in, ic, idx_in);
3412 SET_DICT(type_out, meta_out, oc, idx_out);
3414 /* for input streams choose first matching stream */
3415 if (type_in == 's') {
3416 for (i = 0; i < ic->nb_streams; i++) {
3417 if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3418 meta_in = &ic->streams[i]->metadata;
3424 av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
/* for output streams, copy into every matching stream */
3429 if (type_out == 's') {
3430 for (i = 0; i < oc->nb_streams; i++) {
3431 if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3432 meta_out = &oc->streams[i]->metadata;
3433 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3438 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
/* Look up an encoder or decoder by name and verify its media type;
 * aborts with a fatal log message if either check fails.
 * NOTE(review): elided view — the exit calls after the fatal logs are
 * not visible here. */
3443 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3445 const char *codec_string = encoder ? "encoder" : "decoder";
3449 avcodec_find_encoder_by_name(name) :
3450 avcodec_find_decoder_by_name(name);
3452 av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
3455 if (codec->type != type) {
3456 av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
/* Pick the decoder for a stream: a user-forced codec name (per-stream
 * "-c:..." option) takes precedence and also overrides the stream's
 * codec_id; otherwise fall back to the default decoder for the id. */
3462 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
3464 char *codec_name = NULL;
3466 MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3468 AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
3469 st->codec->codec_id = codec->id;
3472 return avcodec_find_decoder(st->codec->codec_id);
3476 * Add all the streams from the given input file to the global
3477 * list of input streams.
/* Create an InputStream wrapper for every stream of an opened input file
 * and append them to the global input_streams[] array, recording the
 * initial ("resample") parameters used later to detect mid-stream
 * parameter changes.
 * NOTE(review): elided view — allocation-failure handling and loop
 * braces are not fully visible here. */
3479 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
3483 for (i = 0; i < ic->nb_streams; i++) {
3484 AVStream *st = ic->streams[i];
3485 AVCodecContext *dec = st->codec;
3486 InputStream *ist = av_mallocz(sizeof(*ist));
3491 input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
3492 input_streams[nb_input_streams - 1] = ist;
3495 ist->file_index = nb_input_files;
/* discard everything by default; mapping re-enables what is used */
3497 st->discard = AVDISCARD_ALL;
3498 ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
3500 ist->ts_scale = 1.0;
3501 MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
3503 ist->dec = choose_decoder(o, ic, st);
3505 switch (dec->codec_type) {
3506 case AVMEDIA_TYPE_VIDEO:
/* remember the input geometry/format to detect changes later */
3507 ist->resample_height = dec->height;
3508 ist->resample_width = dec->width;
3509 ist->resample_pix_fmt = dec->pix_fmt;
3512 case AVMEDIA_TYPE_AUDIO:
3513 guess_input_channel_layout(ist);
3515 ist->resample_sample_fmt = dec->sample_fmt;
3516 ist->resample_sample_rate = dec->sample_rate;
3517 ist->resample_channels = dec->channels;
3518 ist->resample_channel_layout = dec->channel_layout;
3521 case AVMEDIA_TYPE_DATA:
3522 case AVMEDIA_TYPE_SUBTITLE:
3523 case AVMEDIA_TYPE_ATTACHMENT:
3524 case AVMEDIA_TYPE_UNKNOWN:
/* If the output path looks like a local file (no protocol prefix,
 * a Windows-style "X:" drive letter, or an explicit "file:" prefix) and
 * it already exists, either prompt the user (interactive) or abort,
 * unless -y (file_overwrite) was given.
 * NOTE(review): filename[1] is read unguarded — presumably safe because
 * an empty filename never reaches this point; confirm at call sites. */
3532 static void assert_file_overwrite(const char *filename)
3534 if (!file_overwrite &&
3535 (strchr(filename, ':') == NULL || filename[1] == ':' ||
3536 av_strstart(filename, "file:", NULL))) {
3537 if (avio_check(filename, 0) == 0) {
3539 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3541 if (!read_yesno()) {
3542 fprintf(stderr, "Not overwriting - exiting\n");
/* non-interactive (stdin used for data): never overwrite silently */
3547 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
/* -dump_attachment: write an attachment stream's extradata (the attached
 * file contents) to disk. If no filename was given on the command line,
 * fall back to the stream's "filename" metadata tag.
 * NOTE(review): elided view — the avio close/cleanup after avio_write
 * is not visible here. */
3554 static void dump_attachment(AVStream *st, const char *filename)
3557 AVIOContext *out = NULL;
3558 AVDictionaryEntry *e;
3560 if (!st->codec->extradata_size) {
3561 av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3562 nb_input_files - 1, st->index);
3565 if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3566 filename = e->value;
3568 av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
3569 "in stream #%d:%d.\n", nb_input_files - 1, st->index);
/* respect -y / interactive overwrite policy before creating the file */
3573 assert_file_overwrite(filename);
3575 if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3576 av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
3581 avio_write(out, st->codec->extradata, st->codec->extradata_size);
/* -i: open one input file. Applies format/demuxer options derived from
 * the CLI, probes stream info, seeks to -ss if requested, registers all
 * streams via add_input_streams() and appends an InputFile record.
 * Returns 0 on success (elided here); fatal errors call exit paths.
 * NOTE(review): this view is elided (non-contiguous original line
 * numbers); several error/cleanup statements are not visible. */
3586 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3588 AVFormatContext *ic;
3589 AVInputFormat *file_iformat = NULL;
3593 AVDictionary **opts;
3594 int orig_nb_streams; // number of streams before avformat_find_stream_info
/* a user-forced input format (-f) must resolve to a known demuxer */
3597 if (!(file_iformat = av_find_input_format(o->format))) {
3598 av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
3603 if (!strcmp(filename, "-"))
3606 using_stdin |= !strncmp(filename, "pipe:", 5) ||
3607 !strcmp(filename, "/dev/stdin");
3609 /* get default parameters from command line */
3610 ic = avformat_alloc_context();
3612 print_error(filename, AVERROR(ENOMEM));
/* forward relevant CLI options to the demuxer as format options */
3615 if (o->nb_audio_sample_rate) {
3616 snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3617 av_dict_set(&format_opts, "sample_rate", buf, 0);
3619 if (o->nb_audio_channels) {
3620 /* because we set audio_channels based on both the "ac" and
3621 * "channel_layout" options, we need to check that the specified
3622 * demuxer actually has the "channels" option before setting it */
3623 if (file_iformat && file_iformat->priv_class &&
3624 av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
3625 AV_OPT_SEARCH_FAKE_OBJ)) {
3626 snprintf(buf, sizeof(buf), "%d",
3627 o->audio_channels[o->nb_audio_channels - 1].u.i);
3628 av_dict_set(&format_opts, "channels", buf, 0);
3631 if (o->nb_frame_rates) {
3632 av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3634 if (o->nb_frame_sizes) {
3635 av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3637 if (o->nb_frame_pix_fmts)
3638 av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3640 ic->flags |= AVFMT_FLAG_NONBLOCK;
3641 ic->interrupt_callback = int_cb;
3643 /* open the input file with generic libav function */
3644 err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3646 print_error(filename, err);
/* any format option left in the dict was not consumed -> fatal */
3649 assert_avoptions(format_opts);
3651 /* apply forced codec ids */
3652 for (i = 0; i < ic->nb_streams; i++)
3653 choose_decoder(o, ic, ic->streams[i]);
3655 /* Set AVCodecContext options for avformat_find_stream_info */
3656 opts = setup_find_stream_info_opts(ic, codec_opts);
3657 orig_nb_streams = ic->nb_streams;
3659 /* If not enough info to get the stream parameters, we decode the
3660 first frames to get it. (used in mpeg case for example) */
3661 ret = avformat_find_stream_info(ic, opts);
3663 av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3664 avformat_close_input(&ic);
3668 timestamp = o->start_time;
3669 /* add the stream start time */
3670 if (ic->start_time != AV_NOPTS_VALUE)
3671 timestamp += ic->start_time;
3673 /* if seeking requested, we execute it */
3674 if (o->start_time != 0) {
3675 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
3677 av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3678 filename, (double)timestamp / AV_TIME_BASE);
3682 /* update the current parameters so that they match the one of the input stream */
3683 add_input_streams(o, ic);
3685 /* dump the file content */
3686 av_dump_format(ic, nb_input_files, filename, 0);
3688 input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3689 if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
3692 input_files[nb_input_files - 1]->ctx = ic;
3693 input_files[nb_input_files - 1]->ist_index = nb_input_streams - ic->nb_streams;
/* with -copyts the raw timestamps are kept; otherwise normalize so the
 * file starts at the requested -ss position */
3694 input_files[nb_input_files - 1]->ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3695 input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
3696 input_files[nb_input_files - 1]->rate_emu = o->rate_emu;
/* handle -dump_attachment requests against this file's streams */
3698 for (i = 0; i < o->nb_dump_attachment; i++) {
3701 for (j = 0; j < ic->nb_streams; j++) {
3702 AVStream *st = ic->streams[j];
3704 if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3705 dump_attachment(st, o->dump_attachment[i].u.str);
/* free the per-stream option dicts created for find_stream_info */
3709 for (i = 0; i < orig_nb_streams; i++)
3710 av_dict_free(&opts[i]);
/*
 * Parse the -force_key_frames option value (kf): a comma-separated list of
 * time specifications.  Each entry is converted with parse_time_or_die()
 * and rescaled from AV_TIME_BASE_Q to the encoder time base, then stored
 * in ost->forced_kf_pts.  Logs FATAL if the array cannot be allocated.
 */
3717 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3718                                     AVCodecContext *avctx)
/* first pass over kf: presumably counts the comma-separated entries into n
 * (the counting statement is not visible at this point in the file) */
3724     for (p = kf; *p; p++)
3727     ost->forced_kf_count = n;
3728     ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3729     if (!ost->forced_kf_pts) {
3730         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* second pass: step to the text after each ',' and parse one timestamp */
3733     for (i = 0; i < n; i++) {
3734         p = i ? strchr(p, ',') + 1 : kf;
3735         t = parse_time_or_die("force_key_frames", p, 1);
3736         ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/*
 * Read one line (terminated by '\n' or EOF) from s into a freshly
 * allocated, NUL-terminated buffer built via an AVIO dynamic buffer.
 * Logs FATAL if the dynamic buffer cannot be opened.
 * NOTE(review): the returned buffer is heap-allocated by
 * avio_close_dyn_buf(); the caller is presumably responsible for freeing
 * it — confirm against callers.
 */
3740 static uint8_t *get_line(AVIOContext *s)
3746     if (avio_open_dyn_buf(&line) < 0) {
3747         av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
/* accumulate bytes until newline or a 0 byte (avio_r8() returns 0 at EOF) */
3751     while ((c = avio_r8(s)) && c != '\n')
3754     avio_close_dyn_buf(line, &buf);
/*
 * Locate and open an avpreset file for the given preset/codec pair.
 * Searches a small list of base directories (AVCONV_DATADIR first; the
 * remaining entries are initialized outside this excerpt), trying the
 * codec-specific name "<codec>-<preset>.avpreset" before the generic
 * "<preset>.avpreset".  On success *s is an open AVIOContext.
 * Returns the last avio_open2() result (0 on success, <0 AVERROR).
 */
3759 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3762     char filename[1000];
3763     const char *base[3] = { getenv("AVCONV_DATADIR"),
/* stop as soon as one candidate opens (ret becomes 0) */
3768     for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
/* base[1] is presumably $HOME, hence the "/.avconv" subdirectory */
3772         snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3773                  i != 1 ? "" : "/.avconv", codec_name, preset_name);
3774         ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/* fall back to the codec-independent preset name */
3777             snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3778                      i != 1 ? "" : "/.avconv", preset_name);
3779             ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/*
 * Select the encoder for an output stream.  With no per-stream -c option
 * the codec id is guessed from the output format/filename; "copy" enables
 * stream copy (no encoder); any other name is looked up with
 * find_codec_or_die() and its id forced into the stream's codec context.
 */
3785 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
3787     char *codec_name = NULL;
3789     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
/* no explicit codec requested: derive it from the muxer defaults */
3791         ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3792                                                   NULL, ost->st->codec->codec_type);
3793         ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3794     } else if (!strcmp(codec_name, "copy"))
3795         ost->stream_copy = 1;
3797         ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3798         ost->st->codec->codec_id = ost->enc->id;
/*
 * Create a new output stream of the given media type in muxer context oc
 * and register it in the global output_streams[] array.  Handles the
 * per-stream options that are common to all media types: stream id map,
 * encoder selection, presets, -frames, bitstream filters, codec tag,
 * qscale, global headers and sws flags.  Type-specific setup is done by
 * the new_*_stream() wrappers that call this.
 */
3802 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
3805     AVStream *st = avformat_new_stream(oc, NULL);
3806     int idx      = oc->nb_streams - 1, ret = 0;
3807     char *bsf = NULL, *next, *codec_tag = NULL;
3808     AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3810     char *buf = NULL, *arg = NULL, *preset = NULL;
3811     AVIOContext *s = NULL;
3814         av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
/* apply a user-provided stream id (-streamid) if one was mapped for this index */
3818     if (oc->nb_streams - 1 < o->nb_streamid_map)
3819         st->id = o->streamid_map[oc->nb_streams - 1];
3821     output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3822                                 nb_output_streams + 1);
3823     if (!(ost = av_mallocz(sizeof(*ost))))
3825     output_streams[nb_output_streams - 1] = ost;
3827     ost->file_index = nb_output_files;
3830     st->codec->codec_type = type;
3831     choose_encoder(o, oc, ost);
/* collect codec options matching this stream from the global codec_opts dict */
3833         ost->opts  = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3836     avcodec_get_context_defaults3(st->codec, ost->enc);
3837     st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
/* read "key=value" lines from the preset file into ost->opts; existing
 * (explicit) options win because of AV_DICT_DONT_OVERWRITE */
3839     MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3840     if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
/* skip empty lines and '#' comments */
3843             if (!buf[0] || buf[0] == '#') {
3847             if (!(arg = strchr(buf, '='))) {
3848                 av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
3852             av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3854         } while (!s->eof_reached);
3858         av_log(NULL, AV_LOG_FATAL,
3859                "Preset %s specified for stream %d:%d, but could not be opened.\n",
3860                preset, ost->file_index, ost->index);
3864     ost->max_frames = INT64_MAX;
3865     MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
/* build the chain of bitstream filters from a comma-separated -bsf list;
 * NOTE(review): intentional assignment inside the if condition */
3867     MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
3869         if (next = strchr(bsf, ','))
3871         if (!(bsfc = av_bitstream_filter_init(bsf))) {
3872             av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3876             bsfc_prev->next = bsfc;
3878             ost->bitstream_filters = bsfc;
/* -tag: accept either a number (strtol) or a 4-char fourcc string */
3884     MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3886         uint32_t tag = strtol(codec_tag, &next, 0);
3888             tag = AV_RL32(codec_tag);
3889         st->codec->codec_tag = tag;
/* constant-quality mode: -q/-qscale or the global same_quant flag */
3892     MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3893     if (qscale >= 0 || same_quant) {
3894         st->codec->flags |= CODEC_FLAG_QSCALE;
3895         st->codec->global_quality = FF_QP2LAMBDA * qscale;
3898     if (oc->oformat->flags & AVFMT_GLOBALHEADER)
3899         st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
/* remember the current scaler flags for this stream's rescaling */
3901     av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3903     ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
/*
 * Parse a comma-separated list of integer coefficients from str into dest
 * (used for the -intra_matrix / -inter_matrix options, 64 entries).
 * Logs FATAL on a malformed coefficient.
 */
3908 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3911     const char *p = str;
3918             av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
/*
 * Create a video output stream: calls new_output_stream() for the common
 * setup, then (unless stream-copying) applies all video-specific options:
 * frame rate, size, aspect ratio, pixel format, quantization matrices,
 * rc_override, two-pass flags, forced key frames, field order and the
 * per-stream filter chain.
 */
3925 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
3929     AVCodecContext *video_enc;
3931     ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3933     video_enc = st->codec;
3935     if (!ost->stream_copy) {
3936         const char *p = NULL;
3937         char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
3938         char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
3939         char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
3942         MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3943         if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3944             av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3948         MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3949         if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3950             av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
3954         MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
3955         if (frame_aspect_ratio)
3956             ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
3958         MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
3959         if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
3960             av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
3963         st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
/* custom quantization matrices (-intra_matrix / -inter_matrix), 64 coeffs each */
3965         MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
3967             if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
3968                 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
3971             parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
3973         MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
3975             if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
3976                 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
3979             parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
/* -rc_override: comma-separated "start,end,q" triples; positive q is a
 * fixed qscale, negative q a quality factor.
 * NOTE(review): the av_realloc() result is not checked for NULL before use. */
3982         MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
3983         for (i = 0; p; i++) {
3985             int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
3987                 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
3990             video_enc->rc_override =
3991                 av_realloc(video_enc->rc_override,
3992                            sizeof(RcOverride) * (i + 1));
3993             video_enc->rc_override[i].start_frame = start;
3994             video_enc->rc_override[i].end_frame   = end;
3996                 video_enc->rc_override[i].qscale         = q;
3997                 video_enc->rc_override[i].quality_factor = 1.0;
4000                 video_enc->rc_override[i].qscale         = 0;
4001                 video_enc->rc_override[i].quality_factor = -q/100.0;
4006         video_enc->rc_override_count = i;
4007         if (!video_enc->rc_initial_buffer_occupancy)
4008             video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
/* intra_dc_precision is stored as an offset from 8 bits */
4009         video_enc->intra_dc_precision = intra_dc_precision - 8;
/* two-pass encoding flags (-pass 1 / -pass 2) */
4014                 video_enc->flags |= CODEC_FLAG_PASS1;
4016                 video_enc->flags |= CODEC_FLAG_PASS2;
4020         MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
4021         if (forced_key_frames)
4022             parse_forced_key_frames(forced_key_frames, ost, video_enc);
4024         MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
/* -1 means "not specified": keep the decoder's field order */
4026         ost->top_field_first = -1;
4027         MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
4029         MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
4031             ost->avfilter = av_strdup(filters);
4033     MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
/*
 * Create an audio output stream: common setup via new_output_stream(),
 * then (unless stream-copying) channel count, sample format, sample rate
 * and the per-stream audio filter chain.
 */
4039 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
4043     AVCodecContext *audio_enc;
4045     ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
4048     audio_enc = st->codec;
4049     audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
4051     if (!ost->stream_copy) {
/* NOTE(review): stray double semicolon below — harmless empty statement */
4052         char *sample_fmt = NULL, *filters = NULL;;
4054         MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
4056         MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
4058             (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
4059             av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
4063         MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
4065         MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
4067             ost->avfilter = av_strdup(filters);
/*
 * Create a data output stream.  Only stream copy is supported; requesting
 * an encoder is a fatal error.
 */
4073 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
4077     ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
4078     if (!ost->stream_copy) {
4079         av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
/*
 * Create an attachment output stream; attachments are always stream-copied.
 */
4086 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
4088     OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
4089     ost->stream_copy = 1;
/*
 * Create a subtitle output stream; no subtitle-specific options are
 * handled here beyond forcing the codec type.
 */
4093 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
4097     AVCodecContext *subtitle_enc;
4099     ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
4101     subtitle_enc = st->codec;
4103     subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
4108 /* arg format is "output-stream-index:streamid-value". */
/*
 * Handler for -streamid: records the requested stream id for the given
 * output stream index in o->streamid_map (grown on demand), to be applied
 * later by new_output_stream().  Fatal on a malformed argument.
 */
4109 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
/* copy arg so we can split it at the ':' without modifying the original */
4115     av_strlcpy(idx_str, arg, sizeof(idx_str));
4116     p = strchr(idx_str, ':');
4118         av_log(NULL, AV_LOG_FATAL,
4119                "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
4124     idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
4125     o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
4126     o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
/*
 * Copy the chapters of input file ifile into output file ofile, shifting
 * them by the output start time / input ts offset and dropping chapters
 * that fall entirely outside the requested recording window.  Chapter
 * metadata is copied only when copy_metadata is set.
 * Returns 0 on success or AVERROR(ENOMEM).
 */
4130 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
4132     AVFormatContext *is = ifile->ctx;
4133     AVFormatContext *os = ofile->ctx;
4136     for (i = 0; i < is->nb_chapters; i++) {
4137         AVChapter *in_ch = is->chapters[i], *out_ch;
/* offset of the output timeline relative to this chapter's time base */
4138         int64_t ts_off   = av_rescale_q(ofile->start_time - ifile->ts_offset,
4139                                        AV_TIME_BASE_Q, in_ch->time_base);
4140         int64_t rt       = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
4141                            av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
/* skip chapters that end before the output starts ... */
4144         if (in_ch->end < ts_off)
/* ... or start after the recording window ends */
4146         if (rt != INT64_MAX && in_ch->start > rt + ts_off)
4149         out_ch = av_mallocz(sizeof(AVChapter));
4151             return AVERROR(ENOMEM);
4153         out_ch->id        = in_ch->id;
4154         out_ch->time_base = in_ch->time_base;
/* clamp the shifted chapter to [0, recording_time] */
4155         out_ch->start     = FFMAX(0,  in_ch->start - ts_off);
4156         out_ch->end       = FFMIN(rt, in_ch->end   - ts_off);
4159             av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
/* NOTE(review): av_realloc() result is assigned directly; on failure the
 * old chapters array would be leaked (classic realloc pitfall) */
4162         os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
4164             return AVERROR(ENOMEM);
4165         os->chapters[os->nb_chapters - 1] = out_ch;
/*
 * Create an output stream fed by a complex filtergraph output pad
 * (-filter_complex).  Only video and audio pads are supported; the new
 * stream has no input stream (source_index = -1) and must not be a
 * stream copy.  Configures the filter connection and frees the temporary
 * AVFilterInOut describing the pad.
 */
4170 static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
4171                                AVFormatContext *oc)
4175     switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4176     case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
4177     case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
4179         av_log(NULL, AV_LOG_FATAL, "Only video and audio filters are supported "
/* a filtergraph-fed stream has no corresponding input stream */
4184     ost->source_index = -1;
4185     ost->filter       = ofilter;
/* -c copy contradicts filtering: the encoder is mandatory here */
4189     if (ost->stream_copy) {
4190         av_log(NULL, AV_LOG_ERROR, "Streamcopy requested for output stream %d:%d, "
4191                "which is fed from a complex filtergraph. Filtering and streamcopy "
4192                "cannot be used together.\n", ost->file_index, ost->index);
4196     if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
4197         av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
4200     avfilter_inout_free(&ofilter->out_tmp);
/*
 * Handle an output file argument: allocate the muxer context, create all
 * output streams (from complex filtergraph pads, automatic "best stream"
 * selection, or explicit -map entries), attach files, open the output,
 * and apply metadata/chapter mapping options.  Registers the result in
 * the global output_files[] array.  Most failures are fatal (the elided
 * error paths presumably call exit_program()).
 */
4203 static void opt_output_file(void *optctx, const char *filename)
4205     OptionsContext *o = optctx;
4206     AVFormatContext *oc;
4208     AVOutputFormat *file_oformat;
/* complex filtergraphs must be configured before their pads can feed streams */
4212         if (configure_complex_filters() < 0) {
4213             av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
/* "-" conventionally means standard output */
4217     if (!strcmp(filename, "-"))
4220     oc = avformat_alloc_context();
4222         print_error(filename, AVERROR(ENOMEM));
/* explicit -f takes precedence; otherwise guess the muxer from the filename */
4227         file_oformat = av_guess_format(o->format, NULL, NULL);
4228         if (!file_oformat) {
4229             av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
4233         file_oformat = av_guess_format(NULL, filename, NULL);
4234         if (!file_oformat) {
4235             av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
4241     oc->oformat = file_oformat;
4242     oc->interrupt_callback = int_cb;
4243     av_strlcpy(oc->filename, filename, sizeof(oc->filename));
4245     /* create streams for all unlabeled output pads */
4246     for (i = 0; i < nb_filtergraphs; i++) {
4247         FilterGraph *fg = filtergraphs[i];
4248         for (j = 0; j < fg->nb_outputs; j++) {
4249             OutputFilter *ofilter = fg->outputs[j];
4251             if (!ofilter->out_tmp || ofilter->out_tmp->name)
/* a filter pad of this type exists, so disable automatic stream selection for it */
4254             switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4255             case AVMEDIA_TYPE_VIDEO:    o->video_disable    = 1; break;
4256             case AVMEDIA_TYPE_AUDIO:    o->audio_disable    = 1; break;
4257             case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
4259             init_output_filter(ofilter, o, oc);
4263     if (!o->nb_stream_maps) {
4264         /* pick the "best" stream of each type */
4265 #define NEW_STREAM(type, index)\
4267             ost = new_ ## type ## _stream(o, oc);\
4268             ost->source_index = index;\
4269             ost->sync_ist     = input_streams[index];\
4270             input_streams[index]->discard = 0;\
4271             input_streams[index]->st->discard = AVDISCARD_NONE;\
4274         /* video: highest resolution */
4275         if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
4276             int area = 0, idx = -1;
4277             for (i = 0; i < nb_input_streams; i++) {
4278                 ist = input_streams[i];
4279                 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
4280                     ist->st->codec->width * ist->st->codec->height > area) {
4281                     area = ist->st->codec->width * ist->st->codec->height;
4285             NEW_STREAM(video, idx);
4288         /* audio: most channels */
4289         if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
4290             int channels = 0, idx = -1;
4291             for (i = 0; i < nb_input_streams; i++) {
4292                 ist = input_streams[i];
4293                 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
4294                     ist->st->codec->channels > channels) {
4295                     channels = ist->st->codec->channels;
4299             NEW_STREAM(audio, idx);
4302         /* subtitles: pick first */
4303         if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
4304             for (i = 0; i < nb_input_streams; i++)
4305                 if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
4306                     NEW_STREAM(subtitle, i);
4310         /* do something with data? */
/* explicit -map entries: each map names either a filtergraph output label
 * or an input file/stream pair */
4312         for (i = 0; i < o->nb_stream_maps; i++) {
4313             StreamMap *map = &o->stream_maps[i];
4318             if (map->linklabel) {
4320                 OutputFilter *ofilter = NULL;
/* find the filtergraph output whose label matches the map */
4323                 for (j = 0; j < nb_filtergraphs; j++) {
4324                     fg = filtergraphs[j];
4325                     for (k = 0; k < fg->nb_outputs; k++) {
4326                         AVFilterInOut *out = fg->outputs[k]->out_tmp;
4327                         if (out && !strcmp(out->name, map->linklabel)) {
4328                             ofilter = fg->outputs[k];
4335                     av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
4336                            "in any defined filter graph.\n", map->linklabel);
4339                 init_output_filter(ofilter, o, oc);
4341                 ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
4342                 switch (ist->st->codec->codec_type) {
4343                 case AVMEDIA_TYPE_VIDEO:      ost = new_video_stream(o, oc);      break;
4344                 case AVMEDIA_TYPE_AUDIO:      ost = new_audio_stream(o, oc);      break;
4345                 case AVMEDIA_TYPE_SUBTITLE:   ost = new_subtitle_stream(o, oc);   break;
4346                 case AVMEDIA_TYPE_DATA:       ost = new_data_stream(o, oc);       break;
4347                 case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
4349                     av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
4350                            map->file_index, map->stream_index);
4354                 ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
4355                 ost->sync_ist     = input_streams[input_files[map->sync_file_index]->ist_index +
4356                                                map->sync_stream_index];
4358                 ist->st->discard = AVDISCARD_NONE;
4363     /* handle attached files */
4364     for (i = 0; i < o->nb_attachments; i++) {
4366         uint8_t *attachment;
4370         if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
4371             av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
4375         if ((len = avio_size(pb)) <= 0) {
4376             av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
4380         if (!(attachment = av_malloc(len))) {
4381             av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
4385         avio_read(pb, attachment, len);
/* the attachment becomes the stream's extradata */
4387         ost = new_attachment_stream(o, oc);
4388         ost->stream_copy               = 0;
4389         ost->source_index              = -1;
4390         ost->attachment_filename       = o->attachments[i];
4391         ost->st->codec->extradata      = attachment;
4392         ost->st->codec->extradata_size = len;
/* store the basename of the attachment as its filename metadata */
4394         p = strrchr(o->attachments[i], '/');
4395         av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
/* register the new output file globally */
4399     output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
4400     if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
4403     output_files[nb_output_files - 1]->ctx            = oc;
4404     output_files[nb_output_files - 1]->ost_index      = nb_output_streams - oc->nb_streams;
4405     output_files[nb_output_files - 1]->recording_time = o->recording_time;
4406     if (o->recording_time != INT64_MAX)
4407         oc->duration = o->recording_time;
4408     output_files[nb_output_files - 1]->start_time     = o->start_time;
4409     output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
4410     av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
4412     /* check filename in case of an image number is expected */
4413     if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
4414         if (!av_filename_number_test(oc->filename)) {
4415             print_error(oc->filename, AVERROR(EINVAL));
4420     if (!(oc->oformat->flags & AVFMT_NOFILE)) {
4421         /* test if it already exists to avoid losing precious files */
4422         assert_file_overwrite(filename);
4425         if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
4426                               &oc->interrupt_callback,
4427                               &output_files[nb_output_files - 1]->opts)) < 0) {
4428             print_error(filename, err);
4433     if (o->mux_preload) {
4435         snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
4436         av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
4438     oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
4439     oc->flags |= AVFMT_FLAG_NONBLOCK;
/* -map_metadata: copy metadata from the specified input file(s) */
4442     for (i = 0; i < o->nb_metadata_map; i++) {
4444         int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
4446         if (in_file_index < 0)
4448         if (in_file_index >= nb_input_files) {
4449             av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4452         copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
/* -map_chapters: INT_MAX means "first input file that has chapters" */
4456     if (o->chapters_input_file >= nb_input_files) {
4457         if (o->chapters_input_file == INT_MAX) {
4458             /* copy chapters from the first input file that has them*/
4459             o->chapters_input_file = -1;
4460             for (i = 0; i < nb_input_files; i++)
4461                 if (input_files[i]->ctx->nb_chapters) {
4462                     o->chapters_input_file = i;
4466             av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4467                    o->chapters_input_file);
4471     if (o->chapters_input_file >= 0)
4472         copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
4473                       !o->metadata_chapters_manual);
4475     /* copy global metadata by default */
4476     if (!o->metadata_global_manual && nb_input_files)
4477         av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
4478                      AV_DICT_DONT_OVERWRITE);
4479     if (!o->metadata_streams_manual)
4480         for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
4482             if (output_streams[i]->source_index < 0)         /* this is true e.g. for attached files */
4484             ist = input_streams[output_streams[i]->source_index];
4485             av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4488     /* process manually set metadata */
4489     for (i = 0; i < o->nb_metadata; i++) {
4492         const char *stream_spec;
4493         int index = 0, j, ret;
4495         val = strchr(o->metadata[i].u.str, '=');
4497             av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4498                    o->metadata[i].u.str);
4503         parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
4505             for (j = 0; j < oc->nb_streams; j++) {
4506                 if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
4507                     av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
/* NOTE(review): this printf looks like leftover debug output going to
 * stdout; it should probably be removed or turned into an av_log call */
4511                     printf("ret %d, stream_spec %s\n", ret, stream_spec);
4519                 if (index < 0 || index >= oc->nb_chapters) {
4520                     av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4523                 m = &oc->chapters[index]->metadata;
4526                 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
/* an empty value ("key=") deletes the entry (NULL value) */
4529             av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
4536 /* same option as mencoder */
/*
 * Handler for -pass: sets the global do_pass (1 = analysis pass,
 * 2 = encoding pass).
 */
4537 static int opt_pass(const char *opt, const char *arg)
4539     do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
/*
 * Return the CPU user time consumed by this process, in microseconds.
 * Uses getrusage() where available, GetProcessTimes() on Windows
 * (FILETIME is in 100ns units, hence the /10), and falls back to
 * wall-clock time via av_gettime() otherwise.
 */
4543 static int64_t getutime(void)
4546     struct rusage rusage;
4548     getrusage(RUSAGE_SELF, &rusage);
4549     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4550 #elif HAVE_GETPROCESSTIMES
4552     FILETIME c, e, k, u;
4553     proc = GetCurrentProcess();
4554     GetProcessTimes(proc, &c, &e, &k, &u);
4555     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4557     return av_gettime();
/*
 * Return the peak memory usage of this process in bytes: ru_maxrss (in
 * kilobytes, hence *1024) via getrusage() where available, or
 * PeakPagefileUsage via GetProcessMemoryInfo() on Windows.  The elided
 * fallback presumably returns 0 when neither API exists.
 */
4561 static int64_t getmaxrss(void)
4563 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4564     struct rusage rusage;
4565     getrusage(RUSAGE_SELF, &rusage);
4566     return (int64_t)rusage.ru_maxrss * 1024;
4567 #elif HAVE_GETPROCESSMEMORYINFO
4569     PROCESS_MEMORY_COUNTERS memcounters;
4570     proc = GetCurrentProcess();
4571     memcounters.cb = sizeof(memcounters);
4572     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4573     return memcounters.PeakPagefileUsage;
/* -aq is an alias for -q:a (audio quality scale) */
4579 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4581     return parse_option(o, "q:a", arg, options);
/* Print the one-line usage summary for avconv. */
4584 static void show_usage(void)
4586     printf("Hyper fast Audio and Video encoder\n");
4587     printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
/*
 * Print the full help text: usage line, the option table grouped into
 * main/advanced/per-media sections, then the AVOptions of the codec,
 * format and scaler classes.
 */
4591 static void show_help(void)
4593     int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
4594     av_log_set_callback(log_callback_help);
/* each call masks the option table differently to form one help section */
4596     show_help_options(options, "Main options:\n",
4597                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4598     show_help_options(options, "\nAdvanced options:\n",
4599                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4601     show_help_options(options, "\nVideo options:\n",
4602                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4604     show_help_options(options, "\nAdvanced Video options:\n",
4605                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4606                       OPT_VIDEO | OPT_EXPERT);
4607     show_help_options(options, "\nAudio options:\n",
4608                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4610     show_help_options(options, "\nAdvanced Audio options:\n",
4611                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4612                       OPT_AUDIO | OPT_EXPERT);
4613     show_help_options(options, "\nSubtitle options:\n",
4614                       OPT_SUBTITLE | OPT_GRAB,
4616     show_help_options(options, "\nAudio/Video grab options:\n",
/* AVOption-based help for the library contexts */
4620     show_help_children(avcodec_get_class(), flags);
4621     show_help_children(avformat_get_class(), flags);
4622     show_help_children(sws_get_class(), flags);
/*
 * Handler for -target (vcd/svcd/dvd/dv/dv50, optionally prefixed with
 * pal-/ntsc-/film-): determines the TV norm, then expands into the full
 * set of codec, size, rate and mux options for that target by invoking
 * the regular option handlers.  Returns AVERROR(EINVAL) on an unknown
 * target name.
 */
4625 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4627     enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
4628     static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
/* explicit norm prefix wins over autodetection */
4630     if (!strncmp(arg, "pal-", 4)) {
4633     } else if (!strncmp(arg, "ntsc-", 5)) {
4636     } else if (!strncmp(arg, "film-", 5)) {
4640         /* Try to determine PAL/NTSC by peeking in the input files */
4641         if (nb_input_files) {
4643             for (j = 0; j < nb_input_files; j++) {
4644                 for (i = 0; i < input_files[j]->nb_streams; i++) {
4645                     AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
4646                     if (c->codec_type != AVMEDIA_TYPE_VIDEO)
/* frame rate in millihertz, compared against the known norm rates */
4648                     fr = c->time_base.den * 1000 / c->time_base.num;
4652                     } else if ((fr == 29970) || (fr == 23976)) {
4657                 if (norm != UNKNOWN)
4661         if (norm != UNKNOWN)
4662             av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4665     if (norm == UNKNOWN) {
4666         av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4667         av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4668         av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
/* VCD: MPEG-1 video + MP2 audio at the fixed VCD parameters */
4672     if (!strcmp(arg, "vcd")) {
4673         opt_video_codec(o, "c:v", "mpeg1video");
4674         opt_audio_codec(o, "c:a", "mp2");
4675         parse_option(o, "f", "vcd", options);
4677         parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4678         parse_option(o, "r", frame_rates[norm], options);
4679         opt_default("g", norm == PAL ? "15" : "18");
4681         opt_default("b", "1150000");
4682         opt_default("maxrate", "1150000");
4683         opt_default("minrate", "1150000");
4684         opt_default("bufsize", "327680"); // 40*1024*8;
4686         opt_default("b:a", "224000");
4687         parse_option(o, "ar", "44100", options);
4688         parse_option(o, "ac", "2", options);
4690         opt_default("packetsize", "2324");
4691         opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4693         /* We have to offset the PTS, so that it is consistent with the SCR.
4694            SCR starts at 36000, but the first two packs contain only padding
4695            and the first pack from the other stream, respectively, may also have
4696            been written before.
4697            So the real data starts at SCR 36000+3*1200. */
4698         o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
/* SVCD: MPEG-2 video + MP2 audio */
4699     } else if (!strcmp(arg, "svcd")) {
4701         opt_video_codec(o, "c:v", "mpeg2video");
4702         opt_audio_codec(o, "c:a", "mp2");
4703         parse_option(o, "f", "svcd", options);
4705         parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4706         parse_option(o, "r", frame_rates[norm], options);
4707         opt_default("g", norm == PAL ? "15" : "18");
4709         opt_default("b", "2040000");
4710         opt_default("maxrate", "2516000");
4711         opt_default("minrate", "0"); // 1145000;
4712         opt_default("bufsize", "1835008"); // 224*1024*8;
4713         opt_default("flags", "+scan_offset");
4716         opt_default("b:a", "224000");
4717         parse_option(o, "ar", "44100", options);
4719         opt_default("packetsize", "2324");
/* DVD: MPEG-2 video + AC-3 audio */
4721     } else if (!strcmp(arg, "dvd")) {
4723         opt_video_codec(o, "c:v", "mpeg2video");
4724         opt_audio_codec(o, "c:a", "ac3");
4725         parse_option(o, "f", "dvd", options);
4727         parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4728         parse_option(o, "r", frame_rates[norm], options);
4729         opt_default("g", norm == PAL ? "15" : "18");
4731         opt_default("b", "6000000");
4732         opt_default("maxrate", "9000000");
4733         opt_default("minrate", "0"); // 1500000;
4734         opt_default("bufsize", "1835008"); // 224*1024*8;
4736         opt_default("packetsize", "2048");  // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4737         opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4739         opt_default("b:a", "448000");
4740         parse_option(o, "ar", "48000", options);
/* DV / DV50: raw DV muxer; pixel format depends on variant and norm */
4742     } else if (!strncmp(arg, "dv", 2)) {
4744         parse_option(o, "f", "dv", options);
4746         parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4747         parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4748                           norm == PAL ? "yuv420p" : "yuv411p", options);
4749         parse_option(o, "r", frame_rates[norm], options);
4751         parse_option(o, "ar", "48000", options);
4752         parse_option(o, "ac", "2", options);
4755         av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4756         return AVERROR(EINVAL);
/* Handler for -vstats_file: remember the stats filename (replacing any
 * previously set one). */
4761 static int opt_vstats_file(const char *opt, const char *arg)
4763     av_free (vstats_filename);
4764     vstats_filename = av_strdup (arg);
/*
 * Handler for -vstats: generate a timestamped default stats filename
 * ("vstats_HHMMSS.log") and delegate to opt_vstats_file().
 */
4768 static int opt_vstats(const char *opt, const char *arg)
4771     time_t today2 = time(NULL);
4772     struct tm *today = localtime(&today2);
4774     snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4776     return opt_vstats_file(opt, filename);
/* The following handlers are thin aliases that forward legacy option
 * names to their modern per-stream equivalents via parse_option(). */

/* -vframes -> -frames:v */
4779 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4781     return parse_option(o, "frames:v", arg, options);
/* -aframes -> -frames:a */
4784 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4786     return parse_option(o, "frames:a", arg, options);
/* -dframes -> -frames:d */
4789 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4791     return parse_option(o, "frames:d", arg, options);
/* -vtag -> -tag:v */
4794 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4796     return parse_option(o, "tag:v", arg, options);
/* -atag -> -tag:a */
4799 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4801     return parse_option(o, "tag:a", arg, options);
/* -stag -> -tag:s */
4804 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4806     return parse_option(o, "tag:s", arg, options);
/* -vf -> -filter:v */
4809 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4811     return parse_option(o, "filter:v", arg, options);
/* -af -> -filter:a */
4814 static int opt_audio_filters(OptionsContext *o, const char *opt, const char *arg)
4816     return parse_option(o, "filter:a", arg, options);
/*
 * Handler for -vsync: accepts the symbolic names cfr/vfr/passthrough or,
 * as a fallback, a numeric value parsed into video_sync_method.
 */
4819 static int opt_vsync(const char *opt, const char *arg)
4821     if      (!av_strcasecmp(arg, "cfr"))         video_sync_method = VSYNC_CFR;
4822     else if (!av_strcasecmp(arg, "vfr"))         video_sync_method = VSYNC_VFR;
4823     else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
/* no symbolic name matched: treat arg as a number */
4825     if (video_sync_method == VSYNC_AUTO)
4826         video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
/* Handler for the deprecated -deinterlace option: warn and point the user
 * at the yadif video filter instead. */
4830 static int opt_deinterlace(const char *opt, const char *arg)
4832     av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
/*
 * Handler for -cpuflags: parse the flag string and install it as the
 * global CPU-capability mask.
 */
4837 static int opt_cpuflags(const char *opt, const char *arg)
4839     int flags = av_parse_cpu_flags(arg);
4844     av_set_cpu_flags_mask(flags);
/*
 * Scan argv for -cpuflags before regular option parsing, so the CPU mask
 * is in effect for everything that follows.
 */
4848 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
4850     int idx = locate_option(argc, argv, options, "cpuflags");
4851     if (idx && argv[idx + 1])
4852         opt_cpuflags("cpuflags", argv[idx + 1]);
/*
 * Handler for -channel_layout: resolve the named layout to its bitmask,
 * set it via opt_default(), and additionally synthesize a matching "ac"
 * (channel count) option — preserving any stream specifier suffix from
 * opt (e.g. "channel_layout:a:0" -> "ac:a:0").
 * Returns 0 on success or a negative AVERROR.
 */
4855 static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
4857     char layout_str[32];
4860     int ret, channels, ac_str_size;
4863     layout = av_get_channel_layout(arg);
4865         av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
4866         return AVERROR(EINVAL);
/* pass the numeric layout value through the generic option machinery */
4868     snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
4869     ret = opt_default(opt, layout_str);
4873     /* set 'ac' option based on channel layout */
4874     channels = av_get_channel_layout_nb_channels(layout);
4875     snprintf(layout_str, sizeof(layout_str), "%d", channels);
/* carry over the stream specifier (the part after ':') to the "ac" option */
4876     stream_str = strchr(opt, ':');
4877     ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
4878     ac_str = av_mallocz(ac_str_size);
4880         return AVERROR(ENOMEM);
4881     av_strlcpy(ac_str, "ac", 3);
4883         av_strlcat(ac_str, stream_str, ac_str_size);
4884     ret = parse_option(o, ac_str, layout_str, options);
/* -filter_complex option: append a new, zero-initialized filtergraph to
 * the global filtergraphs array and record its graph description.
 * The description string is not copied: 'arg' must remain valid for the
 * lifetime of the filtergraph. */
static int opt_filter_complex(const char *opt, const char *arg)
    filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
                              &nb_filtergraphs, nb_filtergraphs + 1);
    if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
        return AVERROR(ENOMEM);
    filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
    filtergraphs[nb_filtergraphs - 1]->graph_desc = arg;
4901 #define OFFSET(x) offsetof(OptionsContext, x)
4902 static const OptionDef options[] = {
4904 #include "cmdutils_common_opts.h"
4905 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4906 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4907 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4908 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4909 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4910 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4911 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4912 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4913 "outfile[,metadata]:infile[,metadata]" },
4914 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4915 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4916 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4917 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4918 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4919 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4920 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4921 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4922 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4923 "add timings for benchmarking" },
4924 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4925 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4926 "dump each input packet" },
4927 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4928 "when dumping packets, also dump the payload" },
4929 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4930 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4931 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4932 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4933 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4934 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)©_ts}, "copy timestamps" },
4935 { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)©_tb}, "copy input stream time base when stream copying" },
4936 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4937 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4938 { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4939 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4940 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4941 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4942 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4943 { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4944 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4945 { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
4946 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4947 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4948 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4949 { "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "set CPU flags mask", "mask" },
4952 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4953 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4954 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4955 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4956 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4957 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4958 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4959 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4960 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4961 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4962 "use same quantizer as source (implies VBR)" },
4963 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4964 { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
4965 { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
4966 "this option is deprecated, use the yadif filter instead" },
4967 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
4968 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
4969 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
4970 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
4971 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
4972 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
4973 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
4974 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
4975 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
4976 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
4977 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
4978 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
4981 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
4982 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
4983 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
4984 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
4985 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
4986 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
4987 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
4988 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
4989 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
4990 { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
4991 { "af", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_filters}, "audio filters", "filter list" },
4993 /* subtitle options */
4994 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
4995 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
4996 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
4999 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
5002 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
5003 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
5005 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
5007 /* data codec support */
5008 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
5010 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
5014 int main(int argc, char **argv)
5016 OptionsContext o = { 0 };
5021 av_log_set_flags(AV_LOG_SKIP_REPEATED);
5022 parse_loglevel(argc, argv, options);
5024 avcodec_register_all();
5026 avdevice_register_all();
5028 avfilter_register_all();
5030 avformat_network_init();
5034 parse_cpuflags(argc, argv, options);
5037 parse_options(&o, argc, argv, options, opt_output_file);
5039 if (nb_output_files <= 0 && nb_input_files == 0) {
5041 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5045 /* file converter / grab */
5046 if (nb_output_files <= 0) {
5047 fprintf(stderr, "At least one output file must be specified\n");
5051 if (nb_input_files == 0) {
5052 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
5057 if (transcode() < 0)
5059 ti = getutime() - ti;
5061 int maxrss = getmaxrss() / 1024;
5062 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);