3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavresample/avresample.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/vsrc_buffer.h"
56 #if HAVE_SYS_RESOURCE_H
57 #include <sys/types.h>
59 #include <sys/resource.h>
60 #elif HAVE_GETPROCESSTIMES
63 #if HAVE_GETPROCESSMEMORYINFO
69 #include <sys/select.h>
76 #include "libavutil/avassert.h"
79 #define VSYNC_PASSTHROUGH 0
83 const char program_name[] = "avconv";
84 const int program_birth_year = 2000;
86 /* select an input stream for an output stream */
87 typedef struct StreamMap {
88 int disabled; /** 1 if this mapping is disabled by a negative map */
92 int sync_stream_index;
93 char *linklabel; /** name of an output link, for mapping lavfi outputs */
97 * select an input file for an output file
99 typedef struct MetadataMap {
100 int file; ///< file index
101 char type; ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
102 int index; ///< stream/chapter/program number
105 static const OptionDef options[];
107 static int video_discard = 0;
108 static int same_quant = 0;
109 static int do_deinterlace = 0;
110 static int intra_dc_precision = 8;
111 static int qp_hist = 0;
113 static int file_overwrite = 0;
114 static int do_benchmark = 0;
115 static int do_hex_dump = 0;
116 static int do_pkt_dump = 0;
117 static int do_pass = 0;
118 static char *pass_logfilename_prefix = NULL;
119 static int video_sync_method = VSYNC_AUTO;
120 static int audio_sync_method = 0;
121 static float audio_drift_threshold = 0.1;
122 static int copy_ts = 0;
123 static int copy_tb = 1;
124 static int opt_shortest = 0;
125 static char *vstats_filename;
126 static FILE *vstats_file;
128 static int audio_volume = 256;
130 static int exit_on_error = 0;
131 static int using_stdin = 0;
132 static int64_t video_size = 0;
133 static int64_t audio_size = 0;
134 static int64_t extra_size = 0;
135 static int nb_frames_dup = 0;
136 static int nb_frames_drop = 0;
137 static int input_sync;
139 static float dts_delta_threshold = 10;
141 static int print_stats = 1;
143 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
145 typedef struct InputFilter {
146 AVFilterContext *filter;
147 struct InputStream *ist;
148 struct FilterGraph *graph;
151 typedef struct OutputFilter {
152 AVFilterContext *filter;
153 struct OutputStream *ost;
154 struct FilterGraph *graph;
156 /* temporary storage until stream maps are processed */
157 AVFilterInOut *out_tmp;
160 typedef struct FilterGraph {
162 const char *graph_desc;
164 AVFilterGraph *graph;
166 InputFilter **inputs;
168 OutputFilter **outputs;
172 typedef struct FrameBuffer {
178 enum PixelFormat pix_fmt;
181 struct InputStream *ist;
182 struct FrameBuffer *next;
185 typedef struct InputStream {
188 int discard; /* true if stream data should be discarded */
189 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
191 AVFrame *decoded_frame;
193 int64_t start; /* time when read started */
194 /* predicted dts of the next packet read for this stream or (when there are
195 * several frames in a packet) of the next frame in the current packet */
197 /* dts of the last packet read for this stream */
199 PtsCorrectionContext pts_ctx;
201 int is_start; /* is 1 at the start and after a discontinuity */
202 int showed_multi_packet_warning;
207 int resample_pix_fmt;
209 int resample_sample_fmt;
210 int resample_sample_rate;
211 int resample_channels;
212 uint64_t resample_channel_layout;
214 /* a pool of free buffers for decoded data */
215 FrameBuffer *buffer_pool;
217 /* decoded data from this stream goes into all those filters
218 * currently video and audio only */
219 InputFilter **filters;
223 typedef struct InputFile {
224 AVFormatContext *ctx;
225 int eof_reached; /* true if eof reached */
226 int ist_index; /* index of first stream in ist_table */
227 int buffer_size; /* current total buffer size */
229 int nb_streams; /* number of streams that avconv is aware of; may be different
230 from ctx.nb_streams if new streams appear during av_read_frame() */
234 typedef struct OutputStream {
235 int file_index; /* file index */
236 int index; /* stream index in the output file */
237 int source_index; /* InputStream index */
238 AVStream *st; /* stream in the output file */
239 int encoding_needed; /* true if encoding needed for this stream */
241 /* input pts and corresponding output pts
243 // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
244 struct InputStream *sync_ist; /* input stream to sync against */
245 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
246 /* pts of the first frame encoded for this stream, used for limiting
249 AVBitStreamFilterContext *bitstream_filters;
252 AVFrame *filtered_frame;
255 AVRational frame_rate;
259 float frame_aspect_ratio;
262 /* forced key frames */
263 int64_t *forced_kf_pts;
269 OutputFilter *filter;
274 int is_past_recording_time;
276 const char *attachment_filename;
277 int copy_initial_nonkeyframes;
279 enum PixelFormat pix_fmts[2];
283 typedef struct OutputFile {
284 AVFormatContext *ctx;
286 int ost_index; /* index of the first stream in output_streams */
287 int64_t recording_time; /* desired length of the resulting file in microseconds */
288 int64_t start_time; /* start time in microseconds */
289 uint64_t limit_filesize;
292 static InputStream **input_streams = NULL;
293 static int nb_input_streams = 0;
294 static InputFile **input_files = NULL;
295 static int nb_input_files = 0;
297 static OutputStream **output_streams = NULL;
298 static int nb_output_streams = 0;
299 static OutputFile **output_files = NULL;
300 static int nb_output_files = 0;
302 static FilterGraph **filtergraphs;
305 typedef struct OptionsContext {
306 /* input/output options */
310 SpecifierOpt *codec_names;
312 SpecifierOpt *audio_channels;
313 int nb_audio_channels;
314 SpecifierOpt *audio_sample_rate;
315 int nb_audio_sample_rate;
316 SpecifierOpt *frame_rates;
318 SpecifierOpt *frame_sizes;
320 SpecifierOpt *frame_pix_fmts;
321 int nb_frame_pix_fmts;
324 int64_t input_ts_offset;
327 SpecifierOpt *ts_scale;
329 SpecifierOpt *dump_attachment;
330 int nb_dump_attachment;
333 StreamMap *stream_maps;
335 /* first item specifies output metadata, second is input */
336 MetadataMap (*meta_data_maps)[2];
337 int nb_meta_data_maps;
338 int metadata_global_manual;
339 int metadata_streams_manual;
340 int metadata_chapters_manual;
341 const char **attachments;
344 int chapters_input_file;
346 int64_t recording_time;
347 uint64_t limit_filesize;
353 int subtitle_disable;
356 /* indexed by output file stream index */
360 SpecifierOpt *metadata;
362 SpecifierOpt *max_frames;
364 SpecifierOpt *bitstream_filters;
365 int nb_bitstream_filters;
366 SpecifierOpt *codec_tags;
368 SpecifierOpt *sample_fmts;
370 SpecifierOpt *qscale;
372 SpecifierOpt *forced_key_frames;
373 int nb_forced_key_frames;
374 SpecifierOpt *force_fps;
376 SpecifierOpt *frame_aspect_ratios;
377 int nb_frame_aspect_ratios;
378 SpecifierOpt *rc_overrides;
380 SpecifierOpt *intra_matrices;
381 int nb_intra_matrices;
382 SpecifierOpt *inter_matrices;
383 int nb_inter_matrices;
384 SpecifierOpt *top_field_first;
385 int nb_top_field_first;
386 SpecifierOpt *metadata_map;
388 SpecifierOpt *presets;
390 SpecifierOpt *copy_initial_nonkeyframes;
391 int nb_copy_initial_nonkeyframes;
392 SpecifierOpt *filters;
396 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
399 for (i = 0; i < o->nb_ ## name; i++) {\
400 char *spec = o->name[i].specifier;\
401 if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
402 outvar = o->name[i].u.type;\
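/* Illustrative use of the macro above (a sketch, assuming a local
 * OptionsContext *o, an AVFormatContext *oc and an AVStream *st in scope):
 *
 *     char *frame_rate = NULL;
 *     MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
 *
 * "name" selects the SpecifierOpt array/counter pair in OptionsContext
 * (here frame_rates / nb_frame_rates), "type" the union member of
 * SpecifierOpt (str, i, i64, f, ...); the last stream specifier matching
 * "st" wins. */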
408 static void reset_options(OptionsContext *o)
410 const OptionDef *po = options;
413 /* all OPT_SPEC and OPT_STRING can be freed in a generic way */
415 void *dst = (uint8_t*)o + po->u.off;
417 if (po->flags & OPT_SPEC) {
418 SpecifierOpt **so = dst;
419 int i, *count = (int*)(so + 1);
420 for (i = 0; i < *count; i++) {
421 av_freep(&(*so)[i].specifier);
422 if (po->flags & OPT_STRING)
423 av_freep(&(*so)[i].u.str);
427 } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
432 for (i = 0; i < o->nb_stream_maps; i++)
433 av_freep(&o->stream_maps[i].linklabel);
434 av_freep(&o->stream_maps);
435 av_freep(&o->meta_data_maps);
436 av_freep(&o->streamid_map);
438 memset(o, 0, sizeof(*o));
440 o->mux_max_delay = 0.7;
441 o->recording_time = INT64_MAX;
442 o->limit_filesize = UINT64_MAX;
443 o->chapters_input_file = INT_MAX;
449 static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
451 FrameBuffer *buf = av_mallocz(sizeof(*buf));
453 const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
454 int h_chroma_shift, v_chroma_shift;
455 int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
456 int w = s->width, h = s->height;
459 return AVERROR(ENOMEM);
461 if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
466 avcodec_align_dimensions(s, &w, &h);
467 if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
468 s->pix_fmt, 32)) < 0) {
472 /* XXX this shouldn't be needed, but some tests break without this line
473 * those decoders are buggy and need to be fixed.
474 * the following tests fail:
475 * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
477 memset(buf->base[0], 128, ret);
479 avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
480 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
481 const int h_shift = i==0 ? 0 : h_chroma_shift;
482 const int v_shift = i==0 ? 0 : v_chroma_shift;
483 if (s->flags & CODEC_FLAG_EMU_EDGE)
484 buf->data[i] = buf->base[i];
486 buf->data[i] = buf->base[i] +
487 FFALIGN((buf->linesize[i]*edge >> v_shift) +
488 (pixel_size*edge >> h_shift), 32);
492 buf->pix_fmt = s->pix_fmt;
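/* Note on the pointer setup above: when the codec cannot do edge emulation,
 * each plane pointer is advanced past an "edge"-pixel border -- vertically by
 * linesize * edge (reduced by the chroma shift for chroma planes) and
 * horizontally by pixel_size * edge bytes -- and the resulting offset is
 * aligned to 32 bytes so the decoder may write outside the visible area. */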
499 static void free_buffer_pool(InputStream *ist)
501 FrameBuffer *buf = ist->buffer_pool;
503 ist->buffer_pool = buf->next;
504 av_freep(&buf->base[0]);
506 buf = ist->buffer_pool;
510 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
512 av_assert0(buf->refcount);
514 if (!buf->refcount) {
515 buf->next = ist->buffer_pool;
516 ist->buffer_pool = buf;
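/* The pool is a simple LIFO free list: a buffer whose refcount drops to zero
 * is pushed back onto ist->buffer_pool and handed out again by
 * codec_get_buffer() below, avoiding per-frame allocations. */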
520 static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
522 InputStream *ist = s->opaque;
526 if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
529 buf = ist->buffer_pool;
530 ist->buffer_pool = buf->next;
532 if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
533 av_freep(&buf->base[0]);
535 if ((ret = alloc_buffer(ist, s, &buf)) < 0)
541 frame->type = FF_BUFFER_TYPE_USER;
542 frame->extended_data = frame->data;
543 frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
544 frame->width = buf->w;
545 frame->height = buf->h;
546 frame->format = buf->pix_fmt;
547 frame->sample_aspect_ratio = s->sample_aspect_ratio;
549 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
550 frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
551 frame->data[i] = buf->data[i];
552 frame->linesize[i] = buf->linesize[i];
558 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
560 InputStream *ist = s->opaque;
561 FrameBuffer *buf = frame->opaque;
564 for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
565 frame->data[i] = NULL;
567 unref_buffer(ist, buf);
570 static void filter_release_buffer(AVFilterBuffer *fb)
572 FrameBuffer *buf = fb->priv;
574 unref_buffer(buf->ist, buf);
578 * Define a function for building a string containing a list of
581 #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator) \
582 static char *choose_ ## var ## s(OutputStream *ost) \
584 if (ost->st->codec->var != none) { \
585 get_name(ost->st->codec->var); \
586 return av_strdup(name); \
587 } else if (ost->enc->supported_list) { \
589 AVIOContext *s = NULL; \
593 if (avio_open_dyn_buf(&s) < 0) \
596 for (p = ost->enc->supported_list; *p != none; p++) { \
598 avio_printf(s, "%s" separator, name); \
600 len = avio_close_dyn_buf(s, &ret); \
607 #define GET_PIX_FMT_NAME(pix_fmt)\
608 const char *name = av_get_pix_fmt_name(pix_fmt);
610 DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
611 GET_PIX_FMT_NAME, ":")
613 #define GET_SAMPLE_FMT_NAME(sample_fmt)\
614 const char *name = av_get_sample_fmt_name(sample_fmt)
616 DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
617 AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")
619 #define GET_SAMPLE_RATE_NAME(rate)\
621 snprintf(name, sizeof(name), "%d", rate);
623 DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
624 GET_SAMPLE_RATE_NAME, ",")
626 #define GET_CH_LAYOUT_NAME(ch_layout)\
628 snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
630 DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
631 GET_CH_LAYOUT_NAME, ",")
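/* Each DEF_CHOOSE_FORMAT() instantiation above defines a helper --
 * choose_pix_fmts(), choose_sample_fmts(), choose_sample_rates(),
 * choose_channel_layouts() -- returning a heap-allocated, separator-joined
 * list such as "yuv420p:yuv422p" or "s16,flt" (illustrative values), or NULL
 * when the encoder imposes no constraint; callers av_free() the result. */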
633 static int configure_audio_filters(FilterGraph *fg, AVFilterContext **in_filter,
634 AVFilterContext **out_filter)
636 InputStream *ist = fg->inputs[0]->ist;
637 OutputStream *ost = fg->outputs[0]->ost;
638 AVCodecContext *codec = ost->st->codec;
639 AVCodecContext *icodec = ist->st->codec;
640 char *sample_fmts, *sample_rates, *channel_layouts;
644 avfilter_graph_free(&fg->graph);
645 if (!(fg->graph = avfilter_graph_alloc()))
646 return AVERROR(ENOMEM);
648 snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:"
649 "channel_layout=0x%"PRIx64, ist->st->time_base.num,
650 ist->st->time_base.den, icodec->sample_rate,
651 av_get_sample_fmt_name(icodec->sample_fmt), icodec->channel_layout);
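    /* The args string built above looks like, with illustrative values:
     *   time_base=1/48000:sample_rate=48000:sample_fmt=s16:channel_layout=0x3
     */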
652 ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
653 avfilter_get_by_name("abuffer"),
654 "src", args, NULL, fg->graph);
658 ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
659 avfilter_get_by_name("abuffersink"),
660 "out", NULL, NULL, fg->graph);
664 *in_filter = fg->inputs[0]->filter;
665 *out_filter = fg->outputs[0]->filter;
667 if (codec->channels && !codec->channel_layout)
668 codec->channel_layout = av_get_default_channel_layout(codec->channels);
670 sample_fmts = choose_sample_fmts(ost);
671 sample_rates = choose_sample_rates(ost);
672 channel_layouts = choose_channel_layouts(ost);
673 if (sample_fmts || sample_rates || channel_layouts) {
674 AVFilterContext *format;
679 len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
682 len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
685 len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
689 av_freep(&sample_fmts);
690 av_freep(&sample_rates);
691 av_freep(&channel_layouts);
693 ret = avfilter_graph_create_filter(&format,
694 avfilter_get_by_name("aformat"),
695 "aformat", args, NULL, fg->graph);
699 ret = avfilter_link(format, 0, fg->outputs[0]->filter, 0);
703 *out_filter = format;
706 if (audio_sync_method > 0) {
707 AVFilterContext *async;
711 av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Use the "

712 "asyncts audio filter instead.\n");
714 if (audio_sync_method > 1)
715 len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
716 "max_comp=%d:", audio_sync_method);
717 snprintf(args + len, sizeof(args) - len, "min_delta=%f",
718 audio_drift_threshold);
720 ret = avfilter_graph_create_filter(&async,
721 avfilter_get_by_name("asyncts"),
722 "async", args, NULL, fg->graph);
726 ret = avfilter_link(*in_filter, 0, async, 0);
736 static int configure_video_filters(FilterGraph *fg, AVFilterContext **in_filter,
737 AVFilterContext **out_filter)
739 InputStream *ist = fg->inputs[0]->ist;
740 OutputStream *ost = fg->outputs[0]->ost;
741 AVFilterContext *filter;
742 AVCodecContext *codec = ost->st->codec;
744 AVRational sample_aspect_ratio;
748 if (ist->st->sample_aspect_ratio.num) {
749 sample_aspect_ratio = ist->st->sample_aspect_ratio;
751 sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
753 snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
754 ist->st->codec->height, ist->st->codec->pix_fmt,
755 ist->st->time_base.num, ist->st->time_base.den,
756 sample_aspect_ratio.num, sample_aspect_ratio.den);
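    /* Positional arguments for the "buffer" source in this libavfilter
     * version: width:height:pix_fmt:time_base.num:time_base.den:sar.num:sar.den,
     * e.g. "1280:720:0:1:25:1:1" (illustrative; pix_fmt is its enum value). */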
758 ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
759 avfilter_get_by_name("buffer"),
760 "src", args, NULL, fg->graph);
763 ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
764 avfilter_get_by_name("buffersink"),
765 "out", NULL, NULL, fg->graph);
768 *in_filter = fg->inputs[0]->filter;
769 *out_filter = fg->outputs[0]->filter;
771 if (codec->width || codec->height) {
772 snprintf(args, 255, "%d:%d:flags=0x%X",
775 (unsigned)ost->sws_flags);
776 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
777 NULL, args, NULL, fg->graph)) < 0)
779 if ((ret = avfilter_link(*in_filter, 0, filter, 0)) < 0)
784 if (ost->frame_rate.num) {
785 snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
786 ost->frame_rate.den);
787 ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("fps"),
788 "fps", args, NULL, fg->graph);
792 ret = avfilter_link(filter, 0, *out_filter, 0);
795 *out_filter = filter;
798 if ((pix_fmts = choose_pix_fmts(ost))) {
799 if ((ret = avfilter_graph_create_filter(&filter,
800 avfilter_get_by_name("format"),
801 "format", pix_fmts, NULL,
804 if ((ret = avfilter_link(filter, 0, *out_filter, 0)) < 0)
807 *out_filter = filter;
811 snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
812 fg->graph->scale_sws_opts = av_strdup(args);
817 static int configure_simple_filtergraph(FilterGraph *fg)
819 OutputStream *ost = fg->outputs[0]->ost;
820 AVFilterContext *in_filter, *out_filter;
823 avfilter_graph_free(&fg->graph);
824 fg->graph = avfilter_graph_alloc();
826 switch (ost->st->codec->codec_type) {
827 case AVMEDIA_TYPE_VIDEO:
828 ret = configure_video_filters(fg, &in_filter, &out_filter);
830 case AVMEDIA_TYPE_AUDIO:
831 ret = configure_audio_filters(fg, &in_filter, &out_filter);
833 default: av_assert0(0);
839 AVFilterInOut *outputs = avfilter_inout_alloc();
840 AVFilterInOut *inputs = avfilter_inout_alloc();
842 outputs->name = av_strdup("in");
843 outputs->filter_ctx = in_filter;
844 outputs->pad_idx = 0;
845 outputs->next = NULL;
847 inputs->name = av_strdup("out");
848 inputs->filter_ctx = out_filter;
852 if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
855 if ((ret = avfilter_link(in_filter, 0, out_filter, 0)) < 0)
859 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
862 ost->filter = fg->outputs[0];
867 static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
869 FilterGraph *fg = av_mallocz(sizeof(*fg));
873 fg->index = nb_filtergraphs;
875 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
877 if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
879 fg->outputs[0]->ost = ost;
880 fg->outputs[0]->graph = fg;
882 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
884 if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
886 fg->inputs[0]->ist = ist;
887 fg->inputs[0]->graph = fg;
889 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
890 &ist->nb_filters, ist->nb_filters + 1);
891 ist->filters[ist->nb_filters - 1] = fg->inputs[0];
893 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
894 &nb_filtergraphs, nb_filtergraphs + 1);
895 filtergraphs[nb_filtergraphs - 1] = fg;
900 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
903 enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
906 // TODO: support other filter types
907 if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
908 av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
917 int file_idx = strtol(in->name, &p, 0);
919 if (file_idx < 0 || file_idx >= nb_input_files) {
920 av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
921 file_idx, fg->graph_desc);
924 s = input_files[file_idx]->ctx;
926 for (i = 0; i < s->nb_streams; i++) {
927 if (s->streams[i]->codec->codec_type != type)
929 if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
935 av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
936 "matches no streams.\n", p, fg->graph_desc);
939 ist = input_streams[input_files[file_idx]->ist_index + st->index];
941 /* find the first unused stream of corresponding type */
942 for (i = 0; i < nb_input_streams; i++) {
943 ist = input_streams[i];
944 if (ist->st->codec->codec_type == type && ist->discard)
947 if (i == nb_input_streams) {
948 av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
949 "unlabeled input pad %d on filter %s", in->pad_idx,
950 in->filter_ctx->name);
955 ist->decoding_needed = 1;
956 ist->st->discard = AVDISCARD_NONE;
958 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
959 &fg->nb_inputs, fg->nb_inputs + 1);
960 if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
962 fg->inputs[fg->nb_inputs - 1]->ist = ist;
963 fg->inputs[fg->nb_inputs - 1]->graph = fg;
965 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
966 &ist->nb_filters, ist->nb_filters + 1);
967 ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
970 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
973 AVCodecContext *codec = ofilter->ost->st->codec;
974 AVFilterContext *last_filter = out->filter_ctx;
975 int pad_idx = out->pad_idx;
979 ret = avfilter_graph_create_filter(&ofilter->filter,
980 avfilter_get_by_name("buffersink"),
981 "out", NULL, pix_fmts, fg->graph);
985 if (codec->width || codec->height) {
987 AVFilterContext *filter;
989 snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
992 (unsigned)ofilter->ost->sws_flags);
993 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
994 NULL, args, NULL, fg->graph)) < 0)
996 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
999 last_filter = filter;
1003 if ((pix_fmts = choose_pix_fmts(ofilter->ost))) {
1004 AVFilterContext *filter;
1005 if ((ret = avfilter_graph_create_filter(&filter,
1006 avfilter_get_by_name("format"),
1007 "format", pix_fmts, NULL,
1010 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1013 last_filter = filter;
1015 av_freep(&pix_fmts);
1018 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1024 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
1026 OutputStream *ost = ofilter->ost;
1027 AVCodecContext *codec = ost->st->codec;
1028 AVFilterContext *last_filter = out->filter_ctx;
1029 int pad_idx = out->pad_idx;
1030 char *sample_fmts, *sample_rates, *channel_layouts;
1033 ret = avfilter_graph_create_filter(&ofilter->filter,
1034 avfilter_get_by_name("abuffersink"),
1035 "out", NULL, NULL, fg->graph);
1039 if (codec->channels && !codec->channel_layout)
1040 codec->channel_layout = av_get_default_channel_layout(codec->channels);
1042 sample_fmts = choose_sample_fmts(ost);
1043 sample_rates = choose_sample_rates(ost);
1044 channel_layouts = choose_channel_layouts(ost);
1045 if (sample_fmts || sample_rates || channel_layouts) {
1046 AVFilterContext *format;
1051 len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
1054 len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
1056 if (channel_layouts)
1057 len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
1061 av_freep(&sample_fmts);
1062 av_freep(&sample_rates);
1063 av_freep(&channel_layouts);
1065 ret = avfilter_graph_create_filter(&format,
1066 avfilter_get_by_name("aformat"),
1067 "aformat", args, NULL, fg->graph);
1071 ret = avfilter_link(last_filter, pad_idx, format, 0);
1075 last_filter = format;
1079 if (audio_sync_method > 0) {
1080 AVFilterContext *async;
1084 av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Use the "
1085 "asyncts audio filter instead.\n");
1087 if (audio_sync_method > 1)
1088 len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
1089 "max_comp=%d:", audio_sync_method);
1090 snprintf(args + len, sizeof(args) - len, "min_delta=%f",
1091 audio_drift_threshold);
1093 ret = avfilter_graph_create_filter(&async,
1094 avfilter_get_by_name("asyncts"),
1095 "async", args, NULL, fg->graph);
1099 ret = avfilter_link(last_filter, pad_idx, async, 0);
1103 last_filter = async;
1107 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1113 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
1115 switch (out->filter_ctx->output_pads[out->pad_idx].type) {
1116 case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
1117 case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
1118 default: av_assert0(0);
1122 static int configure_complex_filter(FilterGraph *fg)
1124 AVFilterInOut *inputs, *outputs, *cur;
1125 int ret, i, init = !fg->graph;
1127 avfilter_graph_free(&fg->graph);
1128 if (!(fg->graph = avfilter_graph_alloc()))
1129 return AVERROR(ENOMEM);
1131 if ((ret = avfilter_graph_parse2(fg->graph, fg->graph_desc, &inputs, &outputs)) < 0)
1134 for (cur = inputs; init && cur; cur = cur->next)
1135 init_input_filter(fg, cur);
1137 for (cur = inputs, i = 0; cur; cur = cur->next, i++) {
1138 InputFilter *ifilter = fg->inputs[i];
1139 InputStream *ist = ifilter->ist;
1144 switch (cur->filter_ctx->input_pads[cur->pad_idx].type) {
1145 case AVMEDIA_TYPE_VIDEO:
1146 sar = ist->st->sample_aspect_ratio.num ?
1147 ist->st->sample_aspect_ratio :
1148 ist->st->codec->sample_aspect_ratio;
1149 snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
1150 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
1152 filter = avfilter_get_by_name("buffer");
1154 case AVMEDIA_TYPE_AUDIO:
1155 snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:"
1156 "sample_fmt=%s:channel_layout=0x%"PRIx64,
1157 ist->st->time_base.num, ist->st->time_base.den,
1158 ist->st->codec->sample_rate,
1159 av_get_sample_fmt_name(ist->st->codec->sample_fmt),
1160 ist->st->codec->channel_layout);
1161 filter = avfilter_get_by_name("abuffer");
1167 if ((ret = avfilter_graph_create_filter(&ifilter->filter,
1169 args, NULL, fg->graph)) < 0)
1171 if ((ret = avfilter_link(ifilter->filter, 0,
1172 cur->filter_ctx, cur->pad_idx)) < 0)
1175 avfilter_inout_free(&inputs);
1178 /* we already know the mappings between lavfi outputs and output streams,
1179 * so we can finish the setup */
1180 for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1181 configure_output_filter(fg, fg->outputs[i], cur);
1182 avfilter_inout_free(&outputs);
1184 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1187 /* wait until output mappings are processed */
1188 for (cur = outputs; cur;) {
1189 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
1190 &fg->nb_outputs, fg->nb_outputs + 1);
1191 if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
1193 fg->outputs[fg->nb_outputs - 1]->graph = fg;
1194 fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
1196 fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
1203 static int configure_complex_filters(void)
1207 for (i = 0; i < nb_filtergraphs; i++)
1208 if (!filtergraphs[i]->graph &&
1209 (ret = configure_complex_filter(filtergraphs[i])) < 0)
1214 static int configure_filtergraph(FilterGraph *fg)
1216 return fg->graph_desc ? configure_complex_filter(fg) :
1217 configure_simple_filtergraph(fg);
1220 static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
1223 for (i = 0; i < fg->nb_inputs; i++)
1224 if (fg->inputs[i]->ist == ist)
1229 static void term_exit(void)
1231 av_log(NULL, AV_LOG_QUIET, "");
1234 static volatile int received_sigterm = 0;
1235 static volatile int received_nb_signals = 0;
1238 sigterm_handler(int sig)
1240 received_sigterm = sig;
1241 received_nb_signals++;
1245 static void term_init(void)
1247 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
1248 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
1250 signal(SIGXCPU, sigterm_handler);
1254 static int decode_interrupt_cb(void *ctx)
1256 return received_nb_signals > 1;
1259 static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
1261 void exit_program(int ret)
1265 for (i = 0; i < nb_filtergraphs; i++) {
1266 avfilter_graph_free(&filtergraphs[i]->graph);
1267 for (j = 0; j < filtergraphs[i]->nb_inputs; j++)
1268 av_freep(&filtergraphs[i]->inputs[j]);
1269 av_freep(&filtergraphs[i]->inputs);
1270 for (j = 0; j < filtergraphs[i]->nb_outputs; j++)
1271 av_freep(&filtergraphs[i]->outputs[j]);
1272 av_freep(&filtergraphs[i]->outputs);
1273 av_freep(&filtergraphs[i]);
1275 av_freep(&filtergraphs);
1278 for (i = 0; i < nb_output_files; i++) {
1279 AVFormatContext *s = output_files[i]->ctx;
1280 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
1282 avformat_free_context(s);
1283 av_dict_free(&output_files[i]->opts);
1284 av_freep(&output_files[i]);
1286 for (i = 0; i < nb_output_streams; i++) {
1287 AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
1289 AVBitStreamFilterContext *next = bsfc->next;
1290 av_bitstream_filter_close(bsfc);
1293 output_streams[i]->bitstream_filters = NULL;
1295 av_freep(&output_streams[i]->avfilter);
1296 av_freep(&output_streams[i]->filtered_frame);
1297 av_freep(&output_streams[i]);
1299 for (i = 0; i < nb_input_files; i++) {
1300 avformat_close_input(&input_files[i]->ctx);
1301 av_freep(&input_files[i]);
1303 for (i = 0; i < nb_input_streams; i++) {
1304 av_freep(&input_streams[i]->decoded_frame);
1305 av_dict_free(&input_streams[i]->opts);
1306 free_buffer_pool(input_streams[i]);
1307 av_freep(&input_streams[i]->filters);
1308 av_freep(&input_streams[i]);
1312 fclose(vstats_file);
1313 av_free(vstats_filename);
1315 av_freep(&input_streams);
1316 av_freep(&input_files);
1317 av_freep(&output_streams);
1318 av_freep(&output_files);
1323 avformat_network_deinit();
1325 if (received_sigterm) {
1326 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
1327 (int) received_sigterm);
1334 static void assert_avoptions(AVDictionary *m)
1336 AVDictionaryEntry *t;
1337 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1338 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
1343 static void assert_codec_experimental(AVCodecContext *c, int encoder)
1345 const char *codec_string = encoder ? "encoder" : "decoder";
1347 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
1348 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1349 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
1350 "results.\nAdd '-strict experimental' if you want to use it.\n",
1351 codec_string, c->codec->name);
1352 codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
1353 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
1354 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
1355 codec_string, codec->name);
1361 * Update the requested input sample format based on the output sample format.
1362 * This is currently only used to request float output from decoders which
1363 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
1364 * Ideally this will be removed in the future when decoders do not do format
1365 * conversion and only output in their native format.
1367 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
1368 AVCodecContext *enc)
1370 /* if sample formats match or a decoder sample format has already been
1371 requested, just return */
1372 if (enc->sample_fmt == dec->sample_fmt ||
1373 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
1376 /* if decoder supports more than one output format */
1377 if (dec_codec && dec_codec->sample_fmts &&
1378 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
1379 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
1380 const enum AVSampleFormat *p;
1381 int min_dec = -1, min_inc = -1;
1383 /* find a matching sample format in the encoder */
1384 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
1385 if (*p == enc->sample_fmt) {
1386 dec->request_sample_fmt = *p;
1388 } else if (*p > enc->sample_fmt) {
1389 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
1391 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
1394 /* if none match, provide the one that matches quality closest */
1395 dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
1396 enc->sample_fmt - min_dec;
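    /* Example: an encoder wanting AV_SAMPLE_FMT_FLT with a decoder offering
     * { S16, FLT } gets an exact match and FLT is requested; without an exact
     * match the fallback above is meant to request the supported format whose
     * enum value is nearest to the encoder's. */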
1400 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
1402 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
1403 AVCodecContext *avctx = ost->st->codec;
1407 * Audio encoders may split the packets -- #frames in != #packets out.
1408 * But there is no reordering, so we can limit the number of output packets
1409 * by simply dropping them here.
1410 * Counting encoded video frames needs to be done separately because of
1411 * reordering, see do_video_out()
1413 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
1414 if (ost->frame_number >= ost->max_frames) {
1415 av_free_packet(pkt);
1418 ost->frame_number++;
1422 AVPacket new_pkt = *pkt;
1423 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
1424 &new_pkt.data, &new_pkt.size,
1425 pkt->data, pkt->size,
1426 pkt->flags & AV_PKT_FLAG_KEY);
1428 av_free_packet(pkt);
1429 new_pkt.destruct = av_destruct_packet;
1431 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
1432 bsfc->filter->name, pkt->stream_index,
1433 avctx->codec ? avctx->codec->name : "copy");
1443 pkt->stream_index = ost->index;
1444 ret = av_interleaved_write_frame(s, pkt);
1446 print_error("av_interleaved_write_frame()", ret);
1451 static int check_recording_time(OutputStream *ost)
1453 OutputFile *of = output_files[ost->file_index];
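    /* The comparison below uses av_compare_ts() so the stream position
     * (sync_opts - first_pts, in the encoder time base) can be checked against
     * recording_time (in AV_TIME_BASE units, i.e. microseconds) without an
     * explicit rescale. */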
1455 if (of->recording_time != INT64_MAX &&
1456 av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
1457 AV_TIME_BASE_Q) >= 0) {
1458 ost->is_past_recording_time = 1;
1464 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
1467 AVCodecContext *enc = ost->st->codec;
1471 av_init_packet(&pkt);
1475 if (!check_recording_time(ost))
1478 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1479 frame->pts = ost->sync_opts;
1480 ost->sync_opts = frame->pts + frame->nb_samples;
1482 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
1483 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1488 if (pkt.pts != AV_NOPTS_VALUE)
1489 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1490 if (pkt.dts != AV_NOPTS_VALUE)
1491 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1492 if (pkt.duration > 0)
1493 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
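    /* The encoder emits timestamps in enc->time_base (for audio typically
     * 1/sample_rate); they are rescaled above to the muxer stream's time base
     * before the packet is written. */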
1495 write_frame(s, &pkt, ost);
1497 audio_size += pkt.size;
1501 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1503 AVCodecContext *dec;
1504 AVPicture *picture2;
1505 AVPicture picture_tmp;
1508 dec = ist->st->codec;
1510 /* deinterlace : must be done before any resize */
1511 if (do_deinterlace) {
1514 /* create temporary picture */
1515 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1516 buf = av_malloc(size);
1520 picture2 = &picture_tmp;
1521 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1523 if (avpicture_deinterlace(picture2, picture,
1524 dec->pix_fmt, dec->width, dec->height) < 0) {
1525 /* if error, do not deinterlace */
1526 av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
1535 if (picture != picture2)
1536 *picture = *picture2;
1540 static void do_subtitle_out(AVFormatContext *s,
1546 static uint8_t *subtitle_out = NULL;
1547 int subtitle_out_max_size = 1024 * 1024;
1548 int subtitle_out_size, nb, i;
1549 AVCodecContext *enc;
1552 if (pts == AV_NOPTS_VALUE) {
1553 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1559 enc = ost->st->codec;
1561 if (!subtitle_out) {
1562 subtitle_out = av_malloc(subtitle_out_max_size);
1565 /* Note: DVB subtitles need one packet to draw them and another
1566 packet to clear them */
1567 /* XXX: signal it in the codec context ? */
1568 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1573 for (i = 0; i < nb; i++) {
1574 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
1575 if (!check_recording_time(ost))
1578 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1579 // start_display_time is required to be 0
1580 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1581 sub->end_display_time -= sub->start_display_time;
1582 sub->start_display_time = 0;
1583 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1584 subtitle_out_max_size, sub);
1585 if (subtitle_out_size < 0) {
1586 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1590 av_init_packet(&pkt);
1591 pkt.data = subtitle_out;
1592 pkt.size = subtitle_out_size;
1593 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1594 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1595 /* XXX: the pts correction is handled here. Maybe handling
1596 it in the codec would be better */
1598 pkt.pts += 90 * sub->start_display_time;
1600 pkt.pts += 90 * sub->end_display_time;
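            /* start/end_display_time are in milliseconds; multiplying by 90
             * converts them to the 90 kHz clock used by DVB subtitle
             * timestamps (assuming the output stream uses the MPEG 90 kHz
             * time base). */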
1602 write_frame(s, &pkt, ost);
1606 static void do_video_out(AVFormatContext *s,
1608 AVFrame *in_picture,
1609 int *frame_size, float quality)
1611 int ret, format_video_sync;
1613 AVCodecContext *enc = ost->st->codec;
1617 format_video_sync = video_sync_method;
1618 if (format_video_sync == VSYNC_AUTO)
1619 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
1620 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
1621 if (format_video_sync != VSYNC_PASSTHROUGH &&
1622 ost->frame_number &&
1623 in_picture->pts != AV_NOPTS_VALUE &&
1624 in_picture->pts < ost->sync_opts) {
1626 av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1630 if (in_picture->pts == AV_NOPTS_VALUE)
1631 in_picture->pts = ost->sync_opts;
1632 ost->sync_opts = in_picture->pts;
1635 if (!ost->frame_number)
1636 ost->first_pts = in_picture->pts;
1638 av_init_packet(&pkt);
1642 if (!check_recording_time(ost) ||
1643 ost->frame_number >= ost->max_frames)
1646 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1647 enc->codec->id == CODEC_ID_RAWVIDEO) {
1648 /* raw pictures are written as AVPicture structure to
1649 avoid any copies. We support temporarily the older
1651 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1652 enc->coded_frame->top_field_first = in_picture->top_field_first;
1653 pkt.data = (uint8_t *)in_picture;
1654 pkt.size = sizeof(AVPicture);
1655 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1656 pkt.flags |= AV_PKT_FLAG_KEY;
1658 write_frame(s, &pkt, ost);
1661 AVFrame big_picture;
1663 big_picture = *in_picture;
1664 /* better than nothing: use input picture interlaced
1666 big_picture.interlaced_frame = in_picture->interlaced_frame;
1667 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1668 if (ost->top_field_first == -1)
1669 big_picture.top_field_first = in_picture->top_field_first;
1671 big_picture.top_field_first = !!ost->top_field_first;
1674 /* handle same_quant here. This is not correct because it may
1675 not be a global option */
1676 big_picture.quality = quality;
1677 if (!enc->me_threshold)
1678 big_picture.pict_type = 0;
1679 if (ost->forced_kf_index < ost->forced_kf_count &&
1680 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1681 big_picture.pict_type = AV_PICTURE_TYPE_I;
1682 ost->forced_kf_index++;
1684 ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
1686 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1691 if (pkt.pts != AV_NOPTS_VALUE)
1692 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1693 if (pkt.dts != AV_NOPTS_VALUE)
1694 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1696 write_frame(s, &pkt, ost);
1697 *frame_size = pkt.size;
1698 video_size += pkt.size;
1700 /* if two pass, output log */
1701 if (ost->logfile && enc->stats_out) {
1702 fprintf(ost->logfile, "%s", enc->stats_out);
1708 * For video, number of frames in == number of packets out.
1709 * But there may be reordering, so we can't throw away frames on encoder
1710 * flush, we need to limit them here, before they go into encoder.
1712 ost->frame_number++;
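/* d is the mean squared error already normalised by the squared peak value
 * (255*255 for 8-bit video, see the callers), so this returns
 * PSNR = -10 * log10(d) in dB; e.g. d = 0.0001 yields 40 dB. */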
1715 static double psnr(double d)
1717 return -10.0 * log(d) / log(10.0);
1720 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1723 AVCodecContext *enc;
1725 double ti1, bitrate, avg_bitrate;
1727 /* this is executed just the first time do_video_stats is called */
1729 vstats_file = fopen(vstats_filename, "w");
1736 enc = ost->st->codec;
1737 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1738 frame_number = ost->frame_number;
1739 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1740 if (enc->flags&CODEC_FLAG_PSNR)
1741 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1743 fprintf(vstats_file,"f_size= %6d ", frame_size);
1744 /* compute pts value */
1745 ti1 = ost->sync_opts * av_q2d(enc->time_base);
1749 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1750 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1751 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1752 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1753 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1757 /* check for new output on any of the filtergraphs */
1758 static int poll_filters(void)
1760 AVFilterBufferRef *picref;
1761 AVFrame *filtered_frame = NULL;
1764 for (i = 0; i < nb_output_streams; i++) {
1765 OutputStream *ost = output_streams[i];
1766 OutputFile *of = output_files[ost->file_index];
1769 if (!ost->filter || ost->is_past_recording_time)
1772 if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1773 return AVERROR(ENOMEM);
1775 avcodec_get_frame_defaults(ost->filtered_frame);
1776 filtered_frame = ost->filtered_frame;
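        /* In the read below, audio encoders that cannot take arbitrary frame
         * sizes (no CODEC_CAP_VARIABLE_FRAME_SIZE) are fed exactly frame_size
         * samples per read; everything else takes whatever the buffersink
         * currently holds. */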
1779 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1780 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
1781 ret = av_buffersink_read_samples(ost->filter->filter, &picref,
1782 ost->st->codec->frame_size);
1784 ret = av_buffersink_read(ost->filter->filter, &picref);
1789 avfilter_copy_buf_props(filtered_frame, picref);
1790 if (picref->pts != AV_NOPTS_VALUE)
1791 filtered_frame->pts = av_rescale_q(picref->pts,
1792 ost->filter->filter->inputs[0]->time_base,
1793 ost->st->codec->time_base) -
1794 av_rescale_q(of->start_time,
1796 ost->st->codec->time_base);
1798 if (of->start_time && filtered_frame->pts < of->start_time) {
1799 avfilter_unref_buffer(picref);
1803 switch (ost->filter->filter->inputs[0]->type) {
1804 case AVMEDIA_TYPE_VIDEO:
1805 if (!ost->frame_aspect_ratio)
1806 ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
1808 do_video_out(of->ctx, ost, filtered_frame, &frame_size,
1809 same_quant ? ost->last_quality :
1810 ost->st->codec->global_quality);
1811 if (vstats_filename && frame_size)
1812 do_video_stats(of->ctx, ost, frame_size);
1814 case AVMEDIA_TYPE_AUDIO:
1815 do_audio_out(of->ctx, ost, filtered_frame);
1818 // TODO support subtitle filters
1822 avfilter_unref_buffer(picref);
1828 static void print_report(int is_last_report, int64_t timer_start)
1832 AVFormatContext *oc;
1834 AVCodecContext *enc;
1835 int frame_number, vid, i;
1836 double bitrate, ti1, pts;
1837 static int64_t last_time = -1;
1838 static int qp_histogram[52];
1840 if (!print_stats && !is_last_report)
1843 if (!is_last_report) {
1845 /* display the report every 0.5 seconds */
1846 cur_time = av_gettime();
1847 if (last_time == -1) {
1848 last_time = cur_time;
1851 if ((cur_time - last_time) < 500000)
1853 last_time = cur_time;
1857 oc = output_files[0]->ctx;
1859 total_size = avio_size(oc->pb);
1860 if (total_size < 0) // FIXME improve avio_size() so it works with non-seekable output too
1861 total_size = avio_tell(oc->pb);
1866 for (i = 0; i < nb_output_streams; i++) {
1868 ost = output_streams[i];
1869 enc = ost->st->codec;
1870 if (!ost->stream_copy && enc->coded_frame)
1871 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1872 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1873 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1875 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1876 float t = (av_gettime() - timer_start) / 1000000.0;
1878 frame_number = ost->frame_number;
1879 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1880 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
1882 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1886 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1888 for (j = 0; j < 32; j++)
1889 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
1891 if (enc->flags&CODEC_FLAG_PSNR) {
1893 double error, error_sum = 0;
1894 double scale, scale_sum = 0;
1895 char type[3] = { 'Y','U','V' };
1896 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1897 for (j = 0; j < 3; j++) {
1898 if (is_last_report) {
1899 error = enc->error[j];
1900 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1902 error = enc->coded_frame->error[j];
1903 scale = enc->width * enc->height * 255.0 * 255.0;
1909 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1911 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1915 /* compute min output value */
1916 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1917 if ((pts < ti1) && (pts > 0))
1923 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1925 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1926 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1927 (double)total_size / 1024, ti1, bitrate);
1929 if (nb_frames_dup || nb_frames_drop)
1930 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1931 nb_frames_dup, nb_frames_drop);
1933 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
1937 if (is_last_report) {
1938 int64_t raw= audio_size + video_size + extra_size;
1939 av_log(NULL, AV_LOG_INFO, "\n");
1940 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1941 video_size / 1024.0,
1942 audio_size / 1024.0,
1943 extra_size / 1024.0,
1944 100.0 * (total_size - raw) / raw
1949 static void flush_encoders(void)
1953 for (i = 0; i < nb_output_streams; i++) {
1954 OutputStream *ost = output_streams[i];
1955 AVCodecContext *enc = ost->st->codec;
1956 AVFormatContext *os = output_files[ost->file_index]->ctx;
1957 int stop_encoding = 0;
1959 if (!ost->encoding_needed)
1962 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1964 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
1968 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1972 switch (ost->st->codec->codec_type) {
1973 case AVMEDIA_TYPE_AUDIO:
1974 encode = avcodec_encode_audio2;
1978 case AVMEDIA_TYPE_VIDEO:
1979 encode = avcodec_encode_video2;
1990 av_init_packet(&pkt);
1994 ret = encode(enc, &pkt, NULL, &got_packet);
1996 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
2000 if (ost->logfile && enc->stats_out) {
2001 fprintf(ost->logfile, "%s", enc->stats_out);
2007 if (pkt.pts != AV_NOPTS_VALUE)
2008 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
2009 if (pkt.dts != AV_NOPTS_VALUE)
2010 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
2011 write_frame(os, &pkt, ost);
2021 * Check whether a packet from ist should be written into ost at this time
2023 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2025 OutputFile *of = output_files[ost->file_index];
2026 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2028 if (ost->source_index != ist_index)
2031 if (of->start_time && ist->last_dts < of->start_time)
2037 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2039 OutputFile *of = output_files[ost->file_index];
2040 int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
2043 av_init_packet(&opkt);
2045 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2046 !ost->copy_initial_nonkeyframes)
2049 if (of->recording_time != INT64_MAX &&
2050 ist->last_dts >= of->recording_time + of->start_time) {
2051 ost->is_past_recording_time = 1;
2055 /* force the input stream PTS */
2056 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
2057 audio_size += pkt->size;
2058 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2059 video_size += pkt->size;
2063 if (pkt->pts != AV_NOPTS_VALUE)
2064 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
2066 opkt.pts = AV_NOPTS_VALUE;
2068 if (pkt->dts == AV_NOPTS_VALUE)
2069 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
2071 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
2072 opkt.dts -= ost_tb_start_time;
2074 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
2075 opkt.flags = pkt->flags;
2077 // FIXME remove the following 2 lines; they should be replaced by the bitstream filters
2078 if ( ost->st->codec->codec_id != CODEC_ID_H264
2079 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
2080 && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
2081 && ost->st->codec->codec_id != CODEC_ID_VC1
2083 if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
2084 opkt.destruct = av_destruct_packet;
2086 opkt.data = pkt->data;
2087 opkt.size = pkt->size;
2090 write_frame(of->ctx, &opkt, ost);
2091 ost->st->codec->frame_number++;
2092 av_free_packet(&opkt);
2095 static void rate_emu_sleep(InputStream *ist)
2097 if (input_files[ist->file_index]->rate_emu) {
2098 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2099 int64_t now = av_gettime() - ist->start;
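        /* AV_TIME_BASE is 1000000, so last_dts is effectively already in
         * microseconds here; it is compared against the elapsed wall-clock
         * time (av_gettime() also returns microseconds) to pace reading when
         * rate emulation (the -re option) is enabled for the input file. */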
2105 static int guess_input_channel_layout(InputStream *ist)
2107 AVCodecContext *dec = ist->st->codec;
2109 if (!dec->channel_layout) {
2110 char layout_name[256];
2112 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2113 if (!dec->channel_layout)
2115 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2116 dec->channels, dec->channel_layout);
2117 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2118 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2123 static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2125 AVFrame *decoded_frame;
2126 AVCodecContext *avctx = ist->st->codec;
2127 int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
2128 int i, ret, resample_changed;
2130 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2131 return AVERROR(ENOMEM);
2133 avcodec_get_frame_defaults(ist->decoded_frame);
2134 decoded_frame = ist->decoded_frame;
2136 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
2142 /* no audio frame */
2144 for (i = 0; i < ist->nb_filters; i++)
2145 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2149 /* if the decoder provides a pts, use it instead of the last packet pts.
2150 the decoder could be delaying output by a packet or more. */
2151 if (decoded_frame->pts != AV_NOPTS_VALUE)
2152 ist->next_dts = decoded_frame->pts;
2153 else if (pkt->pts != AV_NOPTS_VALUE) {
2154 decoded_frame->pts = pkt->pts;
2155 pkt->pts = AV_NOPTS_VALUE;
2158 // preprocess audio (volume)
2159 if (audio_volume != 256) {
2160 int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
2161 void *samples = decoded_frame->data[0];
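        /* audio_volume is an 8-bit fixed-point gain: 256 means unity (set
         * with -vol). Signed integer samples are scaled below as
         * (sample * audio_volume + 128) >> 8, the +128 giving rounding;
         * unsigned 8-bit samples are re-biased around 128 first, and
         * float/double samples use audio_volume / 256.0 as the scale factor.
         * E.g. audio_volume = 512 doubles the amplitude. */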
2162 switch (avctx->sample_fmt) {
2163 case AV_SAMPLE_FMT_U8:
2165 uint8_t *volp = samples;
2166 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2167 int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
2168 *volp++ = av_clip_uint8(v);
2172 case AV_SAMPLE_FMT_S16:
2174 int16_t *volp = samples;
2175 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2176 int v = ((*volp) * audio_volume + 128) >> 8;
2177 *volp++ = av_clip_int16(v);
2181 case AV_SAMPLE_FMT_S32:
2183 int32_t *volp = samples;
2184 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2185 int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
2186 *volp++ = av_clipl_int32(v);
2190 case AV_SAMPLE_FMT_FLT:
2192 float *volp = samples;
2193 float scale = audio_volume / 256.f;
2194 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2199 case AV_SAMPLE_FMT_DBL:
2201 double *volp = samples;
2202 double scale = audio_volume / 256.;
2203 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2209 av_log(NULL, AV_LOG_FATAL,
2210 "Audio volume adjustment on sample format %s is not supported.\n",
2211 av_get_sample_fmt_name(ist->st->codec->sample_fmt));
2216 rate_emu_sleep(ist);
2218 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2219 ist->resample_channels != avctx->channels ||
2220 ist->resample_channel_layout != decoded_frame->channel_layout ||
2221 ist->resample_sample_rate != decoded_frame->sample_rate;
2222 if (resample_changed) {
2223 char layout1[64], layout2[64];
2225 if (!guess_input_channel_layout(ist)) {
2226 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2227 "layout for Input Stream #%d.%d\n", ist->file_index,
2231 decoded_frame->channel_layout = avctx->channel_layout;
2233 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2234 ist->resample_channel_layout);
2235 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2236 decoded_frame->channel_layout);
2238 av_log(NULL, AV_LOG_INFO,
2239 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2240 ist->file_index, ist->st->index,
2241 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2242 ist->resample_channels, layout1,
2243 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2244 avctx->channels, layout2);
2246 ist->resample_sample_fmt = decoded_frame->format;
2247 ist->resample_sample_rate = decoded_frame->sample_rate;
2248 ist->resample_channel_layout = decoded_frame->channel_layout;
2249 ist->resample_channels = avctx->channels;
2251 for (i = 0; i < nb_filtergraphs; i++)
2252 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2253 configure_filtergraph(filtergraphs[i]) < 0) {
2254 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2259 for (i = 0; i < ist->nb_filters; i++)
2260 av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
2265 static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2267 AVFrame *decoded_frame;
2268 void *buffer_to_free = NULL;
2269 int i, ret = 0, resample_changed;
2272 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2273 return AVERROR(ENOMEM);
2275 avcodec_get_frame_defaults(ist->decoded_frame);
2276 decoded_frame = ist->decoded_frame;
2278 ret = avcodec_decode_video2(ist->st->codec,
2279 decoded_frame, got_output, pkt);
2283 quality = same_quant ? decoded_frame->quality : 0;
2285 /* no picture yet */
2287 for (i = 0; i < ist->nb_filters; i++)
2288 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2291 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
2292 decoded_frame->pkt_dts);
2294 pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
2296 rate_emu_sleep(ist);
2298 if (ist->st->sample_aspect_ratio.num)
2299 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2301 resample_changed = ist->resample_width != decoded_frame->width ||
2302 ist->resample_height != decoded_frame->height ||
2303 ist->resample_pix_fmt != decoded_frame->format;
2304 if (resample_changed) {
2305 av_log(NULL, AV_LOG_INFO,
2306 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2307 ist->file_index, ist->st->index,
2308 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2309 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2311 ist->resample_width = decoded_frame->width;
2312 ist->resample_height = decoded_frame->height;
2313 ist->resample_pix_fmt = decoded_frame->format;
2315 for (i = 0; i < nb_filtergraphs; i++)
2316 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2317 configure_filtergraph(filtergraphs[i]) < 0) {
2318 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2323 for (i = 0; i < ist->nb_filters; i++) {
2324 // XXX what an ugly hack
2325 if (ist->filters[i]->graph->nb_outputs == 1)
2326 ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
2328 if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
2329 FrameBuffer *buf = decoded_frame->opaque;
2330 AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
2331 decoded_frame->data, decoded_frame->linesize,
2332 AV_PERM_READ | AV_PERM_PRESERVE,
2333 ist->st->codec->width, ist->st->codec->height,
2334 ist->st->codec->pix_fmt);
2336 avfilter_copy_frame_props(fb, decoded_frame);
2337 fb->buf->priv = buf;
2338 fb->buf->free = filter_release_buffer;
2341 av_buffersrc_buffer(ist->filters[i]->filter, fb);
2343 av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
2346 av_free(buffer_to_free);
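/* Decode one subtitle packet and hand the result straight to do_subtitle_out()
 * for every output stream encoding from this input; subtitles bypass the
 * filtergraph machinery. */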
2350 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2352 AVSubtitle subtitle;
2353 int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2354 &subtitle, got_output, pkt);
2360 rate_emu_sleep(ist);
2362 for (i = 0; i < nb_output_streams; i++) {
2363 OutputStream *ost = output_streams[i];
2365 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2368 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
2371 avsubtitle_free(&subtitle);
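/* Process one demuxed packet for an input stream: run it through the
 * audio/video/subtitle decoding path when decoding is needed, keep the
 * stream's dts estimate up to date, and otherwise fall back to streamcopy. */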
2375 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2376 static int output_packet(InputStream *ist, const AVPacket *pkt)
2382 if (ist->next_dts == AV_NOPTS_VALUE)
2383 ist->next_dts = ist->last_dts;
2387 av_init_packet(&avpkt);
2395 if (pkt->dts != AV_NOPTS_VALUE)
2396 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2398 // while we have more to decode or while the decoder did output something on EOF
2399 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2403 ist->last_dts = ist->next_dts;
2405 if (avpkt.size && avpkt.size != pkt->size) {
2406 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2407 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2408 ist->showed_multi_packet_warning = 1;
2411 switch (ist->st->codec->codec_type) {
2412 case AVMEDIA_TYPE_AUDIO:
2413 ret = transcode_audio (ist, &avpkt, &got_output);
2415 case AVMEDIA_TYPE_VIDEO:
2416 ret = transcode_video (ist, &avpkt, &got_output);
2418 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2419 else if (ist->st->r_frame_rate.num)
2420 ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
2421 ist->st->r_frame_rate.num},
2423 else if (ist->st->codec->time_base.num != 0) {
2424 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
2425 ist->st->codec->ticks_per_frame;
2426 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
2429 case AVMEDIA_TYPE_SUBTITLE:
2430 ret = transcode_subtitles(ist, &avpkt, &got_output);
2438 // touch data and size only if not EOF
2448 /* handle stream copy */
2449 if (!ist->decoding_needed) {
2450 rate_emu_sleep(ist);
2451 ist->last_dts = ist->next_dts;
2452 switch (ist->st->codec->codec_type) {
2453 case AVMEDIA_TYPE_AUDIO:
2454 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2455 ist->st->codec->sample_rate;
2457 case AVMEDIA_TYPE_VIDEO:
2458 if (ist->st->codec->time_base.num != 0) {
2459 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2460 ist->next_dts += ((int64_t)AV_TIME_BASE *
2461 ist->st->codec->time_base.num * ticks) /
2462 ist->st->codec->time_base.den;
2467 for (i = 0; pkt && i < nb_output_streams; i++) {
2468 OutputStream *ost = output_streams[i];
2470 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2473 do_streamcopy(ist, ost, pkt);
2479 static void print_sdp(void)
2483 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
2487 for (i = 0; i < nb_output_files; i++)
2488 avc[i] = output_files[i]->ctx;
2490 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
2491 printf("SDP:\n%s\n", sdp);
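/* Open the decoder for one input stream if decoding is needed, install the
 * custom get/release_buffer callbacks for DR1-capable video decoders, and
 * initialize the stream's dts/pts bookkeeping. */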
2496 static int init_input_stream(int ist_index, char *error, int error_len)
2499 InputStream *ist = input_streams[ist_index];
2500 if (ist->decoding_needed) {
2501 AVCodec *codec = ist->dec;
2503 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2504 ist->st->codec->codec_id, ist->file_index, ist->st->index);
2505 return AVERROR(EINVAL);
2508 /* update requested sample format for the decoder based on the
2509 corresponding encoder sample format */
2510 for (i = 0; i < nb_output_streams; i++) {
2511 OutputStream *ost = output_streams[i];
2512 if (ost->source_index == ist_index) {
2513 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
2518 if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
2519 ist->st->codec->get_buffer = codec_get_buffer;
2520 ist->st->codec->release_buffer = codec_release_buffer;
2521 ist->st->codec->opaque = ist;
2524 if (!av_dict_get(ist->opts, "threads", NULL, 0))
2525 av_dict_set(&ist->opts, "threads", "auto", 0);
2526 if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2527 snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2528 ist->file_index, ist->st->index);
2529 return AVERROR(EINVAL);
2531 assert_codec_experimental(ist->st->codec, 0);
2532 assert_avoptions(ist->opts);
2535 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2536 ist->next_dts = AV_NOPTS_VALUE;
2537 init_pts_correction(&ist->pts_ctx);
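/* Return the input stream feeding an output stream: either its directly
 * mapped source, or, for outputs driven by a complex filtergraph, the first
 * filtergraph input of the same media type. */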
2543 static InputStream *get_input_stream(OutputStream *ost)
2545 if (ost->source_index >= 0)
2546 return input_streams[ost->source_index];
2549 FilterGraph *fg = ost->filter->graph;
2552 for (i = 0; i < fg->nb_inputs; i++)
2553 if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
2554 return fg->inputs[i]->ist;
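/* One-time setup before the main loop: derive encoder parameters for every
 * output stream (copied from the input for streamcopy), configure the
 * filtergraphs, open encoders and decoders, write the output headers and
 * print the resulting stream mapping. */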
2560 static int transcode_init(void)
2562 int ret = 0, i, j, k;
2563 AVFormatContext *oc;
2564 AVCodecContext *codec, *icodec;
2570 /* init framerate emulation */
2571 for (i = 0; i < nb_input_files; i++) {
2572 InputFile *ifile = input_files[i];
2573 if (ifile->rate_emu)
2574 for (j = 0; j < ifile->nb_streams; j++)
2575 input_streams[j + ifile->ist_index]->start = av_gettime();
2578 /* output stream init */
2579 for (i = 0; i < nb_output_files; i++) {
2580 oc = output_files[i]->ctx;
2581 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2582 av_dump_format(oc, i, oc->filename, 1);
2583 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2584 return AVERROR(EINVAL);
2588 /* init complex filtergraphs */
2589 for (i = 0; i < nb_filtergraphs; i++)
2590 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2593 /* for each output stream, we compute the right encoding parameters */
2594 for (i = 0; i < nb_output_streams; i++) {
2595 ost = output_streams[i];
2596 oc = output_files[ost->file_index]->ctx;
2597 ist = get_input_stream(ost);
2599 if (ost->attachment_filename)
2602 codec = ost->st->codec;
2605 icodec = ist->st->codec;
2607 ost->st->disposition = ist->st->disposition;
2608 codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2609 codec->chroma_sample_location = icodec->chroma_sample_location;
2612 if (ost->stream_copy) {
2613 uint64_t extra_size;
2615 av_assert0(ist && !ost->filter);
2617 extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2619 if (extra_size > INT_MAX) {
2620 return AVERROR(EINVAL);
2623 /* if stream_copy is selected, no need to decode or encode */
2624 codec->codec_id = icodec->codec_id;
2625 codec->codec_type = icodec->codec_type;
2627 if (!codec->codec_tag) {
2628 if (!oc->oformat->codec_tag ||
2629 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2630 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2631 codec->codec_tag = icodec->codec_tag;
2634 codec->bit_rate = icodec->bit_rate;
2635 codec->rc_max_rate = icodec->rc_max_rate;
2636 codec->rc_buffer_size = icodec->rc_buffer_size;
2637 codec->field_order = icodec->field_order;
2638 codec->extradata = av_mallocz(extra_size);
2639 if (!codec->extradata) {
2640 return AVERROR(ENOMEM);
2642 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2643 codec->extradata_size = icodec->extradata_size;
2645 codec->time_base = icodec->time_base;
2646 codec->time_base.num *= icodec->ticks_per_frame;
2647 av_reduce(&codec->time_base.num, &codec->time_base.den,
2648 codec->time_base.num, codec->time_base.den, INT_MAX);
2650 codec->time_base = ist->st->time_base;
2652 switch (codec->codec_type) {
2653 case AVMEDIA_TYPE_AUDIO:
2654 if (audio_volume != 256) {
2655 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2658 codec->channel_layout = icodec->channel_layout;
2659 codec->sample_rate = icodec->sample_rate;
2660 codec->channels = icodec->channels;
2661 codec->frame_size = icodec->frame_size;
2662 codec->audio_service_type = icodec->audio_service_type;
2663 codec->block_align = icodec->block_align;
2665 case AVMEDIA_TYPE_VIDEO:
2666 codec->pix_fmt = icodec->pix_fmt;
2667 codec->width = icodec->width;
2668 codec->height = icodec->height;
2669 codec->has_b_frames = icodec->has_b_frames;
2670 if (!codec->sample_aspect_ratio.num) {
2671 codec->sample_aspect_ratio =
2672 ost->st->sample_aspect_ratio =
2673 ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2674 ist->st->codec->sample_aspect_ratio.num ?
2675 ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2678 case AVMEDIA_TYPE_SUBTITLE:
2679 codec->width = icodec->width;
2680 codec->height = icodec->height;
2682 case AVMEDIA_TYPE_DATA:
2683 case AVMEDIA_TYPE_ATTACHMENT:
2690 /* should only happen when a default codec is not present. */
2691 snprintf(error, sizeof(error), "Automatic encoder selection "
2692 "failed for output stream #%d:%d. Default encoder for "
2693 "format %s is probably disabled. Please choose an "
2694 "encoder manually.\n", ost->file_index, ost->index,
2696 ret = AVERROR(EINVAL);
2701 ist->decoding_needed = 1;
2702 ost->encoding_needed = 1;
2705      * We want CFR output if and only if one of these is true:
2706      * 1) the user specified an output framerate with -r
2707      * 2) the user specified -vsync cfr
2708      * 3) the output format is CFR and the user didn't force vsync to
2709      *    something other than CFR
2711      * In such a case, set ost->frame_rate.
2713 if (codec->codec_type == AVMEDIA_TYPE_VIDEO &&
2714 !ost->frame_rate.num && ist &&
2715 (video_sync_method == VSYNC_CFR ||
2716 (video_sync_method == VSYNC_AUTO &&
2717 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
2718 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
2719 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2720 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2721 ost->frame_rate = ost->enc->supported_framerates[idx];
2726 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2727 codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
2729 fg = init_simple_filtergraph(ist, ost);
2730 if (configure_simple_filtergraph(fg)) {
2731 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2736 switch (codec->codec_type) {
2737 case AVMEDIA_TYPE_AUDIO:
2738 codec->sample_fmt = ost->filter->filter->inputs[0]->format;
2739 codec->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2740 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2741 codec->channels = av_get_channel_layout_nb_channels(codec->channel_layout);
2742 codec->time_base = (AVRational){ 1, codec->sample_rate };
2744 case AVMEDIA_TYPE_VIDEO:
2745 codec->time_base = ost->filter->filter->inputs[0]->time_base;
2747 codec->width = ost->filter->filter->inputs[0]->w;
2748 codec->height = ost->filter->filter->inputs[0]->h;
2749 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2750 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2751 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2752 ost->filter->filter->inputs[0]->sample_aspect_ratio;
2753 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
2755 if (codec->width != icodec->width ||
2756 codec->height != icodec->height ||
2757 codec->pix_fmt != icodec->pix_fmt) {
2758 codec->bits_per_raw_sample = 0;
2762 case AVMEDIA_TYPE_SUBTITLE:
2763 codec->time_base = (AVRational){1, 1000};
2770 if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2771 char logfilename[1024];
2774 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2775 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
2777 if (!strcmp(ost->enc->name, "libx264")) {
2778 av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2780 if (codec->flags & CODEC_FLAG_PASS1) {
2781 f = fopen(logfilename, "wb");
2783 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2784 logfilename, strerror(errno));
2790 size_t logbuffer_size;
2791 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2792 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2796 codec->stats_in = logbuffer;
2803 /* open each encoder */
2804 for (i = 0; i < nb_output_streams; i++) {
2805 ost = output_streams[i];
2806 if (ost->encoding_needed) {
2807 AVCodec *codec = ost->enc;
2808 AVCodecContext *dec = NULL;
2810 if ((ist = get_input_stream(ost)))
2811 dec = ist->st->codec;
2812 if (dec && dec->subtitle_header) {
2813 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2814 if (!ost->st->codec->subtitle_header) {
2815 ret = AVERROR(ENOMEM);
2818 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2819 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2821 if (!av_dict_get(ost->opts, "threads", NULL, 0))
2822 av_dict_set(&ost->opts, "threads", "auto", 0);
2823 if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2824 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2825 ost->file_index, ost->index);
2826 ret = AVERROR(EINVAL);
2829 assert_codec_experimental(ost->st->codec, 1);
2830 assert_avoptions(ost->opts);
2831 if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2832 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low. "
2833 "It takes bits/s as argument, not kbits/s\n");
2834 extra_size += ost->st->codec->extradata_size;
2836 if (ost->st->codec->me_threshold)
2837 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2841 /* init input streams */
2842 for (i = 0; i < nb_input_streams; i++)
2843 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2846 /* discard unused programs */
2847 for (i = 0; i < nb_input_files; i++) {
2848 InputFile *ifile = input_files[i];
2849 for (j = 0; j < ifile->ctx->nb_programs; j++) {
2850 AVProgram *p = ifile->ctx->programs[j];
2851 int discard = AVDISCARD_ALL;
2853 for (k = 0; k < p->nb_stream_indexes; k++)
2854 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2855 discard = AVDISCARD_DEFAULT;
2858 p->discard = discard;
2862 /* open files and write file headers */
2863 for (i = 0; i < nb_output_files; i++) {
2864 oc = output_files[i]->ctx;
2865 oc->interrupt_callback = int_cb;
2866 if (avformat_write_header(oc, &output_files[i]->opts) < 0) {
2867 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters?)", i);
2868 ret = AVERROR(EINVAL);
2871 assert_avoptions(output_files[i]->opts);
2872 if (strcmp(oc->oformat->name, "rtp")) {
2878 /* dump the file output parameters - cannot be done before in case
2880 for (i = 0; i < nb_output_files; i++) {
2881 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2884 /* dump the stream mapping */
2885 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2886 for (i = 0; i < nb_input_streams; i++) {
2887 ist = input_streams[i];
2889 for (j = 0; j < ist->nb_filters; j++) {
2890 AVFilterLink *link = ist->filters[j]->filter->outputs[0];
2891 if (ist->filters[j]->graph->graph_desc) {
2892 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2893 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2894 link->dst->filter->name);
2895 if (link->dst->input_count > 1)
2896 av_log(NULL, AV_LOG_INFO, ":%s", link->dstpad->name);
2897 if (nb_filtergraphs > 1)
2898 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2899 av_log(NULL, AV_LOG_INFO, "\n");
2904 for (i = 0; i < nb_output_streams; i++) {
2905 ost = output_streams[i];
2907 if (ost->attachment_filename) {
2908 /* an attached file */
2909 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2910 ost->attachment_filename, ost->file_index, ost->index);
2914 if (ost->filter && ost->filter->graph->graph_desc) {
2915 /* output from a complex graph */
2916 AVFilterLink *link = ost->filter->filter->inputs[0];
2917 av_log(NULL, AV_LOG_INFO, " %s", link->src->filter->name);
2918 if (link->src->output_count > 1)
2919 av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name);
2920 if (nb_filtergraphs > 1)
2921 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2923 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2924 ost->index, ost->enc ? ost->enc->name : "?");
2928 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2929 input_streams[ost->source_index]->file_index,
2930 input_streams[ost->source_index]->st->index,
2933 if (ost->sync_ist != input_streams[ost->source_index])
2934 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2935 ost->sync_ist->file_index,
2936 ost->sync_ist->st->index);
2937 if (ost->stream_copy)
2938 av_log(NULL, AV_LOG_INFO, " (copy)");
2940 av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2941 input_streams[ost->source_index]->dec->name : "?",
2942 ost->enc ? ost->enc->name : "?");
2943 av_log(NULL, AV_LOG_INFO, "\n");
2947 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2959 * The following code is the main loop of the file converter
2961 static int transcode(void)
2964 AVFormatContext *is, *os;
2968 int no_packet_count = 0;
2969 int64_t timer_start;
2971 if (!(no_packet = av_mallocz(nb_input_files)))
2974 ret = transcode_init();
2978 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2981 timer_start = av_gettime();
2983 for (; received_sigterm == 0;) {
2984 int file_index, ist_index, past_recording_time = 1;
2988 ipts_min = INT64_MAX;
2990 /* check if there's any stream where output is still needed */
2991 for (i = 0; i < nb_output_streams; i++) {
2993 ost = output_streams[i];
2994 of = output_files[ost->file_index];
2995 os = output_files[ost->file_index]->ctx;
2996 if (ost->is_past_recording_time ||
2997 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2999 if (ost->frame_number > ost->max_frames) {
3001 for (j = 0; j < of->ctx->nb_streams; j++)
3002 output_streams[of->ost_index + j]->is_past_recording_time = 1;
3005 past_recording_time = 0;
3007 if (past_recording_time)
3010 /* select the stream that we must read now by looking at the
3011 smallest output pts */
3013 for (i = 0; i < nb_input_streams; i++) {
3015 ist = input_streams[i];
3016 ipts = ist->last_dts;
3017 if (ist->discard || no_packet[ist->file_index])
3019 if (!input_files[ist->file_index]->eof_reached) {
3020 if (ipts < ipts_min) {
3022 file_index = ist->file_index;
3026 /* if none, input is finished */
3027 if (file_index < 0) {
3028 if (no_packet_count) {
3029 no_packet_count = 0;
3030 memset(no_packet, 0, nb_input_files);
3037 /* read a frame from it and output it in the fifo */
3038 is = input_files[file_index]->ctx;
3039 ret = av_read_frame(is, &pkt);
3040 if (ret == AVERROR(EAGAIN)) {
3041 no_packet[file_index] = 1;
3046 input_files[file_index]->eof_reached = 1;
3048 for (i = 0; i < input_files[file_index]->nb_streams; i++) {
3049 ist = input_streams[input_files[file_index]->ist_index + i];
3050 if (ist->decoding_needed)
3051 output_packet(ist, NULL);
3060 no_packet_count = 0;
3061 memset(no_packet, 0, nb_input_files);
3064 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3065 is->streams[pkt.stream_index]);
3067 /* the following test is needed in case new streams appear
3068 dynamically in the stream: we ignore them */
3069 if (pkt.stream_index >= input_files[file_index]->nb_streams)
3070 goto discard_packet;
3071 ist_index = input_files[file_index]->ist_index + pkt.stream_index;
3072 ist = input_streams[ist_index];
3074 goto discard_packet;
3076 if (pkt.dts != AV_NOPTS_VALUE)
3077 pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3078 if (pkt.pts != AV_NOPTS_VALUE)
3079 pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3081 if (pkt.pts != AV_NOPTS_VALUE)
3082 pkt.pts *= ist->ts_scale;
3083 if (pkt.dts != AV_NOPTS_VALUE)
3084 pkt.dts *= ist->ts_scale;
3086 //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
3088 // pkt.dts, input_files[ist->file_index].ts_offset,
3089 // ist->st->codec->codec_type);
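/* For formats with discontinuous timestamps, compare the packet dts with the
 * predicted next_dts; if the gap exceeds dts_delta_threshold seconds or the
 * dts jumps backwards (and -copyts is not set), fold the difference into the
 * file's ts_offset and correct this packet's timestamps accordingly. */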
3090 if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
3091 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3092 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3093 int64_t delta = pkt_dts - ist->next_dts;
3094 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
3095 input_files[ist->file_index]->ts_offset -= delta;
3096 av_log(NULL, AV_LOG_DEBUG,
3097 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3098 delta, input_files[ist->file_index]->ts_offset);
3099 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3100 if (pkt.pts != AV_NOPTS_VALUE)
3101 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3105 // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
3106 if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
3107 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
3108 ist->file_index, ist->st->index);
3111 av_free_packet(&pkt);
3116 av_free_packet(&pkt);
3118 /* dump report using the first video and audio output streams */
3119 print_report(0, timer_start);
3122 /* at the end of stream, we must flush the decoder buffers */
3123 for (i = 0; i < nb_input_streams; i++) {
3124 ist = input_streams[i];
3125 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3126 output_packet(ist, NULL);
3134 /* write the trailer if needed and close file */
3135 for (i = 0; i < nb_output_files; i++) {
3136 os = output_files[i]->ctx;
3137 av_write_trailer(os);
3140 /* dump report by using the first video and audio streams */
3141 print_report(1, timer_start);
3143 /* close each encoder */
3144 for (i = 0; i < nb_output_streams; i++) {
3145 ost = output_streams[i];
3146 if (ost->encoding_needed) {
3147 av_freep(&ost->st->codec->stats_in);
3148 avcodec_close(ost->st->codec);
3152 /* close each decoder */
3153 for (i = 0; i < nb_input_streams; i++) {
3154 ist = input_streams[i];
3155 if (ist->decoding_needed) {
3156 avcodec_close(ist->st->codec);
3164 av_freep(&no_packet);
3166 if (output_streams) {
3167 for (i = 0; i < nb_output_streams; i++) {
3168 ost = output_streams[i];
3170 if (ost->stream_copy)
3171 av_freep(&ost->st->codec->extradata);
3173 fclose(ost->logfile);
3174 ost->logfile = NULL;
3176 av_freep(&ost->st->codec->subtitle_header);
3177 av_free(ost->forced_kf_pts);
3178 av_dict_free(&ost->opts);
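/* Parse an aspect ratio given either as "num:den" or as a single floating
 * point value; logs a fatal error on malformed input. */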
3185 static double parse_frame_aspect_ratio(const char *arg)
3192 p = strchr(arg, ':');
3194 x = strtol(arg, &end, 10);
3196 y = strtol(end + 1, &end, 10);
3198 ar = (double)x / (double)y;
3200 ar = strtod(arg, NULL);
3203 av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
3209 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
3211 return parse_option(o, "codec:a", arg, options);
3214 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
3216 return parse_option(o, "codec:v", arg, options);
3219 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
3221 return parse_option(o, "codec:s", arg, options);
3224 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
3226 return parse_option(o, "codec:d", arg, options);
3229 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
3231 StreamMap *m = NULL;
3232 int i, negative = 0, file_idx;
3233 int sync_file_idx = -1, sync_stream_idx;
3241 map = av_strdup(arg);
3243 /* parse sync stream first, just pick first matching stream */
3244 if (sync = strchr(map, ',')) {
3246 sync_file_idx = strtol(sync + 1, &sync, 0);
3247 if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
3248 av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
3253 for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
3254 if (check_stream_specifier(input_files[sync_file_idx]->ctx,
3255 input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
3256 sync_stream_idx = i;
3259 if (i == input_files[sync_file_idx]->nb_streams) {
3260 av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
3261 "match any streams.\n", arg);
3267 if (map[0] == '[') {
3268 /* this mapping refers to lavfi output */
3269 const char *c = map + 1;
3270 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3271 &o->nb_stream_maps, o->nb_stream_maps + 1);
3272 m = &o->stream_maps[o->nb_stream_maps - 1];
3273 m->linklabel = av_get_token(&c, "]");
3274 if (!m->linklabel) {
3275 av_log(NULL, AV_LOG_ERROR, "Invalid output link label: %s.\n", map);
3279 file_idx = strtol(map, &p, 0);
3280 if (file_idx >= nb_input_files || file_idx < 0) {
3281 av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
3285 /* disable some already defined maps */
3286 for (i = 0; i < o->nb_stream_maps; i++) {
3287 m = &o->stream_maps[i];
3288 if (file_idx == m->file_index &&
3289 check_stream_specifier(input_files[m->file_index]->ctx,
3290 input_files[m->file_index]->ctx->streams[m->stream_index],
3291 *p == ':' ? p + 1 : p) > 0)
3295 for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
3296 if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
3297 *p == ':' ? p + 1 : p) <= 0)
3299 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3300 &o->nb_stream_maps, o->nb_stream_maps + 1);
3301 m = &o->stream_maps[o->nb_stream_maps - 1];
3303 m->file_index = file_idx;
3304 m->stream_index = i;
3306 if (sync_file_idx >= 0) {
3307 m->sync_file_index = sync_file_idx;
3308 m->sync_stream_index = sync_stream_idx;
3310 m->sync_file_index = file_idx;
3311 m->sync_stream_index = i;
3317 av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
3325 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
3327 o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
3328 &o->nb_attachments, o->nb_attachments + 1);
3329 o->attachments[o->nb_attachments - 1] = arg;
3334 * Parse a metadata specifier in arg.
3335 * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
3336 * @param index for type c/p, chapter/program index is written here
3337 * @param stream_spec for type s, the stream specifier is written here
3339 static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
3347 if (*(++arg) && *arg != ':') {
3348 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
3351 *stream_spec = *arg == ':' ? arg + 1 : "";
3355 if (*(++arg) == ':')
3356 *index = strtol(++arg, NULL, 0);
3359 av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
3366 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3368 AVDictionary **meta_in = NULL;
3369 AVDictionary **meta_out;
3371 char type_in, type_out;
3372 const char *istream_spec = NULL, *ostream_spec = NULL;
3373 int idx_in = 0, idx_out = 0;
3375 parse_meta_type(inspec, &type_in, &idx_in, &istream_spec);
3376 parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
3378 if (type_in == 'g' || type_out == 'g')
3379 o->metadata_global_manual = 1;
3380 if (type_in == 's' || type_out == 's')
3381 o->metadata_streams_manual = 1;
3382 if (type_in == 'c' || type_out == 'c')
3383 o->metadata_chapters_manual = 1;
3385 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3386 if ((index) < 0 || (index) >= (nb_elems)) {\
3387 av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
3392 #define SET_DICT(type, meta, context, index)\
3395 meta = &context->metadata;\
3398 METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3399 meta = &context->chapters[index]->metadata;\
3402 METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3403 meta = &context->programs[index]->metadata;\
3407 SET_DICT(type_in, meta_in, ic, idx_in);
3408 SET_DICT(type_out, meta_out, oc, idx_out);
3410 /* for input streams choose first matching stream */
3411 if (type_in == 's') {
3412 for (i = 0; i < ic->nb_streams; i++) {
3413 if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3414 meta_in = &ic->streams[i]->metadata;
3420 av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
3425 if (type_out == 's') {
3426 for (i = 0; i < oc->nb_streams; i++) {
3427 if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3428 meta_out = &oc->streams[i]->metadata;
3429 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3434 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3439 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3441 const char *codec_string = encoder ? "encoder" : "decoder";
3445 avcodec_find_encoder_by_name(name) :
3446 avcodec_find_decoder_by_name(name);
3448 av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
3451 if (codec->type != type) {
3452 av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
3458 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
3460 char *codec_name = NULL;
3462 MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3464 AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
3465 st->codec->codec_id = codec->id;
3468 return avcodec_find_decoder(st->codec->codec_id);
3472 * Add all the streams from the given input file to the global
3473 * list of input streams.
3475 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
3479 for (i = 0; i < ic->nb_streams; i++) {
3480 AVStream *st = ic->streams[i];
3481 AVCodecContext *dec = st->codec;
3482 InputStream *ist = av_mallocz(sizeof(*ist));
3487 input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
3488 input_streams[nb_input_streams - 1] = ist;
3491 ist->file_index = nb_input_files;
3493 st->discard = AVDISCARD_ALL;
3494 ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
3496 ist->ts_scale = 1.0;
3497 MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
3499 ist->dec = choose_decoder(o, ic, st);
3501 switch (dec->codec_type) {
3502 case AVMEDIA_TYPE_VIDEO:
3503 ist->resample_height = dec->height;
3504 ist->resample_width = dec->width;
3505 ist->resample_pix_fmt = dec->pix_fmt;
3508 case AVMEDIA_TYPE_AUDIO:
3509 guess_input_channel_layout(ist);
3511 ist->resample_sample_fmt = dec->sample_fmt;
3512 ist->resample_sample_rate = dec->sample_rate;
3513 ist->resample_channels = dec->channels;
3514 ist->resample_channel_layout = dec->channel_layout;
3517 case AVMEDIA_TYPE_DATA:
3518 case AVMEDIA_TYPE_SUBTITLE:
3519 case AVMEDIA_TYPE_ATTACHMENT:
3520 case AVMEDIA_TYPE_UNKNOWN:
3528 static void assert_file_overwrite(const char *filename)
3530 if (!file_overwrite &&
3531 (strchr(filename, ':') == NULL || filename[1] == ':' ||
3532 av_strstart(filename, "file:", NULL))) {
3533 if (avio_check(filename, 0) == 0) {
3535 fprintf(stderr, "File '%s' already exists. Overwrite? [y/N] ", filename);
3537 if (!read_yesno()) {
3538 fprintf(stderr, "Not overwriting - exiting\n");
3543 fprintf(stderr, "File '%s' already exists. Exiting.\n", filename);
3550 static void dump_attachment(AVStream *st, const char *filename)
3553 AVIOContext *out = NULL;
3554 AVDictionaryEntry *e;
3556 if (!st->codec->extradata_size) {
3557 av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3558 nb_input_files - 1, st->index);
3561 if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3562 filename = e->value;
3564 av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag "
3565 "in stream #%d:%d.\n", nb_input_files - 1, st->index);
3569 assert_file_overwrite(filename);
3571 if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3572 av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
3577 avio_write(out, st->codec->extradata, st->codec->extradata_size);
3582 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3584 AVFormatContext *ic;
3585 AVInputFormat *file_iformat = NULL;
3589 AVDictionary **opts;
3590 int orig_nb_streams; // number of streams before avformat_find_stream_info
3593 if (!(file_iformat = av_find_input_format(o->format))) {
3594 av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
3599 if (!strcmp(filename, "-"))
3602 using_stdin |= !strncmp(filename, "pipe:", 5) ||
3603 !strcmp(filename, "/dev/stdin");
3605 /* get default parameters from command line */
3606 ic = avformat_alloc_context();
3608 print_error(filename, AVERROR(ENOMEM));
3611 if (o->nb_audio_sample_rate) {
3612 snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3613 av_dict_set(&format_opts, "sample_rate", buf, 0);
3615 if (o->nb_audio_channels) {
3616 /* because we set audio_channels based on both the "ac" and
3617 * "channel_layout" options, we need to check that the specified
3618 * demuxer actually has the "channels" option before setting it */
3619 if (file_iformat && file_iformat->priv_class &&
3620 av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
3621 AV_OPT_SEARCH_FAKE_OBJ)) {
3622 snprintf(buf, sizeof(buf), "%d",
3623 o->audio_channels[o->nb_audio_channels - 1].u.i);
3624 av_dict_set(&format_opts, "channels", buf, 0);
3627 if (o->nb_frame_rates) {
3628 av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3630 if (o->nb_frame_sizes) {
3631 av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3633 if (o->nb_frame_pix_fmts)
3634 av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3636 ic->flags |= AVFMT_FLAG_NONBLOCK;
3637 ic->interrupt_callback = int_cb;
3639 /* open the input file with the generic libav function */
3640 err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3642 print_error(filename, err);
3645 assert_avoptions(format_opts);
3647 /* apply forced codec ids */
3648 for (i = 0; i < ic->nb_streams; i++)
3649 choose_decoder(o, ic, ic->streams[i]);
3651 /* Set AVCodecContext options for avformat_find_stream_info */
3652 opts = setup_find_stream_info_opts(ic, codec_opts);
3653 orig_nb_streams = ic->nb_streams;
3655 /* If there is not enough info to get the stream parameters, we decode the
3656 first frames to get them (used in the MPEG case, for example). */
3657 ret = avformat_find_stream_info(ic, opts);
3659 av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3660 avformat_close_input(&ic);
3664 timestamp = o->start_time;
3665 /* add the stream start time */
3666 if (ic->start_time != AV_NOPTS_VALUE)
3667 timestamp += ic->start_time;
3669 /* if seeking requested, we execute it */
3670 if (o->start_time != 0) {
3671 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
3673 av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3674 filename, (double)timestamp / AV_TIME_BASE);
3678 /* update the current parameters so that they match those of the input stream */
3679 add_input_streams(o, ic);
3681 /* dump the file content */
3682 av_dump_format(ic, nb_input_files, filename, 0);
3684 input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3685 if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
3688 input_files[nb_input_files - 1]->ctx = ic;
3689 input_files[nb_input_files - 1]->ist_index = nb_input_streams - ic->nb_streams;
3690 input_files[nb_input_files - 1]->ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3691 input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
3692 input_files[nb_input_files - 1]->rate_emu = o->rate_emu;
3694 for (i = 0; i < o->nb_dump_attachment; i++) {
3697 for (j = 0; j < ic->nb_streams; j++) {
3698 AVStream *st = ic->streams[j];
3700 if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3701 dump_attachment(st, o->dump_attachment[i].u.str);
3705 for (i = 0; i < orig_nb_streams; i++)
3706 av_dict_free(&opts[i]);
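/* Parse the comma-separated timestamp list given to -force_key_frames and
 * store the values, rescaled to the encoder time base, in ost->forced_kf_pts. */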
3713 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3714 AVCodecContext *avctx)
3720 for (p = kf; *p; p++)
3723 ost->forced_kf_count = n;
3724 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3725 if (!ost->forced_kf_pts) {
3726 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3729 for (i = 0; i < n; i++) {
3730 p = i ? strchr(p, ',') + 1 : kf;
3731 t = parse_time_or_die("force_key_frames", p, 1);
3732 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3736 static uint8_t *get_line(AVIOContext *s)
3742 if (avio_open_dyn_buf(&line) < 0) {
3743 av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
3747 while ((c = avio_r8(s)) && c != '\n')
3750 avio_close_dyn_buf(line, &buf);
3755 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3758 char filename[1000];
3759 const char *base[3] = { getenv("AVCONV_DATADIR"),
3764 for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
3768 snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3769 i != 1 ? "" : "/.avconv", codec_name, preset_name);
3770 ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3773 snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3774 i != 1 ? "" : "/.avconv", preset_name);
3775 ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3781 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
3783 char *codec_name = NULL;
3785 MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
3787 ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3788 NULL, ost->st->codec->codec_type);
3789 ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3790 } else if (!strcmp(codec_name, "copy"))
3791 ost->stream_copy = 1;
3793 ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3794 ost->st->codec->codec_id = ost->enc->id;
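/* Create a new output stream of the given type in the muxer context:
 * allocate the OutputStream, pick its encoder (or mark it for streamcopy),
 * and apply any matching preset file, bitstream filters, codec tag and
 * qscale settings from the command line. */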
3798 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
3801 AVStream *st = avformat_new_stream(oc, NULL);
3802 int idx = oc->nb_streams - 1, ret = 0;
3803 char *bsf = NULL, *next, *codec_tag = NULL;
3804 AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3806 char *buf = NULL, *arg = NULL, *preset = NULL;
3807 AVIOContext *s = NULL;
3810 av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
3814 if (oc->nb_streams - 1 < o->nb_streamid_map)
3815 st->id = o->streamid_map[oc->nb_streams - 1];
3817 output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3818 nb_output_streams + 1);
3819 if (!(ost = av_mallocz(sizeof(*ost))))
3821 output_streams[nb_output_streams - 1] = ost;
3823 ost->file_index = nb_output_files;
3826 st->codec->codec_type = type;
3827 choose_encoder(o, oc, ost);
3829 ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3832 avcodec_get_context_defaults3(st->codec, ost->enc);
3833 st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
3835 MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3836 if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
3839 if (!buf[0] || buf[0] == '#') {
3843 if (!(arg = strchr(buf, '='))) {
3844 av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
3848 av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3850 } while (!s->eof_reached);
3854 av_log(NULL, AV_LOG_FATAL,
3855 "Preset %s specified for stream %d:%d, but could not be opened.\n",
3856 preset, ost->file_index, ost->index);
3860 ost->max_frames = INT64_MAX;
3861 MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
3863 MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
3865 if (next = strchr(bsf, ','))
3867 if (!(bsfc = av_bitstream_filter_init(bsf))) {
3868 av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3872 bsfc_prev->next = bsfc;
3874 ost->bitstream_filters = bsfc;
3880 MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3882 uint32_t tag = strtol(codec_tag, &next, 0);
3884 tag = AV_RL32(codec_tag);
3885 st->codec->codec_tag = tag;
3888 MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3889 if (qscale >= 0 || same_quant) {
3890 st->codec->flags |= CODEC_FLAG_QSCALE;
3891 st->codec->global_quality = FF_QP2LAMBDA * qscale;
3894 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
3895 st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
3897 av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3899 ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
3904 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3907 const char *p = str;
3914 av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
3921 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
3925 AVCodecContext *video_enc;
3927 ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3929 video_enc = st->codec;
3931 if (!ost->stream_copy) {
3932 const char *p = NULL;
3933 char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
3934 char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
3935 char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
3938 MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3939 if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3940 av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3944 MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3945 if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3946 av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
3950 MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
3951 if (frame_aspect_ratio)
3952 ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
3954 MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
3955 if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
3956 av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
3959 st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
3961 MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
3963 if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
3964 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
3967 parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
3969 MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
3971 if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
3972 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
3975 parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
3978 MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
3979 for (i = 0; p; i++) {
3981 int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
3983 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
3986 video_enc->rc_override =
3987 av_realloc(video_enc->rc_override,
3988 sizeof(RcOverride) * (i + 1));
3989 video_enc->rc_override[i].start_frame = start;
3990 video_enc->rc_override[i].end_frame = end;
3992 video_enc->rc_override[i].qscale = q;
3993 video_enc->rc_override[i].quality_factor = 1.0;
3996 video_enc->rc_override[i].qscale = 0;
3997 video_enc->rc_override[i].quality_factor = -q/100.0;
4002 video_enc->rc_override_count = i;
4003 if (!video_enc->rc_initial_buffer_occupancy)
4004 video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
4005 video_enc->intra_dc_precision = intra_dc_precision - 8;
4010 video_enc->flags |= CODEC_FLAG_PASS1;
4012 video_enc->flags |= CODEC_FLAG_PASS2;
4016 MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
4017 if (forced_key_frames)
4018 parse_forced_key_frames(forced_key_frames, ost, video_enc);
4020 MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
4022 ost->top_field_first = -1;
4023 MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
4025 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
4027 ost->avfilter = av_strdup(filters);
4029 MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
4035 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
4039 AVCodecContext *audio_enc;
4041 ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
4044 audio_enc = st->codec;
4045 audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
4047 if (!ost->stream_copy) {
4048 char *sample_fmt = NULL, *filters = NULL;
4050 MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
4052 MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
4054 (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
4055 av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
4059 MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
4061 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
4063 ost->avfilter = av_strdup(filters);
4069 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
4073 ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
4074 if (!ost->stream_copy) {
4075 av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
4082 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
4084 OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
4085 ost->stream_copy = 1;
4089 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
4093 AVCodecContext *subtitle_enc;
4095 ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
4097 subtitle_enc = st->codec;
4099 subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
4104 /* arg format is "output-stream-index:streamid-value". */
4105 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
4111 av_strlcpy(idx_str, arg, sizeof(idx_str));
4112 p = strchr(idx_str, ':');
4114 av_log(NULL, AV_LOG_FATAL,
4115 "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
4120 idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
4121 o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
4122 o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
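/* Copy chapters from an input file to an output file, shifting them by the
 * output start time and skipping chapters that fall entirely outside the
 * recording window. */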
4126 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
4128 AVFormatContext *is = ifile->ctx;
4129 AVFormatContext *os = ofile->ctx;
4132 for (i = 0; i < is->nb_chapters; i++) {
4133 AVChapter *in_ch = is->chapters[i], *out_ch;
4134 int64_t ts_off = av_rescale_q(ofile->start_time - ifile->ts_offset,
4135 AV_TIME_BASE_Q, in_ch->time_base);
4136 int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
4137 av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
4140 if (in_ch->end < ts_off)
4142 if (rt != INT64_MAX && in_ch->start > rt + ts_off)
4145 out_ch = av_mallocz(sizeof(AVChapter));
4147 return AVERROR(ENOMEM);
4149 out_ch->id = in_ch->id;
4150 out_ch->time_base = in_ch->time_base;
4151 out_ch->start = FFMAX(0, in_ch->start - ts_off);
4152 out_ch->end = FFMIN(rt, in_ch->end - ts_off);
4155 av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
4158 os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
4160 return AVERROR(ENOMEM);
4161 os->chapters[os->nb_chapters - 1] = out_ch;
4166 static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
4167 AVFormatContext *oc)
4171 switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4172 case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
4173 case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
4175 av_log(NULL, AV_LOG_FATAL, "Only video and audio filters are supported "
4180 ost->source_index = -1;
4181 ost->filter = ofilter;
4185 if (ost->stream_copy) {
4186 av_log(NULL, AV_LOG_ERROR, "Streamcopy requested for output stream %d:%d, "
4187 "which is fed from a complex filtergraph. Filtering and streamcopy "
4188 "cannot be used together.\n", ost->file_index, ost->index);
4192 if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
4193 av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
4196 avfilter_inout_free(&ofilter->out_tmp);
4199 static void opt_output_file(void *optctx, const char *filename)
4201 OptionsContext *o = optctx;
4202 AVFormatContext *oc;
4204 AVOutputFormat *file_oformat;
4208 if (configure_complex_filters() < 0) {
4209 av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
4213 if (!strcmp(filename, "-"))
4216 oc = avformat_alloc_context();
4218 print_error(filename, AVERROR(ENOMEM));
4223 file_oformat = av_guess_format(o->format, NULL, NULL);
4224 if (!file_oformat) {
4225 av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
4229 file_oformat = av_guess_format(NULL, filename, NULL);
4230 if (!file_oformat) {
4231 av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
4237 oc->oformat = file_oformat;
4238 oc->interrupt_callback = int_cb;
4239 av_strlcpy(oc->filename, filename, sizeof(oc->filename));
4241 /* create streams for all unlabeled output pads */
4242 for (i = 0; i < nb_filtergraphs; i++) {
4243 FilterGraph *fg = filtergraphs[i];
4244 for (j = 0; j < fg->nb_outputs; j++) {
4245 OutputFilter *ofilter = fg->outputs[j];
4247 if (!ofilter->out_tmp || ofilter->out_tmp->name)
4250 switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4251 case AVMEDIA_TYPE_VIDEO: o->video_disable = 1; break;
4252 case AVMEDIA_TYPE_AUDIO: o->audio_disable = 1; break;
4253 case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
4255 init_output_filter(ofilter, o, oc);
4259 if (!o->nb_stream_maps) {
4260 /* pick the "best" stream of each type */
4261 #define NEW_STREAM(type, index)\
4263 ost = new_ ## type ## _stream(o, oc);\
4264 ost->source_index = index;\
4265 ost->sync_ist = input_streams[index];\
4266 input_streams[index]->discard = 0;\
4267 input_streams[index]->st->discard = AVDISCARD_NONE;\
4270 /* video: highest resolution */
4271 if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
4272 int area = 0, idx = -1;
4273 for (i = 0; i < nb_input_streams; i++) {
4274 ist = input_streams[i];
4275 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
4276 ist->st->codec->width * ist->st->codec->height > area) {
4277 area = ist->st->codec->width * ist->st->codec->height;
4281 NEW_STREAM(video, idx);
4284 /* audio: most channels */
4285 if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
4286 int channels = 0, idx = -1;
4287 for (i = 0; i < nb_input_streams; i++) {
4288 ist = input_streams[i];
4289 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
4290 ist->st->codec->channels > channels) {
4291 channels = ist->st->codec->channels;
4295 NEW_STREAM(audio, idx);
4298 /* subtitles: pick first */
4299 if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
4300 for (i = 0; i < nb_input_streams; i++)
4301 if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
4302 NEW_STREAM(subtitle, i);
4306 /* do something with data? */
4308 for (i = 0; i < o->nb_stream_maps; i++) {
4309 StreamMap *map = &o->stream_maps[i];
4314 if (map->linklabel) {
4316 OutputFilter *ofilter = NULL;
4319 for (j = 0; j < nb_filtergraphs; j++) {
4320 fg = filtergraphs[j];
4321 for (k = 0; k < fg->nb_outputs; k++) {
4322 AVFilterInOut *out = fg->outputs[k]->out_tmp;
4323 if (out && !strcmp(out->name, map->linklabel)) {
4324 ofilter = fg->outputs[k];
4331 av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
4332 "in any defined filter graph.\n", map->linklabel);
4335 init_output_filter(ofilter, o, oc);
4337 ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
4338 switch (ist->st->codec->codec_type) {
4339 case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
4340 case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
4341 case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
4342 case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
4343 case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
4345 av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
4346 map->file_index, map->stream_index);
4350 ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
4351 ost->sync_ist = input_streams[input_files[map->sync_file_index]->ist_index +
4352 map->sync_stream_index];
4354 ist->st->discard = AVDISCARD_NONE;
4359 /* handle attached files */
4360 for (i = 0; i < o->nb_attachments; i++) {
4362 uint8_t *attachment;
4366 if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
4367 av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
4371 if ((len = avio_size(pb)) <= 0) {
4372 av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
4376 if (!(attachment = av_malloc(len))) {
4377 av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
4381 avio_read(pb, attachment, len);
4383 ost = new_attachment_stream(o, oc);
4384 ost->stream_copy = 0;
4385 ost->source_index = -1;
4386 ost->attachment_filename = o->attachments[i];
4387 ost->st->codec->extradata = attachment;
4388 ost->st->codec->extradata_size = len;
4390 p = strrchr(o->attachments[i], '/');
4391 av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
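/* Illustrative note: each -attach argument is read whole into the extradata of
 * a new attachment stream, with the base file name stored as "filename"
 * metadata. A sketch (the muxer must support attachments, and some may also
 * want a mimetype metadata entry):
 *     avconv -i in.mkv -attach cover.jpg out.mkv */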
4395 output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
4396 if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
4399 output_files[nb_output_files - 1]->ctx = oc;
4400 output_files[nb_output_files - 1]->ost_index = nb_output_streams - oc->nb_streams;
4401 output_files[nb_output_files - 1]->recording_time = o->recording_time;
4402 if (o->recording_time != INT64_MAX)
4403 oc->duration = o->recording_time;
4404 output_files[nb_output_files - 1]->start_time = o->start_time;
4405 output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
4406 av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
4408 /* check filename in case an image number is expected */
4409 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
4410 if (!av_filename_number_test(oc->filename)) {
4411 print_error(oc->filename, AVERROR(EINVAL));
4416 if (!(oc->oformat->flags & AVFMT_NOFILE)) {
4417 /* test if it already exists to avoid losing precious files */
4418 assert_file_overwrite(filename);
4421 if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
4422 &oc->interrupt_callback,
4423 &output_files[nb_output_files - 1]->opts)) < 0) {
4424 print_error(filename, err);
4429 if (o->mux_preload) {
4431 snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
4432 av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
4434 oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
4435 oc->flags |= AVFMT_FLAG_NONBLOCK;
4438 for (i = 0; i < o->nb_metadata_map; i++) {
4440 int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
4442 if (in_file_index < 0)
4444 if (in_file_index >= nb_input_files) {
4445 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4448 copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
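/* Illustrative note: the integer parsed above is the input file index in front
 * of the optional ':' in a -map_metadata value. Hypothetical sketches (file
 * names are placeholders):
 *     avconv -i in.mkv -map_metadata 0 out.mkv
 *     avconv -i in.mkv -map_metadata:s:a 0:g out.mkv
 * The second form would copy input 0's global metadata onto the output's
 * audio streams. */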
4452 if (o->chapters_input_file >= nb_input_files) {
4453 if (o->chapters_input_file == INT_MAX) {
4454 /* copy chapters from the first input file that has them */
4455 o->chapters_input_file = -1;
4456 for (i = 0; i < nb_input_files; i++)
4457 if (input_files[i]->ctx->nb_chapters) {
4458 o->chapters_input_file = i;
4462 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4463 o->chapters_input_file);
4467 if (o->chapters_input_file >= 0)
4468 copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
4469 !o->metadata_chapters_manual);
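/* Illustrative note: chapters default to being copied from the first input
 * that has any; -map_chapters N picks input N explicitly, and a negative index
 * disables copying. A sketch:
 *     avconv -i a.mkv -i b.mkv -map_chapters 1 out.mkv */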
4471 /* copy global metadata by default */
4472 if (!o->metadata_global_manual && nb_input_files)
4473 av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
4474 AV_DICT_DONT_OVERWRITE);
4475 if (!o->metadata_streams_manual)
4476 for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
4478 if (output_streams[i]->source_index < 0) /* this is true e.g. for attached files */
4480 ist = input_streams[output_streams[i]->source_index];
4481 av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4484 /* process manually set metadata */
4485 for (i = 0; i < o->nb_metadata; i++) {
4488 const char *stream_spec;
4489 int index = 0, j, ret;
4491 val = strchr(o->metadata[i].u.str, '=');
4493 av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4494 o->metadata[i].u.str);
4499 parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
4501 for (j = 0; j < oc->nb_streams; j++) {
4502 if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
4503 av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
4507 av_log(NULL, AV_LOG_DEBUG, "ret %d, stream_spec %s\n", ret, stream_spec);
4515 if (index < 0 || index >= oc->nb_chapters) {
4516 av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4519 m = &oc->chapters[index]->metadata;
4522 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
4525 av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
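/* Illustrative note: -metadata takes global, per-stream and per-chapter
 * specifiers, and an empty value deletes the tag. A sketch (values are
 * placeholders):
 *     avconv -i in.mkv -metadata title="My title" \
 *            -metadata:s:a:0 language=eng out.mkv */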
4532 /* same option as mencoder */
4533 static int opt_pass(const char *opt, const char *arg)
4535 do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
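/* Usage sketch (illustrative; codec, bitrate and paths are placeholders):
 * two-pass encoding is done by running the converter twice with the same
 * settings, e.g.
 *     avconv -i in.mkv -c:v mpeg4 -b:v 1000k -an -pass 1 -f rawvideo -y /dev/null
 *     avconv -i in.mkv -c:v mpeg4 -b:v 1000k -pass 2 out.avi
 * -passlogfile changes the prefix of the statistics file written by pass 1. */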
4539 static int64_t getutime(void)
4542 struct rusage rusage;
4544 getrusage(RUSAGE_SELF, &rusage);
4545 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4546 #elif HAVE_GETPROCESSTIMES
4548 FILETIME c, e, k, u;
4549 proc = GetCurrentProcess();
4550 GetProcessTimes(proc, &c, &e, &k, &u);
4551 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4553 return av_gettime();
4557 static int64_t getmaxrss(void)
4559 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4560 struct rusage rusage;
4561 getrusage(RUSAGE_SELF, &rusage);
4562 return (int64_t)rusage.ru_maxrss * 1024;
4563 #elif HAVE_GETPROCESSMEMORYINFO
4565 PROCESS_MEMORY_COUNTERS memcounters;
4566 proc = GetCurrentProcess();
4567 memcounters.cb = sizeof(memcounters);
4568 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4569 return memcounters.PeakPagefileUsage;
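/* Illustrative note: these two helpers feed the -benchmark report printed at
 * the end of main(). getutime() returns user CPU time in microseconds
 * (GetProcessTimes() reports 100 ns ticks, hence the division by 10), and
 * getmaxrss() returns peak memory use in bytes (ru_maxrss is in kilobytes,
 * PeakPagefileUsage is already in bytes). */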
4575 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4577 return parse_option(o, "q:a", arg, options);
4580 static void show_usage(void)
4582 printf("Hyper fast Audio and Video encoder\n");
4583 printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
4587 static void show_help(void)
4589 int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
4590 av_log_set_callback(log_callback_help);
4592 show_help_options(options, "Main options:\n",
4593 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4594 show_help_options(options, "\nAdvanced options:\n",
4595 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4597 show_help_options(options, "\nVideo options:\n",
4598 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4600 show_help_options(options, "\nAdvanced Video options:\n",
4601 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4602 OPT_VIDEO | OPT_EXPERT);
4603 show_help_options(options, "\nAudio options:\n",
4604 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4606 show_help_options(options, "\nAdvanced Audio options:\n",
4607 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4608 OPT_AUDIO | OPT_EXPERT);
4609 show_help_options(options, "\nSubtitle options:\n",
4610 OPT_SUBTITLE | OPT_GRAB,
4612 show_help_options(options, "\nAudio/Video grab options:\n",
4616 show_help_children(avcodec_get_class(), flags);
4617 show_help_children(avformat_get_class(), flags);
4618 show_help_children(sws_get_class(), flags);
4621 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4623 enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
4624 static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
4626 if (!strncmp(arg, "pal-", 4)) {
4629 } else if (!strncmp(arg, "ntsc-", 5)) {
4632 } else if (!strncmp(arg, "film-", 5)) {
4636 /* Try to determine PAL/NTSC by peeking in the input files */
4637 if (nb_input_files) {
4639 for (j = 0; j < nb_input_files; j++) {
4640 for (i = 0; i < input_files[j]->nb_streams; i++) {
4641 AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
4642 if (c->codec_type != AVMEDIA_TYPE_VIDEO)
4644 fr = c->time_base.den * 1000 / c->time_base.num;
4648 } else if ((fr == 29970) || (fr == 23976)) {
4653 if (norm != UNKNOWN)
4657 if (norm != UNKNOWN)
4658 av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4661 if (norm == UNKNOWN) {
4662 av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4663 av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4664 av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
4668 if (!strcmp(arg, "vcd")) {
4669 opt_video_codec(o, "c:v", "mpeg1video");
4670 opt_audio_codec(o, "c:a", "mp2");
4671 parse_option(o, "f", "vcd", options);
4673 parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4674 parse_option(o, "r", frame_rates[norm], options);
4675 opt_default("g", norm == PAL ? "15" : "18");
4677 opt_default("b", "1150000");
4678 opt_default("maxrate", "1150000");
4679 opt_default("minrate", "1150000");
4680 opt_default("bufsize", "327680"); // 40*1024*8;
4682 opt_default("b:a", "224000");
4683 parse_option(o, "ar", "44100", options);
4684 parse_option(o, "ac", "2", options);
4686 opt_default("packetsize", "2324");
4687 opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4689 /* We have to offset the PTS so that it is consistent with the SCR.
4690 SCR starts at 36000, but the first two packs contain only padding,
4691 and the first pack from the other stream may also have been written
4692 before the real data.
4693 So the real data starts at SCR 36000 + 3*1200. */
4694 o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
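/* Illustrative note: SCR/PTS tick at 90 kHz, so
 * (36000 + 3 * 1200) / 90000.0 = 39600 / 90000 = 0.44 seconds of preload. */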
4695 } else if (!strcmp(arg, "svcd")) {
4697 opt_video_codec(o, "c:v", "mpeg2video");
4698 opt_audio_codec(o, "c:a", "mp2");
4699 parse_option(o, "f", "svcd", options);
4701 parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4702 parse_option(o, "r", frame_rates[norm], options);
4703 opt_default("g", norm == PAL ? "15" : "18");
4705 opt_default("b", "2040000");
4706 opt_default("maxrate", "2516000");
4707 opt_default("minrate", "0"); // 1145000;
4708 opt_default("bufsize", "1835008"); // 224*1024*8;
4709 opt_default("flags", "+scan_offset");
4712 opt_default("b:a", "224000");
4713 parse_option(o, "ar", "44100", options);
4715 opt_default("packetsize", "2324");
4717 } else if (!strcmp(arg, "dvd")) {
4719 opt_video_codec(o, "c:v", "mpeg2video");
4720 opt_audio_codec(o, "c:a", "ac3");
4721 parse_option(o, "f", "dvd", options);
4723 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4724 parse_option(o, "r", frame_rates[norm], options);
4725 opt_default("g", norm == PAL ? "15" : "18");
4727 opt_default("b", "6000000");
4728 opt_default("maxrate", "9000000");
4729 opt_default("minrate", "0"); // 1500000;
4730 opt_default("bufsize", "1835008"); // 224*1024*8;
4732 opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4733 opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4735 opt_default("b:a", "448000");
4736 parse_option(o, "ar", "48000", options);
4738 } else if (!strncmp(arg, "dv", 2)) {
4740 parse_option(o, "f", "dv", options);
4742 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4743 parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4744 norm == PAL ? "yuv420p" : "yuv411p", options);
4745 parse_option(o, "r", frame_rates[norm], options);
4747 parse_option(o, "ar", "48000", options);
4748 parse_option(o, "ac", "2", options);
4751 av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4752 return AVERROR(EINVAL);
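/* Usage sketch (illustrative; file names are placeholders): -target bundles
 * the codec, size, rate and mux settings selected above, e.g.
 *     avconv -i in.avi -target pal-dvd out.mpg
 * The "pal-"/"ntsc-"/"film-" prefix may be omitted when the norm can be
 * guessed from the input frame rate, as attempted above. */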
4757 static int opt_vstats_file(const char *opt, const char *arg)
4759 av_free (vstats_filename);
4760 vstats_filename = av_strdup (arg);
4764 static int opt_vstats(const char *opt, const char *arg)
4767 time_t today2 = time(NULL);
4768 struct tm *today = localtime(&today2);
4770 snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4772 return opt_vstats_file(opt, filename);
4775 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4777 return parse_option(o, "frames:v", arg, options);
4780 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4782 return parse_option(o, "frames:a", arg, options);
4785 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4787 return parse_option(o, "frames:d", arg, options);
4790 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4792 return parse_option(o, "tag:v", arg, options);
4795 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4797 return parse_option(o, "tag:a", arg, options);
4800 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4802 return parse_option(o, "tag:s", arg, options);
4805 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4807 return parse_option(o, "filter:v", arg, options);
4810 static int opt_audio_filters(OptionsContext *o, const char *opt, const char *arg)
4812 return parse_option(o, "filter:a", arg, options);
4815 static int opt_vsync(const char *opt, const char *arg)
4817 if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
4818 else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
4819 else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
4821 if (video_sync_method == VSYNC_AUTO)
4822 video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
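/* Illustrative note: -vsync accepts the symbolic names handled above or their
 * numeric equivalents, e.g. (sketch):
 *     avconv -i in.mkv -vsync vfr out.mkv
 * Roughly: cfr duplicates/drops frames to hold a constant rate, vfr drops
 * frames only to avoid duplicate timestamps, and passthrough forwards every
 * frame with its timestamp unchanged. */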
4826 static int opt_deinterlace(const char *opt, const char *arg)
4828 av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
4833 static int opt_cpuflags(const char *opt, const char *arg)
4835 int flags = av_parse_cpu_flags(arg);
4840 av_set_cpu_flags_mask(flags);
4844 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
4846 int idx = locate_option(argc, argv, options, "cpuflags");
4847 if (idx && argv[idx + 1])
4848 opt_cpuflags("cpuflags", argv[idx + 1]);
4851 static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
4853 char layout_str[32];
4856 int ret, channels, ac_str_size;
4859 layout = av_get_channel_layout(arg);
4861 av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
4862 return AVERROR(EINVAL);
4864 snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
4865 ret = opt_default(opt, layout_str);
4869 /* set 'ac' option based on channel layout */
4870 channels = av_get_channel_layout_nb_channels(layout);
4871 snprintf(layout_str, sizeof(layout_str), "%d", channels);
4872 stream_str = strchr(opt, ':');
4873 ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
4874 ac_str = av_mallocz(ac_str_size);
4876 return AVERROR(ENOMEM);
4877 av_strlcpy(ac_str, "ac", 3);
4879 av_strlcat(ac_str, stream_str, ac_str_size);
4880 ret = parse_option(o, ac_str, layout_str, options);
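/* Illustrative note: besides setting the channel_layout option, the code above
 * derives a matching -ac value, so e.g. (sketch):
 *     avconv -i in.wav -channel_layout 5.1 out.ac3
 * would also request 6 output channels. */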
4886 static int opt_filter_complex(const char *opt, const char *arg)
4888 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
4889 &nb_filtergraphs, nb_filtergraphs + 1);
4890 if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
4891 return AVERROR(ENOMEM);
4892 filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
4893 filtergraphs[nb_filtergraphs - 1]->graph_desc = arg;
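/* Illustrative note: each -filter_complex argument becomes one FilterGraph
 * entry; its unconnected, labelled outputs are what the linklabel branch of
 * the stream mapping code matches against. */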
4897 #define OFFSET(x) offsetof(OptionsContext, x)
4898 static const OptionDef options[] = {
4900 #include "cmdutils_common_opts.h"
4901 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4902 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4903 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4904 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4905 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4906 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4907 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4908 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4909 "outfile[,metadata]:infile[,metadata]" },
4910 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4911 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4912 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4913 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4914 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4915 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4916 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4917 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4918 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4919 "add timings for benchmarking" },
4920 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4921 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4922 "dump each input packet" },
4923 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4924 "when dumping packets, also dump the payload" },
4925 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4926 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4927 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4928 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4929 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4930 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)©_ts}, "copy timestamps" },
4931 { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)©_tb}, "copy input stream time base when stream copying" },
4932 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4933 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4934 { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4935 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4936 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4937 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4938 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4939 { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4940 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4941 { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
4942 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4943 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4944 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4945 { "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "set CPU flags mask", "mask" },
4948 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4949 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4950 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4951 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4952 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4953 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4954 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4955 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4956 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4957 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4958 "use same quantizer as source (implies VBR)" },
4959 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4960 { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
4961 { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
4962 "this option is deprecated, use the yadif filter instead" },
4963 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
4964 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
4965 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
4966 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
4967 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
4968 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
4969 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
4970 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
4971 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
4972 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
4973 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
4974 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
4977 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
4978 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
4979 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
4980 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
4981 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
4982 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
4983 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
4984 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
4985 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
4986 { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
4987 { "af", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_filters}, "audio filters", "filter list" },
4989 /* subtitle options */
4990 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
4991 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
4992 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
4995 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
4998 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
4999 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
5001 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
5003 /* data codec support */
5004 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
5006 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
5010 int main(int argc, char **argv)
5012 OptionsContext o = { 0 };
5017 av_log_set_flags(AV_LOG_SKIP_REPEATED);
5018 parse_loglevel(argc, argv, options);
5020 avcodec_register_all();
5022 avdevice_register_all();
5024 avfilter_register_all();
5026 avformat_network_init();
5030 parse_cpuflags(argc, argv, options);
5033 parse_options(&o, argc, argv, options, opt_output_file);
5035 if (nb_output_files <= 0 && nb_input_files == 0) {
5037 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5041 /* file converter / grab */
5042 if (nb_output_files <= 0) {
5043 fprintf(stderr, "At least one output file must be specified\n");
5047 if (nb_input_files == 0) {
5048 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
5053 if (transcode() < 0)
5055 ti = getutime() - ti;
5057 int maxrss = getmaxrss() / 1024;
5058 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);