3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavresample/avresample.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/vsrc_buffer.h"
56 #if HAVE_SYS_RESOURCE_H
57 #include <sys/types.h>
59 #include <sys/resource.h>
60 #elif HAVE_GETPROCESSTIMES
63 #if HAVE_GETPROCESSMEMORYINFO
69 #include <sys/select.h>
76 #include "libavutil/avassert.h"
79 #define VSYNC_PASSTHROUGH 0
83 const char program_name[] = "avconv";
84 const int program_birth_year = 2000;
86 /* select an input stream for an output stream */
87 typedef struct StreamMap {
88 int disabled; /** 1 is this mapping is disabled by a negative map */
92 int sync_stream_index;
93 char *linklabel; /** name of an output link, for mapping lavfi outputs */
97 * select an input file for an output file
99 typedef struct MetadataMap {
100 int file; ///< file index
101 char type; ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
102 int index; ///< stream/chapter/program number
105 static const OptionDef options[];
107 static int video_discard = 0;
108 static int same_quant = 0;
109 static int do_deinterlace = 0;
110 static int intra_dc_precision = 8;
111 static int qp_hist = 0;
113 static int file_overwrite = 0;
114 static int do_benchmark = 0;
115 static int do_hex_dump = 0;
116 static int do_pkt_dump = 0;
117 static int do_pass = 0;
118 static char *pass_logfilename_prefix = NULL;
119 static int video_sync_method = VSYNC_AUTO;
120 static int audio_sync_method = 0;
121 static float audio_drift_threshold = 0.1;
122 static int copy_ts = 0;
123 static int copy_tb = 1;
124 static int opt_shortest = 0;
125 static char *vstats_filename;
126 static FILE *vstats_file;
128 static int audio_volume = 256;
130 static int exit_on_error = 0;
131 static int using_stdin = 0;
132 static int64_t video_size = 0;
133 static int64_t audio_size = 0;
134 static int64_t extra_size = 0;
135 static int nb_frames_dup = 0;
136 static int nb_frames_drop = 0;
137 static int input_sync;
139 static float dts_delta_threshold = 10;
141 static int print_stats = 1;
143 static uint8_t *audio_buf;
144 static unsigned int allocated_audio_buf_size;
145 static uint8_t *async_buf;
146 static unsigned int allocated_async_buf_size;
148 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
150 typedef struct InputFilter {
151 AVFilterContext *filter;
152 struct InputStream *ist;
153 struct FilterGraph *graph;
156 typedef struct OutputFilter {
157 AVFilterContext *filter;
158 struct OutputStream *ost;
159 struct FilterGraph *graph;
161 /* temporary storage until stream maps are processed */
162 AVFilterInOut *out_tmp;
165 typedef struct FilterGraph {
167 const char *graph_desc;
169 AVFilterGraph *graph;
171 InputFilter **inputs;
173 OutputFilter **outputs;
177 typedef struct FrameBuffer {
183 enum PixelFormat pix_fmt;
186 struct InputStream *ist;
187 struct FrameBuffer *next;
190 typedef struct InputStream {
193 int discard; /* true if stream data should be discarded */
194 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
196 AVFrame *decoded_frame;
198 int64_t start; /* time when read started */
199 /* predicted dts of the next packet read for this stream or (when there are
200 * several frames in a packet) of the next frame in current packet */
202 /* dts of the last packet read for this stream */
204 PtsCorrectionContext pts_ctx;
206 int is_start; /* is 1 at the start and after a discontinuity */
207 int showed_multi_packet_warning;
212 int resample_pix_fmt;
214 /* a pool of free buffers for decoded data */
215 FrameBuffer *buffer_pool;
217 /* decoded data from this stream goes into all those filters
218 * currently video only */
219 InputFilter **filters;
223 typedef struct InputFile {
224 AVFormatContext *ctx;
225 int eof_reached; /* true if eof reached */
226 int ist_index; /* index of first stream in ist_table */
227 int buffer_size; /* current total buffer size */
229 int nb_streams; /* number of stream that avconv is aware of; may be different
230 from ctx.nb_streams if new streams appear during av_read_frame() */
234 typedef struct OutputStream {
235 int file_index; /* file index */
236 int index; /* stream index in the output file */
237 int source_index; /* InputStream index */
238 AVStream *st; /* stream in the output file */
239 int encoding_needed; /* true if encoding needed for this stream */
241 /* input pts and corresponding output pts
243 // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
244 struct InputStream *sync_ist; /* input stream to sync against */
245 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
246 /* pts of the first frame encoded for this stream, used for limiting
249 AVBitStreamFilterContext *bitstream_filters;
252 AVFrame *output_frame;
253 AVFrame *filtered_frame;
256 AVRational frame_rate;
260 float frame_aspect_ratio;
263 /* forced key frames */
264 int64_t *forced_kf_pts;
270 AVAudioResampleContext *avr;
271 int resample_sample_fmt;
272 int resample_channels;
273 uint64_t resample_channel_layout;
274 int resample_sample_rate;
275 AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */
278 OutputFilter *filter;
283 int is_past_recording_time;
285 const char *attachment_filename;
286 int copy_initial_nonkeyframes;
288 enum PixelFormat pix_fmts[2];
292 typedef struct OutputFile {
293 AVFormatContext *ctx;
295 int ost_index; /* index of the first stream in output_streams */
296 int64_t recording_time; /* desired length of the resulting file in microseconds */
297 int64_t start_time; /* start time in microseconds */
298 uint64_t limit_filesize;
301 static InputStream **input_streams = NULL;
302 static int nb_input_streams = 0;
303 static InputFile **input_files = NULL;
304 static int nb_input_files = 0;
306 static OutputStream **output_streams = NULL;
307 static int nb_output_streams = 0;
308 static OutputFile **output_files = NULL;
309 static int nb_output_files = 0;
311 static FilterGraph **filtergraphs;
314 typedef struct OptionsContext {
315 /* input/output options */
319 SpecifierOpt *codec_names;
321 SpecifierOpt *audio_channels;
322 int nb_audio_channels;
323 SpecifierOpt *audio_sample_rate;
324 int nb_audio_sample_rate;
325 SpecifierOpt *frame_rates;
327 SpecifierOpt *frame_sizes;
329 SpecifierOpt *frame_pix_fmts;
330 int nb_frame_pix_fmts;
333 int64_t input_ts_offset;
336 SpecifierOpt *ts_scale;
338 SpecifierOpt *dump_attachment;
339 int nb_dump_attachment;
342 StreamMap *stream_maps;
344 /* first item specifies output metadata, second is input */
345 MetadataMap (*meta_data_maps)[2];
346 int nb_meta_data_maps;
347 int metadata_global_manual;
348 int metadata_streams_manual;
349 int metadata_chapters_manual;
350 const char **attachments;
353 int chapters_input_file;
355 int64_t recording_time;
356 uint64_t limit_filesize;
362 int subtitle_disable;
365 /* indexed by output file stream index */
369 SpecifierOpt *metadata;
371 SpecifierOpt *max_frames;
373 SpecifierOpt *bitstream_filters;
374 int nb_bitstream_filters;
375 SpecifierOpt *codec_tags;
377 SpecifierOpt *sample_fmts;
379 SpecifierOpt *qscale;
381 SpecifierOpt *forced_key_frames;
382 int nb_forced_key_frames;
383 SpecifierOpt *force_fps;
385 SpecifierOpt *frame_aspect_ratios;
386 int nb_frame_aspect_ratios;
387 SpecifierOpt *rc_overrides;
389 SpecifierOpt *intra_matrices;
390 int nb_intra_matrices;
391 SpecifierOpt *inter_matrices;
392 int nb_inter_matrices;
393 SpecifierOpt *top_field_first;
394 int nb_top_field_first;
395 SpecifierOpt *metadata_map;
397 SpecifierOpt *presets;
399 SpecifierOpt *copy_initial_nonkeyframes;
400 int nb_copy_initial_nonkeyframes;
401 SpecifierOpt *filters;
405 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
408 for (i = 0; i < o->nb_ ## name; i++) {\
409 char *spec = o->name[i].specifier;\
410 if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
411 outvar = o->name[i].u.type;\
/* Free all per-invocation option state in *o and reset it to defaults.
 * NOTE(review): this numbered listing is missing lines (the embedded
 * line numbers jump), so the loop/brace structure below is incomplete;
 * compare against the full source before editing. */
417 static void reset_options(OptionsContext *o)
419     const OptionDef *po = options;
422     /* all OPT_SPEC and OPT_STRING can be freed in generic way */
424         void *dst = (uint8_t*)o + po->u.off;
426         if (po->flags & OPT_SPEC) {
/* an OPT_SPEC field is laid out as {SpecifierOpt *list; int count;},
 * hence the (so + 1) trick to reach the count */
427             SpecifierOpt **so = dst;
428             int i, *count = (int*)(so + 1);
429             for (i = 0; i < *count; i++) {
430                 av_freep(&(*so)[i].specifier);
431                 if (po->flags & OPT_STRING)
432                     av_freep(&(*so)[i].u.str);
436         } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
/* per-map dynamic strings must be freed before the arrays themselves */
441     for (i = 0; i < o->nb_stream_maps; i++)
442         av_freep(&o->stream_maps[i].linklabel);
443     av_freep(&o->stream_maps);
444     av_freep(&o->meta_data_maps);
445     av_freep(&o->streamid_map);
447     memset(o, 0, sizeof(*o));
/* non-zero defaults restored after the memset */
449     o->mux_max_delay = 0.7;
450     o->recording_time = INT64_MAX;
451     o->limit_filesize = UINT64_MAX;
452     o->chapters_input_file = INT_MAX;
/* Allocate one FrameBuffer big enough for the codec's current
 * width/height/pix_fmt, including edge padding unless the codec runs
 * with CODEC_FLAG_EMU_EDGE.  Returns 0 on success, negative AVERROR
 * otherwise.  NOTE(review): several lines are missing from this
 * listing (error paths, the *pbuf store); structure is incomplete. */
458 static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
460 FrameBuffer *buf = av_mallocz(sizeof(*buf));
462 const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
463 int h_chroma_shift, v_chroma_shift;
464 int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
465 int w = s->width, h = s->height;
468 return AVERROR(ENOMEM);
/* grow the allocation so there is room for the edge around the picture */
470 if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
475 avcodec_align_dimensions(s, &w, &h);
476 if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
477 s->pix_fmt, 32)) < 0) {
481 /* XXX this shouldn't be needed, but some tests break without this line
482 * those decoders are buggy and need to be fixed.
483 * the following tests fail:
484 * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
486 memset(buf->base[0], 128, ret);
488 avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* point data[] at the first visible pixel, past the edge, per plane */
489 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
490 const int h_shift = i==0 ? 0 : h_chroma_shift;
491 const int v_shift = i==0 ? 0 : v_chroma_shift;
492 if (s->flags & CODEC_FLAG_EMU_EDGE)
493 buf->data[i] = buf->base[i];
495 buf->data[i] = buf->base[i] +
496 FFALIGN((buf->linesize[i]*edge >> v_shift) +
497 (pixel_size*edge >> h_shift), 32);
501 buf->pix_fmt = s->pix_fmt;
508 static void free_buffer_pool(InputStream *ist)
510 FrameBuffer *buf = ist->buffer_pool;
512 ist->buffer_pool = buf->next;
513 av_freep(&buf->base[0]);
515 buf = ist->buffer_pool;
519 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
521 av_assert0(buf->refcount);
523 if (!buf->refcount) {
524 buf->next = ist->buffer_pool;
525 ist->buffer_pool = buf;
/* get_buffer() callback: hand the decoder a FrameBuffer from ist's
 * pool, (re)allocating when the pool is empty or the cached buffer no
 * longer matches the codec's dimensions/format.  NOTE(review): lines
 * are missing from this listing (returns, refcount bookkeeping). */
529 static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
531 InputStream *ist = s->opaque;
535 if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
/* pop the head of the free list */
538 buf = ist->buffer_pool;
539 ist->buffer_pool = buf->next;
/* stale geometry/format: re-allocate the image in place */
541 if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
542 av_freep(&buf->base[0]);
544 if ((ret = alloc_buffer(ist, s, &buf)) < 0)
550 frame->type = FF_BUFFER_TYPE_USER;
551 frame->extended_data = frame->data;
552 frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
553 frame->width = buf->w;
554 frame->height = buf->h;
555 frame->format = buf->pix_fmt;
556 frame->sample_aspect_ratio = s->sample_aspect_ratio;
558 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
559 frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
560 frame->data[i] = buf->data[i];
561 frame->linesize[i] = buf->linesize[i];
567 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
569 InputStream *ist = s->opaque;
570 FrameBuffer *buf = frame->opaque;
573 for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
574 frame->data[i] = NULL;
576 unref_buffer(ist, buf);
579 static void filter_release_buffer(AVFilterBuffer *fb)
581 FrameBuffer *buf = fb->priv;
583 unref_buffer(buf->ist, buf);
/* Build the pixel-format list for the "format" filter: either the
 * single format forced on the output stream, or all formats the
 * encoder supports, colon-separated.  Caller owns the returned string.
 * NOTE(review): the NULL-return error paths and the buffer
 * finalization are missing from this listing. */
586 static char *choose_pixel_fmts(OutputStream *ost)
588 if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
589 return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt));
590 } else if (ost->enc->pix_fmts) {
591 const enum PixelFormat *p;
592 AVIOContext *s = NULL;
596 if (avio_open_dyn_buf(&s) < 0)
/* "fmt1:fmt2:...:" — presumably the trailing ':' is trimmed in the
 * elided tail; TODO confirm */
599 for (p = ost->enc->pix_fmts; *p != PIX_FMT_NONE; p++)
600 avio_printf(s, "%s:", av_get_pix_fmt_name(*p));
601 len = avio_close_dyn_buf(s, &ret);
/* Build a simple 1-in/1-out video filtergraph for ost fed by ist:
 * buffer src -> [scale] -> [user chain from ost->avfilter] ->
 * [format] -> buffersink.  Returns 0 or a negative AVERROR.
 * NOTE(review): error-handling lines and some branches are missing
 * from this numbered listing. */
608 static int configure_video_filters(FilterGraph *fg)
610 InputStream *ist = fg->inputs[0]->ist;
611 OutputStream *ost = fg->outputs[0]->ost;
612 AVFilterContext *in_filter, *out_filter, *filter;
613 AVCodecContext *codec = ost->st->codec;
615 AVRational sample_aspect_ratio;
619 avfilter_graph_free(&fg->graph);
620 fg->graph = avfilter_graph_alloc();
/* prefer container-level SAR, fall back to the codec's */
622 if (ist->st->sample_aspect_ratio.num) {
623 sample_aspect_ratio = ist->st->sample_aspect_ratio;
625 sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
/* buffer source args: w:h:pix_fmt:tb_num:tb_den:sar_num:sar_den */
627 snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
628 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
629 sample_aspect_ratio.num, sample_aspect_ratio.den);
631 ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
632 avfilter_get_by_name("buffer"),
633 "src", args, NULL, fg->graph);
636 ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
637 avfilter_get_by_name("buffersink"),
638 "out", NULL, NULL, fg->graph);
641 in_filter = fg->inputs[0]->filter;
642 out_filter = fg->outputs[0]->filter;
/* insert a scaler when the encoder demands a fixed size */
644 if (codec->width || codec->height) {
645 snprintf(args, 255, "%d:%d:flags=0x%X",
648 (unsigned)ost->sws_flags);
649 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
650 NULL, args, NULL, fg->graph)) < 0)
652 if ((ret = avfilter_link(in_filter, 0, filter, 0)) < 0)
/* constrain the sink's accepted formats via a "format" filter */
657 if ((pix_fmts = choose_pixel_fmts(ost))) {
658 if ((ret = avfilter_graph_create_filter(&filter,
659 avfilter_get_by_name("format"),
660 "format", pix_fmts, NULL,
663 if ((ret = avfilter_link(filter, 0, out_filter, 0)) < 0)
670 snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
671 fg->graph->scale_sws_opts = av_strdup(args);
/* user supplied -vf chain: splice it between src and sink */
674 AVFilterInOut *outputs = avfilter_inout_alloc();
675 AVFilterInOut *inputs = avfilter_inout_alloc();
677 outputs->name = av_strdup("in");
678 outputs->filter_ctx = in_filter;
679 outputs->pad_idx = 0;
680 outputs->next = NULL;
682 inputs->name = av_strdup("out");
683 inputs->filter_ctx = out_filter;
687 if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
/* no user chain: connect src directly to sink */
690 if ((ret = avfilter_link(in_filter, 0, out_filter, 0)) < 0)
694 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
697 ost->filter = fg->outputs[0];
/* Create a FilterGraph with exactly one input (ist) and one output
 * (ost), register it in the global filtergraphs array and in ist's
 * filter list.  Returns the new graph.  NOTE(review): OOM exits and
 * some grow_array() argument lines are elided in this listing. */
702 static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
704 FilterGraph *fg = av_mallocz(sizeof(*fg));
708 fg->index = nb_filtergraphs;
710 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
712 if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
714 fg->outputs[0]->ost = ost;
715 fg->outputs[0]->graph = fg;
717 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
719 if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
721 fg->inputs[0]->ist = ist;
722 fg->inputs[0]->graph = fg;
/* back-reference so the input stream can feed this graph */
724 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
725 &ist->nb_filters, ist->nb_filters + 1);
726 ist->filters[ist->nb_filters - 1] = fg->inputs[0];
/* global registry used by configure_complex_filters() et al. */
728 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
729 &nb_filtergraphs, nb_filtergraphs + 1);
730 filtergraphs[nb_filtergraphs - 1] = fg;
/* Bind one unconnected filtergraph input pad to an input stream:
 * either the stream named by the pad's "[file:spec]" label, or the
 * first unused stream of the right media type.  Fatal-logs and (in the
 * elided lines) exits on failure.  NOTE(review): several lines are
 * missing from this listing. */
735 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
738 enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
741 // TODO: support other filter types
742 if (type != AVMEDIA_TYPE_VIDEO) {
743 av_log(NULL, AV_LOG_FATAL, "Only video filters supported currently.\n");
/* labeled pad: "<file_idx>[:<stream specifier>]" */
751 int file_idx = strtol(in->name, &p, 0);
753 if (file_idx < 0 || file_idx >= nb_input_files) {
/* NOTE(review): "filtegraph" typo in the message below — runtime
 * string left untouched here; fix it in a code change if desired */
754 av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n",
755 file_idx, fg->graph_desc);
758 s = input_files[file_idx]->ctx;
760 for (i = 0; i < s->nb_streams; i++) {
761 if (s->streams[i]->codec->codec_type != type)
763 if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
769 av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
770 "matches no streams.\n", p, fg->graph_desc);
773 ist = input_streams[input_files[file_idx]->ist_index + st->index];
775 /* find the first unused stream of corresponding type */
776 for (i = 0; i < nb_input_streams; i++) {
777 ist = input_streams[i];
778 if (ist->st->codec->codec_type == type && ist->discard)
781 if (i == nb_input_streams) {
782 av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
783 "unlabeled input pad %d on filter %s", in->pad_idx,
784 in->filter_ctx->name);
/* the chosen stream must now be decoded, not discarded */
789 ist->decoding_needed = 1;
790 ist->st->discard = AVDISCARD_NONE;
792 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
793 &fg->nb_inputs, fg->nb_inputs + 1);
794 if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
796 fg->inputs[fg->nb_inputs - 1]->ist = ist;
797 fg->inputs[fg->nb_inputs - 1]->graph = fg;
799 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
800 &ist->nb_filters, ist->nb_filters + 1);
801 ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
/* Terminate one filtergraph output pad: append optional scale and
 * format filters, then a buffersink, and link them after the pad's
 * last filter.  Returns 0 or negative AVERROR.  NOTE(review): error
 * returns and some snprintf arguments are elided in this listing. */
804 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
807 AVCodecContext *codec = ofilter->ost->st->codec;
808 AVFilterContext *last_filter = out->filter_ctx;
809 int pad_idx = out->pad_idx;
813 ret = avfilter_graph_create_filter(&ofilter->filter,
814 avfilter_get_by_name("buffersink"),
815 "out", NULL, pix_fmts, fg->graph);
/* scale to the encoder's fixed dimensions when it has them */
819 if (codec->width || codec->height) {
821 AVFilterContext *filter;
823 snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
826 (unsigned)ofilter->ost->sws_flags);
827 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
828 NULL, args, NULL, fg->graph)) < 0)
830 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
833 last_filter = filter;
/* restrict output pixel formats to what the encoder accepts */
837 if ((pix_fmts = choose_pixel_fmts(ofilter->ost))) {
838 AVFilterContext *filter;
839 if ((ret = avfilter_graph_create_filter(&filter,
840 avfilter_get_by_name("format"),
841 "format", pix_fmts, NULL,
844 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
847 last_filter = filter;
852 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
/* (Re)build a complex filtergraph from its textual description:
 * parse, bind inputs to streams (first time only), create buffer
 * sources, then either finish the outputs immediately or stash them
 * in out_tmp until stream maps are known.  NOTE(review): this listing
 * is missing lines — the two output loops below are alternative
 * branches in the full source, not sequential code. */
858 static int configure_complex_filter(FilterGraph *fg)
860 AVFilterInOut *inputs, *outputs, *cur;
861 int ret, i, init = !fg->graph;
863 avfilter_graph_free(&fg->graph);
864 if (!(fg->graph = avfilter_graph_alloc()))
865 return AVERROR(ENOMEM);
867 if ((ret = avfilter_graph_parse2(fg->graph, fg->graph_desc, &inputs, &outputs)) < 0)
/* only bind streams on the first configuration pass */
870 for (cur = inputs; init && cur; cur = cur->next)
871 init_input_filter(fg, cur);
/* one buffer source per unconnected graph input */
873 for (cur = inputs, i = 0; cur; cur = cur->next, i++) {
874 InputFilter *ifilter = fg->inputs[i];
875 InputStream *ist = ifilter->ist;
879 sar = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
880 ist->st->codec->sample_aspect_ratio;
881 snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
882 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
885 if ((ret = avfilter_graph_create_filter(&ifilter->filter,
886 avfilter_get_by_name("buffer"), cur->name,
887 args, NULL, fg->graph)) < 0)
889 if ((ret = avfilter_link(ifilter->filter, 0,
890 cur->filter_ctx, cur->pad_idx)) < 0)
893 avfilter_inout_free(&inputs);
896 /* we already know the mappings between lavfi outputs and output streams,
897 * so we can finish the setup */
898 for (cur = outputs, i = 0; cur; cur = cur->next, i++)
899 configure_output_filter(fg, fg->outputs[i], cur);
900 avfilter_inout_free(&outputs);
902 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
905 /* wait until output mappings are processed */
906 for (cur = outputs; cur;) {
907 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
908 &fg->nb_outputs, fg->nb_outputs + 1);
909 if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
911 fg->outputs[fg->nb_outputs - 1]->graph = fg;
912 fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
914 fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
921 static int configure_complex_filters(void)
925 for (i = 0; i < nb_filtergraphs; i++)
926 if (!filtergraphs[i]->graph &&
927 (ret = configure_complex_filter(filtergraphs[i])) < 0)
932 static int configure_filtergraph(FilterGraph *fg)
934 return fg->graph_desc ? configure_complex_filter(fg) : configure_video_filters(fg);
937 static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
940 for (i = 0; i < fg->nb_inputs; i++)
941 if (fg->inputs[i]->ist == ist)
946 static void term_exit(void)
948 av_log(NULL, AV_LOG_QUIET, "");
951 static volatile int received_sigterm = 0;
952 static volatile int received_nb_signals = 0;
/* Async signal handler: record which signal arrived and how many have
 * been received so far; decode_interrupt_cb() polls the counter.
 * NOTE(review): the listing omitted the return-type line and any
 * trailing statements — only the two visible assignments are kept. */
static void
sigterm_handler(int sig)
{
    received_sigterm = sig;
    received_nb_signals++;
}
/* Install the termination signal handlers.  SIGXCPU is POSIX-only,
 * hence the guard (the guard lines were elided in this listing). */
static void term_init(void)
{
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
}
971 static int decode_interrupt_cb(void *ctx)
973 return received_nb_signals > 1;
976 static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown: free filtergraphs, output files/streams, input
 * files/streams, the vstats file, and library state, then report a
 * received signal if any.  NOTE(review): loop braces, the bitstream-
 * filter walk, and the final exit are elided from this listing. */
978 void exit_program(int ret)
/* filtergraphs own their graph plus InputFilter/OutputFilter arrays */
982 for (i = 0; i < nb_filtergraphs; i++) {
983 avfilter_graph_free(&filtergraphs[i]->graph);
984 for (j = 0; j < filtergraphs[i]->nb_inputs; j++)
985 av_freep(&filtergraphs[i]->inputs[j]);
986 av_freep(&filtergraphs[i]->inputs);
987 for (j = 0; j < filtergraphs[i]->nb_outputs; j++)
988 av_freep(&filtergraphs[i]->outputs[j]);
989 av_freep(&filtergraphs[i]->outputs);
990 av_freep(&filtergraphs[i]);
992 av_freep(&filtergraphs);
/* close muxers (only when the format actually opened a file) */
995 for (i = 0; i < nb_output_files; i++) {
996 AVFormatContext *s = output_files[i]->ctx;
997 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
999 avformat_free_context(s);
1000 av_dict_free(&output_files[i]->opts);
1001 av_freep(&output_files[i]);
/* per-output-stream state: bitstream filter chain, cached frames */
1003 for (i = 0; i < nb_output_streams; i++) {
1004 AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
1006 AVBitStreamFilterContext *next = bsfc->next;
1007 av_bitstream_filter_close(bsfc);
1010 output_streams[i]->bitstream_filters = NULL;
1012 if (output_streams[i]->output_frame) {
1013 AVFrame *frame = output_streams[i]->output_frame;
1014 if (frame->extended_data != frame->data)
1015 av_freep(&frame->extended_data);
1019 av_freep(&output_streams[i]->avfilter);
1020 av_freep(&output_streams[i]->filtered_frame);
1021 av_freep(&output_streams[i]);
1023 for (i = 0; i < nb_input_files; i++) {
1024 avformat_close_input(&input_files[i]->ctx);
1025 av_freep(&input_files[i]);
1027 for (i = 0; i < nb_input_streams; i++) {
1028 av_freep(&input_streams[i]->decoded_frame);
1029 av_dict_free(&input_streams[i]->opts);
1030 free_buffer_pool(input_streams[i]);
1031 av_freep(&input_streams[i]->filters);
1032 av_freep(&input_streams[i]);
1036 fclose(vstats_file);
1037 av_free(vstats_filename);
1039 av_freep(&input_streams);
1040 av_freep(&input_files);
1041 av_freep(&output_streams);
1042 av_freep(&output_files);
/* reset the global scratch-buffer bookkeeping */
1046 allocated_audio_buf_size = 0;
1048 allocated_async_buf_size = 0;
1051 avformat_network_deinit();
1053 if (received_sigterm) {
1054 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
1055 (int) received_sigterm);
1062 static void assert_avoptions(AVDictionary *m)
1064 AVDictionaryEntry *t;
1065 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1066 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort when an experimental codec is used without
 * -strict experimental, suggesting a non-experimental alternative
 * when one exists.  NOTE(review): the declaration of 'codec' and the
 * exit call are missing from this listing. */
1071 static void assert_codec_experimental(AVCodecContext *c, int encoder)
1073 const char *codec_string = encoder ? "encoder" : "decoder";
1075 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
1076 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1077 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
1078 "results.\nAdd '-strict experimental' if you want to use it.\n",
1079 codec_string, c->codec->name);
/* look up the default codec for this id — it may be a stable one */
1080 codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
1081 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
1082 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
1083 codec_string, codec->name);
1088 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
1090 if (codec && codec->sample_fmts) {
1091 const enum AVSampleFormat *p = codec->sample_fmts;
1092 for (; *p != -1; p++) {
1093 if (*p == st->codec->sample_fmt)
1097 av_log(NULL, AV_LOG_WARNING,
1098 "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
1099 av_get_sample_fmt_name(st->codec->sample_fmt),
1101 av_get_sample_fmt_name(codec->sample_fmts[0]));
1102 st->codec->sample_fmt = codec->sample_fmts[0];
1108 * Update the requested input sample format based on the output sample format.
1109 * This is currently only used to request float output from decoders which
1110 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
1111 * Ideally this will be removed in the future when decoders do not do format
1112 * conversion and only output in their native format.
/* See the block comment above (lines 1108-1112 of the original):
 * request a decoder output format that best matches the encoder's. */
1114 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
1115 AVCodecContext *enc)
1117 /* if sample formats match or a decoder sample format has already been
1118 requested, just return */
1119 if (enc->sample_fmt == dec->sample_fmt ||
1120 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
1123 /* if decoder supports more than one output format */
1124 if (dec_codec && dec_codec->sample_fmts &&
1125 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
1126 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
1127 const enum AVSampleFormat *p;
/* NOTE(review): initializing min_dec/min_inc to -1 and then taking
 * FFMIN() against them looks suspect — FFMIN(-1, x>=0) is always -1,
 * so the "closest quality" fallback below may not do what the comment
 * says.  Verify against the full source / upstream fix. */
1128 int min_dec = -1, min_inc = -1;
1130 /* find a matching sample format in the encoder */
1131 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
1132 if (*p == enc->sample_fmt) {
1133 dec->request_sample_fmt = *p;
1135 } else if (*p > enc->sample_fmt) {
1136 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
1138 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
1141 /* if none match, provide the one that matches quality closest */
1142 dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
1143 enc->sample_fmt - min_dec;
/* Clamp the stream's sample rate to the closest rate the codec
 * supports, warning when a change was necessary.
 * NOTE(review): the best/best_dist bookkeeping lines were elided in
 * the listing; restored from the visible distance comparison. */
static void choose_sample_rate(AVStream *st, AVCodec *codec)
{
    if (codec && codec->supported_samplerates) {
        const int *p  = codec->supported_samplerates;
        int best      = 0;
        int best_dist = INT_MAX;
        /* the supported_samplerates list is 0-terminated */
        for (; *p; p++) {
            int dist = abs(st->codec->sample_rate - *p);
            if (dist < best_dist) {
                best_dist = dist;
                best      = *p;
            }
        }
        if (best_dist) {
            av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best);
        }
        st->codec->sample_rate = best;
    }
}
/* Convert a packet timestamp (AV_TIME_BASE units) into seconds
 * relative to the output file's start time.
 * NOTE(review): the return-type line was elided in this listing;
 * 'double' inferred from the cast in the return expression. */
static double
get_sync_ipts(const OutputStream *ost, int64_t pts)
{
    OutputFile *of = output_files[ost->file_index];
    return (double)(pts - of->start_time) / AV_TIME_BASE;
}
/* Push one encoded packet through the stream's bitstream-filter chain
 * and into the muxer, enforcing -frames limits for non-video streams.
 * NOTE(review): the bsfc loop header, error exits, and several braces
 * are elided in this listing. */
1174 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
1176 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
1177 AVCodecContext *avctx = ost->st->codec;
1181 * Audio encoders may split the packets -- #frames in != #packets out.
1182 * But there is no reordering, so we can limit the number of output packets
1183 * by simply dropping them here.
1184 * Counting encoded video frames needs to be done separately because of
1185 * reordering, see do_video_out()
1187 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
1188 if (ost->frame_number >= ost->max_frames) {
1189 av_free_packet(pkt);
1192 ost->frame_number++;
/* apply each bitstream filter in turn; a>0 means new data allocated */
1196 AVPacket new_pkt = *pkt;
1197 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
1198 &new_pkt.data, &new_pkt.size,
1199 pkt->data, pkt->size,
1200 pkt->flags & AV_PKT_FLAG_KEY);
1202 av_free_packet(pkt);
1203 new_pkt.destruct = av_destruct_packet;
1205 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
1206 bsfc->filter->name, pkt->stream_index,
1207 avctx->codec ? avctx->codec->name : "copy");
1217 pkt->stream_index = ost->index;
1218 ret = av_interleaved_write_frame(s, pkt);
1220 print_error("av_interleaved_write_frame()", ret);
/* Return 1 while the stream is still within the output file's -t
 * recording window, 0 (and mark the stream finished) once the window
 * has been exceeded.  NOTE(review): the return statements were elided
 * in the listing; restored from the is_past_recording_time flag use. */
static int check_recording_time(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base,
                      of->recording_time, AV_TIME_BASE_Q) >= 0) {
        ost->is_past_recording_time = 1;
        return 0;
    }
    return 1;
}
/* Ensure both the decoder and encoder have a usable channel layout:
 * discard an inconsistent decoder layout, then guess missing layouts
 * from the peer's layout or the default for the channel count,
 * logging every guess.  NOTE(review): braces, else branches and exit
 * calls are elided in this listing. */
1238 static void get_default_channel_layouts(OutputStream *ost, InputStream *ist)
1240 char layout_name[256];
1241 AVCodecContext *enc = ost->st->codec;
1242 AVCodecContext *dec = ist->st->codec;
/* a layout that disagrees with the channel count is unusable */
1244 if (dec->channel_layout &&
1245 av_get_channel_layout_nb_channels(dec->channel_layout) != dec->channels) {
1246 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1247 dec->channels, dec->channel_layout);
1248 av_log(NULL, AV_LOG_ERROR, "New channel layout (%s) is invalid\n",
1250 dec->channel_layout = 0;
1252 if (!dec->channel_layout) {
/* borrow the encoder's layout if channel counts match */
1253 if (enc->channel_layout && dec->channels == enc->channels) {
1254 dec->channel_layout = enc->channel_layout;
1256 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1258 if (!dec->channel_layout) {
1259 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1260 "layout for Input Stream #%d.%d\n", ist->file_index,
1265 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1266 dec->channels, dec->channel_layout);
1267 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1268 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* same procedure for the encoder side */
1270 if (!enc->channel_layout) {
1271 if (dec->channels == enc->channels) {
1272 enc->channel_layout = dec->channel_layout;
1275 enc->channel_layout = av_get_default_channel_layout(enc->channels);
1277 if (!enc->channel_layout) {
1278 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel layout "
1279 "for Output Stream #%d.%d\n", ost->file_index,
1283 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1284 enc->channels, enc->channel_layout);
1285 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Output Stream "
1286 "#%d.%d : %s\n", ost->file_index, ost->st->index, layout_name);
/* Fill buf with 'size' bytes of digital silence for the given sample
 * format: unsigned 8-bit PCM is centered at 0x80, every other format
 * at 0.  NOTE(review): the 0x80 branch body was elided in the listing;
 * restored from the U8 check. */
static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
{
    int fill_char = 0x00;
    if (sample_fmt == AV_SAMPLE_FMT_U8)
        fill_char = 0x80;
    memset(buf, fill_char, size);
}
/* Encode one buffer of interleaved samples for ost and mux the
 * resulting packet, advancing sync_opts by the number of samples and
 * accounting the encoded bytes in audio_size.  The AVFrame is cached
 * in ost->output_frame and reused across calls.  NOTE(review):
 * braces, flush-path (buf == NULL) handling and the return value are
 * elided in this listing. */
1298 static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
1299 const uint8_t *buf, int buf_size)
1301 AVCodecContext *enc = ost->st->codec;
1302 AVFrame *frame = NULL;
1304 int ret, got_packet;
1306 av_init_packet(&pkt);
1311 if (!ost->output_frame) {
1312 ost->output_frame = avcodec_alloc_frame();
1313 if (!ost->output_frame) {
1314 av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n");
1318 frame = ost->output_frame;
/* drop any extended_data a previous call may have allocated */
1319 if (frame->extended_data != frame->data)
1320 av_freep(&frame->extended_data);
1321 avcodec_get_frame_defaults(frame);
1323 frame->nb_samples = buf_size /
1324 (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
1325 if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
1326 buf, buf_size, 1)) < 0) {
1327 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* stop encoding once -t has been reached */
1331 if (!check_recording_time(ost))
1334 frame->pts = ost->sync_opts;
1335 ost->sync_opts += frame->nb_samples;
1339 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
1340 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* rescale packet timestamps from codec to stream time base */
1345 if (pkt.pts != AV_NOPTS_VALUE)
1346 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1347 if (pkt.dts != AV_NOPTS_VALUE)
1348 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1349 if (pkt.duration > 0)
1350 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1352 write_frame(s, &pkt, ost);
1354 audio_size += pkt.size;
/*
 * (Re)allocate the global output sample buffer 'audio_buf' large enough to
 * hold 'nb_samples' input samples after conversion from the decoder ('dec')
 * rate/format to the encoder ('enc') rate/format, with generous safety
 * margins for the resampler.  On success *buf_linesize receives the plane
 * linesize computed by av_samples_get_buffer_size().
 * Returns 0 on success, AVERROR(EINVAL) on sample-count overflow, a
 * negative size error, or AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): sampled listing — the divisor of the rate conversion and
 * the allocation-success check are elided.
 */
1360 static int alloc_audio_output_buf(AVCodecContext *dec, AVCodecContext *enc,
1361 int nb_samples, int *buf_linesize)
1363 int64_t audio_buf_samples;
1366 /* calculate required number of samples to allocate */
1367 audio_buf_samples = ((int64_t)nb_samples * enc->sample_rate + dec->sample_rate) /
1369 audio_buf_samples = 4 * audio_buf_samples + 16; // safety factors for resampling
1370 audio_buf_samples = FFMAX(audio_buf_samples, enc->frame_size);
1371 if (audio_buf_samples > INT_MAX)
1372 return AVERROR(EINVAL);
1374 audio_buf_size = av_samples_get_buffer_size(buf_linesize, enc->channels,
1376 enc->sample_fmt, 0);
1377 if (audio_buf_size < 0)
1378 return audio_buf_size;
1380 av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
1382 return AVERROR(ENOMEM);
/*
 * Process one decoded audio frame for output stream 'ost':
 *  - default the channel layouts (get_default_channel_layouts),
 *  - (re)open the libavresample context whenever resampling becomes
 *    necessary or the input stream parameters change,
 *  - apply audio_sync_method timestamp handling (pad with silence / drop
 *    samples / ask the resampler for drift compensation),
 *  - convert the samples if needed, then either buffer them in ost->fifo
 *    and encode fixed-size frames, or pass the whole buffer to
 *    encode_audio_frame() for variable-frame-size codecs.
 * NOTE(review): sampled listing — many closing braces, exit_program()
 * calls and a few intermediate statements are elided below.
 */
1387 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
1388 InputStream *ist, AVFrame *decoded_frame)
1392 int size_out, frame_bytes, resample_changed, ret;
1393 AVCodecContext *enc = ost->st->codec;
1394 AVCodecContext *dec = ist->st->codec;
1395 int osize = av_get_bytes_per_sample(enc->sample_fmt);
1396 int isize = av_get_bytes_per_sample(dec->sample_fmt);
1397 uint8_t *buf = decoded_frame->data[0];
1398 int size = decoded_frame->nb_samples * dec->channels * isize;
1399 int out_linesize = 0;
1400 int buf_linesize = decoded_frame->linesize[0];
1402 get_default_channel_layouts(ost, ist);
1404 if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples, &out_linesize) < 0) {
1405 av_log(NULL, AV_LOG_FATAL, "Error allocating audio buffer\n");
/* resampling is needed whenever any conversion parameter differs, or when
   drift compensation (audio_sync_method > 1) may be requested */
1409 if (audio_sync_method > 1 ||
1410 enc->channels != dec->channels ||
1411 enc->channel_layout != dec->channel_layout ||
1412 enc->sample_rate != dec->sample_rate ||
1413 dec->sample_fmt != enc->sample_fmt)
1414 ost->audio_resample = 1;
1416 resample_changed = ost->resample_sample_fmt != dec->sample_fmt ||
1417 ost->resample_channels != dec->channels ||
1418 ost->resample_channel_layout != dec->channel_layout ||
1419 ost->resample_sample_rate != dec->sample_rate;
1421 if ((ost->audio_resample && !ost->avr) || resample_changed) {
1422 if (resample_changed) {
1423 av_log(NULL, AV_LOG_INFO, "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:0x%"PRIx64" to rate:%d fmt:%s ch:%d chl:0x%"PRIx64"\n",
1424 ist->file_index, ist->st->index,
1425 ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt),
1426 ost->resample_channels, ost->resample_channel_layout,
1427 dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt),
1428 dec->channels, dec->channel_layout);
1429 ost->resample_sample_fmt = dec->sample_fmt;
1430 ost->resample_channels = dec->channels;
1431 ost->resample_channel_layout = dec->channel_layout;
1432 ost->resample_sample_rate = dec->sample_rate;
1434 avresample_close(ost->avr);
1436 /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */
1437 if (audio_sync_method <= 1 &&
1438 ost->resample_sample_fmt == enc->sample_fmt &&
1439 ost->resample_channels == enc->channels &&
1440 ost->resample_channel_layout == enc->channel_layout &&
1441 ost->resample_sample_rate == enc->sample_rate) {
1442 ost->audio_resample = 0;
1443 } else if (ost->audio_resample) {
1445 ost->avr = avresample_alloc_context();
1447 av_log(NULL, AV_LOG_FATAL, "Error allocating context for libavresample\n");
/* configure the resampler from decoder (input) and encoder (output) params */
1452 av_opt_set_int(ost->avr, "in_channel_layout", dec->channel_layout, 0);
1453 av_opt_set_int(ost->avr, "in_sample_fmt", dec->sample_fmt, 0);
1454 av_opt_set_int(ost->avr, "in_sample_rate", dec->sample_rate, 0);
1455 av_opt_set_int(ost->avr, "out_channel_layout", enc->channel_layout, 0);
1456 av_opt_set_int(ost->avr, "out_sample_fmt", enc->sample_fmt, 0);
1457 av_opt_set_int(ost->avr, "out_sample_rate", enc->sample_rate, 0);
1458 if (audio_sync_method > 1)
1459 av_opt_set_int(ost->avr, "force_resampling", 1, 0);
1461 /* if both the input and output formats are s16 or u8, use s16 as
1462 the internal sample format */
1463 if (av_get_bytes_per_sample(dec->sample_fmt) <= 2 &&
1464 av_get_bytes_per_sample(enc->sample_fmt) <= 2) {
1465 av_opt_set_int(ost->avr, "internal_sample_fmt", AV_SAMPLE_FMT_S16P, 0);
1468 ret = avresample_open(ost->avr);
1470 av_log(NULL, AV_LOG_FATAL, "Error opening libavresample\n");
/* audio/video sync: 'delta' is the drift in output samples between where
   this frame should land and where the fifo currently ends */
1476 if (audio_sync_method > 0) {
1477 double delta = get_sync_ipts(ost, ist->last_dts) * enc->sample_rate - ost->sync_opts -
1478 av_fifo_size(ost->fifo) / (enc->channels * osize);
1479 int idelta = delta * dec->sample_rate / enc->sample_rate;
1480 int byte_delta = idelta * isize * dec->channels;
1482 // FIXME resample delay
1483 if (fabs(delta) > 50) {
1484 if (ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate) {
1485 if (byte_delta < 0) {
/* negative drift: drop leading input samples (bounded by the frame size) */
1486 byte_delta = FFMAX(byte_delta, -size);
1489 av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
1490 -byte_delta / (isize * dec->channels));
/* positive drift: prepend byte_delta bytes of silence before the frame */
1495 av_fast_malloc(&async_buf, &allocated_async_buf_size,
1498 av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
1502 if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples + idelta, &out_linesize) < 0) {
1503 av_log(NULL, AV_LOG_FATAL, "Error allocating audio buffer\n");
1508 generate_silence(async_buf, dec->sample_fmt, byte_delta);
1509 memcpy(async_buf + byte_delta, buf, size);
1512 buf_linesize = allocated_async_buf_size;
1513 av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
1515 } else if (audio_sync_method > 1) {
/* small drift: let the resampler stretch/squeeze to compensate */
1516 int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
1517 av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
1518 delta, comp, enc->sample_rate);
1519 // fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
1520 avresample_set_compensation(ost->avr, comp, enc->sample_rate);
1523 } else if (audio_sync_method == 0)
1524 ost->sync_opts = lrintf(get_sync_ipts(ost, ist->last_dts) * enc->sample_rate) -
1525 av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
1527 if (ost->audio_resample) {
1529 size_out = avresample_convert(ost->avr, (void **)&buftmp,
1530 allocated_audio_buf_size, out_linesize,
1531 (void **)&buf, buf_linesize,
1532 size / (dec->channels * isize));
1533 size_out = size_out * enc->channels * osize;
1539 /* now encode as many frames as possible */
1540 if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
1541 /* output resampled raw samples */
1542 if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
1543 av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
1546 av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);
1548 frame_bytes = enc->frame_size * osize * enc->channels;
1550 while (av_fifo_size(ost->fifo) >= frame_bytes) {
1551 av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
1552 encode_audio_frame(s, ost, audio_buf, frame_bytes);
/* variable-frame-size codec: encode the whole converted buffer at once */
1555 encode_audio_frame(s, ost, buftmp, size_out);
/*
 * Optionally deinterlace a decoded picture in place before encoding.
 * When do_deinterlace is set, a temporary picture of the same geometry is
 * allocated, avpicture_deinterlace() runs into it, and on success the
 * result is copied back into *picture.  *bufp presumably receives the
 * temporary buffer so the caller can free it — the assignment is elided in
 * this sampled listing; confirm against the full source.
 */
1559 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1561 AVCodecContext *dec;
1562 AVPicture *picture2;
1563 AVPicture picture_tmp;
1566 dec = ist->st->codec;
1568 /* deinterlace : must be done before any resize */
1569 if (do_deinterlace) {
1572 /* create temporary picture */
1573 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1574 buf = av_malloc(size);
1578 picture2 = &picture_tmp;
1579 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1581 if (avpicture_deinterlace(picture2, picture,
1582 dec->pix_fmt, dec->width, dec->height) < 0) {
1583 /* if error, do not deinterlace */
1584 av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
1593 if (picture != picture2)
1594 *picture = *picture2;
/*
 * Encode one AVSubtitle and mux the resulting packet(s).
 * DVB subtitles are encoded twice (one packet to draw, one to clear), so
 * 'nb' is presumably set to 2 for CODEC_ID_DVB_SUBTITLE and 1 otherwise —
 * the assignments are elided in this sampled listing.  The subtitle pts is
 * normalized so start_display_time is 0, and for DVB the display times are
 * folded into the packet pts in 90 kHz units.
 * NOTE(review): the parameter list (ist, sub, pts by the visible uses) is
 * partially elided here.
 */
1598 static void do_subtitle_out(AVFormatContext *s,
1604 static uint8_t *subtitle_out = NULL;
1605 int subtitle_out_max_size = 1024 * 1024;
1606 int subtitle_out_size, nb, i;
1607 AVCodecContext *enc;
1610 if (pts == AV_NOPTS_VALUE) {
1611 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1617 enc = ost->st->codec;
1619 if (!subtitle_out) {
1620 subtitle_out = av_malloc(subtitle_out_max_size);
1623 /* Note: DVB subtitle need one packet to draw them and one other
1624 packet to clear them */
1625 /* XXX: signal it in the codec context ? */
1626 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1631 for (i = 0; i < nb; i++) {
1632 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
1633 if (!check_recording_time(ost))
1636 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1637 // start_display_time is required to be 0
1638 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1639 sub->end_display_time -= sub->start_display_time;
1640 sub->start_display_time = 0;
1641 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1642 subtitle_out_max_size, sub);
1643 if (subtitle_out_size < 0) {
1644 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1648 av_init_packet(&pkt);
1649 pkt.data = subtitle_out;
1650 pkt.size = subtitle_out_size;
1651 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1652 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1653 /* XXX: the pts correction is handled here. Maybe handling
1654 it in the codec would be better */
/* 90 == 90 kHz MPEG clock ticks per millisecond of display time */
1656 pkt.pts += 90 * sub->start_display_time;
1658 pkt.pts += 90 * sub->end_display_time;
1660 write_frame(s, &pkt, ost);
/*
 * Output one video frame on 'ost', honouring the video sync mode:
 * compute the drift 'delta' between the ideal output pts and sync_opts,
 * then duplicate or drop frames (CFR), snap timestamps (VFR/passthrough),
 * and for each emitted frame either write the raw AVPicture directly
 * (AVFMT_RAWPICTURE + rawvideo) or encode it with avcodec_encode_video2().
 * *frame_size reports the encoded packet size for do_video_stats().
 * NOTE(review): sampled listing — the VSYNC_CFR/VSYNC_VFR case labels,
 * nb_frames assignments in the switch, and several closing braces are
 * elided.
 */
1664 static void do_video_out(AVFormatContext *s,
1666 AVFrame *in_picture,
1667 int *frame_size, float quality)
1669 int nb_frames, i, ret, format_video_sync;
1670 AVCodecContext *enc;
1671 double sync_ipts, delta;
1673 enc = ost->st->codec;
1675 sync_ipts = get_sync_ipts(ost, in_picture->pts) / av_q2d(enc->time_base);
1676 delta = sync_ipts - ost->sync_opts;
1678 /* by default, we output a single frame */
1683 format_video_sync = video_sync_method;
1684 if (format_video_sync == VSYNC_AUTO)
1685 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
1686 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
1688 switch (format_video_sync) {
1690 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1693 else if (delta > 1.1)
1694 nb_frames = lrintf(delta);
1699 else if (delta > 0.6)
1700 ost->sync_opts = lrintf(sync_ipts);
1702 case VSYNC_PASSTHROUGH:
1703 ost->sync_opts = lrintf(sync_ipts);
/* never emit more frames than the stream's -frames limit allows */
1709 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1710 if (nb_frames == 0) {
1712 av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1714 } else if (nb_frames > 1) {
1715 nb_frames_dup += nb_frames - 1;
1716 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1719 if (!ost->frame_number)
1720 ost->first_pts = ost->sync_opts;
1722 /* duplicates frame if needed */
1723 for (i = 0; i < nb_frames; i++) {
1725 av_init_packet(&pkt);
1729 if (!check_recording_time(ost))
1732 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1733 enc->codec->id == CODEC_ID_RAWVIDEO) {
1734 /* raw pictures are written as AVPicture structure to
1735 avoid any copies. We support temporarily the older
1737 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1738 enc->coded_frame->top_field_first = in_picture->top_field_first;
1739 pkt.data = (uint8_t *)in_picture;
1740 pkt.size = sizeof(AVPicture);
1741 pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
1742 pkt.flags |= AV_PKT_FLAG_KEY;
1744 write_frame(s, &pkt, ost);
1747 AVFrame big_picture;
1749 big_picture = *in_picture;
1750 /* better than nothing: use input picture interlaced
1752 big_picture.interlaced_frame = in_picture->interlaced_frame;
1753 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1754 if (ost->top_field_first == -1)
1755 big_picture.top_field_first = in_picture->top_field_first;
1757 big_picture.top_field_first = !!ost->top_field_first;
1760 /* handles same_quant here. This is not correct because it may
1761 not be a global option */
1762 big_picture.quality = quality;
1763 if (!enc->me_threshold)
1764 big_picture.pict_type = 0;
1765 big_picture.pts = ost->sync_opts;
/* force a keyframe at user-requested (-force_key_frames) timestamps */
1766 if (ost->forced_kf_index < ost->forced_kf_count &&
1767 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1768 big_picture.pict_type = AV_PICTURE_TYPE_I;
1769 ost->forced_kf_index++;
1771 ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
1773 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1778 if (pkt.pts != AV_NOPTS_VALUE)
1779 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1780 if (pkt.dts != AV_NOPTS_VALUE)
1781 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1783 write_frame(s, &pkt, ost);
1784 *frame_size = pkt.size;
1785 video_size += pkt.size;
1787 /* if two pass, output log */
1788 if (ost->logfile && enc->stats_out) {
1789 fprintf(ost->logfile, "%s", enc->stats_out);
1795 * For video, number of frames in == number of packets out.
1796 * But there may be reordering, so we can't throw away frames on encoder
1797 * flush, we need to limit them here, before they go into encoder.
1799 ost->frame_number++;
/*
 * Convert a normalized mean squared error 'd' into PSNR in decibels:
 * -10 * log10(d).  Callers pass error/(w*h*255^2), so smaller error
 * yields a larger (better) PSNR.
 */
1803 static double psnr(double d)
1805 return -10.0 * log(d) / log(10.0);
/*
 * Append a per-frame statistics line (frame number, quantizer, optional
 * PSNR, frame size, time, instantaneous and average bitrate, picture type)
 * for a video stream to the global vstats_file, opening it lazily on the
 * first call.
 * NOTE(review): sampled listing — the fopen error handling and the
 * function's frame_size parameter declaration are elided.
 */
1808 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1811 AVCodecContext *enc;
1813 double ti1, bitrate, avg_bitrate;
1815 /* this is executed just the first time do_video_stats is called */
1817 vstats_file = fopen(vstats_filename, "w");
1824 enc = ost->st->codec;
1825 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1826 frame_number = ost->frame_number;
1827 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1828 if (enc->flags&CODEC_FLAG_PSNR)
1829 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1831 fprintf(vstats_file,"f_size= %6d ", frame_size);
1832 /* compute pts value */
1833 ti1 = ost->sync_opts * av_q2d(enc->time_base);
1837 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1838 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1839 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1840 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1841 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1845 /* check for new output on any of the filtergraphs */
/*
 * Drain every output stream's filtergraph sink: for each buffered filtered
 * frame, copy its properties into ost->filtered_frame, rescale its pts into
 * the encoder time base, honour the output file's start_time, and dispatch
 * to do_video_out() (audio/subtitle filters are not supported yet).
 * Returns 0, or AVERROR(ENOMEM) if the reusable frame cannot be allocated.
 * NOTE(review): sampled listing — the target time base of the pts rescale
 * and a start_time skip path are elided.
 */
1846 static int poll_filters(void)
1848 AVFilterBufferRef *picref;
1849 AVFrame *filtered_frame = NULL;
1852 for (i = 0; i < nb_output_streams; i++) {
1853 OutputStream *ost = output_streams[i];
1854 OutputFile *of = output_files[ost->file_index];
1856 if (!ost->filter || ost->is_past_recording_time)
/* lazily allocate one reusable AVFrame per output stream */
1859 if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1860 return AVERROR(ENOMEM);
1862 avcodec_get_frame_defaults(ost->filtered_frame);
1863 filtered_frame = ost->filtered_frame;
1865 while (av_buffersink_read(ost->filter->filter, &picref) >= 0) {
1866 avfilter_copy_buf_props(filtered_frame, picref);
1867 filtered_frame->pts = av_rescale_q(picref->pts,
1868 ost->filter->filter->inputs[0]->time_base,
1871 if (of->start_time && filtered_frame->pts < of->start_time)
1874 switch (ost->filter->filter->inputs[0]->type) {
1875 case AVMEDIA_TYPE_VIDEO:
1876 if (!ost->frame_aspect_ratio)
1877 ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
1879 do_video_out(of->ctx, ost, filtered_frame, &frame_size,
1880 same_quant ? ost->last_quality :
1881 ost->st->codec->global_quality);
1882 if (vstats_filename && frame_size)
1883 do_video_stats(of->ctx, ost, frame_size);
1886 // TODO support audio/subtitle filters
1890 avfilter_unref_buffer(picref);
/*
 * Print the transcoding progress line ("frame= ... fps= ... q= ... size=
 * ... bitrate=") to the log, at most every 0.5 seconds, plus a final
 * summary (per-type sizes, muxing overhead) when is_last_report is set.
 * Also maintains a quantizer histogram and PSNR totals when qp_hist /
 * CODEC_FLAG_PSNR are active.
 * NOTE(review): sampled listing — several locals (buf, total_size, q, qp,
 * j), the qp_hist guard and some closing braces are elided.
 */
1896 static void print_report(int is_last_report, int64_t timer_start)
1900 AVFormatContext *oc;
1902 AVCodecContext *enc;
1903 int frame_number, vid, i;
1904 double bitrate, ti1, pts;
1905 static int64_t last_time = -1;
1906 static int qp_histogram[52];
1908 if (!print_stats && !is_last_report)
1911 if (!is_last_report) {
1913 /* display the report every 0.5 seconds */
1914 cur_time = av_gettime();
1915 if (last_time == -1) {
1916 last_time = cur_time;
1919 if ((cur_time - last_time) < 500000)
1921 last_time = cur_time;
1925 oc = output_files[0]->ctx;
1927 total_size = avio_size(oc->pb);
1928 if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
1929 total_size = avio_tell(oc->pb);
1934 for (i = 0; i < nb_output_streams; i++) {
1936 ost = output_streams[i];
1937 enc = ost->st->codec;
1938 if (!ost->stream_copy && enc->coded_frame)
1939 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1940 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1941 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
/* only the first video stream drives the frame/fps display */
1943 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1944 float t = (av_gettime() - timer_start) / 1000000.0;
1946 frame_number = ost->frame_number;
1947 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1948 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
1950 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1954 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1956 for (j = 0; j < 32; j++)
1957 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
1959 if (enc->flags&CODEC_FLAG_PSNR) {
1961 double error, error_sum = 0;
1962 double scale, scale_sum = 0;
1963 char type[3] = { 'Y','U','V' };
1964 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1965 for (j = 0; j < 3; j++) {
1966 if (is_last_report) {
1967 error = enc->error[j];
1968 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1970 error = enc->coded_frame->error[j];
1971 scale = enc->width * enc->height * 255.0 * 255.0;
1977 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1979 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1983 /* compute min output value */
1984 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1985 if ((pts < ti1) && (pts > 0))
1991 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1993 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1994 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1995 (double)total_size / 1024, ti1, bitrate);
1997 if (nb_frames_dup || nb_frames_drop)
1998 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1999 nb_frames_dup, nb_frames_drop);
2001 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
2005 if (is_last_report) {
2006 int64_t raw= audio_size + video_size + extra_size;
2007 av_log(NULL, AV_LOG_INFO, "\n");
2008 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
2009 video_size / 1024.0,
2010 audio_size / 1024.0,
2011 extra_size / 1024.0,
2012 100.0 * (total_size - raw) / raw
/*
 * At end of input, drain every output encoder: first encode whatever
 * samples remain in each audio stream's fifo, then feed NULL frames to
 * the encoder until it stops returning packets (delayed frames), rescaling
 * packet timestamps and muxing each one.  Streams that need no encoding,
 * raw-sample audio (frame_size <= 1), and rawvideo passthrough are skipped.
 * NOTE(review): sampled listing — the outer flush loop header and the
 * stop_encoding/got_packet bookkeeping between cases are elided.
 */
2017 static void flush_encoders(void)
2021 for (i = 0; i < nb_output_streams; i++) {
2022 OutputStream *ost = output_streams[i];
2023 AVCodecContext *enc = ost->st->codec;
2024 AVFormatContext *os = output_files[ost->file_index]->ctx;
2025 int stop_encoding = 0;
2027 if (!ost->encoding_needed)
2030 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
2032 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
2037 int fifo_bytes, got_packet;
2038 av_init_packet(&pkt);
2042 switch (ost->st->codec->codec_type) {
2043 case AVMEDIA_TYPE_AUDIO:
2044 fifo_bytes = av_fifo_size(ost->fifo);
2045 if (fifo_bytes > 0) {
2046 /* encode any samples remaining in fifo */
2047 int frame_bytes = fifo_bytes;
2049 av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
2051 encode_audio_frame(os, ost, audio_buf, frame_bytes);
2053 /* flush encoder with NULL frames until it is done
2054 returning packets */
2055 if (encode_audio_frame(os, ost, NULL, 0) == 0) {
2061 case AVMEDIA_TYPE_VIDEO:
2062 ret = avcodec_encode_video2(enc, &pkt, NULL, &got_packet);
2064 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
2068 if (ost->logfile && enc->stats_out) {
2069 fprintf(ost->logfile, "%s", enc->stats_out);
2075 if (pkt.pts != AV_NOPTS_VALUE)
2076 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
2077 if (pkt.dts != AV_NOPTS_VALUE)
2078 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
2079 write_frame(os, &pkt, ost);
2091 * Check whether a packet from ist should be written into ost at this time
/*
 * Returns non-zero when 'ost' is mapped to 'ist' (matching global input
 * stream index) and the input timestamp has reached the output file's
 * start_time.  The early-return values are elided in this sampled listing.
 */
2093 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2095 OutputFile *of = output_files[ost->file_index];
2096 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2098 if (ost->source_index != ist_index)
2101 if (of->start_time && ist->last_dts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding: rebase
 * pts/dts/duration from the input stream time base to the output stream
 * time base (offset by the output file's start_time), account the byte
 * counters, optionally run av_parser_change() for codecs that need
 * bitstream adjustment, and mux via write_frame().
 * Packets before the first keyframe are dropped unless
 * copy_initial_nonkeyframes is set; packets past recording_time mark the
 * stream finished.
 */
2107 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2109 OutputFile *of = output_files[ost->file_index];
2110 int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
2113 av_init_packet(&opkt);
2115 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2116 !ost->copy_initial_nonkeyframes)
2119 if (of->recording_time != INT64_MAX &&
2120 ist->last_dts >= of->recording_time + of->start_time) {
2121 ost->is_past_recording_time = 1;
2125 /* force the input stream PTS */
2126 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
2127 audio_size += pkt->size;
2128 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2129 video_size += pkt->size;
2133 if (pkt->pts != AV_NOPTS_VALUE)
2134 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
2136 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the tracked input dts when the packet carries none */
2138 if (pkt->dts == AV_NOPTS_VALUE)
2139 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
2141 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
2142 opkt.dts -= ost_tb_start_time;
2144 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
2145 opkt.flags = pkt->flags;
2147 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2148 if ( ost->st->codec->codec_id != CODEC_ID_H264
2149 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
2150 && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
2151 && ost->st->codec->codec_id != CODEC_ID_VC1
2153 if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
2154 opkt.destruct = av_destruct_packet;
2156 opkt.data = pkt->data;
2157 opkt.size = pkt->size;
2160 write_frame(of->ctx, &opkt, ost);
2161 ost->st->codec->frame_number++;
2162 av_free_packet(&opkt);
/*
 * When -re (rate emulation) is enabled for this input file, throttle
 * reading to real time by comparing the stream's last dts (converted to
 * microseconds) against wall-clock time elapsed since the stream started.
 * NOTE(review): the actual sleep call is elided in this sampled listing.
 */
2165 static void rate_emu_sleep(InputStream *ist)
2167 if (input_files[ist->file_index]->rate_emu) {
2168 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2169 int64_t now = av_gettime() - ist->start;
/*
 * Decode one audio packet for 'ist' into ist->decoded_frame, advance the
 * stream's next_dts by the decoded duration, apply the -vol volume scaling
 * in place for integer/float/double sample formats, then hand the frame to
 * do_audio_out() for every output stream mapped to this input.
 * *got_output reports whether a frame was produced.
 * NOTE(review): sampled listing — error returns, the sample_rate divisor
 * of the next_dts increment, and several closing braces are elided.
 */
2175 static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2177 AVFrame *decoded_frame;
2178 AVCodecContext *avctx = ist->st->codec;
2179 int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
2182 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2183 return AVERROR(ENOMEM);
2185 avcodec_get_frame_defaults(ist->decoded_frame);
2186 decoded_frame = ist->decoded_frame;
2188 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
2194 /* no audio frame */
2198 /* if the decoder provides a pts, use it instead of the last packet pts.
2199 the decoder could be delaying output by a packet or more. */
2200 if (decoded_frame->pts != AV_NOPTS_VALUE)
2201 ist->next_dts = decoded_frame->pts;
2203 /* increment next_dts to use for the case where the input stream does not
2204 have timestamps or there are multiple frames in the packet */
2205 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2208 // preprocess audio (volume)
2209 if (audio_volume != 256) {
2210 int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
2211 void *samples = decoded_frame->data[0];
2212 switch (avctx->sample_fmt) {
2213 case AV_SAMPLE_FMT_U8:
2215 uint8_t *volp = samples;
2216 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
/* U8 is biased around 128, so center, scale, re-bias, and clip */
2217 int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
2218 *volp++ = av_clip_uint8(v);
2222 case AV_SAMPLE_FMT_S16:
2224 int16_t *volp = samples;
2225 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2226 int v = ((*volp) * audio_volume + 128) >> 8;
2227 *volp++ = av_clip_int16(v);
2231 case AV_SAMPLE_FMT_S32:
2233 int32_t *volp = samples;
2234 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2235 int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
2236 *volp++ = av_clipl_int32(v);
2240 case AV_SAMPLE_FMT_FLT:
2242 float *volp = samples;
2243 float scale = audio_volume / 256.f;
2244 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2249 case AV_SAMPLE_FMT_DBL:
2251 double *volp = samples;
2252 double scale = audio_volume / 256.;
2253 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2259 av_log(NULL, AV_LOG_FATAL,
2260 "Audio volume adjustment on sample format %s is not supported.\n",
2261 av_get_sample_fmt_name(ist->st->codec->sample_fmt));
2266 rate_emu_sleep(ist);
2268 for (i = 0; i < nb_output_streams; i++) {
2269 OutputStream *ost = output_streams[i];
2271 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2273 do_audio_out(output_files[ost->file_index]->ctx, ost, ist, decoded_frame);
/*
 * Decode one video packet for 'ist': run avcodec_decode_video2(), correct
 * the frame pts via guess_correct_pts(), optionally deinterlace, detect
 * input frame size/format changes (reconfiguring any filtergraph this
 * stream feeds), then inject the frame into every input filter — either as
 * a zero-copy AVFilterBufferRef when the decoder used DR1 buffers, or via
 * av_vsrc_buffer_add_frame() otherwise.  On EOF-style calls a NULL buffer
 * is pushed so the filters can flush.
 * NOTE(review): sampled listing — error returns, the same_quant bookkeeping
 * and several closing braces are elided.
 */
2279 static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
2281 AVFrame *decoded_frame;
2282 void *buffer_to_free = NULL;
2283 int i, ret = 0, resample_changed;
2286 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2287 return AVERROR(ENOMEM);
2289 avcodec_get_frame_defaults(ist->decoded_frame);
2290 decoded_frame = ist->decoded_frame;
2291 pkt->pts = *pkt_pts;
2292 pkt->dts = ist->last_dts;
2293 *pkt_pts = AV_NOPTS_VALUE;
2295 ret = avcodec_decode_video2(ist->st->codec,
2296 decoded_frame, got_output, pkt);
2300 quality = same_quant ? decoded_frame->quality : 0;
2302 /* no picture yet */
2304 for (i = 0; i < ist->nb_filters; i++)
2305 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2308 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
2309 decoded_frame->pkt_dts);
2311 pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
2313 rate_emu_sleep(ist);
2315 if (ist->st->sample_aspect_ratio.num)
2316 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2318 resample_changed = ist->resample_width != decoded_frame->width ||
2319 ist->resample_height != decoded_frame->height ||
2320 ist->resample_pix_fmt != decoded_frame->format;
2321 if (resample_changed) {
2322 av_log(NULL, AV_LOG_INFO,
2323 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2324 ist->file_index, ist->st->index,
2325 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2326 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2328 ist->resample_width = decoded_frame->width;
2329 ist->resample_height = decoded_frame->height;
2330 ist->resample_pix_fmt = decoded_frame->format;
/* the new geometry invalidates any filtergraph fed by this stream */
2332 for (i = 0; i < nb_filtergraphs; i++)
2333 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2334 configure_filtergraph(filtergraphs[i]) < 0) {
2335 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2340 for (i = 0; i < ist->nb_filters; i++) {
2341 // XXX what an ugly hack
2342 if (ist->filters[i]->graph->nb_outputs == 1)
2343 ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
2345 if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
/* zero-copy path: wrap the decoder's DR1 buffer in a filter buffer ref */
2346 FrameBuffer *buf = decoded_frame->opaque;
2347 AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
2348 decoded_frame->data, decoded_frame->linesize,
2349 AV_PERM_READ | AV_PERM_PRESERVE,
2350 ist->st->codec->width, ist->st->codec->height,
2351 ist->st->codec->pix_fmt);
2353 avfilter_copy_frame_props(fb, decoded_frame);
2354 fb->buf->priv = buf;
2355 fb->buf->free = filter_release_buffer;
2358 av_buffersrc_buffer(ist->filters[i]->filter, fb);
2360 av_vsrc_buffer_add_frame(ist->filters[i]->filter, decoded_frame,
2361 decoded_frame->pts, decoded_frame->sample_aspect_ratio);
2364 av_free(buffer_to_free);
/*
 * Decode one subtitle packet and send the resulting AVSubtitle to
 * do_subtitle_out() for every mapped, encoding output stream, then free it.
 * *got_output reports whether a subtitle was produced.
 * NOTE(review): sampled listing — the error/no-output early returns are
 * elided.
 */
2368 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2370 AVSubtitle subtitle;
2371 int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2372 &subtitle, got_output, pkt);
2378 rate_emu_sleep(ist);
2380 for (i = 0; i < nb_output_streams; i++) {
2381 OutputStream *ost = output_streams[i];
2383 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2386 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
2389 avsubtitle_free(&subtitle);
2393 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Dispatch one input packet: if the stream is being decoded, loop over the
 * packet (which may contain several frames) calling the per-media-type
 * transcode_* helper and advancing next_dts from packet duration, frame
 * rate, or codec time base; for NULL packets keep looping while the
 * decoder still emits delayed output.  Streams that are only stream-copied
 * skip decoding and go straight to do_streamcopy() with dts bookkeeping.
 * NOTE(review): sampled listing — the avpkt setup for pkt vs. EOF, the
 * consumed-bytes advance of avpkt, and several break/return statements are
 * elided.
 */
2394 static int output_packet(InputStream *ist, const AVPacket *pkt)
2398 int64_t pkt_pts = AV_NOPTS_VALUE;
2401 if (ist->next_dts == AV_NOPTS_VALUE)
2402 ist->next_dts = ist->last_dts;
2406 av_init_packet(&avpkt);
2414 if (pkt->dts != AV_NOPTS_VALUE)
2415 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2416 if (pkt->pts != AV_NOPTS_VALUE)
2417 pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2419 // while we have more to decode or while the decoder did output something on EOF
2420 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2424 ist->last_dts = ist->next_dts;
2426 if (avpkt.size && avpkt.size != pkt->size) {
2427 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2428 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2429 ist->showed_multi_packet_warning = 1;
2432 switch (ist->st->codec->codec_type) {
2433 case AVMEDIA_TYPE_AUDIO:
2434 ret = transcode_audio (ist, &avpkt, &got_output);
2436 case AVMEDIA_TYPE_VIDEO:
2437 ret = transcode_video (ist, &avpkt, &got_output, &pkt_pts);
/* advance next_dts: prefer packet duration, then r_frame_rate, then the
   codec time base scaled by repeat_pict/ticks_per_frame */
2439 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2440 else if (ist->st->r_frame_rate.num)
2441 ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
2442 ist->st->r_frame_rate.num},
2444 else if (ist->st->codec->time_base.num != 0) {
2445 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
2446 ist->st->codec->ticks_per_frame;
2447 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
2450 case AVMEDIA_TYPE_SUBTITLE:
2451 ret = transcode_subtitles(ist, &avpkt, &got_output);
2459 // touch data and size only if not EOF
2469 /* handle stream copy */
2470 if (!ist->decoding_needed) {
2471 rate_emu_sleep(ist);
2472 ist->last_dts = ist->next_dts;
2473 switch (ist->st->codec->codec_type) {
2474 case AVMEDIA_TYPE_AUDIO:
2475 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2476 ist->st->codec->sample_rate;
2478 case AVMEDIA_TYPE_VIDEO:
2479 if (ist->st->codec->time_base.num != 0) {
2480 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2481 ist->next_dts += ((int64_t)AV_TIME_BASE *
2482 ist->st->codec->time_base.num * ticks) /
2483 ist->st->codec->time_base.den;
2488 for (i = 0; pkt && i < nb_output_streams; i++) {
2489 OutputStream *ost = output_streams[i];
2491 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2494 do_streamcopy(ist, ost, pkt);
/* Print an SDP session description for all output files to stdout
 * (used for RTP streaming).
 * NOTE(review): the av_malloc result is not checked in the visible lines —
 * confirm whether the elided lines handle allocation failure. */
2500 static void print_sdp(void)
2504 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
2508 for (i = 0; i < nb_output_files; i++)
2509 avc[i] = output_files[i]->ctx;
2511 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
2512 printf("SDP:\n%s\n", sdp);
/* Open the decoder for input stream ist_index if decoding is needed, wire up
 * custom get/release_buffer callbacks for DR1-capable video decoders, and
 * initialize the stream's timestamp bookkeeping. On failure a human-readable
 * message is written into error[error_len] and a negative AVERROR returned. */
2517 static int init_input_stream(int ist_index, char *error, int error_len)
2520 InputStream *ist = input_streams[ist_index];
2521 if (ist->decoding_needed) {
2522 AVCodec *codec = ist->dec;
2524 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2525 ist->st->codec->codec_id, ist->file_index, ist->st->index);
2526 return AVERROR(EINVAL);
2529 /* update requested sample format for the decoder based on the
2530 corresponding encoder sample format */
2531 for (i = 0; i < nb_output_streams; i++) {
2532 OutputStream *ost = output_streams[i];
2533 if (ost->source_index == ist_index) {
2534 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
/* direct-rendering buffer callbacks for video decoders that support DR1 */
2539 if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
2540 ist->st->codec->get_buffer = codec_get_buffer;
2541 ist->st->codec->release_buffer = codec_release_buffer;
2542 ist->st->codec->opaque = ist;
/* default to automatic thread count unless the user set one */
2545 if (!av_dict_get(ist->opts, "threads", NULL, 0))
2546 av_dict_set(&ist->opts, "threads", "auto", 0);
2547 if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2548 snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2549 ist->file_index, ist->st->index);
2550 return AVERROR(EINVAL);
2552 assert_codec_experimental(ist->st->codec, 0);
2553 assert_avoptions(ist->opts);
/* propagate a default channel layout between decoder and encoder when
 * either side lacks one */
2555 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2556 for (i = 0; i < nb_output_streams; i++) {
2557 OutputStream *ost = output_streams[i];
2558 if (ost->source_index == ist_index) {
2559 if (!ist->st->codec->channel_layout || !ost->st->codec->channel_layout)
2560 get_default_channel_layouts(ost, ist);
/* start the clock shifted back by the B-frame delay so the first frame
 * comes out at dts 0 */
2567 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2568 ist->next_dts = AV_NOPTS_VALUE;
2569 init_pts_correction(&ist->pts_ctx);
/* Return the input stream feeding ost: the directly mapped stream when a
 * source index is set, otherwise the first input of ost's filtergraph whose
 * media type matches the output. NOTE(review): the no-match return path is
 * elided from this listing. */
2575 static InputStream *get_input_stream(OutputStream *ost)
2577 if (ost->source_index >= 0)
2578 return input_streams[ost->source_index];
2581 FilterGraph *fg = ost->filter->graph;
2584 for (i = 0; i < fg->nb_inputs; i++)
2585 if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
2586 return fg->inputs[i]->ist;
/* One-time setup before the main transcode loop: derive encoder parameters
 * for every output stream (copying them from the input for stream-copy,
 * or from filters/user options when encoding), open all encoders and
 * decoders, write output file headers, and print the stream mapping.
 * Returns 0 on success or a negative AVERROR; `error` (elided declaration)
 * collects a printable failure message.
 * NOTE(review): many interior lines of this listing are elided (breaks,
 * closing braces, goto targets); comments below describe only the visible
 * code. */
2592 static int transcode_init(void)
2594 int ret = 0, i, j, k;
2595 AVFormatContext *oc;
2596 AVCodecContext *codec, *icodec;
2602 /* init framerate emulation */
2603 for (i = 0; i < nb_input_files; i++) {
2604 InputFile *ifile = input_files[i];
2605 if (ifile->rate_emu)
2606 for (j = 0; j < ifile->nb_streams; j++)
2607 input_streams[j + ifile->ist_index]->start = av_gettime();
2610 /* output stream init */
2611 for (i = 0; i < nb_output_files; i++) {
2612 oc = output_files[i]->ctx;
2613 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2614 av_dump_format(oc, i, oc->filename, 1);
2615 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2616 return AVERROR(EINVAL);
2620 /* init complex filtergraphs */
2621 for (i = 0; i < nb_filtergraphs; i++)
2622 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2625 /* for each output stream, we compute the right encoding parameters */
2626 for (i = 0; i < nb_output_streams; i++) {
2627 ost = output_streams[i];
2628 oc = output_files[ost->file_index]->ctx;
2629 ist = get_input_stream(ost);
2631 if (ost->attachment_filename)
2634 codec = ost->st->codec;
2637 icodec = ist->st->codec;
/* carry over properties that are valid for both copy and re-encode */
2639 ost->st->disposition = ist->st->disposition;
2640 codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2641 codec->chroma_sample_location = icodec->chroma_sample_location;
2644 if (ost->stream_copy) {
2645 uint64_t extra_size;
2647 av_assert0(ist && !ost->filter);
2649 extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2651 if (extra_size > INT_MAX) {
2652 return AVERROR(EINVAL);
2655 /* if stream_copy is selected, no need to decode or encode */
2656 codec->codec_id = icodec->codec_id;
2657 codec->codec_type = icodec->codec_type;
/* keep the input codec tag only when the output container accepts it */
2659 if (!codec->codec_tag) {
2660 if (!oc->oformat->codec_tag ||
2661 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2662 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2663 codec->codec_tag = icodec->codec_tag;
2666 codec->bit_rate = icodec->bit_rate;
2667 codec->rc_max_rate = icodec->rc_max_rate;
2668 codec->rc_buffer_size = icodec->rc_buffer_size;
2669 codec->field_order = icodec->field_order;
2670 codec->extradata = av_mallocz(extra_size);
2671 if (!codec->extradata) {
2672 return AVERROR(ENOMEM);
2674 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2675 codec->extradata_size = icodec->extradata_size;
/* fold ticks_per_frame into the copied time base, then reduce it */
2677 codec->time_base = icodec->time_base;
2678 codec->time_base.num *= icodec->ticks_per_frame;
2679 av_reduce(&codec->time_base.num, &codec->time_base.den,
2680 codec->time_base.num, codec->time_base.den, INT_MAX);
2682 codec->time_base = ist->st->time_base;
2684 switch (codec->codec_type) {
2685 case AVMEDIA_TYPE_AUDIO:
2686 if (audio_volume != 256) {
2687 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2690 codec->channel_layout = icodec->channel_layout;
2691 codec->sample_rate = icodec->sample_rate;
2692 codec->channels = icodec->channels;
2693 codec->frame_size = icodec->frame_size;
2694 codec->audio_service_type = icodec->audio_service_type;
2695 codec->block_align = icodec->block_align;
2697 case AVMEDIA_TYPE_VIDEO:
2698 codec->pix_fmt = icodec->pix_fmt;
2699 codec->width = icodec->width;
2700 codec->height = icodec->height;
2701 codec->has_b_frames = icodec->has_b_frames;
/* pick a sample aspect ratio: container SAR wins over codec SAR */
2702 if (!codec->sample_aspect_ratio.num) {
2703 codec->sample_aspect_ratio =
2704 ost->st->sample_aspect_ratio =
2705 ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2706 ist->st->codec->sample_aspect_ratio.num ?
2707 ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2710 case AVMEDIA_TYPE_SUBTITLE:
2711 codec->width = icodec->width;
2712 codec->height = icodec->height;
2714 case AVMEDIA_TYPE_DATA:
2715 case AVMEDIA_TYPE_ATTACHMENT:
2722 /* should only happen when a default codec is not present. */
2723 snprintf(error, sizeof(error), "Automatic encoder selection "
2724 "failed for output stream #%d:%d. Default encoder for "
2725 "format %s is probably disabled. Please choose an "
2726 "encoder manually.\n", ost->file_index, ost->index,
2728 ret = AVERROR(EINVAL);
/* re-encode path: both the decoder and the encoder will be opened */
2733 ist->decoding_needed = 1;
2734 ost->encoding_needed = 1;
2736 switch (codec->codec_type) {
2737 case AVMEDIA_TYPE_AUDIO:
2738 ost->fifo = av_fifo_alloc(1024);
2740 return AVERROR(ENOMEM);
2743 if (!codec->sample_rate)
2744 codec->sample_rate = icodec->sample_rate;
2745 choose_sample_rate(ost->st, ost->enc);
2746 codec->time_base = (AVRational){ 1, codec->sample_rate };
2748 if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
2749 codec->sample_fmt = icodec->sample_fmt;
2750 choose_sample_fmt(ost->st, ost->enc);
2752 if (!codec->channels)
2753 codec->channels = icodec->channels;
2754 if (!codec->channel_layout)
2755 codec->channel_layout = icodec->channel_layout;
/* drop a channel layout that contradicts the channel count */
2756 if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels)
2757 codec->channel_layout = 0;
2759 icodec->request_channels = codec-> channels;
/* remember input audio parameters to detect when resampling is needed */
2760 ost->resample_sample_fmt = icodec->sample_fmt;
2761 ost->resample_sample_rate = icodec->sample_rate;
2762 ost->resample_channels = icodec->channels;
2763 ost->resample_channel_layout = icodec->channel_layout;
2765 case AVMEDIA_TYPE_VIDEO:
2768 fg = init_simple_filtergraph(ist, ost);
2769 if (configure_video_filters(fg)) {
2770 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2776 * We want CFR output if and only if one of those is true:
2777 * 1) user specified output framerate with -r
2778 * 2) user specified -vsync cfr
2779 * 3) output format is CFR and the user didn't force vsync to
2780 * something else than CFR
2782 * in such a case, set ost->frame_rate
2784 if (!ost->frame_rate.num && ist &&
2785 (video_sync_method == VSYNC_CFR ||
2786 (video_sync_method == VSYNC_AUTO &&
2787 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
2788 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
/* clamp to the nearest frame rate the encoder supports, unless -force_fps */
2789 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2790 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2791 ost->frame_rate = ost->enc->supported_framerates[idx];
2794 if (ost->frame_rate.num) {
2795 codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
2796 video_sync_method = VSYNC_CFR;
2798 codec->time_base = ist->st->time_base;
2800 codec->time_base = ost->filter->filter->inputs[0]->time_base;
/* geometry, SAR and pixel format come from the filter chain output */
2802 codec->width = ost->filter->filter->inputs[0]->w;
2803 codec->height = ost->filter->filter->inputs[0]->h;
2804 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2805 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2806 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2807 ost->filter->filter->inputs[0]->sample_aspect_ratio;
2808 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
/* any geometry/format change invalidates bits_per_raw_sample */
2810 if (codec->width != icodec->width ||
2811 codec->height != icodec->height ||
2812 codec->pix_fmt != icodec->pix_fmt) {
2813 codec->bits_per_raw_sample = 0;
2817 case AVMEDIA_TYPE_SUBTITLE:
2818 codec->time_base = (AVRational){1, 1000};
/* two-pass encoding: set up the per-stream stats log file */
2825 if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2826 char logfilename[1024];
2829 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2830 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
2832 if (!strcmp(ost->enc->name, "libx264")) {
2833 av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2835 if (codec->flags & CODEC_FLAG_PASS1) {
2836 f = fopen(logfilename, "wb");
2838 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2839 logfilename, strerror(errno));
2845 size_t logbuffer_size;
2846 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2847 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2851 codec->stats_in = logbuffer;
2858 /* open each encoder */
2859 for (i = 0; i < nb_output_streams; i++) {
2860 ost = output_streams[i];
2861 if (ost->encoding_needed) {
2862 AVCodec *codec = ost->enc;
2863 AVCodecContext *dec = NULL;
2865 if ((ist = get_input_stream(ost)))
2866 dec = ist->st->codec;
/* copy the ASS subtitle header from decoder to encoder */
2867 if (dec && dec->subtitle_header) {
2868 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2869 if (!ost->st->codec->subtitle_header) {
2870 ret = AVERROR(ENOMEM);
2873 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2874 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2876 if (!av_dict_get(ost->opts, "threads", NULL, 0))
2877 av_dict_set(&ost->opts, "threads", "auto", 0);
2878 if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2879 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2880 ost->file_index, ost->index);
2881 ret = AVERROR(EINVAL);
2884 assert_codec_experimental(ost->st->codec, 1);
2885 assert_avoptions(ost->opts);
/* heuristic: a bitrate under 1000 almost certainly meant kbit/s */
2886 if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2887 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2888 "It takes bits/s as argument, not kbits/s\n");
2889 extra_size += ost->st->codec->extradata_size;
2891 if (ost->st->codec->me_threshold)
2892 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2896 /* init input streams */
2897 for (i = 0; i < nb_input_streams; i++)
2898 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2901 /* discard unused programs */
2902 for (i = 0; i < nb_input_files; i++) {
2903 InputFile *ifile = input_files[i];
2904 for (j = 0; j < ifile->ctx->nb_programs; j++) {
2905 AVProgram *p = ifile->ctx->programs[j];
2906 int discard = AVDISCARD_ALL;
/* keep a program only if at least one of its streams is used */
2908 for (k = 0; k < p->nb_stream_indexes; k++)
2909 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2910 discard = AVDISCARD_DEFAULT;
2913 p->discard = discard;
2917 /* open files and write file headers */
2918 for (i = 0; i < nb_output_files; i++) {
2919 oc = output_files[i]->ctx;
2920 oc->interrupt_callback = int_cb;
2921 if (avformat_write_header(oc, &output_files[i]->opts) < 0) {
2922 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2923 ret = AVERROR(EINVAL);
2926 assert_avoptions(output_files[i]->opts);
/* NOTE(review): elided body — presumably sets a flag to print the SDP
 * when an output muxer is "rtp"; confirm against the full source */
2927 if (strcmp(oc->oformat->name, "rtp")) {
2933 /* dump the file output parameters - cannot be done before in case
2935 for (i = 0; i < nb_output_files; i++) {
2936 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2939 /* dump the stream mapping */
2940 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2941 for (i = 0; i < nb_input_streams; i++) {
2942 ist = input_streams[i];
2944 for (j = 0; j < ist->nb_filters; j++) {
2945 AVFilterLink *link = ist->filters[j]->filter->outputs[0];
2946 if (ist->filters[j]->graph->graph_desc) {
2947 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2948 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2949 link->dst->filter->name);
2950 if (link->dst->input_count > 1)
2951 av_log(NULL, AV_LOG_INFO, ":%s", link->dstpad->name);
2952 if (nb_filtergraphs > 1)
2953 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2954 av_log(NULL, AV_LOG_INFO, "\n");
2959 for (i = 0; i < nb_output_streams; i++) {
2960 ost = output_streams[i];
2962 if (ost->attachment_filename) {
2963 /* an attached file */
2964 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2965 ost->attachment_filename, ost->file_index, ost->index);
2969 if (ost->filter && ost->filter->graph->graph_desc) {
2970 /* output from a complex graph */
2971 AVFilterLink *link = ost->filter->filter->inputs[0];
2972 av_log(NULL, AV_LOG_INFO, " %s", link->src->filter->name);
2973 if (link->src->output_count > 1)
2974 av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name);
2975 if (nb_filtergraphs > 1)
2976 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2978 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2979 ost->index, ost->enc ? ost->enc->name : "?");
2983 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2984 input_streams[ost->source_index]->file_index,
2985 input_streams[ost->source_index]->st->index,
2988 if (ost->sync_ist != input_streams[ost->source_index])
2989 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2990 ost->sync_ist->file_index,
2991 ost->sync_ist->st->index);
2992 if (ost->stream_copy)
2993 av_log(NULL, AV_LOG_INFO, " (copy)");
2995 av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2996 input_streams[ost->source_index]->dec->name : "?",
2997 ost->enc ? ost->enc->name : "?");
2998 av_log(NULL, AV_LOG_INFO, "\n");
3002 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3014 * The following code is the main loop of the file converter
/* Main conversion loop: repeatedly picks the input stream with the smallest
 * output pts, reads one packet from its file, applies timestamp offsets,
 * scaling and discontinuity correction, then hands it to output_packet().
 * On EOF it flushes decoders, writes trailers, closes codecs and frees
 * per-stream resources.
 * NOTE(review): elided listing — declarations (pkt, i, j, of, no_packet,
 * ipts/ipts_min, error labels) and several closing braces are not visible. */
3016 static int transcode(void)
3019 AVFormatContext *is, *os;
3023 int no_packet_count = 0;
3024 int64_t timer_start;
/* per-file EAGAIN marker array, one byte per input file */
3026 if (!(no_packet = av_mallocz(nb_input_files)))
3029 ret = transcode_init();
3033 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
3036 timer_start = av_gettime();
3038 for (; received_sigterm == 0;) {
3039 int file_index, ist_index, past_recording_time = 1;
3043 ipts_min = INT64_MAX;
3045 /* check if there's any stream where output is still needed */
3046 for (i = 0; i < nb_output_streams; i++) {
3048 ost = output_streams[i];
3049 of = output_files[ost->file_index];
3050 os = output_files[ost->file_index]->ctx;
3051 if (ost->is_past_recording_time ||
3052 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: mark every stream of this file finished */
3054 if (ost->frame_number > ost->max_frames) {
3056 for (j = 0; j < of->ctx->nb_streams; j++)
3057 output_streams[of->ost_index + j]->is_past_recording_time = 1;
3060 past_recording_time = 0;
3062 if (past_recording_time)
3065 /* select the stream that we must read now by looking at the
3066 smallest output pts */
3068 for (i = 0; i < nb_input_streams; i++) {
3070 ist = input_streams[i];
3071 ipts = ist->last_dts;
3072 if (ist->discard || no_packet[ist->file_index])
3074 if (!input_files[ist->file_index]->eof_reached) {
3075 if (ipts < ipts_min) {
3077 file_index = ist->file_index;
3081 /* if none, if is finished */
3082 if (file_index < 0) {
/* all remaining files returned EAGAIN: clear the markers and retry */
3083 if (no_packet_count) {
3084 no_packet_count = 0;
3085 memset(no_packet, 0, nb_input_files);
3092 /* read a frame from it and output it in the fifo */
3093 is = input_files[file_index]->ctx;
3094 ret = av_read_frame(is, &pkt);
3095 if (ret == AVERROR(EAGAIN)) {
3096 no_packet[file_index] = 1;
/* any other read error: treat the file as finished and flush its decoders */
3101 input_files[file_index]->eof_reached = 1;
3103 for (i = 0; i < input_files[file_index]->nb_streams; i++) {
3104 ist = input_streams[input_files[file_index]->ist_index + i];
3105 if (ist->decoding_needed)
3106 output_packet(ist, NULL);
3115 no_packet_count = 0;
3116 memset(no_packet, 0, nb_input_files);
3119 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3120 is->streams[pkt.stream_index]);
3122 /* the following test is needed in case new streams appear
3123 dynamically in stream : we ignore them */
3124 if (pkt.stream_index >= input_files[file_index]->nb_streams)
3125 goto discard_packet;
3126 ist_index = input_files[file_index]->ist_index + pkt.stream_index;
3127 ist = input_streams[ist_index];
3129 goto discard_packet;
/* apply the per-file timestamp offset, then the per-stream -itsscale factor */
3131 if (pkt.dts != AV_NOPTS_VALUE)
3132 pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3133 if (pkt.pts != AV_NOPTS_VALUE)
3134 pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3136 if (pkt.pts != AV_NOPTS_VALUE)
3137 pkt.pts *= ist->ts_scale;
3138 if (pkt.dts != AV_NOPTS_VALUE)
3139 pkt.dts *= ist->ts_scale;
3141 //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
3143 // pkt.dts, input_files[ist->file_index].ts_offset,
3144 // ist->st->codec->codec_type);
/* timestamp-discontinuity correction for formats flagged AVFMT_TS_DISCONT
 * (e.g. MPEG-TS wraps): fold large dts jumps into the file's ts_offset */
3145 if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
3146 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3147 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3148 int64_t delta = pkt_dts - ist->next_dts;
3149 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
3150 input_files[ist->file_index]->ts_offset -= delta;
3151 av_log(NULL, AV_LOG_DEBUG,
3152 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3153 delta, input_files[ist->file_index]->ts_offset);
3154 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3155 if (pkt.pts != AV_NOPTS_VALUE)
3156 pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3160 // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
3161 if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
3162 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
3163 ist->file_index, ist->st->index);
3166 av_free_packet(&pkt);
3171 av_free_packet(&pkt);
3173 /* dump report by using the output first video and audio streams */
3174 print_report(0, timer_start);
3177 /* at the end of stream, we must flush the decoder buffers */
3178 for (i = 0; i < nb_input_streams; i++) {
3179 ist = input_streams[i];
3180 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3181 output_packet(ist, NULL);
3189 /* write the trailer if needed and close file */
3190 for (i = 0; i < nb_output_files; i++) {
3191 os = output_files[i]->ctx;
3192 av_write_trailer(os);
3195 /* dump report by using the first video and audio streams */
3196 print_report(1, timer_start);
3198 /* close each encoder */
3199 for (i = 0; i < nb_output_streams; i++) {
3200 ost = output_streams[i];
3201 if (ost->encoding_needed) {
3202 av_freep(&ost->st->codec->stats_in);
3203 avcodec_close(ost->st->codec);
3207 /* close each decoder */
3208 for (i = 0; i < nb_input_streams; i++) {
3209 ist = input_streams[i];
3210 if (ist->decoding_needed) {
3211 avcodec_close(ist->st->codec);
3219 av_freep(&no_packet);
/* release per-output-stream resources (extradata, pass logs, fifo, etc.) */
3221 if (output_streams) {
3222 for (i = 0; i < nb_output_streams; i++) {
3223 ost = output_streams[i];
3225 if (ost->stream_copy)
3226 av_freep(&ost->st->codec->extradata);
3228 fclose(ost->logfile);
3229 ost->logfile = NULL;
3231 av_fifo_free(ost->fifo); /* works even if fifo is not
3232 initialized but set to zero */
3233 av_freep(&ost->st->codec->subtitle_header);
3234 av_free(ost->forced_kf_pts);
3236 avresample_free(&ost->avr);
3237 av_dict_free(&ost->opts);
/* Parse an aspect-ratio argument of the form "x:y" or a plain decimal
 * (e.g. "16:9" or "1.7777"); exits fatally on an invalid/non-positive value.
 * NOTE(review): declarations and the final return are elided here. */
3244 static double parse_frame_aspect_ratio(const char *arg)
3251 p = strchr(arg, ':');
3253 x = strtol(arg, &end, 10);
3255 y = strtol(end + 1, &end, 10);
3257 ar = (double)x / (double)y;
3259 ar = strtod(arg, NULL);
3262 av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
/* -acodec: alias for -codec:a */
3268 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
3270 return parse_option(o, "codec:a", arg, options);
/* -vcodec: alias for -codec:v */
3273 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
3275 return parse_option(o, "codec:v", arg, options);
/* -scodec: alias for -codec:s */
3278 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
3280 return parse_option(o, "codec:s", arg, options);
/* -dcodec: alias for -codec:d */
3283 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
3285 return parse_option(o, "codec:d", arg, options);
/* -map option handler. Syntax (as visible here): an optional '-' for a
 * negative map, "file_idx[:stream_spec]" to select streams, "[label]" to map
 * a lavfi filtergraph output, and an optional ",sync_spec" suffix naming the
 * sync stream. Appends StreamMap entries to o->stream_maps; exits fatally on
 * any invalid specification.
 * NOTE(review): elided listing — the '-' negative-map prefix handling,
 * allow_unused parsing and the final free/return are not visible. */
3288 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
3290 StreamMap *m = NULL;
3291 int i, negative = 0, file_idx;
3292 int sync_file_idx = -1, sync_stream_idx;
3300 map = av_strdup(arg);
3302 /* parse sync stream first, just pick first matching stream */
3303 if (sync = strchr(map, ',')) {
3305 sync_file_idx = strtol(sync + 1, &sync, 0);
3306 if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
3307 av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
3312 for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
3313 if (check_stream_specifier(input_files[sync_file_idx]->ctx,
3314 input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
3315 sync_stream_idx = i;
3318 if (i == input_files[sync_file_idx]->nb_streams) {
3319 av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
3320 "match any streams.\n", arg);
3326 if (map[0] == '[') {
3327 /* this mapping refers to lavfi output */
3328 const char *c = map + 1;
3329 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3330 &o->nb_stream_maps, o->nb_stream_maps + 1);
3331 m = &o->stream_maps[o->nb_stream_maps - 1];
3332 m->linklabel = av_get_token(&c, "]");
3333 if (!m->linklabel) {
3334 av_log(NULL, AV_LOG_ERROR, "Invalid output link label: %s.\n", map);
3338 file_idx = strtol(map, &p, 0);
3339 if (file_idx >= nb_input_files || file_idx < 0) {
3340 av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
3344 /* disable some already defined maps */
3345 for (i = 0; i < o->nb_stream_maps; i++) {
3346 m = &o->stream_maps[i];
3347 if (file_idx == m->file_index &&
3348 check_stream_specifier(input_files[m->file_index]->ctx,
3349 input_files[m->file_index]->ctx->streams[m->stream_index],
3350 *p == ':' ? p + 1 : p) > 0)
/* positive map: add one StreamMap per matching stream of file_idx */
3354 for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
3355 if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
3356 *p == ':' ? p + 1 : p) <= 0)
3358 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3359 &o->nb_stream_maps, o->nb_stream_maps + 1);
3360 m = &o->stream_maps[o->nb_stream_maps - 1];
3362 m->file_index = file_idx;
3363 m->stream_index = i;
/* default sync stream is the mapped stream itself */
3365 if (sync_file_idx >= 0) {
3366 m->sync_file_index = sync_file_idx;
3367 m->sync_stream_index = sync_stream_idx;
3369 m->sync_file_index = file_idx;
3370 m->sync_stream_index = i;
3376 av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
/* -attach: queue a filename to be attached to the output file;
 * the attachment is created later, during output-file setup. */
3384 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
3386 o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
3387 &o->nb_attachments, o->nb_attachments + 1);
3388 o->attachments[o->nb_attachments - 1] = arg;
3393 * Parse a metadata specifier in arg.
3394 * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
3395 * @param index for type c/p, chapter/program index is written here
3396 * @param stream_spec for type s, the stream specifier is written here
/* NOTE(review): elided listing — the leading switch on *arg and the default
 * "global" case are not visible here. Exits fatally on a malformed spec. */
3398 static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
/* 's' form: optional ":stream_specifier" must follow immediately */
3406 if (*(++arg) && *arg != ':') {
3407 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
3410 *stream_spec = *arg == ':' ? arg + 1 : "";
/* 'c'/'p' form: optional ":index" selects the chapter/program */
3414 if (*(++arg) == ':')
3415 *index = strtol(++arg, NULL, 0);
3418 av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
/* Implement -map_metadata: copy a metadata dictionary from input context ic
 * to output context oc. The in/out specifiers select global, per-stream,
 * per-chapter or per-program dictionaries; existing output keys are kept
 * (AV_DICT_DONT_OVERWRITE). Also records which metadata classes were
 * manually mapped so defaults are suppressed later.
 * NOTE(review): elided listing — declarations of i/ret and parts of the
 * SET_DICT macro (the 's' case and default) are not visible. */
3425 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3427 AVDictionary **meta_in = NULL;
3428 AVDictionary **meta_out;
3430 char type_in, type_out;
3431 const char *istream_spec = NULL, *ostream_spec = NULL;
3432 int idx_in = 0, idx_out = 0;
3434 parse_meta_type(inspec, &type_in, &idx_in, &istream_spec);
3435 parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
3437 if (type_in == 'g' || type_out == 'g')
3438 o->metadata_global_manual = 1;
3439 if (type_in == 's' || type_out == 's')
3440 o->metadata_streams_manual = 1;
3441 if (type_in == 'c' || type_out == 'c')
3442 o->metadata_chapters_manual = 1;
3444 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3445 if ((index) < 0 || (index) >= (nb_elems)) {\
3446 av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
/* SET_DICT resolves a (type, index) pair to the matching dictionary pointer
 * inside the given format context */
3451 #define SET_DICT(type, meta, context, index)\
3454 meta = &context->metadata;\
3457 METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3458 meta = &context->chapters[index]->metadata;\
3461 METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3462 meta = &context->programs[index]->metadata;\
3466 SET_DICT(type_in, meta_in, ic, idx_in);
3467 SET_DICT(type_out, meta_out, oc, idx_out);
3469 /* for input streams choose first matching stream */
3470 if (type_in == 's') {
3471 for (i = 0; i < ic->nb_streams; i++) {
3472 if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3473 meta_in = &ic->streams[i]->metadata;
3479 av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
/* for output streams, copy into EVERY stream matching the specifier */
3484 if (type_out == 's') {
3485 for (i = 0; i < oc->nb_streams; i++) {
3486 if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3487 meta_out = &oc->streams[i]->metadata;
3488 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3493 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
/* Look up an encoder or decoder by name; exit fatally if it does not exist
 * or its media type does not match the requested one.
 * NOTE(review): the assignment of the lookup result and the final return
 * are elided from this listing. */
3498 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3500 const char *codec_string = encoder ? "encoder" : "decoder";
3504 avcodec_find_encoder_by_name(name) :
3505 avcodec_find_decoder_by_name(name);
3507 av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
3510 if (codec->type != type) {
3511 av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
/* Pick the decoder for a stream: a user-forced codec name (per-stream -c/
 * -codec option) wins and also overrides the stream's codec_id; otherwise
 * fall back to the default decoder for the stream's codec_id. */
3517 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
3519 char *codec_name = NULL;
3521 MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3523 AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
3524 st->codec->codec_id = codec->id;
3527 return avcodec_find_decoder(st->codec->codec_id);
3531 * Add all the streams from the given input file to the global
3532 * list of input streams.
/* For each AVStream in ic, allocate an InputStream, register it in the
 * global input_streams array, apply per-stream options (-itsscale, forced
 * decoder), and record type-specific state (e.g. initial video geometry for
 * resample detection). Streams start discarded; mapping enables them later.
 * NOTE(review): elided listing — the allocation-failure check and parts of
 * the per-type switch are not visible. */
3534 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
3538 for (i = 0; i < ic->nb_streams; i++) {
3539 AVStream *st = ic->streams[i];
3540 AVCodecContext *dec = st->codec;
3541 InputStream *ist = av_mallocz(sizeof(*ist));
3546 input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
3547 input_streams[nb_input_streams - 1] = ist;
3550 ist->file_index = nb_input_files;
3552 st->discard = AVDISCARD_ALL;
3553 ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
3555 ist->ts_scale = 1.0;
3556 MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
3558 ist->dec = choose_decoder(o, ic, st);
3560 switch (dec->codec_type) {
3561 case AVMEDIA_TYPE_VIDEO:
/* remember initial geometry/format so later changes trigger rescaling */
3562 ist->resample_height = dec->height;
3563 ist->resample_width = dec->width;
3564 ist->resample_pix_fmt = dec->pix_fmt;
3567 case AVMEDIA_TYPE_AUDIO:
3568 case AVMEDIA_TYPE_DATA:
3569 case AVMEDIA_TYPE_SUBTITLE:
3570 case AVMEDIA_TYPE_ATTACHMENT:
3571 case AVMEDIA_TYPE_UNKNOWN:
/* If the output file already exists and -y was not given, either prompt the
 * user (interactive) or exit. The protocol check limits this to local file
 * paths ("file:" prefix, plain names, or DOS drive-letter paths).
 * NOTE(review): the interactivity condition guarding the prompt vs. the
 * hard exit is elided from this listing. */
3579 static void assert_file_overwrite(const char *filename)
3581 if (!file_overwrite &&
3582 (strchr(filename, ':') == NULL || filename[1] == ':' ||
3583 av_strstart(filename, "file:", NULL))) {
3584 if (avio_check(filename, 0) == 0) {
3586 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3588 if (!read_yesno()) {
3589 fprintf(stderr, "Not overwriting - exiting\n");
3594 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
/* Implement -dump_attachment: write an attachment stream's extradata to
 * `filename` (falling back to the stream's "filename" metadata tag when the
 * option value is empty). Warns and returns if there is no extradata; exits
 * fatally when no usable filename exists or the file cannot be opened. */
3601 static void dump_attachment(AVStream *st, const char *filename)
3604 AVIOContext *out = NULL;
3605 AVDictionaryEntry *e;
3607 if (!st->codec->extradata_size) {
3608 av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3609 nb_input_files - 1, st->index);
3612 if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3613 filename = e->value;
3615 av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
3616 "in stream #%d:%d.\n", nb_input_files - 1, st->index);
/* honor -y / overwrite confirmation before clobbering an existing file */
3620 assert_file_overwrite(filename);
3622 if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3623 av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
3628 avio_write(out, st->codec->extradata, st->codec->extradata_size);
/* Handle the -i option: open one input file, probe its streams, apply
 * per-file format options, register it in the global input_files array
 * and create InputStreams for all of its streams.
 * NOTE(review): fragment has gaps; error-exit branches and several
 * closing braces are not visible here. */
3633 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3635 AVFormatContext *ic;
3636 AVInputFormat *file_iformat = NULL;
3640 AVDictionary **opts;
3641 int orig_nb_streams; // number of streams before avformat_find_stream_info
/* An explicit -f on the input side must name a known demuxer. */
3644 if (!(file_iformat = av_find_input_format(o->format))) {
3645 av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
3650 if (!strcmp(filename, "-"))
/* Remember whether any input reads from stdin (affects tty handling). */
3653 using_stdin |= !strncmp(filename, "pipe:", 5) ||
3654 !strcmp(filename, "/dev/stdin");
3656 /* get default parameters from command line */
3657 ic = avformat_alloc_context();
3659 print_error(filename, AVERROR(ENOMEM));
/* Forward per-input CLI options to the demuxer via format_opts;
 * only the last occurrence of each repeated option is used. */
3662 if (o->nb_audio_sample_rate) {
3663 snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3664 av_dict_set(&format_opts, "sample_rate", buf, 0);
3666 if (o->nb_audio_channels) {
3667 /* because we set audio_channels based on both the "ac" and
3668 * "channel_layout" options, we need to check that the specified
3669 * demuxer actually has the "channels" option before setting it */
3670 if (file_iformat && file_iformat->priv_class &&
3671 av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
3672 AV_OPT_SEARCH_FAKE_OBJ)) {
3673 snprintf(buf, sizeof(buf), "%d",
3674 o->audio_channels[o->nb_audio_channels - 1].u.i);
3675 av_dict_set(&format_opts, "channels", buf, 0);
3678 if (o->nb_frame_rates) {
3679 av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3681 if (o->nb_frame_sizes) {
3682 av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3684 if (o->nb_frame_pix_fmts)
3685 av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3687 ic->flags |= AVFMT_FLAG_NONBLOCK;
3688 ic->interrupt_callback = int_cb;
3690 /* open the input file with generic libav function */
3691 err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3693 print_error(filename, err);
/* Any option left in format_opts was not consumed -> fatal. */
3696 assert_avoptions(format_opts);
3698 /* apply forced codec ids */
3699 for (i = 0; i < ic->nb_streams; i++)
3700 choose_decoder(o, ic, ic->streams[i]);
3702 /* Set AVCodecContext options for avformat_find_stream_info */
3703 opts = setup_find_stream_info_opts(ic, codec_opts);
3704 orig_nb_streams = ic->nb_streams;
3706 /* If not enough info to get the stream parameters, we decode the
3707 first frames to get it. (used in mpeg case for example) */
3708 ret = avformat_find_stream_info(ic, opts);
3710 av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3711 avformat_close_input(&ic);
3715 timestamp = o->start_time;
3716 /* add the stream start time */
3717 if (ic->start_time != AV_NOPTS_VALUE)
3718 timestamp += ic->start_time;
3720 /* if seeking requested, we execute it */
3721 if (o->start_time != 0) {
3722 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
/* Seek failure is non-fatal: warn and decode from the start. */
3724 av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3725 filename, (double)timestamp / AV_TIME_BASE);
3729 /* update the current parameters so that they match the one of the input stream */
3730 add_input_streams(o, ic);
3732 /* dump the file content */
3733 av_dump_format(ic, nb_input_files, filename, 0);
/* Register the file in the global table; ist_index records where this
 * file's streams begin in the global input_streams array. */
3735 input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3736 if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
3739 input_files[nb_input_files - 1]->ctx = ic;
3740 input_files[nb_input_files - 1]->ist_index = nb_input_streams - ic->nb_streams;
3741 input_files[nb_input_files - 1]->ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3742 input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
3743 input_files[nb_input_files - 1]->rate_emu = o->rate_emu;
/* -dump_attachment: save matching streams' extradata to files. */
3745 for (i = 0; i < o->nb_dump_attachment; i++) {
3748 for (j = 0; j < ic->nb_streams; j++) {
3749 AVStream *st = ic->streams[j];
3751 if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3752 dump_attachment(st, o->dump_attachment[i].u.str);
/* Free the per-stream option dicts built for find_stream_info. */
3756 for (i = 0; i < orig_nb_streams; i++)
3757 av_dict_free(&opts[i]);
/* Parse a comma-separated list of timestamps (-force_key_frames) into
 * ost->forced_kf_pts, rescaled to the encoder time base. */
3764 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3765 AVCodecContext *avctx)
/* First pass over the string (presumably counting ',' to size the array;
 * the counting statement itself is not visible in this fragment). */
3771 for (p = kf; *p; p++)
3774 ost->forced_kf_count = n;
3775 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3776 if (!ost->forced_kf_pts) {
3777 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3780 for (i = 0; i < n; i++) {
/* For i > 0 jump past the next ',' ; the first entry starts at kf. */
3781 p = i ? strchr(p, ',') + 1 : kf;
3782 t = parse_time_or_die("force_key_frames", p, 1);
3783 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Read one '\n'- or NUL-terminated line from s into a freshly allocated
 * buffer (via a dynamic AVIO buffer). Caller owns the returned memory. */
3787 static uint8_t *get_line(AVIOContext *s)
3793 if (avio_open_dyn_buf(&line) < 0) {
3794 av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
/* Copy bytes until end-of-line; avio_r8() returns 0 at EOF. */
3798 while ((c = avio_r8(s)) && c != '\n')
3801 avio_close_dyn_buf(line, &buf);
/* Locate and open a preset file, searching $AVCONV_DATADIR and the other
 * base directories; tries "<codec>-<preset>.avpreset" first, then the
 * plain "<preset>.avpreset" form. Returns the avio_open2() result. */
3806 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3809 char filename[1000];
3810 const char *base[3] = { getenv("AVCONV_DATADIR"),
/* Stop as soon as one candidate opens successfully (ret == 0). */
3815 for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
/* base[1] is presumably $HOME, hence the "/.avconv" suffix for i == 1. */
3819 snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3820 i != 1 ? "" : "/.avconv", codec_name, preset_name);
3821 ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3824 snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3825 i != 1 ? "" : "/.avconv", preset_name);
3826 ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/* Pick the encoder for an output stream from the per-stream -c option:
 * no option -> guess from the output format; "copy" -> streamcopy;
 * otherwise look the named encoder up (fatal if unknown). */
3832 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
3834 char *codec_name = NULL;
3836 MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
/* No -c given: let the muxer's defaults decide the codec id. */
3838 ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3839 NULL, ost->st->codec->codec_type);
3840 ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3841 } else if (!strcmp(codec_name, "copy"))
3842 ost->stream_copy = 1;
3844 ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3845 ost->st->codec->codec_id = ost->enc->id;
/* Create a new output stream of the given media type in muxer context oc:
 * allocates the AVStream and OutputStream, chooses the encoder, applies
 * preset files, bitstream filters, codec tag, qscale and global-header
 * flags common to all stream types.
 * NOTE(review): fragment has gaps; several error-exit branches and loop
 * braces are not visible here. */
3849 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
3852 AVStream *st = avformat_new_stream(oc, NULL);
3853 int idx = oc->nb_streams - 1, ret = 0;
3854 char *bsf = NULL, *next, *codec_tag = NULL;
3855 AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3857 char *buf = NULL, *arg = NULL, *preset = NULL;
3858 AVIOContext *s = NULL;
3861 av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
/* Apply a -streamid mapping for this output index, if one was given. */
3865 if (oc->nb_streams - 1 < o->nb_streamid_map)
3866 st->id = o->streamid_map[oc->nb_streams - 1];
3868 output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3869 nb_output_streams + 1);
3870 if (!(ost = av_mallocz(sizeof(*ost))))
3872 output_streams[nb_output_streams - 1] = ost;
3874 ost->file_index = nb_output_files;
3877 st->codec->codec_type = type;
3878 choose_encoder(o, oc, ost);
/* Collect codec-private CLI options that apply to this stream/encoder. */
3880 ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3883 avcodec_get_context_defaults3(st->codec, ost->enc);
3884 st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
/* -pre: read "key=value" lines from the preset file into ost->opts
 * (explicit CLI options win via AV_DICT_DONT_OVERWRITE). */
3886 MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3887 if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
/* Skip blank lines and '#' comments. */
3890 if (!buf[0] || buf[0] == '#') {
3894 if (!(arg = strchr(buf, '='))) {
3895 av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
3899 av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3901 } while (!s->eof_reached);
3905 av_log(NULL, AV_LOG_FATAL,
3906 "Preset %s specified for stream %d:%d, but could not be opened.\n",
3907 preset, ost->file_index, ost->index);
3911 ost->max_frames = INT64_MAX;
3912 MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
/* -bsf: build the chain of bitstream filters from a comma list. */
3914 MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
/* NOTE(review): assignment inside the if condition is intentional
 * (split the list at the next ','), but deserves extra parentheses. */
3916 if (next = strchr(bsf, ','))
3918 if (!(bsfc = av_bitstream_filter_init(bsf))) {
3919 av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3923 bsfc_prev->next = bsfc;
3925 ost->bitstream_filters = bsfc;
/* -tag: accept either a number (any strtol base) or a 4-char fourcc. */
3931 MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3933 uint32_t tag = strtol(codec_tag, &next, 0);
3935 tag = AV_RL32(codec_tag);
3936 st->codec->codec_tag = tag;
3939 MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3940 if (qscale >= 0 || same_quant) {
3941 st->codec->flags |= CODEC_FLAG_QSCALE;
3942 st->codec->global_quality = FF_QP2LAMBDA * qscale;
3945 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
3946 st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
/* Remember the current -sws_flags for this stream's scaler. */
3948 av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3950 ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
/* Parse a comma-separated list of integers (quantization matrix
 * coefficients) from str into dest; fatal on malformed input. */
3955 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3958 const char *p = str;
3965 av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
/* Create a video output stream and apply all video-specific CLI options
 * (frame rate/size/aspect/pix_fmt, quant matrices, rc_override, 2-pass
 * flags, forced keyframes, filters).
 * NOTE(review): fragment has gaps; some braces/branches not visible. */
3972 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
3976 AVCodecContext *video_enc;
3978 ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3980 video_enc = st->codec;
/* Encoding options below only make sense when not stream-copying. */
3982 if (!ost->stream_copy) {
3983 const char *p = NULL;
3984 char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
3985 char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
3986 char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
3989 MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3990 if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3991 av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3995 MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3996 if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3997 av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
4001 MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
4002 if (frame_aspect_ratio)
4003 ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
4005 MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
4006 if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
4007 av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
4010 st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
/* -intra_matrix / -inter_matrix: 64 comma-separated coefficients. */
4012 MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
4014 if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
4015 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
4018 parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
4020 MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
4022 if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
4023 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
4026 parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
/* -rc_override: "start,end,q[/...]" entries appended one by one. */
4029 MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
4030 for (i = 0; p; i++) {
4032 int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
4034 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
/* NOTE(review): av_realloc result overwrites the pointer directly and
 * is not checked for NULL here -- leaks/crashes on OOM. */
4037 video_enc->rc_override =
4038 av_realloc(video_enc->rc_override,
4039 sizeof(RcOverride) * (i + 1));
4040 video_enc->rc_override[i].start_frame = start;
4041 video_enc->rc_override[i].end_frame = end;
/* q > 0 means a fixed quantizer; q < 0 encodes a quality factor. */
4043 video_enc->rc_override[i].qscale = q;
4044 video_enc->rc_override[i].quality_factor = 1.0;
4047 video_enc->rc_override[i].qscale = 0;
4048 video_enc->rc_override[i].quality_factor = -q/100.0;
4053 video_enc->rc_override_count = i;
4054 if (!video_enc->rc_initial_buffer_occupancy)
4055 video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
4056 video_enc->intra_dc_precision = intra_dc_precision - 8;
/* Two-pass encoding flags (do_pass presumably checked just above). */
4061 video_enc->flags |= CODEC_FLAG_PASS1;
4063 video_enc->flags |= CODEC_FLAG_PASS2;
4067 MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
4068 if (forced_key_frames)
4069 parse_forced_key_frames(forced_key_frames, ost, video_enc);
4071 MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
/* -1 == "not specified" for top_field_first. */
4073 ost->top_field_first = -1;
4074 MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
4076 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
4078 ost->avfilter = av_strdup(filters);
4080 MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
/* Create an audio output stream and apply audio-specific CLI options
 * (channels, sample format, sample rate). */
4086 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
4090 AVCodecContext *audio_enc;
4092 ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
4095 audio_enc = st->codec;
4096 audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
/* Encoding options only apply when not stream-copying. */
4098 if (!ost->stream_copy) {
4099 char *sample_fmt = NULL;
4101 MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
4103 MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
4105 (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
4106 av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
4110 MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
/* Create a data output stream; only streamcopy is supported for data. */
4116 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
4120 ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
4121 if (!ost->stream_copy) {
4122 av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
/* Create an attachment output stream; attachments are always copied. */
4129 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
4131 OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
4132 ost->stream_copy = 1;
/* Create a subtitle output stream. */
4136 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
4140 AVCodecContext *subtitle_enc;
4142 ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
4144 subtitle_enc = st->codec;
4146 subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
4151 /* arg format is "output-stream-index:streamid-value". */
/* -streamid: record a stream-index -> stream-id mapping in
 * o->streamid_map, growing the map as needed. Fatal on bad syntax. */
4152 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
/* Work on a bounded local copy so strchr/NUL-splitting is safe. */
4158 av_strlcpy(idx_str, arg, sizeof(idx_str));
4159 p = strchr(idx_str, ':');
4161 av_log(NULL, AV_LOG_FATAL,
4162 "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
4167 idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
4168 o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
4169 o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
/* Copy chapters from an input file to an output file, shifting them by
 * the output start time and clipping to the recording time. Chapters
 * entirely outside the output time range are skipped. Returns 0 or a
 * negative AVERROR. */
4173 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
4175 AVFormatContext *is = ifile->ctx;
4176 AVFormatContext *os = ofile->ctx;
4179 for (i = 0; i < is->nb_chapters; i++) {
4180 AVChapter *in_ch = is->chapters[i], *out_ch;
/* Offset and limit expressed in this chapter's own time base. */
4181 int64_t ts_off = av_rescale_q(ofile->start_time - ifile->ts_offset,
4182 AV_TIME_BASE_Q, in_ch->time_base);
4183 int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
4184 av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
/* Skip chapters that end before, or start after, the output window. */
4187 if (in_ch->end < ts_off)
4189 if (rt != INT64_MAX && in_ch->start > rt + ts_off)
4192 out_ch = av_mallocz(sizeof(AVChapter));
4194 return AVERROR(ENOMEM);
4196 out_ch->id = in_ch->id;
4197 out_ch->time_base = in_ch->time_base;
/* Clamp the copied chapter to [0, recording_time]. */
4198 out_ch->start = FFMAX(0, in_ch->start - ts_off);
4199 out_ch->end = FFMIN(rt, in_ch->end - ts_off);
4202 av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
/* NOTE(review): av_realloc overwrites os->chapters directly; on failure
 * the old array pointer is lost (leak). */
4205 os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
4207 return AVERROR(ENOMEM);
4208 os->chapters[os->nb_chapters - 1] = out_ch;
/* Create an output stream fed by a complex filtergraph output pad and
 * wire the two together; only video pads are supported. */
4213 static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
4214 AVFormatContext *oc)
4218 if (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type != AVMEDIA_TYPE_VIDEO) {
4219 av_log(NULL, AV_LOG_FATAL, "Only video filters are supported currently.\n");
4223 ost = new_video_stream(o, oc);
/* source_index == -1 marks a stream fed by a filtergraph, not an input. */
4224 ost->source_index = -1;
4225 ost->filter = ofilter;
/* Streamcopy cannot be combined with filtering. */
4229 if (ost->stream_copy) {
4230 av_log(NULL, AV_LOG_ERROR, "Streamcopy requested for output stream %d:%d, "
4231 "which is fed from a complex filtergraph. Filtering and streamcopy "
4232 "cannot be used together.\n", ost->file_index, ost->index);
4236 if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
4237 av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
4240 avfilter_inout_free(&ofilter->out_tmp);
/* Handle one output file argument: create the muxer context, create all
 * output streams (from -map, filtergraph labels, or automatic "best
 * stream" selection), attach files, open the output, and apply chapter
 * and metadata mapping.
 * NOTE(review): fragment has gaps; many error-exit branches and closing
 * braces are not visible here. */
4243 static void opt_output_file(void *optctx, const char *filename)
4245 OptionsContext *o = optctx;
4246 AVFormatContext *oc;
4248 AVOutputFormat *file_oformat;
4252 if (configure_complex_filters() < 0) {
4253 av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
4257 if (!strcmp(filename, "-"))
4260 oc = avformat_alloc_context();
4262 print_error(filename, AVERROR(ENOMEM));
/* -f takes precedence; otherwise guess the muxer from the filename. */
4267 file_oformat = av_guess_format(o->format, NULL, NULL);
4268 if (!file_oformat) {
4269 av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
4273 file_oformat = av_guess_format(NULL, filename, NULL);
4274 if (!file_oformat) {
4275 av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
4281 oc->oformat = file_oformat;
4282 oc->interrupt_callback = int_cb;
4283 av_strlcpy(oc->filename, filename, sizeof(oc->filename));
4285 /* create streams for all unlabeled output pads */
4286 for (i = 0; i < nb_filtergraphs; i++) {
4287 FilterGraph *fg = filtergraphs[i];
4288 for (j = 0; j < fg->nb_outputs; j++) {
4289 OutputFilter *ofilter = fg->outputs[j];
/* Labeled pads are mapped explicitly via -map below; skip them here. */
4291 if (!ofilter->out_tmp || ofilter->out_tmp->name)
/* A filtergraph output disables automatic selection for its type. */
4294 switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4295 case AVMEDIA_TYPE_VIDEO: o->video_disable = 1; break;
4296 case AVMEDIA_TYPE_AUDIO: o->audio_disable = 1; break;
4297 case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
4299 init_output_filter(ofilter, o, oc);
4303 if (!o->nb_stream_maps) {
4304 /* pick the "best" stream of each type */
4305 #define NEW_STREAM(type, index)\
4307 ost = new_ ## type ## _stream(o, oc);\
4308 ost->source_index = index;\
4309 ost->sync_ist = input_streams[index];\
4310 input_streams[index]->discard = 0;\
4311 input_streams[index]->st->discard = AVDISCARD_NONE;\
4314 /* video: highest resolution */
4315 if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
4316 int area = 0, idx = -1;
4317 for (i = 0; i < nb_input_streams; i++) {
4318 ist = input_streams[i];
4319 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
4320 ist->st->codec->width * ist->st->codec->height > area) {
4321 area = ist->st->codec->width * ist->st->codec->height;
4325 NEW_STREAM(video, idx);
4328 /* audio: most channels */
4329 if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
4330 int channels = 0, idx = -1;
4331 for (i = 0; i < nb_input_streams; i++) {
4332 ist = input_streams[i];
4333 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
4334 ist->st->codec->channels > channels) {
4335 channels = ist->st->codec->channels;
4339 NEW_STREAM(audio, idx);
4342 /* subtitles: pick first */
4343 if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
4344 for (i = 0; i < nb_input_streams; i++)
4345 if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
4346 NEW_STREAM(subtitle, i);
4350 /* do something with data? */
/* Explicit -map entries: either a filtergraph link label or an
 * input file:stream reference. */
4352 for (i = 0; i < o->nb_stream_maps; i++) {
4353 StreamMap *map = &o->stream_maps[i];
4358 if (map->linklabel) {
4360 OutputFilter *ofilter = NULL;
4363 for (j = 0; j < nb_filtergraphs; j++) {
4364 fg = filtergraphs[j];
4365 for (k = 0; k < fg->nb_outputs; k++) {
4366 AVFilterInOut *out = fg->outputs[k]->out_tmp;
4367 if (out && !strcmp(out->name, map->linklabel)) {
4368 ofilter = fg->outputs[k];
4375 av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
4376 "in any defined filter graph.\n", map->linklabel);
4379 init_output_filter(ofilter, o, oc);
4381 ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
4382 switch (ist->st->codec->codec_type) {
4383 case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
4384 case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
4385 case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
4386 case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
4387 case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
4389 av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
4390 map->file_index, map->stream_index);
4394 ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
4395 ost->sync_ist = input_streams[input_files[map->sync_file_index]->ist_index +
4396 map->sync_stream_index];
4398 ist->st->discard = AVDISCARD_NONE;
4403 /* handle attached files */
4404 for (i = 0; i < o->nb_attachments; i++) {
4406 uint8_t *attachment;
4410 if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
4411 av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
4415 if ((len = avio_size(pb)) <= 0) {
4416 av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
4420 if (!(attachment = av_malloc(len))) {
4421 av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
4425 avio_read(pb, attachment, len);
4427 ost = new_attachment_stream(o, oc);
4428 ost->stream_copy = 0;
4429 ost->source_index = -1;
4430 ost->attachment_filename = o->attachments[i];
/* The attachment payload rides in the stream's extradata. */
4431 ost->st->codec->extradata = attachment;
4432 ost->st->codec->extradata_size = len;
/* Default "filename" tag: basename of the attachment path. */
4434 p = strrchr(o->attachments[i], '/');
4435 av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
/* Register the OutputFile and copy per-file options/limits. */
4439 output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
4440 if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
4443 output_files[nb_output_files - 1]->ctx = oc;
4444 output_files[nb_output_files - 1]->ost_index = nb_output_streams - oc->nb_streams;
4445 output_files[nb_output_files - 1]->recording_time = o->recording_time;
4446 if (o->recording_time != INT64_MAX)
4447 oc->duration = o->recording_time;
4448 output_files[nb_output_files - 1]->start_time = o->start_time;
4449 output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
4450 av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
4452 /* check filename in case of an image number is expected */
4453 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
4454 if (!av_filename_number_test(oc->filename)) {
4455 print_error(oc->filename, AVERROR(EINVAL));
4460 if (!(oc->oformat->flags & AVFMT_NOFILE)) {
4461 /* test if it already exists to avoid losing precious files */
4462 assert_file_overwrite(filename);
4465 if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
4466 &oc->interrupt_callback,
4467 &output_files[nb_output_files - 1]->opts)) < 0) {
4468 print_error(filename, err);
4473 if (o->mux_preload) {
4475 snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
4476 av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
4478 oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
4479 oc->flags |= AVFMT_FLAG_NONBLOCK;
/* -map_metadata: "infile[.spec]" copied into this output. */
4482 for (i = 0; i < o->nb_metadata_map; i++) {
4484 int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
4486 if (in_file_index < 0)
4488 if (in_file_index >= nb_input_files) {
4489 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4492 copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
/* -map_chapters: INT_MAX means "first input file that has chapters". */
4496 if (o->chapters_input_file >= nb_input_files) {
4497 if (o->chapters_input_file == INT_MAX) {
4498 /* copy chapters from the first input file that has them*/
4499 o->chapters_input_file = -1;
4500 for (i = 0; i < nb_input_files; i++)
4501 if (input_files[i]->ctx->nb_chapters) {
4502 o->chapters_input_file = i;
4506 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4507 o->chapters_input_file);
4511 if (o->chapters_input_file >= 0)
4512 copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
4513 !o->metadata_chapters_manual);
4515 /* copy global metadata by default */
4516 if (!o->metadata_global_manual && nb_input_files)
4517 av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
4518 AV_DICT_DONT_OVERWRITE);
4519 if (!o->metadata_streams_manual)
4520 for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
4522 if (output_streams[i]->source_index < 0) /* this is true e.g. for attached files */
4524 ist = input_streams[output_streams[i]->source_index];
4525 av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4528 /* process manually set metadata */
4529 for (i = 0; i < o->nb_metadata; i++) {
4532 const char *stream_spec;
4533 int index = 0, j, ret;
4535 val = strchr(o->metadata[i].u.str, '=');
4537 av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4538 o->metadata[i].u.str);
4543 parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
4545 for (j = 0; j < oc->nb_streams; j++) {
4546 if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
4547 av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
/* NOTE(review): leftover debug printf -- should be removed or turned
 * into av_log at a debug level. */
4551 printf("ret %d, stream_spec %s\n", ret, stream_spec);
4559 if (index < 0 || index >= oc->nb_chapters) {
4560 av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4563 m = &oc->chapters[index]->metadata;
4566 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
/* Empty value ("key=") deletes the tag. */
4569 av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
4576 /* same option as mencoder */
/* -pass N: select two-pass encoding pass (1 or 2). */
4577 static int opt_pass(const char *opt, const char *arg)
4579 do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
/* Return this process' consumed user CPU time in microseconds, using
 * whichever platform API is available; falls back to wall-clock time. */
4583 static int64_t getutime(void)
4586 struct rusage rusage;
4588 getrusage(RUSAGE_SELF, &rusage);
4589 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4590 #elif HAVE_GETPROCESSTIMES
4592 FILETIME c, e, k, u;
4593 proc = GetCurrentProcess();
4594 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; /10 converts to microseconds. */
4595 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4597 return av_gettime();
/* Return the peak memory usage of this process in bytes, or an
 * equivalent platform-specific figure (0/fallback path not visible). */
4601 static int64_t getmaxrss(void)
4603 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4604 struct rusage rusage;
4605 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on this path. */
4606 return (int64_t)rusage.ru_maxrss * 1024;
4607 #elif HAVE_GETPROCESSMEMORYINFO
4609 PROCESS_MEMORY_COUNTERS memcounters;
4610 proc = GetCurrentProcess();
4611 memcounters.cb = sizeof(memcounters);
4612 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4613 return memcounters.PeakPagefileUsage;
/* -aq OPT: alias for "-q:a". */
4619 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4621 return parse_option(o, "q:a", arg, options);
/* Print the one-line usage banner. */
4624 static void show_usage(void)
4626 printf("Hyper fast Audio and Video encoder\n");
4627 printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
/* Print the full -h help: option groups by category, then the generic
 * AVOptions of libavcodec, libavformat and libswscale. */
4631 static void show_help(void)
4633 int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
4634 av_log_set_callback(log_callback_help);
4636 show_help_options(options, "Main options:\n",
4637 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4638 show_help_options(options, "\nAdvanced options:\n",
4639 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4641 show_help_options(options, "\nVideo options:\n",
4642 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4644 show_help_options(options, "\nAdvanced Video options:\n",
4645 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4646 OPT_VIDEO | OPT_EXPERT);
4647 show_help_options(options, "\nAudio options:\n",
4648 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4650 show_help_options(options, "\nAdvanced Audio options:\n",
4651 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4652 OPT_AUDIO | OPT_EXPERT);
4653 show_help_options(options, "\nSubtitle options:\n",
4654 OPT_SUBTITLE | OPT_GRAB,
4656 show_help_options(options, "\nAudio/Video grab options:\n",
/* Generic per-library AVOption listings. */
4660 show_help_children(avcodec_get_class(), flags);
4661 show_help_children(avformat_get_class(), flags);
4662 show_help_children(sws_get_class(), flags);
/* -target: configure a canned set of options for vcd/svcd/dvd/dv output.
 * The PAL/NTSC/FILM norm comes from an explicit prefix ("pal-", "ntsc-",
 * "film-") or is guessed from the frame rates of the open input files.
 * NOTE(review): fragment has gaps; some branches/braces not visible. */
4665 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4667 enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
/* Indexed by norm: PAL=25, NTSC=30000/1001, FILM=24000/1001. */
4668 static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
4670 if (!strncmp(arg, "pal-", 4)) {
4673 } else if (!strncmp(arg, "ntsc-", 5)) {
4676 } else if (!strncmp(arg, "film-", 5)) {
4680 /* Try to determine PAL/NTSC by peeking in the input files */
4681 if (nb_input_files) {
4683 for (j = 0; j < nb_input_files; j++) {
4684 for (i = 0; i < input_files[j]->nb_streams; i++) {
4685 AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
4686 if (c->codec_type != AVMEDIA_TYPE_VIDEO)
/* Frame rate in millihertz for exact integer comparison. */
4688 fr = c->time_base.den * 1000 / c->time_base.num;
4692 } else if ((fr == 29970) || (fr == 23976)) {
4697 if (norm != UNKNOWN)
4701 if (norm != UNKNOWN)
4702 av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4705 if (norm == UNKNOWN) {
4706 av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4707 av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4708 av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
/* Each target sets codecs, frame geometry, bitrates and mux params. */
4712 if (!strcmp(arg, "vcd")) {
4713 opt_video_codec(o, "c:v", "mpeg1video");
4714 opt_audio_codec(o, "c:a", "mp2");
4715 parse_option(o, "f", "vcd", options);
4717 parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4718 parse_option(o, "r", frame_rates[norm], options);
4719 opt_default("g", norm == PAL ? "15" : "18");
4721 opt_default("b", "1150000");
4722 opt_default("maxrate", "1150000");
4723 opt_default("minrate", "1150000");
4724 opt_default("bufsize", "327680"); // 40*1024*8;
4726 opt_default("b:a", "224000");
4727 parse_option(o, "ar", "44100", options);
4728 parse_option(o, "ac", "2", options);
4730 opt_default("packetsize", "2324");
4731 opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4733 /* We have to offset the PTS, so that it is consistent with the SCR.
4734 SCR starts at 36000, but the first two packs contain only padding
4735 and the first pack from the other stream, respectively, may also have
4736 been written before.
4737 So the real data starts at SCR 36000+3*1200. */
4738 o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
4739 } else if (!strcmp(arg, "svcd")) {
4741 opt_video_codec(o, "c:v", "mpeg2video");
4742 opt_audio_codec(o, "c:a", "mp2");
4743 parse_option(o, "f", "svcd", options);
4745 parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4746 parse_option(o, "r", frame_rates[norm], options);
4747 opt_default("g", norm == PAL ? "15" : "18");
4749 opt_default("b", "2040000");
4750 opt_default("maxrate", "2516000");
4751 opt_default("minrate", "0"); // 1145000;
4752 opt_default("bufsize", "1835008"); // 224*1024*8;
4753 opt_default("flags", "+scan_offset");
4756 opt_default("b:a", "224000");
4757 parse_option(o, "ar", "44100", options);
4759 opt_default("packetsize", "2324");
4761 } else if (!strcmp(arg, "dvd")) {
4763 opt_video_codec(o, "c:v", "mpeg2video");
4764 opt_audio_codec(o, "c:a", "ac3");
4765 parse_option(o, "f", "dvd", options);
4767 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4768 parse_option(o, "r", frame_rates[norm], options);
4769 opt_default("g", norm == PAL ? "15" : "18");
4771 opt_default("b", "6000000");
4772 opt_default("maxrate", "9000000");
4773 opt_default("minrate", "0"); // 1500000;
4774 opt_default("bufsize", "1835008"); // 224*1024*8;
4776 opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4777 opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4779 opt_default("b:a", "448000");
4780 parse_option(o, "ar", "48000", options);
4782 } else if (!strncmp(arg, "dv", 2)) {
4784 parse_option(o, "f", "dv", options);
4786 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
/* "dv50" selects 4:2:2; otherwise PAL is 4:2:0 and NTSC is 4:1:1. */
4787 parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4788 norm == PAL ? "yuv420p" : "yuv411p", options);
4789 parse_option(o, "r", frame_rates[norm], options);
4791 parse_option(o, "ar", "48000", options);
4792 parse_option(o, "ac", "2", options);
4795 av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4796 return AVERROR(EINVAL);
4801 static int opt_vstats_file(const char *opt, const char *arg)
4803 av_free (vstats_filename);
4804 vstats_filename = av_strdup (arg);
4808 static int opt_vstats(const char *opt, const char *arg)
4811 time_t today2 = time(NULL);
4812 struct tm *today = localtime(&today2);
4814 snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4816 return opt_vstats_file(opt, filename);
4819 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4821 return parse_option(o, "frames:v", arg, options);
4824 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4826 return parse_option(o, "frames:a", arg, options);
4829 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4831 return parse_option(o, "frames:d", arg, options);
4834 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4836 return parse_option(o, "tag:v", arg, options);
4839 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4841 return parse_option(o, "tag:a", arg, options);
4844 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4846 return parse_option(o, "tag:s", arg, options);
4849 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4851 return parse_option(o, "filter:v", arg, options);
4854 static int opt_vsync(const char *opt, const char *arg)
4856 if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
4857 else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
4858 else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
4860 if (video_sync_method == VSYNC_AUTO)
4861 video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
4865 static int opt_deinterlace(const char *opt, const char *arg)
4867 av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
/**
 * Handle -cpuflags: parse the flag-mask string and restrict the CPU
 * feature detection to the given set.  Propagates the (negative)
 * parser error code on malformed input.
 */
static int opt_cpuflags(const char *opt, const char *arg)
{
    int flags = av_parse_cpu_flags(arg);

    if (flags < 0)
        return flags;

    av_set_cpu_flags_mask(flags);
    return 0;
}
4883 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
4885 int idx = locate_option(argc, argv, options, "cpuflags");
4886 if (idx && argv[idx + 1])
4887 opt_cpuflags("cpuflags", argv[idx + 1]);
4890 static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
4892 char layout_str[32];
4895 int ret, channels, ac_str_size;
4898 layout = av_get_channel_layout(arg);
4900 av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
4901 return AVERROR(EINVAL);
4903 snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
4904 ret = opt_default(opt, layout_str);
4908 /* set 'ac' option based on channel layout */
4909 channels = av_get_channel_layout_nb_channels(layout);
4910 snprintf(layout_str, sizeof(layout_str), "%d", channels);
4911 stream_str = strchr(opt, ':');
4912 ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
4913 ac_str = av_mallocz(ac_str_size);
4915 return AVERROR(ENOMEM);
4916 av_strlcpy(ac_str, "ac", 3);
4918 av_strlcat(ac_str, stream_str, ac_str_size);
4919 ret = parse_option(o, ac_str, layout_str, options);
4925 static int opt_filter_complex(const char *opt, const char *arg)
4927 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
4928 &nb_filtergraphs, nb_filtergraphs + 1);
4929 if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
4930 return AVERROR(ENOMEM);
4931 filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
4932 filtergraphs[nb_filtergraphs - 1]->graph_desc = arg;
4936 #define OFFSET(x) offsetof(OptionsContext, x)
4937 static const OptionDef options[] = {
4939 #include "cmdutils_common_opts.h"
4940 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4941 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4942 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4943 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4944 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4945 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4946 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4947 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4948 "outfile[,metadata]:infile[,metadata]" },
4949 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4950 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4951 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4952 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4953 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4954 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4955 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4956 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4957 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4958 "add timings for benchmarking" },
4959 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4960 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4961 "dump each input packet" },
4962 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4963 "when dumping packets, also dump the payload" },
4964 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4965 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4966 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4967 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4968 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4969 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)©_ts}, "copy timestamps" },
4970 { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)©_tb}, "copy input stream time base when stream copying" },
4971 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4972 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4973 { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4974 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4975 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4976 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4977 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4978 { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4979 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4980 { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
4981 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4982 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4983 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4984 { "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "set CPU flags mask", "mask" },
4987 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4988 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4989 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4990 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4991 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4992 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4993 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4994 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4995 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4996 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4997 "use same quantizer as source (implies VBR)" },
4998 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4999 { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
5000 { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
5001 "this option is deprecated, use the yadif filter instead" },
5002 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
5003 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
5004 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
5005 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
5006 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
5007 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
5008 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
5009 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
5010 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
5011 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
5012 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
5013 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
5016 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
5017 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
5018 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
5019 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
5020 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
5021 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
5022 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
5023 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
5024 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
5025 { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
5027 /* subtitle options */
5028 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
5029 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
5030 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
5033 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
5036 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
5037 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
5039 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
5041 /* data codec support */
5042 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
5044 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
5048 int main(int argc, char **argv)
5050 OptionsContext o = { 0 };
5055 av_log_set_flags(AV_LOG_SKIP_REPEATED);
5056 parse_loglevel(argc, argv, options);
5058 avcodec_register_all();
5060 avdevice_register_all();
5062 avfilter_register_all();
5064 avformat_network_init();
5068 parse_cpuflags(argc, argv, options);
5071 parse_options(&o, argc, argv, options, opt_output_file);
5073 if (nb_output_files <= 0 && nb_input_files == 0) {
5075 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5079 /* file converter / grab */
5080 if (nb_output_files <= 0) {
5081 fprintf(stderr, "At least one output file must be specified\n");
5085 if (nb_input_files == 0) {
5086 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
5091 if (transcode() < 0)
5093 ti = getutime() - ti;
5095 int maxrss = getmaxrss() / 1024;
5096 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);