3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavresample/avresample.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/vsrc_buffer.h"
55 #if HAVE_SYS_RESOURCE_H
56 #include <sys/types.h>
58 #include <sys/resource.h>
59 #elif HAVE_GETPROCESSTIMES
62 #if HAVE_GETPROCESSMEMORYINFO
68 #include <sys/select.h>
75 #include "libavutil/avassert.h"
78 #define VSYNC_PASSTHROUGH 0
/* Program identity, used by cmdutils for banner/version output. */
82 const char program_name[] = "avconv";
83 const int program_birth_year = 2000;
85 /* select an input stream for an output stream */
86 typedef struct StreamMap {
87 int disabled; /** 1 if this mapping is disabled by a negative map */
91 int sync_stream_index;
92 char *linklabel; /** name of an output link, for mapping lavfi outputs */
96 * select an input file for an output file
98 typedef struct MetadataMap {
99 int file; ///< file index
100 char type; ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
101 int index; ///< stream/chapter/program number
104 static const OptionDef options[];
/* Global transcode settings, filled in by the command-line option parser. */
106 static int video_discard = 0;
107 static int same_quant = 0;
108 static int do_deinterlace = 0;
109 static int intra_dc_precision = 8;
110 static int qp_hist = 0;
112 static int file_overwrite = 0;
113 static int do_benchmark = 0;
114 static int do_hex_dump = 0;
115 static int do_pkt_dump = 0;
116 static int do_pass = 0;
117 static char *pass_logfilename_prefix = NULL;
118 static int video_sync_method = VSYNC_AUTO;
119 static int audio_sync_method = 0;
120 static float audio_drift_threshold = 0.1;
121 static int copy_ts = 0;
122 static int copy_tb = 1;
123 static int opt_shortest = 0;
124 static char *vstats_filename;
125 static FILE *vstats_file;
127 static int audio_volume = 256;
129 static int exit_on_error = 0;
130 static int using_stdin = 0;
/* Running byte totals and frame counters, reported in the final stats. */
131 static int64_t video_size = 0;
132 static int64_t audio_size = 0;
133 static int64_t extra_size = 0;
134 static int nb_frames_dup = 0;
135 static int nb_frames_drop = 0;
136 static int input_sync;
138 static float dts_delta_threshold = 10;
140 static int print_stats = 1;
/* Scratch buffers (and their allocated sizes) reused across audio encodes. */
142 static uint8_t *audio_buf;
143 static unsigned int allocated_audio_buf_size;
144 static uint8_t *async_buf;
145 static unsigned int allocated_async_buf_size;
147 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
/* One input of a filtergraph: ties a decoding InputStream to a buffer source. */
149 typedef struct InputFilter {
150 AVFilterContext *filter;
151 struct InputStream *ist;
152 struct FilterGraph *graph;
/* One output of a filtergraph: ties a sink to an encoding OutputStream. */
155 typedef struct OutputFilter {
156 AVFilterContext *filter;
157 struct OutputStream *ost;
158 struct FilterGraph *graph;
160 /* temporary storage until stream maps are processed */
161 AVFilterInOut *out_tmp;
/* A lavfi graph together with its resolved inputs and outputs. */
164 typedef struct FilterGraph {
166 const char *graph_desc;
168 AVFilterGraph *graph;
170 InputFilter **inputs;
172 OutputFilter **outputs;
/* Pooled decoder picture buffer, managed by codec_get_buffer()/unref_buffer(). */
176 typedef struct FrameBuffer {
182 enum PixelFormat pix_fmt;
185 struct InputStream *ist;
186 struct FrameBuffer *next;
/* Per-input-stream demuxing/decoding state. */
189 typedef struct InputStream {
192 int discard; /* true if stream data should be discarded */
193 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
195 AVFrame *decoded_frame;
197 int64_t start; /* time when read started */
198 /* predicted dts of the next packet read for this stream or (when there are
199 * several frames in a packet) of the next frame in current packet */
201 /* dts of the last packet read for this stream */
203 PtsCorrectionContext pts_ctx;
205 int is_start; /* is 1 at the start and after a discontinuity */
206 int showed_multi_packet_warning;
211 int resample_pix_fmt;
213 /* a pool of free buffers for decoded data */
214 FrameBuffer *buffer_pool;
216 /* decoded data from this stream goes into all those filters
217 * currently video only */
218 InputFilter **filters;
/* Per-input-file demuxing state. */
222 typedef struct InputFile {
223 AVFormatContext *ctx;
224 int eof_reached; /* true if eof reached */
225 int ist_index; /* index of first stream in ist_table */
226 int buffer_size; /* current total buffer size */
228 int nb_streams; /* number of stream that avconv is aware of; may be different
229 from ctx.nb_streams if new streams appear during av_read_frame() */
/* Per-output-stream encoding state. */
233 typedef struct OutputStream {
234 int file_index; /* file index */
235 int index; /* stream index in the output file */
236 int source_index; /* InputStream index */
237 AVStream *st; /* stream in the output file */
238 int encoding_needed; /* true if encoding needed for this stream */
240 /* input pts and corresponding output pts
242 // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
243 struct InputStream *sync_ist; /* input stream to sync against */
244 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
245 /* pts of the first frame encoded for this stream, used for limiting
248 AVBitStreamFilterContext *bitstream_filters;
251 AVFrame *output_frame;
252 AVFrame *filtered_frame;
255 AVRational frame_rate;
259 float frame_aspect_ratio;
262 /* forced key frames */
263 int64_t *forced_kf_pts;
/* audio resampling state (libavresample) */
269 AVAudioResampleContext *avr;
270 int resample_sample_fmt;
271 int resample_channels;
272 uint64_t resample_channel_layout;
273 int resample_sample_rate;
274 AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */
277 OutputFilter *filter;
282 int is_past_recording_time;
284 const char *attachment_filename;
285 int copy_initial_nonkeyframes;
287 enum PixelFormat pix_fmts[2];
/* Per-output-file muxing state. */
291 typedef struct OutputFile {
292 AVFormatContext *ctx;
294 int ost_index; /* index of the first stream in output_streams */
295 int64_t recording_time; /* desired length of the resulting file in microseconds */
296 int64_t start_time; /* start time in microseconds */
297 uint64_t limit_filesize;
/* Global registries of all streams/files/filtergraphs known to the program. */
300 static InputStream **input_streams = NULL;
301 static int nb_input_streams = 0;
302 static InputFile **input_files = NULL;
303 static int nb_input_files = 0;
305 static OutputStream **output_streams = NULL;
306 static int nb_output_streams = 0;
307 static OutputFile **output_files = NULL;
308 static int nb_output_files = 0;
310 static FilterGraph **filtergraphs;
/*
 * All per-file options gathered during command-line parsing; freed and
 * reset to defaults between files by reset_options().
 */
313 typedef struct OptionsContext {
314 /* input/output options */
318 SpecifierOpt *codec_names;
320 SpecifierOpt *audio_channels;
321 int nb_audio_channels;
322 SpecifierOpt *audio_sample_rate;
323 int nb_audio_sample_rate;
324 SpecifierOpt *frame_rates;
326 SpecifierOpt *frame_sizes;
328 SpecifierOpt *frame_pix_fmts;
329 int nb_frame_pix_fmts;
332 int64_t input_ts_offset;
335 SpecifierOpt *ts_scale;
337 SpecifierOpt *dump_attachment;
338 int nb_dump_attachment;
341 StreamMap *stream_maps;
343 /* first item specifies output metadata, second is input */
344 MetadataMap (*meta_data_maps)[2];
345 int nb_meta_data_maps;
346 int metadata_global_manual;
347 int metadata_streams_manual;
348 int metadata_chapters_manual;
349 const char **attachments;
352 int chapters_input_file;
354 int64_t recording_time;
355 uint64_t limit_filesize;
361 int subtitle_disable;
364 /* indexed by output file stream index */
368 SpecifierOpt *metadata;
370 SpecifierOpt *max_frames;
372 SpecifierOpt *bitstream_filters;
373 int nb_bitstream_filters;
374 SpecifierOpt *codec_tags;
376 SpecifierOpt *sample_fmts;
378 SpecifierOpt *qscale;
380 SpecifierOpt *forced_key_frames;
381 int nb_forced_key_frames;
382 SpecifierOpt *force_fps;
384 SpecifierOpt *frame_aspect_ratios;
385 int nb_frame_aspect_ratios;
386 SpecifierOpt *rc_overrides;
388 SpecifierOpt *intra_matrices;
389 int nb_intra_matrices;
390 SpecifierOpt *inter_matrices;
391 int nb_inter_matrices;
392 SpecifierOpt *top_field_first;
393 int nb_top_field_first;
394 SpecifierOpt *metadata_map;
396 SpecifierOpt *presets;
398 SpecifierOpt *copy_initial_nonkeyframes;
399 int nb_copy_initial_nonkeyframes;
400 SpecifierOpt *filters;
/*
 * Copy the last per-stream option whose stream specifier matches st into
 * outvar. NOTE(review): the macro's error-handling lines are elided in
 * this excerpt -- confirm against the full source before editing.
 */
404 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
407 for (i = 0; i < o->nb_ ## name; i++) {\
408 char *spec = o->name[i].specifier;\
409 if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
410 outvar = o->name[i].u.type;\
/*
 * Free all option storage in *o and restore the documented defaults,
 * ready for parsing the next input/output file's options.
 */
416 static void reset_options(OptionsContext *o)
418 const OptionDef *po = options;
421 /* all OPT_SPEC and OPT_STRING can be freed in generic way */
423 void *dst = (uint8_t*)o + po->u.off;
425 if (po->flags & OPT_SPEC) {
426 SpecifierOpt **so = dst;
427 int i, *count = (int*)(so + 1);
428 for (i = 0; i < *count; i++) {
429 av_freep(&(*so)[i].specifier);
430 if (po->flags & OPT_STRING)
431 av_freep(&(*so)[i].u.str);
435 } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
440 for (i = 0; i < o->nb_stream_maps; i++)
441 av_freep(&o->stream_maps[i].linklabel);
442 av_freep(&o->stream_maps);
443 av_freep(&o->meta_data_maps);
444 av_freep(&o->streamid_map);
446 memset(o, 0, sizeof(*o));
/* non-zero defaults must be re-applied after the memset */
448 o->mux_max_delay = 0.7;
449 o->recording_time = INT64_MAX;
450 o->limit_filesize = UINT64_MAX;
451 o->chapters_input_file = INT_MAX;
/*
 * Allocate a new pooled FrameBuffer sized for the codec context's current
 * width/height/pix_fmt and store it in *pbuf.
 * Returns 0 on success or a negative AVERROR code.
 */
457 static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
459 FrameBuffer *buf = av_mallocz(sizeof(*buf));
461 const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
462 int h_chroma_shift, v_chroma_shift;
463 int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
464 int w = s->width, h = s->height;
467 return AVERROR(ENOMEM);
469 if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
474 avcodec_align_dimensions(s, &w, &h);
475 if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
476 s->pix_fmt, 32)) < 0) {
480 /* XXX this shouldn't be needed, but some tests break without this line
481 * those decoders are buggy and need to be fixed.
482 * the following tests fail:
483 * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
485 memset(buf->base[0], 128, ret);
/* offset each plane past the edge padding unless the codec draws edges itself */
487 avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
488 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
489 const int h_shift = i==0 ? 0 : h_chroma_shift;
490 const int v_shift = i==0 ? 0 : v_chroma_shift;
491 if (s->flags & CODEC_FLAG_EMU_EDGE)
492 buf->data[i] = buf->base[i];
494 buf->data[i] = buf->base[i] +
495 FFALIGN((buf->linesize[i]*edge >> v_shift) +
496 (pixel_size*edge >> h_shift), 32);
500 buf->pix_fmt = s->pix_fmt;
/* Free every buffer remaining in the input stream's pool. */
507 static void free_buffer_pool(InputStream *ist)
509 FrameBuffer *buf = ist->buffer_pool;
511 ist->buffer_pool = buf->next;
512 av_freep(&buf->base[0]);
514 buf = ist->buffer_pool;
/* Drop one reference; when the count hits zero, return the buffer to the pool. */
518 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
520 av_assert0(buf->refcount);
522 if (!buf->refcount) {
523 buf->next = ist->buffer_pool;
524 ist->buffer_pool = buf;
/*
 * AVCodecContext.get_buffer callback: hand the decoder a frame backed by
 * a pooled FrameBuffer, allocating or re-allocating one when needed.
 */
528 static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
530 InputStream *ist = s->opaque;
534 if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
537 buf = ist->buffer_pool;
538 ist->buffer_pool = buf->next;
/* pooled buffer no longer matches the stream parameters: re-allocate it */
540 if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
541 av_freep(&buf->base[0]);
543 if ((ret = alloc_buffer(ist, s, &buf)) < 0)
549 frame->type = FF_BUFFER_TYPE_USER;
550 frame->extended_data = frame->data;
551 frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
552 frame->width = buf->w;
553 frame->height = buf->h;
554 frame->format = buf->pix_fmt;
555 frame->sample_aspect_ratio = s->sample_aspect_ratio;
557 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
558 frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
559 frame->data[i] = buf->data[i];
560 frame->linesize[i] = buf->linesize[i];
/* AVCodecContext.release_buffer callback: detach the frame, unref the pool entry. */
566 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
568 InputStream *ist = s->opaque;
569 FrameBuffer *buf = frame->opaque;
572 for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
573 frame->data[i] = NULL;
575 unref_buffer(ist, buf);
/* AVFilterBuffer free callback for pooled buffers fed into filtergraphs. */
578 static void filter_release_buffer(AVFilterBuffer *fb)
580 FrameBuffer *buf = fb->priv;
582 unref_buffer(buf->ist, buf);
/*
 * Return the pixel format list the sink should accept for this output
 * stream: the explicitly requested format if one was set, otherwise the
 * encoder's supported list.
 */
585 static const enum PixelFormat *choose_pixel_fmts(OutputStream *ost)
587 if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
588 ost->pix_fmts[0] = ost->st->codec->pix_fmt;
589 return ost->pix_fmts;
590 } else if (ost->enc->pix_fmts)
591 return ost->enc->pix_fmts;
/*
 * Build the filtergraph for a simple one-input/one-output video stream:
 * buffer source -> (optional auto scaler) -> user chain -> sink.
 * Returns 0 on success or a negative AVERROR code.
 */
596 static int configure_video_filters(FilterGraph *fg)
598 InputStream *ist = fg->inputs[0]->ist;
599 OutputStream *ost = fg->outputs[0]->ost;
600 AVFilterContext *last_filter, *filter;
601 /** filter graph containing all filters including input & output */
602 AVCodecContext *codec = ost->st->codec;
603 SinkContext sink_ctx = { .pix_fmts = choose_pixel_fmts(ost) };
604 AVRational sample_aspect_ratio;
608 avfilter_graph_free(&fg->graph);
609 fg->graph = avfilter_graph_alloc();
/* prefer the container-level aspect ratio over the codec-level one */
611 if (ist->st->sample_aspect_ratio.num) {
612 sample_aspect_ratio = ist->st->sample_aspect_ratio;
614 sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
616 snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
617 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
618 sample_aspect_ratio.num, sample_aspect_ratio.den);
620 ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
621 avfilter_get_by_name("buffer"),
622 "src", args, NULL, fg->graph);
625 ret = avfilter_graph_create_filter(&fg->outputs[0]->filter, &sink,
626 "out", NULL, &sink_ctx, fg->graph);
629 last_filter = fg->inputs[0]->filter;
/* auto-insert a scaler when the encoder demands fixed dimensions */
631 if (codec->width || codec->height) {
632 snprintf(args, 255, "%d:%d:flags=0x%X",
635 (unsigned)ost->sws_flags);
636 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
637 NULL, args, NULL, fg->graph)) < 0)
639 if ((ret = avfilter_link(last_filter, 0, filter, 0)) < 0)
641 last_filter = filter;
644 snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
645 fg->graph->scale_sws_opts = av_strdup(args);
/* splice the user-supplied filter chain between source and sink */
648 AVFilterInOut *outputs = avfilter_inout_alloc();
649 AVFilterInOut *inputs = avfilter_inout_alloc();
651 outputs->name = av_strdup("in");
652 outputs->filter_ctx = last_filter;
653 outputs->pad_idx = 0;
654 outputs->next = NULL;
656 inputs->name = av_strdup("out");
657 inputs->filter_ctx = fg->outputs[0]->filter;
661 if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
664 if ((ret = avfilter_link(last_filter, 0, fg->outputs[0]->filter, 0)) < 0)
668 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
671 ost->filter = fg->outputs[0];
/*
 * Allocate a FilterGraph connecting exactly one input stream to one
 * output stream, and register it in the global filtergraphs array and
 * in ist->filters. Returns the new graph.
 */
676 static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
678 FilterGraph *fg = av_mallocz(sizeof(*fg));
682 fg->index = nb_filtergraphs;
684 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
686 if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
688 fg->outputs[0]->ost = ost;
689 fg->outputs[0]->graph = fg;
691 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
693 if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
695 fg->inputs[0]->ist = ist;
696 fg->inputs[0]->graph = fg;
698 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
699 &ist->nb_filters, ist->nb_filters + 1);
700 ist->filters[ist->nb_filters - 1] = fg->inputs[0];
702 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
703 &nb_filtergraphs, nb_filtergraphs + 1);
704 filtergraphs[nb_filtergraphs - 1] = fg;
/*
 * Bind one unconnected input pad of a complex filtergraph to an input
 * stream: either the stream named by the pad's link label
 * ("file_index[:stream_specifier]"), or the first unused input stream of
 * the matching media type. Marks the chosen stream for decoding and
 * appends a new InputFilter to both fg->inputs and ist->filters.
 * FIX: corrected "filtegraph" -> "filtergraph" in the error message.
 */
709 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
712 enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
715 // TODO: support other filter types
716 if (type != AVMEDIA_TYPE_VIDEO) {
717 av_log(NULL, AV_LOG_FATAL, "Only video filters supported currently.\n");
/* labeled pad: parse "file_index[:stream_specifier]" */
725 int file_idx = strtol(in->name, &p, 0);
727 if (file_idx < 0 || file_idx >= nb_input_files) {
728 av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
729 file_idx, fg->graph_desc);
732 s = input_files[file_idx]->ctx;
734 for (i = 0; i < s->nb_streams; i++) {
735 if (s->streams[i]->codec->codec_type != type)
737 if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
743 av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
744 "matches no streams.\n", p, fg->graph_desc);
747 ist = input_streams[input_files[file_idx]->ist_index + st->index];
749 /* find the first unused stream of corresponding type */
750 for (i = 0; i < nb_input_streams; i++) {
751 ist = input_streams[i];
752 if (ist->st->codec->codec_type == type && ist->discard)
755 if (i == nb_input_streams) {
756 av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
757 "unlabeled input pad %d on filter %s", in->pad_idx,
758 in->filter_ctx->name);
/* the chosen stream must now be decoded and fed into the graph */
763 ist->decoding_needed = 1;
764 ist->st->discard = AVDISCARD_NONE;
766 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
767 &fg->nb_inputs, fg->nb_inputs + 1);
768 if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
770 fg->inputs[fg->nb_inputs - 1]->ist = ist;
771 fg->inputs[fg->nb_inputs - 1]->graph = fg;
773 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
774 &ist->nb_filters, ist->nb_filters + 1);
775 ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
/*
 * Attach a sink (plus an auto-inserted scaler when the encoder has fixed
 * dimensions) to one output pad of a complex filtergraph.
 */
778 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
780 SinkContext sink_ctx;
781 AVCodecContext *codec = ofilter->ost->st->codec;
782 AVFilterContext *last_filter = out->filter_ctx;
783 int pad_idx = out->pad_idx;
786 sink_ctx.pix_fmts = choose_pixel_fmts(ofilter->ost);
788 ret = avfilter_graph_create_filter(&ofilter->filter, &sink,
789 "out", NULL, &sink_ctx, fg->graph);
/* insert a scaler when the encoder requires fixed dimensions */
793 if (codec->width || codec->height) {
795 snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
798 (unsigned)ofilter->ost->sws_flags);
799 if ((ret = avfilter_graph_create_filter(&last_filter, avfilter_get_by_name("scale"),
800 NULL, args, NULL, fg->graph)) < 0)
802 if ((ret = avfilter_link(out->filter_ctx, out->pad_idx, last_filter, 0)) < 0)
807 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
/*
 * Parse and configure a complex (user-described) filtergraph: create a
 * buffer source for each input, connect the outputs, and validate the
 * graph. Returns 0 on success or a negative AVERROR code.
 */
813 static int configure_complex_filter(FilterGraph *fg)
815 AVFilterInOut *inputs, *outputs, *cur;
816 int ret, i, init = !fg->graph;
818 avfilter_graph_free(&fg->graph);
819 if (!(fg->graph = avfilter_graph_alloc()))
820 return AVERROR(ENOMEM);
822 if ((ret = avfilter_graph_parse2(fg->graph, fg->graph_desc, &inputs, &outputs)) < 0)
/* on first configuration, bind every unconnected input pad to a stream */
825 for (cur = inputs; init && cur; cur = cur->next)
826 init_input_filter(fg, cur);
828 for (cur = inputs, i = 0; cur; cur = cur->next, i++) {
829 InputFilter *ifilter = fg->inputs[i];
830 InputStream *ist = ifilter->ist;
834 sar = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
835 ist->st->codec->sample_aspect_ratio;
836 snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
837 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
840 if ((ret = avfilter_graph_create_filter(&ifilter->filter,
841 avfilter_get_by_name("buffer"), cur->name,
842 args, NULL, fg->graph)) < 0)
844 if ((ret = avfilter_link(ifilter->filter, 0,
845 cur->filter_ctx, cur->pad_idx)) < 0)
848 avfilter_inout_free(&inputs);
851 /* we already know the mappings between lavfi outputs and output streams,
852 * so we can finish the setup */
853 for (cur = outputs, i = 0; cur; cur = cur->next, i++)
854 configure_output_filter(fg, fg->outputs[i], cur);
855 avfilter_inout_free(&outputs);
857 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
860 /* wait until output mappings are processed */
861 for (cur = outputs; cur;) {
862 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
863 &fg->nb_outputs, fg->nb_outputs + 1);
864 if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
866 fg->outputs[fg->nb_outputs - 1]->graph = fg;
867 fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
869 fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
/* Configure every complex filtergraph that has not been set up yet. */
876 static int configure_complex_filters(void)
880 for (i = 0; i < nb_filtergraphs; i++)
881 if (!filtergraphs[i]->graph &&
882 (ret = configure_complex_filter(filtergraphs[i])) < 0)
/* Dispatch to the complex or the simple (single in/out video) setup path. */
887 static int configure_filtergraph(FilterGraph *fg)
889 return fg->graph_desc ? configure_complex_filter(fg) : configure_video_filters(fg);
/* Return whether ist feeds any input of filtergraph fg. */
892 static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
895 for (i = 0; i < fg->nb_inputs; i++)
896 if (fg->inputs[i]->ist == ist)
/* Terminal cleanup on exit; the QUIET log call flushes pending output. */
901 static void term_exit(void)
903 av_log(NULL, AV_LOG_QUIET, "");
/* Signal bookkeeping, read by the main loop and the interrupt callback. */
906 static volatile int received_sigterm = 0;
907 static volatile int received_nb_signals = 0;
910 sigterm_handler(int sig)
912 received_sigterm = sig;
913 received_nb_signals++;
/* Install signal handlers for clean termination. */
917 static void term_init(void)
919 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
920 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
922 signal(SIGXCPU, sigterm_handler);
/* AVIO interrupt callback: abort blocking I/O after a second signal. */
926 static int decode_interrupt_cb(void *ctx)
928 return received_nb_signals > 1;
931 static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/*
 * Global cleanup and process exit: free all filtergraphs, output
 * files/streams, input files/streams and scratch buffers; the final
 * exit() call is outside this excerpt.
 */
933 void exit_program(int ret)
937 for (i = 0; i < nb_filtergraphs; i++) {
938 avfilter_graph_free(&filtergraphs[i]->graph);
939 for (j = 0; j < filtergraphs[i]->nb_inputs; j++)
940 av_freep(&filtergraphs[i]->inputs[j]);
941 av_freep(&filtergraphs[i]->inputs);
942 for (j = 0; j < filtergraphs[i]->nb_outputs; j++)
943 av_freep(&filtergraphs[i]->outputs[j]);
944 av_freep(&filtergraphs[i]->outputs);
945 av_freep(&filtergraphs[i]);
947 av_freep(&filtergraphs);
/* close and free output contexts */
950 for (i = 0; i < nb_output_files; i++) {
951 AVFormatContext *s = output_files[i]->ctx;
952 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
954 avformat_free_context(s);
955 av_dict_free(&output_files[i]->opts);
956 av_freep(&output_files[i]);
958 for (i = 0; i < nb_output_streams; i++) {
959 AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
961 AVBitStreamFilterContext *next = bsfc->next;
962 av_bitstream_filter_close(bsfc);
965 output_streams[i]->bitstream_filters = NULL;
967 if (output_streams[i]->output_frame) {
968 AVFrame *frame = output_streams[i]->output_frame;
969 if (frame->extended_data != frame->data)
970 av_freep(&frame->extended_data);
974 av_freep(&output_streams[i]->avfilter);
975 av_freep(&output_streams[i]->filtered_frame);
976 av_freep(&output_streams[i]);
/* close and free input contexts */
978 for (i = 0; i < nb_input_files; i++) {
979 avformat_close_input(&input_files[i]->ctx);
980 av_freep(&input_files[i]);
982 for (i = 0; i < nb_input_streams; i++) {
983 av_freep(&input_streams[i]->decoded_frame);
984 av_dict_free(&input_streams[i]->opts);
985 free_buffer_pool(input_streams[i]);
986 av_freep(&input_streams[i]->filters);
987 av_freep(&input_streams[i]);
992 av_free(vstats_filename);
994 av_freep(&input_streams);
995 av_freep(&input_files);
996 av_freep(&output_streams);
997 av_freep(&output_files);
1001 allocated_audio_buf_size = 0;
1003 allocated_async_buf_size = 0;
1006 avformat_network_deinit();
1008 if (received_sigterm) {
1009 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
1010 (int) received_sigterm);
/* Abort if any option in the dictionary was not consumed by the library. */
1017 static void assert_avoptions(AVDictionary *m)
1019 AVDictionaryEntry *t;
1020 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1021 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Refuse experimental codecs unless -strict experimental was given. */
1026 static void assert_codec_experimental(AVCodecContext *c, int encoder)
1028 const char *codec_string = encoder ? "encoder" : "decoder";
1030 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
1031 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1032 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
1033 "results.\nAdd '-strict experimental' if you want to use it.\n",
1034 codec_string, c->codec->name);
/* suggest a non-experimental alternative when one exists */
1035 codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
1036 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
1037 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
1038 codec_string, codec->name);
/*
 * If the stream's sample format is not supported by the codec, fall back
 * to the codec's first supported format (with a warning).
 */
1043 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
1045 if (codec && codec->sample_fmts) {
1046 const enum AVSampleFormat *p = codec->sample_fmts;
1047 for (; *p != -1; p++) {
1048 if (*p == st->codec->sample_fmt)
1052 av_log(NULL, AV_LOG_WARNING,
1053 "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
1054 av_get_sample_fmt_name(st->codec->sample_fmt),
1056 av_get_sample_fmt_name(codec->sample_fmts[0]));
1057 st->codec->sample_fmt = codec->sample_fmts[0];
1063 * Update the requested input sample format based on the output sample format.
1064 * This is currently only used to request float output from decoders which
1065 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
1066 * Ideally this will be removed in the future when decoders do not do format
1067 * conversion and only output in their native format.
1069 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
1070 AVCodecContext *enc)
1072 /* if sample formats match or a decoder sample format has already been
1073 requested, just return */
1074 if (enc->sample_fmt == dec->sample_fmt ||
1075 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
1078 /* if decoder supports more than one output format */
1079 if (dec_codec && dec_codec->sample_fmts &&
1080 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
1081 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
1082 const enum AVSampleFormat *p;
/* FIX: these were initialized to -1, which made FFMIN(min_*, positive)
 * always keep -1, so the closest-quality fallback below never picked a
 * real format. Initialize to INT_MAX and test against it instead. */
1083 int min_dec = INT_MAX, min_inc = INT_MAX;
1085 /* find a matching sample format in the encoder */
1086 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
1087 if (*p == enc->sample_fmt) {
1088 dec->request_sample_fmt = *p;
1090 } else if (*p > enc->sample_fmt) {
1091 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
1093 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
1096 /* if none match, provide the one that matches quality closest */
1097 dec->request_sample_fmt = min_inc != INT_MAX ? enc->sample_fmt + min_inc :
1098 enc->sample_fmt - min_dec;
/* Pick the codec-supported sample rate closest to the requested one. */
1102 static void choose_sample_rate(AVStream *st, AVCodec *codec)
1104 if (codec && codec->supported_samplerates) {
1105 const int *p = codec->supported_samplerates;
1107 int best_dist = INT_MAX;
1109 int dist = abs(st->codec->sample_rate - *p);
1110 if (dist < best_dist) {
1116 av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best);
1118 st->codec->sample_rate = best;
/* Convert a stream pts to seconds relative to the output file's start time. */
1123 get_sync_ipts(const OutputStream *ost, int64_t pts)
1125 OutputFile *of = output_files[ost->file_index];
1126 return (double)(pts - of->start_time) / AV_TIME_BASE;
/*
 * Run the stream's bitstream filters on pkt (if any) and hand it to the
 * muxer via av_interleaved_write_frame(), enforcing the -frames limit
 * for non-video streams.
 */
1129 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
1131 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
1132 AVCodecContext *avctx = ost->st->codec;
1136 * Audio encoders may split the packets -- #frames in != #packets out.
1137 * But there is no reordering, so we can limit the number of output packets
1138 * by simply dropping them here.
1139 * Counting encoded video frames needs to be done separately because of
1140 * reordering, see do_video_out()
1142 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
1143 if (ost->frame_number >= ost->max_frames) {
1144 av_free_packet(pkt);
1147 ost->frame_number++;
/* apply the chain of bitstream filters to the packet payload */
1151 AVPacket new_pkt = *pkt;
1152 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
1153 &new_pkt.data, &new_pkt.size,
1154 pkt->data, pkt->size,
1155 pkt->flags & AV_PKT_FLAG_KEY);
1157 av_free_packet(pkt);
1158 new_pkt.destruct = av_destruct_packet;
1160 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
1161 bsfc->filter->name, pkt->stream_index,
1162 avctx->codec ? avctx->codec->name : "copy");
1172 pkt->stream_index = ost->index;
1173 ret = av_interleaved_write_frame(s, pkt);
1175 print_error("av_interleaved_write_frame()", ret);
/*
 * Check whether the -t recording limit has been reached for this stream;
 * sets is_past_recording_time when it has.
 */
1180 static int check_recording_time(OutputStream *ost)
1182 OutputFile *of = output_files[ost->file_index];
1184 if (of->recording_time != INT64_MAX &&
1185 av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
1186 AV_TIME_BASE_Q) >= 0) {
1187 ost->is_past_recording_time = 1;
/*
 * Validate/guess channel layouts for an input/output audio stream pair:
 * an invalid decoder layout is dropped, and missing layouts are either
 * copied from the other side (when channel counts match) or guessed
 * from the channel count.
 */
1193 static void get_default_channel_layouts(OutputStream *ost, InputStream *ist)
1195 char layout_name[256];
1196 AVCodecContext *enc = ost->st->codec;
1197 AVCodecContext *dec = ist->st->codec;
/* drop a decoder layout that disagrees with the channel count */
1199 if (dec->channel_layout &&
1200 av_get_channel_layout_nb_channels(dec->channel_layout) != dec->channels) {
1201 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1202 dec->channels, dec->channel_layout);
1203 av_log(NULL, AV_LOG_ERROR, "New channel layout (%s) is invalid\n",
1205 dec->channel_layout = 0;
1207 if (!dec->channel_layout) {
1208 if (enc->channel_layout && dec->channels == enc->channels) {
1209 dec->channel_layout = enc->channel_layout;
1211 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1213 if (!dec->channel_layout) {
1214 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1215 "layout for Input Stream #%d.%d\n", ist->file_index,
1220 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1221 dec->channels, dec->channel_layout);
1222 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1223 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1225 if (!enc->channel_layout) {
1226 if (dec->channels == enc->channels) {
1227 enc->channel_layout = dec->channel_layout;
1230 enc->channel_layout = av_get_default_channel_layout(enc->channels);
1232 if (!enc->channel_layout) {
1233 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel layout "
1234 "for Output Stream #%d.%d\n", ost->file_index,
1238 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1239 enc->channels, enc->channel_layout);
1240 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Output Stream "
1241 "#%d.%d : %s\n", ost->file_index, ost->st->index, layout_name);
/* Fill buf with digital silence; U8 silence is not the zero byte. */
1245 static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
1247 int fill_char = 0x00;
1248 if (sample_fmt == AV_SAMPLE_FMT_U8)
1250 memset(buf, fill_char, size);
/*
 * Wrap buf/buf_size in an AVFrame, encode it with the stream's audio
 * encoder and pass the resulting packet to write_frame(), rescaling
 * packet timestamps from the encoder to the stream time base.
 */
1253 static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
1254 const uint8_t *buf, int buf_size)
1256 AVCodecContext *enc = ost->st->codec;
1257 AVFrame *frame = NULL;
1259 int ret, got_packet;
1261 av_init_packet(&pkt);
/* lazily allocate a persistent frame that is reused across calls */
1266 if (!ost->output_frame) {
1267 ost->output_frame = avcodec_alloc_frame();
1268 if (!ost->output_frame) {
1269 av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n");
1273 frame = ost->output_frame;
1274 if (frame->extended_data != frame->data)
1275 av_freep(&frame->extended_data);
1276 avcodec_get_frame_defaults(frame);
1278 frame->nb_samples = buf_size /
1279 (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
1280 if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
1281 buf, buf_size, 1)) < 0) {
1282 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1286 if (!check_recording_time(ost))
/* sync_opts counts output samples; it doubles as the frame pts */
1289 frame->pts = ost->sync_opts;
1290 ost->sync_opts += frame->nb_samples;
1294 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
1295 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* rescale packet timestamps from the encoder to the stream time base */
1300 if (pkt.pts != AV_NOPTS_VALUE)
1301 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1302 if (pkt.dts != AV_NOPTS_VALUE)
1303 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1304 if (pkt.duration > 0)
1305 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1307 write_frame(s, &pkt, ost);
1309 audio_size += pkt.size;
/*
 * Grow the global audio output buffer (audio_buf) so it can hold nb_samples
 * input samples after resampling from dec's rate to enc's rate, including
 * generous safety margins. On success *buf_linesize receives the line size
 * computed by av_samples_get_buffer_size(); returns 0 or a negative AVERROR.
 */
1315 static int alloc_audio_output_buf(AVCodecContext *dec, AVCodecContext *enc,
1316 int nb_samples, int *buf_linesize)
1318 int64_t audio_buf_samples;
1321 /* calculate required number of samples to allocate */
/* rate-convert the sample count (rounded up via the +dec->sample_rate term;
 * the divisor line is not visible in this excerpt) */
1322 audio_buf_samples = ((int64_t)nb_samples * enc->sample_rate + dec->sample_rate) /
1324 audio_buf_samples = 4 * audio_buf_samples + 16; // safety factors for resampling
1325 audio_buf_samples = FFMAX(audio_buf_samples, enc->frame_size);
/* 64-bit intermediate guards against int overflow before this check */
1326 if (audio_buf_samples > INT_MAX)
1327 return AVERROR(EINVAL);
1329 audio_buf_size = av_samples_get_buffer_size(buf_linesize, enc->channels,
1331 enc->sample_fmt, 0);
1332 if (audio_buf_size < 0)
1333 return audio_buf_size;
/* reuses the existing allocation when already large enough */
1335 av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
1337 return AVERROR(ENOMEM);
/*
 * Take one decoded audio frame from ist, optionally resample it and apply
 * audio sync (drift) compensation, then feed fixed-size chunks to
 * encode_audio_frame() — via a FIFO for fixed-frame-size encoders, or as a
 * single chunk for variable-frame-size encoders.
 * NOTE(review): excerpt — several error-exit and buffer-setup lines between
 * the visible statements are not shown.
 */
1342 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
1343 InputStream *ist, AVFrame *decoded_frame)
1347 int size_out, frame_bytes, resample_changed, ret;
1348 AVCodecContext *enc = ost->st->codec;
1349 AVCodecContext *dec = ist->st->codec;
1350 int osize = av_get_bytes_per_sample(enc->sample_fmt);
1351 int isize = av_get_bytes_per_sample(dec->sample_fmt);
1352 uint8_t *buf = decoded_frame->data[0];
/* input payload size in bytes (interleaved samples) */
1353 int size = decoded_frame->nb_samples * dec->channels * isize;
1354 int out_linesize = 0;
1355 int buf_linesize = decoded_frame->linesize[0];
1357 get_default_channel_layouts(ost, ist);
1359 if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples, &out_linesize) < 0) {
1360 av_log(NULL, AV_LOG_FATAL, "Error allocating audio buffer\n");
/* resampling is needed whenever any audio parameter differs, or when drift
 * compensation (audio_sync_method > 1) is requested */
1364 if (audio_sync_method > 1 ||
1365 enc->channels != dec->channels ||
1366 enc->channel_layout != dec->channel_layout ||
1367 enc->sample_rate != dec->sample_rate ||
1368 dec->sample_fmt != enc->sample_fmt)
1369 ost->audio_resample = 1;
/* detect mid-stream input parameter changes against the cached values */
1371 resample_changed = ost->resample_sample_fmt != dec->sample_fmt ||
1372 ost->resample_channels != dec->channels ||
1373 ost->resample_channel_layout != dec->channel_layout ||
1374 ost->resample_sample_rate != dec->sample_rate;
1376 if ((ost->audio_resample && !ost->avr) || resample_changed) {
1377 if (resample_changed) {
1378 av_log(NULL, AV_LOG_INFO, "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:0x%"PRIx64" to rate:%d fmt:%s ch:%d chl:0x%"PRIx64"\n",
1379 ist->file_index, ist->st->index,
1380 ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt),
1381 ost->resample_channels, ost->resample_channel_layout,
1382 dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt),
1383 dec->channels, dec->channel_layout);
1384 ost->resample_sample_fmt = dec->sample_fmt;
1385 ost->resample_channels = dec->channels;
1386 ost->resample_channel_layout = dec->channel_layout;
1387 ost->resample_sample_rate = dec->sample_rate;
1389 avresample_close(ost->avr);
1391 /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */
1392 if (audio_sync_method <= 1 &&
1393 ost->resample_sample_fmt == enc->sample_fmt &&
1394 ost->resample_channels == enc->channels &&
1395 ost->resample_channel_layout == enc->channel_layout &&
1396 ost->resample_sample_rate == enc->sample_rate) {
1397 ost->audio_resample = 0;
1398 } else if (ost->audio_resample) {
/* (re)build the libavresample context with the current in/out parameters */
1400 ost->avr = avresample_alloc_context();
1402 av_log(NULL, AV_LOG_FATAL, "Error allocating context for libavresample\n");
1407 av_opt_set_int(ost->avr, "in_channel_layout", dec->channel_layout, 0);
1408 av_opt_set_int(ost->avr, "in_sample_fmt", dec->sample_fmt, 0);
1409 av_opt_set_int(ost->avr, "in_sample_rate", dec->sample_rate, 0);
1410 av_opt_set_int(ost->avr, "out_channel_layout", enc->channel_layout, 0);
1411 av_opt_set_int(ost->avr, "out_sample_fmt", enc->sample_fmt, 0);
1412 av_opt_set_int(ost->avr, "out_sample_rate", enc->sample_rate, 0);
1413 if (audio_sync_method > 1)
1414 av_opt_set_int(ost->avr, "force_resampling", 1, 0);
1416 /* if both the input and output formats are s16 or u8, use s16 as
1417 the internal sample format */
1418 if (av_get_bytes_per_sample(dec->sample_fmt) <= 2 &&
1419 av_get_bytes_per_sample(enc->sample_fmt) <= 2) {
1420 av_opt_set_int(ost->avr, "internal_sample_fmt", AV_SAMPLE_FMT_S16P, 0);
1423 ret = avresample_open(ost->avr);
1425 av_log(NULL, AV_LOG_FATAL, "Error opening libavresample\n");
1431 if (audio_sync_method > 0) {
/* drift (in output samples) between where this frame should land and where
 * the output currently is, accounting for samples buffered in the FIFO */
1432 double delta = get_sync_ipts(ost, ist->last_dts) * enc->sample_rate - ost->sync_opts -
1433 av_fifo_size(ost->fifo) / (enc->channels * osize);
1434 int idelta = delta * dec->sample_rate / enc->sample_rate;
1435 int byte_delta = idelta * isize * dec->channels;
1437 // FIXME resample delay
1438 if (fabs(delta) > 50) {
1439 if (ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate) {
/* negative drift: drop leading input samples */
1440 if (byte_delta < 0) {
1441 byte_delta = FFMAX(byte_delta, -size);
1444 av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
1445 -byte_delta / (isize * dec->channels));
/* positive drift: prepend silence in a temporary buffer */
1450 av_fast_malloc(&async_buf, &allocated_async_buf_size,
1453 av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
1457 if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples + idelta, &out_linesize) < 0) {
1458 av_log(NULL, AV_LOG_FATAL, "Error allocating audio buffer\n");
1463 generate_silence(async_buf, dec->sample_fmt, byte_delta);
1464 memcpy(async_buf + byte_delta, buf, size);
1467 buf_linesize = allocated_async_buf_size;
1468 av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
1470 } else if (audio_sync_method > 1) {
/* small drift: let the resampler stretch/squeeze gradually */
1471 int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
1472 av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
1473 delta, comp, enc->sample_rate);
1474 // fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
1475 avresample_set_compensation(ost->avr, comp, enc->sample_rate);
1478 } else if (audio_sync_method == 0)
1479 ost->sync_opts = lrintf(get_sync_ipts(ost, ist->last_dts) * enc->sample_rate) -
1480 av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
1482 if (ost->audio_resample) {
1484 size_out = avresample_convert(ost->avr, (void **)&buftmp,
1485 allocated_audio_buf_size, out_linesize,
1486 (void **)&buf, buf_linesize,
1487 size / (dec->channels * isize));
/* avresample_convert() returns a sample count; convert back to bytes */
1488 size_out = size_out * enc->channels * osize;
1494 /* now encode as many frames as possible */
1495 if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
1496 /* output resampled raw samples */
1497 if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
1498 av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
1501 av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);
/* fixed-frame-size encoder: drain the FIFO in exact frame_bytes chunks */
1503 frame_bytes = enc->frame_size * osize * enc->channels;
1505 while (av_fifo_size(ost->fifo) >= frame_bytes) {
1506 av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
1507 encode_audio_frame(s, ost, audio_buf, frame_bytes);
/* variable-frame-size encoder: hand over everything at once */
1510 encode_audio_frame(s, ost, buftmp, size_out);
/*
 * Optionally deinterlace a decoded picture in-place before further
 * processing. When deinterlacing succeeds, the result is copied back into
 * *picture; *bufp presumably receives the temporary buffer so the caller can
 * free it — the assignment is not visible in this excerpt (TODO confirm).
 */
1514 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1516 AVCodecContext *dec;
1517 AVPicture *picture2;
1518 AVPicture picture_tmp;
1521 dec = ist->st->codec;
1523 /* deinterlace : must be done before any resize */
1524 if (do_deinterlace) {
1527 /* create temporary picture */
1528 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1529 buf = av_malloc(size);
1533 picture2 = &picture_tmp;
1534 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1536 if (avpicture_deinterlace(picture2, picture,
1537 dec->pix_fmt, dec->width, dec->height) < 0) {
1538 /* if error, do not deinterlace */
1539 av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
/* copy the (possibly deinterlaced) result back over the caller's picture */
1548 if (picture != picture2)
1549 *picture = *picture2;
/*
 * Encode one decoded subtitle and mux the resulting packet(s).
 * DVB subtitles are encoded twice: one packet draws the subtitle, a second
 * clears it. NOTE(review): excerpt — the remaining parameters of the
 * signature (output stream, input stream, subtitle, pts — judging by the
 * uses below) are on lines not visible here.
 */
1553 static void do_subtitle_out(AVFormatContext *s,
1559 static uint8_t *subtitle_out = NULL;
1560 int subtitle_out_max_size = 1024 * 1024;
1561 int subtitle_out_size, nb, i;
1562 AVCodecContext *enc;
1565 if (pts == AV_NOPTS_VALUE) {
1566 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1572 enc = ost->st->codec;
/* lazily allocated, shared across calls (static) */
1574 if (!subtitle_out) {
1575 subtitle_out = av_malloc(subtitle_out_max_size);
1578 /* Note: DVB subtitle need one packet to draw them and one other
1579 packet to clear them */
1580 /* XXX: signal it in the codec context ? */
1581 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1586 for (i = 0; i < nb; i++) {
1587 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
1588 if (!check_recording_time(ost))
1591 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1592 // start_display_time is required to be 0
/* fold start_display_time (ms) into pts, then zero it */
1593 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1594 sub->end_display_time -= sub->start_display_time;
1595 sub->start_display_time = 0;
1596 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1597 subtitle_out_max_size, sub);
1598 if (subtitle_out_size < 0) {
1599 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1603 av_init_packet(&pkt);
1604 pkt.data = subtitle_out;
1605 pkt.size = subtitle_out_size;
1606 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1607 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1608 /* XXX: the pts correction is handled here. Maybe handling
1609 it in the codec would be better */
/* 90 kHz clock: draw packet offset by start, clear packet by end time */
1611 pkt.pts += 90 * sub->start_display_time;
1613 pkt.pts += 90 * sub->end_display_time;
1615 write_frame(s, &pkt, ost);
/*
 * Encode one video frame (possibly duplicating or dropping frames to match
 * the selected video sync mode) and mux the resulting packet(s).
 * *frame_size receives the encoded packet size (used for vstats).
 * NOTE(review): excerpt — the other signature parameters (output stream,
 * judging by the uses below), several case labels and error exits are on
 * lines not visible here.
 */
1619 static void do_video_out(AVFormatContext *s,
1621 AVFrame *in_picture,
1622 int *frame_size, float quality)
1624 int nb_frames, i, ret, format_video_sync;
1625 AVCodecContext *enc;
1626 double sync_ipts, delta;
1628 enc = ost->st->codec;
/* input pts expressed in encoder time-base ticks, vs. the output counter */
1630 sync_ipts = get_sync_ipts(ost, in_picture->pts) / av_q2d(enc->time_base);
1631 delta = sync_ipts - ost->sync_opts;
1633 /* by default, we output a single frame */
1638 format_video_sync = video_sync_method;
1639 if (format_video_sync == VSYNC_AUTO)
1640 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
1641 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
1643 switch (format_video_sync) {
1645 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
/* CFR: duplicate when the frame is late by more than ~1 tick */
1648 else if (delta > 1.1)
1649 nb_frames = lrintf(delta);
1654 else if (delta > 0.6)
1655 ost->sync_opts = lrintf(sync_ipts);
1657 case VSYNC_PASSTHROUGH:
1658 ost->sync_opts = lrintf(sync_ipts);
/* never exceed the per-stream frame limit */
1664 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1665 if (nb_frames == 0) {
1667 av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1669 } else if (nb_frames > 1) {
1670 nb_frames_dup += nb_frames - 1;
1671 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1674 if (!ost->frame_number)
1675 ost->first_pts = ost->sync_opts;
1677 /* duplicates frame if needed */
1678 for (i = 0; i < nb_frames; i++) {
1680 av_init_packet(&pkt);
1684 if (!check_recording_time(ost))
1687 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1688 enc->codec->id == CODEC_ID_RAWVIDEO) {
1689 /* raw pictures are written as AVPicture structure to
1690 avoid any copies. We support temporarily the older
1692 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1693 enc->coded_frame->top_field_first = in_picture->top_field_first;
1694 pkt.data = (uint8_t *)in_picture;
1695 pkt.size = sizeof(AVPicture);
1696 pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
1697 pkt.flags |= AV_PKT_FLAG_KEY;
1699 write_frame(s, &pkt, ost);
1702 AVFrame big_picture;
/* shallow copy: tweak per-frame encode settings without touching the input */
1704 big_picture = *in_picture;
1705 /* better than nothing: use input picture interlaced
1707 big_picture.interlaced_frame = in_picture->interlaced_frame;
1708 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1709 if (ost->top_field_first == -1)
1710 big_picture.top_field_first = in_picture->top_field_first;
1712 big_picture.top_field_first = !!ost->top_field_first;
1715 /* handles same_quant here. This is not correct because it may
1716 not be a global option */
1717 big_picture.quality = quality;
1718 if (!enc->me_threshold)
1719 big_picture.pict_type = 0;
1720 big_picture.pts = ost->sync_opts;
/* honor -force_key_frames timestamps */
1721 if (ost->forced_kf_index < ost->forced_kf_count &&
1722 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1723 big_picture.pict_type = AV_PICTURE_TYPE_I;
1724 ost->forced_kf_index++;
1726 ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
1728 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1733 if (pkt.pts != AV_NOPTS_VALUE)
1734 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1735 if (pkt.dts != AV_NOPTS_VALUE)
1736 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1738 write_frame(s, &pkt, ost);
1739 *frame_size = pkt.size;
1740 video_size += pkt.size;
1742 /* if two pass, output log */
1743 if (ost->logfile && enc->stats_out) {
1744 fprintf(ost->logfile, "%s", enc->stats_out);
1750 * For video, number of frames in == number of packets out.
1751 * But there may be reordering, so we can't throw away frames on encoder
1752 * flush, we need to limit them here, before they go into encoder.
1754 ost->frame_number++;
1758 static double psnr(double d)
1760 return -10.0 * log(d) / log(10.0);
/*
 * Append one line of per-frame video statistics (quantizer, PSNR, sizes,
 * bitrates, picture type) to the global vstats_file, opening it lazily on
 * the first call.
 */
1763 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1766 AVCodecContext *enc;
1768 double ti1, bitrate, avg_bitrate;
1770 /* this is executed just the first time do_video_stats is called */
1772 vstats_file = fopen(vstats_filename, "w");
1779 enc = ost->st->codec;
1780 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1781 frame_number = ost->frame_number;
1782 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1783 if (enc->flags&CODEC_FLAG_PSNR)
1784 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1786 fprintf(vstats_file,"f_size= %6d ", frame_size);
1787 /* compute pts value */
/* output timestamp in seconds; clamping of tiny values not visible here */
1788 ti1 = ost->sync_opts * av_q2d(enc->time_base);
1792 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1793 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1794 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1795 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1796 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1800 /* check for new output on any of the filtergraphs */
/*
 * Pull every frame currently available on the output side of the
 * filtergraphs and encode it to the corresponding output stream.
 * Returns 0 on success or a negative AVERROR (full return paths not
 * visible in this excerpt).
 */
1801 static int poll_filters(void)
1803 AVFilterBufferRef *picref;
1804 AVFrame *filtered_frame = NULL;
1805 int i, frame_size, ret;
1807 for (i = 0; i < nb_output_streams; i++) {
1808 OutputStream *ost = output_streams[i];
1809 OutputFile *of = output_files[ost->file_index];
/* only filtered, still-active streams participate */
1811 if (!ost->filter || ost->is_past_recording_time)
/* lazily allocate the reusable frame holder on the output stream */
1814 if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1815 return AVERROR(ENOMEM);
1817 avcodec_get_frame_defaults(ost->filtered_frame);
1818 filtered_frame = ost->filtered_frame;
/* drain all frames the filtergraph can currently produce */
1820 while (avfilter_poll_frame(ost->filter->filter->inputs[0])) {
1821 AVRational ist_pts_tb;
1822 if ((ret = get_filtered_video_frame(ost->filter->filter,
1823 filtered_frame, &picref,
1826 filtered_frame->pts = av_rescale_q(picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
/* skip frames that precede the requested output start time */
1828 if (of->start_time && filtered_frame->pts < of->start_time)
1831 switch (ost->filter->filter->inputs[0]->type) {
1832 case AVMEDIA_TYPE_VIDEO:
1833 if (!ost->frame_aspect_ratio)
1834 ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
1836 do_video_out(of->ctx, ost, filtered_frame, &frame_size,
1837 same_quant ? ost->last_quality :
1838 ost->st->codec->global_quality);
1839 if (vstats_filename && frame_size)
1840 do_video_stats(of->ctx, ost, frame_size);
1843 // TODO support audio/subtitle filters
1847 avfilter_unref_buffer(picref);
/*
 * Print the periodic progress line (frame count, fps, q, size, time,
 * bitrate, dup/drop counts) to the log, throttled to one update every
 * 0.5 s; when is_last_report is set, also print the final per-type size
 * summary and muxing overhead.
 */
1853 static void print_report(int is_last_report, int64_t timer_start)
1857 AVFormatContext *oc;
1859 AVCodecContext *enc;
1860 int frame_number, vid, i;
1861 double bitrate, ti1, pts;
/* persistent across calls: throttling timestamp and QP histogram */
1862 static int64_t last_time = -1;
1863 static int qp_histogram[52];
1865 if (!print_stats && !is_last_report)
1868 if (!is_last_report) {
1870 /* display the report every 0.5 seconds */
1871 cur_time = av_gettime();
1872 if (last_time == -1) {
1873 last_time = cur_time;
1876 if ((cur_time - last_time) < 500000)
1878 last_time = cur_time;
1882 oc = output_files[0]->ctx;
1884 total_size = avio_size(oc->pb);
1885 if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
1886 total_size = avio_tell(oc->pb);
1891 for (i = 0; i < nb_output_streams; i++) {
1893 ost = output_streams[i];
1894 enc = ost->st->codec;
1895 if (!ost->stream_copy && enc->coded_frame)
1896 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
/* only the first video stream drives the frame/fps columns; later video
 * streams just append their quantizer */
1897 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1898 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1900 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1901 float t = (av_gettime() - timer_start) / 1000000.0;
1903 frame_number = ost->frame_number;
1904 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1905 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
1907 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1911 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
/* print the histogram as log2 bucket counts, one hex digit each */
1913 for (j = 0; j < 32; j++)
1914 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
1916 if (enc->flags&CODEC_FLAG_PSNR) {
1918 double error, error_sum = 0;
1919 double scale, scale_sum = 0;
1920 char type[3] = { 'Y','U','V' };
1921 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1922 for (j = 0; j < 3; j++) {
/* final report uses the accumulated error, periodic ones the last frame */
1923 if (is_last_report) {
1924 error = enc->error[j];
1925 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1927 error = enc->coded_frame->error[j];
1928 scale = enc->width * enc->height * 255.0 * 255.0;
1934 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1936 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1940 /* compute min output value */
1941 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1942 if ((pts < ti1) && (pts > 0))
1948 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1950 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1951 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1952 (double)total_size / 1024, ti1, bitrate);
1954 if (nb_frames_dup || nb_frames_drop)
1955 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1956 nb_frames_dup, nb_frames_drop);
/* '\r' keeps the progress line updating in place on a terminal */
1958 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
1962 if (is_last_report) {
1963 int64_t raw= audio_size + video_size + extra_size;
1964 av_log(NULL, AV_LOG_INFO, "\n");
1965 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1966 video_size / 1024.0,
1967 audio_size / 1024.0,
1968 extra_size / 1024.0,
1969 100.0 * (total_size - raw) / raw
/*
 * Drain every output encoder at end of stream: encode leftover FIFO audio
 * (padded with silence when the codec can't take a short last frame), then
 * feed NULL frames until the encoders stop returning packets.
 */
1974 static void flush_encoders(void)
1978 for (i = 0; i < nb_output_streams; i++) {
1979 OutputStream *ost = output_streams[i];
1980 AVCodecContext *enc = ost->st->codec;
1981 AVFormatContext *os = output_files[ost->file_index]->ctx;
1982 int stop_encoding = 0;
1984 if (!ost->encoding_needed)
/* raw/PCM-style encoders (frame_size <= 1) and raw video need no flush */
1987 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1989 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
1994 int fifo_bytes, got_packet;
1995 av_init_packet(&pkt);
1999 switch (ost->st->codec->codec_type) {
2000 case AVMEDIA_TYPE_AUDIO:
2001 fifo_bytes = av_fifo_size(ost->fifo);
2002 if (fifo_bytes > 0) {
2003 /* encode any samples remaining in fifo */
2004 int frame_bytes = fifo_bytes;
2006 av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
2008 /* pad last frame with silence if needed */
2009 if (!(enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)) {
2010 frame_bytes = enc->frame_size * enc->channels *
2011 av_get_bytes_per_sample(enc->sample_fmt);
2012 if (allocated_audio_buf_size < frame_bytes)
2014 generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
2016 encode_audio_frame(os, ost, audio_buf, frame_bytes);
2018 /* flush encoder with NULL frames until it is done
2019 returning packets */
2020 if (encode_audio_frame(os, ost, NULL, 0) == 0) {
2026 case AVMEDIA_TYPE_VIDEO:
/* NULL frame drains the encoder's internal reorder buffer */
2027 ret = avcodec_encode_video2(enc, &pkt, NULL, &got_packet);
2029 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
2033 if (ost->logfile && enc->stats_out) {
2034 fprintf(ost->logfile, "%s", enc->stats_out);
2040 if (pkt.pts != AV_NOPTS_VALUE)
2041 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
2042 if (pkt.dts != AV_NOPTS_VALUE)
2043 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
2044 write_frame(os, &pkt, ost);
2056 * Check whether a packet from ist should be written into ost at this time
/*
 * Decide whether a packet from ist may be written to ost right now:
 * the stream indices must match and the packet must not precede the
 * output file's start time. (Return statements are on lines not visible
 * in this excerpt.)
 */
2058 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2060 OutputFile *of = output_files[ost->file_index];
/* global input-stream index: per-file base offset plus stream index */
2061 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2063 if (ost->source_index != ist_index)
2066 if (of->start_time && ist->last_dts < of->start_time)
/*
 * Copy one packet from an input stream to an output stream without
 * re-encoding: rescale timestamps into the output time base, shift them by
 * the output start time, run the parser-based bitstream fixups for a few
 * codecs, and mux.
 */
2072 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2074 OutputFile *of = output_files[ost->file_index];
2075 int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
2078 av_init_packet(&opkt);
/* drop leading non-keyframes unless the user asked to keep them */
2080 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2081 !ost->copy_initial_nonkeyframes)
2084 if (of->recording_time != INT64_MAX &&
2085 ist->last_dts >= of->recording_time + of->start_time) {
2086 ost->is_past_recording_time = 1;
2090 /* force the input stream PTS */
2091 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
2092 audio_size += pkt->size;
2093 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2094 video_size += pkt->size;
2098 if (pkt->pts != AV_NOPTS_VALUE)
2099 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
2101 opkt.pts = AV_NOPTS_VALUE;
/* missing dts: fall back to the input stream's running dts estimate */
2103 if (pkt->dts == AV_NOPTS_VALUE)
2104 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
2106 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
2107 opkt.dts -= ost_tb_start_time;
2109 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
2110 opkt.flags = pkt->flags;
2112 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
/* these codecs are passed through untouched; everything else goes through
 * av_parser_change(), which may allocate a rewritten payload */
2113 if ( ost->st->codec->codec_id != CODEC_ID_H264
2114 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
2115 && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
2116 && ost->st->codec->codec_id != CODEC_ID_VC1
2118 if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
2119 opkt.destruct = av_destruct_packet;
2121 opkt.data = pkt->data;
2122 opkt.size = pkt->size;
2125 write_frame(of->ctx, &opkt, ost);
2126 ost->st->codec->frame_number++;
2127 av_free_packet(&opkt);
/*
 * When -re (rate emulation) is enabled for this input file, sleep so that
 * packets are consumed no faster than real time: compares the stream's last
 * dts (converted to microseconds) against wall-clock time since the stream
 * started. (The sleep call itself is on a line not visible in this excerpt.)
 */
2130 static void rate_emu_sleep(InputStream *ist)
2132 if (input_files[ist->file_index]->rate_emu) {
2133 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2134 int64_t now = av_gettime() - ist->start;
/*
 * Decode one audio packet, apply the optional software volume adjustment
 * in-place, update the input stream's timestamp estimate, and hand the
 * decoded frame to every output stream that encodes from this input.
 * Sets *got_output when the decoder produced a frame.
 */
2140 static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2142 AVFrame *decoded_frame;
2143 AVCodecContext *avctx = ist->st->codec;
2144 int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
/* lazily allocate the reusable decode frame on the input stream */
2147 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2148 return AVERROR(ENOMEM);
2150 avcodec_get_frame_defaults(ist->decoded_frame);
2151 decoded_frame = ist->decoded_frame;
2153 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
2159 /* no audio frame */
2163 /* if the decoder provides a pts, use it instead of the last packet pts.
2164 the decoder could be delaying output by a packet or more. */
2165 if (decoded_frame->pts != AV_NOPTS_VALUE)
2166 ist->next_dts = decoded_frame->pts;
2168 /* increment next_dts to use for the case where the input stream does not
2169 have timestamps or there are multiple frames in the packet */
2170 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2173 // preprocess audio (volume)
/* 256 == unity gain; anything else scales samples per sample format */
2174 if (audio_volume != 256) {
2175 int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
2176 void *samples = decoded_frame->data[0];
2177 switch (avctx->sample_fmt) {
2178 case AV_SAMPLE_FMT_U8:
2180 uint8_t *volp = samples;
2181 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
/* re-center around 0 before scaling, then shift back to unsigned */
2182 int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
2183 *volp++ = av_clip_uint8(v);
2187 case AV_SAMPLE_FMT_S16:
2189 int16_t *volp = samples;
2190 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2191 int v = ((*volp) * audio_volume + 128) >> 8;
2192 *volp++ = av_clip_int16(v);
2196 case AV_SAMPLE_FMT_S32:
2198 int32_t *volp = samples;
2199 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
/* 64-bit intermediate avoids overflow for full-scale 32-bit samples */
2200 int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
2201 *volp++ = av_clipl_int32(v);
2205 case AV_SAMPLE_FMT_FLT:
2207 float *volp = samples;
2208 float scale = audio_volume / 256.f;
2209 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2214 case AV_SAMPLE_FMT_DBL:
2216 double *volp = samples;
2217 double scale = audio_volume / 256.;
2218 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2224 av_log(NULL, AV_LOG_FATAL,
2225 "Audio volume adjustment on sample format %s is not supported.\n",
2226 av_get_sample_fmt_name(ist->st->codec->sample_fmt));
2231 rate_emu_sleep(ist);
/* fan the decoded frame out to every matching encoding output stream */
2233 for (i = 0; i < nb_output_streams; i++) {
2234 OutputStream *ost = output_streams[i];
2236 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2238 do_audio_out(output_files[ost->file_index]->ctx, ost, ist, decoded_frame);
/*
 * Decode one video packet, fix up its timestamps and aspect ratio,
 * reconfigure the filtergraphs when the frame geometry/format changed, and
 * inject the decoded frame into every filtergraph input fed by this stream.
 * Sets *got_output when the decoder produced a picture; *pkt_pts carries
 * the pts of the pending packet across calls and is consumed here.
 */
2244 static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
2246 AVFrame *decoded_frame;
2247 void *buffer_to_free = NULL;
2248 int i, ret = 0, resample_changed;
2251 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2252 return AVERROR(ENOMEM);
2254 avcodec_get_frame_defaults(ist->decoded_frame);
2255 decoded_frame = ist->decoded_frame;
/* feed the saved pts/dts into the decoder, then invalidate the saved pts */
2256 pkt->pts = *pkt_pts;
2257 pkt->dts = ist->last_dts;
2258 *pkt_pts = AV_NOPTS_VALUE;
2260 ret = avcodec_decode_video2(ist->st->codec,
2261 decoded_frame, got_output, pkt);
2265 quality = same_quant ? decoded_frame->quality : 0;
2267 /* no picture yet */
/* on EOF-with-no-picture, signal end of stream to the buffer sources */
2269 for (i = 0; i < ist->nb_filters; i++)
2270 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2273 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
2274 decoded_frame->pkt_dts);
2276 pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
2278 rate_emu_sleep(ist);
/* container-level SAR overrides whatever the decoder reported */
2280 if (ist->st->sample_aspect_ratio.num)
2281 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2283 resample_changed = ist->resample_width != decoded_frame->width ||
2284 ist->resample_height != decoded_frame->height ||
2285 ist->resample_pix_fmt != decoded_frame->format;
2286 if (resample_changed) {
2287 av_log(NULL, AV_LOG_INFO,
2288 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2289 ist->file_index, ist->st->index,
2290 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2291 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2293 ist->resample_width = decoded_frame->width;
2294 ist->resample_height = decoded_frame->height;
2295 ist->resample_pix_fmt = decoded_frame->format;
/* geometry changed: rebuild every filtergraph this stream feeds */
2297 for (i = 0; i < nb_filtergraphs; i++)
2298 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2299 configure_filtergraph(filtergraphs[i]) < 0) {
2300 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2305 for (i = 0; i < ist->nb_filters; i++) {
2306 // XXX what an ugly hack
2307 if (ist->filters[i]->graph->nb_outputs == 1)
2308 ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
/* DR1 decoders: wrap the decoder's own buffer as a filter buffer ref to
 * avoid a copy; otherwise fall back to the copying vsrc_buffer API */
2310 if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
2311 FrameBuffer *buf = decoded_frame->opaque;
2312 AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
2313 decoded_frame->data, decoded_frame->linesize,
2314 AV_PERM_READ | AV_PERM_PRESERVE,
2315 ist->st->codec->width, ist->st->codec->height,
2316 ist->st->codec->pix_fmt);
2318 avfilter_copy_frame_props(fb, decoded_frame);
2319 fb->buf->priv = buf;
2320 fb->buf->free = filter_release_buffer;
2323 av_buffersrc_buffer(ist->filters[i]->filter, fb);
2325 av_vsrc_buffer_add_frame(ist->filters[i]->filter, decoded_frame,
2326 decoded_frame->pts, decoded_frame->sample_aspect_ratio);
2329 av_free(buffer_to_free);
/*
 * Decode one subtitle packet and pass the decoded subtitle to every output
 * stream that encodes from this input, then free it. Sets *got_output when
 * a subtitle was produced.
 */
2333 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2335 AVSubtitle subtitle;
2336 int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2337 &subtitle, got_output, pkt);
2343 rate_emu_sleep(ist);
2345 for (i = 0; i < nb_output_streams; i++) {
2346 OutputStream *ost = output_streams[i];
2348 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2351 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
/* decoded subtitle owns allocated rects; always release them */
2354 avsubtitle_free(&subtitle);
2358 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Process one demuxed packet for an input stream: when decoding is needed,
 * repeatedly decode (a packet may contain several frames, and NULL flushes
 * the decoder at EOF) and advance the stream's dts estimate per media type;
 * otherwise fall through to plain stream copy for matching outputs.
 */
2359 static int output_packet(InputStream *ist, const AVPacket *pkt)
2363 int64_t pkt_pts = AV_NOPTS_VALUE;
2366 if (ist->next_dts == AV_NOPTS_VALUE)
2367 ist->next_dts = ist->last_dts;
2371 av_init_packet(&avpkt);
/* seed the running timestamps from the packet, rescaled to AV_TIME_BASE */
2379 if (pkt->dts != AV_NOPTS_VALUE)
2380 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2381 if (pkt->pts != AV_NOPTS_VALUE)
2382 pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2384 // while we have more to decode or while the decoder did output something on EOF
2385 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2389 ist->last_dts = ist->next_dts;
2391 if (avpkt.size && avpkt.size != pkt->size) {
2392 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2393 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2394 ist->showed_multi_packet_warning = 1;
2397 switch (ist->st->codec->codec_type) {
2398 case AVMEDIA_TYPE_AUDIO:
2399 ret = transcode_audio (ist, &avpkt, &got_output);
2401 case AVMEDIA_TYPE_VIDEO:
2402 ret = transcode_video (ist, &avpkt, &got_output, &pkt_pts);
/* advance next_dts using, in order of preference: the packet duration,
 * the stream's frame rate, or the codec time base with repeat_pict */
2404 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2405 else if (ist->st->r_frame_rate.num)
2406 ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
2407 ist->st->r_frame_rate.num},
2409 else if (ist->st->codec->time_base.num != 0) {
2410 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
2411 ist->st->codec->ticks_per_frame;
2412 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
2415 case AVMEDIA_TYPE_SUBTITLE:
2416 ret = transcode_subtitles(ist, &avpkt, &got_output);
2424 // touch data and size only if not EOF
2434 /* handle stream copy */
2435 if (!ist->decoding_needed) {
2436 rate_emu_sleep(ist);
2437 ist->last_dts = ist->next_dts;
/* without decoding, estimate duration from codec parameters */
2438 switch (ist->st->codec->codec_type) {
2439 case AVMEDIA_TYPE_AUDIO:
2440 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2441 ist->st->codec->sample_rate;
2443 case AVMEDIA_TYPE_VIDEO:
2444 if (ist->st->codec->time_base.num != 0) {
2445 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2446 ist->next_dts += ((int64_t)AV_TIME_BASE *
2447 ist->st->codec->time_base.num * ticks) /
2448 ist->st->codec->time_base.den;
/* forward the packet unmodified to every matching copy output */
2453 for (i = 0; pkt && i < nb_output_streams; i++) {
2454 OutputStream *ost = output_streams[i];
2456 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2459 do_streamcopy(ist, ost, pkt);
2465 static void print_sdp(void)
2469 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
2473 for (i = 0; i < nb_output_files; i++)
2474 avc[i] = output_files[i]->ctx;
2476 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
2477 printf("SDP:\n%s\n", sdp);
/* NOTE(review): fragment of a line-numbered listing; some lines (braces,
 * declarations, the decoder-lookup guard) are missing from this view.
 * Opens the decoder for one input stream, wiring custom get/release_buffer
 * callbacks for DR1-capable video decoders, and initializes the stream's
 * timestamp bookkeeping. Writes a message into `error` and returns a
 * negative AVERROR on failure. */
2482 static int init_input_stream(int ist_index, char *error, int error_len)
2485 InputStream *ist = input_streams[ist_index];
2486 if (ist->decoding_needed) {
2487 AVCodec *codec = ist->dec;
2489 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2490 ist->st->codec->codec_id, ist->file_index, ist->st->index);
2491 return AVERROR(EINVAL);
2494 /* update requested sample format for the decoder based on the
2495 corresponding encoder sample format */
2496 for (i = 0; i < nb_output_streams; i++) {
2497 OutputStream *ost = output_streams[i];
2498 if (ost->source_index == ist_index) {
2499 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
/* DR1 video decoders render into caller-supplied buffers; install our
 * buffer pool callbacks and stash the InputStream in opaque */
2504 if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
2505 ist->st->codec->get_buffer = codec_get_buffer;
2506 ist->st->codec->release_buffer = codec_release_buffer;
2507 ist->st->codec->opaque = ist;
/* default to automatic thread count unless the user asked otherwise */
2510 if (!av_dict_get(ist->opts, "threads", NULL, 0))
2511 av_dict_set(&ist->opts, "threads", "auto", 0);
2512 if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2513 snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2514 ist->file_index, ist->st->index);
2515 return AVERROR(EINVAL);
2517 assert_codec_experimental(ist->st->codec, 0);
2518 assert_avoptions(ist->opts);
/* audio: make sure matching outputs get a usable channel layout */
2520 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2521 for (i = 0; i < nb_output_streams; i++) {
2522 OutputStream *ost = output_streams[i];
2523 if (ost->source_index == ist_index) {
2524 if (!ist->st->codec->channel_layout || !ost->st->codec->channel_layout)
2525 get_default_channel_layouts(ost, ist);
/* start last_dts before zero to account for leading B-frame delay */
2532 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2533 ist->next_dts = AV_NOPTS_VALUE;
2534 init_pts_correction(&ist->pts_ctx);
2540 static InputStream *get_input_stream(OutputStream *ost)
2542 if (ost->source_index >= 0)
2543 return input_streams[ost->source_index];
2546 FilterGraph *fg = ost->filter->graph;
2549 for (i = 0; i < fg->nb_inputs; i++)
2550 if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
2551 return fg->inputs[i]->ist;
/* NOTE(review): large fragment of a line-numbered listing; many lines
 * (braces, error-path gotos, some declarations) are missing from this view.
 * One-time setup before the main transcode loop: computes encoder
 * parameters for every output stream (copying from the input codec for
 * stream copy, or from filters/defaults when encoding), opens all encoders
 * and decoders, writes output file headers, and logs the stream mapping. */
2557 static int transcode_init(void)
2559 int ret = 0, i, j, k;
2560 AVFormatContext *oc;
2561 AVCodecContext *codec, *icodec;
2567 /* init framerate emulation */
2568 for (i = 0; i < nb_input_files; i++) {
2569 InputFile *ifile = input_files[i];
2570 if (ifile->rate_emu)
2571 for (j = 0; j < ifile->nb_streams; j++)
2572 input_streams[j + ifile->ist_index]->start = av_gettime();
2575 /* output stream init */
2576 for (i = 0; i < nb_output_files; i++) {
2577 oc = output_files[i]->ctx;
2578 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2579 av_dump_format(oc, i, oc->filename, 1);
2580 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2581 return AVERROR(EINVAL);
2585 /* init complex filtergraphs */
2586 for (i = 0; i < nb_filtergraphs; i++)
2587 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2590 /* for each output stream, we compute the right encoding parameters */
2591 for (i = 0; i < nb_output_streams; i++) {
2592 ost = output_streams[i];
2593 oc = output_files[ost->file_index]->ctx;
2594 ist = get_input_stream(ost);
2596 if (ost->attachment_filename)
2599 codec = ost->st->codec;
2602 icodec = ist->st->codec;
2604 ost->st->disposition = ist->st->disposition;
2605 codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2606 codec->chroma_sample_location = icodec->chroma_sample_location;
/* --- stream copy: mirror the input codec parameters verbatim --- */
2609 if (ost->stream_copy) {
2610 uint64_t extra_size;
2612 av_assert0(ist && !ost->filter);
2614 extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2616 if (extra_size > INT_MAX) {
2617 return AVERROR(EINVAL);
2620 /* if stream_copy is selected, no need to decode or encode */
2621 codec->codec_id = icodec->codec_id;
2622 codec->codec_type = icodec->codec_type;
/* keep the input codec tag only when the output container accepts it */
2624 if (!codec->codec_tag) {
2625 if (!oc->oformat->codec_tag ||
2626 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2627 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2628 codec->codec_tag = icodec->codec_tag;
2631 codec->bit_rate = icodec->bit_rate;
2632 codec->rc_max_rate = icodec->rc_max_rate;
2633 codec->rc_buffer_size = icodec->rc_buffer_size;
2634 codec->field_order = icodec->field_order;
2635 codec->extradata = av_mallocz(extra_size);
2636 if (!codec->extradata) {
2637 return AVERROR(ENOMEM);
2639 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2640 codec->extradata_size = icodec->extradata_size;
2642 codec->time_base = icodec->time_base;
2643 codec->time_base.num *= icodec->ticks_per_frame;
2644 av_reduce(&codec->time_base.num, &codec->time_base.den,
2645 codec->time_base.num, codec->time_base.den, INT_MAX);
2647 codec->time_base = ist->st->time_base;
2649 switch (codec->codec_type) {
2650 case AVMEDIA_TYPE_AUDIO:
2651 if (audio_volume != 256) {
2652 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2655 codec->channel_layout = icodec->channel_layout;
2656 codec->sample_rate = icodec->sample_rate;
2657 codec->channels = icodec->channels;
2658 codec->frame_size = icodec->frame_size;
2659 codec->audio_service_type = icodec->audio_service_type;
2660 codec->block_align = icodec->block_align;
2662 case AVMEDIA_TYPE_VIDEO:
2663 codec->pix_fmt = icodec->pix_fmt;
2664 codec->width = icodec->width;
2665 codec->height = icodec->height;
2666 codec->has_b_frames = icodec->has_b_frames;
/* prefer container-level SAR, then codec-level, else unknown {0,1} */
2667 if (!codec->sample_aspect_ratio.num) {
2668 codec->sample_aspect_ratio =
2669 ost->st->sample_aspect_ratio =
2670 ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2671 ist->st->codec->sample_aspect_ratio.num ?
2672 ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2675 case AVMEDIA_TYPE_SUBTITLE:
2676 codec->width = icodec->width;
2677 codec->height = icodec->height;
2679 case AVMEDIA_TYPE_DATA:
2680 case AVMEDIA_TYPE_ATTACHMENT:
/* --- encoding path --- */
2687 /* should only happen when a default codec is not present. */
2688 snprintf(error, sizeof(error), "Automatic encoder selection "
2689 "failed for output stream #%d:%d. Default encoder for "
2690 "format %s is probably disabled. Please choose an "
2691 "encoder manually.\n", ost->file_index, ost->index,
2693 ret = AVERROR(EINVAL);
2698 ist->decoding_needed = 1;
2699 ost->encoding_needed = 1;
2701 switch (codec->codec_type) {
2702 case AVMEDIA_TYPE_AUDIO:
2703 ost->fifo = av_fifo_alloc(1024);
2705 return AVERROR(ENOMEM);
/* fill unset audio parameters from the input, then clamp to what the
 * encoder supports */
2708 if (!codec->sample_rate)
2709 codec->sample_rate = icodec->sample_rate;
2710 choose_sample_rate(ost->st, ost->enc);
2711 codec->time_base = (AVRational){ 1, codec->sample_rate };
2713 if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
2714 codec->sample_fmt = icodec->sample_fmt;
2715 choose_sample_fmt(ost->st, ost->enc);
2717 if (!codec->channels)
2718 codec->channels = icodec->channels;
2719 if (!codec->channel_layout)
2720 codec->channel_layout = icodec->channel_layout;
2721 if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels)
2722 codec->channel_layout = 0;
2724 icodec->request_channels = codec-> channels;
2725 ost->resample_sample_fmt = icodec->sample_fmt;
2726 ost->resample_sample_rate = icodec->sample_rate;
2727 ost->resample_channels = icodec->channels;
2728 ost->resample_channel_layout = icodec->channel_layout;
2730 case AVMEDIA_TYPE_VIDEO:
2733 fg = init_simple_filtergraph(ist, ost);
2734 if (configure_video_filters(fg)) {
2735 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2741 * We want CFR output if and only if one of those is true:
2742 * 1) user specified output framerate with -r
2743 * 2) user specified -vsync cfr
2744 * 3) output format is CFR and the user didn't force vsync to
2745 * something else than CFR
2747 * in such a case, set ost->frame_rate
2749 if (!ost->frame_rate.num && ist &&
2750 (video_sync_method == VSYNC_CFR ||
2751 (video_sync_method == VSYNC_AUTO &&
2752 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
2753 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
/* snap to the nearest frame rate the encoder advertises */
2754 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2755 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2756 ost->frame_rate = ost->enc->supported_framerates[idx];
2759 if (ost->frame_rate.num) {
2760 codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
2761 video_sync_method = VSYNC_CFR;
2763 codec->time_base = ist->st->time_base;
2765 codec->time_base = ost->filter->filter->inputs[0]->time_base;
/* geometry/pix_fmt come from the configured filter chain output */
2767 codec->width = ost->filter->filter->inputs[0]->w;
2768 codec->height = ost->filter->filter->inputs[0]->h;
2769 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2770 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2771 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2772 ost->filter->filter->inputs[0]->sample_aspect_ratio;
2773 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
2775 if (codec->width != icodec->width ||
2776 codec->height != icodec->height ||
2777 codec->pix_fmt != icodec->pix_fmt) {
2778 codec->bits_per_raw_sample = 0;
2782 case AVMEDIA_TYPE_SUBTITLE:
2783 codec->time_base = (AVRational){1, 1000};
/* two-pass encoding: open/read the per-stream stats log file */
2790 if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2791 char logfilename[1024];
2794 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2795 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
2797 if (!strcmp(ost->enc->name, "libx264")) {
2798 av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2800 if (codec->flags & CODEC_FLAG_PASS1) {
2801 f = fopen(logfilename, "wb");
2803 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2804 logfilename, strerror(errno));
2810 size_t logbuffer_size;
2811 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2812 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2816 codec->stats_in = logbuffer;
2823 /* open each encoder */
2824 for (i = 0; i < nb_output_streams; i++) {
2825 ost = output_streams[i];
2826 if (ost->encoding_needed) {
2827 AVCodec      *codec = ost->enc;
2828 AVCodecContext *dec = NULL;
2830 if ((ist = get_input_stream(ost)))
2831 dec = ist->st->codec;
/* propagate the decoder's subtitle header (e.g. ASS) to the encoder */
2832 if (dec && dec->subtitle_header) {
2833 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2834 if (!ost->st->codec->subtitle_header) {
2835 ret = AVERROR(ENOMEM);
2838 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2839 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2841 if (!av_dict_get(ost->opts, "threads", NULL, 0))
2842 av_dict_set(&ost->opts, "threads", "auto", 0);
2843 if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2844 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2845 ost->file_index, ost->index);
2846 ret = AVERROR(EINVAL);
2849 assert_codec_experimental(ost->st->codec, 1);
2850 assert_avoptions(ost->opts);
2851 if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2852 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2853 "It takes bits/s as argument, not kbits/s\n");
2854 extra_size += ost->st->codec->extradata_size;
2856 if (ost->st->codec->me_threshold)
2857 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2861 /* init input streams */
2862 for (i = 0; i < nb_input_streams; i++)
2863 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2866 /* discard unused programs */
2867 for (i = 0; i < nb_input_files; i++) {
2868 InputFile *ifile = input_files[i];
2869 for (j = 0; j < ifile->ctx->nb_programs; j++) {
2870 AVProgram *p = ifile->ctx->programs[j];
2871 int discard  = AVDISCARD_ALL;
2873 for (k = 0; k < p->nb_stream_indexes; k++)
2874 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2875 discard = AVDISCARD_DEFAULT;
2878 p->discard = discard;
2882 /* open files and write file headers */
2883 for (i = 0; i < nb_output_files; i++) {
2884 oc = output_files[i]->ctx;
2885 oc->interrupt_callback = int_cb;
2886 if (avformat_write_header(oc, &output_files[i]->opts) < 0) {
2887 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2888 ret = AVERROR(EINVAL);
2891 assert_avoptions(output_files[i]->opts);
2892 if (strcmp(oc->oformat->name, "rtp")) {
2898 /* dump the file output parameters - cannot be done before in case
2900 for (i = 0; i < nb_output_files; i++) {
2901 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2904 /* dump the stream mapping */
2905 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2906 for (i = 0; i < nb_input_streams; i++) {
2907 ist = input_streams[i];
2909 for (j = 0; j < ist->nb_filters; j++) {
2910 AVFilterLink *link = ist->filters[j]->filter->outputs[0];
2911 if (ist->filters[j]->graph->graph_desc) {
2912 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
2913 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2914 link->dst->filter->name);
2915 if (link->dst->input_count > 1)
2916 av_log(NULL, AV_LOG_INFO, ":%s", link->dstpad->name);
2917 if (nb_filtergraphs > 1)
2918 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2919 av_log(NULL, AV_LOG_INFO, "\n");
2924 for (i = 0; i < nb_output_streams; i++) {
2925 ost = output_streams[i];
2927 if (ost->attachment_filename) {
2928 /* an attached file */
2929 av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
2930 ost->attachment_filename, ost->file_index, ost->index);
2934 if (ost->filter && ost->filter->graph->graph_desc) {
2935 /* output from a complex graph */
2936 AVFilterLink *link = ost->filter->filter->inputs[0];
2937 av_log(NULL, AV_LOG_INFO, "  %s", link->src->filter->name);
2938 if (link->src->output_count > 1)
2939 av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name);
2940 if (nb_filtergraphs > 1)
2941 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2943 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2944 ost->index, ost->enc ? ost->enc->name : "?");
2948 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
2949 input_streams[ost->source_index]->file_index,
2950 input_streams[ost->source_index]->st->index,
2953 if (ost->sync_ist != input_streams[ost->source_index])
2954 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2955 ost->sync_ist->file_index,
2956 ost->sync_ist->st->index);
2957 if (ost->stream_copy)
2958 av_log(NULL, AV_LOG_INFO, " (copy)");
2960 av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2961 input_streams[ost->source_index]->dec->name : "?",
2962 ost->enc ? ost->enc->name : "?");
2963 av_log(NULL, AV_LOG_INFO, "\n");
2967 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
 * The following code is the main loop of the file converter
/* NOTE(review): fragment of a line-numbered listing; braces, labels
 * (discard_packet/fail) and some declarations are missing from this view.
 * Main loop: repeatedly picks the input with the smallest output DTS,
 * reads a packet, fixes timestamps (offset, scale, discontinuities),
 * decodes/copies it, then flushes decoders, writes trailers and frees
 * all per-stream resources. */
static int transcode(void)
 AVFormatContext *is, *os;
 int no_packet_count = 0;
 int64_t timer_start;
/* no_packet[] marks files that returned EAGAIN this round */
 if (!(no_packet = av_mallocz(nb_input_files)))
 ret = transcode_init();
 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
 timer_start = av_gettime();
 for (; received_sigterm == 0;) {
 int file_index, ist_index, past_recording_time = 1;
 ipts_min = INT64_MAX;
 /* check if there's any stream where output is still needed */
 for (i = 0; i < nb_output_streams; i++) {
 ost = output_streams[i];
 of  = output_files[ost->file_index];
 os  = output_files[ost->file_index]->ctx;
 if (ost->is_past_recording_time ||
 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: mark the whole output file as done */
 if (ost->frame_number > ost->max_frames) {
 for (j = 0; j < of->ctx->nb_streams; j++)
 output_streams[of->ost_index + j]->is_past_recording_time = 1;
 past_recording_time = 0;
 if (past_recording_time)
 /* select the stream that we must read now by looking at the
 smallest output pts */
 for (i = 0; i < nb_input_streams; i++) {
 ist = input_streams[i];
 ipts = ist->last_dts;
 if (ist->discard || no_packet[ist->file_index])
 if (!input_files[ist->file_index]->eof_reached) {
 if (ipts < ipts_min) {
 file_index = ist->file_index;
 /* if none, if is finished */
 if (file_index < 0) {
 if (no_packet_count) {
 no_packet_count = 0;
 memset(no_packet, 0, nb_input_files);
 /* read a frame from it and output it in the fifo */
 is  = input_files[file_index]->ctx;
 ret = av_read_frame(is, &pkt);
 if (ret == AVERROR(EAGAIN)) {
 no_packet[file_index] = 1;
/* EOF on this file: flush its decoders with a NULL packet */
 input_files[file_index]->eof_reached = 1;
 for (i = 0; i < input_files[file_index]->nb_streams; i++) {
 ist = input_streams[input_files[file_index]->ist_index + i];
 if (ist->decoding_needed)
 output_packet(ist, NULL);
 no_packet_count = 0;
 memset(no_packet, 0, nb_input_files);
 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
 is->streams[pkt.stream_index]);
 /* the following test is needed in case new streams appear
 dynamically in stream : we ignore them */
 if (pkt.stream_index >= input_files[file_index]->nb_streams)
 goto discard_packet;
 ist_index = input_files[file_index]->ist_index + pkt.stream_index;
 ist = input_streams[ist_index];
 goto discard_packet;
/* apply the per-file timestamp offset, then the per-stream -itsscale */
 if (pkt.dts != AV_NOPTS_VALUE)
 pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
 if (pkt.pts != AV_NOPTS_VALUE)
 pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
 if (pkt.pts != AV_NOPTS_VALUE)
 pkt.pts *= ist->ts_scale;
 if (pkt.dts != AV_NOPTS_VALUE)
 pkt.dts *= ist->ts_scale;
 //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
 //        pkt.dts, input_files[ist->file_index].ts_offset,
 //        ist->st->codec->codec_type);
/* formats with AVFMT_TS_DISCONT (e.g. MPEG-TS) may jump; fold large
 * DTS discontinuities into the file's ts_offset */
 if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
 int64_t delta   = pkt_dts - ist->next_dts;
 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
 input_files[ist->file_index]->ts_offset -= delta;
 av_log(NULL, AV_LOG_DEBUG,
 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
 delta, input_files[ist->file_index]->ts_offset);
 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 if (pkt.pts != AV_NOPTS_VALUE)
 pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
 if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
 ist->file_index, ist->st->index);
 av_free_packet(&pkt);
 av_free_packet(&pkt);
 /* dump report by using the output first video and audio streams */
 print_report(0, timer_start);
 /* at the end of stream, we must flush the decoder buffers */
 for (i = 0; i < nb_input_streams; i++) {
 ist = input_streams[i];
 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
 output_packet(ist, NULL);
 /* write the trailer if needed and close file */
 for (i = 0; i < nb_output_files; i++) {
 os = output_files[i]->ctx;
 av_write_trailer(os);
 /* dump report by using the first video and audio streams */
 print_report(1, timer_start);
 /* close each encoder */
 for (i = 0; i < nb_output_streams; i++) {
 ost = output_streams[i];
 if (ost->encoding_needed) {
 av_freep(&ost->st->codec->stats_in);
 avcodec_close(ost->st->codec);
 /* close each decoder */
 for (i = 0; i < nb_input_streams; i++) {
 ist = input_streams[i];
 if (ist->decoding_needed) {
 avcodec_close(ist->st->codec);
 av_freep(&no_packet);
/* per-output-stream cleanup */
 if (output_streams) {
 for (i = 0; i < nb_output_streams; i++) {
 ost = output_streams[i];
 if (ost->stream_copy)
 av_freep(&ost->st->codec->extradata);
 fclose(ost->logfile);
 ost->logfile = NULL;
 av_fifo_free(ost->fifo); /* works even if fifo is not
 initialized but set to zero */
 av_freep(&ost->st->codec->subtitle_header);
 av_free(ost->forced_kf_pts);
 avresample_free(&ost->avr);
 av_dict_free(&ost->opts);
3209 static double parse_frame_aspect_ratio(const char *arg)
3216 p = strchr(arg, ':');
3218 x = strtol(arg, &end, 10);
3220 y = strtol(end + 1, &end, 10);
3222 ar = (double)x / (double)y;
3224 ar = strtod(arg, NULL);
3227 av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
3233 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
3235 return parse_option(o, "codec:a", arg, options);
3238 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
3240 return parse_option(o, "codec:v", arg, options);
3243 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
3245 return parse_option(o, "codec:s", arg, options);
3248 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
3250 return parse_option(o, "codec:d", arg, options);
/* NOTE(review): fragment of a line-numbered listing; braces, the negative-map
 * handling and error exits are missing from this view.
 * Parses a -map argument "file[:spec][,syncfile[:syncspec]]" (or "[label]"
 * for a filtergraph output) and appends matching StreamMap entries to
 * o->stream_maps. */
3253 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
3255 StreamMap *m = NULL;
3256 int i, negative = 0, file_idx;
3257 int sync_file_idx = -1, sync_stream_idx;
3265 map = av_strdup(arg);
3267 /* parse sync stream first, just pick first matching stream */
3268 if (sync = strchr(map, ',')) {
3270 sync_file_idx = strtol(sync + 1, &sync, 0);
3271 if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
3272 av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
3277 for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
3278 if (check_stream_specifier(input_files[sync_file_idx]->ctx,
3279 input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
3280 sync_stream_idx = i;
3283 if (i == input_files[sync_file_idx]->nb_streams) {
3284 av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
3285 "match any streams.\n", arg);
/* "[label]" maps the output of a complex filtergraph, not an input file */
3291 if (map[0] == '[') {
3292 /* this mapping refers to lavfi output */
3293 const char *c = map + 1;
3294 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3295 &o->nb_stream_maps, o->nb_stream_maps + 1);
3296 m = &o->stream_maps[o->nb_stream_maps - 1];
3297 m->linklabel = av_get_token(&c, "]");
3298 if (!m->linklabel) {
3299 av_log(NULL, AV_LOG_ERROR, "Invalid output link label: %s.\n", map);
3303 file_idx = strtol(map, &p, 0);
3304 if (file_idx >= nb_input_files || file_idx < 0) {
3305 av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
3309 /* disable some already defined maps */
3310 for (i = 0; i < o->nb_stream_maps; i++) {
3311 m = &o->stream_maps[i];
3312 if (file_idx == m->file_index &&
3313 check_stream_specifier(input_files[m->file_index]->ctx,
3314 input_files[m->file_index]->ctx->streams[m->stream_index],
3315 *p == ':' ? p + 1 : p) > 0)
/* add a map entry for every stream of the file matching the specifier */
3319 for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
3320 if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
3321 *p == ':' ? p + 1 : p) <= 0)
3323 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3324 &o->nb_stream_maps, o->nb_stream_maps + 1);
3325 m = &o->stream_maps[o->nb_stream_maps - 1];
3327 m->file_index = file_idx;
3328 m->stream_index = i;
/* sync defaults to the mapped stream itself when no ",sync" was given */
3330 if (sync_file_idx >= 0) {
3331 m->sync_file_index = sync_file_idx;
3332 m->sync_stream_index = sync_stream_idx;
3334 m->sync_file_index = file_idx;
3335 m->sync_stream_index = i;
3341 av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
3349 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
3351 o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
3352 &o->nb_attachments, o->nb_attachments + 1);
3353 o->attachments[o->nb_attachments - 1] = arg;
 * Parse a metadata specifier in arg.
 * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
 * @param index for type c/p, chapter/program index is written here
 * @param stream_spec for type s, the stream specifier is written here
/* NOTE(review): fragment of a line-numbered listing; the switch statement
 * and default 'g' handling are missing from this view. Fatal-logs on a
 * malformed specifier. */
static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
/* 's': an optional ":<stream specifier>" may follow */
 if (*(++arg) && *arg != ':') {
 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
 *stream_spec = *arg == ':' ? arg + 1 : "";
/* 'c'/'p': an optional ":<index>" selects the chapter/program */
 if (*(++arg) == ':')
 *index = strtol(++arg, NULL, 0);
 av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
/* NOTE(review): fragment of a line-numbered listing; the switch arms inside
 * SET_DICT and several closing braces are missing from this view.
 * Copies metadata from one dictionary of ic to one of oc, per the in/out
 * specifiers ("g", "s:<spec>", "c:<idx>", "p:<idx>"), marking the affected
 * category as manually handled so defaults are not applied on top. */
3390 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3392 AVDictionary **meta_in = NULL;
3393 AVDictionary **meta_out;
3395 char  type_in, type_out;
3396 const char *istream_spec = NULL, *ostream_spec = NULL;
3397 int idx_in = 0, idx_out = 0;
3399 parse_meta_type(inspec,  &type_in,  &idx_in,  &istream_spec);
3400 parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
3402 if (type_in == 'g' || type_out == 'g')
3403 o->metadata_global_manual = 1;
3404 if (type_in == 's' || type_out == 's')
3405 o->metadata_streams_manual = 1;
3406 if (type_in == 'c' || type_out == 'c')
3407 o->metadata_chapters_manual = 1;
/* bounds-check helper for chapter/program indices; fatal on bad index */
3409 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3410 if ((index) < 0 || (index) >= (nb_elems)) {\
3411 av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
/* resolve a type letter + index to the matching metadata dictionary */
3416 #define SET_DICT(type, meta, context, index)\
3419 meta = &context->metadata;\
3422 METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3423 meta = &context->chapters[index]->metadata;\
3426 METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3427 meta = &context->programs[index]->metadata;\
3431 SET_DICT(type_in, meta_in, ic, idx_in);
3432 SET_DICT(type_out, meta_out, oc, idx_out);
3434 /* for input streams choose first matching stream */
3435 if (type_in == 's') {
3436 for (i = 0; i < ic->nb_streams; i++) {
3437 if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3438 meta_in = &ic->streams[i]->metadata;
3444 av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
/* for output streams copy into every matching stream */
3449 if (type_out == 's') {
3450 for (i = 0; i < oc->nb_streams; i++) {
3451 if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3452 meta_out = &oc->streams[i]->metadata;
3453 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3458 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3463 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3465 const char *codec_string = encoder ? "encoder" : "decoder";
3469 avcodec_find_encoder_by_name(name) :
3470 avcodec_find_decoder_by_name(name);
3472 av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
3475 if (codec->type != type) {
3476 av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
3482 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
3484 char *codec_name = NULL;
3486 MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3488 AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
3489 st->codec->codec_id = codec->id;
3492 return avcodec_find_decoder(st->codec->codec_id);
 * Add all the streams from the given input file to the global
 * list of input streams.
/* NOTE(review): fragment of a line-numbered listing; allocation-failure
 * handling and several case bodies are missing from this view. */
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
 for (i = 0; i < ic->nb_streams; i++) {
 AVStream *st = ic->streams[i];
 AVCodecContext *dec = st->codec;
 InputStream *ist = av_mallocz(sizeof(*ist));
 input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
 input_streams[nb_input_streams - 1] = ist;
 ist->file_index = nb_input_files;
/* discard everything by default; mappings re-enable what is used */
 st->discard  = AVDISCARD_ALL;
 ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
 ist->ts_scale = 1.0;
 MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
 ist->dec = choose_decoder(o, ic, st);
 switch (dec->codec_type) {
 case AVMEDIA_TYPE_VIDEO:
/* remember the demuxed geometry/format so later changes are detected */
 ist->resample_height  = dec->height;
 ist->resample_width   = dec->width;
 ist->resample_pix_fmt = dec->pix_fmt;
 case AVMEDIA_TYPE_AUDIO:
 case AVMEDIA_TYPE_DATA:
 case AVMEDIA_TYPE_SUBTITLE:
 case AVMEDIA_TYPE_ATTACHMENT:
 case AVMEDIA_TYPE_UNKNOWN:
/* NOTE(review): fragment of a line-numbered listing; exit_program calls and
 * closing braces are missing from this view.
 * If the target exists and -y was not given: prompt interactively when
 * stdin is available, otherwise abort. Only local-file-like names are
 * checked (plain paths, "X:" drive paths, explicit "file:" URLs). */
3544 static void assert_file_overwrite(const char *filename)
3546 if (!file_overwrite &&
3547 (strchr(filename, ':') == NULL || filename[1] == ':' ||
3548 av_strstart(filename, "file:", NULL))) {
3549 if (avio_check(filename, 0) == 0) {
3551 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3553 if (!read_yesno()) {
3554 fprintf(stderr, "Not overwriting - exiting\n");
3559 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
/* NOTE(review): fragment of a line-numbered listing; returns/exits and the
 * avio_close cleanup are missing from this view.
 * Writes an attachment stream's extradata to a file (-dump_attachment).
 * Falls back to the stream's "filename" metadata tag when no name is
 * given; fatal if neither is available. */
3566 static void dump_attachment(AVStream *st, const char *filename)
3569 AVIOContext *out = NULL;
3570 AVDictionaryEntry *e;
3572 if (!st->codec->extradata_size) {
3573 av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3574 nb_input_files - 1, st->index);
3577 if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3578 filename = e->value;
3580 av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
3581 "in stream #%d:%d.\n", nb_input_files - 1, st->index);
3585 assert_file_overwrite(filename);
3587 if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3588 av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
3593 avio_write(out, st->codec->extradata, st->codec->extradata_size);
/* Handler for "-i": open one input file, probe its streams and register it
 * in the global input_files[] array.
 * Steps visible in this extract:
 *   1. resolve a forced demuxer (-f) via av_find_input_format();
 *   2. forward per-file CLI options (sample_rate, channels, framerate,
 *      video_size, pixel_format) into format_opts for the demuxer;
 *   3. avformat_open_input() + avformat_find_stream_info();
 *   4. apply -ss seeking, register streams, honor -dump_attachment.
 * NOTE(review): extract is missing lines (declarations of buf/err/i/ret/
 * timestamp, braces, exit paths); comments cover only visible code. */
3598 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3600     AVFormatContext *ic;
3601     AVInputFormat *file_iformat = NULL;
3605     AVDictionary **opts;
3606     int orig_nb_streams;                     // number of streams before avformat_find_stream_info
/* a demuxer was forced with -f: fail hard if the name is unknown */
3609         if (!(file_iformat = av_find_input_format(o->format))) {
3610             av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
3615     if (!strcmp(filename, "-"))
/* remember if stdin is used as input so terminal handling can adapt */
3618     using_stdin |= !strncmp(filename, "pipe:", 5) ||
3619                    !strcmp(filename, "/dev/stdin");
3621     /* get default parameters from command line */
3622     ic = avformat_alloc_context();
3624         print_error(filename, AVERROR(ENOMEM));
/* per-file CLI options are passed to the demuxer through format_opts;
 * "last value wins" — only the final occurrence of each option is used */
3627     if (o->nb_audio_sample_rate) {
3628         snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3629         av_dict_set(&format_opts, "sample_rate", buf, 0);
3631     if (o->nb_audio_channels) {
3632         /* because we set audio_channels based on both the "ac" and
3633          * "channel_layout" options, we need to check that the specified
3634          * demuxer actually has the "channels" option before setting it */
3635         if (file_iformat && file_iformat->priv_class &&
3636             av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
3637                         AV_OPT_SEARCH_FAKE_OBJ)) {
3638             snprintf(buf, sizeof(buf), "%d",
3639                      o->audio_channels[o->nb_audio_channels - 1].u.i);
3640             av_dict_set(&format_opts, "channels", buf, 0);
3643     if (o->nb_frame_rates) {
3644         av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3646     if (o->nb_frame_sizes) {
3647         av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3649     if (o->nb_frame_pix_fmts)
3650         av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3652     ic->flags |= AVFMT_FLAG_NONBLOCK;
3653     ic->interrupt_callback = int_cb;
3655     /* open the input file with generic libav function */
3656     err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3658         print_error(filename, err);
/* any format_opts entry not consumed by the demuxer is an error */
3661     assert_avoptions(format_opts);
3663     /* apply forced codec ids */
3664     for (i = 0; i < ic->nb_streams; i++)
3665         choose_decoder(o, ic, ic->streams[i]);
3667     /* Set AVCodecContext options for avformat_find_stream_info */
3668     opts = setup_find_stream_info_opts(ic, codec_opts);
3669     orig_nb_streams = ic->nb_streams;
3671     /* If not enough info to get the stream parameters, we decode the
3672        first frames to get it. (used in mpeg case for example) */
3673     ret = avformat_find_stream_info(ic, opts);
3675         av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3676         avformat_close_input(&ic);
3680     timestamp = o->start_time;
3681     /* add the stream start time */
3682     if (ic->start_time != AV_NOPTS_VALUE)
3683         timestamp += ic->start_time;
3685     /* if seeking requested, we execute it */
3686     if (o->start_time != 0) {
3687         ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
/* seek failure is non-fatal: warn and continue decoding from the start */
3689             av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3690                    filename, (double)timestamp / AV_TIME_BASE);
3694     /* update the current parameters so that they match the one of the input stream */
3695     add_input_streams(o, ic);
3697     /* dump the file content */
3698     av_dump_format(ic, nb_input_files, filename, 0);
/* register this file in the global input_files[] array */
3700     input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3701     if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
3704     input_files[nb_input_files - 1]->ctx        = ic;
3705     input_files[nb_input_files - 1]->ist_index  = nb_input_streams - ic->nb_streams;
3706     input_files[nb_input_files - 1]->ts_offset  = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3707     input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
3708     input_files[nb_input_files - 1]->rate_emu   = o->rate_emu;
/* -dump_attachment: extract matching attachment streams to disk */
3710     for (i = 0; i < o->nb_dump_attachment; i++) {
3713         for (j = 0; j < ic->nb_streams; j++) {
3714             AVStream *st = ic->streams[j];
3716             if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3717                 dump_attachment(st, o->dump_attachment[i].u.str);
/* free the per-stream option dicts built for find_stream_info */
3721     for (i = 0; i < orig_nb_streams; i++)
3722         av_dict_free(&opts[i]);
/* Parse the -force_key_frames argument: a comma-separated list of
 * timestamps.  Each entry is parsed with parse_time_or_die() and rescaled
 * from AV_TIME_BASE_Q to the encoder's time base into ost->forced_kf_pts.
 * NOTE(review): extract is missing lines (declarations of p/n/i/t, the
 * comma-counting body of the first loop, exit path); comments cover only
 * visible code. */
3729 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3730                                     AVCodecContext *avctx)
/* count entries — presumably n = number of commas + 1 (body not in extract) */
3736     for (p = kf; *p; p++)
3739     ost->forced_kf_count = n;
3740     ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3741     if (!ost->forced_kf_pts) {
3742         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3745     for (i = 0; i < n; i++) {
/* i == 0 starts at kf; afterwards advance past the next comma */
3746         p = i ? strchr(p, ',') + 1 : kf;
3747         t = parse_time_or_die("force_key_frames", p, 1);
3748         ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Read one '\n'- (or EOF-) terminated line from an AVIOContext into a
 * freshly allocated buffer, using a dynamic buffer to grow as needed.
 * Used for reading preset files.  Caller owns the returned buffer.
 * NOTE(review): extract is missing lines (declarations of line/buf/c, the
 * avio_w8 write in the loop, return); comments cover only visible code. */
3752 static uint8_t *get_line(AVIOContext *s)
3758     if (avio_open_dyn_buf(&line) < 0) {
3759         av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
/* consume bytes until NUL or newline */
3763     while ((c = avio_r8(s)) && c != '\n')
3766     avio_close_dyn_buf(line, &buf);
/* Locate and open an .avpreset file.  Candidate directories come from the
 * base[] array (AVCONV_DATADIR first; the i == 1 entry gets "/.avconv"
 * appended, suggesting the user's home directory).  For each base dir, try
 * the codec-specific "<codec>-<preset>.avpreset" first, then the generic
 * "<preset>.avpreset".  On success *s is the opened AVIOContext; returns
 * the last avio_open2() result.
 * NOTE(review): extract is missing lines (remaining base[] entries,
 * declarations of i/ret, return); comments cover only visible code. */
3771 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3774     char filename[1000];
3775     const char *base[3] = { getenv("AVCONV_DATADIR"),
/* loop stops as soon as one candidate opens (ret becomes 0/success) */
3780     for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
3784             snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3785                      i != 1 ? "" : "/.avconv", codec_name, preset_name);
3786             ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3789             snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3790                      i != 1 ? "" : "/.avconv", preset_name);
3791             ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/* Decide the encoder for an output stream from the per-stream -c/-codec
 * option:
 *   - no option given: guess the codec id from the output format/filename;
 *   - "copy": mark the stream for streamcopy (no encoder);
 *   - otherwise: look up the named encoder or die.
 * NOTE(review): extract is missing lines (the "if (!codec_name)" opener and
 * final braces); comments cover only visible code. */
3797 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
3799     char *codec_name = NULL;
3801     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
/* no codec requested: derive a default from the muxer and filename */
3803         ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3804                                                   NULL, ost->st->codec->codec_type);
3805         ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3806     } else if (!strcmp(codec_name, "copy"))
3807         ost->stream_copy = 1;
3809         ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3810         ost->st->codec->codec_id = ost->enc->id;
/* Allocate and initialize one output stream of the given media type on
 * muxer context 'oc', registering it in the global output_streams[] array.
 * Handles: -streamid mapping, encoder choice, per-stream codec options,
 * presets (-pre), -frames, bitstream filters (-bsf), codec tags (-tag),
 * -qscale/same_quant, global headers, and sws flags.
 * NOTE(review): extract is missing many lines (declarations of ost/qscale,
 * braces, preset-loop internals, exit paths); comments cover only visible
 * code. */
3814 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
3817     AVStream *st = avformat_new_stream(oc, NULL);
3818     int idx      = oc->nb_streams - 1, ret = 0;
3819     char *bsf = NULL, *next, *codec_tag = NULL;
3820     AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3822     char *buf = NULL, *arg = NULL, *preset = NULL;
3823     AVIOContext *s = NULL;
3826         av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
/* apply a user-supplied stream id (-streamid) if one maps to this index */
3830     if (oc->nb_streams - 1 < o->nb_streamid_map)
3831         st->id = o->streamid_map[oc->nb_streams - 1];
3833     output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3834                                 nb_output_streams + 1);
3835     if (!(ost = av_mallocz(sizeof(*ost))))
3837     output_streams[nb_output_streams - 1] = ost;
3839     ost->file_index = nb_output_files;
3842     st->codec->codec_type = type;
3843     choose_encoder(o, oc, ost);
/* filter the global codec options down to those applying to this encoder */
3845         ost->opts  = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3848     avcodec_get_context_defaults3(st->codec, ost->enc);
3849     st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
/* load a preset file: each non-comment line is "key=value" merged into
 * ost->opts without overwriting explicit CLI options */
3851     MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3852     if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
3855             if (!buf[0] || buf[0] == '#') {
3859             if (!(arg = strchr(buf, '='))) {
3860                 av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
3864             av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3866         } while (!s->eof_reached);
3870         av_log(NULL, AV_LOG_FATAL,
3871                "Preset %s specified for stream %d:%d, but could not be opened.\n",
3872                preset, ost->file_index, ost->index);
3876     ost->max_frames = INT64_MAX;
3877     MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
/* build the -bsf chain: comma-separated filter names linked in order */
3879     MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
3881         if (next = strchr(bsf, ','))
3883         if (!(bsfc = av_bitstream_filter_init(bsf))) {
3884             av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3888             bsfc_prev->next = bsfc;
3890             ost->bitstream_filters = bsfc;
/* -tag: numeric value if it parses fully, otherwise the 4CC string */
3896     MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3898         uint32_t tag = strtol(codec_tag, &next, 0);
3900             tag = AV_RL32(codec_tag);
3901         st->codec->codec_tag = tag;
3904     MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3905     if (qscale >= 0 || same_quant) {
3906         st->codec->flags |= CODEC_FLAG_QSCALE;
3907         st->codec->global_quality = FF_QP2LAMBDA * qscale;
3910     if (oc->oformat->flags & AVFMT_GLOBALHEADER)
3911         st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
3913     av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3915     ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
/* Parse a comma-separated list of integer coefficients (for -intra_matrix /
 * -inter_matrix) into 'dest'; fatal error on malformed input.
 * NOTE(review): extract is missing the parsing loop itself; comments cover
 * only visible code. */
3920 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3923     const char *p = str;
3930             av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
/* Create a video output stream.  On top of new_output_stream(), when the
 * stream is actually encoded (not streamcopy) this applies the per-stream
 * video CLI options: -r, -s, -aspect, -pix_fmt, -intra/inter_matrix,
 * -rc_override, two-pass flags, -force_key_frames, -force_fps,
 * -top_field_first and -vf.
 * NOTE(review): extract is missing lines (declarations, braces, the pass-
 * selection conditionals around 4026/4028, return); comments cover only
 * visible code. */
3937 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
3941     AVCodecContext *video_enc;
3943     ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3945     video_enc = st->codec;
3947     if (!ost->stream_copy) {
3948         const char *p = NULL;
3949         char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
3950         char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
3951         char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
3954         MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3955         if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3956             av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3960         MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3961         if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3962             av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
3966         MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
3967         if (frame_aspect_ratio)
3968             ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
3970         MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
3971         if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
3972             av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
3975         st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
/* custom quantization matrices: 64 coefficients each */
3977         MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
3979             if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
3980                 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
3983             parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
3985         MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
3987             if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
3988                 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
3991             parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
/* -rc_override: comma-separated "start,end,q" triplets; q > 0 forces a
 * qscale, q <= 0 is interpreted as a quality factor (-q/100) */
3994         MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
3995         for (i = 0; p; i++) {
3997             int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
3999                 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
4002             video_enc->rc_override =
4003                 av_realloc(video_enc->rc_override,
4004                            sizeof(RcOverride) * (i + 1));
4005             video_enc->rc_override[i].start_frame = start;
4006             video_enc->rc_override[i].end_frame   = end;
4008                 video_enc->rc_override[i].qscale         = q;
4009                 video_enc->rc_override[i].quality_factor = 1.0;
4012                 video_enc->rc_override[i].qscale         = 0;
4013                 video_enc->rc_override[i].quality_factor = -q/100.0;
4018         video_enc->rc_override_count = i;
4019         if (!video_enc->rc_initial_buffer_occupancy)
4020             video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
4021         video_enc->intra_dc_precision = intra_dc_precision - 8;
/* two-pass encoding flags (conditionals around these lines are missing
 * from this extract; presumably keyed off do_pass) */
4026                 video_enc->flags |= CODEC_FLAG_PASS1;
4028                 video_enc->flags |= CODEC_FLAG_PASS2;
4032         MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
4033         if (forced_key_frames)
4034             parse_forced_key_frames(forced_key_frames, ost, video_enc);
4036         MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
/* -1 means "not specified"; resolved later from the input stream */
4038         ost->top_field_first = -1;
4039         MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
4041         MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
4043             ost->avfilter = av_strdup(filters);
4045     MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
/* Create an audio output stream; when encoding (not streamcopy), apply the
 * per-stream -ac, -sample_fmt and -ar options.
 * NOTE(review): extract is missing lines (declarations of ost/st, braces,
 * return); comments cover only visible code. */
4051 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
4055     AVCodecContext *audio_enc;
4057     ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
4060     audio_enc = st->codec;
4061     audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
4063     if (!ost->stream_copy) {
4064         char *sample_fmt = NULL;
4066         MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
4068         MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
4070             (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
4071             av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
4075         MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
/* Create a data output stream; only streamcopy is supported — encoding a
 * data stream is a fatal error. */
4081 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
4085     ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
4086     if (!ost->stream_copy) {
4087         av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
/* Create an attachment output stream; attachments are always streamcopied. */
4094 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
4096     OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
4097     ost->stream_copy  = 1;
/* Create a subtitle output stream.
 * NOTE(review): extract is missing lines (declarations of ost/st, return);
 * comments cover only visible code. */
4101 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
4105     AVCodecContext *subtitle_enc;
4107     ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
4109     subtitle_enc = st->codec;
4111     subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
4116 /* arg format is "output-stream-index:streamid-value". */
4117 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
4123 av_strlcpy(idx_str, arg, sizeof(idx_str));
4124 p = strchr(idx_str, ':');
4126 av_log(NULL, AV_LOG_FATAL,
4127 "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
4132 idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
4133 o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
4134 o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
/* Copy chapters from an input file to an output file, shifting start/end
 * by the output start-time offset (rescaled into each chapter's time base)
 * and dropping chapters that fall entirely outside the output's time range
 * (-ss / -t).  Chapter metadata is copied when copy_metadata is set.
 * Returns 0 on success or AVERROR(ENOMEM).
 * NOTE(review): extract is missing lines (loop-variable declaration,
 * continue statements, nb_chapters increment, final return); comments
 * cover only visible code. */
4138 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
4140     AVFormatContext *is = ifile->ctx;
4141     AVFormatContext *os = ofile->ctx;
4144     for (i = 0; i < is->nb_chapters; i++) {
4145         AVChapter *in_ch = is->chapters[i], *out_ch;
4146         int64_t ts_off   = av_rescale_q(ofile->start_time - ifile->ts_offset,
4147                                         AV_TIME_BASE_Q, in_ch->time_base);
4148         int64_t rt       = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
4149                            av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
/* chapter ends before the output starts — skip */
4152         if (in_ch->end < ts_off)
/* chapter starts after the output's recording time — skip */
4154         if (rt != INT64_MAX && in_ch->start > rt + ts_off)
4157         out_ch = av_mallocz(sizeof(AVChapter));
4159             return AVERROR(ENOMEM);
4161         out_ch->id        = in_ch->id;
4162         out_ch->time_base = in_ch->time_base;
/* clamp the shifted chapter to [0, recording_time] */
4163         out_ch->start     = FFMAX(0,  in_ch->start - ts_off);
4164         out_ch->end       = FFMIN(rt, in_ch->end   - ts_off);
4167             av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
4170         os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
4172             return AVERROR(ENOMEM);
4173         os->chapters[os->nb_chapters - 1] = out_ch;
/* Create an output stream fed by a complex filtergraph output pad.  Only
 * video pads are supported; the new stream has no input-stream source
 * (source_index = -1), streamcopy is rejected, and the filter output is
 * configured immediately.
 * NOTE(review): extract is missing lines (ost declaration, exit paths,
 * braces); comments cover only visible code. */
4178 static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
4179                                AVFormatContext *oc)
4183     if (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type != AVMEDIA_TYPE_VIDEO) {
4184         av_log(NULL, AV_LOG_FATAL, "Only video filters are supported currently.\n");
4188     ost               = new_video_stream(o, oc);
/* -1: this stream is produced by a filtergraph, not an input stream */
4189     ost->source_index = -1;
4190     ost->filter       = ofilter;
4194     if (ost->stream_copy) {
4195         av_log(NULL, AV_LOG_ERROR, "Streamcopy requested for output stream %d:%d, "
4196                "which is fed from a complex filtergraph. Filtering and streamcopy "
4197                "cannot be used together.\n", ost->file_index, ost->index);
4201     if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
4202         av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
4205     avfilter_inout_free(&ofilter->out_tmp);
/* Handler for an output filename on the command line: create the output
 * muxer context, create its streams (from filtergraph labels, -map
 * directives, or automatic "best stream" selection), attach files
 * (-attach), open the output AVIO, and apply chapter/metadata mapping.
 * NOTE(review): extract is missing many lines (declarations of i/j/err/
 * ost/ist, braces, exit paths); comments cover only visible code. */
4208 static void opt_output_file(void *optctx, const char *filename)
4210     OptionsContext *o = optctx;
4211     AVFormatContext *oc;
4213     AVOutputFormat *file_oformat;
4217         if (configure_complex_filters() < 0) {
4218             av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
4222     if (!strcmp(filename, "-"))
4225     oc = avformat_alloc_context();
4227         print_error(filename, AVERROR(ENOMEM));
/* resolve the output format: -f name first, then guess from filename */
4232         file_oformat = av_guess_format(o->format, NULL, NULL);
4233         if (!file_oformat) {
4234             av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
4238         file_oformat = av_guess_format(NULL, filename, NULL);
4239         if (!file_oformat) {
4240             av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
4246     oc->oformat = file_oformat;
4247     oc->interrupt_callback = int_cb;
4248     av_strlcpy(oc->filename, filename, sizeof(oc->filename));
4250     /* create streams for all unlabeled output pads */
4251     for (i = 0; i < nb_filtergraphs; i++) {
4252         FilterGraph *fg = filtergraphs[i];
4253         for (j = 0; j < fg->nb_outputs; j++) {
4254             OutputFilter *ofilter = fg->outputs[j];
4256             if (!ofilter->out_tmp || ofilter->out_tmp->name)
/* a filtergraph feeds this type, so disable automatic stream selection
 * for the same type below */
4259             switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4260             case AVMEDIA_TYPE_VIDEO:    o->video_disable    = 1; break;
4261             case AVMEDIA_TYPE_AUDIO:    o->audio_disable    = 1; break;
4262             case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
4264             init_output_filter(ofilter, o, oc);
4268     if (!o->nb_stream_maps) {
4269         /* pick the "best" stream of each type */
4270 #define NEW_STREAM(type, index)\
4272             ost = new_ ## type ## _stream(o, oc);\
4273             ost->source_index = index;\
4274             ost->sync_ist     = input_streams[index];\
4275             input_streams[index]->discard = 0;\
4276             input_streams[index]->st->discard = AVDISCARD_NONE;\
4279         /* video: highest resolution */
4280         if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
4281             int area = 0, idx = -1;
4282             for (i = 0; i < nb_input_streams; i++) {
4283                 ist = input_streams[i];
4284                 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
4285                     ist->st->codec->width * ist->st->codec->height > area) {
4286                     area = ist->st->codec->width * ist->st->codec->height;
4290             NEW_STREAM(video, idx);
4293         /* audio: most channels */
4294         if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
4295             int channels = 0, idx = -1;
4296             for (i = 0; i < nb_input_streams; i++) {
4297                 ist = input_streams[i];
4298                 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
4299                     ist->st->codec->channels > channels) {
4300                     channels = ist->st->codec->channels;
4304             NEW_STREAM(audio, idx);
4307         /* subtitles: pick first */
4308         if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
4309             for (i = 0; i < nb_input_streams; i++)
4310                 if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
4311                     NEW_STREAM(subtitle, i);
4315         /* do something with data? */
/* explicit -map directives: each entry is either a filtergraph link label
 * or an input file/stream pair */
4317         for (i = 0; i < o->nb_stream_maps; i++) {
4318             StreamMap *map = &o->stream_maps[i];
4323             if (map->linklabel) {
4325                 OutputFilter *ofilter = NULL;
4328                 for (j = 0; j < nb_filtergraphs; j++) {
4329                     fg = filtergraphs[j];
4330                     for (k = 0; k < fg->nb_outputs; k++) {
4331                         AVFilterInOut *out = fg->outputs[k]->out_tmp;
4332                         if (out && !strcmp(out->name, map->linklabel)) {
4333                             ofilter = fg->outputs[k];
4340                     av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
4341                            "in any defined filter graph.\n", map->linklabel);
4344                 init_output_filter(ofilter, o, oc);
4346                 ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
4347                 switch (ist->st->codec->codec_type) {
4348                 case AVMEDIA_TYPE_VIDEO:      ost = new_video_stream(o, oc);      break;
4349                 case AVMEDIA_TYPE_AUDIO:      ost = new_audio_stream(o, oc);      break;
4350                 case AVMEDIA_TYPE_SUBTITLE:   ost = new_subtitle_stream(o, oc);   break;
4351                 case AVMEDIA_TYPE_DATA:       ost = new_data_stream(o, oc);       break;
4352                 case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
4354                     av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
4355                            map->file_index, map->stream_index);
4359                 ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
4360                 ost->sync_ist     = input_streams[input_files[map->sync_file_index]->ist_index +
4361                                                map->sync_stream_index];
4363                 ist->st->discard = AVDISCARD_NONE;
4368     /* handle attached files */
4369     for (i = 0; i < o->nb_attachments; i++) {
4371         uint8_t *attachment;
4375         if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
4376             av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
4380         if ((len = avio_size(pb)) <= 0) {
4381             av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
4385         if (!(attachment = av_malloc(len))) {
4386             av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
4390         avio_read(pb, attachment, len);
/* the attachment's bytes become the new stream's extradata */
4392         ost = new_attachment_stream(o, oc);
4393         ost->stream_copy               = 0;
4394         ost->source_index              = -1;
4395         ost->attachment_filename       = o->attachments[i];
4396         ost->st->codec->extradata      = attachment;
4397         ost->st->codec->extradata_size = len;
/* default the "filename" tag to the basename of the attached file */
4399         p = strrchr(o->attachments[i], '/');
4400         av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
/* register this file in the global output_files[] array */
4404     output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
4405     if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
4408     output_files[nb_output_files - 1]->ctx            = oc;
4409     output_files[nb_output_files - 1]->ost_index      = nb_output_streams - oc->nb_streams;
4410     output_files[nb_output_files - 1]->recording_time = o->recording_time;
4411     if (o->recording_time != INT64_MAX)
4412         oc->duration = o->recording_time;
4413     output_files[nb_output_files - 1]->start_time     = o->start_time;
4414     output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
4415     av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
4417     /* check filename in case of an image number is expected */
4418     if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
4419         if (!av_filename_number_test(oc->filename)) {
4420             print_error(oc->filename, AVERROR(EINVAL));
4425     if (!(oc->oformat->flags & AVFMT_NOFILE)) {
4426         /* test if it already exists to avoid losing precious files */
4427         assert_file_overwrite(filename);
4430         if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
4431                               &oc->interrupt_callback,
4432                               &output_files[nb_output_files - 1]->opts)) < 0) {
4433             print_error(filename, err);
4438     if (o->mux_preload) {
4440         snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
4441         av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
4443     oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
4444     oc->flags |= AVFMT_FLAG_NONBLOCK;
/* -map_metadata: copy metadata from the given input file, with an optional
 * stream/chapter specifier after ':' */
4447     for (i = 0; i < o->nb_metadata_map; i++) {
4449         int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
4451         if (in_file_index < 0)
4453         if (in_file_index >= nb_input_files) {
4454             av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4457         copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
/* -map_chapters: INT_MAX means "auto — first input file with chapters" */
4461     if (o->chapters_input_file >= nb_input_files) {
4462         if (o->chapters_input_file == INT_MAX) {
4463             /* copy chapters from the first input file that has them*/
4464             o->chapters_input_file = -1;
4465             for (i = 0; i < nb_input_files; i++)
4466                 if (input_files[i]->ctx->nb_chapters) {
4467                     o->chapters_input_file = i;
4471             av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4472                    o->chapters_input_file);
4476     if (o->chapters_input_file >= 0)
4477         copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
4478                       !o->metadata_chapters_manual);
4480     /* copy global metadata by default */
4481     if (!o->metadata_global_manual && nb_input_files)
4482         av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
4483                      AV_DICT_DONT_OVERWRITE);
4484     if (!o->metadata_streams_manual)
4485         for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
4487             if (output_streams[i]->source_index < 0)         /* this is true e.g. for attached files */
4489             ist = input_streams[output_streams[i]->source_index];
4490             av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4493     /* process manually set metadata */
4494     for (i = 0; i < o->nb_metadata; i++) {
4497         const char *stream_spec;
4498         int index = 0, j, ret;
/* each -metadata argument must be "key=value" */
4500         val = strchr(o->metadata[i].u.str, '=');
4502             av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4503                    o->metadata[i].u.str);
4508         parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
4510             for (j = 0; j < oc->nb_streams; j++) {
4511                 if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
4512                     av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
/* NOTE(review): debug printf to stdout — looks like leftover debugging
 * output; consider removing or converting to av_log */
4516                 printf("ret %d, stream_spec %s\n", ret, stream_spec);
4524                 if (index < 0 || index >= oc->nb_chapters) {
4525                     av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4528                 m = &oc->chapters[index]->metadata;
4531                 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
/* empty value deletes the key (av_dict_set with NULL) */
4534             av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
4541 /* same option as mencoder */
/* Handler for -pass: accepts 1 or 2 and stores it in the global do_pass. */
4542 static int opt_pass(const char *opt, const char *arg)
4544     do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
/* Return the user CPU time consumed by this process, in microseconds.
 * Uses getrusage() where available, GetProcessTimes() on Windows
 * (FILETIME is in 100 ns units, hence the /10), and falls back to
 * wall-clock av_gettime() otherwise.
 * NOTE(review): extract is missing lines (the HAVE_GETRUSAGE #if, proc
 * declaration, #else/#endif); comments cover only visible code. */
4548 static int64_t getutime(void)
4551     struct rusage rusage;
4553     getrusage(RUSAGE_SELF, &rusage);
4554     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4555 #elif HAVE_GETPROCESSTIMES
4557     FILETIME c, e, k, u;
4558     proc = GetCurrentProcess();
4559     GetProcessTimes(proc, &c, &e, &k, &u);
4560     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4562     return av_gettime();
/* Return this process's peak memory usage in bytes: ru_maxrss (reported in
 * KiB, hence * 1024) via getrusage() where available, PeakPagefileUsage
 * via GetProcessMemoryInfo() on Windows.
 * NOTE(review): extract is missing lines (proc declaration, #else fallback
 * and #endif); comments cover only visible code. */
4566 static int64_t getmaxrss(void)
4568 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4569     struct rusage rusage;
4570     getrusage(RUSAGE_SELF, &rusage);
4571     return (int64_t)rusage.ru_maxrss * 1024;
4572 #elif HAVE_GETPROCESSMEMORYINFO
4574     PROCESS_MEMORY_COUNTERS memcounters;
4575     proc = GetCurrentProcess();
4576     memcounters.cb = sizeof(memcounters);
4577     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4578     return memcounters.PeakPagefileUsage;
/* Handler for -aq: alias for the per-stream "q:a" option. */
4584 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4586     return parse_option(o, "q:a", arg, options);
/* Print the one-line program usage banner to stdout. */
4589 static void show_usage(void)
4591     printf("Hyper fast Audio and Video encoder\n");
4592     printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
/* Print the full -h help text: option groups (main/advanced, then per
 * media type) followed by the AVOptions of libavcodec, libavformat and
 * libswscale.  The flag masks passed to show_help_options() select which
 * options land in each section.
 * NOTE(review): extract is missing some lines (trailing flag arguments of
 * several calls); comments cover only visible code. */
4596 static void show_help(void)
4598     int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
4599     av_log_set_callback(log_callback_help);
4601     show_help_options(options, "Main options:\n",
4602                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4603     show_help_options(options, "\nAdvanced options:\n",
4604                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4606     show_help_options(options, "\nVideo options:\n",
4607                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4609     show_help_options(options, "\nAdvanced Video options:\n",
4610                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4611                       OPT_VIDEO | OPT_EXPERT);
4612     show_help_options(options, "\nAudio options:\n",
4613                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4615     show_help_options(options, "\nAdvanced Audio options:\n",
4616                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4617                       OPT_AUDIO | OPT_EXPERT);
4618     show_help_options(options, "\nSubtitle options:\n",
4619                       OPT_SUBTITLE | OPT_GRAB,
4621     show_help_options(options, "\nAudio/Video grab options:\n",
4625     show_help_children(avcodec_get_class(), flags);
4626     show_help_children(avformat_get_class(), flags);
4627     show_help_children(sws_get_class(), flags);
/* Handler for -target (vcd/svcd/dvd/dv...): expand one target name into the
 * full set of codec, format, size, rate and mux options required by the
 * corresponding standard.  The norm (PAL/NTSC/FILM) is taken from an
 * explicit "pal-"/"ntsc-"/"film-" prefix, or guessed from the frame rates
 * of already-opened input files.
 * NOTE(review): extract is missing lines (prefix-stripping of arg,
 * declarations of fr/i/j, braces, some option lines); comments cover only
 * visible code. */
4630 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4632     enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
/* indexed by the norm enum: PAL=25, NTSC=30000/1001, FILM=24000/1001 */
4633     static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
4635     if (!strncmp(arg, "pal-", 4)) {
4638     } else if (!strncmp(arg, "ntsc-", 5)) {
4641     } else if (!strncmp(arg, "film-", 5)) {
4645         /* Try to determine PAL/NTSC by peeking in the input files */
4646         if (nb_input_files) {
4648             for (j = 0; j < nb_input_files; j++) {
4649                 for (i = 0; i < input_files[j]->nb_streams; i++) {
4650                     AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
4651                     if (c->codec_type != AVMEDIA_TYPE_VIDEO)
/* frame rate in millihertz, e.g. 25000 for PAL, 29970/23976 for NTSC/film */
4653                     fr = c->time_base.den * 1000 / c->time_base.num;
4657                     } else if ((fr == 29970) || (fr == 23976)) {
4662                 if (norm != UNKNOWN)
4666         if (norm != UNKNOWN)
4667             av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4670     if (norm == UNKNOWN) {
4671         av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4672         av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4673         av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
4677     if (!strcmp(arg, "vcd")) {
4678         opt_video_codec(o, "c:v", "mpeg1video");
4679         opt_audio_codec(o, "c:a", "mp2");
4680         parse_option(o, "f", "vcd", options);
4682         parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4683         parse_option(o, "r", frame_rates[norm], options);
4684         opt_default("g", norm == PAL ? "15" : "18");
4686         opt_default("b", "1150000");
4687         opt_default("maxrate", "1150000");
4688         opt_default("minrate", "1150000");
4689         opt_default("bufsize", "327680"); // 40*1024*8;
4691         opt_default("b:a", "224000");
4692         parse_option(o, "ar", "44100", options);
4693         parse_option(o, "ac", "2", options);
4695         opt_default("packetsize", "2324");
4696         opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4698         /* We have to offset the PTS, so that it is consistent with the SCR.
4699            SCR starts at 36000, but the first two packs contain only padding
4700            and the first pack from the other stream, respectively, may also have
4701            been written before.
4702            So the real data starts at SCR 36000+3*1200. */
4703         o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
4704     } else if (!strcmp(arg, "svcd")) {
4706         opt_video_codec(o, "c:v", "mpeg2video");
4707         opt_audio_codec(o, "c:a", "mp2");
4708         parse_option(o, "f", "svcd", options);
4710         parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4711         parse_option(o, "r", frame_rates[norm], options);
4712         opt_default("g", norm == PAL ? "15" : "18");
4714         opt_default("b", "2040000");
4715         opt_default("maxrate", "2516000");
4716         opt_default("minrate", "0"); // 1145000;
4717         opt_default("bufsize", "1835008"); // 224*1024*8;
4718         opt_default("flags", "+scan_offset");
4721         opt_default("b:a", "224000");
4722         parse_option(o, "ar", "44100", options);
4724         opt_default("packetsize", "2324");
4726     } else if (!strcmp(arg, "dvd")) {
4728         opt_video_codec(o, "c:v", "mpeg2video");
4729         opt_audio_codec(o, "c:a", "ac3");
4730         parse_option(o, "f", "dvd", options);
4732         parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4733         parse_option(o, "r", frame_rates[norm], options);
4734         opt_default("g", norm == PAL ? "15" : "18");
4736         opt_default("b", "6000000");
4737         opt_default("maxrate", "9000000");
4738         opt_default("minrate", "0"); // 1500000;
4739         opt_default("bufsize", "1835008"); // 224*1024*8;
4741         opt_default("packetsize", "2048");  // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4742         opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4744         opt_default("b:a", "448000");
4745         parse_option(o, "ar", "48000", options);
4747     } else if (!strncmp(arg, "dv", 2)) {
4749         parse_option(o, "f", "dv", options);
4751         parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4752         parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4753                           norm == PAL ? "yuv420p" : "yuv411p", options);
4754         parse_option(o, "r", frame_rates[norm], options);
4756         parse_option(o, "ar", "48000", options);
4757         parse_option(o, "ac", "2", options);
4760         av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4761         return AVERROR(EINVAL);
/* Handler for -vstats_file: replace the global vstats filename with a copy
 * of arg (previous value is freed). */
4766 static int opt_vstats_file(const char *opt, const char *arg)
4768     av_free (vstats_filename);
4769     vstats_filename = av_strdup (arg);
/* Handler for -vstats: generate a timestamped default log filename
 * ("vstats_HHMMSS.log" from local time) and delegate to opt_vstats_file().
 * NOTE(review): extract is missing lines (the filename[] declaration and
 * the tm_sec argument); comments cover only visible code. */
4773 static int opt_vstats(const char *opt, const char *arg)
4776     time_t today2 = time(NULL);
4777     struct tm *today = localtime(&today2);
4779     snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4781     return opt_vstats_file(opt, filename);
4784 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4786 return parse_option(o, "frames:v", arg, options);
4789 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4791 return parse_option(o, "frames:a", arg, options);
4794 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4796 return parse_option(o, "frames:d", arg, options);
4799 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4801 return parse_option(o, "tag:v", arg, options);
4804 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4806 return parse_option(o, "tag:a", arg, options);
4809 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4811 return parse_option(o, "tag:s", arg, options);
4814 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4816 return parse_option(o, "filter:v", arg, options);
4819 static int opt_vsync(const char *opt, const char *arg)
4821 if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
4822 else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
4823 else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
4825 if (video_sync_method == VSYNC_AUTO)
4826 video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
/* Callback for the deprecated "-deinterlace" option: warns the user to
 * use the yadif filter instead.
 * NOTE(review): the remainder of this body is not visible in this chunk —
 * presumably it still enables the legacy deinterlacing flag and returns 0;
 * confirm against the full file before editing. */
4830 static int opt_deinterlace(const char *opt, const char *arg)
4832 av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
/* "-cpuflags mask": restrict the CPU feature flags libav may use.
 * Returns 0 on success, or the negative error from av_parse_cpu_flags()
 * if the mask string is invalid. */
static int opt_cpuflags(const char *opt, const char *arg)
{
    int flags = av_parse_cpu_flags(arg);

    /* Propagate parse errors; never feed a negative error code into the
     * CPU flags mask. */
    if (flags < 0)
        return flags;

    av_set_cpu_flags_mask(flags);
    return 0;
}
4848 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
4850 int idx = locate_option(argc, argv, options, "cpuflags");
4851 if (idx && argv[idx + 1])
4852 opt_cpuflags("cpuflags", argv[idx + 1]);
4855 static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
4857 char layout_str[32];
4860 int ret, channels, ac_str_size;
4863 layout = av_get_channel_layout(arg);
4865 av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
4866 return AVERROR(EINVAL);
4868 snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
4869 ret = opt_default(opt, layout_str);
4873 /* set 'ac' option based on channel layout */
4874 channels = av_get_channel_layout_nb_channels(layout);
4875 snprintf(layout_str, sizeof(layout_str), "%d", channels);
4876 stream_str = strchr(opt, ':');
4877 ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
4878 ac_str = av_mallocz(ac_str_size);
4880 return AVERROR(ENOMEM);
4881 av_strlcpy(ac_str, "ac", 3);
4883 av_strlcat(ac_str, stream_str, ac_str_size);
4884 ret = parse_option(o, ac_str, layout_str, options);
4890 static int opt_filter_complex(const char *opt, const char *arg)
4892 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
4893 &nb_filtergraphs, nb_filtergraphs + 1);
4894 if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
4895 return AVERROR(ENOMEM);
4896 filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
4897 filtergraphs[nb_filtergraphs - 1]->graph_desc = arg;
4901 #define OFFSET(x) offsetof(OptionsContext, x)
4902 static const OptionDef options[] = {
4904 #include "cmdutils_common_opts.h"
4905 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4906 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4907 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4908 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4909 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4910 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4911 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4912 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4913 "outfile[,metadata]:infile[,metadata]" },
4914 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4915 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4916 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4917 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4918 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4919 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4920 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4921 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4922 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4923 "add timings for benchmarking" },
4924 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4925 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4926 "dump each input packet" },
4927 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4928 "when dumping packets, also dump the payload" },
4929 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4930 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4931 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4932 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4933 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4934 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)©_ts}, "copy timestamps" },
4935 { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)©_tb}, "copy input stream time base when stream copying" },
4936 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4937 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4938 { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4939 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4940 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4941 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4942 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4943 { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4944 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4945 { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
4946 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4947 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4948 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4949 { "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "set CPU flags mask", "mask" },
4952 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4953 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4954 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4955 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4956 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4957 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4958 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4959 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4960 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4961 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4962 "use same quantizer as source (implies VBR)" },
4963 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4964 { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
4965 { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
4966 "this option is deprecated, use the yadif filter instead" },
4967 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
4968 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
4969 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
4970 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
4971 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
4972 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
4973 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
4974 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
4975 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
4976 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
4977 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
4978 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
4981 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
4982 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
4983 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
4984 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
4985 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
4986 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
4987 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
4988 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
4989 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
4990 { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
4992 /* subtitle options */
4993 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
4994 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
4995 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
4998 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
5001 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
5002 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
5004 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
5006 /* data codec support */
5007 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
5009 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
5013 int main(int argc, char **argv)
5015 OptionsContext o = { 0 };
5020 av_log_set_flags(AV_LOG_SKIP_REPEATED);
5021 parse_loglevel(argc, argv, options);
5023 avcodec_register_all();
5025 avdevice_register_all();
5027 avfilter_register_all();
5029 avformat_network_init();
5033 parse_cpuflags(argc, argv, options);
5036 parse_options(&o, argc, argv, options, opt_output_file);
5038 if (nb_output_files <= 0 && nb_input_files == 0) {
5040 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5044 /* file converter / grab */
5045 if (nb_output_files <= 0) {
5046 fprintf(stderr, "At least one output file must be specified\n");
5050 if (nb_input_files == 0) {
5051 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
5056 if (transcode() < 0)
5058 ti = getutime() - ti;
5060 int maxrss = getmaxrss() / 1024;
5061 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);