3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavutil/opt.h"
35 #include "libavcodec/audioconvert.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/vsrc_buffer.h"
55 #if HAVE_SYS_RESOURCE_H
56 #include <sys/types.h>
58 #include <sys/resource.h>
59 #elif HAVE_GETPROCESSTIMES
62 #if HAVE_GETPROCESSMEMORYINFO
68 #include <sys/select.h>
75 #include "libavutil/avassert.h"
/* Video sync mode: pass frames through unchanged (no dup/drop).
 * NOTE(review): the companion VSYNC_* values (e.g. VSYNC_AUTO used below)
 * are defined on lines not visible in this excerpt. */
#define VSYNC_PASSTHROUGH 0
/* Program identity reported by the shared cmdutils banner/help code. */
const char program_name[] = "avconv";
const int program_birth_year = 2000;
/* select an input stream for an output stream */
typedef struct StreamMap {
    int disabled;           /**< 1 if this mapping is disabled by a negative map */
    int sync_stream_index;  /**< stream used as the sync reference */
    char *linklabel;        /**< name of an output link, for mapping lavfi outputs */

/**
 * select an input file for an output file
 */
typedef struct MetadataMap {
    int  file;      ///< file index
    char type;      ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
    int  index;     ///< stream/chapter/program number
/* forward declaration; the table itself lives near the end of the file */
static const OptionDef options[];

/* Global transcode state set from the command line.  These persist across
 * output files (unlike OptionsContext, which is reset per file). */
static int video_discard = 0;
static int same_quant = 0;            /* reuse input quantizers on re-encode */
static int do_deinterlace = 0;
static int intra_dc_precision = 8;
static int qp_hist = 0;               /* collect a QP histogram for stats */
static int file_overwrite = 0;        /* -y: overwrite output without asking */
static int do_benchmark = 0;
static int do_hex_dump = 0;
static int do_pkt_dump = 0;
static int do_pass = 0;               /* two-pass encoding pass number */
static char *pass_logfilename_prefix = NULL;
static int video_sync_method = VSYNC_AUTO;
static int audio_sync_method = 0;
static float audio_drift_threshold = 0.1;
static int copy_ts = 0;
static int copy_tb = 1;
static int opt_shortest = 0;          /* stop at the end of the shortest stream */
static char *vstats_filename;
static FILE *vstats_file;
static int audio_volume = 256;        /* 256 == unity gain */
static int exit_on_error = 0;
static int using_stdin = 0;
static int64_t video_size = 0;        /* running byte totals for the report */
static int64_t audio_size = 0;
static int64_t extra_size = 0;
static int nb_frames_dup = 0;
static int nb_frames_drop = 0;
static int input_sync;
static float dts_delta_threshold = 10;
static int print_stats = 1;
/* scratch buffers for audio encoding / sync compensation, grown on demand */
static uint8_t *audio_buf;
static unsigned int allocated_audio_buf_size;
static uint8_t *async_buf;
static unsigned int allocated_async_buf_size;

#define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
/* one input pad of a filtergraph, fed by an input stream */
typedef struct InputFilter {
    AVFilterContext    *filter;   /* the buffer source feeding the graph */
    struct InputStream *ist;      /* stream providing the frames */
    struct FilterGraph *graph;    /* owning graph */

/* one output pad of a filtergraph, consumed by an output stream */
typedef struct OutputFilter {
    AVFilterContext     *filter;  /* the sink at the end of the chain */
    struct OutputStream *ost;     /* stream encoding the filtered frames */
    struct FilterGraph  *graph;   /* owning graph */

    /* temporary storage until stream maps are processed */
    AVFilterInOut       *out_tmp;

/* a configured lavfi graph together with its endpoints */
typedef struct FilterGraph {
    const char    *graph_desc;    /* textual description; NULL for simple 1:1 graphs */

    AVFilterGraph *graph;         /* NULL until configured */

    InputFilter  **inputs;
    OutputFilter **outputs;

/* a reusable decoded-picture buffer handed to decoders via get_buffer */
typedef struct FrameBuffer {
    enum PixelFormat    pix_fmt;  /* format the planes were allocated for */
    struct InputStream *ist;      /* owner; buffer returns to ist->buffer_pool */
    struct FrameBuffer *next;     /* free-list link */
/* per-input-stream demux/decode state */
typedef struct InputStream {
    int discard;             /* true if stream data should be discarded */
    int decoding_needed;     /* true if the packets must be decoded in 'raw_fifo' */

    AVFrame *decoded_frame;  /* reused frame for decoder output */

    int64_t start;     /* time when read started */
    /* predicted dts of the next packet read for this stream or (when there are
     * several frames in a packet) of the next frame in current packet */
    /* dts of the last packet read for this stream */
    PtsCorrectionContext pts_ctx;
    int is_start;            /* is 1 at the start and after a discontinuity */
    int showed_multi_packet_warning;
    int resample_pix_fmt;    /* last seen pix_fmt, to detect mid-stream changes */

    /* a pool of free buffers for decoded data */
    FrameBuffer *buffer_pool;

    /* decoded data from this stream goes into all those filters
     * currently video only */
    InputFilter **filters;

/* per-input-file demux state */
typedef struct InputFile {
    AVFormatContext *ctx;
    int eof_reached;      /* true if eof reached */
    int ist_index;        /* index of first stream in ist_table */
    int buffer_size;      /* current total buffer size */
    int nb_streams;       /* number of stream that avconv is aware of; may be different
                             from ctx.nb_streams if new streams appear during av_read_frame() */
/* per-output-stream mux/encode state */
typedef struct OutputStream {
    int file_index;          /* file index */
    int index;               /* stream index in the output file */
    int source_index;        /* InputStream index */
    AVStream *st;            /* stream in the output file */
    int encoding_needed;     /* true if encoding needed for this stream */
    /* input pts and corresponding output pts
    // double sync_ipts;     /* dts from the AVPacket of the demuxer in second units */
    struct InputStream *sync_ist; /* input stream to sync against */
    int64_t sync_opts;       /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
    /* pts of the first frame encoded for this stream, used for limiting
    AVBitStreamFilterContext *bitstream_filters;
    AVFrame *output_frame;   /* reused frame for audio encoder input */
    AVFrame *filtered_frame; /* reused frame for filtergraph output */

    AVRational frame_rate;   /* forced output frame rate, if any */
    float frame_aspect_ratio;

    /* forced key frames */
    int64_t *forced_kf_pts;

    /* audio conversion state */
    ReSampleContext *resample; /* for audio resampling */
    int resample_sample_fmt;   /* input params the resampler was set up for; */
    int resample_channels;     /* compared each frame to detect mid-stream   */
    int resample_sample_rate;  /* format changes                             */
    AVAudioConvert *reformat_ctx;
    AVFifoBuffer *fifo;      /* for compression: one audio fifo per codec */

    OutputFilter *filter;    /* filtergraph output feeding this stream */

    int is_past_recording_time;  /* set once -t limit is reached */
    const char *attachment_filename;
    int copy_initial_nonkeyframes;

    enum PixelFormat pix_fmts[2]; /* scratch list returned by choose_pixel_fmts() */

/* per-output-file mux state */
typedef struct OutputFile {
    AVFormatContext *ctx;
    int ost_index;           /* index of the first stream in output_streams */
    int64_t recording_time;  /* desired length of the resulting file in microseconds */
    int64_t start_time;      /* start time in microseconds */
    uint64_t limit_filesize; /* -fs: maximum output size in bytes */
/* Global tables of all demuxer/muxer objects, indexed across files.
 * Grown with grow_array() as options are parsed. */
static InputStream **input_streams = NULL;
static int        nb_input_streams = 0;
static InputFile   **input_files   = NULL;
static int        nb_input_files   = 0;

static OutputStream **output_streams = NULL;
static int         nb_output_streams = 0;
static OutputFile   **output_files   = NULL;
static int         nb_output_files   = 0;

/* all simple and complex (lavfi-described) filtergraphs */
static FilterGraph **filtergraphs;
/* Per-file option state, reset by reset_options() after each input/output
 * file is opened.  Each SpecifierOpt array carries per-stream-specifier
 * values and is paired with an nb_* counter (relied upon by the generic
 * OPT_SPEC freeing code in reset_options()). */
typedef struct OptionsContext {
    /* input/output options */
    SpecifierOpt *codec_names;
    SpecifierOpt *audio_channels;
    int        nb_audio_channels;
    SpecifierOpt *audio_sample_rate;
    int        nb_audio_sample_rate;
    SpecifierOpt *frame_rates;
    SpecifierOpt *frame_sizes;
    SpecifierOpt *frame_pix_fmts;
    int        nb_frame_pix_fmts;

    /* input options */
    int64_t input_ts_offset;
    SpecifierOpt *ts_scale;
    SpecifierOpt *dump_attachment;
    int        nb_dump_attachment;

    /* output options */
    StreamMap *stream_maps;
    /* first item specifies output metadata, second is input */
    MetadataMap (*meta_data_maps)[2];
    int nb_meta_data_maps;
    int metadata_global_manual;
    int metadata_streams_manual;
    int metadata_chapters_manual;
    const char **attachments;

    int chapters_input_file;

    int64_t recording_time;
    uint64_t limit_filesize;

    int subtitle_disable;

    /* indexed by output file stream index */
    SpecifierOpt *metadata;
    SpecifierOpt *max_frames;
    SpecifierOpt *bitstream_filters;
    int        nb_bitstream_filters;
    SpecifierOpt *codec_tags;
    SpecifierOpt *sample_fmts;
    SpecifierOpt *qscale;
    SpecifierOpt *forced_key_frames;
    int        nb_forced_key_frames;
    SpecifierOpt *force_fps;
    SpecifierOpt *frame_aspect_ratios;
    int        nb_frame_aspect_ratios;
    SpecifierOpt *rc_overrides;
    SpecifierOpt *intra_matrices;
    int        nb_intra_matrices;
    SpecifierOpt *inter_matrices;
    int        nb_inter_matrices;
    SpecifierOpt *top_field_first;
    int        nb_top_field_first;
    SpecifierOpt *metadata_map;
    SpecifierOpt *presets;
    SpecifierOpt *copy_initial_nonkeyframes;
    int        nb_copy_initial_nonkeyframes;
    SpecifierOpt *filters;
/* Scan the per-stream option array o->name and copy the value whose stream
 * specifier matches `st` (in `fmtctx`) into `outvar`; later matches override
 * earlier ones.  `type` selects the member of the SpecifierOpt value union. */
#define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
    for (i = 0; i < o->nb_ ## name; i++) {\
        char *spec = o->name[i].specifier;\
        if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
            outvar = o->name[i].u.type;\
/* Free all per-file option storage in *o and restore its defaults,
 * ready for the next input/output file on the command line. */
static void reset_options(OptionsContext *o)
    const OptionDef *po = options;

    /* all OPT_SPEC and OPT_STRING can be freed in generic way */
        void *dst = (uint8_t*)o + po->u.off;

        if (po->flags & OPT_SPEC) {
            /* layout convention: the nb_* counter immediately follows the
             * SpecifierOpt* pointer in OptionsContext */
            SpecifierOpt **so = dst;
            int i, *count = (int*)(so + 1);
            for (i = 0; i < *count; i++) {
                av_freep(&(*so)[i].specifier);
                if (po->flags & OPT_STRING)
                    av_freep(&(*so)[i].u.str);
        } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)

    /* members not covered by the generic loop above */
    for (i = 0; i < o->nb_stream_maps; i++)
        av_freep(&o->stream_maps[i].linklabel);
    av_freep(&o->stream_maps);
    av_freep(&o->meta_data_maps);
    av_freep(&o->streamid_map);

    memset(o, 0, sizeof(*o));

    /* non-zero defaults */
    o->mux_max_delay  = 0.7;
    o->recording_time = INT64_MAX;
    o->limit_filesize = UINT64_MAX;
    o->chapters_input_file = INT_MAX;
/* Allocate a FrameBuffer large enough for the codec's current dimensions
 * and pixel format, with padding for codecs that write beyond the edges.
 * On success *pbuf receives the new buffer; returns 0 or AVERROR(ENOMEM). */
static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
    FrameBuffer *buf = av_mallocz(sizeof(*buf));
    /* bytes per pixel of the first component, from the pixdesc table */
    const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
    int h_chroma_shift, v_chroma_shift;
    int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
    int w = s->width, h = s->height;

        return AVERROR(ENOMEM);

    if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {

    avcodec_align_dimensions(s, &w, &h);
    if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
                              s->pix_fmt, 32)) < 0) {

    /* XXX this shouldn't be needed, but some tests break without this line
     * those decoders are buggy and need to be fixed.
     * the following tests fail:
     * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
     */
    memset(buf->base[0], 128, ret);

    avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
    for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
        const int h_shift = i==0 ? 0 : h_chroma_shift;
        const int v_shift = i==0 ? 0 : v_chroma_shift;
        if (s->flags & CODEC_FLAG_EMU_EDGE)
            buf->data[i] = buf->base[i];
            /* offset past the edge padding, 32-byte aligned */
            buf->data[i] = buf->base[i] +
                           FFALIGN((buf->linesize[i]*edge >> v_shift) +
                                   (pixel_size*edge >> h_shift), 32);

    buf->pix_fmt = s->pix_fmt;
508 static void free_buffer_pool(InputStream *ist)
510 FrameBuffer *buf = ist->buffer_pool;
512 ist->buffer_pool = buf->next;
513 av_freep(&buf->base[0]);
515 buf = ist->buffer_pool;
519 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
521 av_assert0(buf->refcount);
523 if (!buf->refcount) {
524 buf->next = ist->buffer_pool;
525 ist->buffer_pool = buf;
/* AVCodecContext.get_buffer callback: hand the decoder a picture buffer
 * from the stream's pool, reallocating when dimensions/format changed. */
static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
    InputStream *ist = s->opaque;

    /* pool empty: allocate a fresh buffer */
    if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)

    /* pop the head of the free list */
    buf              = ist->buffer_pool;
    ist->buffer_pool = buf->next;

    /* stream parameters changed since this buffer was allocated */
    if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
        av_freep(&buf->base[0]);
        if ((ret = alloc_buffer(ist, s, &buf)) < 0)

    frame->type          = FF_BUFFER_TYPE_USER;
    frame->extended_data = frame->data;
    frame->pkt_pts       = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
    frame->width         = buf->w;
    frame->height        = buf->h;
    frame->format        = buf->pix_fmt;
    frame->sample_aspect_ratio = s->sample_aspect_ratio;

    for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
        frame->base[i]     = buf->base[i];  // XXX h264.c uses base though it shouldn't
        frame->data[i]     = buf->data[i];
        frame->linesize[i] = buf->linesize[i];
567 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
569 InputStream *ist = s->opaque;
570 FrameBuffer *buf = frame->opaque;
573 for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
574 frame->data[i] = NULL;
576 unref_buffer(ist, buf);
579 static void filter_release_buffer(AVFilterBuffer *fb)
581 FrameBuffer *buf = fb->priv;
583 unref_buffer(buf->ist, buf);
586 static const enum PixelFormat *choose_pixel_fmts(OutputStream *ost)
588 if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
589 ost->pix_fmts[0] = ost->st->codec->pix_fmt;
590 return ost->pix_fmts;
591 } else if (ost->enc->pix_fmts)
592 return ost->enc->pix_fmts;
/* Build a simple one-input/one-output video filtergraph for ost, fed by
 * ist: buffer source -> [optional scale] -> [optional user chain] -> sink.
 * Returns 0 on success or a negative AVERROR. */
static int configure_video_filters(FilterGraph *fg)
    InputStream  *ist = fg->inputs[0]->ist;
    OutputStream *ost = fg->outputs[0]->ost;
    AVFilterContext *last_filter, *filter;
    /** filter graph containing all filters including input & output */
    AVCodecContext *codec = ost->st->codec;
    SinkContext sink_ctx = { .pix_fmts = choose_pixel_fmts(ost) };
    AVRational sample_aspect_ratio;

    avfilter_graph_free(&fg->graph);
    fg->graph = avfilter_graph_alloc();

    /* prefer the container SAR; fall back to the codec SAR */
    if (ist->st->sample_aspect_ratio.num) {
        sample_aspect_ratio = ist->st->sample_aspect_ratio;
        sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;

    /* buffer source args: w:h:pix_fmt:tb_num:tb_den:sar_num:sar_den */
    snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
             ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
             sample_aspect_ratio.num, sample_aspect_ratio.den);

    ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
                                       avfilter_get_by_name("buffer"),
                                       "src", args, NULL, fg->graph);
    ret = avfilter_graph_create_filter(&fg->outputs[0]->filter, &sink,
                                       "out", NULL, &sink_ctx, fg->graph);
    last_filter = fg->inputs[0]->filter;

    /* insert a scale filter when the encoder size differs from the input */
    if (codec->width || codec->height) {
        snprintf(args, 255, "%d:%d:flags=0x%X",
                 (unsigned)ost->sws_flags);
        if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
                                                NULL, args, NULL, fg->graph)) < 0)
        if ((ret = avfilter_link(last_filter, 0, filter, 0)) < 0)
        last_filter = filter;

    /* default swscale options for auto-inserted scalers */
    snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
    fg->graph->scale_sws_opts = av_strdup(args);

        /* user-specified -vf chain: parse it between "in" and "out" */
        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs  = avfilter_inout_alloc();

        outputs->name    = av_strdup("in");
        outputs->filter_ctx = last_filter;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter_ctx = fg->outputs[0]->filter;

        if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, inputs, outputs, NULL)) < 0)

        /* no user chain: connect source (or scaler) straight to the sink */
        if ((ret = avfilter_link(last_filter, 0, fg->outputs[0]->filter, 0)) < 0)

    if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)

    ost->filter = fg->outputs[0];
/* Create a 1:1 FilterGraph connecting ist to ost, register it in the
 * global filtergraphs table and in ist->filters, and return it. */
static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
    FilterGraph *fg = av_mallocz(sizeof(*fg));

    fg->index = nb_filtergraphs;

    /* single output, bound to ost */
    fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
    if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
    fg->outputs[0]->ost   = ost;
    fg->outputs[0]->graph = fg;

    /* single input, bound to ist */
    fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
    if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
    fg->inputs[0]->ist   = ist;
    fg->inputs[0]->graph = fg;

    /* the input stream must know it feeds this graph */
    ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
                              &ist->nb_filters, ist->nb_filters + 1);
    ist->filters[ist->nb_filters - 1] = fg->inputs[0];

    filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
                              &nb_filtergraphs, nb_filtergraphs + 1);
    filtergraphs[nb_filtergraphs - 1] = fg;
/* Bind one unconnected input pad of a complex filtergraph to an input
 * stream: either the stream named by the pad's link label ("file:spec"),
 * or the first unused stream of the right media type. */
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
    enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;

    // TODO: support other filter types
    if (type != AVMEDIA_TYPE_VIDEO) {
        av_log(NULL, AV_LOG_FATAL, "Only video filters supported currently.\n");

        /* labeled pad: "<file index>[:<stream specifier>]" */
        int file_idx = strtol(in->name, &p, 0);

        if (file_idx < 0 || file_idx > nb_input_files) {
            av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n",
                   file_idx, fg->graph_desc);
        s = input_files[file_idx]->ctx;

        /* pick the first stream of the file matching type and specifier */
        for (i = 0; i < s->nb_streams; i++) {
            if (s->streams[i]->codec->codec_type != type)
            if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
            av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches no streams.\n", p, fg->graph_desc);
        ist = input_streams[input_files[file_idx]->ist_index + st->index];

        /* find the first unused stream of corresponding type */
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
            if (ist->st->codec->codec_type == type && ist->discard)
        if (i == nb_input_streams) {
            av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
                   "unlabeled input pad %d on filter %s", in->pad_idx,
                   in->filter_ctx->name);

    /* the chosen stream must now be demuxed and decoded */
    ist->decoding_needed = 1;
    ist->st->discard = AVDISCARD_NONE;

    fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
                            &fg->nb_inputs, fg->nb_inputs + 1);
    if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
    fg->inputs[fg->nb_inputs - 1]->ist   = ist;
    fg->inputs[fg->nb_inputs - 1]->graph = fg;

    ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
                              &ist->nb_filters, ist->nb_filters + 1);
    ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
/* Attach a sink (and, if the encoder size differs, a scale filter) to the
 * filtergraph output pad `out` and link it to ofilter's output stream.
 * Returns 0 on success or a negative AVERROR. */
static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
    SinkContext sink_ctx;
    AVCodecContext *codec = ofilter->ost->st->codec;
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;

    sink_ctx.pix_fmts = choose_pixel_fmts(ofilter->ost);

    ret = avfilter_graph_create_filter(&ofilter->filter, &sink,
                                       "out", NULL, &sink_ctx, fg->graph);

    /* insert a scaler when the encoder requests a specific size */
    if (codec->width || codec->height) {
        snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
                 (unsigned)ofilter->ost->sws_flags);
        if ((ret = avfilter_graph_create_filter(&last_filter, avfilter_get_by_name("scale"),
                                                NULL, args, NULL, fg->graph)) < 0)
        if ((ret = avfilter_link(out->filter_ctx, out->pad_idx, last_filter, 0)) < 0)

    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
/* Instantiate a complex (textually described) filtergraph: parse the
 * description, bind its input pads to input streams, and either finish
 * output wiring (reconfiguration) or stash the output pads in out_tmp
 * until stream maps are processed (first pass).  Returns 0 or AVERROR. */
static int configure_complex_filter(FilterGraph *fg)
    AVFilterInOut *inputs, *outputs, *cur;
    int ret, i, init = !fg->graph;   /* first-time setup vs. reconfiguration */

    avfilter_graph_free(&fg->graph);
    if (!(fg->graph = avfilter_graph_alloc()))
        return AVERROR(ENOMEM);

    if ((ret = avfilter_graph_parse2(fg->graph, fg->graph_desc, &inputs, &outputs)) < 0)

    /* on first setup, choose an input stream for every input pad */
    for (cur = inputs; init && cur; cur = cur->next)
        init_input_filter(fg, cur);

    /* create and link a buffer source for each input pad */
    for (cur = inputs, i = 0; cur; cur = cur->next, i++) {
        InputFilter *ifilter = fg->inputs[i];
        InputStream *ist     = ifilter->ist;

        sar = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
              ist->st->codec->sample_aspect_ratio;
        snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
                 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,

        if ((ret = avfilter_graph_create_filter(&ifilter->filter,
                                                avfilter_get_by_name("buffer"), cur->name,
                                                args, NULL, fg->graph)) < 0)
        if ((ret = avfilter_link(ifilter->filter, 0,
                                 cur->filter_ctx, cur->pad_idx)) < 0)
    avfilter_inout_free(&inputs);

        /* we already know the mappings between lavfi outputs and output streams,
         * so we can finish the setup */
        for (cur = outputs, i = 0; cur; cur = cur->next, i++)
            configure_output_filter(fg, fg->outputs[i], cur);
        avfilter_inout_free(&outputs);

        if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)

        /* wait until output mappings are processed */
        for (cur = outputs; cur;) {
            fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
                                     &fg->nb_outputs, fg->nb_outputs + 1);
            if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
            fg->outputs[fg->nb_outputs - 1]->graph   = fg;
            fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
            /* detach cur from the list; it is now owned by the OutputFilter */
            fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
877 static int configure_complex_filters(void)
881 for (i = 0; i < nb_filtergraphs; i++)
882 if (!filtergraphs[i]->graph &&
883 (ret = configure_complex_filter(filtergraphs[i])) < 0)
888 static int configure_filtergraph(FilterGraph *fg)
890 return fg->graph_desc ? configure_complex_filter(fg) : configure_video_filters(fg);
893 static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
896 for (i = 0; i < fg->nb_inputs; i++)
897 if (fg->inputs[i]->ist == ist)
902 static void term_exit(void)
904 av_log(NULL, AV_LOG_QUIET, "");
/* Set from sigterm_handler(); volatile because they are written in a
 * signal handler and polled from normal control flow. */
static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;
911 sigterm_handler(int sig)
913 received_sigterm = sig;
914 received_nb_signals++;
918 static void term_init(void)
920 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
921 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
923 signal(SIGXCPU, sigterm_handler);
927 static int decode_interrupt_cb(void *ctx)
929 return received_nb_signals > 1;
/* interrupt callback passed to lavf so blocking I/O can be cancelled */
static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Free all global state (filtergraphs, output files/streams, input
 * files/streams, scratch buffers), log the terminating signal if any,
 * and exit with the given status code. */
void exit_program(int ret)
    /* filtergraphs and their endpoint arrays */
    for (i = 0; i < nb_filtergraphs; i++) {
        avfilter_graph_free(&filtergraphs[i]->graph);
        for (j = 0; j < filtergraphs[i]->nb_inputs; j++)
            av_freep(&filtergraphs[i]->inputs[j]);
        av_freep(&filtergraphs[i]->inputs);
        for (j = 0; j < filtergraphs[i]->nb_outputs; j++)
            av_freep(&filtergraphs[i]->outputs[j]);
        av_freep(&filtergraphs[i]->outputs);
        av_freep(&filtergraphs[i]);
    av_freep(&filtergraphs);

    /* close output files (files are closed unless AVFMT_NOFILE) */
    for (i = 0; i < nb_output_files; i++) {
        AVFormatContext *s = output_files[i]->ctx;
        if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
        avformat_free_context(s);
        av_dict_free(&output_files[i]->opts);
        av_freep(&output_files[i]);
    for (i = 0; i < nb_output_streams; i++) {
        /* walk and close the whole bitstream-filter chain */
        AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
        output_streams[i]->bitstream_filters = NULL;

        if (output_streams[i]->output_frame) {
            AVFrame *frame = output_streams[i]->output_frame;
            /* extended_data is only a separate allocation when it was grown
             * past data[]; avoid freeing data[] itself */
            if (frame->extended_data != frame->data)
                av_freep(&frame->extended_data);
        av_freep(&output_streams[i]->avfilter);
        av_freep(&output_streams[i]->filtered_frame);
        av_freep(&output_streams[i]);
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    for (i = 0; i < nb_input_streams; i++) {
        av_freep(&input_streams[i]->decoded_frame);
        av_dict_free(&input_streams[i]->opts);
        free_buffer_pool(input_streams[i]);
        av_freep(&input_streams[i]->filters);
        av_freep(&input_streams[i]);

    av_free(vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    /* scratch audio buffers */
    allocated_audio_buf_size = 0;
    allocated_async_buf_size = 0;

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
               (int) received_sigterm);
1018 static void assert_avoptions(AVDictionary *m)
1020 AVDictionaryEntry *t;
1021 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1022 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
1027 static void assert_codec_experimental(AVCodecContext *c, int encoder)
1029 const char *codec_string = encoder ? "encoder" : "decoder";
1031 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
1032 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1033 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
1034 "results.\nAdd '-strict experimental' if you want to use it.\n",
1035 codec_string, c->codec->name);
1036 codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
1037 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
1038 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
1039 codec_string, codec->name);
1044 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
1046 if (codec && codec->sample_fmts) {
1047 const enum AVSampleFormat *p = codec->sample_fmts;
1048 for (; *p != -1; p++) {
1049 if (*p == st->codec->sample_fmt)
1053 av_log(NULL, AV_LOG_WARNING,
1054 "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
1055 av_get_sample_fmt_name(st->codec->sample_fmt),
1057 av_get_sample_fmt_name(codec->sample_fmts[0]));
1058 st->codec->sample_fmt = codec->sample_fmts[0];
1064 * Update the requested input sample format based on the output sample format.
1065 * This is currently only used to request float output from decoders which
1066 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
1067 * Ideally this will be removed in the future when decoders do not do format
1068 * conversion and only output in their native format.
1070 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
1071 AVCodecContext *enc)
1073 /* if sample formats match or a decoder sample format has already been
1074 requested, just return */
1075 if (enc->sample_fmt == dec->sample_fmt ||
1076 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
1079 /* if decoder supports more than one output format */
1080 if (dec_codec && dec_codec->sample_fmts &&
1081 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
1082 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
1083 const enum AVSampleFormat *p;
1084 int min_dec = -1, min_inc = -1;
1086 /* find a matching sample format in the encoder */
1087 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
1088 if (*p == enc->sample_fmt) {
1089 dec->request_sample_fmt = *p;
1091 } else if (*p > enc->sample_fmt) {
1092 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
1094 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
1097 /* if none match, provide the one that matches quality closest */
1098 dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
1099 enc->sample_fmt - min_dec;
1103 static void choose_sample_rate(AVStream *st, AVCodec *codec)
1105 if (codec && codec->supported_samplerates) {
1106 const int *p = codec->supported_samplerates;
1108 int best_dist = INT_MAX;
1110 int dist = abs(st->codec->sample_rate - *p);
1111 if (dist < best_dist) {
1117 av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best);
1119 st->codec->sample_rate = best;
1124 get_sync_ipts(const OutputStream *ost, int64_t pts)
1126 OutputFile *of = output_files[ost->file_index];
1127 return (double)(pts - of->start_time) / AV_TIME_BASE;
/* Run the stream's bitstream filters over pkt, enforce -frames limits for
 * non-video streams, and hand the packet to the interleaving muxer. */
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    AVCodecContext          *avctx = ost->st->codec;

    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {
            av_free_packet(pkt);
        ost->frame_number++;

    /* apply each bitstream filter in the chain */
        AVPacket new_pkt = *pkt;
        int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
            /* filter produced a new buffer: replace pkt and own the data */
            av_free_packet(pkt);
            new_pkt.destruct = av_destruct_packet;
            av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
                   bsfc->filter->name, pkt->stream_index,
                   avctx->codec ? avctx->codec->name : "copy");

    pkt->stream_index = ost->index;
    ret = av_interleaved_write_frame(s, pkt);
        print_error("av_interleaved_write_frame()", ret);
1181 static int check_recording_time(OutputStream *ost)
1183 OutputFile *of = output_files[ost->file_index];
1185 if (of->recording_time != INT64_MAX &&
1186 av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
1187 AV_TIME_BASE_Q) >= 0) {
1188 ost->is_past_recording_time = 1;
1194 static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
1196 int fill_char = 0x00;
1197 if (sample_fmt == AV_SAMPLE_FMT_U8)
1199 memset(buf, fill_char, size);
/* Wrap buf/buf_size in an AVFrame (reusing ost->output_frame), encode it
 * with avcodec_encode_audio2(), rescale the packet timestamps to the
 * stream time base and mux the result.  buf may be NULL to flush. */
static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
                              const uint8_t *buf, int buf_size)
    AVCodecContext *enc = ost->st->codec;
    AVFrame *frame = NULL;
    int ret, got_packet;

    av_init_packet(&pkt);

        /* lazily create the reusable input frame */
        if (!ost->output_frame) {
            ost->output_frame = avcodec_alloc_frame();
            if (!ost->output_frame) {
                av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n");

        frame = ost->output_frame;
        /* previous fill_audio_frame() may have allocated extended_data */
        if (frame->extended_data != frame->data)
            av_freep(&frame->extended_data);
        avcodec_get_frame_defaults(frame);

        frame->nb_samples  = buf_size /
                             (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
        if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
                                            buf, buf_size, 1)) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");

        /* stop feeding the encoder once -t is reached */
        if (!check_recording_time(ost))

        frame->pts = ost->sync_opts;
        ost->sync_opts += frame->nb_samples;

    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");

        /* encoder timestamps are in enc->time_base; mux in st->time_base */
        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts      = av_rescale_q(pkt.pts,      enc->time_base, ost->st->time_base);
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts      = av_rescale_q(pkt.dts,      enc->time_base, ost->st->time_base);
        if (pkt.duration > 0)
            pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);

        write_frame(s, &pkt, ost);

        audio_size += pkt.size;
/* Grow the global audio_buf so it can hold nb_samples of decoded audio
 * after resampling to the encoder's rate/format (with safety margin).
 * Returns 0 on success or a negative AVERROR. */
static int alloc_audio_output_buf(AVCodecContext *dec, AVCodecContext *enc,
    int64_t audio_buf_samples;

    /* calculate required number of samples to allocate */
    audio_buf_samples = ((int64_t)nb_samples * enc->sample_rate + dec->sample_rate) /
    audio_buf_samples = 4 * audio_buf_samples + 16; // safety factors for resampling
    audio_buf_samples = FFMAX(audio_buf_samples, enc->frame_size);
    if (audio_buf_samples > INT_MAX)
        return AVERROR(EINVAL);

    audio_buf_size = av_samples_get_buffer_size(NULL, enc->channels,
                                                enc->sample_fmt, 0);
    if (audio_buf_size < 0)
        return audio_buf_size;

    /* reuse the existing allocation when already large enough */
    av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
        return AVERROR(ENOMEM);
/*
 * Encode one decoded audio frame for output stream ost: (re)configure the
 * resampler / sample-format converter if the input parameters changed,
 * apply A/V sync drift compensation according to audio_sync_method, then
 * feed fixed-size frames to encode_audio_frame() via the stream's FIFO
 * (or the whole buffer at once for variable-frame-size codecs).
 */
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                         InputStream *ist, AVFrame *decoded_frame)
    int size_out, frame_bytes, resample_changed;
    AVCodecContext *enc = ost->st->codec;
    AVCodecContext *dec = ist->st->codec;
    int osize = av_get_bytes_per_sample(enc->sample_fmt);
    int isize = av_get_bytes_per_sample(dec->sample_fmt);
    uint8_t *buf = decoded_frame->data[0];
    /* size of the decoded frame in bytes (interleaved assumed) */
    int size = decoded_frame->nb_samples * dec->channels * isize;

    if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Error allocating audio buffer\n");

    /* resampling is needed whenever channel count or rate differ */
    if (enc->channels != dec->channels || enc->sample_rate != dec->sample_rate)
        ost->audio_resample = 1;

    /* detect mid-stream parameter changes of the decoded audio */
    resample_changed = ost->resample_sample_fmt  != dec->sample_fmt ||
                       ost->resample_channels    != dec->channels ||
                       ost->resample_sample_rate != dec->sample_rate;

    if ((ost->audio_resample && !ost->resample) || resample_changed) {
        if (resample_changed) {
            av_log(NULL, AV_LOG_INFO, "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d to rate:%d fmt:%s ch:%d\n",
                   ist->file_index, ist->st->index,
                   ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt), ost->resample_channels,
                   dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt), dec->channels);
            ost->resample_sample_fmt  = dec->sample_fmt;
            ost->resample_channels    = dec->channels;
            ost->resample_sample_rate = dec->sample_rate;
            audio_resample_close(ost->resample);
        /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */
        if (audio_sync_method <= 1 &&
            ost->resample_sample_fmt  == enc->sample_fmt &&
            ost->resample_channels    == enc->channels &&
            ost->resample_sample_rate == enc->sample_rate) {
            /* parameters match the encoder again: no resampler needed */
            ost->resample = NULL;
            ost->audio_resample = 0;
        } else if (ost->audio_resample) {
            if (dec->sample_fmt != AV_SAMPLE_FMT_S16)
                av_log(NULL, AV_LOG_WARNING, "Using s16 intermediate sample format for resampling\n");
            ost->resample = av_audio_resample_init(enc->channels, dec->channels,
                                                   enc->sample_rate, dec->sample_rate,
                                                   enc->sample_fmt, dec->sample_fmt,
            if (!ost->resample) {
                av_log(NULL, AV_LOG_FATAL, "Can not resample %d channels @ %d Hz to %d channels @ %d Hz\n",
                       dec->channels, dec->sample_rate,
                       enc->channels, enc->sample_rate);

/* encode both sample formats into a single comparable integer */
#define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b))
    /* pure format conversion (no rate/channel change): use av_audio_convert */
    if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt &&
        MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt) != ost->reformat_pair) {
        if (ost->reformat_ctx)
            av_audio_convert_free(ost->reformat_ctx);
        ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1,
                                                   dec->sample_fmt, 1, NULL, 0);
        if (!ost->reformat_ctx) {
            av_log(NULL, AV_LOG_FATAL, "Cannot convert %s sample format to %s sample format\n",
                   av_get_sample_fmt_name(dec->sample_fmt),
                   av_get_sample_fmt_name(enc->sample_fmt));
        ost->reformat_pair = MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);

    if (audio_sync_method > 0) {
        /* drift (in output samples) between where we are and where we should be */
        double delta = get_sync_ipts(ost, ist->last_dts) * enc->sample_rate - ost->sync_opts -
                       av_fifo_size(ost->fifo) / (enc->channels * osize);
        int idelta = delta * dec->sample_rate / enc->sample_rate;
        int byte_delta = idelta * isize * dec->channels;

        // FIXME resample delay
        if (fabs(delta) > 50) {
            if (ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate) {
                if (byte_delta < 0) {
                    /* we are ahead: drop input samples */
                    byte_delta = FFMAX(byte_delta, -size);
                    av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
                           -byte_delta / (isize * dec->channels));
                    /* we are behind: prepend silence to catch up */
                    av_fast_malloc(&async_buf, &allocated_async_buf_size,
                        av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
                    if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples + idelta) < 0) {
                        av_log(NULL, AV_LOG_FATAL, "Error allocating audio buffer\n");
                    generate_silence(async_buf, dec->sample_fmt, byte_delta);
                    memcpy(async_buf + byte_delta, buf, size);
                    av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
        } else if (audio_sync_method > 1) {
            /* small drift: let the resampler stretch/squeeze the audio */
            int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
            av_assert0(ost->audio_resample);
            av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
                   delta, comp, enc->sample_rate);
            // fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
            av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
    } else if (audio_sync_method == 0)
        /* no sync: derive sync_opts directly from the input timestamps */
        ost->sync_opts = lrintf(get_sync_ipts(ost, ist->last_dts) * enc->sample_rate) -
                         av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong

    if (ost->audio_resample) {
        size_out = audio_resample(ost->resample,
                                  (short *)buftmp, (short *)buf,
                                  size / (dec->channels * isize));
        size_out = size_out * enc->channels * osize;

    if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt) {
        /* interleaved data: a single plane with 1-sample stride */
        const void *ibuf[6] = { buftmp };
        void *obuf[6]  = { audio_buf };
        int istride[6] = { isize };
        int ostride[6] = { osize };
        int len = size_out / istride[0];
        if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len) < 0) {
            printf("av_audio_convert() failed\n");
        size_out = len * osize;

    /* now encode as many frames as possible */
    if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
        /* output resampled raw samples */
        if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
            av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
        av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);

        /* drain the FIFO one full encoder frame at a time */
        frame_bytes = enc->frame_size * osize * enc->channels;
        while (av_fifo_size(ost->fifo) >= frame_bytes) {
            av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
            encode_audio_frame(s, ost, audio_buf, frame_bytes);
        /* variable frame size: encode everything we have in one go */
        encode_audio_frame(s, ost, buftmp, size_out);
/*
 * Optionally deinterlace a decoded video frame in place before it is
 * scaled/encoded.  On success *picture is replaced by the deinterlaced
 * copy; on any failure the original picture is left untouched.
 * NOTE(review): *bufp presumably receives the temporary buffer so the
 * caller can free it — the assignment is outside the sampled lines.
 */
static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
    AVCodecContext *dec;
    AVPicture *picture2;
    AVPicture picture_tmp;

    dec = ist->st->codec;

    /* deinterlace : must be done before any resize */
    if (do_deinterlace) {
        /* create temporary picture */
        size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
        buf  = av_malloc(size);

        picture2 = &picture_tmp;
        avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);

        if (avpicture_deinterlace(picture2, picture,
                                  dec->pix_fmt, dec->width, dec->height) < 0) {
            /* if error, do not deinterlace */
            av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");

    /* shallow-copy the (possibly deinterlaced) picture back to the caller */
    if (picture != picture2)
        *picture = *picture2;
/*
 * Encode one decoded subtitle and mux the resulting packet(s).
 * DVB subtitles are encoded twice: one packet to draw, one to clear
 * (hence the nb iterations loop below).
 */
static void do_subtitle_out(AVFormatContext *s,
    /* lazily-allocated scratch buffer, shared across calls */
    static uint8_t *subtitle_out = NULL;
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;

    if (pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");

    enc = ost->st->codec;

    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)

    for (i = 0; i < nb; i++) {
        ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
        if (!check_recording_time(ost))

        sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
        // start_display_time is required to be 0
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time  -= sub->start_display_time;
        sub->start_display_time = 0;
        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            /* display times are in ms; 90 converts ms to the 90 kHz MPEG clock */
            pkt.pts += 90 * sub->start_display_time;
            pkt.pts += 90 * sub->end_display_time;
        write_frame(s, &pkt, ost);
/*
 * Encode one video frame for output stream ost.  Depending on the video
 * sync mode, the frame may be dropped (nb_frames == 0) or duplicated
 * (nb_frames > 1) to keep the output timeline consistent.  Raw-picture
 * muxers receive the AVPicture directly; everything else goes through
 * avcodec_encode_video2().  *frame_size is set to the encoded packet size
 * (used by do_video_stats()).
 */
static void do_video_out(AVFormatContext *s,
                         AVFrame *in_picture,
                         int *frame_size, float quality)
    int nb_frames, i, ret, format_video_sync;
    AVCodecContext *enc;
    double sync_ipts, delta;

    enc = ost->st->codec;

    /* input timestamp expressed in encoder time base units */
    sync_ipts = get_sync_ipts(ost, in_picture->pts) / av_q2d(enc->time_base);
    delta = sync_ipts - ost->sync_opts;

    /* by default, we output a single frame */

    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO)
        format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
                            (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;

    switch (format_video_sync) {
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        else if (delta > 1.1)
            /* behind by more than a frame: duplicate to catch up */
            nb_frames = lrintf(delta);
        else if (delta > 0.6)
            ost->sync_opts = lrintf(sync_ipts);
    case VSYNC_PASSTHROUGH:
        ost->sync_opts = lrintf(sync_ipts);

    /* never exceed the user-requested frame limit */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    if (nb_frames == 0) {
        av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
    } else if (nb_frames > 1) {
        nb_frames_dup += nb_frames - 1;
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);

    if (!ost->frame_number)
        ost->first_pts = ost->sync_opts;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        av_init_packet(&pkt);

        if (!check_recording_time(ost))

        if (s->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older method. */
            enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
            enc->coded_frame->top_field_first  = in_picture->top_field_first;
            pkt.data   = (uint8_t *)in_picture;
            pkt.size   = sizeof(AVPicture);
            pkt.pts    = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            write_frame(s, &pkt, ost);
            AVFrame big_picture;

            big_picture = *in_picture;
            /* better than nothing: use input picture interlaced settings */
            big_picture.interlaced_frame = in_picture->interlaced_frame;
            if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
                if (ost->top_field_first == -1)
                    big_picture.top_field_first = in_picture->top_field_first;
                    big_picture.top_field_first = !!ost->top_field_first;

            /* handles same_quant here. This is not correct because it may
               not be a global option */
            big_picture.quality = quality;
            if (!enc->me_threshold)
                /* let the encoder choose the picture type freely */
                big_picture.pict_type = 0;
            big_picture.pts = ost->sync_opts;
            /* honor user-forced keyframes (-force_key_frames) */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                big_picture.pict_type = AV_PICTURE_TYPE_I;
                ost->forced_kf_index++;
            ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");

            /* rescale packet timestamps from encoder to stream time base */
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
            if (pkt.dts != AV_NOPTS_VALUE)
                pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);

            write_frame(s, &pkt, ost);
            *frame_size = pkt.size;
            video_size += pkt.size;

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);

    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    ost->frame_number++;
1709 static double psnr(double d)
1711 return -10.0 * log(d) / log(10.0);
/*
 * Append one line of per-frame encoding statistics (quantizer, PSNR,
 * packet size, bitrate) for a video stream to the global vstats_file
 * (-vstats option).
 */
static void do_video_stats(AVFormatContext *os, OutputStream *ost,
    AVCodecContext *enc;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    vstats_file = fopen(vstats_filename, "w");

    enc = ost->st->codec;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->frame_number;
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
        if (enc->flags&CODEC_FLAG_PSNR)
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = ost->sync_opts * av_q2d(enc->time_base);

        /* instantaneous bitrate of this packet and running average */
        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)video_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
/* check for new output on any of the filtergraphs */
/*
 * Pull every frame currently available at the filtergraph outputs and
 * encode it into the corresponding output stream.  Returns 0 on success
 * or a negative AVERROR code.
 */
static int poll_filters(void)
    AVFilterBufferRef *picref;
    AVFrame *filtered_frame = NULL;
    int i, frame_size, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile    *of = output_files[ost->file_index];

        /* only streams fed by a filtergraph, and still within their
           recording window, are polled */
        if (!ost->filter || ost->is_past_recording_time)

        if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
            return AVERROR(ENOMEM);
        avcodec_get_frame_defaults(ost->filtered_frame);
        filtered_frame = ost->filtered_frame;

        while (avfilter_poll_frame(ost->filter->filter->inputs[0])) {
            AVRational ist_pts_tb;
            if ((ret = get_filtered_video_frame(ost->filter->filter,
                                                filtered_frame, &picref,
            filtered_frame->pts = av_rescale_q(picref->pts, ist_pts_tb, AV_TIME_BASE_Q);

            /* skip frames before the requested output start time (-ss) */
            if (of->start_time && filtered_frame->pts < of->start_time)

            switch (ost->filter->filter->inputs[0]->type) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio)
                    ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;

                do_video_out(of->ctx, ost, filtered_frame, &frame_size,
                             same_quant ? ost->last_quality :
                                          ost->st->codec->global_quality);
                if (vstats_filename && frame_size)
                    do_video_stats(of->ctx, ost, frame_size);
                // TODO support audio/subtitle filters

            avfilter_unref_buffer(picref);
/*
 * Print the periodic status line ("frame= ... fps= ... bitrate= ...") to
 * the log, at most every 0.5 s, and a final size/overhead summary when
 * is_last_report is set.
 */
static void print_report(int is_last_report, int64_t timer_start)
    AVFormatContext *oc;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate, ti1, pts;
    static int64_t last_time = -1;
    /* histogram of quantizer values, indexed by rounded QP */
    static int qp_histogram[52];

    if (!print_stats && !is_last_report)

    if (!is_last_report) {
        /* display the report every 0.5 seconds */
        cur_time = av_gettime();
        if (last_time == -1) {
            last_time = cur_time;
        if ((cur_time - last_time) < 500000)
        last_time = cur_time;

    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        enc = ost->st->codec;
        if (!ost->stream_copy && enc->coded_frame)
            q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
        /* only the first video stream produces the frame/fps columns */
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float t = (av_gettime() - timer_start) / 1000000.0;

            frame_number = ost->frame_number;
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
                     frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                /* print the QP histogram as log2 counts, one hex digit each */
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
            if (enc->flags&CODEC_FLAG_PSNR) {
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                        error = enc->coded_frame->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));

        /* compute min output value */
        pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
        if ((pts < ti1) && (pts > 0))

    bitrate = (double)(total_size * 8) / ti1 / 1000.0;

    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
             (double)total_size / 1024, ti1, bitrate);

    if (nb_frames_dup || nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                 nb_frames_dup, nb_frames_drop);

    /* \r keeps the report on a single updating console line */
    av_log(NULL, AV_LOG_INFO, "%s \r", buf);

    if (is_last_report) {
        int64_t raw= audio_size + video_size + extra_size;
        av_log(NULL, AV_LOG_INFO, "\n");
        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
               video_size / 1024.0,
               audio_size / 1024.0,
               extra_size / 1024.0,
               100.0 * (total_size - raw) / raw
/*
 * Drain all encoders at end of input: flush samples left in the audio
 * FIFOs (padding the last frame with silence if the codec needs full
 * frames), then feed NULL frames until each encoder stops returning
 * delayed packets.
 */
static void flush_encoders(void)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->st->codec;
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        int stop_encoding = 0;

        if (!ost->encoding_needed)

        /* PCM-style audio (frame_size <= 1) and raw video buffer nothing,
           so there is nothing to flush */
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)

            int fifo_bytes, got_packet;
            av_init_packet(&pkt);

            switch (ost->st->codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                fifo_bytes = av_fifo_size(ost->fifo);
                if (fifo_bytes > 0) {
                    /* encode any samples remaining in fifo */
                    int frame_bytes = fifo_bytes;

                    av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);

                    /* pad last frame with silence if needed */
                    if (!(enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)) {
                        frame_bytes = enc->frame_size * enc->channels *
                                      av_get_bytes_per_sample(enc->sample_fmt);
                        if (allocated_audio_buf_size < frame_bytes)
                        generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
                    encode_audio_frame(os, ost, audio_buf, frame_bytes);
                    /* flush encoder with NULL frames until it is done
                       returning packets */
                    if (encode_audio_frame(os, ost, NULL, 0) == 0) {
            case AVMEDIA_TYPE_VIDEO:
                ret = avcodec_encode_video2(enc, &pkt, NULL, &got_packet);
                    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                /* rescale delayed packets to the stream time base */
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
                write_frame(os, &pkt, ost);
 * Check whether a packet from ist should be written into ost at this time
 *
 * Returns nonzero only when ost is actually mapped to this input stream
 * (and, per the visible start_time check, the packet is not before the
 * requested output start).
static int check_output_constraints(InputStream *ist, OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];
    /* global index of ist across all input files */
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;

    if (ost->source_index != ist_index)

    /* drop packets before the requested output start time (-ss) */
    if (of->start_time && ist->last_dts < of->start_time)
/*
 * Copy a packet from an input stream to an output stream without
 * re-encoding (-c copy): rescale its timestamps into the output stream
 * time base, subtract the output start time, optionally run the parser's
 * bitstream fixups, and mux it.
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    OutputFile *of = output_files[ost->file_index];
    int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);

    av_init_packet(&opkt);

    /* the first copied packet must be a keyframe unless the user asked
       otherwise (copy_initial_nonkeyframes) */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)

    if (of->recording_time != INT64_MAX &&
        ist->last_dts >= of->recording_time + of->start_time) {
        ost->is_past_recording_time = 1;

    /* force the input stream PTS */
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        audio_size += pkt->size;
    else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        video_size += pkt->size;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
        opkt.pts = AV_NOPTS_VALUE;

    /* fall back on the tracked input dts when the packet has none */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;

    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codec->codec_id != CODEC_ID_H264
       && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
       && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
       && ost->st->codec->codec_id != CODEC_ID_VC1
        /* av_parser_change may allocate a new payload; mark it owned */
        if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
            opkt.destruct = av_destruct_packet;
        opkt.data = pkt->data;
        opkt.size = pkt->size;

    write_frame(of->ctx, &opkt, ost);
    ost->st->codec->frame_number++;
    av_free_packet(&opkt);
/*
 * When -re (rate emulation) is set for the input file, stall the program
 * so the stream is consumed at its native rate: compare the packet
 * timestamp (converted to microseconds) with wall-clock time elapsed
 * since the stream started.
 */
static void rate_emu_sleep(InputStream *ist)
    if (input_files[ist->file_index]->rate_emu) {
        int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
        int64_t now = av_gettime() - ist->start;
/*
 * Decode one audio packet, update the input stream's timestamp estimate,
 * apply the -vol volume adjustment in place (per sample format), and send
 * the decoded frame to every output stream that encodes from this input.
 * Returns 0 or a negative AVERROR code; *got_output is set by the decoder.
 */
static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->st->codec;
    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);

    /* decoded_frame is cached on the InputStream and reused across calls */
    if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
        return AVERROR(ENOMEM);
    avcodec_get_frame_defaults(ist->decoded_frame);
    decoded_frame = ist->decoded_frame;

    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);

    /* no audio frame */

    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        ist->next_dts = decoded_frame->pts;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /

    // preprocess audio (volume)
    if (audio_volume != 256) {
        int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
        void *samples = decoded_frame->data[0];
        /* scale samples by audio_volume/256, clipped to the format's range */
        switch (avctx->sample_fmt) {
        case AV_SAMPLE_FMT_U8:
            uint8_t *volp = samples;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                /* u8 is biased by 128: remove bias, scale, re-add */
                int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
                *volp++ = av_clip_uint8(v);
        case AV_SAMPLE_FMT_S16:
            int16_t *volp = samples;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                int v = ((*volp) * audio_volume + 128) >> 8;
                *volp++ = av_clip_int16(v);
        case AV_SAMPLE_FMT_S32:
            int32_t *volp = samples;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                /* widen to 64 bits to avoid overflow before clipping */
                int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
                *volp++ = av_clipl_int32(v);
        case AV_SAMPLE_FMT_FLT:
            float *volp = samples;
            float scale = audio_volume / 256.f;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
        case AV_SAMPLE_FMT_DBL:
            double *volp = samples;
            double scale = audio_volume / 256.;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
            av_log(NULL, AV_LOG_FATAL,
                   "Audio volume adjustment on sample format %s is not supported.\n",
                   av_get_sample_fmt_name(ist->st->codec->sample_fmt));

    rate_emu_sleep(ist);

    /* fan the decoded frame out to all mapped, encoding output streams */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
        do_audio_out(output_files[ost->file_index]->ctx, ost, ist, decoded_frame);
/*
 * Decode one video packet, fix up its timestamps, optionally deinterlace,
 * reconfigure the filtergraphs if the frame geometry/format changed, and
 * inject the frame into every filtergraph input fed by this stream.
 * Returns 0 or a negative AVERROR code; *got_output is set by the decoder.
 */
static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
    AVFrame *decoded_frame;
    void *buffer_to_free = NULL;
    int i, ret = 0, resample_changed;

    if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
        return AVERROR(ENOMEM);
    avcodec_get_frame_defaults(ist->decoded_frame);
    decoded_frame = ist->decoded_frame;
    /* hand the tracked pts/dts to the decoder; consume the cached pts */
    pkt->pts  = *pkt_pts;
    pkt->dts  = ist->last_dts;
    *pkt_pts  = AV_NOPTS_VALUE;

    ret = avcodec_decode_video2(ist->st->codec,
                                decoded_frame, got_output, pkt);

    quality = same_quant ? decoded_frame->quality : 0;

    /* no picture yet */
    /* on EOF, signal the filter sources that no more data is coming */
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_buffer(ist->filters[i]->filter, NULL);

    /* reconcile pkt_pts/pkt_dts into a single best-guess timestamp */
    decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
                                           decoded_frame->pkt_dts);
    pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);

    rate_emu_sleep(ist);

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    /* detect mid-stream changes of geometry or pixel format */
    resample_changed = ist->resample_width   != decoded_frame->width  ||
                       ist->resample_height  != decoded_frame->height ||
                       ist->resample_pix_fmt != decoded_frame->format;
    if (resample_changed) {
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
               decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));

        ist->resample_width   = decoded_frame->width;
        ist->resample_height  = decoded_frame->height;
        ist->resample_pix_fmt = decoded_frame->format;

        /* filtergraphs using this input must be rebuilt for the new geometry */
        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist) &&
                configure_filtergraph(filtergraphs[i]) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");

    for (i = 0; i < ist->nb_filters; i++) {
        // XXX what an ugly hack
        if (ist->filters[i]->graph->nb_outputs == 1)
            ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;

        if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
            /* direct rendering: wrap the decoder's buffer as a filter ref
               without copying the pixel data */
            FrameBuffer      *buf = decoded_frame->opaque;
            AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
                                        decoded_frame->data, decoded_frame->linesize,
                                        AV_PERM_READ | AV_PERM_PRESERVE,
                                        ist->st->codec->width, ist->st->codec->height,
                                        ist->st->codec->pix_fmt);

            avfilter_copy_frame_props(fb, decoded_frame);
            fb->buf->priv           = buf;
            fb->buf->free           = filter_release_buffer;

            av_buffersrc_buffer(ist->filters[i]->filter, fb);
            /* fallback path: copy the frame into the buffer source */
            av_vsrc_buffer_add_frame(ist->filters[i]->filter, decoded_frame,
                                     decoded_frame->pts, decoded_frame->sample_aspect_ratio);

    av_free(buffer_to_free);
/*
 * Decode one subtitle packet and encode it into every mapped output
 * stream, then release the decoded AVSubtitle.  Returns 0 or a negative
 * AVERROR code; *got_output is set by the decoder.
 */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
    AVSubtitle subtitle;
    int i, ret = avcodec_decode_subtitle2(ist->st->codec,
                                          &subtitle, got_output, pkt);

    rate_emu_sleep(ist);

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)

        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);

    avsubtitle_free(&subtitle);
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Process one demuxed packet from ist: decode it (possibly into several
 * frames) when the stream is being transcoded, maintaining the
 * last_dts/next_dts timestamp estimates per media type, or forward it via
 * do_streamcopy() when the stream is copied.
 */
static int output_packet(InputStream *ist, const AVPacket *pkt)
    int64_t pkt_pts = AV_NOPTS_VALUE;

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->last_dts;

    av_init_packet(&avpkt);

    /* convert packet timestamps to the common AV_TIME_BASE */
    if (pkt->dts != AV_NOPTS_VALUE)
        ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {

        ist->last_dts = ist->next_dts;

        if (avpkt.size && avpkt.size != pkt->size) {
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;

        switch (ist->st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = transcode_audio    (ist, &avpkt, &got_output);
        case AVMEDIA_TYPE_VIDEO:
            ret = transcode_video    (ist, &avpkt, &got_output, &pkt_pts);
            /* advance next_dts by the best available frame-duration guess:
               packet duration, then r_frame_rate, then codec time base */
                ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
            else if (ist->st->r_frame_rate.num)
                ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
                                                              ist->st->r_frame_rate.num},
            else if (ist->st->codec->time_base.num != 0) {
                int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
                                              ist->st->codec->ticks_per_frame;
                ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);

        // touch data and size only if not EOF

    /* handle stream copy */
    if (!ist->decoding_needed) {
        rate_emu_sleep(ist);
        ist->last_dts = ist->next_dts;
        switch (ist->st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
                             ist->st->codec->sample_rate;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->st->codec->time_base.num != 0) {
                int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->st->codec->time_base.num * ticks) /
                                  ist->st->codec->time_base.den;

    /* forward the packet to every stream-copy output it is mapped to */
    for (i = 0; pkt && i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)

        do_streamcopy(ist, ost, pkt);
/*
 * Generate and print an SDP session description covering all output
 * files (used for RTP streaming, -sdp behavior).
 */
static void print_sdp(void)
    AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);

    for (i = 0; i < nb_output_files; i++)
        avc[i] = output_files[i]->ctx;

    av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
    printf("SDP:\n%s\n", sdp);
/*
 * Fill in missing channel layouts on the decoder and encoder contexts:
 * borrow the other side's layout when the channel counts match, otherwise
 * fall back to the default layout for the channel count, warning about
 * each guess and failing fatally when no layout can be determined.
 */
static void get_default_channel_layouts(OutputStream *ost, InputStream *ist)
    char layout_name[256];
    AVCodecContext *enc = ost->st->codec;
    AVCodecContext *dec = ist->st->codec;

    if (!dec->channel_layout) {
        if (enc->channel_layout && dec->channels == enc->channels) {
            dec->channel_layout = enc->channel_layout;
            dec->channel_layout = av_get_default_channel_layout(dec->channels);

        if (!dec->channel_layout) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,

        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);

    if (!enc->channel_layout) {
        if (dec->channels == enc->channels) {
            enc->channel_layout = dec->channel_layout;
            enc->channel_layout = av_get_default_channel_layout(enc->channels);

        if (!enc->channel_layout) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel layout "
                   "for Output Stream #%d.%d\n", ost->file_index,

        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     enc->channels, enc->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Output Stream "
               "#%d.%d : %s\n", ost->file_index, ost->st->index, layout_name);
/* Open the decoder (if decoding is needed) for input stream #ist_index and
 * initialize its timestamp bookkeeping.
 * On failure, writes a human-readable message into 'error' (at most
 * error_len bytes) and returns a negative AVERROR code. */
2478 static int init_input_stream(int ist_index, char *error, int error_len)
2481     InputStream *ist = input_streams[ist_index];
2482     if (ist->decoding_needed) {
2483         AVCodec *codec = ist->dec;
         /* the !codec check guarding this message is on an elided line */
2485         snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2486                  ist->st->codec->codec_id, ist->file_index, ist->st->index);
2487         return AVERROR(EINVAL);
2490         /* update requested sample format for the decoder based on the
2491            corresponding encoder sample format */
2492         for (i = 0; i < nb_output_streams; i++) {
2493             OutputStream *ost = output_streams[i];
2494             if (ost->source_index == ist_index) {
2495                 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
         /* install custom buffer callbacks for direct-rendering-capable
          * video decoders; 'opaque' carries the InputStream back to them */
2500         if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
2501             ist->st->codec->get_buffer = codec_get_buffer;
2502             ist->st->codec->release_buffer = codec_release_buffer;
2503             ist->st->codec->opaque = ist;
         /* default to automatic thread-count selection unless user overrode */
2506         if (!av_dict_get(ist->opts, "threads", NULL, 0))
2507             av_dict_set(&ist->opts, "threads", "auto", 0);
2508         if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2509             snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2510                      ist->file_index, ist->st->index);
2511             return AVERROR(EINVAL);
2513         assert_codec_experimental(ist->st->codec, 0);
2514         assert_avoptions(ist->opts);
         /* make sure audio channel layouts are resolved on both ends */
2516         if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2517             for (i = 0; i < nb_output_streams; i++) {
2518                 OutputStream *ost = output_streams[i];
2519                 if (ost->source_index == ist_index) {
2520                     if (!ist->st->codec->channel_layout || !ost->st->codec->channel_layout)
2521                         get_default_channel_layouts(ost, ist);
     /* start last_dts slightly negative to account for decoder B-frame
      * delay; next_dts is unknown until the first packet is seen */
2528     ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2529     ist->next_dts = AV_NOPTS_VALUE;
2530     init_pts_correction(&ist->pts_ctx);
/* Return the InputStream feeding the given output stream: either its
 * directly mapped source, or — for filter-fed outputs — the first input of
 * the filtergraph whose media type matches the output's. May return NULL
 * (the fallthrough return is on an elided line — TODO confirm). */
2536 static InputStream *get_input_stream(OutputStream *ost)
2538     if (ost->source_index >= 0)
2539         return input_streams[ost->source_index];
2542     FilterGraph *fg = ost->filter->graph;
2545     for (i = 0; i < fg->nb_inputs; i++)
2546         if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
2547             return fg->inputs[i]->ist;
/* One-time setup before the main transcode loop:
 *   1. arm framerate emulation timers,
 *   2. validate output files and configure complex filtergraphs,
 *   3. derive every output stream's codec parameters (stream-copy path
 *      copies them from the input; encode path negotiates rate/format/size
 *      and handles two-pass log files),
 *   4. open all encoders, then all decoders,
 *   5. discard fully-unused programs,
 *   6. write output file headers and print the stream mapping.
 * Returns 0 on success or a negative AVERROR code; 'error' reporting and
 * the cleanup/exit paths are partly on elided lines. */
2553 static int transcode_init(void)
2555     int ret = 0, i, j, k;
2556     AVFormatContext *oc;
2557     AVCodecContext *codec, *icodec;
2563     /* init framerate emulation */
2564     for (i = 0; i < nb_input_files; i++) {
2565         InputFile *ifile = input_files[i];
2566         if (ifile->rate_emu)
2567             for (j = 0; j < ifile->nb_streams; j++)
2568                 input_streams[j + ifile->ist_index]->start = av_gettime();
2571     /* output stream init */
2572     for (i = 0; i < nb_output_files; i++) {
2573         oc = output_files[i]->ctx;
2574         if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2575             av_dump_format(oc, i, oc->filename, 1);
2576             av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2577             return AVERROR(EINVAL);
2581     /* init complex filtergraphs */
2582     for (i = 0; i < nb_filtergraphs; i++)
2583         if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2586     /* for each output stream, we compute the right encoding parameters */
2587     for (i = 0; i < nb_output_streams; i++) {
2588         ost = output_streams[i];
2589         oc = output_files[ost->file_index]->ctx;
2590         ist = get_input_stream(ost);
2592         if (ost->attachment_filename)
2595         codec = ost->st->codec;
2598         icodec = ist->st->codec;
2600         ost->st->disposition = ist->st->disposition;
2601         codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2602         codec->chroma_sample_location = icodec->chroma_sample_location;
2605         if (ost->stream_copy) {
             /* stream copy: clone the input codec parameters verbatim */
2606             uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2608             if (extra_size > INT_MAX) {
2609                 return AVERROR(EINVAL);
2612             /* if stream_copy is selected, no need to decode or encode */
2613             codec->codec_id = icodec->codec_id;
2614             codec->codec_type = icodec->codec_type;
             /* keep the input codec_tag only when the output container
              * either has no tag table or agrees with it */
2616             if (!codec->codec_tag) {
2617                 if (!oc->oformat->codec_tag ||
2618                     av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2619                     av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2620                     codec->codec_tag = icodec->codec_tag;
2623             codec->bit_rate = icodec->bit_rate;
2624             codec->rc_max_rate = icodec->rc_max_rate;
2625             codec->rc_buffer_size = icodec->rc_buffer_size;
2626             codec->field_order = icodec->field_order;
2627             codec->extradata = av_mallocz(extra_size);
2628             if (!codec->extradata) {
2629                 return AVERROR(ENOMEM);
2631             memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2632             codec->extradata_size = icodec->extradata_size;
             /* copy_tb branch selection is on elided lines: either the
              * codec time base (scaled by ticks_per_frame) or the stream
              * time base is used */
2634             codec->time_base = icodec->time_base;
2635             codec->time_base.num *= icodec->ticks_per_frame;
2636             av_reduce(&codec->time_base.num, &codec->time_base.den,
2637                       codec->time_base.num, codec->time_base.den, INT_MAX);
2639             codec->time_base = ist->st->time_base;
2641             switch (codec->codec_type) {
2642             case AVMEDIA_TYPE_AUDIO:
2643                 if (audio_volume != 256) {
2644                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2647                 codec->channel_layout = icodec->channel_layout;
2648                 codec->sample_rate = icodec->sample_rate;
2649                 codec->channels = icodec->channels;
2650                 codec->frame_size = icodec->frame_size;
2651                 codec->audio_service_type = icodec->audio_service_type;
2652                 codec->block_align = icodec->block_align;
2654             case AVMEDIA_TYPE_VIDEO:
2655                 codec->pix_fmt = icodec->pix_fmt;
2656                 codec->width = icodec->width;
2657                 codec->height = icodec->height;
2658                 codec->has_b_frames = icodec->has_b_frames;
                 /* SAR priority: container SAR, then codec SAR, then unset */
2659                 if (!codec->sample_aspect_ratio.num) {
2660                     codec->sample_aspect_ratio =
2661                     ost->st->sample_aspect_ratio =
2662                         ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2663                         ist->st->codec->sample_aspect_ratio.num ?
2664                         ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2667             case AVMEDIA_TYPE_SUBTITLE:
2668                 codec->width = icodec->width;
2669                 codec->height = icodec->height;
2671             case AVMEDIA_TYPE_DATA:
2672             case AVMEDIA_TYPE_ATTACHMENT:
             /* --- encode path --- */
2679             ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
2682             ist->decoding_needed = 1;
2683             ost->encoding_needed = 1;
2685             switch (codec->codec_type) {
2686             case AVMEDIA_TYPE_AUDIO:
2687                 ost->fifo = av_fifo_alloc(1024);
2689                     return AVERROR(ENOMEM);
2691                 ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE);
2693                 if (!codec->sample_rate)
2694                     codec->sample_rate = icodec->sample_rate;
2695                 choose_sample_rate(ost->st, ost->enc);
2696                 codec->time_base = (AVRational){ 1, codec->sample_rate };
2698                 if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
2699                     codec->sample_fmt = icodec->sample_fmt;
2700                 choose_sample_fmt(ost->st, ost->enc);
2702                 if (!codec->channels)
2703                     codec->channels = icodec->channels;
2704                 codec->channel_layout = icodec->channel_layout;
                 /* drop an inherited layout that contradicts the channel count */
2705                 if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels)
2706                     codec->channel_layout = 0;
2708                 ost->audio_resample = codec-> sample_rate != icodec->sample_rate || audio_sync_method > 1;
2709                 icodec->request_channels = codec-> channels;
2710                 ost->resample_sample_fmt = icodec->sample_fmt;
2711                 ost->resample_sample_rate = icodec->sample_rate;
2712                 ost->resample_channels = icodec->channels;
2714             case AVMEDIA_TYPE_VIDEO:
2717                 fg = init_simple_filtergraph(ist, ost);
2718                 if (configure_video_filters(fg)) {
2719                     av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2725                  * We want CFR output if and only if one of those is true:
2726                  * 1) user specified output framerate with -r
2727                  * 2) user specified -vsync cfr
2728                  * 3) output format is CFR and the user didn't force vsync to
2729                  *    something else than CFR
2731                  * in such a case, set ost->frame_rate
2733                 if (!ost->frame_rate.num && ist &&
2734                     (video_sync_method ==  VSYNC_CFR ||
2735                      (video_sync_method == VSYNC_AUTO &&
2736                       !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
2737                     ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
                     /* snap to the nearest framerate the encoder supports */
2738                     if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2739                         int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2740                         ost->frame_rate = ost->enc->supported_framerates[idx];
2743                 if (ost->frame_rate.num) {
2744                     codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
2745                     video_sync_method = VSYNC_CFR;
2747                     codec->time_base = ist->st->time_base;
2749                     codec->time_base = ost->filter->filter->inputs[0]->time_base;
                 /* geometry/SAR/pix_fmt come from the filtergraph's sink input */
2751                 codec->width  = ost->filter->filter->inputs[0]->w;
2752                 codec->height = ost->filter->filter->inputs[0]->h;
2753                 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2754                     ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2755                     av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2756                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
2757                 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
2759                 if (codec->width   != icodec->width  ||
2760                     codec->height  != icodec->height ||
2761                     codec->pix_fmt != icodec->pix_fmt) {
2762                     codec->bits_per_raw_sample = 0;
2766             case AVMEDIA_TYPE_SUBTITLE:
2767                 codec->time_base = (AVRational){1, 1000};
             /* two-pass encoding: wire up the stats log file */
2774             if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2775                 char logfilename[1024];
2778                 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2779                          pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
2781                 if (!strcmp(ost->enc->name, "libx264")) {
                     /* libx264 manages its own stats file via the option */
2782                     av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2784                     if (codec->flags & CODEC_FLAG_PASS1) {
2785                         f = fopen(logfilename, "wb");
2787                             av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2788                                    logfilename, strerror(errno));
2794                         size_t logbuffer_size;
2795                         if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2796                             av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2800                         codec->stats_in = logbuffer;
2807     /* open each encoder */
2808     for (i = 0; i < nb_output_streams; i++) {
2809         ost = output_streams[i];
2810         if (ost->encoding_needed) {
2811             AVCodec      *codec = ost->enc;
2812             AVCodecContext *dec = NULL;
             /* !codec guard for this message is on an elided line */
2814             snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d:%d",
2815                      ost->st->codec->codec_id, ost->file_index, ost->index);
2816             ret = AVERROR(EINVAL);
2820             if ((ist = get_input_stream(ost)))
2821                 dec = ist->st->codec;
             /* propagate e.g. ASS subtitle headers from decoder to encoder */
2822             if (dec && dec->subtitle_header) {
2823                 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2824                 if (!ost->st->codec->subtitle_header) {
2825                     ret = AVERROR(ENOMEM);
2828                 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2829                 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2831             if (!av_dict_get(ost->opts, "threads", NULL, 0))
2832                 av_dict_set(&ost->opts, "threads", "auto", 0);
2833             if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2834                 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2835                         ost->file_index, ost->index);
2836                 ret = AVERROR(EINVAL);
2839             assert_codec_experimental(ost->st->codec, 1);
2840             assert_avoptions(ost->opts);
             /* NOTE(review): the two string pieces below concatenate to
              * "...too low.It takes..." — a space is missing after the
              * period; fix belongs with a behavioral change, not here */
2841             if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2842                 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2843                                              "It takes bits/s as argument, not kbits/s\n");
2844             extra_size += ost->st->codec->extradata_size;
2846             if (ost->st->codec->me_threshold)
2847                 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2851     /* init input streams */
2852     for (i = 0; i < nb_input_streams; i++)
2853         if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2856     /* discard unused programs */
2857     for (i = 0; i < nb_input_files; i++) {
2858         InputFile *ifile = input_files[i];
2859         for (j = 0; j < ifile->ctx->nb_programs; j++) {
2860             AVProgram *p = ifile->ctx->programs[j];
2861             int discard  = AVDISCARD_ALL;
             /* keep the program if any of its streams is actually used */
2863             for (k = 0; k < p->nb_stream_indexes; k++)
2864                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2865                     discard = AVDISCARD_DEFAULT;
2868             p->discard = discard;
2872     /* open files and write file headers */
2873     for (i = 0; i < nb_output_files; i++) {
2874         oc = output_files[i]->ctx;
2875         oc->interrupt_callback = int_cb;
2876         if (avformat_write_header(oc, &output_files[i]->opts) < 0) {
2877             snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2878             ret = AVERROR(EINVAL);
2881         assert_avoptions(output_files[i]->opts);
         /* presumably the print_sdp() call sits on the elided lines of this
          * rtp branch — TODO confirm */
2882         if (strcmp(oc->oformat->name, "rtp")) {
2888     /* dump the file output parameters - cannot be done before in case
2890     for (i = 0; i < nb_output_files; i++) {
2891         av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2894     /* dump the stream mapping */
2895     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2896     for (i = 0; i < nb_input_streams; i++) {
2897         ist = input_streams[i];
2899         for (j = 0; j < ist->nb_filters; j++) {
2900             AVFilterLink *link = ist->filters[j]->filter->outputs[0];
2901             if (ist->filters[j]->graph->graph_desc) {
2902                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
2903                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2904                        link->dst->filter->name);
2905                 if (link->dst->input_count > 1)
2906                     av_log(NULL, AV_LOG_INFO, ":%s", link->dstpad->name);
2907                 if (nb_filtergraphs > 1)
2908                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2909                 av_log(NULL, AV_LOG_INFO, "\n");
2914     for (i = 0; i < nb_output_streams; i++) {
2915         ost = output_streams[i];
2917         if (ost->attachment_filename) {
2918             /* an attached file */
2919             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
2920                    ost->attachment_filename, ost->file_index, ost->index);
2924         if (ost->filter && ost->filter->graph->graph_desc) {
2925             /* output from a complex graph */
2926             AVFilterLink *link = ost->filter->filter->inputs[0];
2927             av_log(NULL, AV_LOG_INFO, "  %s", link->src->filter->name);
2928             if (link->src->output_count > 1)
2929                 av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name);
2930             if (nb_filtergraphs > 1)
2931                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2933             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2934                    ost->index, ost->enc ? ost->enc->name : "?");
2938         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
2939                input_streams[ost->source_index]->file_index,
2940                input_streams[ost->source_index]->st->index,
2943         if (ost->sync_ist != input_streams[ost->source_index])
2944             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2945                    ost->sync_ist->file_index,
2946                    ost->sync_ist->st->index);
2947         if (ost->stream_copy)
2948             av_log(NULL, AV_LOG_INFO, " (copy)");
2950             av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2951                    input_streams[ost->source_index]->dec->name : "?",
2952                    ost->enc ? ost->enc->name : "?");
2953         av_log(NULL, AV_LOG_INFO, "\n");
2957         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2969 * The following code is the main loop of the file converter
/* Main transcoding loop: repeatedly pick the input file whose stream has
 * the smallest output DTS, read one packet, apply ts_offset / ts_scale /
 * discontinuity correction, and hand it to output_packet().  On EOF the
 * decoders are flushed, trailers are written, and per-stream resources are
 * released.  Returns 0 on success or a negative AVERROR (some error paths
 * are on elided lines). */
2971 static int transcode(void)
2974     AVFormatContext *is, *os;
2978     int no_packet_count = 0;
2979     int64_t timer_start;
     /* per-input-file "returned EAGAIN last time" flags */
2981     if (!(no_packet = av_mallocz(nb_input_files)))
2984     ret = transcode_init();
2988     av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2991     timer_start = av_gettime();
2993     for (; received_sigterm == 0;) {
2994         int file_index, ist_index, past_recording_time = 1;
2998         ipts_min = INT64_MAX;
3000         /* check if there's any stream where output is still needed */
3001         for (i = 0; i < nb_output_streams; i++) {
3003             ost = output_streams[i];
3004             of  = output_files[ost->file_index];
3005             os  = output_files[ost->file_index]->ctx;
3006             if (ost->is_past_recording_time ||
3007                 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3009             if (ost->frame_number > ost->max_frames) {
                 /* frame limit hit: mark every stream of this file done */
3011                 for (j = 0; j < of->ctx->nb_streams; j++)
3012                     output_streams[of->ost_index + j]->is_past_recording_time = 1;
3015             past_recording_time = 0;
3017         if (past_recording_time)
3020         /* select the stream that we must read now by looking at the
3021            smallest output pts */
3023         for (i = 0; i < nb_input_streams; i++) {
3025             ist = input_streams[i];
3026             ipts = ist->last_dts;
3027             if (ist->discard || no_packet[ist->file_index])
3029             if (!input_files[ist->file_index]->eof_reached) {
3030                 if (ipts < ipts_min) {
3032                     file_index = ist->file_index;
3036         /* if none, if is finished */
3037         if (file_index < 0) {
             /* all files blocked on EAGAIN: clear the flags and retry */
3038             if (no_packet_count) {
3039                 no_packet_count = 0;
3040                 memset(no_packet, 0, nb_input_files);
3047         /* read a frame from it and output it in the fifo */
3048         is  = input_files[file_index]->ctx;
3049         ret = av_read_frame(is, &pkt);
3050         if (ret == AVERROR(EAGAIN)) {
3051             no_packet[file_index] = 1;
         /* any other read error: treat the file as finished and flush its
          * decoders with a NULL packet */
3056             input_files[file_index]->eof_reached = 1;
3058             for (i = 0; i < input_files[file_index]->nb_streams; i++) {
3059                 ist = input_streams[input_files[file_index]->ist_index + i];
3060                 if (ist->decoding_needed)
3061                     output_packet(ist, NULL);
3070         no_packet_count = 0;
3071         memset(no_packet, 0, nb_input_files);
3074             av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3075                              is->streams[pkt.stream_index]);
3077         /* the following test is needed in case new streams appear
3078            dynamically in stream : we ignore them */
3079         if (pkt.stream_index >= input_files[file_index]->nb_streams)
3080             goto discard_packet;
3081         ist_index = input_files[file_index]->ist_index + pkt.stream_index;
3082         ist = input_streams[ist_index];
3084             goto discard_packet;
         /* apply the per-file timestamp offset (in the stream's time base) */
3086         if (pkt.dts != AV_NOPTS_VALUE)
3087             pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3088         if (pkt.pts != AV_NOPTS_VALUE)
3089             pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3091         if (pkt.pts != AV_NOPTS_VALUE)
3092             pkt.pts *= ist->ts_scale;
3093         if (pkt.dts != AV_NOPTS_VALUE)
3094             pkt.dts *= ist->ts_scale;
3096         //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
3098         //        pkt.dts, input_files[ist->file_index].ts_offset,
3099         //        ist->st->codec->codec_type);
         /* discontinuity handling for formats that allow DTS jumps: fold a
          * large jump back into ts_offset so downstream sees monotonic time */
3100         if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
3101             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3102             int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3103             int64_t delta   = pkt_dts - ist->next_dts;
3104             if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
3105                 input_files[ist->file_index]->ts_offset -= delta;
3106                 av_log(NULL, AV_LOG_DEBUG,
3107                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3108                        delta, input_files[ist->file_index]->ts_offset);
3109                 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3110                 if (pkt.pts != AV_NOPTS_VALUE)
3111                     pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3115         // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
3116         if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
3117             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
3118                    ist->file_index, ist->st->index);
3121             av_free_packet(&pkt);
3126         av_free_packet(&pkt);
3128         /* dump report by using the output first video and audio streams */
3129         print_report(0, timer_start);
3132     /* at the end of stream, we must flush the decoder buffers */
3133     for (i = 0; i < nb_input_streams; i++) {
3134         ist = input_streams[i];
3135         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3136             output_packet(ist, NULL);
3144     /* write the trailer if needed and close file */
3145     for (i = 0; i < nb_output_files; i++) {
3146         os = output_files[i]->ctx;
3147         av_write_trailer(os);
3150     /* dump report by using the first video and audio streams */
3151     print_report(1, timer_start);
3153     /* close each encoder */
3154     for (i = 0; i < nb_output_streams; i++) {
3155         ost = output_streams[i];
3156         if (ost->encoding_needed) {
3157             av_freep(&ost->st->codec->stats_in);
3158             avcodec_close(ost->st->codec);
3162     /* close each decoder */
3163     for (i = 0; i < nb_input_streams; i++) {
3164         ist = input_streams[i];
3165         if (ist->decoding_needed) {
3166             avcodec_close(ist->st->codec);
     /* final cleanup of per-output-stream resources */
3174     av_freep(&no_packet);
3176     if (output_streams) {
3177         for (i = 0; i < nb_output_streams; i++) {
3178             ost = output_streams[i];
3180                 if (ost->stream_copy)
3181                     av_freep(&ost->st->codec->extradata);
3183                     fclose(ost->logfile);
3184                     ost->logfile = NULL;
3186                 av_fifo_free(ost->fifo); /* works even if fifo is not
3187                                             initialized but set to zero */
3188                 av_freep(&ost->st->codec->subtitle_header);
3189                 av_free(ost->forced_kf_pts);
3191                     audio_resample_close(ost->resample);
3192                 if (ost->reformat_ctx)
3193                     av_audio_convert_free(ost->reformat_ctx);
3194                 av_dict_free(&ost->opts);
/* Parse an aspect-ratio argument in either "X:Y" (two integers) or plain
 * floating-point form; invalid input is FATAL.  Validation of the parsed
 * value (and the exit call) is on elided lines. */
3201 static double parse_frame_aspect_ratio(const char *arg)
3208     p = strchr(arg, ':');
3210         x = strtol(arg, &end, 10);
3212             y = strtol(end + 1, &end, 10);
3214             ar = (double)x / (double)y;
3216         ar = strtod(arg, NULL);
3219         av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
/* Option handler for -acodec: alias for "-codec:a". */
3225 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
3227     return parse_option(o, "codec:a", arg, options);
/* Option handler for -vcodec: alias for "-codec:v". */
3230 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
3232     return parse_option(o, "codec:v", arg, options);
/* Option handler for -scodec: alias for "-codec:s". */
3235 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
3237     return parse_option(o, "codec:s", arg, options);
/* Option handler for -dcodec: alias for "-codec:d". */
3240 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
3242     return parse_option(o, "codec:d", arg, options);
/* Option handler for -map: parse a stream-map specifier of the form
 *   [file_idx[:stream_spec]][,sync_spec]  or  [linklabel]
 * and append matching StreamMap entries to o->stream_maps.  A leading '-'
 * (negative map, handled on elided lines) disables earlier matches. */
3245 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
3247     StreamMap *m = NULL;
3248     int i, negative = 0, file_idx;
3249     int sync_file_idx = -1, sync_stream_idx;
3257     map = av_strdup(arg);
3259     /* parse sync stream first, just pick first matching stream */
     /* NOTE(review): intentional assignment-in-condition below; some
      * compilers warn — the original upstream code reads the same way */
3260     if (sync = strchr(map, ',')) {
3262         sync_file_idx = strtol(sync + 1, &sync, 0);
3263         if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
3264             av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
3269         for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
3270             if (check_stream_specifier(input_files[sync_file_idx]->ctx,
3271                                        input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
3272                 sync_stream_idx = i;
3275         if (i == input_files[sync_file_idx]->nb_streams) {
3276             av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
3277                                        "match any streams.\n", arg);
3283     if (map[0] == '[') {
3284         /* this mapping refers to lavfi output */
3285         const char *c = map + 1;
3286         o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3287                                     &o->nb_stream_maps, o->nb_stream_maps + 1);
3288         m = &o->stream_maps[o->nb_stream_maps - 1];
3289         m->linklabel = av_get_token(&c, "]");
3290         if (!m->linklabel) {
3291             av_log(NULL, AV_LOG_ERROR, "Invalid output link label: %s.\n", map);
3295         file_idx = strtol(map, &p, 0);
3296         if (file_idx >= nb_input_files || file_idx < 0) {
3297             av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
3301             /* disable some already defined maps */
3302             for (i = 0; i < o->nb_stream_maps; i++) {
3303                 m = &o->stream_maps[i];
3304                 if (file_idx == m->file_index &&
3305                     check_stream_specifier(input_files[m->file_index]->ctx,
3306                                            input_files[m->file_index]->ctx->streams[m->stream_index],
3307                                            *p == ':' ? p + 1 : p) > 0)
3311             for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
3312                 if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
3313                             *p == ':' ? p + 1 : p) <= 0)
3315                 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3316                                             &o->nb_stream_maps, o->nb_stream_maps + 1);
3317                 m = &o->stream_maps[o->nb_stream_maps - 1];
3319                 m->file_index   = file_idx;
3320                 m->stream_index = i;
                 /* default sync source is the mapped stream itself */
3322                 if (sync_file_idx >= 0) {
3323                     m->sync_file_index   = sync_file_idx;
3324                     m->sync_stream_index = sync_stream_idx;
3326                     m->sync_file_index   = file_idx;
3327                     m->sync_stream_index = i;
3333         av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
/* Option handler for -attach: queue a filename to be embedded as an
 * attachment stream in the next output file. */
3341 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
3343     o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
3344                                 &o->nb_attachments, o->nb_attachments + 1);
3345     o->attachments[o->nb_attachments - 1] = arg;
3350  * Parse a metadata specifier in arg.
3351  * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
3352  * @param index for type c/p, chapter/program index is written here
3353  * @param stream_spec for type s, the stream specifier is written here
3355 static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
     /* 's' case: everything after the optional ':' is the stream specifier */
3363         if (*(++arg) && *arg != ':') {
3364             av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
3367         *stream_spec = *arg == ':' ? arg + 1 : "";
     /* 'c'/'p' case: optional ":N" selects chapter/program index N */
3371         if (*(++arg) == ':')
3372             *index = strtol(++arg, NULL, 0);
3375         av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
/* Implement -map_metadata: copy a metadata dictionary from input context
 * 'ic' to output context 'oc' according to the in/out specifiers (global,
 * per-stream, per-chapter, per-program).  Existing output entries are
 * preserved (AV_DICT_DONT_OVERWRITE). */
3382 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3384     AVDictionary **meta_in = NULL;
3385     AVDictionary **meta_out;
3387     char type_in, type_out;
3388     const char *istream_spec = NULL, *ostream_spec = NULL;
3389     int idx_in = 0, idx_out = 0;
3391     parse_meta_type(inspec,  &type_in,  &idx_in,  &istream_spec);
3392     parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
     /* record that the user mapped metadata manually, so the automatic
      * copy is suppressed for that scope */
3394     if (type_in == 'g' || type_out == 'g')
3395         o->metadata_global_manual = 1;
3396     if (type_in == 's' || type_out == 's')
3397         o->metadata_streams_manual = 1;
3398     if (type_in == 'c' || type_out == 'c')
3399         o->metadata_chapters_manual = 1;
/* Bounds-check a chapter/program index (fatal on failure). */
3401 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3402     if ((index) < 0 || (index) >= (nb_elems)) {\
3403         av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
/* Resolve 'meta' to the dictionary selected by (type, index) in 'context'.
 * The 's' case is handled separately below since it needs a stream search. */
3408 #define SET_DICT(type, meta, context, index)\
3411             meta = &context->metadata;\
3414             METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3415             meta = &context->chapters[index]->metadata;\
3418             METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3419             meta = &context->programs[index]->metadata;\
3423     SET_DICT(type_in, meta_in, ic, idx_in);
3424     SET_DICT(type_out, meta_out, oc, idx_out);
3426     /* for input streams choose first matching stream */
3427     if (type_in == 's') {
3428         for (i = 0; i < ic->nb_streams; i++) {
3429             if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3430                 meta_in = &ic->streams[i]->metadata;
3436             av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
     /* for output streams, copy into every matching stream */
3441     if (type_out == 's') {
3442         for (i = 0; i < oc->nb_streams; i++) {
3443             if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3444                 meta_out = &oc->streams[i]->metadata;
3445                 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3450         av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
/* Look up an encoder or decoder by name; abort with a FATAL log (exit call
 * on an elided line) if it does not exist or is of the wrong media type. */
3455 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3457     const char *codec_string = encoder ? "encoder" : "decoder";
3461         avcodec_find_encoder_by_name(name) :
3462         avcodec_find_decoder_by_name(name);
3464         av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
3467     if (codec->type != type) {
3468         av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
/* Pick the decoder for a stream: the user-forced one (per-stream -codec
 * option) if given — also forcing the stream's codec_id to match — else
 * the default decoder for the stream's codec_id. */
3474 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
3476     char *codec_name = NULL;
3478     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3480         AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
3481         st->codec->codec_id = codec->id;
3484         return avcodec_find_decoder(st->codec->codec_id);
3488  * Add all the streams from the given input file to the global
3489  * list of input streams.
3491 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
3495     for (i = 0; i < ic->nb_streams; i++) {
3496         AVStream *st = ic->streams[i];
3497         AVCodecContext *dec = st->codec;
3498         InputStream *ist = av_mallocz(sizeof(*ist));
3503         input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
3504         input_streams[nb_input_streams - 1] = ist;
3507         ist->file_index = nb_input_files;
         /* demuxer-level discard; re-enabled later for mapped streams */
3509         st->discard  = AVDISCARD_ALL;
3510         ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
3512         ist->ts_scale = 1.0;
3513         MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
3515         ist->dec = choose_decoder(o, ic, st);
3517         switch (dec->codec_type) {
3518         case AVMEDIA_TYPE_VIDEO:
             /* lowres decoding halves width/height per level */
3520                 dec->flags |= CODEC_FLAG_EMU_EDGE;
3521                 dec->height >>= dec->lowres;
3522                 dec->width  >>= dec->lowres;
             /* remember the initial geometry to detect mid-stream changes */
3525             ist->resample_height  = dec->height;
3526             ist->resample_width   = dec->width;
3527             ist->resample_pix_fmt = dec->pix_fmt;
3530         case AVMEDIA_TYPE_AUDIO:
3531         case AVMEDIA_TYPE_DATA:
3532         case AVMEDIA_TYPE_SUBTITLE:
3533         case AVMEDIA_TYPE_ATTACHMENT:
3534         case AVMEDIA_TYPE_UNKNOWN:
/* If the target file already exists and -y was not given, either prompt
 * the user interactively or bail out (when prompting is impossible).  Only
 * plain/"file:" paths are checked; protocol URLs are skipped — the
 * filename[1] == ':' test presumably admits Windows drive letters (TODO
 * confirm against upstream). */
3542 static void assert_file_overwrite(const char *filename)
3544     if (!file_overwrite &&
3545         (strchr(filename, ':') == NULL || filename[1] == ':' ||
3546          av_strstart(filename, "file:", NULL))) {
3547         if (avio_check(filename, 0) == 0) {
3549                 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3551                 if (!read_yesno()) {
3552                     fprintf(stderr, "Not overwriting - exiting\n");
3557                 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
/* Implement -dump_attachment: write an attachment stream's extradata to a
 * file.  If no filename was given on the command line, fall back to the
 * stream's "filename" metadata tag; having neither is FATAL. */
3564 static void dump_attachment(AVStream *st, const char *filename)
3567     AVIOContext *out = NULL;
3568     AVDictionaryEntry *e;
3570     if (!st->codec->extradata_size) {
3571         av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3572                nb_input_files - 1, st->index);
3575     if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3576         filename = e->value;
     /* NOTE(review): the two string pieces below concatenate to
      * "...'filename' tagin stream..." — a space is missing; fixing the
      * literal is a behavioral change and is deliberately not done here */
3578         av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
3579                "in stream #%d:%d.\n", nb_input_files - 1, st->index);
3583     assert_file_overwrite(filename);
3585     if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3586         av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
3591     avio_write(out, st->codec->extradata, st->codec->extradata_size);
/* Handler for -i: open one input file.
 * Forwards per-file CLI options (sample rate, channels, frame rate/size,
 * pixel format) to the demuxer via format_opts, opens the file, probes
 * stream parameters, performs the -ss seek, registers a new InputFile
 * entry and its streams, and dumps any requested attachments.
 * NOTE(review): error-handling branches and closing braces are elided in
 * this view; code below is kept byte-identical. */
3596 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3598 AVFormatContext *ic;
3599 AVInputFormat *file_iformat = NULL;
3603 AVDictionary **opts;
3604 int orig_nb_streams; // number of streams before avformat_find_stream_info
/* -f on the input side: force a specific demuxer */
3607 if (!(file_iformat = av_find_input_format(o->format))) {
3608 av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
3613 if (!strcmp(filename, "-"))
/* remember whether stdin is consumed, so interactive prompts can be avoided */
3616 using_stdin |= !strncmp(filename, "pipe:", 5) ||
3617 !strcmp(filename, "/dev/stdin");
3619 /* get default parameters from command line */
3620 ic = avformat_alloc_context();
3622 print_error(filename, AVERROR(ENOMEM));
/* push the last value of each repeatable option into the demuxer dict */
3625 if (o->nb_audio_sample_rate) {
3626 snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3627 av_dict_set(&format_opts, "sample_rate", buf, 0);
3629 if (o->nb_audio_channels) {
3630 snprintf(buf, sizeof(buf), "%d", o->audio_channels[o->nb_audio_channels - 1].u.i);
3631 av_dict_set(&format_opts, "channels", buf, 0);
3633 if (o->nb_frame_rates) {
3634 av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3636 if (o->nb_frame_sizes) {
3637 av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3639 if (o->nb_frame_pix_fmts)
3640 av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3642 ic->flags |= AVFMT_FLAG_NONBLOCK;
3643 ic->interrupt_callback = int_cb;
3645 /* open the input file with generic libav function */
3646 err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3648 print_error(filename, err);
/* any option left in format_opts was not consumed by the demuxer: abort */
3651 assert_avoptions(format_opts);
3653 /* apply forced codec ids */
3654 for (i = 0; i < ic->nb_streams; i++)
3655 choose_decoder(o, ic, ic->streams[i]);
3657 /* Set AVCodecContext options for avformat_find_stream_info */
3658 opts = setup_find_stream_info_opts(ic, codec_opts);
3659 orig_nb_streams = ic->nb_streams;
3661 /* If not enough info to get the stream parameters, we decode the
3662 first frames to get it. (used in mpeg case for example) */
3663 ret = avformat_find_stream_info(ic, opts);
3665 av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3666 avformat_close_input(&ic);
3670 timestamp = o->start_time;
3671 /* add the stream start time */
3672 if (ic->start_time != AV_NOPTS_VALUE)
3673 timestamp += ic->start_time;
3675 /* if seeking requested, we execute it */
3676 if (o->start_time != 0) {
3677 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
3679 av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3680 filename, (double)timestamp / AV_TIME_BASE);
3684 /* update the current parameters so that they match the one of the input stream */
3685 add_input_streams(o, ic);
3687 /* dump the file content */
3688 av_dump_format(ic, nb_input_files, filename, 0);
/* register the InputFile record for this input */
3690 input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3691 if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
3694 input_files[nb_input_files - 1]->ctx = ic;
3695 input_files[nb_input_files - 1]->ist_index = nb_input_streams - ic->nb_streams;
3696 input_files[nb_input_files - 1]->ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3697 input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
3698 input_files[nb_input_files - 1]->rate_emu = o->rate_emu;
/* -dump_attachment: extract matching attachment streams to files */
3700 for (i = 0; i < o->nb_dump_attachment; i++) {
3703 for (j = 0; j < ic->nb_streams; j++) {
3704 AVStream *st = ic->streams[j];
3706 if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3707 dump_attachment(st, o->dump_attachment[i].u.str);
/* free the per-stream option dicts created for find_stream_info */
3711 for (i = 0; i < orig_nb_streams; i++)
3712 av_dict_free(&opts[i]);
/* Parse the -force_key_frames argument: a comma-separated list of
 * timestamps.  Each entry is converted with parse_time_or_die and rescaled
 * from AV_TIME_BASE to the encoder time base into ost->forced_kf_pts.
 * NOTE(review): the loop that counts commas into 'n' is elided here. */
3719 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3720 AVCodecContext *avctx)
3726 for (p = kf; *p; p++)
3729 ost->forced_kf_count = n;
3730 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3731 if (!ost->forced_kf_pts) {
3732 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3735 for (i = 0; i < n; i++) {
/* advance to the element after the previous comma (first element: kf itself) */
3736 p = i ? strchr(p, ',') + 1 : kf;
3737 t = parse_time_or_die("force_key_frames", p, 1);
3738 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Read one '\n'-terminated (or EOF-terminated) line from 's' into a
 * freshly allocated buffer via a dynamic buffer; caller owns the result. */
3742 static uint8_t *get_line(AVIOContext *s)
3748 if (avio_open_dyn_buf(&line) < 0) {
3749 av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
3753 while ((c = avio_r8(s)) && c != '\n')
3756 avio_close_dyn_buf(line, &buf);
/* Locate and open an .avpreset file for 'preset_name'.  Searches each base
 * directory (AVCONV_DATADIR first; index 1 gets "/.avconv" appended,
 * i.e. the home directory case), trying the codec-specific name
 * "<codec>-<preset>.avpreset" before the generic "<preset>.avpreset".
 * Returns the avio_open2 result; *s is the opened context on success. */
3761 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3764 char filename[1000];
3765 const char *base[3] = { getenv("AVCONV_DATADIR"),
3770 for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
3774 snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3775 i != 1 ? "" : "/.avconv", codec_name, preset_name);
3776 ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/* codec-specific preset not found: fall back to the generic name */
3779 snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3780 i != 1 ? "" : "/.avconv", preset_name);
3781 ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/* Select the encoder for an output stream.  With no -c option the codec id
 * is guessed from the output format/filename; "copy" enables streamcopy;
 * otherwise the named encoder is looked up (fatal if unknown). */
3787 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
3789 char *codec_name = NULL;
3791 MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
3793 ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3794 NULL, ost->st->codec->codec_type);
3795 ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3796 } else if (!strcmp(codec_name, "copy"))
3797 ost->stream_copy = 1;
3799 ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3800 ost->st->codec->codec_id = ost->enc->id;
/* Allocate an AVStream plus the avconv-side OutputStream wrapper for the
 * given media type, then apply generic per-stream options: codec choice,
 * preset file, -frames, bitstream filter chain, codec tag, qscale, and the
 * global-header flag required by the container.
 * NOTE(review): several lines (error exits, loop heads, braces) are elided
 * in this view; code below is kept byte-identical. */
3804 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
3807 AVStream *st = avformat_new_stream(oc, NULL);
3808 int idx = oc->nb_streams - 1, ret = 0;
3809 char *bsf = NULL, *next, *codec_tag = NULL;
3810 AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3812 char *buf = NULL, *arg = NULL, *preset = NULL;
3813 AVIOContext *s = NULL;
3816 av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
/* apply a user-specified stream id (-streamid) if one was mapped */
3820 if (oc->nb_streams - 1 < o->nb_streamid_map)
3821 st->id = o->streamid_map[oc->nb_streams - 1];
3823 output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3824 nb_output_streams + 1);
3825 if (!(ost = av_mallocz(sizeof(*ost))))
3827 output_streams[nb_output_streams - 1] = ost;
3829 ost->file_index = nb_output_files;
3832 st->codec->codec_type = type;
3833 choose_encoder(o, oc, ost);
3835 ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3838 avcodec_get_context_defaults3(st->codec, ost->enc);
3839 st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
/* -pre: read "key=value" lines from a preset file into ost->opts */
3841 MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3842 if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
3845 if (!buf[0] || buf[0] == '#') {
3849 if (!(arg = strchr(buf, '='))) {
3850 av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
3854 av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3856 } while (!s->eof_reached);
3860 av_log(NULL, AV_LOG_FATAL,
3861 "Preset %s specified for stream %d:%d, but could not be opened.\n",
3862 preset, ost->file_index, ost->index);
3866 ost->max_frames = INT64_MAX;
3867 MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
/* -bsf: build the linked list of bitstream filters, comma-separated */
3869 MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
3871 if (next = strchr(bsf, ','))
3873 if (!(bsfc = av_bitstream_filter_init(bsf))) {
3874 av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3878 bsfc_prev->next = bsfc;
3880 ost->bitstream_filters = bsfc;
/* -tag: numeric value, or up to four characters read little-endian */
3886 MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3888 uint32_t tag = strtol(codec_tag, &next, 0);
3890 tag = AV_RL32(codec_tag);
3891 st->codec->codec_tag = tag;
3894 MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3895 if (qscale >= 0 || same_quant) {
3896 st->codec->flags |= CODEC_FLAG_QSCALE;
3897 st->codec->global_quality = FF_QP2LAMBDA * qscale;
3900 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
3901 st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
3903 av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3905 ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
/* Parse a comma-separated list of integers from 'str' into 'dest'
 * (used for the 64-entry intra/inter quantizer matrices); fatal on a
 * malformed coefficient.  NOTE(review): the parsing loop itself is elided
 * in this view. */
3910 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3913 const char *p = str;
3920 av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
/* Create a video output stream and, unless streamcopy is selected, apply
 * all video-specific encoding options: frame rate/size/aspect/pixel format,
 * quantizer matrices, rc_override ranges, two-pass flags, forced key
 * frames, interlacing options and the per-stream filter chain.
 * NOTE(review): numerous lines (braces, pass-log handling, returns) are
 * elided in this view; code below is kept byte-identical. */
3927 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
3931 AVCodecContext *video_enc;
3933 ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3935 video_enc = st->codec;
3937 if (!ost->stream_copy) {
3938 const char *p = NULL;
3939 char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
3940 char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
3941 char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
3944 MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3945 if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3946 av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3950 MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3951 if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3952 av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
3956 MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
3957 if (frame_aspect_ratio)
3958 ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
3960 MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
3961 if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
3962 av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
3965 st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
/* -intra_matrix / -inter_matrix: 64 comma-separated coefficients each */
3967 MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
3969 if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
3970 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
3973 parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
3975 MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
3977 if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
3978 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
3981 parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
/* -rc_override: "start,end,q" triplets; q<0 encodes a quality factor */
3984 MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
3985 for (i = 0; p; i++) {
3987 int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
3989 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
3992 video_enc->rc_override =
3993 av_realloc(video_enc->rc_override,
3994 sizeof(RcOverride) * (i + 1));
3995 video_enc->rc_override[i].start_frame = start;
3996 video_enc->rc_override[i].end_frame = end;
3998 video_enc->rc_override[i].qscale = q;
3999 video_enc->rc_override[i].quality_factor = 1.0;
4002 video_enc->rc_override[i].qscale = 0;
4003 video_enc->rc_override[i].quality_factor = -q/100.0;
4008 video_enc->rc_override_count = i;
4009 if (!video_enc->rc_initial_buffer_occupancy)
4010 video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
4011 video_enc->intra_dc_precision = intra_dc_precision - 8;
/* two-pass encoding flags (pass log handling elided in this view) */
4016 video_enc->flags |= CODEC_FLAG_PASS1;
4018 video_enc->flags |= CODEC_FLAG_PASS2;
4022 MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
4023 if (forced_key_frames)
4024 parse_forced_key_frames(forced_key_frames, ost, video_enc);
4026 MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
4028 ost->top_field_first = -1;
4029 MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
4031 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
4033 ost->avfilter = av_strdup(filters);
4035 MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
/* Create an audio output stream; unless streamcopy is selected, apply the
 * per-stream channel count, sample format and sample rate options. */
4041 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
4045 AVCodecContext *audio_enc;
4047 ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
4050 audio_enc = st->codec;
4051 audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
4053 if (!ost->stream_copy) {
4054 char *sample_fmt = NULL;
4056 MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
4058 MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
4060 (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
4061 av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
4065 MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
/* Create a data output stream; only streamcopy is supported — encoding a
 * data stream is a fatal error. */
4071 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
4075 ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
4076 if (!ost->stream_copy) {
4077 av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
/* Create an attachment output stream; attachments are always streamcopied. */
4084 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
4086 OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
4087 ost->stream_copy = 1;
/* Create a subtitle output stream. */
4091 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
4095 AVCodecContext *subtitle_enc;
4097 ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
4099 subtitle_enc = st->codec;
4101 subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
4106 /* arg format is "output-stream-index:streamid-value". */
/* Handler for -streamid: record a user-chosen stream id, applied later by
 * new_output_stream() when that output stream index is created.  Fatal on
 * a missing ':' separator or a non-integer index/value. */
4107 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
4113 av_strlcpy(idx_str, arg, sizeof(idx_str));
4114 p = strchr(idx_str, ':');
4116 av_log(NULL, AV_LOG_FATAL,
4117 "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
4122 idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
/* grow the map on demand so sparse indices work */
4123 o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
4124 o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
/* Copy chapters from an input file to the output file, shifting them by the
 * output start time / input ts offset and dropping chapters entirely
 * outside the recording window.  Chapter metadata is copied when
 * 'copy_metadata' is set.  Returns 0 on success, AVERROR(ENOMEM) on
 * allocation failure. */
4128 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
4130 AVFormatContext *is = ifile->ctx;
4131 AVFormatContext *os = ofile->ctx;
4134 for (i = 0; i < is->nb_chapters; i++) {
4135 AVChapter *in_ch = is->chapters[i], *out_ch;
/* offset between input and output timelines, in the chapter's time base */
4136 int64_t ts_off = av_rescale_q(ofile->start_time - ifile->ts_offset,
4137 AV_TIME_BASE_Q, in_ch->time_base);
4138 int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
4139 av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
/* skip chapters that end before the output starts ... */
4142 if (in_ch->end < ts_off)
/* ... or start after the recording time is exhausted */
4144 if (rt != INT64_MAX && in_ch->start > rt + ts_off)
4147 out_ch = av_mallocz(sizeof(AVChapter));
4149 return AVERROR(ENOMEM);
4151 out_ch->id = in_ch->id;
4152 out_ch->time_base = in_ch->time_base;
/* clamp the shifted chapter to [0, recording_time] */
4153 out_ch->start = FFMAX(0, in_ch->start - ts_off);
4154 out_ch->end = FFMIN(rt, in_ch->end - ts_off);
4157 av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
4160 os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
4162 return AVERROR(ENOMEM);
4163 os->chapters[os->nb_chapters - 1] = out_ch;
/* Bind a filtergraph output pad to a newly created output stream.  Only
 * video pads are supported; the stream has no input source (source_index
 * is -1, it is fed by the filtergraph), and the temporary AVFilterInOut
 * describing the pad is freed after the filter is configured. */
4168 static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
4169 AVFormatContext *oc)
4173 if (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type != AVMEDIA_TYPE_VIDEO) {
4174 av_log(NULL, AV_LOG_FATAL, "Only video filters are supported currently.\n");
4178 ost = new_video_stream(o, oc);
4179 ost->source_index = -1;
4180 ost->filter = ofilter;
4184 if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
4185 av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
4188 avfilter_inout_free(&ofilter->out_tmp);
/* Create one output file: guess or force the muxer, create output streams
 * (from unlabeled filtergraph outputs, automatic "best stream" selection,
 * or explicit -map entries), attach files given with -attach, register the
 * OutputFile record, open the output AVIOContext, and apply metadata- and
 * chapter-mapping options.
 * NOTE(review): this chunk elides many original lines (braces, exit_program
 * calls, several conditionals); code below is kept byte-identical. */
4191 static void opt_output_file(void *optctx, const char *filename)
4193 OptionsContext *o = optctx;
4194 AVFormatContext *oc;
4196 AVOutputFormat *file_oformat;
4200 if (configure_complex_filters() < 0) {
4201 av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
4205 if (!strcmp(filename, "-"))
4208 oc = avformat_alloc_context();
4210 print_error(filename, AVERROR(ENOMEM));
/* -f on the output side, otherwise guess the muxer from the filename */
4215 file_oformat = av_guess_format(o->format, NULL, NULL);
4216 if (!file_oformat) {
4217 av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
4221 file_oformat = av_guess_format(NULL, filename, NULL);
4222 if (!file_oformat) {
4223 av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
4229 oc->oformat = file_oformat;
4230 oc->interrupt_callback = int_cb;
4231 av_strlcpy(oc->filename, filename, sizeof(oc->filename));
4233 /* create streams for all unlabeled output pads */
4234 for (i = 0; i < nb_filtergraphs; i++) {
4235 FilterGraph *fg = filtergraphs[i];
4236 for (j = 0; j < fg->nb_outputs; j++) {
4237 OutputFilter *ofilter = fg->outputs[j];
4239 if (!ofilter->out_tmp || ofilter->out_tmp->name)
/* a filter output of this type replaces automatic stream selection */
4242 switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4243 case AVMEDIA_TYPE_VIDEO: o->video_disable = 1; break;
4244 case AVMEDIA_TYPE_AUDIO: o->audio_disable = 1; break;
4245 case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
4247 init_output_filter(ofilter, o, oc);
4251 if (!o->nb_stream_maps) {
4252 /* pick the "best" stream of each type */
4253 #define NEW_STREAM(type, index)\
4255 ost = new_ ## type ## _stream(o, oc);\
4256 ost->source_index = index;\
4257 ost->sync_ist = input_streams[index];\
4258 input_streams[index]->discard = 0;\
4259 input_streams[index]->st->discard = AVDISCARD_NONE;\
4262 /* video: highest resolution */
4263 if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
4264 int area = 0, idx = -1;
4265 for (i = 0; i < nb_input_streams; i++) {
4266 ist = input_streams[i];
4267 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
4268 ist->st->codec->width * ist->st->codec->height > area) {
4269 area = ist->st->codec->width * ist->st->codec->height;
4273 NEW_STREAM(video, idx);
4276 /* audio: most channels */
4277 if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
4278 int channels = 0, idx = -1;
4279 for (i = 0; i < nb_input_streams; i++) {
4280 ist = input_streams[i];
4281 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
4282 ist->st->codec->channels > channels) {
4283 channels = ist->st->codec->channels;
4287 NEW_STREAM(audio, idx);
4290 /* subtitles: pick first */
4291 if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
4292 for (i = 0; i < nb_input_streams; i++)
4293 if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
4294 NEW_STREAM(subtitle, i);
4298 /* do something with data? */
/* explicit mapping: honor each -map entry in order */
4300 for (i = 0; i < o->nb_stream_maps; i++) {
4301 StreamMap *map = &o->stream_maps[i];
4306 if (map->linklabel) {
4308 OutputFilter *ofilter = NULL;
/* -map [label]: locate the matching filtergraph output */
4311 for (j = 0; j < nb_filtergraphs; j++) {
4312 fg = filtergraphs[j];
4313 for (k = 0; k < fg->nb_outputs; k++) {
4314 AVFilterInOut *out = fg->outputs[k]->out_tmp;
4315 if (out && !strcmp(out->name, map->linklabel)) {
4316 ofilter = fg->outputs[k];
4323 av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
4324 "in any defined filter graph.\n", map->linklabel);
4327 init_output_filter(ofilter, o, oc);
4329 ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
4330 switch (ist->st->codec->codec_type) {
4331 case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
4332 case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
4333 case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
4334 case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
4335 case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
4337 av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
4338 map->file_index, map->stream_index);
4342 ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
4343 ost->sync_ist = input_streams[input_files[map->sync_file_index]->ist_index +
4344 map->sync_stream_index];
4346 ist->st->discard = AVDISCARD_NONE;
4351 /* handle attached files */
4352 for (i = 0; i < o->nb_attachments; i++) {
4354 uint8_t *attachment;
4358 if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
4359 av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
4363 if ((len = avio_size(pb)) <= 0) {
4364 av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
4368 if (!(attachment = av_malloc(len))) {
4369 av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
4373 avio_read(pb, attachment, len);
/* the attachment's bytes become the stream's extradata */
4375 ost = new_attachment_stream(o, oc);
4376 ost->stream_copy = 0;
4377 ost->source_index = -1;
4378 ost->attachment_filename = o->attachments[i];
4379 ost->st->codec->extradata = attachment;
4380 ost->st->codec->extradata_size = len;
/* record only the basename as the "filename" tag */
4382 p = strrchr(o->attachments[i], '/');
4383 av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
/* register the OutputFile record for this output */
4387 output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
4388 if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
4391 output_files[nb_output_files - 1]->ctx = oc;
4392 output_files[nb_output_files - 1]->ost_index = nb_output_streams - oc->nb_streams;
4393 output_files[nb_output_files - 1]->recording_time = o->recording_time;
4394 if (o->recording_time != INT64_MAX)
4395 oc->duration = o->recording_time;
4396 output_files[nb_output_files - 1]->start_time = o->start_time;
4397 output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
4398 av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
4400 /* check filename in case of an image number is expected */
4401 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
4402 if (!av_filename_number_test(oc->filename)) {
4403 print_error(oc->filename, AVERROR(EINVAL));
4408 if (!(oc->oformat->flags & AVFMT_NOFILE)) {
4409 /* test if it already exists to avoid losing precious files */
4410 assert_file_overwrite(filename);
4413 if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
4414 &oc->interrupt_callback,
4415 &output_files[nb_output_files - 1]->opts)) < 0) {
4416 print_error(filename, err);
4421 if (o->mux_preload) {
4423 snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
4424 av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
4426 oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
4427 oc->flags |= AVFMT_FLAG_NONBLOCK;
/* -map_metadata: "file[:spec]" — copy metadata from the given input */
4430 for (i = 0; i < o->nb_metadata_map; i++) {
4432 int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
4434 if (in_file_index < 0)
4436 if (in_file_index >= nb_input_files) {
4437 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4440 copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
/* -map_chapters, or by default the first input that has chapters */
4444 if (o->chapters_input_file >= nb_input_files) {
4445 if (o->chapters_input_file == INT_MAX) {
4446 /* copy chapters from the first input file that has them*/
4447 o->chapters_input_file = -1;
4448 for (i = 0; i < nb_input_files; i++)
4449 if (input_files[i]->ctx->nb_chapters) {
4450 o->chapters_input_file = i;
4454 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4455 o->chapters_input_file);
4459 if (o->chapters_input_file >= 0)
4460 copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
4461 !o->metadata_chapters_manual);
4463 /* copy global metadata by default */
4464 if (!o->metadata_global_manual && nb_input_files)
4465 av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
4466 AV_DICT_DONT_OVERWRITE);
4467 if (!o->metadata_streams_manual)
4468 for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
4470 if (output_streams[i]->source_index < 0) /* this is true e.g. for attached files */
4472 ist = input_streams[output_streams[i]->source_index];
4473 av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4476 /* process manually set metadata */
4477 for (i = 0; i < o->nb_metadata; i++) {
4480 const char *stream_spec;
4481 int index = 0, j, ret;
4483 val = strchr(o->metadata[i].u.str, '=');
4485 av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4486 o->metadata[i].u.str);
4491 parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
4493 for (j = 0; j < oc->nb_streams; j++) {
4494 if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
/* empty value (key=) deletes the entry */
4495 av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
4499 printf("ret %d, stream_spec %s\n", ret, stream_spec);
4507 if (index < 0 || index >= oc->nb_chapters) {
4508 av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4511 m = &oc->chapters[index]->metadata;
4514 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
4517 av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
4524 /* same option as mencoder */
/* Handler for -pass: accept 1 or 2 and store it in the global 'do_pass'. */
4525 static int opt_pass(const char *opt, const char *arg)
4527 do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
/* Return the user CPU time consumed by this process, in microseconds.
 * Uses getrusage where available, GetProcessTimes on Windows (FILETIME is
 * in 100 ns units, hence the /10), and falls back to wall-clock time. */
4531 static int64_t getutime(void)
4534 struct rusage rusage;
4536 getrusage(RUSAGE_SELF, &rusage);
4537 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4538 #elif HAVE_GETPROCESSTIMES
4540 FILETIME c, e, k, u;
4541 proc = GetCurrentProcess();
4542 GetProcessTimes(proc, &c, &e, &k, &u);
4543 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4545 return av_gettime();
/* Return this process's peak memory usage in bytes: ru_maxrss (reported in
 * KiB, hence * 1024) where available, PeakPagefileUsage on Windows.
 * NOTE(review): the fallback branch for other platforms is elided in this
 * view. */
4549 static int64_t getmaxrss(void)
4551 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4552 struct rusage rusage;
4553 getrusage(RUSAGE_SELF, &rusage);
4554 return (int64_t)rusage.ru_maxrss * 1024;
4555 #elif HAVE_GETPROCESSMEMORYINFO
4557 PROCESS_MEMORY_COUNTERS memcounters;
4558 proc = GetCurrentProcess();
4559 memcounters.cb = sizeof(memcounters);
4560 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4561 return memcounters.PeakPagefileUsage;
/* Handler for -aq: alias for -q:a. */
4567 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4569 return parse_option(o, "q:a", arg, options);
/* Print the one-line usage summary. */
4572 static void show_usage(void)
4574 printf("Hyper fast Audio and Video encoder\n");
4575 printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
/* Print the full -h help text: avconv's own options grouped by category,
 * followed by the generic AVOptions of libavcodec, libavformat and
 * libswscale. */
4579 static void show_help(void)
4581 int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
4582 av_log_set_callback(log_callback_help);
4584 show_help_options(options, "Main options:\n",
4585 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4586 show_help_options(options, "\nAdvanced options:\n",
4587 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4589 show_help_options(options, "\nVideo options:\n",
4590 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4592 show_help_options(options, "\nAdvanced Video options:\n",
4593 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4594 OPT_VIDEO | OPT_EXPERT);
4595 show_help_options(options, "\nAudio options:\n",
4596 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4598 show_help_options(options, "\nAdvanced Audio options:\n",
4599 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4600 OPT_AUDIO | OPT_EXPERT);
4601 show_help_options(options, "\nSubtitle options:\n",
4602 OPT_SUBTITLE | OPT_GRAB,
4604 show_help_options(options, "\nAudio/Video grab options:\n",
4608 show_help_children(avcodec_get_class(), flags);
4609 show_help_children(avformat_get_class(), flags);
4610 show_help_children(sws_get_class(), flags);
/* Handler for -target: configure a standard output profile (vcd, svcd,
 * dvd, dv/dv50), optionally prefixed with "pal-", "ntsc-" or "film-".
 * Without a prefix, the norm is guessed from the frame rates of already
 * opened input streams; failing that the user is told to specify it.
 * Each profile sets codecs, format, size, rate and mux parameters via the
 * regular option machinery.
 * NOTE(review): several lines (norm assignments, braces) are elided in
 * this view; code below is kept byte-identical. */
4613 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4615 enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
4616 static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
4618 if (!strncmp(arg, "pal-", 4)) {
4621 } else if (!strncmp(arg, "ntsc-", 5)) {
4624 } else if (!strncmp(arg, "film-", 5)) {
4628 /* Try to determine PAL/NTSC by peeking in the input files */
4629 if (nb_input_files) {
4631 for (j = 0; j < nb_input_files; j++) {
4632 for (i = 0; i < input_files[j]->nb_streams; i++) {
4633 AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
4634 if (c->codec_type != AVMEDIA_TYPE_VIDEO)
/* frame rate in millihertz, to compare against 25000/29970/23976 */
4636 fr = c->time_base.den * 1000 / c->time_base.num;
4640 } else if ((fr == 29970) || (fr == 23976)) {
4645 if (norm != UNKNOWN)
4649 if (norm != UNKNOWN)
4650 av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4653 if (norm == UNKNOWN) {
4654 av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4655 av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4656 av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
4660 if (!strcmp(arg, "vcd")) {
4661 opt_video_codec(o, "c:v", "mpeg1video");
4662 opt_audio_codec(o, "c:a", "mp2");
4663 parse_option(o, "f", "vcd", options);
4665 parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4666 parse_option(o, "r", frame_rates[norm], options);
4667 opt_default("g", norm == PAL ? "15" : "18");
4669 opt_default("b", "1150000");
4670 opt_default("maxrate", "1150000");
4671 opt_default("minrate", "1150000");
4672 opt_default("bufsize", "327680"); // 40*1024*8;
4674 opt_default("b:a", "224000");
4675 parse_option(o, "ar", "44100", options);
4676 parse_option(o, "ac", "2", options);
4678 opt_default("packetsize", "2324");
4679 opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4681 /* We have to offset the PTS, so that it is consistent with the SCR.
4682 SCR starts at 36000, but the first two packs contain only padding
4683 and the first pack from the other stream, respectively, may also have
4684 been written before.
4685 So the real data starts at SCR 36000+3*1200. */
4686 o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
4687 } else if (!strcmp(arg, "svcd")) {
4689 opt_video_codec(o, "c:v", "mpeg2video");
4690 opt_audio_codec(o, "c:a", "mp2");
4691 parse_option(o, "f", "svcd", options);
4693 parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4694 parse_option(o, "r", frame_rates[norm], options);
4695 opt_default("g", norm == PAL ? "15" : "18");
4697 opt_default("b", "2040000");
4698 opt_default("maxrate", "2516000");
4699 opt_default("minrate", "0"); // 1145000;
4700 opt_default("bufsize", "1835008"); // 224*1024*8;
4701 opt_default("flags", "+scan_offset");
4704 opt_default("b:a", "224000");
4705 parse_option(o, "ar", "44100", options);
4707 opt_default("packetsize", "2324");
4709 } else if (!strcmp(arg, "dvd")) {
4711 opt_video_codec(o, "c:v", "mpeg2video");
4712 opt_audio_codec(o, "c:a", "ac3");
4713 parse_option(o, "f", "dvd", options);
4715 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4716 parse_option(o, "r", frame_rates[norm], options);
4717 opt_default("g", norm == PAL ? "15" : "18");
4719 opt_default("b", "6000000");
4720 opt_default("maxrate", "9000000");
4721 opt_default("minrate", "0"); // 1500000;
4722 opt_default("bufsize", "1835008"); // 224*1024*8;
4724 opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4725 opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4727 opt_default("b:a", "448000");
4728 parse_option(o, "ar", "48000", options);
4730 } else if (!strncmp(arg, "dv", 2)) {
4732 parse_option(o, "f", "dv", options);
4734 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4735 parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4736 norm == PAL ? "yuv420p" : "yuv411p", options);
4737 parse_option(o, "r", frame_rates[norm], options);
4739 parse_option(o, "ar", "48000", options);
4740 parse_option(o, "ac", "2", options);
4743 av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4744 return AVERROR(EINVAL);
/* Handler for -vstats_file: remember the stats filename (replacing any
 * previous value). */
4749 static int opt_vstats_file(const char *opt, const char *arg)
4751 av_free (vstats_filename);
4752 vstats_filename = av_strdup (arg);
/* Handler for -vstats: generate a time-stamped default log filename
 * ("vstats_HHMMSS.log") and delegate to opt_vstats_file(). */
4756 static int opt_vstats(const char *opt, const char *arg)
4759 time_t today2 = time(NULL);
4760 struct tm *today = localtime(&today2);
4762 snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4764 return opt_vstats_file(opt, filename);
/* Thin per-media-type aliases: each legacy option name simply re-enters
 * parse_option() with its stream-specifier form (frames:v, tag:a, ...). */
4767 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4769 return parse_option(o, "frames:v", arg, options);
4772 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4774 return parse_option(o, "frames:a", arg, options);
4777 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4779 return parse_option(o, "frames:d", arg, options);
4782 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4784 return parse_option(o, "tag:v", arg, options);
4787 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4789 return parse_option(o, "tag:a", arg, options);
4792 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4794 return parse_option(o, "tag:s", arg, options);
4797 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4799 return parse_option(o, "filter:v", arg, options);
4802 static int opt_vsync(const char *opt, const char *arg)
4804 if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
4805 else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
4806 else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
4808 if (video_sync_method == VSYNC_AUTO)
4809 video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
/* Handler for the deprecated -deinterlace flag: warn the user to switch to
 * the yadif video filter.
 * NOTE(review): the rest of the body (presumably enabling the legacy
 * deinterlacing path and returning 0) is not visible in this view — confirm
 * against the full source before relying on this summary. */
static int opt_deinterlace(const char *opt, const char *arg)
    /* 'opt' carries the option name the user actually typed */
    av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
/* -cpuflags handler: evaluate a '+'/'-' flag expression (e.g. "-sse3") against
 * the AVOption table below and force libavutil's CPU capability mask via
 * av_set_cpu_flags_mask() — mainly useful for testing.
 * NOTE(review): the local declarations of `flags`/`ret`, the terminators of
 * the option table and AVClass initializer, and the return statements are not
 * visible in this view of the file. */
static int opt_cpuflags(const char *opt, const char *arg)
/* Composite masks: each SIMD level implies every older level it extends,
 * so e.g. requesting "avx" also keeps SSE..SSE4.2 enabled. */
#define CPUFLAG_MMX2     (AV_CPU_FLAG_MMX      | AV_CPU_FLAG_MMX2)
#define CPUFLAG_3DNOW    (AV_CPU_FLAG_3DNOW    | AV_CPU_FLAG_MMX)
#define CPUFLAG_3DNOWEXT (AV_CPU_FLAG_3DNOWEXT | CPUFLAG_3DNOW)
#define CPUFLAG_SSE      (AV_CPU_FLAG_SSE      | CPUFLAG_MMX2)
#define CPUFLAG_SSE2     (AV_CPU_FLAG_SSE2     | CPUFLAG_SSE)
#define CPUFLAG_SSE2SLOW (AV_CPU_FLAG_SSE2SLOW | CPUFLAG_SSE2)
#define CPUFLAG_SSE3     (AV_CPU_FLAG_SSE3     | CPUFLAG_SSE2)
#define CPUFLAG_SSE3SLOW (AV_CPU_FLAG_SSE3SLOW | CPUFLAG_SSE3)
#define CPUFLAG_SSSE3    (AV_CPU_FLAG_SSSE3    | CPUFLAG_SSE3)
#define CPUFLAG_SSE4     (AV_CPU_FLAG_SSE4     | CPUFLAG_SSSE3)
#define CPUFLAG_SSE42    (AV_CPU_FLAG_SSE42    | CPUFLAG_SSE4)
#define CPUFLAG_AVX      (AV_CPU_FLAG_AVX      | CPUFLAG_SSE42)
#define CPUFLAG_XOP      (AV_CPU_FLAG_XOP      | CPUFLAG_AVX)
#define CPUFLAG_FMA4     (AV_CPU_FLAG_FMA4     | CPUFLAG_AVX)
    /* AVOption flag table consumed by av_opt_eval_flags(); every named CPU
     * feature is an AV_OPT_TYPE_CONST in the "flags" unit. */
    static const AVOption cpuflags_opts[] = {
        { "flags"   , NULL, 0, AV_OPT_TYPE_FLAGS, { 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
        { "altivec" , NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_ALTIVEC  },    .unit = "flags" },
        { "mmx"     , NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_MMX      },    .unit = "flags" },
        { "mmx2"    , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_MMX2         },    .unit = "flags" },
        { "sse"     , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE          },    .unit = "flags" },
        { "sse2"    , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE2         },    .unit = "flags" },
        { "sse2slow", NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE2SLOW     },    .unit = "flags" },
        { "sse3"    , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE3         },    .unit = "flags" },
        { "sse3slow", NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE3SLOW     },    .unit = "flags" },
        { "ssse3"   , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSSE3        },    .unit = "flags" },
        { "atom"    , NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_ATOM     },    .unit = "flags" },
        { "sse4.1"  , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE4         },    .unit = "flags" },
        { "sse4.2"  , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE42        },    .unit = "flags" },
        { "avx"     , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_AVX          },    .unit = "flags" },
        { "xop"     , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_XOP          },    .unit = "flags" },
        { "fma4"    , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_FMA4         },    .unit = "flags" },
        { "3dnow"   , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_3DNOW        },    .unit = "flags" },
        { "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_3DNOWEXT     },    .unit = "flags" },
    /* Minimal AVClass so av_opt_eval_flags() can resolve the option table
     * and produce context-aware log messages. */
    static const AVClass class = {
        .class_name = "cpuflags",
        .item_name  = av_default_item_name,
        .option     = cpuflags_opts,
        .version    = LIBAVUTIL_VERSION_INT,
    const AVClass *pclass = &class;

    if ((ret = av_opt_eval_flags(&pclass, &cpuflags_opts[0], arg, &flags)) < 0)

    /* restrict run-time CPU feature detection to the requested mask */
    av_set_cpu_flags_mask(flags);
4874 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
4876 int idx = locate_option(argc, argv, options, "cpuflags");
4877 if (idx && argv[idx + 1])
4878 opt_cpuflags("cpuflags", argv[idx + 1]);
/* -channel_layout handler: translate a layout name (e.g. "stereo") into the
 * numeric codec option 'channel_layout', then derive a matching 'ac'
 * (channel count) option for the same stream specifier so the two stay
 * consistent.
 * NOTE(review): the declarations of `layout`, `stream_str` and `ac_str`, the
 * head of the !layout error branch, the opt_default/av_mallocz error checks,
 * the stream_str guard, and the final av_free/return are not visible in this
 * view of the file. */
static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
    char layout_str[32];
    int ret, channels, ac_str_size;

    layout = av_get_channel_layout(arg);
        /* layout name not recognized by libavutil -> reject the value */
        av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
        return AVERROR(EINVAL);
    /* forward the numeric layout value through the generic option path */
    snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
    ret = opt_default(opt, layout_str);

    /* set 'ac' option based on channel layout */
    channels = av_get_channel_layout_nb_channels(layout);
    snprintf(layout_str, sizeof(layout_str), "%d", channels);
    /* carry any ":stream_spec" suffix of 'opt' over to the synthesized
     * "ac" option name ("ac" + suffix + NUL => 3 + suffix length) */
    stream_str = strchr(opt, ':');
    ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
    ac_str = av_mallocz(ac_str_size);
        return AVERROR(ENOMEM);
    av_strlcpy(ac_str, "ac", 3);
    av_strlcat(ac_str, stream_str, ac_str_size);
    ret = parse_option(o, ac_str, layout_str, options);
4916 static int opt_filter_complex(const char *opt, const char *arg)
4918 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
4919 &nb_filtergraphs, nb_filtergraphs + 1);
4920 if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
4921 return AVERROR(ENOMEM);
4922 filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
4923 filtergraphs[nb_filtergraphs - 1]->graph_desc = arg;
4927 #define OFFSET(x) offsetof(OptionsContext, x)
4928 static const OptionDef options[] = {
4930 #include "cmdutils_common_opts.h"
4931 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4932 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4933 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4934 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4935 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4936 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4937 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4938 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4939 "outfile[,metadata]:infile[,metadata]" },
4940 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4941 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4942 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4943 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4944 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4945 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4946 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4947 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4948 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4949 "add timings for benchmarking" },
4950 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4951 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4952 "dump each input packet" },
4953 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4954 "when dumping packets, also dump the payload" },
4955 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4956 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4957 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4958 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4959 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4960 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)©_ts}, "copy timestamps" },
4961 { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)©_tb}, "copy input stream time base when stream copying" },
4962 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4963 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4964 { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4965 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4966 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4967 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4968 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4969 { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4970 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4971 { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
4972 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4973 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4974 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4975 { "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "set CPU flags mask", "mask" },
4978 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4979 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4980 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4981 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4982 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4983 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4984 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4985 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4986 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4987 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4988 "use same quantizer as source (implies VBR)" },
4989 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4990 { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
4991 { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
4992 "this option is deprecated, use the yadif filter instead" },
4993 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
4994 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
4995 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
4996 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
4997 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
4998 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
4999 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
5000 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
5001 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
5002 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
5003 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
5004 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
5007 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
5008 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
5009 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
5010 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
5011 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
5012 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
5013 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
5014 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
5015 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
5016 { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
5018 /* subtitle options */
5019 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
5020 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
5021 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
5024 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
5027 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
5028 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
5030 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
5032 /* data codec support */
5033 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
5035 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
5039 int main(int argc, char **argv)
5041 OptionsContext o = { 0 };
5046 av_log_set_flags(AV_LOG_SKIP_REPEATED);
5047 parse_loglevel(argc, argv, options);
5049 avcodec_register_all();
5051 avdevice_register_all();
5053 avfilter_register_all();
5055 avformat_network_init();
5059 parse_cpuflags(argc, argv, options);
5062 parse_options(&o, argc, argv, options, opt_output_file);
5064 if (nb_output_files <= 0 && nb_input_files == 0) {
5066 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5070 /* file converter / grab */
5071 if (nb_output_files <= 0) {
5072 fprintf(stderr, "At least one output file must be specified\n");
5076 if (nb_input_files == 0) {
5077 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
5082 if (transcode() < 0)
5084 ti = getutime() - ti;
5086 int maxrss = getmaxrss() / 1024;
5087 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);