2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
37 #include "libavformat/avformat.h"
38 #include "libavdevice/avdevice.h"
39 #include "libswscale/swscale.h"
40 #include "libswresample/swresample.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/audioconvert.h"
43 #include "libavutil/parseutils.h"
44 #include "libavutil/samplefmt.h"
45 #include "libavutil/colorspace.h"
46 #include "libavutil/fifo.h"
47 #include "libavutil/intreadwrite.h"
48 #include "libavutil/dict.h"
49 #include "libavutil/mathematics.h"
50 #include "libavutil/pixdesc.h"
51 #include "libavutil/avstring.h"
52 #include "libavutil/libm.h"
53 #include "libavutil/imgutils.h"
54 #include "libavutil/timestamp.h"
55 #include "libavutil/bprint.h"
56 #include "libavutil/time.h"
57 #include "libavformat/os_support.h"
59 #include "libavformat/ffm.h" // not public API
61 # include "libavfilter/avcodec.h"
62 # include "libavfilter/avfilter.h"
63 # include "libavfilter/avfiltergraph.h"
64 # include "libavfilter/buffersrc.h"
65 # include "libavfilter/buffersink.h"
67 #if HAVE_SYS_RESOURCE_H
68 #include <sys/types.h>
69 #include <sys/resource.h>
70 #elif HAVE_GETPROCESSTIMES
73 #if HAVE_GETPROCESSMEMORYINFO
79 #include <sys/select.h>
84 #include <sys/ioctl.h>
99 #include "libavutil/avassert.h"
/* Video sync modes used by -vsync; the numeric values are part of the CLI
   contract.  NOTE(review): gaps in this view suggest additional VSYNC_*
   values (e.g. CFR/VFR) are defined in the full file. */
101 #define VSYNC_AUTO -1
102 #define VSYNC_PASSTHROUGH 0
105 #define VSYNC_DROP 0xff
/* Program identity consumed by the shared cmdutils banner/version code. */
107 const char program_name[] = "ffmpeg";
108 const int program_birth_year = 2000;
110 /* select an input stream for an output stream */
111 typedef struct StreamMap {
112 int disabled; /** 1 if this mapping is disabled by a negative map */
116 int sync_stream_index;
117 char *linklabel; /** name of an output link, for mapping lavfi outputs */
/* NOTE(review): the fields below appear to belong to a separate
   AudioChannelMap struct (one entry per -map_channel) whose opening
   lines are missing from this view — confirm against the full file. */
121 int file_idx, stream_idx, channel_idx; // input
122 int ofile_idx, ostream_idx; // output
/* File-scope state backing the command-line options.  These are written by
   the option parser and read throughout transcoding; most map 1:1 onto a
   CLI flag (e.g. do_benchmark <- -benchmark, copy_ts <- -copyts). */
125 static const OptionDef *options;
127 #define MAX_STREAMS 1024 /* arbitrary sanity check value */
129 static int frame_bits_per_raw_sample = 0;
130 static int video_discard = 0;
131 static int same_quant = 0;
132 static int do_deinterlace = 0;
133 static int intra_dc_precision = 8;
134 static int qp_hist = 0;
135 static int intra_only = 0;
136 static const char *video_codec_name = NULL;
137 static const char *audio_codec_name = NULL;
138 static const char *subtitle_codec_name = NULL;
140 static int file_overwrite = 0;
141 static int no_file_overwrite = 0;
142 static int do_benchmark = 0;
143 static int do_benchmark_all = 0;
144 static int do_hex_dump = 0;
145 static int do_pkt_dump = 0;
146 static int do_psnr = 0;
147 static int do_pass = 0;
148 static const char *pass_logfilename_prefix;
149 static int video_sync_method = VSYNC_AUTO;
150 static int audio_sync_method = 0;
151 static float audio_drift_threshold = 0.1;
152 static int copy_ts = 0;
153 static int copy_tb = -1;
154 static int opt_shortest = 0;
155 static char *vstats_filename;
156 static FILE *vstats_file;
/* audio_volume is a fixed-point gain: 256 == unity (see the volume-filter
   insertion in configure_input_audio_filter below). */
158 static int audio_volume = 256;
160 static int exit_on_error = 0;
161 static int stdin_interaction = 1;
162 static int run_as_daemon = 0;
/* volatile: written from signal handlers, polled by the main loop. */
163 static volatile int received_nb_signals = 0;
/* running byte totals per stream class, used for the final size report */
164 static int64_t video_size = 0;
165 static int64_t audio_size = 0;
166 static int64_t subtitle_size = 0;
167 static int64_t extra_size = 0;
168 static int nb_frames_dup = 0;
169 static int nb_frames_drop = 0;
170 static int input_sync;
/* timestamp-discontinuity thresholds, in seconds */
172 static float dts_delta_threshold = 10;
173 static float dts_error_threshold = 3600*30;
175 static int print_stats = 1;
176 static int debug_ts = 0;
177 static int current_time;
178 static AVIOContext *progress_avio = NULL;
180 static uint8_t *subtitle_out;
183 /* signal to input threads that they should exit; set by the main thread */
184 static int transcoding_finished;
187 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
/* One lavfi buffer source feeding a graph from a decoder. */
189 typedef struct InputFilter {
190 AVFilterContext *filter;
191 struct InputStream *ist;
192 struct FilterGraph *graph;
/* One lavfi buffer sink delivering frames from a graph to an encoder. */
196 typedef struct OutputFilter {
197 AVFilterContext *filter;
198 struct OutputStream *ost;
199 struct FilterGraph *graph;
202 /* temporary storage until stream maps are processed */
203 AVFilterInOut *out_tmp;
/* A complete filtergraph: its textual description plus the resolved
   input/output endpoints.  Simple per-stream graphs and complex
   -filter_complex graphs both use this structure. */
206 typedef struct FilterGraph {
208 const char *graph_desc;
210 AVFilterGraph *graph;
212 InputFilter **inputs;
214 OutputFilter **outputs;
/* Per-input-stream state: decode buffers, timestamp bookkeeping, and the
   list of filtergraph inputs this stream feeds. */
218 typedef struct InputStream {
221 int discard; /* true if stream data should be discarded */
222 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
224 AVFrame *decoded_frame;
226 int64_t start; /* time when read started */
227 /* predicted dts of the next packet read for this stream or (when there are
228 * several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
230 int64_t dts; ///< dts of the last packet read for this stream (in AV_TIME_BASE units)
232 int64_t next_pts; ///< synthetic pts for the next decode frame (in AV_TIME_BASE units)
233 int64_t pts; ///< current pts of the decoded frame (in AV_TIME_BASE units)
234 int wrap_correction_done;
236 int is_start; /* is 1 at the start and after a discontinuity */
238 int showed_multi_packet_warning;
240 AVRational framerate; /* framerate forced with -r */
/* last-seen input parameters, used to detect mid-stream format changes */
245 int resample_pix_fmt;
247 int resample_sample_fmt;
248 int resample_sample_rate;
249 int resample_channels;
250 uint64_t resample_channel_layout;
/* sub2video canvas state (see sub2video_* helpers below) */
254 AVFilterBufferRef *ref;
258 /* a pool of free buffers for decoded data */
259 FrameBuffer *buffer_pool;
262 /* decoded data from this stream goes into all those filters
263 * currently video and audio only */
264 InputFilter **filters;
/* Per-input-file state; the pthread members support the threaded demuxer
   (one reader thread per file feeding packets through the fifo). */
268 typedef struct InputFile {
269 AVFormatContext *ctx;
270 int eof_reached; /* true if eof reached */
271 int unavailable; /* true if the file is unavailable (possibly temporarily) */
272 int ist_index; /* index of first stream in input_streams */
274 int nb_streams; /* number of streams that ffmpeg is aware of; may be different
275 from ctx.nb_streams if new streams appear during av_read_frame() */
276 int nb_streams_warn; /* number of streams that the user was warned of */
280 pthread_t thread; /* thread reading from this file */
281 int finished; /* the thread has exited */
282 int joined; /* the thread has been joined */
283 pthread_mutex_t fifo_lock; /* lock for access to fifo */
284 pthread_cond_t fifo_cond; /* the main thread will signal on this cond after reading from fifo */
285 AVFifoBuffer *fifo; /* demuxed packets are stored here; freed by the main thread */
/* Per-output-stream state: encoder, sync bookkeeping, bitstream filters,
   and the filtergraph output that feeds this encoder. */
289 typedef struct OutputStream {
290 int file_index; /* file index */
291 int index; /* stream index in the output file */
292 int source_index; /* InputStream index */
293 AVStream *st; /* stream in the output file */
294 int encoding_needed; /* true if encoding needed for this stream */
296 /* input pts and corresponding output pts
298 struct InputStream *sync_ist; /* input stream to sync against */
299 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
300 /* pts of the first frame encoded for this stream, used for limiting
303 AVBitStreamFilterContext *bitstream_filters;
306 AVFrame *filtered_frame;
309 AVRational frame_rate;
313 float frame_aspect_ratio;
316 /* forced key frames */
317 int64_t *forced_kf_pts;
320 char *forced_keyframes;
323 int audio_channels_map[SWR_CH_MAX]; /* list of the channels id to pick from the source stream */
324 int audio_channels_mapped; /* number of channels in audio_channels_map */
328 OutputFilter *filter;
/* swresample dithering parameters (see -dither_method / -dither_scale) */
332 int64_t swr_dither_method;
333 double swr_dither_scale;
335 int is_past_recording_time;
336 int unavailable; /* true if the stream is unavailable (possibly temporarily) */
338 const char *attachment_filename;
339 int copy_initial_nonkeyframes;
347 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored on exit when restore_tty is set. */
348 static struct termios oldtty;
349 static int restore_tty;
/* Per-output-file state shared by all streams muxed into the file. */
352 typedef struct OutputFile {
353 AVFormatContext *ctx;
355 int ost_index; /* index of the first stream in output_streams */
356 int64_t recording_time; ///< desired length of the resulting file in microseconds == AV_TIME_BASE units
357 int64_t start_time; ///< start time in microseconds == AV_TIME_BASE units
358 uint64_t limit_filesize; /* filesize limit expressed in bytes */
/* Flat global registries of all streams/files/graphs; the *File structs
   index into the matching *_streams arrays via ist_index/ost_index. */
361 static InputStream **input_streams = NULL;
362 static int nb_input_streams = 0;
363 static InputFile **input_files = NULL;
364 static int nb_input_files = 0;
366 static OutputStream **output_streams = NULL;
367 static int nb_output_streams = 0;
368 static OutputFile **output_files = NULL;
369 static int nb_output_files = 0;
371 static FilterGraph **filtergraphs;
/* Aggregates all options parsed since the previous input/output file.
   SpecifierOpt arrays are per-stream-specifier options; each is paired
   with an nb_* counter, which reset_options() relies on (it treats the
   int immediately following each SpecifierOpt* as its count). */
374 typedef struct OptionsContext {
375 /* input/output options */
379 SpecifierOpt *codec_names;
381 SpecifierOpt *audio_channels;
382 int nb_audio_channels;
383 SpecifierOpt *audio_sample_rate;
384 int nb_audio_sample_rate;
385 SpecifierOpt *frame_rates;
387 SpecifierOpt *frame_sizes;
389 SpecifierOpt *frame_pix_fmts;
390 int nb_frame_pix_fmts;
/* input options */
393 int64_t input_ts_offset;
396 SpecifierOpt *ts_scale;
398 SpecifierOpt *dump_attachment;
399 int nb_dump_attachment;
/* output options */
402 StreamMap *stream_maps;
404 AudioChannelMap *audio_channel_maps; /* one info entry per -map_channel */
405 int nb_audio_channel_maps; /* number of (valid) -map_channel settings */
406 int metadata_global_manual;
407 int metadata_streams_manual;
408 int metadata_chapters_manual;
409 const char **attachments;
412 int chapters_input_file;
414 int64_t recording_time;
415 uint64_t limit_filesize;
421 int subtitle_disable;
424 /* indexed by output file stream index */
428 SpecifierOpt *metadata;
430 SpecifierOpt *max_frames;
432 SpecifierOpt *bitstream_filters;
433 int nb_bitstream_filters;
434 SpecifierOpt *codec_tags;
436 SpecifierOpt *sample_fmts;
438 SpecifierOpt *qscale;
440 SpecifierOpt *forced_key_frames;
441 int nb_forced_key_frames;
442 SpecifierOpt *force_fps;
444 SpecifierOpt *frame_aspect_ratios;
445 int nb_frame_aspect_ratios;
446 SpecifierOpt *rc_overrides;
448 SpecifierOpt *intra_matrices;
449 int nb_intra_matrices;
450 SpecifierOpt *inter_matrices;
451 int nb_inter_matrices;
452 SpecifierOpt *top_field_first;
453 int nb_top_field_first;
454 SpecifierOpt *metadata_map;
456 SpecifierOpt *presets;
458 SpecifierOpt *copy_initial_nonkeyframes;
459 int nb_copy_initial_nonkeyframes;
460 SpecifierOpt *filters;
464 static void do_video_stats(AVFormatContext *os, OutputStream *ost, int frame_size);
/* Scan a SpecifierOpt array in OptionsContext 'o' and assign to 'outvar'
   the value whose stream specifier matches stream 'st' of 'fmtctx'.
   Later (more specific) matches overwrite earlier ones.  Expects 'i' and
   'ret' to be declared in the enclosing scope.  Comments must stay outside
   the macro body: the lines below are spliced by trailing backslashes. */
466 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
469 for (i = 0; i < o->nb_ ## name; i++) {\
470 char *spec = o->name[i].specifier;\
471 if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
472 outvar = o->name[i].u.type;\
/* Return the user CPU time consumed by this process, in microseconds.
   Uses getrusage() where available, GetProcessTimes() on Windows
   (FILETIME is in 100ns units, hence the /10). */
478 static int64_t getutime(void)
481 struct rusage rusage;
483 getrusage(RUSAGE_SELF, &rusage);
484 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
485 #elif HAVE_GETPROCESSTIMES
488 proc = GetCurrentProcess();
489 GetProcessTimes(proc, &c, &e, &k, &u);
490 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
/* When -benchmark_all is active, print the CPU time elapsed since the
   previous call together with a printf-style label describing the stage. */
496 static void update_benchmark(const char *fmt, ...)
498 if (do_benchmark_all) {
499 int64_t t = getutime();
505 vsnprintf(buf, sizeof(buf), fmt, va);
507 printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
514 Convert subtitles to video with alpha to insert them in filter graphs.
515 This is a temporary solution until libavfilter gets real subtitles support.
/* Set up the sub2video canvas for a subtitle input stream: pick a canvas
   size, force the stream to RGB32, and allocate the blank reference frame
   that will be (re)pushed into the filtergraph.
   Returns 0 on success or a negative AVERROR code. */
519 static int sub2video_prepare(InputStream *ist)
521 AVFormatContext *avf = input_files[ist->file_index]->ctx;
526 /* Compute the size of the canvas for the subtitles stream.
527 If the subtitles codec has set a size, use it. Otherwise use the
528 maximum dimensions of the video streams in the same file. */
529 w = ist->st->codec->width;
530 h = ist->st->codec->height;
532 for (i = 0; i < avf->nb_streams; i++) {
533 if (avf->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
534 w = FFMAX(w, avf->streams[i]->codec->width);
535 h = FFMAX(h, avf->streams[i]->codec->height);
542 av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
544 ist->sub2video.w = ist->st->codec->width = w;
545 ist->sub2video.h = ist->st->codec->height = h;
547 /* rectangles are PIX_FMT_PAL8, but we have no guarantee that the
548 palettes for all rectangles are identical or compatible */
549 ist->st->codec->pix_fmt = PIX_FMT_RGB32;
/* 32-byte alignment for the image planes, canvas starts fully transparent */
551 ret = av_image_alloc(image, linesize, w, h, PIX_FMT_RGB32, 32);
554 memset(image[0], 0, h * linesize[0]);
555 ist->sub2video.ref = avfilter_get_video_buffer_ref_from_arrays(
556 image, linesize, AV_PERM_READ | AV_PERM_PRESERVE,
557 w, h, PIX_FMT_RGB32);
558 if (!ist->sub2video.ref) {
560 return AVERROR(ENOMEM);
/* Blit one bitmap subtitle rectangle onto the RGB32 canvas, expanding the
   rectangle's PAL8 pixels through its palette.  Rectangles that are not
   bitmaps or that fall outside the w x h canvas are skipped with a warning. */
565 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
568 uint32_t *pal, *dst2;
572 if (r->type != SUBTITLE_BITMAP) {
573 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
576 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
577 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
/* advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel */
581 dst += r->y * dst_linesize + r->x * 4;
582 src = r->pict.data[0];
583 pal = (uint32_t *)r->pict.data[1];
584 for (y = 0; y < r->h; y++) {
585 dst2 = (uint32_t *)dst;
587 for (x = 0; x < r->w; x++)
588 *(dst2++) = pal[*(src2++)];
590 src += r->pict.linesize[0];
/* Push the current sub2video canvas (with the given pts) into every
   filtergraph input fed by this stream.  Each buffersrc gets its own
   reference (avfilter_ref_buffer); format checks and copies are skipped
   since the canvas format is fixed and the buffer is reused. */
594 static void sub2video_push_ref(InputStream *ist, int64_t pts)
596 AVFilterBufferRef *ref = ist->sub2video.ref;
599 ist->sub2video.last_pts = ref->pts = pts;
600 for (i = 0; i < ist->nb_filters; i++)
601 av_buffersrc_add_ref(ist->filters[i]->filter,
602 avfilter_ref_buffer(ref, ~0),
603 AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
604 AV_BUFFERSRC_FLAG_NO_COPY);
/* Re-render the canvas for a newly decoded subtitle: clear it, blit every
   rectangle, then push the result into the filtergraphs. */
607 static void sub2video_update(InputStream *ist, AVSubtitle *sub, int64_t pts)
609 int w = ist->sub2video.w, h = ist->sub2video.h;
610 AVFilterBufferRef *ref = ist->sub2video.ref;
618 dst_linesize = ref->linesize[0];
619 memset(dst, 0, h * dst_linesize);
620 for (i = 0; i < sub->num_rects; i++)
621 sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
622 sub2video_push_ref(ist, pts);
/* Keep sub2video filtergraph inputs fed while other streams advance, so
   filters waiting on a subtitle input do not stall the whole graph. */
625 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
627 InputFile *infile = input_files[ist->file_index];
631 /* When a frame is read from a file, examine all sub2video streams in
632 the same file and send the sub2video frame again. Otherwise, decoded
633 video frames could be accumulating in the filter graph while a filter
634 (possibly overlay) is desperately waiting for a subtitle frame. */
635 for (i = 0; i < infile->nb_streams; i++) {
636 InputStream *ist2 = input_streams[infile->ist_index + i];
637 if (!ist2->sub2video.ref)
639 /* subtitles seem to be usually muxed ahead of other streams;
640 if not, subtracting a larger time here is necessary */
641 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
642 /* do not send the heartbeat frame if the subtitle is already ahead */
643 if (pts2 <= ist2->sub2video.last_pts)
/* only re-push when some downstream filter actually requested frames */
645 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
646 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
648 sub2video_push_ref(ist2, pts2);
/* Signal EOF to every filtergraph input fed by this sub2video stream
   (a NULL ref marks end of stream for av_buffersrc_add_ref). */
652 static void sub2video_flush(InputStream *ist)
656 for (i = 0; i < ist->nb_filters; i++)
657 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
660 /* end of sub2video hack */
/* Free everything owned by an OptionsContext and reset it to defaults,
   ready for the next input/output file on the command line.  Relies on
   the OptionsContext layout: each SpecifierOpt* field is immediately
   followed by its int count. */
662 static void reset_options(OptionsContext *o, int is_input)
664 const OptionDef *po = options;
665 OptionsContext bak= *o;
668 /* all OPT_SPEC and OPT_STRING can be freed in generic way */
670 void *dst = (uint8_t*)o + po->u.off;
672 if (po->flags & OPT_SPEC) {
673 SpecifierOpt **so = dst;
674 int i, *count = (int*)(so + 1);
675 for (i = 0; i < *count; i++) {
676 av_freep(&(*so)[i].specifier);
677 if (po->flags & OPT_STRING)
678 av_freep(&(*so)[i].u.str);
682 } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
687 for (i = 0; i < o->nb_stream_maps; i++)
688 av_freep(&o->stream_maps[i].linklabel);
689 av_freep(&o->stream_maps);
690 av_freep(&o->audio_channel_maps);
691 av_freep(&o->streamid_map);
693 memset(o, 0, sizeof(*o));
/* -t given before an input is carried over to the next output with a
   warning instead of being silently dropped */
696 o->recording_time = bak.recording_time;
697 if (o->recording_time != INT64_MAX)
698 av_log(NULL, AV_LOG_WARNING,
699 "-t is not an input option, keeping it for the next output;"
700 " consider fixing your command line.\n");
702 o->recording_time = INT64_MAX;
703 o->mux_max_delay = 0.7;
704 o->limit_filesize = UINT64_MAX;
705 o->chapters_input_file = INT_MAX;
/* Pick the pixel format the encoder should use for 'target': if the codec
   lists supported formats, choose the best-quality compatible one
   (warning when 'target' itself is unsupported).  For (L)JPEG in
   non-strict mode the candidate list is widened to include the YUVJ
   variants. */
711 static enum PixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum PixelFormat target)
713 if (codec && codec->pix_fmts) {
714 const enum PixelFormat *p = codec->pix_fmts;
/* NOTE(review): even nb_components is used as a proxy for "has alpha"
   (gray+alpha = 2, rgb/yuv+alpha = 4) — confirm against pixdesc. */
715 int has_alpha= av_pix_fmt_descriptors[target].nb_components % 2 == 0;
716 enum PixelFormat best= PIX_FMT_NONE;
717 if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
718 if (st->codec->codec_id == CODEC_ID_MJPEG) {
719 p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
720 } else if (st->codec->codec_id == CODEC_ID_LJPEG) {
721 p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
722 PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
725 for (; *p != PIX_FMT_NONE; p++) {
726 best= avcodec_find_best_pix_fmt2(best, *p, target, has_alpha, NULL);
730 if (*p == PIX_FMT_NONE) {
731 if (target != PIX_FMT_NONE)
732 av_log(NULL, AV_LOG_WARNING,
733 "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
734 av_pix_fmt_descriptors[target].name,
736 av_pix_fmt_descriptors[best].name);
/* Build the colon-separated pixel-format list handed to the lavfi 'format'
   filter for this output stream.  Returns a malloc'd string (caller frees)
   or NULL.  With -pix_fmt +keep, graph auto-conversion is disabled so the
   exact requested format is enforced. */
743 static char *choose_pix_fmts(OutputStream *ost)
745 if (ost->keep_pix_fmt) {
747 avfilter_graph_set_auto_convert(ost->filter->graph->graph,
748 AVFILTER_AUTO_CONVERT_NONE);
749 if (ost->st->codec->pix_fmt == PIX_FMT_NONE)
751 return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt));
/* a single explicit format: resolve it against the encoder's list */
753 if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
754 return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc, ost->st->codec->pix_fmt)));
755 } else if (ost->enc && ost->enc->pix_fmts) {
756 const enum PixelFormat *p;
757 AVIOContext *s = NULL;
761 if (avio_open_dyn_buf(&s) < 0)
764 p = ost->enc->pix_fmts;
/* same (L)JPEG widening as in choose_pixel_fmt() above */
765 if (ost->st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
766 if (ost->st->codec->codec_id == CODEC_ID_MJPEG) {
767 p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
768 } else if (ost->st->codec->codec_id == CODEC_ID_LJPEG) {
769 p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
770 PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
774 for (; *p != PIX_FMT_NONE; p++) {
775 const char *name = av_get_pix_fmt_name(*p);
776 avio_printf(s, "%s:", name);
778 len = avio_close_dyn_buf(s, &ret);
786 * Define a function for building a string containing a list of
/* DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator)
   expands to choose_<var>s(): return the explicitly configured value as a
   string, or a 'separator'-joined list of everything the encoder supports.
   Returned string is malloc'd; caller frees.  The GET_*_NAME helper macros
   below supply a local 'name' variable for each value type.
   (All comments kept outside the macro: its lines are backslash-spliced.) */
789 #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator) \
790 static char *choose_ ## var ## s(OutputStream *ost) \
792 if (ost->st->codec->var != none) { \
793 get_name(ost->st->codec->var); \
794 return av_strdup(name); \
795 } else if (ost->enc->supported_list) { \
797 AVIOContext *s = NULL; \
801 if (avio_open_dyn_buf(&s) < 0) \
804 for (p = ost->enc->supported_list; *p != none; p++) { \
806 avio_printf(s, "%s" separator, name); \
808 len = avio_close_dyn_buf(s, &ret); \
815 #define GET_PIX_FMT_NAME(pix_fmt)\
816 const char *name = av_get_pix_fmt_name(pix_fmt);
/* pix_fmt variant unused: choose_pix_fmts() above needs extra logic */
818 // DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
819 // GET_PIX_FMT_NAME, ":")
821 #define GET_SAMPLE_FMT_NAME(sample_fmt)\
822 const char *name = av_get_sample_fmt_name(sample_fmt)
824 DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
825 AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")
827 #define GET_SAMPLE_RATE_NAME(rate)\
829 snprintf(name, sizeof(name), "%d", rate);
831 DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
832 GET_SAMPLE_RATE_NAME, ",")
834 #define GET_CH_LAYOUT_NAME(ch_layout)\
836 snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
838 DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
839 GET_CH_LAYOUT_NAME, ",")
/* Create a trivial one-input/one-output FilterGraph connecting 'ist' to
   'ost', wire it into both streams and register it in the global
   filtergraphs array.  Returns the new graph. */
841 static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
843 FilterGraph *fg = av_mallocz(sizeof(*fg));
847 fg->index = nb_filtergraphs;
849 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
851 if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
853 fg->outputs[0]->ost = ost;
854 fg->outputs[0]->graph = fg;
856 ost->filter = fg->outputs[0];
858 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
860 if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
862 fg->inputs[0]->ist = ist;
863 fg->inputs[0]->graph = fg;
/* register this graph's input with the input stream... */
865 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
866 &ist->nb_filters, ist->nb_filters + 1);
867 ist->filters[ist->nb_filters - 1] = fg->inputs[0];
/* ...and the graph itself with the global list */
869 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
870 &nb_filtergraphs, nb_filtergraphs + 1);
871 filtergraphs[nb_filtergraphs - 1] = fg;
/* Bind one unconnected input pad of a complex filtergraph to an input
   stream.  A labeled pad ("[file:spec]") is resolved via its file index
   and stream specifier; an unlabeled pad falls back to the first unused
   input stream of the matching media type.  The chosen stream is marked
   for decoding and the new InputFilter is registered on both sides. */
876 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
878 InputStream *ist = NULL;
879 enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
882 // TODO: support other filter types
883 if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
884 av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
/* labeled pad: "file_idx[:stream_specifier]" */
893 int file_idx = strtol(in->name, &p, 0);
895 if (file_idx < 0 || file_idx >= nb_input_files) {
896 av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
897 file_idx, fg->graph_desc);
900 s = input_files[file_idx]->ctx;
902 for (i = 0; i < s->nb_streams; i++) {
903 enum AVMediaType stream_type = s->streams[i]->codec->codec_type;
904 if (stream_type != type &&
905 !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
906 type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
908 if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
914 av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
915 "matches no streams.\n", p, fg->graph_desc);
918 ist = input_streams[input_files[file_idx]->ist_index + st->index];
920 /* find the first unused stream of corresponding type */
921 for (i = 0; i < nb_input_streams; i++) {
922 ist = input_streams[i];
923 if (ist->st->codec->codec_type == type && ist->discard)
926 if (i == nb_input_streams) {
927 av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
928 "unlabeled input pad %d on filter %s\n", in->pad_idx,
929 in->filter_ctx->name);
/* the stream now feeds a graph: it must be decoded, not discarded */
936 ist->decoding_needed = 1;
937 ist->st->discard = AVDISCARD_NONE;
939 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
940 &fg->nb_inputs, fg->nb_inputs + 1);
941 if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
943 fg->inputs[fg->nb_inputs - 1]->ist = ist;
944 fg->inputs[fg->nb_inputs - 1]->graph = fg;
946 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
947 &ist->nb_filters, ist->nb_filters + 1);
948 ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
/* Terminate a filtergraph's video output: create the buffersink and, as
   needed, insert 'scale' (when the encoder has fixed dimensions), 'format'
   (pixel-format negotiation), and 'fps' filters between the graph's last
   filter and the sink.  Returns 0 or a negative AVERROR code. */
951 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
954 OutputStream *ost = ofilter->ost;
955 AVCodecContext *codec = ost->st->codec;
956 AVFilterContext *last_filter = out->filter_ctx;
957 int pad_idx = out->pad_idx;
960 AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
962 snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
963 ret = avfilter_graph_create_filter(&ofilter->filter,
964 avfilter_get_by_name("buffersink"),
965 name, NULL, NULL/*buffersink_params*/, fg->graph);
966 av_freep(&buffersink_params);
/* force the encoder's output size via an explicit scale filter */
971 if (codec->width || codec->height) {
973 AVFilterContext *filter;
975 snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
978 (unsigned)ost->sws_flags);
979 snprintf(name, sizeof(name), "scaler for output stream %d:%d",
980 ost->file_index, ost->index);
981 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
982 name, args, NULL, fg->graph)) < 0)
984 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
987 last_filter = filter;
/* constrain the pixel format(s) acceptable to the encoder */
991 if ((pix_fmts = choose_pix_fmts(ost))) {
992 AVFilterContext *filter;
993 snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
994 ost->file_index, ost->index);
995 if ((ret = avfilter_graph_create_filter(&filter,
996 avfilter_get_by_name("format"),
997 "format", pix_fmts, NULL,
1000 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1003 last_filter = filter;
1005 av_freep(&pix_fmts);
/* fps-filter insertion is compiled out ("&& 0"); frame-rate conversion is
   handled elsewhere — kept here presumably for future use */
1008 if (ost->frame_rate.num && 0) {
1009 AVFilterContext *fps;
1012 snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
1013 ost->frame_rate.den);
1014 snprintf(name, sizeof(name), "fps for output stream %d:%d",
1015 ost->file_index, ost->index);
1016 ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
1017 name, args, NULL, fg->graph);
1021 ret = avfilter_link(last_filter, pad_idx, fps, 0);
1028 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
/* Terminate a filtergraph's audio output: create the abuffersink and, as
   needed, insert 'pan' (for -map_channel) and 'aformat' (sample format /
   rate / channel-layout negotiation) before the sink.  Returns 0 or a
   negative AVERROR code. */
1034 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
1036 OutputStream *ost = ofilter->ost;
1037 AVCodecContext *codec = ost->st->codec;
1038 AVFilterContext *last_filter = out->filter_ctx;
1039 int pad_idx = out->pad_idx;
1040 char *sample_fmts, *sample_rates, *channel_layouts;
1045 snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
1046 ret = avfilter_graph_create_filter(&ofilter->filter,
1047 avfilter_get_by_name("abuffersink"),
1048 name, NULL, NULL, fg->graph);
/* Create filter_name with argument arg, splice it after last_filter and
   make it the new chain tail.  (Comment kept outside the macro body.) */
1052 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1053 AVFilterContext *filt_ctx; \
1055 av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1056 "similarly to -af " filter_name "=%s.\n", arg); \
1058 ret = avfilter_graph_create_filter(&filt_ctx, \
1059 avfilter_get_by_name(filter_name), \
1060 filter_name, arg, NULL, fg->graph); \
1064 ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1068 last_filter = filt_ctx; \
/* translate -map_channel selections into a pan filter description */
1071 if (ost->audio_channels_mapped) {
1074 av_bprint_init(&pan_buf, 256, 8192);
1075 av_bprintf(&pan_buf, "0x%"PRIx64,
1076 av_get_default_channel_layout(ost->audio_channels_mapped));
1077 for (i = 0; i < ost->audio_channels_mapped; i++)
1078 if (ost->audio_channels_map[i] != -1)
1079 av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
1081 AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
1082 av_bprint_finalize(&pan_buf, NULL);
1085 if (codec->channels && !codec->channel_layout)
1086 codec->channel_layout = av_get_default_channel_layout(codec->channels);
1088 sample_fmts = choose_sample_fmts(ost);
1089 sample_rates = choose_sample_rates(ost);
1090 channel_layouts = choose_channel_layouts(ost);
1091 if (sample_fmts || sample_rates || channel_layouts) {
1092 AVFilterContext *format;
1097 len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
1100 len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
1102 if (channel_layouts)
1103 len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
1107 av_freep(&sample_fmts);
1108 av_freep(&sample_rates);
1109 av_freep(&channel_layouts);
1111 snprintf(name, sizeof(name), "audio format for output stream %d:%d",
1112 ost->file_index, ost->index);
1113 ret = avfilter_graph_create_filter(&format,
1114 avfilter_get_by_name("aformat"),
1115 name, args, NULL, fg->graph);
1119 ret = avfilter_link(last_filter, pad_idx, format, 0);
1123 last_filter = format;
/* output-side volume insertion is compiled out ("&& 0"); -vol is applied
   on the input side instead (see configure_input_audio_filter) */
1127 if (audio_volume != 256 && 0) {
1130 snprintf(args, sizeof(args), "%f", audio_volume / 256.);
1131 AUTO_INSERT_FILTER("-vol", "volume", args);
1134 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
/* Build a human-readable name for a filtergraph endpoint f from the
   AVFilterInOut 'inout': "filtername" or "filtername:padname".  'in'
   selects input vs output pads.  The name is stored in f->name via a
   dynamic buffer.  (Comments kept outside the backslash-spliced body.) */
1140 #define DESCRIBE_FILTER_LINK(f, inout, in) \
1142 AVFilterContext *ctx = inout->filter_ctx; \
1143 AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads; \
1144 int nb_pads = in ? ctx->input_count : ctx->output_count; \
1147 if (avio_open_dyn_buf(&pb) < 0) \
1150 avio_printf(pb, "%s", ctx->filter->name); \
1152 avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));\
1154 avio_close_dyn_buf(pb, &f->name); \
/* Dispatch output-pad configuration by media type.  Only video and audio
   pads can reach here (enforced when inputs were bound), hence the
   assert on the default case. */
1157 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
1159 av_freep(&ofilter->name);
1160 DESCRIBE_FILTER_LINK(ofilter, out, 0);
1162 switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
1163 case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
1164 case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
1165 default: av_assert0(0);
/* Feed a filtergraph's video input: create the 'buffer' source configured
   from the stream's codec parameters (size, pix_fmt, time base, SAR,
   optional frame rate), prepare sub2video when the source is a subtitle
   stream, and optionally insert setpts to force CFR for -r inputs. */
1169 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
1172 AVFilterContext *first_filter = in->filter_ctx;
1173 AVFilter *filter = avfilter_get_by_name("buffer");
1174 InputStream *ist = ifilter->ist;
/* a forced -r overrides the stream's own time base / frame rate */
1175 AVRational tb = ist->framerate.num ? (AVRational){ist->framerate.den,
1176 ist->framerate.num} :
1178 AVRational fr = ist->framerate.num ? ist->framerate :
1179 ist->st->r_frame_rate;
1183 int pad_idx = in->pad_idx;
1186 if (ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
1187 ret = sub2video_prepare(ist);
/* stream-level SAR wins over codec-level SAR */
1192 sar = ist->st->sample_aspect_ratio.num ?
1193 ist->st->sample_aspect_ratio :
1194 ist->st->codec->sample_aspect_ratio;
1196 sar = (AVRational){0,1};
1197 av_bprint_init(&args, 0, 1);
1199 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
1200 "pixel_aspect=%d/%d:sws_param=flags=%d", ist->st->codec->width,
1201 ist->st->codec->height, ist->st->codec->pix_fmt,
1202 tb.num, tb.den, sar.num, sar.den,
1203 SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
1204 if (fr.num && fr.den)
1205 av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
1206 snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
1207 ist->file_index, ist->st->index);
1209 if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, name,
1210 args.str, NULL, fg->graph)) < 0)
1213 if (ist->framerate.num) {
1214 AVFilterContext *setpts;
1216 snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
1217 ist->file_index, ist->st->index);
1218 if ((ret = avfilter_graph_create_filter(&setpts,
1219 avfilter_get_by_name("setpts"),
/* setpts goes in front of the graph: source -> setpts -> first_filter */
1224 if ((ret = avfilter_link(setpts, 0, first_filter, pad_idx)) < 0)
1227 first_filter = setpts;
1231 if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
/* Feed a filtergraph's audio input: create the 'abuffer' source from the
   stream's codec parameters, then optionally insert aresample (for
   -async drift compensation) and volume (for -vol) in front of the
   graph before linking the source in. */
1236 static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
1239 AVFilterContext *first_filter = in->filter_ctx;
1240 AVFilter *filter = avfilter_get_by_name("abuffer");
1241 InputStream *ist = ifilter->ist;
1242 int pad_idx = in->pad_idx;
1243 char args[255], name[255];
/* time_base 1/sample_rate: audio pts are counted in samples here */
1246 snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
1247 ":channel_layout=0x%"PRIx64,
1248 1, ist->st->codec->sample_rate,
1249 ist->st->codec->sample_rate,
1250 av_get_sample_fmt_name(ist->st->codec->sample_fmt),
1251 ist->st->codec->channel_layout);
1252 snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
1253 ist->file_index, ist->st->index);
1255 if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter,
/* Create filter_name with argument arg and splice it in front of the
   chain: new filter -> first_filter.  (Comment kept outside the macro.) */
1260 #define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do { \
1261 AVFilterContext *filt_ctx; \
1263 av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1264 "similarly to -af " filter_name "=%s.\n", arg); \
1266 snprintf(name, sizeof(name), "graph %d %s for input stream %d:%d", \
1267 fg->index, filter_name, ist->file_index, ist->st->index); \
1268 ret = avfilter_graph_create_filter(&filt_ctx, \
1269 avfilter_get_by_name(filter_name), \
1270 name, arg, NULL, fg->graph); \
1274 ret = avfilter_link(filt_ctx, 0, first_filter, pad_idx); \
1278 first_filter = filt_ctx; \
1281 if (audio_sync_method > 0) {
1282 char args[256] = {0};
1284 av_strlcatf(args, sizeof(args), "min_comp=0.001:min_hard_comp=%f", audio_drift_threshold);
1285 if (audio_sync_method > 1)
1286 av_strlcatf(args, sizeof(args), ":max_soft_comp=%f", audio_sync_method/(double)ist->st->codec->sample_rate);
1287 AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
1290 // if (ost->audio_channels_mapped) {
1292 // AVBPrint pan_buf;
1293 // av_bprint_init(&pan_buf, 256, 8192);
1294 // av_bprintf(&pan_buf, "0x%"PRIx64,
1295 // av_get_default_channel_layout(ost->audio_channels_mapped));
1296 // for (i = 0; i < ost->audio_channels_mapped; i++)
1297 // if (ost->audio_channels_map[i] != -1)
1298 // av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
1299 // AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
1300 // av_bprint_finalize(&pan_buf, NULL);
/* -vol: fixed-point gain, 256 == unity */
1303 if (audio_volume != 256) {
1306 snprintf(args, sizeof(args), "%f", audio_volume / 256.);
1307 AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
1309 if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
/*
 * Dispatch configuration of a filtergraph input to the media-type
 * specific helper (video or audio). Also (re)derives the input's
 * debug name via DESCRIBE_FILTER_LINK after freeing any old one.
 * Subtitle inputs are not supported: any other pad type aborts.
 */
1315 static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
1318 av_freep(&ifilter->name);
1319 DESCRIBE_FILTER_LINK(ifilter, in, 1);
1321 switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
1322 case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
1323 case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
1324 default: av_assert0(0);
/*
 * (Re)build a filtergraph: free any previous graph, allocate a new one,
 * parse the description, wire all inputs, and either finish output setup
 * (simple graphs, or complex graphs on reconfiguration) or stash the
 * unparsed outputs in out_tmp for later mapping (complex graph init).
 * `simple` means a per-stream -vf/-af graph with exactly one in/out;
 * `init` is true on the first configuration of this FilterGraph.
 * NOTE(review): several error-handling lines are elided in this listing.
 */
1328 static int configure_filtergraph(FilterGraph *fg)
1330 AVFilterInOut *inputs, *outputs, *cur;
1331 int ret, i, init = !fg->graph, simple = !fg->graph_desc;
1332 const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
1335 avfilter_graph_free(&fg->graph);
1336 if (!(fg->graph = avfilter_graph_alloc()))
1337 return AVERROR(ENOMEM);
/* propagate the output stream's sws flags to auto-inserted scalers */
1340 OutputStream *ost = fg->outputs[0]->ost;
1342 snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
1343 fg->graph->scale_sws_opts = av_strdup(args);
1346 if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
/* a simple (-vf/-af) graph must be a single-input single-output chain */
1349 if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
1350 av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' does not have "
1351 "exactly one input and output.\n", graph_desc);
1352 return AVERROR(EINVAL);
/* complex graph, first pass: bind each unparsed input to a stream */
1355 for (cur = inputs; !simple && init && cur; cur = cur->next)
1356 init_input_filter(fg, cur);
1358 for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1359 if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0)
1361 avfilter_inout_free(&inputs);
1363 if (!init || simple) {
1364 /* we already know the mappings between lavfi outputs and output streams,
1365 * so we can finish the setup */
1366 for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1367 configure_output_filter(fg, fg->outputs[i], cur);
1368 avfilter_inout_free(&outputs);
1370 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1373 /* wait until output mappings are processed */
1374 for (cur = outputs; cur;) {
1375 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
1376 &fg->nb_outputs, fg->nb_outputs + 1);
1377 if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
1379 fg->outputs[fg->nb_outputs - 1]->graph = fg;
1380 fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
/* detach the node from the AVFilterInOut list; it is now owned here */
1382 fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
/*
 * Configure every -filter_complex graph that has not been built yet,
 * stopping at the first failure and returning its error code.
 */
1389 static int configure_complex_filters(void)
1393 for (i = 0; i < nb_filtergraphs; i++)
1394 if (!filtergraphs[i]->graph &&
1395 (ret = configure_filtergraph(filtergraphs[i])) < 0)
/*
 * Return whether the given input stream feeds any input of the
 * filtergraph `fg` (simple linear scan over fg->inputs).
 */
1400 static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
1403 for (i = 0; i < fg->nb_inputs; i++)
1404 if (fg->inputs[i]->ist == ist)
/*
 * Terminal cleanup: flush the log line and restore the tty attributes
 * saved by term_init(). Registered to run on exit / fatal signals.
 */
1409 static void term_exit(void)
1411 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* restore original terminal modes on stdin (fd 0) immediately */
1414 tcsetattr (0, TCSANOW, &oldtty);
/* last termination signal received; polled by the main loop */
1418 static volatile int received_sigterm = 0;
/*
 * Async-signal handler: record the signal and count repeats so the
 * main loop can shut down gracefully; repeated signals force exit.
 */
1420 static void sigterm_handler(int sig)
1422 received_sigterm = sig;
1423 received_nb_signals++;
/* user insists (4th signal): stop waiting for a clean shutdown */
1425 if(received_nb_signals > 3)
/*
 * Put the controlling terminal into raw-ish mode so single keypresses
 * can be read without echo (for interactive commands), and install
 * signal handlers for clean shutdown. Only touches the tty when both
 * stdin and stderr are terminals.
 */
1429 static void term_init(void)
1436 istty = isatty(0) && isatty(2);
1438 if (istty && tcgetattr (0, &tty) == 0) {
/* disable input translation, canonical mode and echo; keep OPOST */
1443 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
1444 |INLCR|IGNCR|ICRNL|IXON);
1445 tty.c_oflag |= OPOST;
1446 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
1447 tty.c_cflag &= ~(CSIZE|PARENB);
/* non-blocking reads: no inter-byte timer */
1450 tty.c_cc[VTIME] = 0;
1452 tcsetattr (0, TCSANOW, &tty);
1454 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
1457 avformat_network_deinit();
1459 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
1460 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* treat CPU-time limit exceeded like a termination request */
1462 signal(SIGXCPU, sigterm_handler);
1466 /* read a key without blocking */
/*
 * Poll stdin for a single character. POSIX path: select() with a zero
 * timeout, then read one byte. Windows path: detect console vs pipe
 * and use PeekNamedPipe for the latter. Returns the key or a
 * no-data/error indicator (elided lines carry the actual returns).
 */
1467 static int read_key(void)
1479 n = select(1, &rfds, NULL, NULL, &tv);
1481 n = read(0, &ch, 1);
1488 #    if HAVE_PEEKNAMEDPIPE
1490 static HANDLE input_handle;
1493 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes/redirected input */
1494 is_pipe = !GetConsoleMode(input_handle, &dw);
1497 if (stdin->_cnt > 0) {
1502 /* When running under a GUI, you will end here. */
1503 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL))
/*
 * AVIO interrupt callback: abort blocking I/O once the user has sent
 * more than one termination signal (the first one still allows a
 * clean finish).
 */
1520 static int decode_interrupt_cb(void *ctx)
1522 return received_nb_signals > 1;
1525 static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/*
 * Global teardown and process exit: free all filtergraphs, close and
 * free all output files/streams and input files/streams, release
 * auxiliary buffers, shut down the network layer, and report the
 * terminating signal if any. Never returns.
 * NOTE(review): interior lines (loop bodies, final exit call) are
 * elided in this listing.
 */
1527 void av_noreturn exit_program(int ret)
/* filtergraphs own their inputs/outputs arrays and their names */
1531 for (i = 0; i < nb_filtergraphs; i++) {
1532 avfilter_graph_free(&filtergraphs[i]->graph);
1533 for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
1534 av_freep(&filtergraphs[i]->inputs[j]->name);
1535 av_freep(&filtergraphs[i]->inputs[j]);
1537 av_freep(&filtergraphs[i]->inputs);
1538 for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
1539 av_freep(&filtergraphs[i]->outputs[j]->name);
1540 av_freep(&filtergraphs[i]->outputs[j]);
1542 av_freep(&filtergraphs[i]->outputs);
1543 av_freep(&filtergraphs[i]);
1545 av_freep(&filtergraphs);
1547 av_freep(&subtitle_out);
/* close muxer outputs: only close the AVIO context we opened ourselves */
1550 for (i = 0; i < nb_output_files; i++) {
1551 AVFormatContext *s = output_files[i]->ctx;
1552 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
1554 avformat_free_context(s);
1555 av_dict_free(&output_files[i]->opts);
1556 av_freep(&output_files[i]);
1558 for (i = 0; i < nb_output_streams; i++) {
1559 AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
/* walk the bitstream-filter chain, closing each node */
1561 AVBitStreamFilterContext *next = bsfc->next;
1562 av_bitstream_filter_close(bsfc);
1565 output_streams[i]->bitstream_filters = NULL;
1567 av_freep(&output_streams[i]->forced_keyframes);
1568 av_freep(&output_streams[i]->filtered_frame);
1569 av_freep(&output_streams[i]->avfilter);
1570 av_freep(&output_streams[i]);
1572 for (i = 0; i < nb_input_files; i++) {
1573 avformat_close_input(&input_files[i]->ctx);
1574 av_freep(&input_files[i]);
1576 for (i = 0; i < nb_input_streams; i++) {
1577 av_freep(&input_streams[i]->decoded_frame);
1578 av_dict_free(&input_streams[i]->opts);
1579 free_buffer_pool(&input_streams[i]->buffer_pool);
1580 avfilter_unref_bufferp(&input_streams[i]->sub2video.ref);
1581 av_freep(&input_streams[i]->filters);
1582 av_freep(&input_streams[i]);
1586 fclose(vstats_file);
1587 av_free(vstats_filename);
1589 av_freep(&input_streams);
1590 av_freep(&input_files);
1591 av_freep(&output_streams);
1592 av_freep(&output_files);
1597 avformat_network_deinit();
1599 if (received_sigterm) {
1600 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
1601 (int) received_sigterm);
/*
 * Abort with a fatal log if any option in the dictionary was not
 * consumed by the component it was passed to (i.e. option unknown).
 */
1608 static void assert_avoptions(AVDictionary *m)
1610 AVDictionaryEntry *t;
/* an empty-key prefix match enumerates any remaining entry */
1611 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1612 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/*
 * Refuse (fatally) to use an experimental codec unless the user opted
 * in with -strict experimental; also suggest a non-experimental
 * alternative implementation of the same codec id if one exists.
 */
1617 static void assert_codec_experimental(AVCodecContext *c, int encoder)
1619 const char *codec_string = encoder ? "encoder" : "decoder";
1621 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
1622 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1623 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
1624 "results.\nAdd '-strict experimental' if you want to use it.\n",
1625 codec_string, c->codec->name);
/* the default find_* lookup may return a different, stable codec */
1626 codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
1627 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
1628 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
1629 codec_string, codec->name);
/*
 * If the stream's sample format is not supported by the chosen codec,
 * fall back to the codec's first supported format, warning the user.
 * For lossless codecs, additionally warn when the fallback format is
 * smaller than the source format (the conversion loses precision).
 */
1634 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
1636 if (codec && codec->sample_fmts) {
1637 const enum AVSampleFormat *p = codec->sample_fmts;
/* the list is -1 terminated; an exact match means nothing to do */
1638 for (; *p != -1; p++) {
1639 if (*p == st->codec->sample_fmt)
/* FIX: compare sample sizes, not the name-string pointers returned by
 * av_get_sample_fmt_name() — pointer order is meaningless here */
1643 if((codec->capabilities & CODEC_CAP_LOSSLESS) && av_get_bytes_per_sample(st->codec->sample_fmt) > av_get_bytes_per_sample(codec->sample_fmts[0]))
1644 av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
/* only warn when the source format has a printable name */
1645 if(av_get_sample_fmt_name(st->codec->sample_fmt))
1646 av_log(NULL, AV_LOG_WARNING,
1647 "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
1648 av_get_sample_fmt_name(st->codec->sample_fmt),
1650 av_get_sample_fmt_name(codec->sample_fmts[0]));
1651 st->codec->sample_fmt = codec->sample_fmts[0];
/*
 * Final muxing step for one encoded/copied packet: optionally drop
 * timestamps (VSYNC_DROP / negative async), clamp non-monotonic audio
 * DTS for strict muxers, enforce -frames limits for non-video streams,
 * run the stream's bitstream-filter chain, then interleave-write.
 * NOTE(review): several lines (bsf loop header, early returns) are
 * elided in this listing.
 */
1656 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
1658 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
1659 AVCodecContext *avctx = ost->st->codec;
/* drop timestamps entirely when the user asked for it */
1662 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
1663 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
1664 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
/* strict muxers require strictly increasing DTS; clamp audio to cur_dts(+1) */
1666 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1667 int64_t max = ost->st->cur_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
1668 if (ost->st->cur_dts && ost->st->cur_dts != AV_NOPTS_VALUE && max > pkt->dts) {
/* FIX: typo in log message: "cliping" -> "clipping" */
1669 av_log(s, max - pkt->dts > 2 ? AV_LOG_WARNING : AV_LOG_DEBUG, "Audio timestamp %"PRId64" < %"PRId64" invalid, clipping\n", pkt->dts, max);
1670 pkt->pts = pkt->dts = max;
1675 * Audio encoders may split the packets -- #frames in != #packets out.
1676 * But there is no reordering, so we can limit the number of output packets
1677 * by simply dropping them here.
1678 * Counting encoded video frames needs to be done separately because of
1679 * reordering, see do_video_out()
1681 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
1682 if (ost->frame_number >= ost->max_frames) {
1683 av_free_packet(pkt);
1686 ost->frame_number++;
/* run each bitstream filter in the chain over the packet in turn */
1690 AVPacket new_pkt = *pkt;
1691 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
1692 &new_pkt.data, &new_pkt.size,
1693 pkt->data, pkt->size,
1694 pkt->flags & AV_PKT_FLAG_KEY);
1696 av_free_packet(pkt);
1697 new_pkt.destruct = av_destruct_packet;
1699 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
1700 bsfc->filter->name, pkt->stream_index,
1701 avctx->codec ? avctx->codec->name : "copy");
1711 pkt->stream_index = ost->index;
1712 ret = av_interleaved_write_frame(s, pkt);
1714 print_error("av_interleaved_write_frame()", ret);
/*
 * Return whether encoding should continue for this stream given -t:
 * once the stream's elapsed time reaches the file's recording_time,
 * mark it past the limit. (Elided lines carry the return values.)
 */
1719 static int check_recording_time(OutputStream *ost)
1721 OutputFile *of = output_files[ost->file_index];
/* compare elapsed stream time (codec tb) against -t (AV_TIME_BASE_Q) */
1723 if (of->recording_time != INT64_MAX &&
1724 av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
1725 AV_TIME_BASE_Q) >= 0) {
1726 ost->is_past_recording_time = 1;
/*
 * Encode one audio frame and mux the resulting packet: honor the
 * recording-time limit, derive/advance sync_opts from the frame pts
 * and sample count, rescale packet timestamps from the encoder time
 * base to the stream time base, and hand off to write_frame().
 */
1732 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
1735 AVCodecContext *enc = ost->st->codec;
1739 av_init_packet(&pkt);
1743 if (!check_recording_time(ost))
/* fall back to our own sample counter when no pts or async disabled */
1746 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1747 frame->pts = ost->sync_opts;
1748 ost->sync_opts = frame->pts + frame->nb_samples;
1750 av_assert0(pkt.size || !pkt.data);
1751 update_benchmark(NULL);
1752 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
1753 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
1756 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* encoder produced timestamps in enc->time_base; mux wants st->time_base */
1759 if (pkt.pts != AV_NOPTS_VALUE)
1760 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1761 if (pkt.dts != AV_NOPTS_VALUE)
1762 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1763 if (pkt.duration > 0)
1764 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1767 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1768 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1769 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1770 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1773 write_frame(s, &pkt, ost);
1775 audio_size += pkt.size;
1776 av_free_packet(&pkt);
/*
 * Optional pre-filter processing of a decoded video frame: when
 * -deinterlace is set, deinterlace into a freshly allocated temporary
 * picture (returned to the caller via *bufp for later freeing); on
 * failure the original picture is kept untouched.
 */
1780 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1782 AVCodecContext *dec;
1783 AVPicture *picture2;
1784 AVPicture picture_tmp;
1787 dec = ist->st->codec;
1789 /* deinterlace : must be done before any resize */
1790 if (do_deinterlace) {
1793 /* create temporary picture */
1794 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1795 buf  = av_malloc(size);
1799 picture2 = &picture_tmp;
1800 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1802 if (avpicture_deinterlace(picture2, picture,
1803 dec->pix_fmt, dec->width, dec->height) < 0) {
1804 /* if error, do not deinterlace */
1805 av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
/* copy the (possibly new) plane pointers back into the caller's struct */
1814 if (picture != picture2)
1815 *picture = *picture2;
/*
 * Encode one subtitle and mux the result. DVB subtitles are encoded
 * twice (draw packet + clear packet); timestamps are shifted by -ss,
 * checked against -t, and rescaled into the stream time base. The
 * display-time window is folded into pts/duration because
 * start_display_time must be 0 for the encoders used here.
 * NOTE(review): interior lines are elided in this listing.
 */
1819 static void do_subtitle_out(AVFormatContext *s,
1825 int subtitle_out_max_size = 1024 * 1024;
1826 int subtitle_out_size, nb, i;
1827 AVCodecContext *enc;
1830 if (pts == AV_NOPTS_VALUE) {
1831 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1837 enc = ost->st->codec;
/* lazily allocated global scratch buffer shared by all subtitle streams */
1839 if (!subtitle_out) {
1840 subtitle_out = av_malloc(subtitle_out_max_size);
1843 /* Note: DVB subtitle need one packet to draw them and one other
1844 packet to clear them */
1845 /* XXX: signal it in the codec context ? */
1846 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1851 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1852 pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q)
1853 - output_files[ost->file_index]->start_time;
1854 for (i = 0; i < nb; i++) {
1855 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1856 if (!check_recording_time(ost))
1860 // start_display_time is required to be 0
1861 sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1862 sub->end_display_time  -= sub->start_display_time;
1863 sub->start_display_time = 0;
1864 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1865 subtitle_out_max_size, sub);
1866 if (subtitle_out_size < 0) {
1867 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1871 av_init_packet(&pkt);
1872 pkt.data = subtitle_out;
1873 pkt.size = subtitle_out_size;
1874 pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1875 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
1876 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1877 /* XXX: the pts correction is handled here. Maybe handling
1878 it in the codec would be better */
/* 90 = 90kHz ticks per millisecond; draw at start, clear at end */
1880 pkt.pts += 90 * sub->start_display_time;
1882 pkt.pts += 90 * sub->end_display_time;
1884 write_frame(s, &pkt, ost);
1885 subtitle_size += pkt.size;
/*
 * Encode (or raw-copy) one video frame, implementing the -vsync
 * policies: compute how many output frames this input frame maps to
 * (0 = drop, >1 = duplicate), honor -frames and -t, support
 * AVFMT_RAWPICTURE passthrough, apply forced keyframes and same_quant,
 * rescale encoder timestamps to the stream time base, and write the
 * two-pass stats line when enabled.
 * NOTE(review): interior lines (case labels, error paths) are elided
 * in this listing.
 */
1889 static void do_video_out(AVFormatContext *s,
1891 AVFrame *in_picture,
1894 int ret, format_video_sync;
1896 AVCodecContext *enc = ost->st->codec;
1898 double sync_ipts, delta;
1899 double duration = 0;
1901 InputStream *ist = NULL;
1903 if (ost->source_index >= 0)
1904 ist = input_streams[ost->source_index];
/* nominal frame duration in encoder time-base units, when known */
1906 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1907 duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));
1909 sync_ipts = in_picture->pts;
1910 delta = sync_ipts - ost->sync_opts + duration;
1912 /* by default, we output a single frame */
1915 format_video_sync = video_sync_method;
1916 if (format_video_sync == VSYNC_AUTO)
1917 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : 1;
1919 switch (format_video_sync) {
1921 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
/* CFR: duplicate when the input runs ahead of the output clock */
1924 else if (delta > 1.1)
1925 nb_frames = lrintf(delta);
1930 else if (delta > 0.6)
1931 ost->sync_opts = lrint(sync_ipts);
1934 case VSYNC_PASSTHROUGH:
1935 ost->sync_opts = lrint(sync_ipts);
1941 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1942 if (nb_frames == 0) {
1944 av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1946 } else if (nb_frames > 1) {
/* FIX: typo in log message: "skiping" -> "skipping" */
1947 if (nb_frames > dts_error_threshold * 30) {
1948 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1952 nb_frames_dup += nb_frames - 1;
1953 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1956 /* duplicates frame if needed */
1957 for (i = 0; i < nb_frames; i++) {
1958 av_init_packet(&pkt);
1962 in_picture->pts = ost->sync_opts;
1964 if (!check_recording_time(ost))
1967 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1968 enc->codec->id == CODEC_ID_RAWVIDEO) {
1969 /* raw pictures are written as AVPicture structure to
1970 avoid any copies. We support temporarily the older
1972 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1973 enc->coded_frame->top_field_first  = in_picture->top_field_first;
1974 pkt.data   = (uint8_t *)in_picture;
1975 pkt.size   =  sizeof(AVPicture);
1976 pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1977 pkt.flags |= AV_PKT_FLAG_KEY;
1979 write_frame(s, &pkt, ost);
1980 video_size += pkt.size;
1983 AVFrame big_picture;
1985 big_picture = *in_picture;
1986 /* better than nothing: use input picture interlaced
1988 big_picture.interlaced_frame = in_picture->interlaced_frame;
1989 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1990 if (ost->top_field_first == -1)
1991 big_picture.top_field_first = in_picture->top_field_first;
1993 big_picture.top_field_first = !!ost->top_field_first;
1996 /* handles same_quant here. This is not correct because it may
1997 not be a global option */
1998 big_picture.quality = quality;
/* let the encoder choose the picture type unless me_threshold forces it */
1999 if (!enc->me_threshold)
2000 big_picture.pict_type = 0;
2001 if (ost->forced_kf_index < ost->forced_kf_count &&
2002 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
2003 big_picture.pict_type = AV_PICTURE_TYPE_I;
2004 ost->forced_kf_index++;
2006 update_benchmark(NULL);
2007 ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
2008 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
2010 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* delay-capable encoders may emit packets without pts; only backfill otherwise */
2015 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
2016 pkt.pts = ost->sync_opts;
2018 if (pkt.pts != AV_NOPTS_VALUE)
2019 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
2020 if (pkt.dts != AV_NOPTS_VALUE)
2021 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
2024 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
2025 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
2026 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
2027 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
2030 write_frame(s, &pkt, ost);
2031 frame_size = pkt.size;
2032 video_size += pkt.size;
2033 av_free_packet(&pkt);
2035 /* if two pass, output log */
2036 if (ost->logfile && enc->stats_out) {
2037 fprintf(ost->logfile, "%s", enc->stats_out);
2043 * For video, number of frames in == number of packets out.
2044 * But there may be reordering, so we can't throw away frames on encoder
2045 * flush, we need to limit them here, before they go into encoder.
2047 ost->frame_number++;
2050 if (vstats_filename && frame_size)
2051 do_video_stats(output_files[ost->file_index]->ctx, ost, frame_size);
/* Convert a normalized mean squared error into PSNR in decibels. */
2054 static double psnr(double d)
2056 return -10.0 * log(d) / log(10.0);
/*
 * Append one line per encoded video frame to the -vstats file:
 * frame number, quantizer, optional PSNR, frame size, pts-derived
 * time, instantaneous and average bitrate, and picture type.
 */
2059 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
2062 AVCodecContext *enc;
2064 double ti1, bitrate, avg_bitrate;
2066 /* this is executed just the first time do_video_stats is called */
2068 vstats_file = fopen(vstats_filename, "w");
2075 enc = ost->st->codec;
2076 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
2077 frame_number = ost->frame_number;
2078 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
2079 if (enc->flags&CODEC_FLAG_PSNR)
/* luma-plane MSE normalized to per-pixel 8-bit scale */
2080 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
2082 fprintf(vstats_file,"f_size= %6d ", frame_size);
2083 /* compute pts value */
2084 ti1 = ost->sync_opts * av_q2d(enc->time_base);
2088 bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
2089 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
2090 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
2091 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
2092 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
2096 /* check for new output on any of the filtergraphs */
/*
 * Drain every output stream's buffersink without requesting new frames,
 * dispatching each filtered frame to do_video_out()/do_audio_out(),
 * then request one more frame from each filtergraph and loop while any
 * graph made progress. Returns AVERROR_EOF once every graph hit EOF,
 * otherwise the accumulated error state.
 * NOTE(review): interior lines (loop headers, continues) are elided.
 */
2097 static int poll_filters(void)
2099 AVFilterBufferRef *picref;
2100 AVFrame *filtered_frame = NULL;
2101 int i, ret, ret_all;
2102 unsigned nb_success = 1, av_uninit(nb_eof);
2106 /* Reap all buffers present in the buffer sinks */
2107 for (i = 0; i < nb_output_streams; i++) {
2108 OutputStream *ost = output_files ? output_streams[i] : output_streams[i];
2109 OutputFile    *of = output_files[ost->file_index];
2115 if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
2116 return AVERROR(ENOMEM);
2118 avcodec_get_frame_defaults(ost->filtered_frame);
2119 filtered_frame = ost->filtered_frame;
/* NO_REQUEST: only take what the graph has already produced */
2122 ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
2123 AV_BUFFERSINK_FLAG_NO_REQUEST);
2125 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
2127 av_strerror(ret, buf, sizeof(buf));
2128 av_log(NULL, AV_LOG_WARNING,
2129 "Error in av_buffersink_get_buffer_ref(): %s\n", buf);
2133 frame_pts = AV_NOPTS_VALUE;
/* rescale pts into the encoder time base and shift by -ss */
2134 if (picref->pts != AV_NOPTS_VALUE) {
2135 filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,
2136 ost->filter->filter->inputs[0]->time_base,
2137 ost->st->codec->time_base) -
2138 av_rescale_q(of->start_time,
2140 ost->st->codec->time_base);
2142 if (of->start_time && filtered_frame->pts < 0) {
2143 avfilter_unref_buffer(picref);
2147 //if (ost->source_index >= 0)
2148 //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
2151 switch (ost->filter->filter->inputs[0]->type) {
2152 case AVMEDIA_TYPE_VIDEO:
2153 avfilter_copy_buf_props(filtered_frame, picref);
2154 filtered_frame->pts = frame_pts;
2155 if (!ost->frame_aspect_ratio)
2156 ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
2158 do_video_out(of->ctx, ost, filtered_frame,
2159 same_quant ? ost->last_quality :
2160 ost->st->codec->global_quality);
2162 case AVMEDIA_TYPE_AUDIO:
2163 avfilter_copy_buf_props(filtered_frame, picref);
2164 filtered_frame->pts = frame_pts;
2165 do_audio_out(of->ctx, ost, filtered_frame);
2168 // TODO support subtitle filters
2172 avfilter_unref_buffer(picref);
2175 if (!nb_success) /* from last round */
2177 /* Request frames through all the graphs */
2178 ret_all = nb_success = nb_eof = 0;
2179 for (i = 0; i < nb_filtergraphs; i++) {
2180 ret = avfilter_graph_request_oldest(filtergraphs[i]->graph);
2183 } else if (ret == AVERROR_EOF) {
2185 } else if (ret != AVERROR(EAGAIN)) {
2187 av_strerror(ret, buf, sizeof(buf));
2188 av_log(NULL, AV_LOG_WARNING,
2189 "Error in request_frame(): %s\n", buf);
2193 /* Try again if anything succeeded */
2195 return nb_eof == nb_filtergraphs ? AVERROR_EOF : ret_all;
/*
 * Print/refresh the one-line transcoding status (frame count, fps, q,
 * size, time, bitrate, dup/drop) to the log, and emit the same data as
 * key=value pairs to -progress when enabled. Rate-limited to one update
 * per 500ms except for the final report, which also prints the
 * per-type size summary.
 * NOTE(review): interior lines are elided in this listing.
 */
2198 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
2201 AVBPrint buf_script;
2203 AVFormatContext *oc;
2205 AVCodecContext *enc;
2206 int frame_number, vid, i;
2208 int64_t pts = INT64_MAX;
2209 static int64_t last_time = -1;
2210 static int qp_histogram[52];
2211 int hours, mins, secs, us;
2213 if (!print_stats && !is_last_report && !progress_avio)
2216 if (!is_last_report) {
2217 if (last_time == -1) {
2218 last_time = cur_time;
/* throttle: at most one intermediate report per 500ms */
2221 if ((cur_time - last_time) < 500000)
2223 last_time = cur_time;
2227 oc = output_files[0]->ctx;
2229 total_size = avio_size(oc->pb);
2230 if (total_size < 0) { // FIXME improve avio_size() so it works with non seekable output too
2231 total_size = avio_tell(oc->pb);
2238 av_bprint_init(&buf_script, 0, 1);
2239 for (i = 0; i < nb_output_streams; i++) {
2241 ost = output_streams[i];
2242 enc = ost->st->codec;
2243 if (!ost->stream_copy && enc->coded_frame)
2244 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
2245 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
2246 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
2247 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
2248 ost->file_index, ost->index, q);
/* only the first video stream drives the frame/fps columns */
2250 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
2251 float fps, t = (cur_time-timer_start) / 1000000.0;
2253 frame_number = ost->frame_number;
2254 fps = t > 1 ? frame_number / t : 0;
2255 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
2256 frame_number, fps < 9.95, fps, q);
2257 av_bprintf(&buf_script, "frame=%d\n", frame_number);
2258 av_bprintf(&buf_script, "fps=%.1f\n", fps);
2259 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
2260 ost->file_index, ost->index, q);
2262 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
2266 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
2268 for (j = 0; j < 32; j++)
2269 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
2271 if (enc->flags&CODEC_FLAG_PSNR) {
2273 double error, error_sum = 0;
2274 double scale, scale_sum = 0;
2276 char type[3] = { 'Y','U','V' };
2277 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
2278 for (j = 0; j < 3; j++) {
2279 if (is_last_report) {
2280 error = enc->error[j];
2281 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
2283 error = enc->coded_frame->error[j];
2284 scale = enc->width * enc->height * 255.0 * 255.0;
2290 p = psnr(error / scale);
2291 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* FIX: plane label must use the plane index j, not the stream index i */
2292 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
2293 ost->file_index, ost->index, type[j] | 32, p);
2295 p = psnr(error_sum / scale_sum);
2296 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
2297 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
2298 ost->file_index, ost->index, p);
2302 /* compute min output value */
2303 pts = FFMIN(pts, av_rescale_q(ost->st->pts.val,
2304 ost->st->time_base, AV_TIME_BASE_Q));
2307 secs = pts / AV_TIME_BASE;
2308 us = pts % AV_TIME_BASE;
2314 bitrate = pts ? total_size * 8 / (pts / 1000.0) : 0;
2316 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2317 "size=%8.0fkB time=", total_size / 1024.0);
2318 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2319 "%02d:%02d:%02d.%02d ", hours, mins, secs,
2320 (100 * us) / AV_TIME_BASE);
2321 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2322 "bitrate=%6.1fkbits/s", bitrate);
2323 av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
2324 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
2325 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
2326 hours, mins, secs, us);
2328 if (nb_frames_dup || nb_frames_drop)
2329 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
2330 nb_frames_dup, nb_frames_drop);
2331 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
2332 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
2334 if (print_stats || is_last_report) {
2335 av_log(NULL, AV_LOG_INFO, "%s    \r", buf);
2340 if (progress_avio) {
2341 av_bprintf(&buf_script, "progress=%s\n",
2342 is_last_report ? "end" : "continue");
2343 avio_write(progress_avio, buf_script.str,
2344 FFMIN(buf_script.len, buf_script.size - 1));
2345 avio_flush(progress_avio);
2346 av_bprint_finalize(&buf_script, NULL);
2347 if (is_last_report) {
2348 avio_close(progress_avio);
2349 progress_avio = NULL;
2353 if (is_last_report) {
2354 int64_t raw= audio_size + video_size + subtitle_size + extra_size;
2355 av_log(NULL, AV_LOG_INFO, "\n");
2356 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0f global headers:%1.0fkB muxing overhead %f%%\n",
2357 video_size / 1024.0,
2358 audio_size / 1024.0,
2359 subtitle_size / 1024.0,
2360 extra_size / 1024.0,
2361 100.0 * (total_size - raw) / raw
2363 if(video_size + audio_size + subtitle_size + extra_size == 0){
2364 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
/*
 * At end of input, drain every encoder that needs it: repeatedly call
 * encode(NULL) until no more packets come out, rescaling and muxing
 * each delayed packet. Streams that cannot buffer (copy, small-frame
 * audio, raw video) are skipped.
 * NOTE(review): loop structure and stop conditions are partly elided.
 */
2369 static void flush_encoders(void)
2373 for (i = 0; i < nb_output_streams; i++) {
2374 OutputStream   *ost = output_streams[i];
2375 AVCodecContext *enc = ost->st->codec;
2376 AVFormatContext *os = output_files[ost->file_index]->ctx;
2377 int stop_encoding = 0;
2379 if (!ost->encoding_needed)
/* these encoder types hold no delayed packets worth flushing */
2382 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
2384 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
2388 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
2392 switch (ost->st->codec->codec_type) {
2393 case AVMEDIA_TYPE_AUDIO:
2394 encode = avcodec_encode_audio2;
2398 case AVMEDIA_TYPE_VIDEO:
2399 encode = avcodec_encode_video2;
2410 av_init_packet(&pkt);
2414 update_benchmark(NULL);
/* NULL frame signals the encoder to emit its buffered packets */
2415 ret = encode(enc, &pkt, NULL, &got_packet);
2416 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
2418 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
2422 if (ost->logfile && enc->stats_out) {
2423 fprintf(ost->logfile, "%s", enc->stats_out);
2429 if (pkt.pts != AV_NOPTS_VALUE)
2430 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
2431 if (pkt.dts != AV_NOPTS_VALUE)
2432 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
2433 write_frame(os, &pkt, ost);
2443  * Check whether a packet from ist should be written into ost at this time
/*
 * True only when ost is mapped to exactly this input stream and the
 * input's pts has reached the output file's -ss start time.
 */
2445 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2447 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files, for comparison below */
2448 int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
2450 if (ost->source_index != ist_index)
2453 if (of->start_time && ist->pts < of->start_time)
/*
 * -c copy path: forward a demuxed packet to the muxer without
 * re-encoding. Skips leading non-keyframes (unless overridden),
 * enforces -t, shifts/rescales timestamps into the output stream time
 * base, optionally runs the stream parser to fix headers, and handles
 * AVFMT_RAWPICTURE video as an AVPicture payload.
 * NOTE(review): interior lines (returns, loop ends) are elided.
 */
2459 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2461 OutputFile *of = output_files[ost->file_index];
2462 int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
2466 av_init_packet(&opkt);
/* don't start a copied stream on a non-keyframe by default */
2468 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2469 !ost->copy_initial_nonkeyframes)
2472 if (of->recording_time != INT64_MAX &&
2473 ist->pts >= of->recording_time + of->start_time) {
2474 ost->is_past_recording_time = 1;
2478 /* force the input stream PTS */
2479 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
2480 audio_size += pkt->size;
2481 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2482 video_size += pkt->size;
2484 } else if (ost->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2485 subtitle_size += pkt->size;
2488 if (pkt->pts != AV_NOPTS_VALUE)
2489 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
2491 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the tracked input dts when the packet has none */
2493 if (pkt->dts == AV_NOPTS_VALUE)
2494 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
2496 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
2497 opkt.dts -= ost_tb_start_time;
2499 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
2500 opkt.flags    = pkt->flags;
2502 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2503 if (  ost->st->codec->codec_id != CODEC_ID_H264
2504 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
2505 && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
2506 && ost->st->codec->codec_id != CODEC_ID_VC1
2508 if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
2509 opkt.destruct = av_destruct_packet;
2511 opkt.data = pkt->data;
2512 opkt.size = pkt->size;
2515 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2516 /* store AVPicture in AVPacket, as expected by the output format */
2517 avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
2518 opkt.data = (uint8_t *)&pict;
2519 opkt.size = sizeof(AVPicture);
2520 opkt.flags |= AV_PKT_FLAG_KEY;
2523 write_frame(of->ctx, &opkt, ost);
2524 ost->st->codec->frame_number++;
2525 av_free_packet(&opkt);
/* When -re (rate emulation) is enabled for this input file, sleep so that
 * packets are consumed at roughly real-time speed: wait until wall-clock time
 * since ist->start catches up with the stream's dts (converted to microseconds).
 * NOTE(review): lines are elided here; presumably the sleep is guarded by
 * pts > now in the missing line 2533 — confirm against full source. */
2528 static void rate_emu_sleep(InputStream *ist)
2530 if (input_files[ist->file_index]->rate_emu) {
2531 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
2532 int64_t now = av_gettime() - ist->start;
2534 av_usleep(pts - now);
/* If the decoder reported no channel layout, derive a default one from the
 * channel count and warn the user that it was guessed. Returns falsy (via the
 * elided line after 2546) when no default layout exists for the channel count;
 * otherwise presumably returns success — confirm against full source. */
2538 static int guess_input_channel_layout(InputStream *ist)
2540 AVCodecContext *dec = ist->st->codec;
2542 if (!dec->channel_layout) {
2543 char layout_name[256];
2545 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2546 if (!dec->channel_layout)
/* Render the guessed layout as text purely for the warning message. */
2548 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2549 dec->channels, dec->channel_layout);
2550 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2551 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet into ist->decoded_frame, maintain the input stream's
 * running pts/dts, reconfigure filtergraphs when the audio parameters change,
 * and feed the decoded frame into all filters attached to this input stream.
 * NOTE(review): many interior lines are elided in this excerpt (embedded line
 * numbers jump); comments describe only the visible statements. */
2556 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2558 AVFrame *decoded_frame;
2559 AVCodecContext *avctx = ist->st->codec;
2560 int i, ret, resample_changed;
2561 AVRational decoded_frame_tb;
/* Lazily allocate a per-stream reusable decode frame. */
2563 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2564 return AVERROR(ENOMEM);
2566 avcodec_get_frame_defaults(ist->decoded_frame);
2567 decoded_frame = ist->decoded_frame;
/* update_benchmark(NULL) resets the timer; the second call logs the span. */
2569 update_benchmark(NULL);
2570 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
2571 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2575 if (avctx->sample_rate <= 0) {
2576 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2577 return AVERROR_INVALIDDATA;
2581 /* no audio frame */
/* On EOF-style flush (no frame produced), push a NULL ref so the filters can
 * drain. */
2583 for (i = 0; i < ist->nb_filters; i++)
2584 av_buffersrc_add_ref(ist->filters[i]->filter, NULL,
2585 AV_BUFFERSRC_FLAG_NO_COPY);
2590 /* increment next_dts to use for the case where the input stream does not
2591 have timestamps or there are multiple frames in the packet */
/* Advance by the frame's duration in AV_TIME_BASE units; the divisor
 * (sample rate) is on the elided lines 2593/2595. */
2592 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2594 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2598 rate_emu_sleep(ist);
/* Detect any change in sample format, channel count/layout or sample rate
 * relative to what the filtergraph was configured for. */
2600 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2601 ist->resample_channels != avctx->channels ||
2602 ist->resample_channel_layout != decoded_frame->channel_layout ||
2603 ist->resample_sample_rate != decoded_frame->sample_rate;
2604 if (resample_changed) {
2605 char layout1[64], layout2[64];
2607 if (!guess_input_channel_layout(ist)) {
2608 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2609 "layout for Input Stream #%d.%d\n", ist->file_index,
2613 decoded_frame->channel_layout = avctx->channel_layout;
/* Format old and new layouts for the informational log message below. */
2615 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2616 ist->resample_channel_layout);
2617 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2618 decoded_frame->channel_layout);
2620 av_log(NULL, AV_LOG_INFO,
2621 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2622 ist->file_index, ist->st->index,
2623 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2624 ist->resample_channels, layout1,
2625 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2626 avctx->channels, layout2);
/* Remember the new parameters so the next change is detected correctly. */
2628 ist->resample_sample_fmt = decoded_frame->format;
2629 ist->resample_sample_rate = decoded_frame->sample_rate;
2630 ist->resample_channel_layout = decoded_frame->channel_layout;
2631 ist->resample_channels = avctx->channels;
/* Reconfigure every filtergraph that consumes this input stream, then restore
 * fixed frame sizes on audio encoders that require them. */
2633 for (i = 0; i < nb_filtergraphs; i++)
2634 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2635 FilterGraph *fg = filtergraphs[i];
2637 if (configure_filtergraph(fg) < 0) {
2638 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2641 for (j = 0; j < fg->nb_outputs; j++) {
2642 OutputStream *ost = fg->outputs[j]->ost;
2643 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2644 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
2645 av_buffersink_set_frame_size(ost->filter->filter,
2646 ost->st->codec->frame_size);
2651 /* if the decoder provides a pts, use it instead of the last packet pts.
2652 the decoder could be delaying output by a packet or more. */
/* Choose a pts source in priority order: decoder pts, frame's pkt_pts, the
 * packet pts, then the stream's running dts; record which time base applies. */
2653 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2654 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2655 decoded_frame_tb = avctx->time_base;
2656 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2657 decoded_frame->pts = decoded_frame->pkt_pts;
2658 pkt->pts = AV_NOPTS_VALUE;
2659 decoded_frame_tb = ist->st->time_base;
2660 } else if (pkt->pts != AV_NOPTS_VALUE) {
2661 decoded_frame->pts = pkt->pts;
2662 pkt->pts = AV_NOPTS_VALUE;
2663 decoded_frame_tb = ist->st->time_base;
2665 decoded_frame->pts = ist->dts;
2666 decoded_frame_tb = AV_TIME_BASE_Q;
/* Rescale the chosen pts into a 1/sample_rate time base for the filters; the
 * source time base argument sits on the elided line 2670. */
2668 if (decoded_frame->pts != AV_NOPTS_VALUE)
2669 decoded_frame->pts = av_rescale_q(decoded_frame->pts,
2671 (AVRational){1, ist->st->codec->sample_rate});
2672 for (i = 0; i < ist->nb_filters; i++)
2673 av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, 0);
2675 decoded_frame->pts = AV_NOPTS_VALUE;
/* Decode one video packet, update the input stream's timing from the frame's
 * best-effort timestamp, reconfigure filtergraphs on size/format changes, and
 * inject the decoded frame into all attached filters (using a zero-copy
 * buffer ref when DR1 direct rendering applies).
 * NOTE(review): interior lines are elided in this excerpt; comments describe
 * only the visible statements. */
2680 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2682 AVFrame *decoded_frame;
2683 void *buffer_to_free = NULL;
2684 int i, ret = 0, resample_changed;
2685 int64_t best_effort_timestamp;
2686 AVRational *frame_sample_aspect;
/* Lazily allocate a per-stream reusable decode frame. */
2689 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2690 return AVERROR(ENOMEM);
2692 avcodec_get_frame_defaults(ist->decoded_frame);
2693 decoded_frame = ist->decoded_frame;
/* Feed the stream's running dts to the decoder via the packet. */
2694 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2696 update_benchmark(NULL);
2697 ret = avcodec_decode_video2(ist->st->codec,
2698 decoded_frame, got_output, pkt);
2699 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
/* same_quant propagates the input frame's quantizer to the encoder. */
2703 quality = same_quant ? decoded_frame->quality : 0;
2705 /* no picture yet */
/* Flush path: push a NULL ref so filters can drain. */
2707 for (i = 0; i < ist->nb_filters; i++)
2708 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, AV_BUFFERSRC_FLAG_NO_COPY);
/* Honor a user-forced field order (-top) if set. */
2712 if(ist->top_field_first>=0)
2713 decoded_frame->top_field_first = ist->top_field_first;
2715 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2716 if(best_effort_timestamp != AV_NOPTS_VALUE)
2717 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
/* Debug trace of the decoder's output timestamps. */
2720 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2721 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d \n",
2722 ist->st->index, av_ts2str(decoded_frame->pts),
2723 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2724 best_effort_timestamp,
2725 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2726 decoded_frame->key_frame, decoded_frame->pict_type);
2730 pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
2732 rate_emu_sleep(ist);
/* Container-level aspect ratio overrides the codec-level one. */
2734 if (ist->st->sample_aspect_ratio.num)
2735 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* Detect any change in frame geometry or pixel format versus the values the
 * filtergraph was configured with. */
2737 resample_changed = ist->resample_width != decoded_frame->width ||
2738 ist->resample_height != decoded_frame->height ||
2739 ist->resample_pix_fmt != decoded_frame->format;
2740 if (resample_changed) {
2741 av_log(NULL, AV_LOG_INFO,
2742 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2743 ist->file_index, ist->st->index,
2744 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2745 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2747 ist->resample_width = decoded_frame->width;
2748 ist->resample_height = decoded_frame->height;
2749 ist->resample_pix_fmt = decoded_frame->format;
2751 for (i = 0; i < nb_filtergraphs; i++)
2752 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2753 configure_filtergraph(filtergraphs[i]) < 0) {
2754 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* Access the frame's SAR through the AVOption table (API detail of this era). */
2759 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2760 for (i = 0; i < ist->nb_filters; i++) {
/* "changed" = the filter output no longer matches the decoder's geometry, in
 * which case the zero-copy DR1 path below must be skipped. */
2761 int changed = ist->st->codec->width != ist->filters[i]->filter->outputs[0]->w
2762 || ist->st->codec->height != ist->filters[i]->filter->outputs[0]->h
2763 || ist->st->codec->pix_fmt != ist->filters[i]->filter->outputs[0]->format;
2764 // XXX what an ugly hack
2765 if (ist->filters[i]->graph->nb_outputs == 1)
2766 ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
2768 if (!frame_sample_aspect->num)
2769 *frame_sample_aspect = ist->st->sample_aspect_ratio;
/* DR1 fast path: wrap the decoder's own buffer in a filter buffer ref instead
 * of copying the frame data. */
2770 if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
2771 FrameBuffer *buf = decoded_frame->opaque;
2772 AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
2773 decoded_frame->data, decoded_frame->linesize,
2774 AV_PERM_READ | AV_PERM_PRESERVE,
2775 ist->st->codec->width, ist->st->codec->height,
2776 ist->st->codec->pix_fmt);
2778 avfilter_copy_frame_props(fb, decoded_frame);
2779 fb->buf->priv = buf;
2780 fb->buf->free = filter_release_buffer;
2782 av_assert0(buf->refcount>0);
2784 av_buffersrc_add_ref(ist->filters[i]->filter, fb,
2785 AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
2786 AV_BUFFERSRC_FLAG_NO_COPY);
/* Fallback path: let buffersrc copy the frame into the graph. */
2788 if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, 0)<0) {
2789 av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
2795 av_free(buffer_to_free);
/* Decode one subtitle packet, mirror it into the sub2video rendering path, and
 * encode it into every output stream that accepts packets from this input.
 * NOTE(review): some lines are elided in this excerpt (embedded line numbers
 * jump). */
2799 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2801 AVSubtitle subtitle;
2802 int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2803 &subtitle, got_output, pkt);
/* On decode failure or no output, flush sub2video (EOF handling is on the
 * elided lines). */
2804 if (ret < 0 || !*got_output) {
2806 sub2video_flush(ist);
2810 rate_emu_sleep(ist);
/* Render the subtitle onto the sub2video canvas at the packet's pts. */
2812 sub2video_update(ist, &subtitle, pkt->pts);
/* Fan the decoded subtitle out to every eligible encoding output stream. */
2814 for (i = 0; i < nb_output_streams; i++) {
2815 OutputStream *ost = output_streams[i];
2817 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2820 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
2823 avsubtitle_free(&subtitle);
2827 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Dispatch one input packet: track the input stream's pts/dts, run the
 * appropriate decoder (audio/video/subtitle) possibly multiple times per
 * packet, and for non-decoded streams perform direct stream copy.
 * NOTE(review): many interior lines are elided in this excerpt (embedded line
 * numbers jump); comments describe only the visible statements. */
2828 static int output_packet(InputStream *ist, const AVPacket *pkt)
/* First packet: seed ist->dts, pre-rolling by the codec's B-frame delay when a
 * frame rate is known. */
2834 if (!ist->saw_first_ts) {
2835 ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2837 if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2838 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2839 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2841 ist->saw_first_ts = 1;
2844 if (ist->next_dts == AV_NOPTS_VALUE)
2845 ist->next_dts = ist->dts;
2846 if (ist->next_pts == AV_NOPTS_VALUE)
2847 ist->next_pts = ist->pts;
2851 av_init_packet(&avpkt);
/* A packet dts resynchronizes the stream's running timestamps. */
2859 if (pkt->dts != AV_NOPTS_VALUE) {
2860 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2861 if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2862 ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2865 // while we have more to decode or while the decoder did output something on EOF
2866 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2870 ist->pts = ist->next_pts;
2871 ist->dts = ist->next_dts;
/* Decoders may consume only part of a packet; warn once per stream. */
2873 if (avpkt.size && avpkt.size != pkt->size) {
2874 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2875 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2876 ist->showed_multi_packet_warning = 1;
2879 switch (ist->st->codec->codec_type) {
2880 case AVMEDIA_TYPE_AUDIO:
2881 ret = decode_audio (ist, &avpkt, &got_output);
2883 case AVMEDIA_TYPE_VIDEO:
2884 ret = decode_video (ist, &avpkt, &got_output);
/* Derive the frame duration: packet duration if present, otherwise the codec
 * time base scaled by ticks per frame (parser repeat_pict when available). */
2885 if (avpkt.duration) {
2886 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2887 } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
2888 int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
2889 duration = ((int64_t)AV_TIME_BASE *
2890 ist->st->codec->time_base.num * ticks) /
2891 ist->st->codec->time_base.den;
2895 if(ist->dts != AV_NOPTS_VALUE && duration) {
2896 ist->next_dts += duration;
2898 ist->next_dts = AV_NOPTS_VALUE;
2901 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2903 case AVMEDIA_TYPE_SUBTITLE:
2904 ret = transcode_subtitles(ist, &avpkt, &got_output);
2914 avpkt.pts= AV_NOPTS_VALUE;
2916 // touch data and size only if not EOF
2918 if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
2928 /* handle stream copy */
/* Streams not being decoded: advance timestamps analytically, then copy the
 * packet to each matching copy-mode output stream. */
2929 if (!ist->decoding_needed) {
2930 rate_emu_sleep(ist);
2931 ist->dts = ist->next_dts;
2932 switch (ist->st->codec->codec_type) {
2933 case AVMEDIA_TYPE_AUDIO:
2934 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2935 ist->st->codec->sample_rate;
2937 case AVMEDIA_TYPE_VIDEO:
2938 if (pkt->duration) {
2939 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2940 } else if(ist->st->codec->time_base.num != 0) {
2941 int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2942 ist->next_dts += ((int64_t)AV_TIME_BASE *
2943 ist->st->codec->time_base.num * ticks) /
2944 ist->st->codec->time_base.den;
2948 ist->pts = ist->dts;
2949 ist->next_pts = ist->next_dts;
2951 for (i = 0; pkt && i < nb_output_streams; i++) {
2952 OutputStream *ost = output_streams[i];
2954 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2957 do_streamcopy(ist, ost, pkt);
/* Print an SDP session description covering all output files to stdout (used
 * for RTP outputs). NOTE(review): allocation-failure handling and the free of
 * avc are on elided lines — confirm against full source. */
2963 static void print_sdp(void)
2967 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
/* Gather the muxer contexts of every output file for av_sdp_create(). */
2971 for (i = 0; i < nb_output_files; i++)
2972 avc[i] = output_files[i]->ctx;
2974 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
2975 printf("SDP:\n%s\n", sdp);
/* Open the decoder for one input stream (if decoding is needed), wiring in the
 * custom DR1 buffer pool when supported, and initialize its running
 * timestamps. On failure, writes a human-readable message into `error` and
 * returns a negative AVERROR code.
 * NOTE(review): some lines are elided in this excerpt (e.g. the codec NULL
 * check before line 2987). */
2980 static int init_input_stream(int ist_index, char *error, int error_len)
2982 InputStream *ist = input_streams[ist_index];
2984 if (ist->decoding_needed) {
2985 AVCodec *codec = ist->dec;
2987 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2988 avcodec_get_name(ist->st->codec->codec_id), ist->file_index, ist->st->index);
2989 return AVERROR(EINVAL);
/* DR1 (direct rendering) only when the codec supports it and deinterlacing is
 * off; install the custom buffer pool callbacks for video. */
2992 ist->dr1 = (codec->capabilities & CODEC_CAP_DR1) && !do_deinterlace;
2993 if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {
2994 ist->st->codec->get_buffer = codec_get_buffer;
2995 ist->st->codec->release_buffer = codec_release_buffer;
2996 ist->st->codec->opaque = &ist->buffer_pool;
/* Default to automatic threading unless the user set "threads". */
2999 if (!av_dict_get(ist->opts, "threads", NULL, 0))
3000 av_dict_set(&ist->opts, "threads", "auto", 0);
3001 if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
3002 snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
3003 ist->file_index, ist->st->index);
3004 return AVERROR(EINVAL);
3006 assert_codec_experimental(ist->st->codec, 0);
3007 assert_avoptions(ist->opts);
/* Timestamps start unknown; output_packet() seeds them from the first packet. */
3010 ist->next_pts = AV_NOPTS_VALUE;
3011 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream feeding this output stream, or (per the elided
 * fallthrough after line 3020) presumably NULL when the output has no direct
 * source (e.g. fed by a complex filtergraph) — confirm against full source. */
3017 static InputStream *get_input_stream(OutputStream *ost)
3019 if (ost->source_index >= 0)
3020 return input_streams[ost->source_index];
/* Parse the -force_key_frames argument: a comma-separated list of timestamps.
 * Counts entries, allocates ost->forced_kf_pts, and stores each time rescaled
 * into the encoder's time base. Exits fatally on allocation failure.
 * NOTE(review): the loop body counting commas and the pointer advance between
 * entries are on elided lines. */
3024 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3025 AVCodecContext *avctx)
/* First pass: count entries (comma counting is on the elided lines). */
3031 for (p = kf; *p; p++)
3034 ost->forced_kf_count = n;
3035 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3036 if (!ost->forced_kf_pts) {
3037 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* Second pass: parse each comma-separated timestamp. */
3042 for (i = 0; i < n; i++) {
3043 char *next = strchr(p, ',');
3048 t = parse_time_or_die("force_key_frames", p, 1);
3049 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Warn (once per stream) when a packet arrives for a stream index that was not
 * present when the input file was opened; nb_streams_warn records the highest
 * index already reported so repeats are suppressed. */
3055 static void report_new_stream(int input_index, AVPacket *pkt)
3057 InputFile *file = input_files[input_index];
3058 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream (or it existed from the start). */
3060 if (pkt->stream_index < file->nb_streams_warn)
3062 av_log(file->ctx, AV_LOG_WARNING,
3063 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3064 av_get_media_type_string(st->codec->codec_type),
3065 input_index, pkt->stream_index,
3066 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3067 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the transcode loop: derive every output stream's codec
 * parameters (copying from the input for stream copy, or from the filtergraph
 * for encoding), open all decoders and encoders, write output file headers,
 * and dump the stream mapping.
 * NOTE(review): this excerpt elides a large number of interior lines (the
 * embedded original line numbers jump throughout), so control flow between
 * the visible statements is incomplete; section comments below describe only
 * what is visible. */
3070 static int transcode_init(void)
3072 int ret = 0, i, j, k;
3073 AVFormatContext *oc;
3074 AVCodecContext *codec, *icodec = NULL;
3080 /* init framerate emulation */
/* Record a wall-clock start time per stream for -re pacing. */
3081 for (i = 0; i < nb_input_files; i++) {
3082 InputFile *ifile = input_files[i];
3083 if (ifile->rate_emu)
3084 for (j = 0; j < ifile->nb_streams; j++)
3085 input_streams[j + ifile->ist_index]->start = av_gettime();
3088 /* output stream init */
3089 for (i = 0; i < nb_output_files; i++) {
3090 oc = output_files[i]->ctx;
3091 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
3092 av_dump_format(oc, i, oc->filename, 1);
3093 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
3094 return AVERROR(EINVAL);
3098 /* init complex filtergraphs */
3099 for (i = 0; i < nb_filtergraphs; i++)
3100 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
3103 /* for each output stream, we compute the right encoding parameters */
3104 for (i = 0; i < nb_output_streams; i++) {
3105 ost = output_streams[i];
3106 oc = output_files[ost->file_index]->ctx;
3107 ist = get_input_stream(ost);
3109 if (ost->attachment_filename)
3112 codec = ost->st->codec;
3115 icodec = ist->st->codec;
/* Properties inherited from the input stream regardless of copy/encode. */
3117 ost->st->disposition = ist->st->disposition;
3118 codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
3119 codec->chroma_sample_location = icodec->chroma_sample_location;
/* ---- Stream copy: clone codec parameters from the input. ---- */
3122 if (ost->stream_copy) {
3123 uint64_t extra_size;
3125 av_assert0(ist && !ost->filter);
3127 extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
3129 if (extra_size > INT_MAX) {
3130 return AVERROR(EINVAL);
3133 /* if stream_copy is selected, no need to decode or encode */
3134 codec->codec_id = icodec->codec_id;
3135 codec->codec_type = icodec->codec_type;
/* Keep the input codec tag only when the output container accepts it. */
3137 if (!codec->codec_tag) {
3138 if (!oc->oformat->codec_tag ||
3139 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
3140 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
3141 codec->codec_tag = icodec->codec_tag;
3144 codec->bit_rate = icodec->bit_rate;
3145 codec->rc_max_rate = icodec->rc_max_rate;
3146 codec->rc_buffer_size = icodec->rc_buffer_size;
3147 codec->field_order = icodec->field_order;
3148 codec->extradata = av_mallocz(extra_size);
3149 if (!codec->extradata) {
3150 return AVERROR(ENOMEM);
3152 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
3153 codec->extradata_size= icodec->extradata_size;
3154 codec->bits_per_coded_sample = icodec->bits_per_coded_sample;
3156 codec->time_base = ist->st->time_base;
3158 * Avi is a special case here because it supports variable fps but
3159 * having the fps and timebase differe significantly adds quite some
/* Heuristics for choosing a sane output time base per container when the
 * user has not forced -copytb. */
3162 if(!strcmp(oc->oformat->name, "avi")) {
3163 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
3164 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
3165 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(icodec->time_base)
3166 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(icodec->time_base) < 1.0/500
3168 codec->time_base.num = ist->st->r_frame_rate.den;
3169 codec->time_base.den = 2*ist->st->r_frame_rate.num;
3170 codec->ticks_per_frame = 2;
3171 } else if ( copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base)
3172 && av_q2d(ist->st->time_base) < 1.0/500
3174 codec->time_base = icodec->time_base;
3175 codec->time_base.num *= icodec->ticks_per_frame;
3176 codec->time_base.den *= 2;
3177 codec->ticks_per_frame = 2;
3179 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
3180 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
3181 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
3183 if( copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base)
3184 && av_q2d(ist->st->time_base) < 1.0/500
3186 codec->time_base = icodec->time_base;
3187 codec->time_base.num *= icodec->ticks_per_frame;
3191 if(ost->frame_rate.num)
3192 codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
3194 av_reduce(&codec->time_base.num, &codec->time_base.den,
3195 codec->time_base.num, codec->time_base.den, INT_MAX);
/* Per-media-type copied parameters. */
3197 switch (codec->codec_type) {
3198 case AVMEDIA_TYPE_AUDIO:
3199 if (audio_volume != 256) {
3200 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3203 codec->channel_layout = icodec->channel_layout;
3204 codec->sample_rate = icodec->sample_rate;
3205 codec->channels = icodec->channels;
3206 codec->frame_size = icodec->frame_size;
3207 codec->audio_service_type = icodec->audio_service_type;
3208 codec->block_align = icodec->block_align;
/* Zero bogus block_align values for MP3/AC3 so the muxer recomputes them. */
3209 if((codec->block_align == 1 || codec->block_align == 1152) && codec->codec_id == CODEC_ID_MP3)
3210 codec->block_align= 0;
3211 if(codec->codec_id == CODEC_ID_AC3)
3212 codec->block_align= 0;
3214 case AVMEDIA_TYPE_VIDEO:
3215 codec->pix_fmt = icodec->pix_fmt;
3216 codec->width = icodec->width;
3217 codec->height = icodec->height;
3218 codec->has_b_frames = icodec->has_b_frames;
/* Prefer stream-level SAR, then codec-level, else square pixels. */
3219 if (!codec->sample_aspect_ratio.num) {
3220 codec->sample_aspect_ratio =
3221 ost->st->sample_aspect_ratio =
3222 ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
3223 ist->st->codec->sample_aspect_ratio.num ?
3224 ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
3226 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3228 case AVMEDIA_TYPE_SUBTITLE:
3229 codec->width = icodec->width;
3230 codec->height = icodec->height;
3232 case AVMEDIA_TYPE_DATA:
3233 case AVMEDIA_TYPE_ATTACHMENT:
/* ---- Encoding path: find the encoder and configure from filters. ---- */
3240 ost->enc = avcodec_find_encoder(codec->codec_id);
3242 /* should only happen when a default codec is not present. */
3243 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3244 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3245 ret = AVERROR(EINVAL);
3250 ist->decoding_needed = 1;
3251 ost->encoding_needed = 1;
/* Build a simple (1-in/1-out) filtergraph for audio/video encodes that do not
 * already have one. */
3254 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
3255 codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
3257 fg = init_simple_filtergraph(ist, ost);
3258 if (configure_filtergraph(fg)) {
3259 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Pick the output frame rate: filter-provided, else input r_frame_rate, else
 * 25 fps; clamp to the encoder's supported list unless -force_fps. */
3264 if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
3265 if (ost->filter && !ost->frame_rate.num)
3266 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3267 if (ist && !ost->frame_rate.num)
3268 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
3269 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3270 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3271 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3272 ost->frame_rate = ost->enc->supported_framerates[idx];
/* Encoder parameters come from the negotiated filtergraph input pad. */
3276 switch (codec->codec_type) {
3277 case AVMEDIA_TYPE_AUDIO:
3278 codec->sample_fmt = ost->filter->filter->inputs[0]->format;
3279 codec->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3280 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3281 codec->channels = av_get_channel_layout_nb_channels(codec->channel_layout);
3282 codec->time_base = (AVRational){ 1, codec->sample_rate };
3284 case AVMEDIA_TYPE_VIDEO:
3285 codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
3286 if (ost->filter && !(codec->time_base.num && codec->time_base.den))
3287 codec->time_base = ost->filter->filter->inputs[0]->time_base;
3288 if ( av_q2d(codec->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3289 && (video_sync_method == VSYNC_CFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3290 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3291 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale previously-parsed forced keyframe times into the new time base
 * (destination time base is on an elided line). */
3293 for (j = 0; j < ost->forced_kf_count; j++)
3294 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3298 codec->width = ost->filter->filter->inputs[0]->w;
3299 codec->height = ost->filter->filter->inputs[0]->h;
3300 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3301 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
3302 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
3303 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3304 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
/* Propagate -bits_per_raw_sample only while geometry/format match the input
 * (condition head is on an elided line). */
3307 codec->width != icodec->width ||
3308 codec->height != icodec->height ||
3309 codec->pix_fmt != icodec->pix_fmt) {
3310 codec->bits_per_raw_sample = frame_bits_per_raw_sample;
3313 if (ost->forced_keyframes)
3314 parse_forced_key_frames(ost->forced_keyframes, ost,
3317 case AVMEDIA_TYPE_SUBTITLE:
3318 codec->time_base = (AVRational){1, 1000};
/* ---- Two-pass log file handling (-pass 1/2). ---- */
3325 if (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
3326 char logfilename[1024];
3329 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
3330 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
/* libx264 takes its stats file via the "stats" private option instead. */
3332 if (!strcmp(ost->enc->name, "libx264")) {
3333 av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
3335 if (codec->flags & CODEC_FLAG_PASS2) {
3337 size_t logbuffer_size;
3338 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
3339 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
3343 codec->stats_in = logbuffer;
3345 if (codec->flags & CODEC_FLAG_PASS1) {
3346 f = fopen(logfilename, "wb");
3348 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
3349 logfilename, strerror(errno));
3359 /* open each encoder */
3360 for (i = 0; i < nb_output_streams; i++) {
3361 ost = output_streams[i];
3362 if (ost->encoding_needed) {
3363 AVCodec *codec = ost->enc;
3364 AVCodecContext *dec = NULL;
/* Forward the decoder's subtitle header (e.g. ASS) to the encoder. */
3366 if ((ist = get_input_stream(ost)))
3367 dec = ist->st->codec;
3368 if (dec && dec->subtitle_header) {
3369 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
3370 if (!ost->st->codec->subtitle_header) {
3371 ret = AVERROR(ENOMEM);
3374 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3375 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
3377 if (!av_dict_get(ost->opts, "threads", NULL, 0))
3378 av_dict_set(&ost->opts, "threads", "auto", 0);
3379 if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
3380 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
3381 ost->file_index, ost->index);
3382 ret = AVERROR(EINVAL);
3385 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3386 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
3387 av_buffersink_set_frame_size(ost->filter->filter,
3388 ost->st->codec->frame_size);
3389 assert_codec_experimental(ost->st->codec, 1);
3390 assert_avoptions(ost->opts);
3391 if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
3392 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3393 " It takes bits/s as argument, not kbits/s\n");
3394 extra_size += ost->st->codec->extradata_size;
3396 if (ost->st->codec->me_threshold)
3397 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
3401 /* init input streams */
3402 for (i = 0; i < nb_input_streams; i++)
3403 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
3406 /* discard unused programs */
3407 for (i = 0; i < nb_input_files; i++) {
3408 InputFile *ifile = input_files[i];
3409 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3410 AVProgram *p = ifile->ctx->programs[j];
3411 int discard = AVDISCARD_ALL;
/* A program is kept if any of its streams is used. */
3413 for (k = 0; k < p->nb_stream_indexes; k++)
3414 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3415 discard = AVDISCARD_DEFAULT;
3418 p->discard = discard;
3422 /* open files and write file headers */
3423 for (i = 0; i < nb_output_files; i++) {
3424 oc = output_files[i]->ctx;
3425 oc->interrupt_callback = int_cb;
3426 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3428 const char *errbuf_ptr = errbuf;
3429 if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)
3430 errbuf_ptr = strerror(AVUNERROR(ret));
3431 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?): %s", i, errbuf_ptr);
3432 ret = AVERROR(EINVAL);
3435 // assert_avoptions(output_files[i]->opts);
/* RTP outputs are announced via SDP instead (see print_sdp). */
3436 if (strcmp(oc->oformat->name, "rtp")) {
3442 /* dump the file output parameters - cannot be done before in case
3444 for (i = 0; i < nb_output_files; i++) {
3445 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3448 /* dump the stream mapping */
3449 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3450 for (i = 0; i < nb_input_streams; i++) {
3451 ist = input_streams[i];
3453 for (j = 0; j < ist->nb_filters; j++) {
3454 if (ist->filters[j]->graph->graph_desc) {
3455 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3456 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3457 ist->filters[j]->name);
3458 if (nb_filtergraphs > 1)
3459 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3460 av_log(NULL, AV_LOG_INFO, "\n");
3465 for (i = 0; i < nb_output_streams; i++) {
3466 ost = output_streams[i];
3468 if (ost->attachment_filename) {
3469 /* an attached file */
3470 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3471 ost->attachment_filename, ost->file_index, ost->index);
3475 if (ost->filter && ost->filter->graph->graph_desc) {
3476 /* output from a complex graph */
3477 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3478 if (nb_filtergraphs > 1)
3479 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3481 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3482 ost->index, ost->enc ? ost->enc->name : "?");
3486 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3487 input_streams[ost->source_index]->file_index,
3488 input_streams[ost->source_index]->st->index,
3491 if (ost->sync_ist != input_streams[ost->source_index])
3492 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3493 ost->sync_ist->file_index,
3494 ost->sync_ist->st->index);
3495 if (ost->stream_copy)
3496 av_log(NULL, AV_LOG_INFO, " (copy)");
3498 av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
3499 input_streams[ost->source_index]->dec->name : "?",
3500 ost->enc ? ost->enc->name : "?");
3501 av_log(NULL, AV_LOG_INFO, "\n");
3505 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3517 * @return 1 if there are still streams where more output is wanted,
// need_output(): scan all output streams and decide whether the main loop
// should keep transcoding.  NOTE(review): this listing is truncated —
// the opening brace, loop-variable declarations, the "continue"/return
// statements and the final return are elided between the visible lines.
3520 static int need_output(void)
3524     for (i = 0; i < nb_output_streams; i++) {
3525         OutputStream *ost    = output_streams[i];
3526         OutputFile *of       = output_files[ost->file_index];
3527         AVFormatContext *os  = output_files[ost->file_index]->ctx;
// A stream no longer wants output once it is past the recording time or
// its file has hit the -fs size limit (checked via avio_tell()).
3529         if (ost->is_past_recording_time ||
3530             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3532         if (ost->frame_number >= ost->max_frames) {
// Hitting -frames on one stream retires every stream of the same file.
3534             for (j = 0; j < of->ctx->nb_streams; j++)
3535                 output_streams[of->ost_index + j]->is_past_recording_time = 1;
3545 static int input_acceptable(InputStream *ist)
3547 av_assert1(!ist->discard);
3548 return !input_files[ist->file_index]->unavailable &&
3549 !input_files[ist->file_index]->eof_reached;
// find_graph_input(): pick the input file whose filtergraph input is most
// starved.  For each graph input, av_buffersrc_get_nb_failed_requests()
// counts how often the graph asked for a frame and got none; the acceptable
// input stream with the highest count wins.  NOTE(review): listing is
// truncated — closing braces and the final "return file_index;" are elided.
3552 static int find_graph_input(FilterGraph *graph)
3554     int i, nb_req_max = 0, file_index = -1;
3556     for (i = 0; i < graph->nb_inputs; i++) {
3557         int nb_req = av_buffersrc_get_nb_failed_requests(graph->inputs[i]->filter);
3558         if (nb_req > nb_req_max) {
3559             InputStream *ist = graph->inputs[i]->ist;
// Only consider streams whose file can actually still deliver data.
3560             if (input_acceptable(ist)) {
3561                 nb_req_max = nb_req;
3562                 file_index = ist->file_index;
3571  * Select the input file to read from.
3573  * @return >=0 index of the input file to use;
3574  *         -1 if no file is acceptable;
3575  *         -2 to read from filters without reading from a file
// NOTE(review): listing is truncated — the opening brace, several local
// declarations (opts_min, ost), the ost_index bookkeeping inside the loop,
// the buffer-unref after the PEEK, and the final "return -1;" are elided.
3577 static int select_input_file(void)
3579     int i, ret, nb_active_out = nb_output_streams, ost_index = -1;
3582     AVFilterBufferRef *dummy;
// Streams past the recording time are marked unavailable up front and
// excluded from the "active outputs" count.
3584     for (i = 0; i < nb_output_streams; i++)
3585         nb_active_out -= output_streams[i]->unavailable =
3586                          output_streams[i]->is_past_recording_time;
3587     while (nb_active_out) {
3588         opts_min = INT64_MAX;
// Pick the available output stream with the smallest current DTS (in
// AV_TIME_BASE units) so muxing stays roughly interleaved.
3590         for (i = 0; i < nb_output_streams; i++) {
3591             OutputStream *ost = output_streams[i];
3592             int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3594             if (!ost->unavailable && opts < opts_min) {
3602         ost = output_streams[ost_index];
3603         if (ost->source_index >= 0) {
3604             /* ost is directly connected to an input */
3605             InputStream *ist = input_streams[ost->source_index];
3606             if (input_acceptable(ist))
3607                 return ist->file_index;
3609             /* ost is connected to a complex filtergraph */
3610             av_assert1(ost->filter);
// Peek (do not consume) the sink: if a frame is already buffered we can
// run the filters without demuxing — the -2 return path.
3611             ret = av_buffersink_get_buffer_ref(ost->filter->filter, &dummy,
3612                                                AV_BUFFERSINK_FLAG_PEEK);
3615             ret = find_graph_input(ost->filter->graph);
// Nothing can feed this stream right now; drop it from this round.
3619         ost->unavailable = 1;
// check_keyboard_interaction(): poll stdin (at most every 100ms) for the
// interactive single-key commands ('q' quit, '+'/'-' verbosity, 'c'/'C'
// filter commands, 'd'/'D' debug, '?'/'h' help, 's' QP histogram).
// Returns AVERROR_EXIT to stop transcoding, 0 otherwise.
// NOTE(review): listing is truncated — the 'q' handling, several braces
// and the final "return 0;" are elided between the visible lines.
3625 static int check_keyboard_interaction(int64_t cur_time)
3628     static int64_t last_time;
3629     if (received_nb_signals)
3630         return AVERROR_EXIT;
3631     /* read_key() returns 0 on EOF */
3632     if(cur_time - last_time >= 100000 && !run_as_daemon){
3634         last_time = cur_time;
3638         return AVERROR_EXIT;
3639     if (key == '+') av_log_set_level(av_log_get_level()+10);
3640     if (key == '-') av_log_set_level(av_log_get_level()-10);
3641     if (key == 's') qp_hist     ^= 1;
3644             do_hex_dump = do_pkt_dump = 0;
3645         } else if(do_pkt_dump){
3649         av_log_set_level(AV_LOG_DEBUG);
// 'c'/'C': read "<target> <time> <command>[ <argument>]" from stdin and
// either send it immediately or queue it on every filtergraph.
3651     if (key == 'c' || key == 'C'){
3652         char buf[4096], target[64], command[256], arg[256] = {0};
3655         fprintf(stderr, "\nEnter command: <target> <time> <command>[ <argument>]\n");
3657         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3662             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3663             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3664                    target, time, command, arg);
3665             for (i = 0; i < nb_filtergraphs; i++) {
3666                 FilterGraph *fg = filtergraphs[i];
// 'c' applies the command to one matching filter instance, 'C' to all.
3669                         ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3670                                                           key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3671                         fprintf(stderr, "Command reply for stream %d: ret:%d res:%s\n", i, ret, buf);
3673                         ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3678             av_log(NULL, AV_LOG_ERROR,
3679                    "Parse error, at least 3 arguments were expected, "
3680                    "only %d given in string '%s'\n", n, buf);
// 'D': cycle through decoder debug flags; 'd': prompt for a numeric value.
3683     if (key == 'd' || key == 'D'){
3686             debug = input_streams[0]->st->codec->debug<<1;
3687             if(!debug) debug = 1;
3688             while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3691             if(scanf("%d", &debug)!=1)
3692                 fprintf(stderr,"error parsing debug value\n");
3693         for(i=0;i<nb_input_streams;i++) {
3694             input_streams[i]->st->codec->debug = debug;
3696         for(i=0;i<nb_output_streams;i++) {
3697             OutputStream *ost = output_streams[i];
3698             ost->st->codec->debug = debug;
3700         if(debug) av_log_set_level(AV_LOG_DEBUG);
3701         fprintf(stderr,"debug=%d\n", debug);
3704         fprintf(stderr, "key    function\n"
3705                         "?      show this help\n"
3706                         "+      increase verbosity\n"
3707                         "-      decrease verbosity\n"
3708                         "c      Send command to filtergraph\n"
3709                         "D      cycle through available debug modes\n"
3710                         "h      dump packets/hex press to cycle through the 3 states\n"
3712                         "s      Show QP histogram\n"
// input_thread(): per-input-file demuxer thread (multi-input case).
// Reads packets with av_read_frame() and pushes them into the file's FIFO;
// blocks on fifo_cond while the FIFO is full so the producer never
// overruns the consumer.  NOTE(review): listing is truncated — the
// InputFile *f = arg assignment, EAGAIN sleep/continue handling, the
// signal to the reader after the write, and the return are elided.
3719 static void *input_thread(void *arg)
3724     while (!transcoding_finished && ret >= 0) {
3726         ret = av_read_frame(f->ctx, &pkt);
3728         if (ret == AVERROR(EAGAIN)) {
3735         pthread_mutex_lock(&f->fifo_lock);
3736         while (!av_fifo_space(f->fifo))
3737             pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
// av_dup_packet() makes the packet self-contained before it crosses
// threads via the FIFO (the demuxer may otherwise reuse its buffer).
3739         av_dup_packet(&pkt);
3740         av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
3742         pthread_mutex_unlock(&f->fifo_lock);
// free_input_threads(): stop and join all demuxer threads, then drain and
// free each file's packet FIFO.  No-op in the single-input (unthreaded)
// case.  NOTE(review): listing is truncated — local declarations, a few
// braces and the f->joined bookkeeping are elided.
3749 static void free_input_threads(void)
3753     if (nb_input_files == 1)
3756     transcoding_finished = 1;
3758     for (i = 0; i < nb_input_files; i++) {
3759         InputFile *f = input_files[i];
3762         if (!f->fifo || f->joined)
// Drain the FIFO and wake the producer so a thread blocked on a full
// FIFO in input_thread() can observe transcoding_finished and exit.
3765         pthread_mutex_lock(&f->fifo_lock);
3766         while (av_fifo_size(f->fifo)) {
3767             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
3768             av_free_packet(&pkt);
3770         pthread_cond_signal(&f->fifo_cond);
3771         pthread_mutex_unlock(&f->fifo_lock);
3773         pthread_join(f->thread, NULL);
// Packets written between the drain above and thread exit must also be
// released before the FIFO itself is freed.
3776         while (av_fifo_size(f->fifo)) {
3777             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
3778             av_free_packet(&pkt);
3780         av_fifo_free(f->fifo);
// init_input_threads(): when more than one input file is open, give each
// file an 8-packet FIFO, a mutex/cond pair, and its own input_thread()
// demuxer thread.  Returns 0 on success or a negative AVERROR code.
// NOTE(review): listing is truncated — local declarations and the final
// "return 0;" are elided.
3784 static int init_input_threads(void)
3788     if (nb_input_files == 1)
3791     for (i = 0; i < nb_input_files; i++) {
3792         InputFile *f = input_files[i];
3794         if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
3795             return AVERROR(ENOMEM);
3797         pthread_mutex_init(&f->fifo_lock, NULL);
3798         pthread_cond_init (&f->fifo_cond, NULL);
// pthread_create() returns a positive errno value; wrap it as AVERROR.
3800         if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
3801             return AVERROR(ret);
// get_input_packet_mt(): non-blocking pop of one packet from the per-file
// FIFO filled by input_thread().  Signals fifo_cond after a successful
// read so a producer blocked on a full FIFO can resume; returns
// AVERROR(EAGAIN) when the FIFO is empty.  NOTE(review): listing is
// truncated — the ret declaration, EOF handling and "return ret;" are
// elided.
3806 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3810     pthread_mutex_lock(&f->fifo_lock);
3812     if (av_fifo_size(f->fifo)) {
3813         av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
3814         pthread_cond_signal(&f->fifo_cond);
3819         ret = AVERROR(EAGAIN);
3822     pthread_mutex_unlock(&f->fifo_lock);
// get_input_packet(): fetch the next demuxed packet for input file f —
// via the threaded FIFO when several inputs are open, synchronously with
// av_read_frame() otherwise.  NOTE(review): listing is truncated — the
// #if HAVE_PTHREADS guards that normally wrap the threaded branch are
// elided; confirm against the full source.
3828 static int get_input_packet(InputFile *f, AVPacket *pkt)
3831     if (nb_input_files > 1)
3832         return get_input_packet_mt(f, pkt);
3834     return av_read_frame(f->ctx, pkt);
3838  * The following code is the main loop of the file converter
// transcode(): drive the whole conversion — init, demux/decode/filter/
// encode loop, flush, trailer write, and teardown.
// NOTE(review): this listing is heavily truncated; braces, declarations,
// "goto discard_packet"/error paths and several statements are elided
// between the visible lines.  Comments below describe only what the
// visible code shows.
3840 static int transcode(void)
3843     AVFormatContext *is, *os;
3846     int no_packet_count = 0;
3847     int64_t timer_start;
3849     ret = transcode_init();
3853     if (stdin_interaction) {
3854         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3857     timer_start = av_gettime();
3860     if ((ret = init_input_threads()) < 0)
// Main loop: runs until SIGTERM, keyboard quit, or no output is needed.
3864     for (; received_sigterm == 0;) {
3865         int file_index, ist_index;
3867         int64_t cur_time= av_gettime();
3869         /* if 'q' pressed, exits */
3870         if (stdin_interaction)
3871             if (check_keyboard_interaction(cur_time) < 0)
3874         /* check if there's any stream where output is still needed */
3875         if (!need_output()) {
3876             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3880         /* select the stream that we must read now */
3881         file_index = select_input_file();
3882         /* if none, if is finished */
3883         if (file_index == -2) {
// file_index < 0: no file is readable right now.  If some files were only
// temporarily unavailable, clear the flags and retry; otherwise finish.
3887         if (file_index < 0) {
3888             if (no_packet_count) {
3889                 no_packet_count = 0;
3890                 for (i = 0; i < nb_input_files; i++)
3891                     input_files[i]->unavailable = 0;
3895             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3899         is  = input_files[file_index]->ctx;
3900         ret = get_input_packet(input_files[file_index], &pkt);
3902         if (ret == AVERROR(EAGAIN)) {
3903             input_files[file_index]->unavailable = 1;
3908             if (ret != AVERROR_EOF) {
3909                 print_error(is->filename, ret);
// EOF on this file: flush every decoder that belongs to it by feeding a
// NULL packet.
3913             input_files[file_index]->eof_reached = 1;
3915             for (i = 0; i < input_files[file_index]->nb_streams; i++) {
3916                 ist = input_streams[input_files[file_index]->ist_index + i];
3917                 if (ist->decoding_needed)
3918                     output_packet(ist, NULL);
3928         no_packet_count = 0;
3929         for (i = 0; i < nb_input_files; i++)
3930             input_files[i]->unavailable = 0;
3933             av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3934                              is->streams[pkt.stream_index]);
3936         /* the following test is needed in case new streams appear
3937            dynamically in stream : we ignore them */
3938         if (pkt.stream_index >= input_files[file_index]->nb_streams) {
3939             report_new_stream(file_index, &pkt);
3940             goto discard_packet;
3942         ist_index = input_files[file_index]->ist_index + pkt.stream_index;
3943         ist = input_streams[ist_index];
3945             goto discard_packet;
// One-shot wrap correction: if pts/dts lie closer to start_time + 2^bits
// than to start_time, assume the counter wrapped and subtract the period.
3947         if(!ist->wrap_correction_done && input_files[file_index]->ctx->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3948             uint64_t stime = av_rescale_q(input_files[file_index]->ctx->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3949             uint64_t stime2= stime + (1LL<<ist->st->pts_wrap_bits);
3950             ist->wrap_correction_done = 1;
3951             if(pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime && pkt.dts - stime > stime2 - pkt.dts) {
3952                 pkt.dts -= 1LL<<ist->st->pts_wrap_bits;
3953                 ist->wrap_correction_done = 0;
3955             if(pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime && pkt.pts - stime > stime2 - pkt.pts) {
3956                 pkt.pts -= 1LL<<ist->st->pts_wrap_bits;
3957                 ist->wrap_correction_done = 0;
// Apply the per-file -itsoffset/discontinuity offset, then -itsscale.
3961         if (pkt.dts != AV_NOPTS_VALUE)
3962             pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3963         if (pkt.pts != AV_NOPTS_VALUE)
3964             pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3966         if (pkt.pts != AV_NOPTS_VALUE)
3967             pkt.pts *= ist->ts_scale;
3968         if (pkt.dts != AV_NOPTS_VALUE)
3969             pkt.dts *= ist->ts_scale;
3972             av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3973                    "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s  pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%"PRId64"\n",
3974                    ist_index, av_get_media_type_string(ist->st->codec->codec_type),
3975                    av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3976                    av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3977                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3978                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3979                    input_files[ist->file_index]->ts_offset);
// Timestamp discontinuity handling: for AVFMT_TS_DISCONT demuxers, fold
// large dts jumps into ts_offset; otherwise drop out-of-range dts/pts.
3982         if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE && !copy_ts) {
3983             int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3984             int64_t delta   = pkt_dts - ist->next_dts;
3985             if (is->iformat->flags & AVFMT_TS_DISCONT) {
3986                 if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3987                     (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
3988                      ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
3989                     pkt_dts+1<ist->pts){
3990                     input_files[ist->file_index]->ts_offset -= delta;
3991                     av_log(NULL, AV_LOG_DEBUG,
3992                            "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3993                            delta, input_files[ist->file_index]->ts_offset);
3994                     pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3995                     if (pkt.pts != AV_NOPTS_VALUE)
3996                         pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3999                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4000                     (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
4001                     pkt_dts+1<ist->pts){
4002                     av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4003                     pkt.dts = AV_NOPTS_VALUE;
4005                 if (pkt.pts != AV_NOPTS_VALUE){
4006                     int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4007                     delta   = pkt_pts - ist->next_dts;
4008                     if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4009                         (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
4010                         pkt_pts+1<ist->pts) {
4011                         av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4012                         pkt.pts = AV_NOPTS_VALUE;
4018         sub2video_heartbeat(ist, pkt.pts);
4020         // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
4021         if ((ret = output_packet(ist, &pkt)) < 0 ||
4022             ((ret = poll_filters()) < 0 && ret != AVERROR_EOF)) {
4024             av_strerror(ret, buf, sizeof(buf));
4025             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
4026                    ist->file_index, ist->st->index, buf);
4029             av_free_packet(&pkt);
4034         av_free_packet(&pkt);
4036         /* dump report by using the output first video and audio streams */
4037         print_report(0, timer_start, cur_time);
4040     free_input_threads();
4043     /* at the end of stream, we must flush the decoder buffers */
4044     for (i = 0; i < nb_input_streams; i++) {
4045         ist = input_streams[i];
4046         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4047             output_packet(ist, NULL);
4055     /* write the trailer if needed and close file */
4056     for (i = 0; i < nb_output_files; i++) {
4057         os = output_files[i]->ctx;
4058         av_write_trailer(os);
4061     /* dump report by using the first video and audio streams */
4062     print_report(1, timer_start, av_gettime());
4064     /* close each encoder */
4065     for (i = 0; i < nb_output_streams; i++) {
4066         ost = output_streams[i];
4067         if (ost->encoding_needed) {
4068             av_freep(&ost->st->codec->stats_in);
4069             avcodec_close(ost->st->codec);
4073     /* close each decoder */
4074     for (i = 0; i < nb_input_streams; i++) {
4075         ist = input_streams[i];
4076         if (ist->decoding_needed) {
4077             avcodec_close(ist->st->codec);
// Cleanup path (reached from both success and error exits in the full
// source): join threads and release per-output-stream allocations.
4086     free_input_threads();
4089     if (output_streams) {
4090         for (i = 0; i < nb_output_streams; i++) {
4091             ost = output_streams[i];
4093                 if (ost->stream_copy)
4094                     av_freep(&ost->st->codec->extradata);
4096                     fclose(ost->logfile);
4097                     ost->logfile = NULL;
4099                 av_freep(&ost->st->codec->subtitle_header);
4100                 av_free(ost->forced_kf_pts);
4101                 av_dict_free(&ost->opts);
4108 static int opt_frame_crop(const char *opt, const char *arg)
4110 av_log(NULL, AV_LOG_FATAL, "Option '%s' has been removed, use the crop filter instead\n", opt);
4111 return AVERROR(EINVAL);
4114 static int opt_pad(const char *opt, const char *arg)
4116 av_log(NULL, AV_LOG_FATAL, "Option '%s' has been removed, use the pad filter instead\n", opt);
4120 static int opt_video_channel(const char *opt, const char *arg)
4122 av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -channel.\n");
4123 return opt_default("channel", arg);
4126 static int opt_video_standard(const char *opt, const char *arg)
4128 av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -standard.\n");
4129 return opt_default("standard", arg);
4132 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
4134 audio_codec_name = arg;
4135 return parse_option(o, "codec:a", arg, options);
4138 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
4140 video_codec_name = arg;
4141 return parse_option(o, "codec:v", arg, options);
4144 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
4146 subtitle_codec_name = arg;
4147 return parse_option(o, "codec:s", arg, options);
4150 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
4152 return parse_option(o, "codec:d", arg, options);
// opt_map(): parse one -map argument into o->stream_maps entries.
// Supported forms (from the visible code): "[label]" for lavfi outputs,
// "file[:spec]" to add matching streams, a leading '-' for negative maps
// (disable), and a ",syncfile[:syncspec]" suffix selecting a sync stream.
// NOTE(review): listing is truncated — the negative-map detection, the
// allow_unused handling, several braces and the cleanup/return are elided.
4155 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
4157     StreamMap *m = NULL;
4158     int i, negative = 0, file_idx;
4159     int sync_file_idx = -1, sync_stream_idx = 0;
4167     map = av_strdup(arg);
4169     /* parse sync stream first, just pick first matching stream */
4170     if (sync = strchr(map, ',')) {
4172         sync_file_idx = strtol(sync + 1, &sync, 0);
4173         if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
4174             av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
4179         for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
4180             if (check_stream_specifier(input_files[sync_file_idx]->ctx,
4181                                        input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
4182                 sync_stream_idx = i;
4185         if (i == input_files[sync_file_idx]->nb_streams) {
4186             av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
4187                                        "match any streams.\n", arg);
4193     if (map[0] == '[') {
4194         /* this mapping refers to lavfi output */
4195         const char *c = map + 1;
4196         o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
4197                                     &o->nb_stream_maps, o->nb_stream_maps + 1);
4198         m = &o->stream_maps[o->nb_stream_maps - 1];
4199         m->linklabel = av_get_token(&c, "]");
4200         if (!m->linklabel) {
4201             av_log(NULL, AV_LOG_ERROR, "Invalid output link label: %s.\n", map);
4205         file_idx = strtol(map, &p, 0);
4206         if (file_idx >= nb_input_files || file_idx < 0) {
4207             av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
4211         /* disable some already defined maps */
// Negative map: mark previously added entries of the same file/spec as
// disabled rather than adding a new entry.
4212             for (i = 0; i < o->nb_stream_maps; i++) {
4213                 m = &o->stream_maps[i];
4214                 if (file_idx == m->file_index &&
4215                     check_stream_specifier(input_files[m->file_index]->ctx,
4216                                            input_files[m->file_index]->ctx->streams[m->stream_index],
4217                                            *p == ':' ? p + 1 : p) > 0)
// Positive map: add one entry per input stream matching the specifier.
4221             for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
4222                 if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
4223                             *p == ':' ? p + 1 : p) <= 0)
4225                 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
4226                                             &o->nb_stream_maps, o->nb_stream_maps + 1);
4227                 m = &o->stream_maps[o->nb_stream_maps - 1];
4229                 m->file_index   = file_idx;
4230                 m->stream_index = i;
// Without an explicit sync stream the mapped stream syncs to itself.
4232                 if (sync_file_idx >= 0) {
4233                     m->sync_file_index   = sync_file_idx;
4234                     m->sync_stream_index = sync_stream_idx;
4236                     m->sync_file_index   = file_idx;
4237                     m->sync_stream_index = i;
4243         av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
4251 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
4253 o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
4254 &o->nb_attachments, o->nb_attachments + 1);
4255 o->attachments[o->nb_attachments - 1] = arg;
// opt_map_channel(): parse one -map_channel argument
// ("file.stream.channel[:ofile.ostream]" or "-1" for a muted channel)
// into o->audio_channel_maps, validating file/stream/channel indices and
// that the source stream is audio.  NOTE(review): listing is truncated —
// the exit_program() calls after the FATAL logs, several braces and the
// final return are elided.
4259 static int opt_map_channel(OptionsContext *o, const char *opt, const char *arg)
4265     o->audio_channel_maps =
4266         grow_array(o->audio_channel_maps, sizeof(*o->audio_channel_maps),
4267                    &o->nb_audio_channel_maps, o->nb_audio_channel_maps + 1);
4268     m = &o->audio_channel_maps[o->nb_audio_channel_maps - 1];
4270     /* muted channel syntax */
4271     n = sscanf(arg, "%d:%d.%d", &m->channel_idx, &m->ofile_idx, &m->ostream_idx);
4272     if ((n == 1 || n == 3) && m->channel_idx == -1) {
4273         m->file_idx = m->stream_idx = -1;
4275             m->ofile_idx = m->ostream_idx = -1;
// Normal syntax: input file.stream.channel, optional output file.stream.
4280     n = sscanf(arg, "%d.%d.%d:%d.%d",
4281                &m->file_idx,  &m->stream_idx, &m->channel_idx,
4282                &m->ofile_idx, &m->ostream_idx);
4284     if (n != 3 && n != 5) {
4285         av_log(NULL, AV_LOG_FATAL, "Syntax error, mapchan usage: "
4286                "[file.stream.channel|-1][:syncfile:syncstream]\n");
4290     if (n != 5) // only file.stream.channel specified
4291         m->ofile_idx = m->ostream_idx = -1;
4294     if (m->file_idx < 0 || m->file_idx >= nb_input_files) {
4295         av_log(NULL, AV_LOG_FATAL, "mapchan: invalid input file index: %d\n",
4299     if (m->stream_idx < 0 ||
4300         m->stream_idx >= input_files[m->file_idx]->nb_streams) {
4301         av_log(NULL, AV_LOG_FATAL, "mapchan: invalid input file stream index #%d.%d\n",
4302                m->file_idx, m->stream_idx);
4305     st = input_files[m->file_idx]->ctx->streams[m->stream_idx];
4306     if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
4307         av_log(NULL, AV_LOG_FATAL, "mapchan: stream #%d.%d is not an audio stream.\n",
4308                m->file_idx, m->stream_idx);
4311     if (m->channel_idx < 0 || m->channel_idx >= st->codec->channels) {
4312         av_log(NULL, AV_LOG_FATAL, "mapchan: invalid audio channel #%d.%d.%d\n",
4313                m->file_idx, m->stream_idx, m->channel_idx);
4320  * Parse a metadata specifier in arg.
4321  * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
4322  * @param index for type c/p, chapter/program index is written here
4323  * @param stream_spec for type s, the stream specifier is written here
// NOTE(review): listing is truncated — the leading "if (*arg) switch(*arg)"
// dispatch, the exit_program() calls, the 'g' and default-to-global cases
// and closing braces are elided between the visible lines.
4325 static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
// 's' case: everything after "s:" is the stream specifier ("" if absent).
4333         if (*(++arg) && *arg != ':') {
4334             av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
4337         *stream_spec = *arg == ':' ? arg + 1 : "";
// 'c'/'p' case: an optional ":<n>" selects the chapter/program index.
4341         if (*(++arg) == ':')
4342             *index = strtol(++arg, NULL, 0);
4345         av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
// copy_metadata(): implement -map_metadata — copy one metadata dictionary
// (global/stream/chapter/program, chosen by outspec/inspec) from input
// context ic to output context oc without overwriting existing keys.
// Also records which metadata categories were manually mapped so the
// automatic copy is suppressed later.  NOTE(review): listing is truncated —
// exit_program() calls, the 'g' SET_DICT case, loop closers and the final
// "return 0;" are elided; no comments are inserted inside the multi-line
// macros below, as that would break their backslash continuations.
4352 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
4354     AVDictionary **meta_in = NULL;
4355     AVDictionary **meta_out = NULL;
4357     char type_in, type_out;
4358     const char *istream_spec = NULL, *ostream_spec = NULL;
4359     int idx_in = 0, idx_out = 0;
4361     parse_meta_type(inspec,  &type_in,  &idx_in,  &istream_spec);
4362     parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
4365         if (type_out == 'g' || !*outspec)
4366             o->metadata_global_manual = 1;
4367         if (type_out == 's' || !*outspec)
4368             o->metadata_streams_manual = 1;
4369         if (type_out == 'c' || !*outspec)
4370             o->metadata_chapters_manual = 1;
4374     if (type_in == 'g' || type_out == 'g')
4375         o->metadata_global_manual = 1;
4376     if (type_in == 's' || type_out == 's')
4377         o->metadata_streams_manual = 1;
4378     if (type_in == 'c' || type_out == 'c')
4379         o->metadata_chapters_manual = 1;
4381 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
4382     if ((index) < 0 || (index) >= (nb_elems)) {\
4383         av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
4388 #define SET_DICT(type, meta, context, index)\
4391             meta = &context->metadata;\
4394             METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
4395             meta = &context->chapters[index]->metadata;\
4398             METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
4399             meta = &context->programs[index]->metadata;\
4401         default: av_assert0(0);\
4404     SET_DICT(type_in, meta_in, ic, idx_in);
4405     SET_DICT(type_out, meta_out, oc, idx_out);
4407     /* for input streams choose first matching stream */
4408     if (type_in == 's') {
4409         for (i = 0; i < ic->nb_streams; i++) {
4410             if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
4411                 meta_in = &ic->streams[i]->metadata;
4417             av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match  any streams.\n", istream_spec);
// For output streams the copy happens to every matching stream.
4422     if (type_out == 's') {
4423         for (i = 0; i < oc->nb_streams; i++) {
4424             if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
4425                 meta_out = &oc->streams[i]->metadata;
4426                 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
4431         av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
4436 static int opt_recording_timestamp(OptionsContext *o, const char *opt, const char *arg)
4439 int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6;
4440 struct tm time = *gmtime((time_t*)&recording_timestamp);
4441 strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time);
4442 parse_option(o, "metadata", buf, options);
4444 av_log(NULL, AV_LOG_WARNING, "%s is deprecated, set the 'creation_time' metadata "
4445 "tag instead.\n", opt);
// find_codec_or_die(): look up an encoder or decoder by name and abort
// (AV_LOG_FATAL + exit, elided in this listing) if it does not exist or
// its media type does not match the requested one.  NOTE(review): the
// ternary codec lookup assignment, braces and the "return codec;" tail
// are elided between the visible lines.
4449 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
4451     const char *codec_string = encoder ? "encoder" : "decoder";
4455         avcodec_find_encoder_by_name(name) :
4456         avcodec_find_decoder_by_name(name);
4458         av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
4461     if (codec->type != type) {
4462         av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
// choose_decoder(): pick the decoder for a stream — a user-forced one
// (per-stream "codec" option, also overriding st->codec->codec_id) if
// given, otherwise the default decoder for the stream's codec_id.
// NOTE(review): listing is truncated — the "if (codec_name)" guard,
// braces and "return codec;" are elided.
4468 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
4470     char *codec_name = NULL;
4472     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
4474         AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
4475         st->codec->codec_id = codec->id;
4478         return avcodec_find_decoder(st->codec->codec_id);
4482  * Add all the streams from the given input file to the global
4483  * list of input streams.
// NOTE(review): listing is truncated — allocation-failure checks, the
// ist->st assignment, blank lines, braces and switch "break"s are elided
// between the visible lines.
4485 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
4488     char *next, *codec_tag = NULL;
4490     for (i = 0; i < ic->nb_streams; i++) {
4491         AVStream *st = ic->streams[i];
4492         AVCodecContext *dec = st->codec;
4493         InputStream *ist = av_mallocz(sizeof(*ist));
4494         char *framerate = NULL;
4499         input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
4500         input_streams[nb_input_streams - 1] = ist;
4503         ist->file_index = nb_input_files;
// Streams start fully discarded; mapping decides later what to demux.
4505         st->discard  = AVDISCARD_ALL;
4506         ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st, choose_decoder(o, ic, st));
4508         ist->ts_scale = 1.0;
4509         MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
4511         MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, ic, st);
// A codec tag may be numeric or a fourcc string (read little-endian).
4513             uint32_t tag = strtol(codec_tag, &next, 0);
4515                 tag = AV_RL32(codec_tag);
4516             st->codec->codec_tag = tag;
4519         ist->dec = choose_decoder(o, ic, st);
4521         switch (dec->codec_type) {
4522         case AVMEDIA_TYPE_VIDEO:
4524                 ist->dec = avcodec_find_decoder(dec->codec_id);
4526                 dec->flags |= CODEC_FLAG_EMU_EDGE;
// Remember the initial parameters so later changes can be detected and
// the filter chain reconfigured.
4529             ist->resample_height  = dec->height;
4530             ist->resample_width   = dec->width;
4531             ist->resample_pix_fmt = dec->pix_fmt;
4533             MATCH_PER_STREAM_OPT(frame_rates, str, framerate, ic, st);
4534             if (framerate && av_parse_video_rate(&ist->framerate,
4536                 av_log(NULL, AV_LOG_ERROR, "Error parsing framerate %s.\n",
4541             ist->top_field_first = -1;
4542             MATCH_PER_STREAM_OPT(top_field_first, i, ist->top_field_first, ic, st);
4545         case AVMEDIA_TYPE_AUDIO:
4546             guess_input_channel_layout(ist);
4548             ist->resample_sample_fmt     = dec->sample_fmt;
4549             ist->resample_sample_rate    = dec->sample_rate;
4550             ist->resample_channels       = dec->channels;
4551             ist->resample_channel_layout = dec->channel_layout;
4554         case AVMEDIA_TYPE_DATA:
4555         case AVMEDIA_TYPE_SUBTITLE:
4557                 ist->dec = avcodec_find_decoder(dec->codec_id);
4559         case AVMEDIA_TYPE_ATTACHMENT:
4560         case AVMEDIA_TYPE_UNKNOWN:
// assert_file_overwrite(): unless -y was given, refuse to clobber an
// existing local output file — interactively (read_yesno() on stdin)
// when possible, fatally otherwise.  Protocol URLs (containing ':' and
// not "file:"/drive-letter paths) are exempt.  NOTE(review): listing is
// truncated — exit_program() calls, signal() restore and closing braces
// are elided.
4568 static void assert_file_overwrite(const char *filename)
4570     if ((!file_overwrite || no_file_overwrite) &&
4571         (strchr(filename, ':') == NULL || filename[1] == ':' ||
4572          av_strstart(filename, "file:", NULL))) {
4573         if (avio_check(filename, 0) == 0) {
4574             if (stdin_interaction && (!no_file_overwrite || file_overwrite)) {
4575                 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
// SIGINT is restored to its default handler while prompting, so ^C at
// the prompt kills the process immediately.
4578                 signal(SIGINT, SIG_DFL);
4579                 if (!read_yesno()) {
4580                     av_log(NULL, AV_LOG_FATAL, "Not overwriting - exiting\n");
4586                 av_log(NULL, AV_LOG_FATAL, "File '%s' already exists. Exiting.\n", filename);
// dump_attachment(): write an attachment stream's extradata to a file
// (-dump_attachment).  Falls back to the stream's "filename" metadata tag
// when no output name was given.  NOTE(review): listing is truncated —
// returns/exit_program() calls, the avio close and braces are elided.
4593 static void dump_attachment(AVStream *st, const char *filename)
4596     AVIOContext *out = NULL;
4597     AVDictionaryEntry *e;
4599     if (!st->codec->extradata_size) {
4600         av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
4601                nb_input_files - 1, st->index);
4604     if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
4605         filename = e->value;
// NOTE(review): the two string pieces below concatenate to "tagin stream"
// — a space is missing at the end of the first literal.  Left untouched
// here (doc-only change); worth fixing in a code pass.
4607         av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
4608                "in stream #%d:%d.\n", nb_input_files - 1, st->index);
4612     assert_file_overwrite(filename);
4614     if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
4615         av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
4620     avio_write(out, st->codec->extradata, st->codec->extradata_size);
// opt_input_file(): open one -i input — build demuxer options from the
// command line, open and probe the file, apply forced codecs, optionally
// seek to -ss, register all its streams (add_input_streams) and append an
// InputFile entry.  NOTE(review): listing is heavily truncated —
// exit_program() calls, several braces, blank lines and intermediate
// statements are elided between the visible lines.
4625 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
4627     AVFormatContext *ic;
4628     AVInputFormat *file_iformat = NULL;
4632     AVDictionary **opts;
4633     int orig_nb_streams;                     // number of streams before avformat_find_stream_info
4636         if (!(file_iformat = av_find_input_format(o->format))) {
4637             av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
4642     if (!strcmp(filename, "-"))
// Reading from stdin disables interactive keyboard handling.
4645     stdin_interaction &= strncmp(filename, "pipe:", 5) &&
4646                          strcmp(filename, "/dev/stdin");
4648     /* get default parameters from command line */
4649     ic = avformat_alloc_context();
4651         print_error(filename, AVERROR(ENOMEM));
4654     if (o->nb_audio_sample_rate) {
4655         snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
4656         av_dict_set(&format_opts, "sample_rate", buf, 0);
4658     if (o->nb_audio_channels) {
4659         /* because we set audio_channels based on both the "ac" and
4660          * "channel_layout" options, we need to check that the specified
4661          * demuxer actually has the "channels" option before setting it */
4662         if (file_iformat && file_iformat->priv_class &&
4663             av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
4664                         AV_OPT_SEARCH_FAKE_OBJ)) {
4665             snprintf(buf, sizeof(buf), "%d",
4666                      o->audio_channels[o->nb_audio_channels - 1].u.i);
4667             av_dict_set(&format_opts, "channels", buf, 0);
4670     if (o->nb_frame_rates) {
4671         /* set the format-level framerate option;
4672          * this is important for video grabbers, e.g. x11 */
4673         if (file_iformat && file_iformat->priv_class &&
4674             av_opt_find(&file_iformat->priv_class, "framerate", NULL, 0,
4675                         AV_OPT_SEARCH_FAKE_OBJ)) {
4676             av_dict_set(&format_opts, "framerate",
4677                         o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
4680     if (o->nb_frame_sizes) {
4681         av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
4683     if (o->nb_frame_pix_fmts)
4684         av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
// Forced codec names (from -vcodec/-acodec/-scodec) preset the probing.
4686     ic->video_codec_id   = video_codec_name ?
4687         find_codec_or_die(video_codec_name   , AVMEDIA_TYPE_VIDEO   , 0)->id : CODEC_ID_NONE;
4688     ic->audio_codec_id   = audio_codec_name ?
4689         find_codec_or_die(audio_codec_name   , AVMEDIA_TYPE_AUDIO   , 0)->id : CODEC_ID_NONE;
4690     ic->subtitle_codec_id= subtitle_codec_name ?
4691         find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0)->id : CODEC_ID_NONE;
4692     ic->flags |= AVFMT_FLAG_NONBLOCK;
4693     ic->interrupt_callback = int_cb;
4695     /* open the input file with generic avformat function */
4696     err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
4698         print_error(filename, err);
4701     assert_avoptions(format_opts);
4703     /* apply forced codec ids */
4704     for (i = 0; i < ic->nb_streams; i++)
4705         choose_decoder(o, ic, ic->streams[i]);
4707     /* Set AVCodecContext options for avformat_find_stream_info */
4708     opts = setup_find_stream_info_opts(ic, codec_opts);
4709     orig_nb_streams = ic->nb_streams;
4711     /* If not enough info to get the stream parameters, we decode the
4712        first frames to get it. (used in mpeg case for example) */
4713     ret = avformat_find_stream_info(ic, opts);
4715         av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
4716         avformat_close_input(&ic);
4720     timestamp = o->start_time;
4721     /* add the stream start time */
4722     if (ic->start_time != AV_NOPTS_VALUE)
4723         timestamp += ic->start_time;
4725     /* if seeking requested, we execute it */
4726     if (o->start_time != 0) {
4727         ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
4729             av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
4730                    filename, (double)timestamp / AV_TIME_BASE);
4734     /* update the current parameters so that they match the one of the input stream */
4735     add_input_streams(o, ic);
4737     /* dump the file content */
4738     av_dump_format(ic, nb_input_files, filename, 0);
4740     input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
4741     if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
4744     input_files[nb_input_files - 1]->ctx        = ic;
4745     input_files[nb_input_files - 1]->ist_index  = nb_input_streams - ic->nb_streams;
// copy_ts keeps original stamps; otherwise offset so output starts at 0.
4746     input_files[nb_input_files - 1]->ts_offset  = o->input_ts_offset - (copy_ts ? 0 : timestamp);
4747     input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
4748     input_files[nb_input_files - 1]->rate_emu   = o->rate_emu;
4750     for (i = 0; i < o->nb_dump_attachment; i++) {
4753         for (j = 0; j < ic->nb_streams; j++) {
4754             AVStream *st = ic->streams[j];
4756             if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
4757                 dump_attachment(st, o->dump_attachment[i].u.str);
4761     for (i = 0; i < orig_nb_streams; i++)
4762         av_dict_free(&opts[i]);
4765     reset_options(o, 1);
/*
 * Read one line (terminated by '\n' or end-of-stream) from an AVIOContext
 * into a newly allocated, dynamically grown buffer; used to parse avpreset
 * files line by line. Ownership of the buffer passes to the caller.
 * NOTE(review): this excerpt is elided — local declarations, the loop body
 * and the return statement are not visible here; confirm against the full file.
 */
static uint8_t *get_line(AVIOContext *s)
    if (avio_open_dyn_buf(&line) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
    /* consume bytes until a 0 byte (avio_r8() at EOF) or a newline */
    while ((c = avio_r8(s)) && c != '\n')
    /* hand the accumulated bytes back as a malloced buffer */
    avio_close_dyn_buf(line, &buf);
/*
 * Locate and open a preset file for reading.  For each base directory it
 * first tries "<codec_name>-<preset_name>.avpreset", then falls back to
 * "<preset_name>.avpreset".  On success *s is the opened AVIOContext and the
 * avio_open2() return value (>= 0) is returned.
 * NOTE(review): excerpt elided — the remaining base[] initializers, the
 * NULL-base guard and loop-exit logic are not visible here.
 */
static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
    char filename[1000];
    const char *base[3] = { getenv("AVCONV_DATADIR"),
    /* keep trying while ret is non-zero, i.e. until one candidate opens */
    for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
        /* codec-specific preset name first; i == 1 is the home-dir entry,
         * hence the "/.avconv" subdirectory for that base only */
        snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
                 i != 1 ? "" : "/.avconv", codec_name, preset_name);
        ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
        /* generic (codec-independent) preset name as fallback */
        snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
                 i != 1 ? "" : "/.avconv", preset_name);
        ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/*
 * Select the encoder for an output stream based on the per-stream -c option:
 *  - no codec name given: guess the codec id from output format + filename
 *    and look up the corresponding encoder;
 *  - "copy": mark the stream for streamcopy (no encoder is set);
 *  - anything else: resolve the named encoder (fatal if unknown).
 * NOTE(review): excerpt elided — the "if (!codec_name)" branch header and the
 * final "else" wrapper are missing from this view.
 */
static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
    char *codec_name = NULL;
    MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
        /* no -c given for this stream: derive codec id from the muxer */
        ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
                                                  NULL, ost->st->codec->codec_type);
        ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
    } else if (!strcmp(codec_name, "copy"))
        ost->stream_copy = 1;
        /* explicit encoder name: resolve it or abort */
        ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
        ost->st->codec->codec_id = ost->enc->id;
/*
 * Create a new output stream of the given media type in muxer context oc and
 * register it in the global output_streams[] array.  Applies per-stream
 * options: encoder choice, preset file, -frames, bitstream filters, codec
 * tag, qscale, global-header flag, and sws/swr defaults.  If source_index is
 * >= 0 the stream is wired to that input stream (and un-discards it).
 * Returns the new OutputStream (allocation failures are fatal).
 * NOTE(review): excerpt elided — several braces, the preset read loop header,
 * error exits and the return statement are not visible here.
 */
static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type, int source_index)
    AVStream *st = avformat_new_stream(oc, NULL);
    int idx = oc->nb_streams - 1, ret = 0;
    char *bsf = NULL, *next, *codec_tag = NULL;
    AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
    char *buf = NULL, *arg = NULL, *preset = NULL;
    AVIOContext *s = NULL;
        av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
    /* honor an explicit -streamid mapping for this output index, if any */
    if (oc->nb_streams - 1 < o->nb_streamid_map)
        st->id = o->streamid_map[oc->nb_streams - 1];
    output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
                                nb_output_streams + 1);
    if (!(ost = av_mallocz(sizeof(*ost))))
    output_streams[nb_output_streams - 1] = ost;
    ost->file_index = nb_output_files;
    st->codec->codec_type = type;
    choose_encoder(o, oc, ost);
        /* collect codec options that match this encoder/stream */
        ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st, ost->enc);
    avcodec_get_context_defaults3(st->codec, ost->enc);
    st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
    /* apply an -pre preset file: each non-comment line is "key=value" and is
     * merged into ost->opts without overriding explicit user options */
    MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
    if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
            if (!buf[0] || buf[0] == '#') {
            if (!(arg = strchr(buf, '='))) {
                av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
            av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
        } while (!s->eof_reached);
        av_log(NULL, AV_LOG_FATAL,
               "Preset %s specified for stream %d:%d, but could not be opened.\n",
               preset, ost->file_index, ost->index);
    ost->max_frames = INT64_MAX;
    MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
    /* build the chain of -bsf bitstream filters (comma-separated list) */
    MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
        if (next = strchr(bsf, ','))
        if (!(bsfc = av_bitstream_filter_init(bsf))) {
            av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
            bsfc_prev->next = bsfc;
            ost->bitstream_filters = bsfc;
    /* -tag: numeric value, or up to 4 literal characters read little-endian */
    MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
        uint32_t tag = strtol(codec_tag, &next, 0);
            tag = AV_RL32(codec_tag);
        st->codec->codec_tag = tag;
    MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
    if (qscale >= 0 || same_quant) {
        st->codec->flags |= CODEC_FLAG_QSCALE;
        st->codec->global_quality = FF_QP2LAMBDA * qscale;
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    /* snapshot current scaler/resampler settings for this stream */
    av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
    av_opt_get_int (swr_opts, "dither_method", 0, &ost->swr_dither_method);
    av_opt_get_double(swr_opts, "dither_scale" , 0, &ost->swr_dither_scale);
    ost->source_index = source_index;
    if (source_index >= 0) {
        /* bind to the chosen input stream and make sure it is demuxed */
        ost->sync_ist = input_streams[source_index];
        input_streams[source_index]->discard = 0;
        input_streams[source_index]->st->discard = AVDISCARD_NONE;
/*
 * Parse a comma-separated list of quantizer-matrix coefficients from str
 * into dest; a malformed coefficient is fatal.
 * NOTE(review): excerpt elided — the parsing loop itself is not visible here.
 */
static void parse_matrix_coeffs(uint16_t *dest, const char *str)
    const char *p = str;
        av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
/*
 * Create a video output stream and apply the video-specific per-stream
 * options (-r, -s, -aspect, -pix_fmt, intra/inter matrices, rc_override,
 * pass flags, forced keyframes, filters, ...).  Encoding-only options are
 * applied only when the stream is not a streamcopy.
 * NOTE(review): excerpt elided — many braces, exit paths and some option
 * lines are not visible here.
 */
static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
    AVCodecContext *video_enc;
    char *frame_rate = NULL;
    ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO, source_index);
    video_enc = st->codec;
    MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
    if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
    if (!ost->stream_copy) {
        const char *p = NULL;
        char *frame_size = NULL;
        char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
        char *intra_matrix = NULL, *inter_matrix = NULL;
        const char *filters = "null"; /* default: pass-through video filter */
        MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
        if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
        MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
        if (frame_aspect_ratio) {
            if (av_parse_ratio(&q, frame_aspect_ratio, 255, 0, NULL) < 0 ||
                q.num <= 0 || q.den <= 0) {
                av_log(NULL, AV_LOG_FATAL, "Invalid aspect ratio: %s\n", frame_aspect_ratio);
            ost->frame_aspect_ratio = av_q2d(q);
        video_enc->bits_per_raw_sample = frame_bits_per_raw_sample;
        MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
        /* a leading '+' means: keep this pix_fmt, do not auto-convert */
        if (frame_pix_fmt && *frame_pix_fmt == '+') {
            ost->keep_pix_fmt = 1;
            if (!*++frame_pix_fmt)
                frame_pix_fmt = NULL;
        if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
            av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
        st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
        video_enc->gop_size = 0;
        MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
            if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
                av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
            parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
        MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
            if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
                av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
            parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
        /* -rc_override: "start,end,q" tuples; positive q = fixed qscale,
         * negative q = quality factor */
        MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
        for (i = 0; p; i++) {
            int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
                av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
            /* FIXME realloc failure */
            video_enc->rc_override =
                av_realloc(video_enc->rc_override,
                           sizeof(RcOverride) * (i + 1));
            video_enc->rc_override[i].start_frame = start;
            video_enc->rc_override[i].end_frame   = end;
                video_enc->rc_override[i].qscale         = q;
                video_enc->rc_override[i].quality_factor = 1.0;
                video_enc->rc_override[i].qscale         = 0;
                video_enc->rc_override[i].quality_factor = -q/100.0;
        video_enc->rc_override_count = i;
        if (!video_enc->rc_initial_buffer_occupancy)
            video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
        video_enc->intra_dc_precision = intra_dc_precision - 8;
            video_enc->flags|= CODEC_FLAG_PSNR;
        /* two-pass mode: pass 1 writes stats, pass 2 consumes them */
            video_enc->flags |= CODEC_FLAG_PASS1;
            video_enc->flags |= CODEC_FLAG_PASS2;
        MATCH_PER_STREAM_OPT(forced_key_frames, str, ost->forced_keyframes, oc, st);
        if (ost->forced_keyframes)
            ost->forced_keyframes = av_strdup(ost->forced_keyframes);
        MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
        ost->top_field_first = -1; /* -1 = auto-detect field order */
        MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
        MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
        ost->avfilter = av_strdup(filters);
    MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
/*
 * Create an audio output stream and apply audio-specific per-stream options
 * (-ac, -sample_fmt, -ar, -af), then resolve any -map_channel requests that
 * target this stream.
 * NOTE(review): excerpt elided — several braces, exit paths and the return
 * statement are not visible here.
 */
static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
    AVCodecContext *audio_enc;
    ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO, source_index);
    audio_enc = st->codec;
    audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
    if (!ost->stream_copy) {
        char *sample_fmt = NULL;
        const char *filters = "anull"; /* default: pass-through audio filter */
        MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
        MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
            (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
            av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
        MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
        MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
        av_assert1(filters);
        ost->avfilter = av_strdup(filters);
        /* check for channel mapping for this audio stream */
        for (n = 0; n < o->nb_audio_channel_maps; n++) {
            AudioChannelMap *map = &o->audio_channel_maps[n];
            InputStream *ist = input_streams[ost->source_index];
            /* a map applies when its input stream and output file/stream
             * selectors are either wildcards (-1) or match this pair */
            if ((map->channel_idx == -1 || (ist->file_index == map->file_idx && ist->st->index == map->stream_idx)) &&
                (map->ofile_idx == -1 || ost->file_index == map->ofile_idx) &&
                (map->ostream_idx == -1 || ost->st->index == map->ostream_idx)) {
                if (ost->audio_channels_mapped < FF_ARRAY_ELEMS(ost->audio_channels_map))
                    ost->audio_channels_map[ost->audio_channels_mapped++] = map->channel_idx;
                    av_log(NULL, AV_LOG_FATAL, "Max channel mapping for output %d.%d reached\n",
                           ost->file_index, ost->st->index);
/*
 * Create a data output stream.  Data streams can only be streamcopied;
 * requesting an encoder for one is a fatal error.
 * NOTE(review): excerpt elided — braces and return are not visible here.
 */
static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
    ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA, source_index);
    if (!ost->stream_copy) {
        av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
/*
 * Create an attachment output stream; attachments are always streamcopied.
 * NOTE(review): excerpt elided — braces and return are not visible here.
 */
static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
    OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT, source_index);
    ost->stream_copy = 1;
/*
 * Create a subtitle output stream and apply subtitle-specific options.
 * NOTE(review): excerpt elided — braces and return are not visible here.
 */
static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
    AVCodecContext *subtitle_enc;
    ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE, source_index);
    subtitle_enc = st->codec;
    subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
    MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc, st);
/* arg format is "output-stream-index:streamid-value". */
/*
 * Parse a -streamid option: record that output stream `index` should get the
 * given container-level stream id.  The map is grown on demand; it is applied
 * later in new_output_stream().
 * NOTE(review): excerpt elided — the local declarations and the "no colon"
 * error path's surrounding braces are not visible here.
 */
static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
    av_strlcpy(idx_str, arg, sizeof(idx_str));
    p = strchr(idx_str, ':');
        av_log(NULL, AV_LOG_FATAL,
               "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
    /* index must fit the streamid map; the value may be any non-negative int */
    idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, MAX_STREAMS-1);
    o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
    o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
/*
 * Copy chapters from an input file into an output file, shifting timestamps
 * by the output start offset and dropping chapters that fall entirely
 * outside the recording window.  Chapter metadata is copied only when
 * copy_metadata is set.  Returns 0 on success or AVERROR(ENOMEM).
 * NOTE(review): excerpt elided — some braces, `continue`s and the final
 * return are not visible here.
 */
static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
    AVFormatContext *is = ifile->ctx;
    AVFormatContext *os = ofile->ctx;
    for (i = 0; i < is->nb_chapters; i++) {
        AVChapter *in_ch = is->chapters[i], *out_ch;
        /* output start offset expressed in this chapter's time base */
        int64_t ts_off   = av_rescale_q(ofile->start_time - ifile->ts_offset,
                                       AV_TIME_BASE_Q, in_ch->time_base);
        int64_t rt       = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
                           av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
        /* skip chapters ending before the start or starting after the end */
        if (in_ch->end < ts_off)
        if (rt != INT64_MAX && in_ch->start > rt + ts_off)
        out_ch = av_mallocz(sizeof(AVChapter));
            return AVERROR(ENOMEM);
        out_ch->id        = in_ch->id;
        out_ch->time_base = in_ch->time_base;
        /* clamp the shifted chapter to the [0, rt] window */
        out_ch->start     = FFMAX(0,  in_ch->start - ts_off);
        out_ch->end       = FFMIN(rt, in_ch->end   - ts_off);
            av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
        os->chapters = av_realloc_f(os->chapters, os->nb_chapters, sizeof(AVChapter));
            return AVERROR(ENOMEM);
        os->chapters[os->nb_chapters - 1] = out_ch;
/*
 * For output to an ffserver instance: open the ffm feed at `filename`, and
 * create one output stream in `s` mirroring each stream the server expects,
 * copying the codec parameters verbatim.
 * NOTE(review): excerpt elided — error handling after avformat_open_input,
 * some braces and the return are not visible here.  The raw memcpy of
 * AVStream is acknowledged as a hack by the original FIXME.
 */
static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const char *filename)
    AVFormatContext *ic = avformat_alloc_context();
    ic->interrupt_callback = int_cb;
    err = avformat_open_input(&ic, filename, NULL, NULL);
    /* copy stream format */
    for(i=0;i<ic->nb_streams;i++) {
        AVCodecContext *avctx;
        codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id);
        ost   = new_output_stream(o, s, codec->type, -1);
        // FIXME: a more elegant solution is needed
        memcpy(st, ic->streams[i], sizeof(AVStream));
        st->info = av_malloc(sizeof(*st->info));
        memcpy(st->info, ic->streams[i]->info, sizeof(*st->info));
        avcodec_copy_context(st->codec, ic->streams[i]->codec);
        /* pick sample/pixel formats the chosen encoder actually supports */
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
            choose_sample_fmt(st, codec);
        else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
            choose_pixel_fmt(st, codec, st->codec->pix_fmt);
    avformat_close_input(&ic);
/*
 * Create an output stream fed by a complex-filtergraph output pad and
 * configure the filter chain down to it.  Only video and audio pads are
 * supported; streamcopy is incompatible with filtergraph output.
 * NOTE(review): excerpt elided — some braces and exit calls are not visible.
 */
static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
                               AVFormatContext *oc)
    switch (avfilter_pad_get_type(ofilter->out_tmp->filter_ctx->output_pads,
                                  ofilter->out_tmp->pad_idx)) {
    case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc, -1); break;
    case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc, -1); break;
        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters are supported "
    ost->source_index = -1; /* no input stream: fed by the filtergraph */
    ost->filter       = ofilter;
    if (ost->stream_copy) {
        av_log(NULL, AV_LOG_ERROR, "Streamcopy requested for output stream %d:%d, "
               "which is fed from a complex filtergraph. Filtering and streamcopy "
               "cannot be used together.\n", ost->file_index, ost->index);
    if (o->recording_time != INT64_MAX)
        av_log(NULL, AV_LOG_WARNING,
               "-t does not work with -filter_complex (yet).\n");
    if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
    avfilter_inout_free(&ofilter->out_tmp);
/*
 * Handle one output file argument: allocate the muxer context, create output
 * streams (from filtergraph labels, explicit -map options, or automatic
 * "best stream per type" selection), apply attachments, metadata maps,
 * chapter copying and per-file options, then open the output for writing.
 * Consumes and resets the accumulated OptionsContext at the end.
 * NOTE(review): this excerpt is heavily elided — many braces, `continue`s,
 * `exit_program` calls and declarations are missing from this view; the
 * statement order below is load-bearing, so it is annotated, not restyled.
 */
static void opt_output_file(void *optctx, const char *filename)
    OptionsContext *o = optctx;
    AVFormatContext *oc;
    AVOutputFormat *file_oformat;
    if (configure_complex_filters() < 0) {
        av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
    /* "-" means standard output */
    if (!strcmp(filename, "-"))
    err = avformat_alloc_output_context2(&oc, NULL, o->format, filename);
        print_error(filename, err);
    file_oformat= oc->oformat;
    oc->interrupt_callback = int_cb;
    /* create streams for all unlabeled output pads */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->out_tmp || ofilter->out_tmp->name)
            /* a filtergraph feeds this type, so disable auto-mapping for it */
            switch (avfilter_pad_get_type(ofilter->out_tmp->filter_ctx->output_pads,
                                          ofilter->out_tmp->pad_idx)) {
            case AVMEDIA_TYPE_VIDEO:    o->video_disable    = 1; break;
            case AVMEDIA_TYPE_AUDIO:    o->audio_disable    = 1; break;
            case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
            init_output_filter(ofilter, o, oc);
    if (!strcmp(file_oformat->name, "ffm") &&
        av_strstart(filename, "http:", NULL)) {
        /* special case for files sent to ffserver: we get the stream
           parameters from ffserver */
        int err = read_ffserver_streams(o, oc, filename);
            print_error(filename, err);
        /* pair each server-defined output stream with a matching input */
        for(j = nb_output_streams - oc->nb_streams; j < nb_output_streams; j++) {
            ost = output_streams[j];
            for (i = 0; i < nb_input_streams; i++) {
                ist = input_streams[i];
                if(ist->st->codec->codec_type == ost->st->codec->codec_type){
                    ost->source_index= i;
                    if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) ost->avfilter = av_strdup("anull");
                    if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) ost->avfilter = av_strdup("null");
                    ist->st->discard = AVDISCARD_NONE;
            av_log(NULL, AV_LOG_FATAL, "Missing %s stream which is required by this ffm\n", av_get_media_type_string(ost->st->codec->codec_type));
    } else if (!o->nb_stream_maps) {
        /* pick the "best" stream of each type */
        /* video: highest resolution */
        if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
            int area = 0, idx = -1;
            for (i = 0; i < nb_input_streams; i++) {
                ist = input_streams[i];
                if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                    ist->st->codec->width * ist->st->codec->height > area) {
                    area = ist->st->codec->width * ist->st->codec->height;
                new_video_stream(o, oc, idx);
        /* audio: most channels */
        if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
            int channels = 0, idx = -1;
            for (i = 0; i < nb_input_streams; i++) {
                ist = input_streams[i];
                if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                    ist->st->codec->channels > channels) {
                    channels = ist->st->codec->channels;
                new_audio_stream(o, oc, idx);
        /* subtitles: pick first */
        if (!o->subtitle_disable && (oc->oformat->subtitle_codec != CODEC_ID_NONE || subtitle_codec_name)) {
            for (i = 0; i < nb_input_streams; i++)
                if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                    new_subtitle_stream(o, oc, i);
        /* do something with data? */
        /* explicit -map options: one output stream per mapping */
        for (i = 0; i < o->nb_stream_maps; i++) {
            StreamMap *map = &o->stream_maps[i];
            int src_idx = input_files[map->file_index]->ist_index + map->stream_index;
            if (map->linklabel) {
                /* the map names a labeled filtergraph output pad */
                OutputFilter *ofilter = NULL;
                for (j = 0; j < nb_filtergraphs; j++) {
                    fg = filtergraphs[j];
                    for (k = 0; k < fg->nb_outputs; k++) {
                        AVFilterInOut *out = fg->outputs[k]->out_tmp;
                        if (out && !strcmp(out->name, map->linklabel)) {
                            ofilter = fg->outputs[k];
                    av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
                           "in any defined filter graph.\n", map->linklabel);
                init_output_filter(ofilter, o, oc);
                ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
                /* per-type -vn/-an/-sn/-dn suppress mapped streams too */
                if(o->subtitle_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE)
                if(o-> audio_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
                if(o-> video_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
                if(o-> data_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_DATA)
                switch (ist->st->codec->codec_type) {
                case AVMEDIA_TYPE_VIDEO:      ost = new_video_stream     (o, oc, src_idx); break;
                case AVMEDIA_TYPE_AUDIO:      ost = new_audio_stream     (o, oc, src_idx); break;
                case AVMEDIA_TYPE_SUBTITLE:   ost = new_subtitle_stream  (o, oc, src_idx); break;
                case AVMEDIA_TYPE_DATA:       ost = new_data_stream      (o, oc, src_idx); break;
                case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc, src_idx); break;
                    av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
                           map->file_index, map->stream_index);
    /* propagate user-specified codec "flags" to streamcopied streams */
    for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
        AVDictionaryEntry *e;
        ost = output_streams[i];
        if (   ost->stream_copy
            && (e = av_dict_get(codec_opts, "flags", NULL, AV_DICT_IGNORE_SUFFIX))
            && (!e->key[5] || check_stream_specifier(oc, ost->st, e->key+6)))
            if (av_opt_set(ost->st->codec, "flags", e->value, 0) < 0)
    /* handle attached files */
    for (i = 0; i < o->nb_attachments; i++) {
        uint8_t *attachment;
        if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
        /* NOTE(review): avio_size() returns int64_t; len's declaration is not
         * visible here — confirm it cannot truncate/overflow for huge files */
        if ((len = avio_size(pb)) <= 0) {
            av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
        if (!(attachment = av_malloc(len))) {
            av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
        avio_read(pb, attachment, len);
        ost = new_attachment_stream(o, oc, -1);
        ost->stream_copy               = 0;
        ost->attachment_filename       = o->attachments[i];
        ost->st->codec->extradata      = attachment;
        ost->st->codec->extradata_size = len;
        /* store the basename of the attachment as its "filename" metadata */
        p = strrchr(o->attachments[i], '/');
        av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
    output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
    if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
    output_files[nb_output_files - 1]->ctx            = oc;
    output_files[nb_output_files - 1]->ost_index      = nb_output_streams - oc->nb_streams;
    output_files[nb_output_files - 1]->recording_time = o->recording_time;
    if (o->recording_time != INT64_MAX)
        oc->duration = o->recording_time;
    output_files[nb_output_files - 1]->start_time     = o->start_time;
    output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
    av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
    /* check filename in case of an image number is expected */
    if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(oc->filename)) {
            print_error(oc->filename, AVERROR(EINVAL));
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        /* test if it already exists to avoid losing precious files */
        assert_file_overwrite(filename);
        if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
                              &oc->interrupt_callback,
                              &output_files[nb_output_files - 1]->opts)) < 0) {
            print_error(filename, err);
    if (o->mux_preload) {
        snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
        av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
    oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
    /* -map_metadata: "file[:spec]" — resolve source file and copy */
    for (i = 0; i < o->nb_metadata_map; i++) {
        int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
        if (in_file_index >= nb_input_files) {
            av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
        copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, in_file_index >= 0 ? input_files[in_file_index]->ctx : NULL, o);
    if (o->chapters_input_file >= nb_input_files) {
        if (o->chapters_input_file == INT_MAX) {
            /* copy chapters from the first input file that has them*/
            o->chapters_input_file = -1;
            for (i = 0; i < nb_input_files; i++)
                if (input_files[i]->ctx->nb_chapters) {
                    o->chapters_input_file = i;
            av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
                   o->chapters_input_file);
    if (o->chapters_input_file >= 0)
        copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
                      !o->metadata_chapters_manual);
    /* copy global metadata by default */
    if (!o->metadata_global_manual && nb_input_files){
        av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
                     AV_DICT_DONT_OVERWRITE);
        /* these would be stale for the new output, so drop them */
        if(o->recording_time != INT64_MAX)
            av_dict_set(&oc->metadata, "duration", NULL, 0);
        av_dict_set(&oc->metadata, "creation_time", NULL, 0);
    if (!o->metadata_streams_manual)
        for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
            if (output_streams[i]->source_index < 0)         /* this is true e.g. for attached files */
            ist = input_streams[output_streams[i]->source_index];
            av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
    /* process manually set metadata */
    for (i = 0; i < o->nb_metadata; i++) {
        const char *stream_spec;
        int index = 0, j, ret = 0;
        val = strchr(o->metadata[i].u.str, '=');
            av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
                   o->metadata[i].u.str);
        parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
        for (j = 0; j < oc->nb_streams; j++) {
            if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
                /* empty value deletes the key */
                av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
        if (index < 0 || index >= oc->nb_chapters) {
            av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
        m = &oc->chapters[index]->metadata;
            av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
        av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
    /* per-output-file options have been consumed; reset for the next file */
    reset_options(o, 0);
/* same option as mencoder */
/* Parse -pass: accepts 1, 2 or 3 (fatal otherwise) into the global do_pass. */
static int opt_pass(const char *opt, const char *arg)
    do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 3);
/*
 * Return the peak memory usage of this process in bytes, using whichever
 * platform API is available (getrusage on POSIX, GetProcessMemoryInfo on
 * Windows).  NOTE(review): excerpt elided — the fallback #else branch and
 * closing #endif are not visible here.
 */
static int64_t getmaxrss(void)
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    /* ru_maxrss is reported in kilobytes, hence the * 1024 */
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
/* -aq is an alias for -q:a: forward the value to the generic option parser. */
static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
    return parse_option(o, "q:a", arg, options);
/* Print the one-line command synopsis to the log. */
static void show_usage(void)
    av_log(NULL, AV_LOG_INFO, "Hyper fast Audio and Video encoder\n");
    av_log(NULL, AV_LOG_INFO, "usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
    av_log(NULL, AV_LOG_INFO, "\n");
/*
 * Print the full help text: usage line, option tables grouped by category
 * (main/advanced/video/audio/subtitle/grab), then the AVOption help of the
 * codec, format, scaler and resampler classes.
 * NOTE(review): excerpt elided — the usage call and closing lines are missing.
 */
static int opt_help(const char *opt, const char *arg)
    int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
    av_log_set_callback(log_callback_help);
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
    show_help_options(options, "\nVideo options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
    show_help_options(options, "\nAdvanced Video options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
                      OPT_VIDEO | OPT_EXPERT);
    show_help_options(options, "\nAudio options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
    show_help_options(options, "\nAdvanced Audio options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
                      OPT_AUDIO | OPT_EXPERT);
    show_help_options(options, "\nSubtitle options:\n",
                      OPT_SUBTITLE | OPT_GRAB,
    show_help_options(options, "\nAudio/Video grab options:\n",
    /* per-library AVOption help */
    show_help_children(avcodec_get_class(), flags);
    show_help_children(avformat_get_class(), flags);
    show_help_children(sws_get_class(), flags);
    show_help_children(swr_get_class(), AV_OPT_FLAG_AUDIO_PARAM);
/*
 * Handle -target <type>, where type is vcd/svcd/dvd/dv/dv50 with an optional
 * "pal-"/"ntsc-"/"film-" prefix.  The norm is taken from the prefix, or
 * guessed from the frame rates of already-opened input files; it selects the
 * frame size, frame rate and related encoder defaults, which are then applied
 * through the regular option machinery.
 * NOTE(review): excerpt elided — several assignments (norm = ...), braces and
 * a few option lines are not visible here.
 */
static int opt_target(OptionsContext *o, const char *opt, const char *arg)
    enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
    static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
    if (!strncmp(arg, "pal-", 4)) {
    } else if (!strncmp(arg, "ntsc-", 5)) {
    } else if (!strncmp(arg, "film-", 5)) {
        /* Try to determine PAL/NTSC by peeking in the input files */
        if (nb_input_files) {
            for (j = 0; j < nb_input_files; j++) {
                for (i = 0; i < input_files[j]->nb_streams; i++) {
                    AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
                    if (c->codec_type != AVMEDIA_TYPE_VIDEO)
                    /* frame rate in millihertz, to compare against 29970 etc. */
                    fr = c->time_base.den * 1000 / c->time_base.num;
                    } else if ((fr == 29970) || (fr == 23976)) {
                if (norm != UNKNOWN)
        if (norm != UNKNOWN)
            av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
    if (norm == UNKNOWN) {
        av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
        av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
        av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
    if (!strcmp(arg, "vcd")) {
        opt_video_codec(o, "c:v", "mpeg1video");
        opt_audio_codec(o, "c:a", "mp2");
        parse_option(o, "f", "vcd", options);
        parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
        parse_option(o, "r", frame_rates[norm], options);
        opt_default("g", norm == PAL ? "15" : "18");
        opt_default("b:v", "1150000");
        opt_default("maxrate", "1150000");
        opt_default("minrate", "1150000");
        opt_default("bufsize", "327680"); // 40*1024*8;
        opt_default("b:a", "224000");
        parse_option(o, "ar", "44100", options);
        parse_option(o, "ac", "2", options);
        opt_default("packetsize", "2324");
        opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
        /* We have to offset the PTS, so that it is consistent with the SCR.
           SCR starts at 36000, but the first two packs contain only padding
           and the first pack from the other stream, respectively, may also have
           been written before.
           So the real data starts at SCR 36000+3*1200. */
        o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
    } else if (!strcmp(arg, "svcd")) {
        opt_video_codec(o, "c:v", "mpeg2video");
        opt_audio_codec(o, "c:a", "mp2");
        parse_option(o, "f", "svcd", options);
        parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
        parse_option(o, "r", frame_rates[norm], options);
        parse_option(o, "pix_fmt", "yuv420p", options);
        opt_default("g", norm == PAL ? "15" : "18");
        opt_default("b:v", "2040000");
        opt_default("maxrate", "2516000");
        opt_default("minrate", "0"); // 1145000;
        opt_default("bufsize", "1835008"); // 224*1024*8;
        opt_default("scan_offset", "1");
        opt_default("b:a", "224000");
        parse_option(o, "ar", "44100", options);
        opt_default("packetsize", "2324");
    } else if (!strcmp(arg, "dvd")) {
        opt_video_codec(o, "c:v", "mpeg2video");
        opt_audio_codec(o, "c:a", "ac3");
        parse_option(o, "f", "dvd", options);
        parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
        parse_option(o, "r", frame_rates[norm], options);
        parse_option(o, "pix_fmt", "yuv420p", options);
        opt_default("g", norm == PAL ? "15" : "18");
        opt_default("b:v", "6000000");
        opt_default("maxrate", "9000000");
        opt_default("minrate", "0"); // 1500000;
        opt_default("bufsize", "1835008"); // 224*1024*8;
        opt_default("packetsize", "2048");  // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
        opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
        opt_default("b:a", "448000");
        parse_option(o, "ar", "48000", options);
    } else if (!strncmp(arg, "dv", 2)) {
        parse_option(o, "f", "dv", options);
        parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
        parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
                     norm == PAL ? "yuv420p" : "yuv411p", options);
        parse_option(o, "r", frame_rates[norm], options);
        parse_option(o, "ar", "48000", options);
        parse_option(o, "ac", "2", options);
        av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
        return AVERROR(EINVAL);
5891 static int opt_vstats_file(const char *opt, const char *arg)
5893 av_free (vstats_filename);
5894 vstats_filename = av_strdup (arg);
/* Handle -vstats: dump video coding statistics to an automatically named
 * file, "vstats_HHMMSS.log", based on the current local time. */
static int opt_vstats(const char *opt, const char *arg)
{
    char filename[40];
    time_t today2 = time(NULL);
    struct tm *today = localtime(&today2);

    /* NOTE(review): localtime() can return NULL on failure; unchecked here
     * as in the rest of this file — confirm before hardening. */
    snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
             today->tm_sec);
    return opt_vstats_file(opt, filename);
}
5909 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
5911 return parse_option(o, "frames:v", arg, options);
5914 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
5916 return parse_option(o, "frames:a", arg, options);
5919 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
5921 return parse_option(o, "frames:d", arg, options);
5924 static int opt_preset(OptionsContext *o, const char *opt, const char *arg)
5927 char filename[1000], line[1000], tmp_line[1000];
5928 const char *codec_name = *opt == 'v' ? video_codec_name :
5929 *opt == 'a' ? audio_codec_name :
5930 subtitle_codec_name;
5932 if (!(f = get_preset_file(filename, sizeof(filename), arg, *opt == 'f', codec_name))) {
5933 if(!strncmp(arg, "libx264-lossless", strlen("libx264-lossless"))){
5934 av_log(NULL, AV_LOG_FATAL, "Please use -preset <speed> -qp 0\n");
5936 av_log(NULL, AV_LOG_FATAL, "File for preset '%s' not found\n", arg);
5940 while (fgets(line, sizeof(line), f)) {
5941 char *key = tmp_line, *value, *endptr;
5943 if (strcspn(line, "#\n\r") == 0)
5945 strcpy(tmp_line, line);
5946 if (!av_strtok(key, "=", &value) ||
5947 !av_strtok(value, "\r\n", &endptr)) {
5948 av_log(NULL, AV_LOG_FATAL, "%s: Invalid syntax: '%s'\n", filename, line);
5951 av_log(NULL, AV_LOG_DEBUG, "ffpreset[%s]: set '%s' = '%s'\n", filename, key, value);
5953 if (!strcmp(key, "acodec")) opt_audio_codec (o, key, value);
5954 else if (!strcmp(key, "vcodec")) opt_video_codec (o, key, value);
5955 else if (!strcmp(key, "scodec")) opt_subtitle_codec(o, key, value);
5956 else if (!strcmp(key, "dcodec")) opt_data_codec (o, key, value);
5957 else if (opt_default(key, value) < 0) {
5958 av_log(NULL, AV_LOG_FATAL, "%s: Invalid option or argument: '%s', parsed as '%s' = '%s'\n",
5959 filename, line, key, value);
5969 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
5973 static int opt_passlogfile(const char *opt, const char *arg)
5975 pass_logfilename_prefix = arg;
5976 #if CONFIG_LIBX264_ENCODER
5977 return opt_default(opt, arg);
5983 static int opt_old2new(OptionsContext *o, const char *opt, const char *arg)
5985 char *s = av_asprintf("%s:%c", opt + 1, *opt);
5986 int ret = parse_option(o, s, arg, options);
5991 static int opt_bitrate(OptionsContext *o, const char *opt, const char *arg)
5993 if(!strcmp(opt, "b")){
5994 av_log(NULL, AV_LOG_WARNING, "Please use -b:a or -b:v, -b is ambiguous\n");
5995 return parse_option(o, "b:v", arg, options);
5997 return opt_default(opt, arg);
6000 static int opt_qscale(OptionsContext *o, const char *opt, const char *arg)
6004 if(!strcmp(opt, "qscale")){
6005 av_log(NULL, AV_LOG_WARNING, "Please use -q:a or -q:v, -qscale is ambiguous\n");
6006 return parse_option(o, "q:v", arg, options);
6008 s = av_asprintf("q%s", opt + 6);
6009 ret = parse_option(o, s, arg, options);
6014 static int opt_profile(OptionsContext *o, const char *opt, const char *arg)
6016 if(!strcmp(opt, "profile")){
6017 av_log(NULL, AV_LOG_WARNING, "Please use -profile:a or -profile:v, -profile is ambiguous\n");
6018 return parse_option(o, "profile:v", arg, options);
6020 return opt_default(opt, arg);
6023 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
6025 return parse_option(o, "filter:v", arg, options);
6028 static int opt_audio_filters(OptionsContext *o, const char *opt, const char *arg)
6030 return parse_option(o, "filter:a", arg, options);
6033 static int opt_vsync(const char *opt, const char *arg)
6035 if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
6036 else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
6037 else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
6038 else if (!av_strcasecmp(arg, "drop")) video_sync_method = VSYNC_DROP;
6040 if (video_sync_method == VSYNC_AUTO)
6041 video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
6045 static int opt_deinterlace(const char *opt, const char *arg)
6047 av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
6052 static int opt_timecode(OptionsContext *o, const char *opt, const char *arg)
6054 char *tcr = av_asprintf("timecode=%s", arg);
6055 int ret = parse_option(o, "metadata:g", tcr, options);
6057 ret = opt_default("gop_timecode", arg);
6062 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
6064 int idx = locate_option(argc, argv, options, "cpuflags");
6065 if (idx && argv[idx + 1])
6066 opt_cpuflags("cpuflags", argv[idx + 1]);
6069 static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
6071 char layout_str[32];
6074 int ret, channels, ac_str_size;
6077 layout = av_get_channel_layout(arg);
6079 av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
6080 return AVERROR(EINVAL);
6082 snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
6083 ret = opt_default(opt, layout_str);
6087 /* set 'ac' option based on channel layout */
6088 channels = av_get_channel_layout_nb_channels(layout);
6089 snprintf(layout_str, sizeof(layout_str), "%d", channels);
6090 stream_str = strchr(opt, ':');
6091 ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
6092 ac_str = av_mallocz(ac_str_size);
6094 return AVERROR(ENOMEM);
6095 av_strlcpy(ac_str, "ac", 3);
6097 av_strlcat(ac_str, stream_str, ac_str_size);
6098 ret = parse_option(o, ac_str, layout_str, options);
6104 static int opt_filter_complex(const char *opt, const char *arg)
6106 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
6107 &nb_filtergraphs, nb_filtergraphs + 1);
6108 if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
6109 return AVERROR(ENOMEM);
6110 filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
6111 filtergraphs[nb_filtergraphs - 1]->graph_desc = arg;
6115 static int opt_progress(const char *opt, const char *arg)
6117 AVIOContext *avio = NULL;
6120 if (!strcmp(arg, "-"))
6122 ret = avio_open2(&avio, arg, AVIO_FLAG_WRITE, &int_cb, NULL);
6124 av_log(0, AV_LOG_ERROR, "Failed to open progress URL \"%s\": %s\n",
6125 arg, av_err2str(ret));
6128 progress_avio = avio;
6132 #define OFFSET(x) offsetof(OptionsContext, x)
6133 static const OptionDef real_options[] = {
6135 #include "cmdutils_common_opts.h"
6136 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
6137 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
6138 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
6139 { "n", OPT_BOOL, {(void*)&no_file_overwrite}, "do not overwrite output files" },
6140 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
6141 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
6142 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
6143 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
6144 { "map_channel", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map_channel}, "map an audio channel from one stream to another", "file.stream.channel[:syncfile.syncstream]" },
6145 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
6146 "outfile[,metadata]:infile[,metadata]" },
6147 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
6148 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
6149 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
6150 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
6151 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
6152 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
6153 { "timestamp", HAS_ARG | OPT_FUNC2, {(void*)opt_recording_timestamp}, "set the recording timestamp ('now' to set the current time)", "time" },
6154 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
6155 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
6156 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
6157 "add timings for benchmarking" },
6158 { "benchmark_all", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark_all},
6159 "add timings for each task" },
6160 { "progress", HAS_ARG | OPT_EXPERT, {(void*)opt_progress},
6161 "write program-readable progress information", "url" },
6162 { "stdin", OPT_BOOL | OPT_EXPERT, {(void*)&stdin_interaction},
6163 "enable or disable interaction on standard input" },
6164 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
6165 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
6166 "dump each input packet" },
6167 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
6168 "when dumping packets, also dump the payload" },
6169 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
6170 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
6171 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
6172 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
6173 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
6174 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)©_ts}, "copy timestamps" },
6175 { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)©_tb}, "copy input stream time base when stream copying", "mode" },
6176 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
6177 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
6178 { "dts_error_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_error_threshold}, "timestamp error delta threshold", "threshold" },
6179 { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
6180 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
6181 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
6182 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
6183 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
6184 { "qscale", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_qscale}, "use fixed quality scale (VBR)", "q" },
6185 { "profile", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_profile}, "set profile", "profile" },
6186 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
6187 { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
6188 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
6189 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
6190 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
6191 { "debug_ts", OPT_BOOL | OPT_EXPERT, {&debug_ts}, "print timestamp debugging info" },
6194 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
6195 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
6196 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
6197 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
6198 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
6199 { "bits_per_raw_sample", OPT_INT | HAS_ARG | OPT_VIDEO, {(void*)&frame_bits_per_raw_sample}, "set the number of bits per raw sample", "number" },
6200 { "croptop", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop}, "Removed, use the crop filter instead", "size" },
6201 { "cropbottom", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop}, "Removed, use the crop filter instead", "size" },
6202 { "cropleft", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop}, "Removed, use the crop filter instead", "size" },
6203 { "cropright", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop}, "Removed, use the crop filter instead", "size" },
6204 { "padtop", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" },
6205 { "padbottom", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" },
6206 { "padleft", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" },
6207 { "padright", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" },
6208 { "padcolor", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "color" },
6209 { "intra", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_only}, "deprecated use -g 1"},
6210 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
6211 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
6212 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
6213 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
6214 { "sameq", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant}, "use same quantizer as source (implies VBR)" },
6215 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
6216 "use same quantizer as source (implies VBR)" },
6217 { "timecode", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_timecode}, "set initial TimeCode value.", "hh:mm:ss[:;.]ff" },
6218 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
6219 { "passlogfile", HAS_ARG | OPT_VIDEO, {(void*)&opt_passlogfile}, "select two pass log file name prefix", "prefix" },
6220 { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
6221 "this option is deprecated, use the yadif filter instead" },
6222 { "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" },
6223 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
6224 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
6225 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
6226 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
6227 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
6228 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
6229 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
6230 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_old2new}, "force video tag/fourcc", "fourcc/tag" },
6231 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
6232 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
6233 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
6234 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
6235 { "b", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_bitrate}, "video bitrate (please use -b:v)", "bitrate" },
6238 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
6239 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
6240 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
6241 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
6242 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
6243 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
6244 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_old2new}, "force audio tag/fourcc", "fourcc/tag" },
6245 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
6246 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
6247 { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
6248 { "af", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_filters}, "audio filters", "filter list" },
6250 /* subtitle options */
6251 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
6252 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
6253 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_old2new}, "force subtitle tag/fourcc", "fourcc/tag" },
6256 { "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_channel}, "deprecated, use -channel", "channel" },
6257 { "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_standard}, "deprecated, use -standard", "standard" },
6258 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
6261 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
6262 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
6264 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
6265 { "absf", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_FUNC2, {(void*)opt_old2new}, "deprecated", "audio bitstream_filters" },
6266 { "vbsf", HAS_ARG | OPT_VIDEO | OPT_EXPERT| OPT_FUNC2, {(void*)opt_old2new}, "deprecated", "video bitstream_filters" },
6268 { "apre", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_FUNC2, {(void*)opt_preset}, "set the audio options to the indicated preset", "preset" },
6269 { "vpre", HAS_ARG | OPT_VIDEO | OPT_EXPERT| OPT_FUNC2, {(void*)opt_preset}, "set the video options to the indicated preset", "preset" },
6270 { "spre", HAS_ARG | OPT_SUBTITLE | OPT_EXPERT| OPT_FUNC2, {(void*)opt_preset}, "set the subtitle options to the indicated preset", "preset" },
6271 { "fpre", HAS_ARG | OPT_EXPERT| OPT_FUNC2, {(void*)opt_preset}, "set options from indicated preset file", "filename" },
6272 /* data codec support */
6273 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
6274 { "dn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(data_disable)}, "disable data" },
6276 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
6280 int main(int argc, char **argv)
6282 OptionsContext o = { 0 };
6285 options = real_options;
6286 reset_options(&o, 0);
6288 av_log_set_flags(AV_LOG_SKIP_REPEATED);
6289 parse_loglevel(argc, argv, options);
6291 if(argc>1 && !strcmp(argv[1], "-d")){
6293 av_log_set_callback(log_callback_null);
6298 avcodec_register_all();
6300 avdevice_register_all();
6302 avfilter_register_all();
6304 avformat_network_init();
6306 show_banner(argc, argv, options);
6310 parse_cpuflags(argc, argv, options);
6313 parse_options(&o, argc, argv, options, opt_output_file);
6315 if (nb_output_files <= 0 && nb_input_files == 0) {
6317 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
6321 /* file converter / grab */
6322 if (nb_output_files <= 0) {
6323 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
6327 if (nb_input_files == 0) {
6328 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
6332 current_time = ti = getutime();
6333 if (transcode() < 0)
6335 ti = getutime() - ti;
6337 int maxrss = getmaxrss() / 1024;
6338 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);