2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswresample/swresample.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/channel_layout.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/intreadwrite.h"
53 #include "libavutil/dict.h"
54 #include "libavutil/mathematics.h"
55 #include "libavutil/pixdesc.h"
56 #include "libavutil/avstring.h"
57 #include "libavutil/libm.h"
58 #include "libavutil/imgutils.h"
59 #include "libavutil/timestamp.h"
60 #include "libavutil/bprint.h"
61 #include "libavutil/time.h"
62 #include "libavutil/threadmessage.h"
63 #include "libavcodec/mathops.h"
64 #include "libavformat/os_support.h"
66 # include "libavfilter/avcodec.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
71 #if HAVE_SYS_RESOURCE_H
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
78 #if HAVE_GETPROCESSMEMORYINFO
82 #if HAVE_SETCONSOLECTRLHANDLER
88 #include <sys/select.h>
93 #include <sys/ioctl.h>
107 #include "cmdutils.h"
109 #include "libavutil/avassert.h"
/* --- program identity and global transcoding state ---
 * NOTE(review): this whole chunk is a sparse extraction of ffmpeg.c — each
 * line begins with its ORIGINAL line number and many lines in between are
 * missing.  Code is kept verbatim; only comments are added. */
111 const char program_name[] = "ffmpeg";
112 const int program_birth_year = 2000;
/* stream for -vstats output, opened lazily by do_video_stats() */
114 static FILE *vstats_file;
/* names usable in -force_key_frames expressions (initializer truncated here) */
116 const char *const forced_keyframes_const_names[] = {
/* forward declarations of helpers defined later in the file */
125 static void do_video_stats(OutputStream *ost, int frame_size);
126 static int64_t getutime(void);
127 static int64_t getmaxrss(void);
129 static int run_as_daemon = 0;
/* counters of duplicated/dropped frames, updated by the video sync code */
130 static int nb_frames_dup = 0;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
134 static int current_time;
135 AVIOContext *progress_avio = NULL;
/* scratch buffer for encoded subtitles, allocated on demand in do_subtitle_out() */
137 static uint8_t *subtitle_out;
139 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
/* global arrays of input/output files and streams; freed in ffmpeg_cleanup() */
141 InputStream **input_streams = NULL;
142 int nb_input_streams = 0;
143 InputFile **input_files = NULL;
144 int nb_input_files = 0;
146 OutputStream **output_streams = NULL;
147 int nb_output_streams = 0;
148 OutputFile **output_files = NULL;
149 int nb_output_files = 0;
151 FilterGraph **filtergraphs;
156 /* init terminal so that we can grab keys */
/* saved terminal attributes, restored by term_exit_sigsafe() */
157 static struct termios oldtty;
158 static int restore_tty;
162 static void free_input_threads(void);
166 Convert subtitles to video with alpha to insert them in filter graphs.
167 This is a temporary solution until libavfilter gets real subtitles support.
/* sub2video: (re)allocate the stream's canvas frame as an RGB32 image of the
 * configured sub2video size and clear the first plane to fully transparent.
 * NOTE(review): sparse extract — declarations, the error return and closing
 * braces are missing from this view; statements kept verbatim. */
170 static int sub2video_get_blank_frame(InputStream *ist)
173 AVFrame *frame = ist->sub2video.frame;
175 av_frame_unref(frame);
176 ist->sub2video.frame->width = ist->sub2video.w;
177 ist->sub2video.frame->height = ist->sub2video.h;
178 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the new buffer */
179 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* zero = transparent black in RGB32 */
181 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* sub2video: blit one bitmap subtitle rectangle onto the RGB32 canvas,
 * expanding each palette index to a 32-bit pixel.  Non-bitmap subtitles and
 * rectangles outside the w x h canvas are warned about and skipped.
 * NOTE(review): sparse extract — some declarations and braces missing. */
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
188 uint32_t *pal, *dst2;
192 if (r->type != SUBTITLE_BITMAP) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* reject rectangles that would write outside the canvas */
196 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
/* advance to the rectangle's top-left corner (4 bytes per RGB32 pixel) */
201 dst += r->y * dst_linesize + r->x * 4;
202 src = r->pict.data[0];
203 pal = (uint32_t *)r->pict.data[1];
204 for (y = 0; y < r->h; y++) {
205 dst2 = (uint32_t *)dst;
207 for (x = 0; x < r->w; x++)
/* palette lookup: 8-bit index -> 32-bit RGBA */
208 *(dst2++) = pal[*(src2++)];
210 src += r->pict.linesize[0];
/* sub2video: stamp the canvas frame with 'pts', remember it as last_pts and
 * push a reference of the frame into every filter fed by this stream. */
214 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 AVFrame *frame = ist->sub2video.frame;
219 av_assert1(frame->data[0]);
220 ist->sub2video.last_pts = frame->pts = pts;
221 for (i = 0; i < ist->nb_filters; i++)
/* KEEP_REF: the same frame is pushed to several filters and reused later */
222 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
223 AV_BUFFERSRC_FLAG_KEEP_REF |
224 AV_BUFFERSRC_FLAG_PUSH);
/* sub2video: render the subtitle 'sub' onto a fresh blank canvas and push it
 * into the filtergraph.  With sub == NULL (heartbeat/flush path, per callers
 * below) a blank frame is sent at the previous end_pts instead.
 * NOTE(review): sparse extract — the sub==NULL branch and several
 * declarations are missing from this view. */
227 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 int w = ist->sub2video.w, h = ist->sub2video.h;
230 AVFrame *frame = ist->sub2video.frame;
234 int64_t pts, end_pts;
/* display times are in ms; rescale into the input stream time base */
239 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240 AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
245 pts = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250 av_log(ist->dec_ctx, AV_LOG_ERROR,
251 "Impossible to get a blank canvas.\n");
254 dst = frame->data [0];
255 dst_linesize = frame->linesize[0];
256 for (i = 0; i < num_rects; i++)
257 sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
259 ist->sub2video.end_pts = end_pts;
/* sub2video: whenever a frame is read from a file, re-send the current
 * sub2video frame to every sub2video stream of the same file so filters
 * (e.g. overlay) are never starved waiting for a subtitle frame.
 * NOTE(review): sparse extract — declarations, 'continue's and the condition
 * guarding the final push are missing from this view. */
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 InputFile *infile = input_files[ist->file_index];
268 /* When a frame is read from a file, examine all sub2video streams in
269 the same file and send the sub2video frame again. Otherwise, decoded
270 video frames could be accumulating in the filter graph while a filter
271 (possibly overlay) is desperately waiting for a subtitle frame. */
272 for (i = 0; i < infile->nb_streams; i++) {
273 InputStream *ist2 = input_streams[infile->ist_index + i];
/* only streams that run the sub2video hack have a canvas frame */
274 if (!ist2->sub2video.frame)
276 /* subtitles seem to be usually muxed ahead of other streams;
277 if not, subtracting a larger time here is necessary */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
/* past the current subtitle's end (or no canvas yet): send a blank frame */
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283 sub2video_update(ist2, NULL);
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
/* sub2video: at end of stream, send a final blank frame (if a subtitle was
 * still displayed) and then an EOF (NULL ref) to every connected filter. */
291 static void sub2video_flush(InputStream *ist)
295 if (ist->sub2video.end_pts < INT64_MAX)
296 sub2video_update(ist, NULL);
297 for (i = 0; i < ist->nb_filters; i++)
/* NULL ref signals EOF on the buffer source */
298 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
301 /* end of sub2video hack */
/* Restore the terminal to its saved state; only async-signal-safe calls so
 * it can run from a signal handler.
 * NOTE(review): sparse extract — the restore_tty guard around tcsetattr is
 * missing from this view. */
303 static void term_exit_sigsafe(void)
307 tcsetattr (0, TCSANOW, &oldtty);
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal / lifecycle state shared between the signal handlers and the main
 * loop.  'volatile' because these are written from async signal context;
 * they are not atomics, so readers only use them as advisory flags. */
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
/* Signal handler: record the signal and count occurrences; after more than 3
 * signals, write a message with raw write(2) (async-signal-safe) and
 * hard-exit (exit call missing from this sparse extract).
 * NOTE(review): the 'static void' return-type line is also missing here. */
324 sigterm_handler(int sig)
326 received_sigterm = sig;
327 received_nb_signals++;
329 if(received_nb_signals > 3) {
/* raw write(): printf is not async-signal-safe */
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331 strlen("Received > 3 system signals, hard exiting\n"));
337 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the POSIX-style
 * sigterm_handler.  For close/logoff/shutdown events the process is killed
 * as soon as this returns, so it spins until the main loop sets
 * ffmpeg_exited (see the comment below).
 * NOTE(review): sparse extract — switch header, returns and braces missing. */
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
345 case CTRL_BREAK_EVENT:
346 sigterm_handler(SIGINT);
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352 sigterm_handler(SIGTERM);
353 /* Basically, with these 3 events, when we return from this method the
354 process is hard terminated, so stall as long as we need to
355 to try and let the main thread(s) clean up and gracefully terminate
356 (we have at most 5 seconds, but should be done far before that). */
357 while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): interior of what appears to be term_init() — the function
 * header is outside this sparse extract.  Puts the controlling terminal into
 * raw-ish mode (no echo, no canonical input) so single keypresses can be
 * read, then installs sigterm_handler for the usual termination signals. */
376 istty = isatty(0) && isatty(2);
378 if (istty && tcgetattr (0, &tty) == 0) {
/* disable input translation/flow control so keys arrive unmodified */
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
/* no echo, no line buffering */
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/* POSIX path: poll stdin with select(); Windows path: PeekNamedPipe or the
 * console API, depending on whether stdin is a pipe.
 * NOTE(review): sparse extract — fd_set/timeout setup, the actual read and
 * all returns are missing from this view. */
408 static int read_key(void)
409 n = select(1, &rfds, NULL, NULL, &tv);
463 static int decode_interrupt_cb(void *ctx)
465 return received_nb_signals > transcode_init_done;
468 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, registered to run at exit: free all filtergraphs, output
 * files/streams, input files/streams and global buffers, then log how/why we
 * are exiting.  Ordering matters: contexts are freed before the arrays that
 * own them are freed.
 * NOTE(review): sparse extract — loop braces, NULL checks and several lines
 * are missing from this view; statements kept verbatim. */
470 static void ffmpeg_cleanup(int ret)
475 int maxrss = getmaxrss() / 1024;
476 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* free every filtergraph with its input/output pads */
479 for (i = 0; i < nb_filtergraphs; i++) {
480 FilterGraph *fg = filtergraphs[i];
481 avfilter_graph_free(&fg->graph);
482 for (j = 0; j < fg->nb_inputs; j++) {
483 av_freep(&fg->inputs[j]->name);
484 av_freep(&fg->inputs[j]);
486 av_freep(&fg->inputs);
487 for (j = 0; j < fg->nb_outputs; j++) {
488 av_freep(&fg->outputs[j]->name);
489 av_freep(&fg->outputs[j]);
491 av_freep(&fg->outputs);
492 av_freep(&fg->graph_desc);
494 av_freep(&filtergraphs[i]);
496 av_freep(&filtergraphs);
498 av_freep(&subtitle_out);
/* close output files (avio close for non-NOFILE formats is in missing lines) */
501 for (i = 0; i < nb_output_files; i++) {
502 OutputFile *of = output_files[i];
507 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
509 avformat_free_context(s);
510 av_dict_free(&of->opts);
512 av_freep(&output_files[i]);
/* per-output-stream resources, including the legacy bitstream filter chain */
514 for (i = 0; i < nb_output_streams; i++) {
515 OutputStream *ost = output_streams[i];
516 AVBitStreamFilterContext *bsfc;
521 bsfc = ost->bitstream_filters;
523 AVBitStreamFilterContext *next = bsfc->next;
524 av_bitstream_filter_close(bsfc);
527 ost->bitstream_filters = NULL;
528 av_frame_free(&ost->filtered_frame);
529 av_frame_free(&ost->last_frame);
531 av_parser_close(ost->parser);
533 av_freep(&ost->forced_keyframes);
534 av_expr_free(ost->forced_keyframes_pexpr);
535 av_freep(&ost->avfilter);
536 av_freep(&ost->logfile_prefix);
538 av_freep(&ost->audio_channels_map);
539 ost->audio_channels_mapped = 0;
541 avcodec_free_context(&ost->enc_ctx);
543 av_freep(&output_streams[i]);
/* stop reader threads before closing the inputs they read from */
546 free_input_threads();
548 for (i = 0; i < nb_input_files; i++) {
549 avformat_close_input(&input_files[i]->ctx);
550 av_freep(&input_files[i]);
552 for (i = 0; i < nb_input_streams; i++) {
553 InputStream *ist = input_streams[i];
555 av_frame_free(&ist->decoded_frame);
556 av_frame_free(&ist->filter_frame);
557 av_dict_free(&ist->decoder_opts);
558 avsubtitle_free(&ist->prev_sub.subtitle);
559 av_frame_free(&ist->sub2video.frame);
560 av_freep(&ist->filters);
561 av_freep(&ist->hwaccel_device);
563 avcodec_free_context(&ist->dec_ctx);
565 av_freep(&input_streams[i]);
570 av_freep(&vstats_filename);
/* finally drop the arrays themselves */
572 av_freep(&input_streams);
573 av_freep(&input_files);
574 av_freep(&output_streams);
575 av_freep(&output_files);
579 avformat_network_deinit();
581 if (received_sigterm) {
582 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
583 (int) received_sigterm);
584 } else if (ret && transcode_init_done) {
585 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
591 void remove_avoptions(AVDictionary **a, AVDictionary *b)
593 AVDictionaryEntry *t = NULL;
595 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
596 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed (any entry
 * remaining means the user passed an option no component recognized).
 * NOTE(review): the exit call following the log is missing from this
 * sparse extract. */
600 void assert_avoptions(AVDictionary *m)
602 AVDictionaryEntry *t;
603 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
604 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort helper for experimental codecs; body missing from this sparse
 * extract (only the signature is visible). */
609 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the user time consumed since the previous call,
 * labelled by the printf-style fmt; fmt == NULL only resets the timer (per
 * the callers, e.g. update_benchmark(NULL) before each encode call).
 * NOTE(review): sparse extract — va_list/buf declarations, the fmt NULL
 * check and the current_time update are missing from this view. */
614 static void update_benchmark(const char *fmt, ...)
616 if (do_benchmark_all) {
617 int64_t t = getutime();
623 vsnprintf(buf, sizeof(buf), fmt, va);
625 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
631 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
634 for (i = 0; i < nb_output_streams; i++) {
635 OutputStream *ost2 = output_streams[i];
636 ost2->finished |= ost == ost2 ? this_stream : others;
/* Send one encoded packet to the muxer: copy extradata to the muxing codec
 * context if missing, drop timestamps for VSYNC_DROP / negative audio sync,
 * enforce -frames limits for non-video streams, run the legacy bitstream
 * filter chain, sanitize DTS/PTS (dts > pts, non-monotonic DTS), update
 * per-stream accounting and finally interleave-write the packet.
 * NOTE(review): sparse extract — many guards, braces, the bsfc loop header
 * and error paths are missing from this view; statements kept verbatim. */
640 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
642 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
643 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* propagate encoder extradata to the muxing context if it has none */
646 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
647 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
648 if (ost->st->codec->extradata) {
649 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
650 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* drop timestamps entirely for these sync modes */
654 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
655 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
656 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
659 * Audio encoders may split the packets -- #frames in != #packets out.
660 * But there is no reordering, so we can limit the number of output packets
661 * by simply dropping them here.
662 * Counting encoded video frames needs to be done separately because of
663 * reordering, see do_video_out()
665 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
666 if (ost->frame_number >= ost->max_frames) {
674 av_packet_split_side_data(pkt);
/* legacy (pre-AVBSF) bitstream filter chain */
677 AVPacket new_pkt = *pkt;
678 AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
681 int a = av_bitstream_filter_filter(bsfc, avctx,
682 bsf_arg ? bsf_arg->value : NULL,
683 &new_pkt.data, &new_pkt.size,
684 pkt->data, pkt->size,
685 pkt->flags & AV_PKT_FLAG_KEY);
686 if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
687 uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
689 memcpy(t, new_pkt.data, new_pkt.size);
690 memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
698 pkt->side_data = NULL;
699 pkt->side_data_elems = 0;
/* wrap the filtered data in a refcounted buffer the muxer can own */
701 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
702 av_buffer_default_free, NULL, 0);
707 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
708 bsfc->filter->name, pkt->stream_index,
709 avctx->codec ? avctx->codec->name : "copy");
/* timestamp sanitation for formats that carry timestamps */
719 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
720 if (pkt->dts != AV_NOPTS_VALUE &&
721 pkt->pts != AV_NOPTS_VALUE &&
722 pkt->dts > pkt->pts) {
723 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
725 ost->file_index, ost->st->index);
/* pick the middle of {pts, dts, last_mux_dts+1}: sum minus min minus max */
727 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
728 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
729 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
732 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
733 pkt->dts != AV_NOPTS_VALUE &&
734 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* strict formats require strictly increasing DTS, others allow equality */
735 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
736 if (pkt->dts < max) {
737 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
738 av_log(s, loglevel, "Non-monotonous DTS in output stream "
739 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
740 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
742 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
745 av_log(s, loglevel, "changing to %"PRId64". This may result "
746 "in incorrect timestamps in the output file.\n",
/* keep pts >= dts after clamping dts up to 'max' */
748 if(pkt->pts >= pkt->dts)
749 pkt->pts = FFMAX(pkt->pts, max);
754 ost->last_mux_dts = pkt->dts;
/* stream statistics */
756 ost->data_size += pkt->size;
757 ost->packets_written++;
759 pkt->stream_index = ost->index;
761 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
763 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
764 av_get_media_type_string(ost->enc_ctx->codec_type),
765 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
766 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
771 ret = av_interleaved_write_frame(s, pkt);
/* on mux error: report, remember the failure, and stop all streams */
773 print_error("av_interleaved_write_frame()", ret);
774 main_return_code = 1;
775 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
/* Mark this stream's encoder finished and shorten the file's recording time
 * to the stream's current end position (so other streams stop at the same
 * point once -t / recording_time is reached). */
780 static void close_output_stream(OutputStream *ost)
782 OutputFile *of = output_files[ost->file_index];
784 ost->finished |= ENCODER_FINISHED;
/* current end position in AV_TIME_BASE units */
786 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
787 of->recording_time = FFMIN(of->recording_time, end);
/* Check whether the output file's -t limit has been reached for this
 * stream; if so, close the stream.  Return values are missing from this
 * sparse extract (callers use it as "continue encoding?"). */
791 static int check_recording_time(OutputStream *ost)
793 OutputFile *of = output_files[ost->file_index];
795 if (of->recording_time != INT64_MAX &&
796 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
797 AV_TIME_BASE_Q) >= 0) {
798 close_output_stream(ost);
/* Encode one audio frame and hand the resulting packet to write_frame().
 * Generates frame pts from sync_opts when missing (or when audio sync is
 * disabled), advances sync_opts by nb_samples, and rescales the packet
 * timestamps from the encoder to the stream time base.
 * NOTE(review): sparse extract — pkt/got_packet declarations, the
 * got_packet guard and debug_ts conditionals are missing from this view. */
804 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
807 AVCodecContext *enc = ost->enc_ctx;
811 av_init_packet(&pkt);
815 if (!check_recording_time(ost))
/* derive pts from the running sample counter when absent/forced */
818 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
819 frame->pts = ost->sync_opts;
820 ost->sync_opts = frame->pts + frame->nb_samples;
821 ost->samples_encoded += frame->nb_samples;
822 ost->frames_encoded++;
824 av_assert0(pkt.size || !pkt.data);
825 update_benchmark(NULL);
827 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
828 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
829 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
830 enc->time_base.num, enc->time_base.den);
833 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
834 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
837 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* encoder time base -> muxer/stream time base */
840 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
843 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
844 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
845 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
846 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
849 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux the resulting packet(s).  DVB subtitles are
 * encoded twice (one packet draws, one clears); display times are
 * normalized so start_display_time is 0 before encoding.
 * NOTE(review): sparse extract — enc/pkt/pts declarations, nb assignment
 * for the non-DVB case and several braces are missing from this view. */
853 static void do_subtitle_out(AVFormatContext *s,
858 int subtitle_out_max_size = 1024 * 1024;
859 int subtitle_out_size, nb, i;
864 if (sub->pts == AV_NOPTS_VALUE) {
865 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the shared 1 MiB encode buffer */
874 subtitle_out = av_malloc(subtitle_out_max_size);
876 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
881 /* Note: DVB subtitle need one packet to draw them and one other
882 packet to clear them */
883 /* XXX: signal it in the codec context ? */
884 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
889 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
891 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
892 pts -= output_files[ost->file_index]->start_time;
893 for (i = 0; i < nb; i++) {
/* the encoder may clear rects on the second (clear) pass; save to restore */
894 unsigned save_num_rects = sub->num_rects;
896 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
897 if (!check_recording_time(ost))
901 // start_display_time is required to be 0
902 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
903 sub->end_display_time -= sub->start_display_time;
904 sub->start_display_time = 0;
908 ost->frames_encoded++;
910 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
911 subtitle_out_max_size, sub);
913 sub->num_rects = save_num_rects;
914 if (subtitle_out_size < 0) {
915 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
919 av_init_packet(&pkt);
920 pkt.data = subtitle_out;
921 pkt.size = subtitle_out_size;
922 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
923 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
924 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
925 /* XXX: the pts correction is handled here. Maybe handling
926 it in the codec would be better */
/* 90 = 90 kHz MPEG clock ticks per millisecond */
928 pkt.pts += 90 * sub->start_display_time;
930 pkt.pts += 90 * sub->end_display_time;
933 write_frame(s, &pkt, ost);
/* Video output path: apply the selected video sync method (CFR/VFR/
 * passthrough/drop) to decide how many copies of next_picture to emit
 * (duplicating or dropping frames as needed), handle forced keyframes, and
 * either pass raw pictures straight to the muxer or encode and mux them.
 * Also keeps ost->last_frame so a dropped frame can be re-emitted.
 * NOTE(review): very sparse extract — parameters (sync_ipts), many
 * declarations, case labels, braces and else-branches are missing from this
 * view; the logic below must be read as fragments, kept verbatim. */
937 static void do_video_out(AVFormatContext *s,
939 AVFrame *next_picture,
942 int ret, format_video_sync;
944 AVCodecContext *enc = ost->enc_ctx;
945 AVCodecContext *mux_enc = ost->st->codec;
946 int nb_frames, nb0_frames, i;
947 double delta, delta0;
950 InputStream *ist = NULL;
951 AVFilterContext *filter = ost->filter->filter;
953 if (ost->source_index >= 0)
954 ist = input_streams[ost->source_index];
/* derive the nominal frame duration (in encoder time-base units) from the
 * filter output frame rate, the output -r, or the packet duration */
956 if (filter->inputs[0]->frame_rate.num > 0 &&
957 filter->inputs[0]->frame_rate.den > 0)
958 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
960 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
961 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
963 if (!ost->filters_script &&
967 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
968 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* flush path: predict how many frames to emit from recent history */
973 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
974 ost->last_nb0_frames[1],
975 ost->last_nb0_frames[2]);
/* delta0: gap between the frame's ideal pts and the encoder clock */
977 delta0 = sync_ipts - ost->sync_opts;
978 delta = delta0 + duration;
980 /* by default, we output a single frame */
/* resolve VSYNC_AUTO into a concrete sync method for this muxer */
984 format_video_sync = video_sync_method;
985 if (format_video_sync == VSYNC_AUTO) {
986 if(!strcmp(s->oformat->name, "avi")) {
987 format_video_sync = VSYNC_VFR;
989 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
991 && format_video_sync == VSYNC_CFR
992 && input_files[ist->file_index]->ctx->nb_streams == 1
993 && input_files[ist->file_index]->input_ts_offset == 0) {
994 format_video_sync = VSYNC_VSCFR;
996 if (format_video_sync == VSYNC_CFR && copy_ts) {
997 format_video_sync = VSYNC_VSCFR;
/* frame arrived in the past: clip its duration instead of dropping */
1003 format_video_sync != VSYNC_PASSTHROUGH &&
1004 format_video_sync != VSYNC_DROP) {
1005 double cor = FFMIN(-delta0, duration);
1006 if (delta0 < -0.6) {
1007 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1009 av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
1015 switch (format_video_sync) {
1017 if (ost->frame_number == 0 && delta - duration >= 0.5) {
1018 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1021 ost->sync_opts = lrint(sync_ipts);
1024 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
/* CFR: drop when behind, duplicate when ahead by more than a frame */
1025 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1027 } else if (delta < -1.1)
1029 else if (delta > 1.1) {
1030 nb_frames = lrintf(delta);
1032 nb0_frames = lrintf(delta0 - 0.6);
1038 else if (delta > 0.6)
1039 ost->sync_opts = lrint(sync_ipts);
1042 case VSYNC_PASSTHROUGH:
1043 ost->sync_opts = lrint(sync_ipts);
/* honor -frames and remember the dup decision for the flush predictor */
1050 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1051 nb0_frames = FFMIN(nb0_frames, nb_frames);
1053 memmove(ost->last_nb0_frames + 1,
1054 ost->last_nb0_frames,
1055 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1056 ost->last_nb0_frames[0] = nb0_frames;
1058 if (nb0_frames == 0 && ost->last_droped) {
1060 av_log(NULL, AV_LOG_VERBOSE,
1061 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1062 ost->frame_number, ost->st->index, ost->last_frame->pts);
1064 if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1065 if (nb_frames > dts_error_threshold * 30) {
1066 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1070 nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1071 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1073 ost->last_droped = nb_frames == nb0_frames && next_picture;
1075 /* duplicates frame if needed */
1076 for (i = 0; i < nb_frames; i++) {
1077 AVFrame *in_picture;
1078 av_init_packet(&pkt);
/* first nb0_frames copies re-emit the previous frame */
1082 if (i < nb0_frames && ost->last_frame) {
1083 in_picture = ost->last_frame;
1085 in_picture = next_picture;
1090 in_picture->pts = ost->sync_opts;
1093 if (!check_recording_time(ost))
1095 if (ost->frame_number >= ost->max_frames)
/* raw-picture shortcut: no encode, hand the AVPicture to the muxer */
1099 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1100 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1101 /* raw pictures are written as AVPicture structure to
1102 avoid any copies. We support temporarily the older
1104 if (in_picture->interlaced_frame)
1105 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1107 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1108 pkt.data = (uint8_t *)in_picture;
1109 pkt.size = sizeof(AVPicture);
1110 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1111 pkt.flags |= AV_PKT_FLAG_KEY;
1113 write_frame(s, &pkt, ost);
/* normal path: encode the picture */
1115 int got_packet, forced_keyframe = 0;
1118 if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
1119 ost->top_field_first >= 0)
1120 in_picture->top_field_first = !!ost->top_field_first;
1122 if (in_picture->interlaced_frame) {
1123 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1124 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1126 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1128 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1130 in_picture->quality = enc->global_quality;
1131 in_picture->pict_type = 0;
/* forced keyframes: explicit pts list, -force_key_frames expression, or
 * "source" (copy keyframe flags from the input) */
1133 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1134 in_picture->pts * av_q2d(enc->time_base) : NAN;
1135 if (ost->forced_kf_index < ost->forced_kf_count &&
1136 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1137 ost->forced_kf_index++;
1138 forced_keyframe = 1;
1139 } else if (ost->forced_keyframes_pexpr) {
1141 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1142 res = av_expr_eval(ost->forced_keyframes_pexpr,
1143 ost->forced_keyframes_expr_const_values, NULL);
1144 av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1145 ost->forced_keyframes_expr_const_values[FKF_N],
1146 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1147 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1148 ost->forced_keyframes_expr_const_values[FKF_T],
1149 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1152 forced_keyframe = 1;
1153 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1154 ost->forced_keyframes_expr_const_values[FKF_N];
1155 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1156 ost->forced_keyframes_expr_const_values[FKF_T];
1157 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1160 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1161 } else if ( ost->forced_keyframes
1162 && !strncmp(ost->forced_keyframes, "source", 6)
1163 && in_picture->key_frame==1) {
1164 forced_keyframe = 1;
1167 if (forced_keyframe) {
1168 in_picture->pict_type = AV_PICTURE_TYPE_I;
1169 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1172 update_benchmark(NULL);
1174 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1175 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1176 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1177 enc->time_base.num, enc->time_base.den);
1180 ost->frames_encoded++;
1182 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1183 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1185 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1191 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1192 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1193 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1194 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1197 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
1198 pkt.pts = ost->sync_opts;
1200 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1203 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1204 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1205 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1206 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1209 frame_size = pkt.size;
1210 write_frame(s, &pkt, ost);
1212 /* if two pass, output log */
1213 if (ost->logfile && enc->stats_out) {
1214 fprintf(ost->logfile, "%s", enc->stats_out);
1220 * For video, number of frames in == number of packets out.
1221 * But there may be reordering, so we can't throw away frames on encoder
1222 * flush, we need to limit them here, before they go into encoder.
1224 ost->frame_number++;
1226 if (vstats_filename && frame_size)
1227 do_video_stats(ost, frame_size);
/* remember this picture so a future drop can re-emit it */
1230 if (!ost->last_frame)
1231 ost->last_frame = av_frame_alloc();
1232 av_frame_unref(ost->last_frame);
1233 if (next_picture && ost->last_frame)
1234 av_frame_ref(ost->last_frame, next_picture);
1236 av_frame_free(&ost->last_frame);
1239 static double psnr(double d)
1241 return -10.0 * log(d) / log(10.0);
/* Append one line of per-frame statistics (frame number, quantizer, PSNR,
 * frame size, bitrates, picture type) to the -vstats file, opening it on
 * first use.
 * NOTE(review): sparse extract — fopen error handling, enc assignment and
 * some declarations are missing from this view. */
1244 static void do_video_stats(OutputStream *ost, int frame_size)
1246 AVCodecContext *enc;
1248 double ti1, bitrate, avg_bitrate;
1250 /* this is executed just the first time do_video_stats is called */
1252 vstats_file = fopen(vstats_filename, "w");
1260 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1261 frame_number = ost->st->nb_frames;
1262 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
1263 if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
/* normalize error energy by pixel count * 255^2 before converting to dB */
1264 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1266 fprintf(vstats_file,"f_size= %6d ", frame_size);
1267 /* compute pts value */
1268 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous and average bitrate in kbit/s */
1272 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1273 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1274 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1275 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1276 fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
/* Mark this stream fully finished (encoder and muxer); the loop over all
 * streams of the file appears guarded by a condition missing from this
 * sparse extract (in full context: only when the whole file shuts down). */
1280 static void finish_output_stream(OutputStream *ost)
1282 OutputFile *of = output_files[ost->file_index];
1285 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1288 for (i = 0; i < of->ctx->nb_streams; i++)
1289 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
 * Drain every output stream's buffersink and encode whatever frames are
 * available, without requesting new frames from the filtergraphs.
 */
1294 * Get and encode new output from any of the filtergraphs, without causing
1297 * @return 0 for success, <0 for severe errors
1299 static int reap_filters(int flush)
1301 AVFrame *filtered_frame = NULL;
1304 /* Reap all buffers present in the buffer sinks */
1305 for (i = 0; i < nb_output_streams; i++) {
1306 OutputStream *ost = output_streams[i];
1307 OutputFile *of = output_files[ost->file_index];
1308 AVFilterContext *filter;
1309 AVCodecContext *enc = ost->enc_ctx;
1314 filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to receive filter output */
1316 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1317 return AVERROR(ENOMEM);
1319 filtered_frame = ost->filtered_frame;
1322 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already queued; do not drive the graph */
1323 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1324 AV_BUFFERSINK_FLAG_NO_REQUEST);
1326 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1327 av_log(NULL, AV_LOG_WARNING,
1328 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1329 } else if (flush && ret == AVERROR_EOF) {
/* on EOF during a flush, push a NULL frame so video encoders drain */
1330 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1331 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1335 if (ost->finished) {
1336 av_frame_unref(filtered_frame);
1339 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1340 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1341 AVRational tb = enc->time_base;
/* widen the time base so float_pts keeps extra fractional precision */
1342 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1344 tb.den <<= extra_bits;
1346 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1347 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1348 float_pts /= 1 << extra_bits;
1349 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1350 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1352 filtered_frame->pts =
1353 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1354 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1356 //if (ost->source_index >= 0)
1357 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1359 switch (filter->inputs[0]->type) {
1360 case AVMEDIA_TYPE_VIDEO:
1361 if (!ost->frame_aspect_ratio.num)
1362 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1365 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1366 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1368 enc->time_base.num, enc->time_base.den);
1371 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1373 case AVMEDIA_TYPE_AUDIO:
/* encoder cannot adapt to channel-count changes coming from the graph */
1374 if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
1375 enc->channels != av_frame_get_channels(filtered_frame)) {
1376 av_log(NULL, AV_LOG_ERROR,
1377 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1380 do_audio_out(of->ctx, ost, filtered_frame);
1383 // TODO support subtitle filters
1387 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type output byte totals, muxing
 * overhead percentage, and (at verbose level) per-stream demux/mux,
 * decode/encode statistics for every input and output file.
 *
 * @param total_size  total number of bytes written to the first output
 *                    (as reported by avio), or a negative/zero value when
 *                    unknown
 */
1394 static void print_final_stats(int64_t total_size)
1396 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1397 uint64_t subtitle_size = 0;
1398 uint64_t data_size = 0;
/* -1 means "unknown"; printed as "unknown" below */
1399 float percent = -1.0;
1403 for (i = 0; i < nb_output_streams; i++) {
1404 OutputStream *ost = output_streams[i];
1405 switch (ost->enc_ctx->codec_type) {
1406 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1407 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1408 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1409 default: other_size += ost->data_size; break;
1411 extra_size += ost->enc_ctx->extradata_size;
1412 data_size += ost->data_size;
/* two-pass first-pass streams write a log, not real output */
1413 if ( (ost->enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1414 != CODEC_FLAG_PASS1)
/* overhead = container bytes beyond the raw elementary-stream bytes */
1418 if (data_size && total_size>0 && total_size >= data_size)
1419 percent = 100.0 * (total_size - data_size) / data_size;
1421 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1422 video_size / 1024.0,
1423 audio_size / 1024.0,
1424 subtitle_size / 1024.0,
1425 other_size / 1024.0,
1426 extra_size / 1024.0);
1428 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1430 av_log(NULL, AV_LOG_INFO, "unknown");
1431 av_log(NULL, AV_LOG_INFO, "\n");
1433 /* print verbose per-stream stats */
1434 for (i = 0; i < nb_input_files; i++) {
1435 InputFile *f = input_files[i];
/* shadows the parameter intentionally: per-file totals */
1436 uint64_t total_packets = 0, total_size = 0;
1438 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1439 i, f->ctx->filename);
1441 for (j = 0; j < f->nb_streams; j++) {
1442 InputStream *ist = input_streams[f->ist_index + j];
1443 enum AVMediaType type = ist->dec_ctx->codec_type;
1445 total_size += ist->data_size;
1446 total_packets += ist->nb_packets;
1448 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1449 i, j, media_type_string(type));
1450 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1451 ist->nb_packets, ist->data_size);
1453 if (ist->decoding_needed) {
1454 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1455 ist->frames_decoded);
1456 if (type == AVMEDIA_TYPE_AUDIO)
1457 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1458 av_log(NULL, AV_LOG_VERBOSE, "; ");
1461 av_log(NULL, AV_LOG_VERBOSE, "\n");
1464 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1465 total_packets, total_size);
1468 for (i = 0; i < nb_output_files; i++) {
1469 OutputFile *of = output_files[i];
1470 uint64_t total_packets = 0, total_size = 0;
1472 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1473 i, of->ctx->filename);
1475 for (j = 0; j < of->ctx->nb_streams; j++) {
1476 OutputStream *ost = output_streams[of->ost_index + j];
1477 enum AVMediaType type = ost->enc_ctx->codec_type;
1479 total_size += ost->data_size;
1480 total_packets += ost->packets_written;
1482 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1483 i, j, media_type_string(type));
1484 if (ost->encoding_needed) {
1485 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1486 ost->frames_encoded);
1487 if (type == AVMEDIA_TYPE_AUDIO)
1488 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1489 av_log(NULL, AV_LOG_VERBOSE, "; ");
1492 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1493 ost->packets_written, ost->data_size);
1495 av_log(NULL, AV_LOG_VERBOSE, "\n");
1498 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1499 total_packets, total_size);
/* warn the user loudly when literally nothing was produced */
1501 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1502 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1504 av_log(NULL, AV_LOG_WARNING, "\n");
1506 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic (and final) progress report: frame count, fps, q,
 * size, time, bitrate, dup/drop counters — to stderr/log and, when
 * -progress is in use, as key=value lines to progress_avio.
 *
 * Uses static state (last_time, qp_histogram), so it is not reentrant;
 * it is called from the single main transcode loop.
 */
1511 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1514 AVBPrint buf_script;
1516 AVFormatContext *oc;
1518 AVCodecContext *enc;
1519 int frame_number, vid, i;
1521 int64_t pts = INT64_MIN;
1522 static int64_t last_time = -1;
1523 static int qp_histogram[52];
1524 int hours, mins, secs, us;
/* nothing to do if no consumer for the report */
1526 if (!print_stats && !is_last_report && !progress_avio)
/* rate-limit intermediate reports to one per 500 ms */
1529 if (!is_last_report) {
1530 if (last_time == -1) {
1531 last_time = cur_time;
1534 if ((cur_time - last_time) < 500000)
1536 last_time = cur_time;
1540 oc = output_files[0]->ctx;
1542 total_size = avio_size(oc->pb);
1543 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1544 total_size = avio_tell(oc->pb);
1548 av_bprint_init(&buf_script, 0, 1);
1549 for (i = 0; i < nb_output_streams; i++) {
1551 ost = output_streams[i];
/* NOTE(review): the `enc = ost->enc_ctx;` and q initialization lines
 * appear to be omitted from this dump — confirm against upstream. */
1553 if (!ost->stream_copy && enc->coded_frame)
1554 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
/* secondary video streams only contribute a q= field */
1555 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1556 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1557 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1558 ost->file_index, ost->index, q);
/* first video stream drives frame/fps reporting */
1560 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1561 float fps, t = (cur_time-timer_start) / 1000000.0;
1563 frame_number = ost->frame_number;
1564 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" picks 1 decimal below ~10 fps, 0 above */
1565 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1566 frame_number, fps < 9.95, fps, q);
1567 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1568 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1569 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1570 ost->file_index, ost->index, q);
1572 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* per-QP histogram, printed as one log2 digit per bucket */
1576 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1578 for (j = 0; j < 32; j++)
1579 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1581 if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
1583 double error, error_sum = 0;
1584 double scale, scale_sum = 0;
1586 char type[3] = { 'Y','U','V' };
1587 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1588 for (j = 0; j < 3; j++) {
/* final report: accumulated error over all frames; otherwise per-frame */
1589 if (is_last_report) {
1590 error = enc->error[j];
1591 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1593 error = enc->coded_frame->error[j];
1594 scale = enc->width * enc->height * 255.0 * 255.0;
1600 p = psnr(error / scale);
1601 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* `| 32` lowercases the plane letter for the script output */
1602 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1603 ost->file_index, ost->index, type[j] | 32, p);
1605 p = psnr(error_sum / scale_sum);
1606 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1607 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1608 ost->file_index, ost->index, p);
1612 /* compute min output value */
1613 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1614 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1615 ost->st->time_base, AV_TIME_BASE_Q));
1617 nb_frames_drop += ost->last_droped;
1620 secs = FFABS(pts) / AV_TIME_BASE;
1621 us = FFABS(pts) % AV_TIME_BASE;
/* -1 signals "bitrate unknown" and prints as N/A below */
1627 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1629 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1631 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1632 "size=%8.0fkB time=", total_size / 1024.0);
1634 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1635 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1636 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1637 (100 * us) / AV_TIME_BASE);
1640 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1641 av_bprintf(&buf_script, "bitrate=N/A\n");
1643 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1644 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1647 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1648 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1649 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1650 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1651 hours, mins, secs, us);
1653 if (nb_frames_dup || nb_frames_drop)
1654 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1655 nb_frames_dup, nb_frames_drop);
1656 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1657 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1659 if (print_stats || is_last_report) {
/* carriage return keeps the live report on one terminal line */
1660 const char end = is_last_report ? '\n' : '\r';
1661 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1662 fprintf(stderr, "%s %c", buf, end);
1664 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1669 if (progress_avio) {
1670 av_bprintf(&buf_script, "progress=%s\n",
1671 is_last_report ? "end" : "continue");
1672 avio_write(progress_avio, buf_script.str,
1673 FFMIN(buf_script.len, buf_script.size - 1));
1674 avio_flush(progress_avio);
1675 av_bprint_finalize(&buf_script, NULL);
1676 if (is_last_report) {
1677 avio_closep(&progress_avio);
1682 print_final_stats(total_size);
/*
 * Drain every encoder at end of stream: repeatedly call the encode
 * function with a NULL frame until no more packets are produced, and
 * mux the resulting packets.
 *
 * NOTE(review): the loop structure, `stop_encoding` checks, `got_packet`
 * handling and several error paths are on lines omitted from this dump.
 */
1685 static void flush_encoders(void)
1689 for (i = 0; i < nb_output_streams; i++) {
1690 OutputStream *ost = output_streams[i];
1691 AVCodecContext *enc = ost->enc_ctx;
1692 AVFormatContext *os = output_files[ost->file_index]->ctx;
1693 int stop_encoding = 0;
1695 if (!ost->encoding_needed)
/* raw/PCM-style audio encoders (frame_size <= 1) have nothing buffered */
1698 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
/* RAWPICTURE video bypasses the encoder entirely, nothing to flush */
1700 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1704 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1707 switch (enc->codec_type) {
1708 case AVMEDIA_TYPE_AUDIO:
1709 encode = avcodec_encode_audio2;
1712 case AVMEDIA_TYPE_VIDEO:
1713 encode = avcodec_encode_video2;
1724 av_init_packet(&pkt);
1728 update_benchmark(NULL);
/* NULL frame => ask the encoder to emit its delayed packets */
1729 ret = encode(enc, &pkt, NULL, &got_packet);
1730 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1732 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
/* keep appending two-pass stats while flushing */
1735 if (ost->logfile && enc->stats_out) {
1736 fprintf(ost->logfile, "%s", enc->stats_out);
1742 if (ost->finished & MUXER_FINISHED) {
1743 av_free_packet(&pkt);
1746 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
/* save size before write_frame may consume the packet */
1747 pkt_size = pkt.size;
1748 write_frame(os, &pkt, ost);
1749 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1750 do_video_stats(ost, pkt_size);
/*
 * Decide whether a packet from this input stream should be written to
 * this output stream right now (streamcopy path).
 *
 * NOTE(review): the return statements (and the frame_number constraint
 * present upstream) are on lines omitted from this dump; visible checks
 * reject mismatched source streams and packets before -ss start time.
 */
1761 * Check whether a packet from ist should be written into ost at this time
1763 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1765 OutputFile *of = output_files[ost->file_index];
1766 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1768 if (ost->source_index != ist_index)
1774 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding:
 * rescale timestamps into the output time base, apply -ss offsets,
 * honour recording-time limits, optionally run the parser to repair
 * bitstreams, and hand the packet to the muxer.
 */
1780 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1782 OutputFile *of = output_files[ost->file_index];
1783 InputFile *f = input_files [ist->file_index];
1784 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1785 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1786 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1790 av_init_packet(&opkt);
/* by default, do not start a copied stream on a non-keyframe */
1792 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1793 !ost->copy_initial_nonkeyframes)
/* drop pre-start packets unless -copypriorss asked to keep them */
1796 if (pkt->pts == AV_NOPTS_VALUE) {
1797 if (!ost->frame_number && ist->pts < start_time &&
1798 !ost->copy_prior_start)
1801 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1802 !ost->copy_prior_start)
/* stop the stream once the output -t limit is reached */
1806 if (of->recording_time != INT64_MAX &&
1807 ist->pts >= of->recording_time + start_time) {
1808 close_output_stream(ost);
/* per-input-file recording limit (input -t) */
1812 if (f->recording_time != INT64_MAX) {
1813 start_time = f->ctx->start_time;
1814 if (f->start_time != AV_NOPTS_VALUE)
1815 start_time += f->start_time;
1816 if (ist->pts >= f->recording_time + start_time) {
1817 close_output_stream(ost);
1822 /* force the input stream PTS */
1823 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1826 if (pkt->pts != AV_NOPTS_VALUE)
1827 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1829 opkt.pts = AV_NOPTS_VALUE;
/* no input DTS: fall back to the tracked per-stream clock */
1831 if (pkt->dts == AV_NOPTS_VALUE)
1832 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1834 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1835 opkt.dts -= ost_tb_start_time;
/* audio: derive exact timestamps from the sample count to avoid drift */
1837 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1838 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1840 duration = ist->dec_ctx->frame_size;
1841 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1842 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1843 ost->st->time_base) - ost_tb_start_time;
1846 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1847 opkt.flags = pkt->flags;
1849 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1850 if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1851 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1852 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1853 && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
/* parser may allocate new data; wrap it in a refcounted buffer */
1855 if (av_parser_change(ost->parser, ost->st->codec,
1856 &opkt.data, &opkt.size,
1857 pkt->data, pkt->size,
1858 pkt->flags & AV_PKT_FLAG_KEY)) {
1859 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1864 opkt.data = pkt->data;
1865 opkt.size = pkt->size;
1867 av_copy_packet_side_data(&opkt, pkt);
1869 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1870 /* store AVPicture in AVPacket, as expected by the output format */
1871 avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1872 opkt.data = (uint8_t *)&pict;
1873 opkt.size = sizeof(AVPicture);
1874 opkt.flags |= AV_PKT_FLAG_KEY;
1877 write_frame(of->ctx, &opkt, ost);
/*
 * If the decoder reports no channel layout, guess a default one from the
 * channel count (bounded by -guess_layout_max) and warn the user.
 *
 * NOTE(review): the return statements are on lines omitted from this
 * dump; upstream returns nonzero on success, 0 when no layout could be
 * guessed.
 */
1880 int guess_input_channel_layout(InputStream *ist)
1882 AVCodecContext *dec = ist->dec_ctx;
1884 if (!dec->channel_layout) {
1885 char layout_name[256];
/* user capped how many channels we are allowed to guess for */
1887 if (dec->channels > ist->guess_layout_max)
1889 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1890 if (!dec->channel_layout)
1892 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1893 dec->channels, dec->channel_layout);
1894 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1895 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Decode one audio packet, track timestamps, detect sample-format /
 * rate / channel changes (reconfiguring affected filtergraphs), and
 * push the decoded frame into every filter this stream feeds.
 *
 * @param ist         input stream owning the decoder
 * @param pkt         packet to decode (pts is consumed and cleared)
 * @param got_output  set by the decoder when a frame was produced
 * @return 0 or the decoder/filter error code
 */
1900 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1902 AVFrame *decoded_frame, *f;
1903 AVCodecContext *avctx = ist->dec_ctx;
1904 int i, ret, err = 0, resample_changed;
1905 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
1907 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1908 return AVERROR(ENOMEM);
1909 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1910 return AVERROR(ENOMEM);
1911 decoded_frame = ist->decoded_frame;
1913 update_benchmark(NULL);
1914 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1915 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1917 if (ret >= 0 && avctx->sample_rate <= 0) {
1918 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1919 ret = AVERROR_INVALIDDATA;
/* decode_error_stat[0] counts successes, [1] counts failures */
1922 if (*got_output || ret<0)
1923 decode_error_stat[ret<0] ++;
1925 if (ret < 0 && exit_on_error)
1928 if (!*got_output || ret < 0)
1931 ist->samples_decoded += decoded_frame->nb_samples;
1932 ist->frames_decoded++;
1935 /* increment next_dts to use for the case where the input stream does not
1936 have timestamps or there are multiple frames in the packet */
1937 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1939 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* any of these changing requires rebuilding the filtergraph */
1943 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1944 ist->resample_channels != avctx->channels ||
1945 ist->resample_channel_layout != decoded_frame->channel_layout ||
1946 ist->resample_sample_rate != decoded_frame->sample_rate;
1947 if (resample_changed) {
1948 char layout1[64], layout2[64];
1950 if (!guess_input_channel_layout(ist)) {
1951 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1952 "layout for Input Stream #%d.%d\n", ist->file_index,
1956 decoded_frame->channel_layout = avctx->channel_layout;
1958 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1959 ist->resample_channel_layout);
1960 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1961 decoded_frame->channel_layout);
1963 av_log(NULL, AV_LOG_INFO,
1964 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1965 ist->file_index, ist->st->index,
1966 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1967 ist->resample_channels, layout1,
1968 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1969 avctx->channels, layout2);
/* remember the new parameters so we reconfigure only on real changes */
1971 ist->resample_sample_fmt = decoded_frame->format;
1972 ist->resample_sample_rate = decoded_frame->sample_rate;
1973 ist->resample_channel_layout = decoded_frame->channel_layout;
1974 ist->resample_channels = avctx->channels;
1976 for (i = 0; i < nb_filtergraphs; i++)
1977 if (ist_in_filtergraph(filtergraphs[i], ist)) {
1978 FilterGraph *fg = filtergraphs[i];
1979 if (configure_filtergraph(fg) < 0) {
1980 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1986 /* if the decoder provides a pts, use it instead of the last packet pts.
1987 the decoder could be delaying output by a packet or more. */
1988 if (decoded_frame->pts != AV_NOPTS_VALUE) {
1989 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
1990 decoded_frame_tb = avctx->time_base;
1991 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
1992 decoded_frame->pts = decoded_frame->pkt_pts;
1993 decoded_frame_tb = ist->st->time_base;
1994 } else if (pkt->pts != AV_NOPTS_VALUE) {
1995 decoded_frame->pts = pkt->pts;
1996 decoded_frame_tb = ist->st->time_base;
1998 decoded_frame->pts = ist->dts;
1999 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so it is not applied to a later frame */
2001 pkt->pts = AV_NOPTS_VALUE;
2002 if (decoded_frame->pts != AV_NOPTS_VALUE)
2003 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2004 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2005 (AVRational){1, avctx->sample_rate});
/* ref the frame for all filters but the last, which takes ownership */
2006 for (i = 0; i < ist->nb_filters; i++) {
2007 if (i < ist->nb_filters - 1) {
2008 f = ist->filter_frame;
2009 err = av_frame_ref(f, decoded_frame);
2014 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2015 AV_BUFFERSRC_FLAG_PUSH);
2016 if (err == AVERROR_EOF)
2017 err = 0; /* ignore */
2021 decoded_frame->pts = AV_NOPTS_VALUE;
2023 av_frame_unref(ist->filter_frame);
2024 av_frame_unref(decoded_frame);
2025 return err < 0 ? err : ret;
/*
 * Decode one video packet, fix up timestamps (best-effort timestamp),
 * retrieve hwaccel frames to system memory, detect size/pixfmt changes
 * (reconfiguring filtergraphs), and feed the frame to this stream's
 * filters.
 *
 * @param ist         input stream owning the decoder
 * @param pkt         packet to decode (dts is overwritten from ist->dts)
 * @param got_output  set by the decoder when a frame was produced
 * @return 0 or the decoder/filter error code
 */
2028 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2030 AVFrame *decoded_frame, *f;
2031 int i, ret = 0, err = 0, resample_changed;
2032 int64_t best_effort_timestamp;
2033 AVRational *frame_sample_aspect;
2035 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2036 return AVERROR(ENOMEM);
2037 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2038 return AVERROR(ENOMEM);
/* feed the tracked stream clock to the decoder as the packet dts */
2039 decoded_frame = ist->decoded_frame;
2040 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2042 update_benchmark(NULL);
2043 ret = avcodec_decode_video2(ist->dec_ctx,
2044 decoded_frame, got_output, pkt);
2045 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2047 // The following line may be required in some cases where there is no parser
2048 // or the parser does not has_b_frames correctly
2049 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2050 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2051 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2053 av_log_ask_for_sample(
2055 "has_b_frames is larger in decoder than demuxer %d > %d ",
2056 ist->dec_ctx->has_b_frames,
2057 ist->st->codec->has_b_frames
2061 if (*got_output || ret<0)
2062 decode_error_stat[ret<0] ++;
2064 if (ret < 0 && exit_on_error)
/* context vs frame parameter mismatch is only logged, not fatal */
2067 if (*got_output && ret >= 0) {
2068 if (ist->dec_ctx->width != decoded_frame->width ||
2069 ist->dec_ctx->height != decoded_frame->height ||
2070 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2071 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2072 decoded_frame->width,
2073 decoded_frame->height,
2074 decoded_frame->format,
2075 ist->dec_ctx->width,
2076 ist->dec_ctx->height,
2077 ist->dec_ctx->pix_fmt);
2081 if (!*got_output || ret < 0)
/* -top option override */
2084 if(ist->top_field_first>=0)
2085 decoded_frame->top_field_first = ist->top_field_first;
2087 ist->frames_decoded++;
/* copy hwaccel surface back to system memory for sw filtering */
2089 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2090 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2094 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2096 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2097 if(best_effort_timestamp != AV_NOPTS_VALUE)
2098 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2101 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2102 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2103 ist->st->index, av_ts2str(decoded_frame->pts),
2104 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2105 best_effort_timestamp,
2106 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2107 decoded_frame->key_frame, decoded_frame->pict_type,
2108 ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides the codec-level value */
2113 if (ist->st->sample_aspect_ratio.num)
2114 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2116 resample_changed = ist->resample_width != decoded_frame->width ||
2117 ist->resample_height != decoded_frame->height ||
2118 ist->resample_pix_fmt != decoded_frame->format;
2119 if (resample_changed) {
2120 av_log(NULL, AV_LOG_INFO,
2121 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2122 ist->file_index, ist->st->index,
2123 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2124 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2126 ist->resample_width = decoded_frame->width;
2127 ist->resample_height = decoded_frame->height;
2128 ist->resample_pix_fmt = decoded_frame->format;
2130 for (i = 0; i < nb_filtergraphs; i++) {
2131 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2132 configure_filtergraph(filtergraphs[i]) < 0) {
2133 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2139 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2140 for (i = 0; i < ist->nb_filters; i++) {
2141 if (!frame_sample_aspect->num)
2142 *frame_sample_aspect = ist->st->sample_aspect_ratio;
/* ref the frame for all filters but the last, which takes ownership */
2144 if (i < ist->nb_filters - 1) {
2145 f = ist->filter_frame;
2146 err = av_frame_ref(f, decoded_frame);
2151 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2152 if (ret == AVERROR_EOF) {
2153 ret = 0; /* ignore */
2154 } else if (ret < 0) {
2155 av_log(NULL, AV_LOG_FATAL,
2156 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2162 av_frame_unref(ist->filter_frame);
2163 av_frame_unref(decoded_frame);
2164 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally fix overlapping display
 * durations (-fix_sub_duration, which delays output by one subtitle),
 * update any sub2video rendering, and encode the subtitle into every
 * matching output stream.
 */
2167 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2169 AVSubtitle subtitle;
2170 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2171 &subtitle, got_output, pkt);
2173 if (*got_output || ret<0)
2174 decode_error_stat[ret<0] ++;
2176 if (ret < 0 && exit_on_error)
2179 if (ret < 0 || !*got_output) {
2181 sub2video_flush(ist);
2185 if (ist->fix_sub_duration) {
2187 if (ist->prev_sub.got_output) {
/* clamp the previous subtitle so it ends when this one starts */
2188 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2189 1000, AV_TIME_BASE);
2190 if (end < ist->prev_sub.subtitle.end_display_time) {
2191 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2192 "Subtitle duration reduced from %d to %d%s\n",
2193 ist->prev_sub.subtitle.end_display_time, end,
2194 end <= 0 ? ", dropping it" : "");
2195 ist->prev_sub.subtitle.end_display_time = end;
/* swap current and previous: we emit the *previous* subtitle now */
2198 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2199 FFSWAP(int, ret, ist->prev_sub.ret);
2200 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2208 sub2video_update(ist, &subtitle);
2210 if (!subtitle.num_rects)
2213 ist->frames_decoded++;
2215 for (i = 0; i < nb_output_streams; i++) {
2216 OutputStream *ost = output_streams[i];
2218 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2219 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2222 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2226 avsubtitle_free(&subtitle);
/*
 * Signal EOF to every buffersrc fed by this input stream by sending a
 * NULL frame/ref.
 *
 * NOTE(review): both add_ref and add_frame calls are visible here — in
 * upstream they are alternatives selected by preprocessor/version lines
 * omitted from this dump; confirm which path applies before editing.
 */
2230 static int send_filter_eof(InputStream *ist)
2233 for (i = 0; i < ist->nb_filters; i++) {
2235 ret = av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
2237 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
/*
 * Central per-packet dispatcher: maintain the input stream's dts/pts
 * clocks, decode the packet (audio/video/subtitle) when decoding is
 * needed — looping for multi-frame packets and flushing on EOF — send
 * filter EOF after the final flush, and run the streamcopy path for
 * non-decoded streams.
 */
2245 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2246 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
/* initialize the stream clock on the very first packet */
2252 if (!ist->saw_first_ts) {
2253 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2255 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2256 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2257 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2259 ist->saw_first_ts = 1;
2262 if (ist->next_dts == AV_NOPTS_VALUE)
2263 ist->next_dts = ist->dts;
2264 if (ist->next_pts == AV_NOPTS_VALUE)
2265 ist->next_pts = ist->pts;
2269 av_init_packet(&avpkt);
2277 if (pkt->dts != AV_NOPTS_VALUE) {
2278 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2279 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2280 ist->next_pts = ist->pts = ist->dts;
2283 // while we have more to decode or while the decoder did output something on EOF
2284 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2288 ist->pts = ist->next_pts;
2289 ist->dts = ist->next_dts;
/* partial consumption without SUBFRAMES capability is suspicious */
2291 if (avpkt.size && avpkt.size != pkt->size &&
2292 !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
2293 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2294 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2295 ist->showed_multi_packet_warning = 1;
2298 switch (ist->dec_ctx->codec_type) {
2299 case AVMEDIA_TYPE_AUDIO:
2300 ret = decode_audio (ist, &avpkt, &got_output);
2302 case AVMEDIA_TYPE_VIDEO:
2303 ret = decode_video (ist, &avpkt, &got_output);
/* derive frame duration: packet duration, else codec framerate */
2304 if (avpkt.duration) {
2305 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2306 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2307 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2308 duration = ((int64_t)AV_TIME_BASE *
2309 ist->dec_ctx->framerate.den * ticks) /
2310 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2314 if(ist->dts != AV_NOPTS_VALUE && duration) {
2315 ist->next_dts += duration;
2317 ist->next_dts = AV_NOPTS_VALUE;
2320 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2322 case AVMEDIA_TYPE_SUBTITLE:
2323 ret = transcode_subtitles(ist, &avpkt, &got_output);
2330 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2331 ist->file_index, ist->st->index, av_err2str(ret));
2338 avpkt.pts= AV_NOPTS_VALUE;
2340 // touch data and size only if not EOF
2342 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2350 if (got_output && !pkt)
2354 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2355 if (!pkt && ist->decoding_needed && !got_output) {
2356 int ret = send_filter_eof(ist);
2358 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2363 /* handle stream copy */
2364 if (!ist->decoding_needed) {
2365 ist->dts = ist->next_dts;
2366 switch (ist->dec_ctx->codec_type) {
2367 case AVMEDIA_TYPE_AUDIO:
2368 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2369 ist->dec_ctx->sample_rate;
2371 case AVMEDIA_TYPE_VIDEO:
2372 if (ist->framerate.num) {
2373 // TODO: Remove work-around for c99-to-c89 issue 7
2374 AVRational time_base_q = AV_TIME_BASE_Q;
/* snap to the forced -r framerate grid, then advance one frame */
2375 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2376 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2377 } else if (pkt->duration) {
2378 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2379 } else if(ist->dec_ctx->framerate.num != 0) {
2380 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2381 ist->next_dts += ((int64_t)AV_TIME_BASE *
2382 ist->dec_ctx->framerate.den * ticks) /
2383 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2387 ist->pts = ist->dts;
2388 ist->next_pts = ist->next_dts;
2390 for (i = 0; pkt && i < nb_output_streams; i++) {
2391 OutputStream *ost = output_streams[i];
2393 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2396 do_streamcopy(ist, ost, pkt);
2402 static void print_sdp(void)
2407 AVIOContext *sdp_pb;
2408 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2412 for (i = 0, j = 0; i < nb_output_files; i++) {
2413 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2414 avc[j] = output_files[i]->ctx;
2419 av_sdp_create(avc, j, sdp, sizeof(sdp));
2421 if (!sdp_filename) {
2422 printf("SDP:\n%s\n", sdp);
2425 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2426 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2428 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2429 avio_closep(&sdp_pb);
2430 av_freep(&sdp_filename);
2437 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2440 for (i = 0; hwaccels[i].name; i++)
2441 if (hwaccels[i].pix_fmt == pix_fmt)
2442 return &hwaccels[i];
2446 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2448 InputStream *ist = s->opaque;
2449 const enum AVPixelFormat *p;
2452 for (p = pix_fmts; *p != -1; p++) {
2453 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2454 const HWAccel *hwaccel;
2456 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2459 hwaccel = get_hwaccel(*p);
2461 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2462 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2465 ret = hwaccel->init(s);
2467 if (ist->hwaccel_id == hwaccel->id) {
2468 av_log(NULL, AV_LOG_FATAL,
2469 "%s hwaccel requested for input stream #%d:%d, "
2470 "but cannot be initialized.\n", hwaccel->name,
2471 ist->file_index, ist->st->index);
2472 return AV_PIX_FMT_NONE;
2476 ist->active_hwaccel_id = hwaccel->id;
2477 ist->hwaccel_pix_fmt = *p;
2484 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2486 InputStream *ist = s->opaque;
2488 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2489 return ist->hwaccel_get_buffer(s, frame, flags);
2491 return avcodec_default_get_buffer2(s, frame, flags);
2494 static int init_input_stream(int ist_index, char *error, int error_len)
2497 InputStream *ist = input_streams[ist_index];
2499 if (ist->decoding_needed) {
2500 AVCodec *codec = ist->dec;
2502 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2503 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2504 return AVERROR(EINVAL);
2507 ist->dec_ctx->opaque = ist;
2508 ist->dec_ctx->get_format = get_format;
2509 ist->dec_ctx->get_buffer2 = get_buffer;
2510 ist->dec_ctx->thread_safe_callbacks = 1;
2512 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2513 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2514 (ist->decoding_needed & DECODING_FOR_OST)) {
2515 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2516 if (ist->decoding_needed & DECODING_FOR_FILTER)
2517 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2520 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2521 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2522 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2523 if (ret == AVERROR_EXPERIMENTAL)
2524 abort_codec_experimental(codec, 0);
2526 snprintf(error, error_len,
2527 "Error while opening decoder for input stream "
2529 ist->file_index, ist->st->index, av_err2str(ret));
2532 assert_avoptions(ist->decoder_opts);
2535 ist->next_pts = AV_NOPTS_VALUE;
2536 ist->next_dts = AV_NOPTS_VALUE;
2541 static InputStream *get_input_stream(OutputStream *ost)
2543 if (ost->source_index >= 0)
2544 return input_streams[ost->source_index];
/**
 * qsort() comparator ordering int64_t values ascending.
 * Uses explicit comparisons rather than subtraction, since subtracting
 * two arbitrary int64_t values can overflow (undefined behavior) and
 * cannot be narrowed to int safely anyway.
 */
static int compare_int64(const void *a, const void *b)
{
    int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
    return va < vb ? -1 : va > vb ? +1 : 0;
}
2554 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2555 AVCodecContext *avctx)
2558 int n = 1, i, size, index = 0;
2561 for (p = kf; *p; p++)
2565 pts = av_malloc_array(size, sizeof(*pts));
2567 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2572 for (i = 0; i < n; i++) {
2573 char *next = strchr(p, ',');
2578 if (!memcmp(p, "chapters", 8)) {
2580 AVFormatContext *avf = output_files[ost->file_index]->ctx;
2583 if (avf->nb_chapters > INT_MAX - size ||
2584 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2586 av_log(NULL, AV_LOG_FATAL,
2587 "Could not allocate forced key frames array.\n");
2590 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2591 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2593 for (j = 0; j < avf->nb_chapters; j++) {
2594 AVChapter *c = avf->chapters[j];
2595 av_assert1(index < size);
2596 pts[index++] = av_rescale_q(c->start, c->time_base,
2597 avctx->time_base) + t;
2602 t = parse_time_or_die("force_key_frames", p, 1);
2603 av_assert1(index < size);
2604 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2611 av_assert0(index == size);
2612 qsort(pts, size, sizeof(*pts), compare_int64);
2613 ost->forced_kf_count = size;
2614 ost->forced_kf_pts = pts;
2617 static void report_new_stream(int input_index, AVPacket *pkt)
2619 InputFile *file = input_files[input_index];
2620 AVStream *st = file->ctx->streams[pkt->stream_index];
2622 if (pkt->stream_index < file->nb_streams_warn)
2624 av_log(file->ctx, AV_LOG_WARNING,
2625 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2626 av_get_media_type_string(st->codec->codec_type),
2627 input_index, pkt->stream_index,
2628 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2629 file->nb_streams_warn = pkt->stream_index + 1;
2632 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2634 AVDictionaryEntry *e;
2636 uint8_t *encoder_string;
2637 int encoder_string_len;
2638 int format_flags = 0;
2639 int codec_flags = 0;
2641 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2644 e = av_dict_get(of->opts, "fflags", NULL, 0);
2646 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2649 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2651 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2653 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2656 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2659 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2660 encoder_string = av_mallocz(encoder_string_len);
2661 if (!encoder_string)
2664 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & CODEC_FLAG_BITEXACT))
2665 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2667 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2668 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2669 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2670 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * transcode_init(): one-time setup before the main transcode loop —
 * binds complex-filtergraph outputs to source streams, derives encoder
 * parameters for every output stream (stream copy or re-encode), opens
 * all decoders and encoders, writes the output file headers, and dumps
 * the final stream mapping.
 *
 * NOTE(review): this extraction is missing many original source lines
 * (the leading numbers are the original file's own line numbers, and
 * the gaps in them mark dropped lines — closing braces, else branches,
 * goto targets, etc.). The code below is kept byte-for-byte; only
 * comments were added. Do not compile as-is; restore the missing lines
 * from the original file first.
 */
2673 static int transcode_init(void)
2675 int ret = 0, i, j, k;
2676 AVFormatContext *oc;
2679 char error[1024] = {0};
/* Give each complex-filtergraph output a source_index pointing at the
 * input stream that feeds the graph (only valid for 1-input graphs). */
2682 for (i = 0; i < nb_filtergraphs; i++) {
2683 FilterGraph *fg = filtergraphs[i];
2684 for (j = 0; j < fg->nb_outputs; j++) {
2685 OutputFilter *ofilter = fg->outputs[j];
2686 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2688 if (fg->nb_inputs != 1)
2690 for (k = nb_input_streams-1; k >= 0 ; k--)
2691 if (fg->inputs[0]->ist == input_streams[k])
2693 ofilter->ost->source_index = k;
2697 /* init framerate emulation */
2698 for (i = 0; i < nb_input_files; i++) {
2699 InputFile *ifile = input_files[i];
2700 if (ifile->rate_emu)
2701 for (j = 0; j < ifile->nb_streams; j++)
2702 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2705 /* output stream init */
2706 for (i = 0; i < nb_output_files; i++) {
2707 oc = output_files[i]->ctx;
2708 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2709 av_dump_format(oc, i, oc->filename, 1);
2710 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2711 return AVERROR(EINVAL);
2715 /* init complex filtergraphs */
2716 for (i = 0; i < nb_filtergraphs; i++)
2717 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2720 /* for each output stream, we compute the right encoding parameters */
2721 for (i = 0; i < nb_output_streams; i++) {
2722 AVCodecContext *enc_ctx;
2723 AVCodecContext *dec_ctx = NULL;
2724 ost = output_streams[i];
2725 oc = output_files[ost->file_index]->ctx;
2726 ist = get_input_stream(ost);
2728 if (ost->attachment_filename)
/* for stream copy the muxer reads parameters from st->codec, so fill
 * that context directly instead of enc_ctx */
2731 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2734 dec_ctx = ist->dec_ctx;
2736 ost->st->disposition = ist->st->disposition;
2737 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2738 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* mark the stream "default" when it is the only one of its type */
2740 for (j=0; j<oc->nb_streams; j++) {
2741 AVStream *st = oc->streams[j];
2742 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2745 if (j == oc->nb_streams)
2746 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2747 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* --- stream copy path: carry decoder parameters over unchanged --- */
2750 if (ost->stream_copy) {
2752 uint64_t extra_size;
2754 av_assert0(ist && !ost->filter);
2756 extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2758 if (extra_size > INT_MAX) {
2759 return AVERROR(EINVAL);
2762 /* if stream_copy is selected, no need to decode or encode */
2763 enc_ctx->codec_id = dec_ctx->codec_id;
2764 enc_ctx->codec_type = dec_ctx->codec_type;
2766 if (!enc_ctx->codec_tag) {
2767 unsigned int codec_tag;
/* keep the input fourcc only when the output container either has no
 * tag table or maps it back to the same codec id */
2768 if (!oc->oformat->codec_tag ||
2769 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2770 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2771 enc_ctx->codec_tag = dec_ctx->codec_tag;
2774 enc_ctx->bit_rate = dec_ctx->bit_rate;
2775 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2776 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2777 enc_ctx->field_order = dec_ctx->field_order;
2778 if (dec_ctx->extradata_size) {
2779 enc_ctx->extradata = av_mallocz(extra_size);
2780 if (!enc_ctx->extradata) {
2781 return AVERROR(ENOMEM);
2783 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2785 enc_ctx->extradata_size= dec_ctx->extradata_size;
2786 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2788 enc_ctx->time_base = ist->st->time_base;
2790 * Avi is a special case here because it supports variable fps but
2791 * having the fps and timebase differe significantly adds quite some
/* container-specific timebase heuristics, controlled by -copytb */
2794 if(!strcmp(oc->oformat->name, "avi")) {
2795 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2796 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2797 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2798 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2800 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2801 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2802 enc_ctx->ticks_per_frame = 2;
2803 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2804 && av_q2d(ist->st->time_base) < 1.0/500
2806 enc_ctx->time_base = dec_ctx->time_base;
2807 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2808 enc_ctx->time_base.den *= 2;
2809 enc_ctx->ticks_per_frame = 2;
2811 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2812 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2813 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2814 && strcmp(oc->oformat->name, "f4v")
2816 if( copy_tb<0 && dec_ctx->time_base.den
2817 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2818 && av_q2d(ist->st->time_base) < 1.0/500
2820 enc_ctx->time_base = dec_ctx->time_base;
2821 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* tmcd (timecode) tracks: keep the codec timebase when it looks like
 * a sane frame duration (num/den ratio bounded by ~1/121..1) */
2824 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2825 && dec_ctx->time_base.num < dec_ctx->time_base.den
2826 && dec_ctx->time_base.num > 0
2827 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2828 enc_ctx->time_base = dec_ctx->time_base;
2831 if (ist && !ost->frame_rate.num)
2832 ost->frame_rate = ist->framerate;
2833 if(ost->frame_rate.num)
2834 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2836 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2837 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* deep-copy stream side data (e.g. display matrix), skipping the
 * display matrix when -metadata rotate overrode it */
2839 if (ist->st->nb_side_data) {
2840 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2841 sizeof(*ist->st->side_data));
2842 if (!ost->st->side_data)
2843 return AVERROR(ENOMEM);
2845 ost->st->nb_side_data = 0;
2846 for (j = 0; j < ist->st->nb_side_data; j++) {
2847 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2848 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2850 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2853 sd_dst->data = av_malloc(sd_src->size);
2855 return AVERROR(ENOMEM);
2856 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2857 sd_dst->size = sd_src->size;
2858 sd_dst->type = sd_src->type;
2859 ost->st->nb_side_data++;
2863 ost->parser = av_parser_init(enc_ctx->codec_id);
/* per-media-type parameter copies for the stream-copy case */
2865 switch (enc_ctx->codec_type) {
2866 case AVMEDIA_TYPE_AUDIO:
2867 if (audio_volume != 256) {
2868 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2871 enc_ctx->channel_layout = dec_ctx->channel_layout;
2872 enc_ctx->sample_rate = dec_ctx->sample_rate;
2873 enc_ctx->channels = dec_ctx->channels;
2874 enc_ctx->frame_size = dec_ctx->frame_size;
2875 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2876 enc_ctx->block_align = dec_ctx->block_align;
2877 enc_ctx->initial_padding = dec_ctx->delay;
2878 #if FF_API_AUDIOENC_DELAY
2879 enc_ctx->delay = dec_ctx->delay;
/* MP3/AC-3 report bogus block_align values; zero them so the muxer
 * does not rely on them */
2881 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2882 enc_ctx->block_align= 0;
2883 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2884 enc_ctx->block_align= 0;
2886 case AVMEDIA_TYPE_VIDEO:
2887 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2888 enc_ctx->width = dec_ctx->width;
2889 enc_ctx->height = dec_ctx->height;
2890 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2891 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2893 av_mul_q(ost->frame_aspect_ratio,
2894 (AVRational){ enc_ctx->height, enc_ctx->width });
2895 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2896 "with stream copy may produce invalid files\n");
2898 else if (ist->st->sample_aspect_ratio.num)
2899 sar = ist->st->sample_aspect_ratio;
2901 sar = dec_ctx->sample_aspect_ratio;
2902 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2903 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2904 ost->st->r_frame_rate = ist->st->r_frame_rate;
2906 case AVMEDIA_TYPE_SUBTITLE:
2907 enc_ctx->width = dec_ctx->width;
2908 enc_ctx->height = dec_ctx->height;
2910 case AVMEDIA_TYPE_UNKNOWN:
2911 case AVMEDIA_TYPE_DATA:
2912 case AVMEDIA_TYPE_ATTACHMENT:
/* --- re-encode path: decoder + encoder + (simple) filtergraph --- */
2919 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
2921 /* should only happen when a default codec is not present. */
2922 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2923 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2924 ret = AVERROR(EINVAL);
2929 ist->decoding_needed |= DECODING_FOR_OST;
2930 ost->encoding_needed = 1;
2932 set_encoder_id(output_files[ost->file_index], ost);
/* outputs without an explicit filtergraph get a trivial one
 * (buffer source -> buffer sink) built here */
2935 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2936 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
2938 fg = init_simple_filtergraph(ist, ost);
2939 if (configure_filtergraph(fg)) {
2940 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* output frame rate: filter chain, then -r/input rate, then r_frame_rate,
 * then a hard 25 fps fallback */
2945 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2946 if (!ost->frame_rate.num)
2947 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2948 if (ist && !ost->frame_rate.num)
2949 ost->frame_rate = ist->framerate;
2950 if (ist && !ost->frame_rate.num)
2951 ost->frame_rate = ist->st->r_frame_rate;
2952 if (ist && !ost->frame_rate.num) {
2953 ost->frame_rate = (AVRational){25, 1};
2954 av_log(NULL, AV_LOG_WARNING,
2956 "about the input framerate is available. Falling "
2957 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
2958 "if you want a different framerate.\n",
2959 ost->file_index, ost->index);
2961 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
2962 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2963 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2964 ost->frame_rate = ost->enc->supported_framerates[idx];
2966 // reduce frame rate for mpeg4 to be within the spec limits
2967 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
2968 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
2969 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* derive encoder parameters from the configured filtergraph output */
2973 switch (enc_ctx->codec_type) {
2974 case AVMEDIA_TYPE_AUDIO:
2975 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
2976 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2977 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2978 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
2979 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
2981 case AVMEDIA_TYPE_VIDEO:
2982 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2983 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
2984 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
2985 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
2986 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
2987 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
2988 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* rescale forced keyframe timestamps into the encoder timebase */
2990 for (j = 0; j < ost->forced_kf_count; j++)
2991 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
2993 enc_ctx->time_base);
2995 enc_ctx->width = ost->filter->filter->inputs[0]->w;
2996 enc_ctx->height = ost->filter->filter->inputs[0]->h;
2997 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2998 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
2999 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3000 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3001 if (!strncmp(ost->enc->name, "libx264", 7) &&
3002 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3003 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3004 av_log(NULL, AV_LOG_WARNING,
3005 "No pixel format specified, %s for H.264 encoding chosen.\n"
3006 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3007 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3008 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3009 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3010 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3011 av_log(NULL, AV_LOG_WARNING,
3012 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3013 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3014 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3015 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3017 ost->st->avg_frame_rate = ost->frame_rate;
3020 enc_ctx->width != dec_ctx->width ||
3021 enc_ctx->height != dec_ctx->height ||
3022 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3023 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either an expression ("expr:..."), the literal
 * "source" (keep input keyframes), or a static timestamp list */
3026 if (ost->forced_keyframes) {
3027 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3028 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3029 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3031 av_log(NULL, AV_LOG_ERROR,
3032 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3035 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3036 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3037 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3038 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3040 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3041 // parse it only for static kf timings
3042 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3043 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3047 case AVMEDIA_TYPE_SUBTITLE:
3048 enc_ctx->time_base = (AVRational){1, 1000};
3049 if (!enc_ctx->width) {
3050 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3051 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3054 case AVMEDIA_TYPE_DATA:
/* two-pass encoding log files (-pass 1 / -pass 2) */
3061 if (enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
3062 char logfilename[1024];
3065 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
3066 ost->logfile_prefix ? ost->logfile_prefix :
3067 DEFAULT_PASS_LOGFILENAME_PREFIX,
3069 if (!strcmp(ost->enc->name, "libx264")) {
3070 av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
3072 if (enc_ctx->flags & CODEC_FLAG_PASS2) {
3074 size_t logbuffer_size;
3075 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
3076 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
3080 enc_ctx->stats_in = logbuffer;
3082 if (enc_ctx->flags & CODEC_FLAG_PASS1) {
3083 f = av_fopen_utf8(logfilename, "wb");
3085 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
3086 logfilename, strerror(errno));
/* apply -disposition overrides via a local AVOption flag table */
3095 if (ost->disposition) {
3096 static const AVOption opts[] = {
3097 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3098 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3099 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3100 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3101 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3102 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3103 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3104 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3105 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3106 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3107 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3108 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3109 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3110 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
/* NOTE(review): "class" is a C identifier here but would clash if this
 * file were ever compiled as C++ */
3113 static const AVClass class = {
3115 .item_name = av_default_item_name,
3117 .version = LIBAVUTIL_VERSION_INT,
3119 const AVClass *pclass = &class;
3121 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3127 /* open each encoder */
3128 for (i = 0; i < nb_output_streams; i++) {
3129 ost = output_streams[i];
3130 if (ost->encoding_needed) {
3131 AVCodec *codec = ost->enc;
3132 AVCodecContext *dec = NULL;
3134 if ((ist = get_input_stream(ost)))
3136 if (dec && dec->subtitle_header) {
3137 /* ASS code assumes this buffer is null terminated so add extra byte. */
3138 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3139 if (!ost->enc_ctx->subtitle_header) {
3140 ret = AVERROR(ENOMEM);
3143 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3144 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3146 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3147 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3148 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
3150 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3151 if (ret == AVERROR_EXPERIMENTAL)
3152 abort_codec_experimental(codec, 1);
3153 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
3154 ost->file_index, ost->index);
/* fixed-frame-size audio encoders need the sink to emit exactly
 * frame_size samples per frame */
3157 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3158 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
3159 av_buffersink_set_frame_size(ost->filter->filter,
3160 ost->enc_ctx->frame_size);
3161 assert_avoptions(ost->encoder_opts);
3162 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3163 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3164 " It takes bits/s as argument, not kbits/s\n");
3166 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3168 av_log(NULL, AV_LOG_FATAL,
3169 "Error initializing the output stream codec context.\n");
3173 // copy timebase while removing common factors
3174 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3175 ost->st->codec->codec= ost->enc_ctx->codec;
3177 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3179 av_log(NULL, AV_LOG_FATAL,
3180 "Error setting up codec context options.\n");
3183 // copy timebase while removing common factors
3184 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
3188 /* init input streams */
3189 for (i = 0; i < nb_input_streams; i++)
3190 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* unwind: close every already-opened encoder before bailing out */
3191 for (i = 0; i < nb_output_streams; i++) {
3192 ost = output_streams[i];
3193 avcodec_close(ost->enc_ctx);
3198 /* discard unused programs */
3199 for (i = 0; i < nb_input_files; i++) {
3200 InputFile *ifile = input_files[i];
3201 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3202 AVProgram *p = ifile->ctx->programs[j];
3203 int discard = AVDISCARD_ALL;
3205 for (k = 0; k < p->nb_stream_indexes; k++)
3206 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3207 discard = AVDISCARD_DEFAULT;
3210 p->discard = discard;
3214 /* open files and write file headers */
3215 for (i = 0; i < nb_output_files; i++) {
3216 oc = output_files[i]->ctx;
3217 oc->interrupt_callback = int_cb;
3218 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3219 snprintf(error, sizeof(error),
3220 "Could not write header for output file #%d "
3221 "(incorrect codec parameters ?): %s",
3222 i, av_err2str(ret));
3223 ret = AVERROR(EINVAL);
3226 // assert_avoptions(output_files[i]->opts);
3227 if (strcmp(oc->oformat->name, "rtp")) {
3233 /* dump the file output parameters - cannot be done before in case
3235 for (i = 0; i < nb_output_files; i++) {
3236 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3239 /* dump the stream mapping */
3240 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3241 for (i = 0; i < nb_input_streams; i++) {
3242 ist = input_streams[i];
3244 for (j = 0; j < ist->nb_filters; j++) {
3245 if (ist->filters[j]->graph->graph_desc) {
3246 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3247 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3248 ist->filters[j]->name);
3249 if (nb_filtergraphs > 1)
3250 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3251 av_log(NULL, AV_LOG_INFO, "\n");
3256 for (i = 0; i < nb_output_streams; i++) {
3257 ost = output_streams[i];
3259 if (ost->attachment_filename) {
3260 /* an attached file */
3261 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3262 ost->attachment_filename, ost->file_index, ost->index);
3266 if (ost->filter && ost->filter->graph->graph_desc) {
3267 /* output from a complex graph */
3268 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3269 if (nb_filtergraphs > 1)
3270 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3272 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3273 ost->index, ost->enc ? ost->enc->name : "?");
3277 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3278 input_streams[ost->source_index]->file_index,
3279 input_streams[ost->source_index]->st->index,
3282 if (ost->sync_ist != input_streams[ost->source_index])
3283 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3284 ost->sync_ist->file_index,
3285 ost->sync_ist->st->index);
3286 if (ost->stream_copy)
3287 av_log(NULL, AV_LOG_INFO, " (copy)");
/* print "native" when decoder/encoder name equals the codec name */
3289 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3290 const AVCodec *out_codec = ost->enc;
3291 const char *decoder_name = "?";
3292 const char *in_codec_name = "?";
3293 const char *encoder_name = "?";
3294 const char *out_codec_name = "?";
3295 const AVCodecDescriptor *desc;
3298 decoder_name = in_codec->name;
3299 desc = avcodec_descriptor_get(in_codec->id);
3301 in_codec_name = desc->name;
3302 if (!strcmp(decoder_name, in_codec_name))
3303 decoder_name = "native";
3307 encoder_name = out_codec->name;
3308 desc = avcodec_descriptor_get(out_codec->id);
3310 out_codec_name = desc->name;
3311 if (!strcmp(encoder_name, out_codec_name))
3312 encoder_name = "native";
3315 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3316 in_codec_name, decoder_name,
3317 out_codec_name, encoder_name);
3319 av_log(NULL, AV_LOG_INFO, "\n");
3323 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3327 if (sdp_filename || want_sdp) {
3331 transcode_init_done = 1;
3336 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3337 static int need_output(void)
3341 for (i = 0; i < nb_output_streams; i++) {
3342 OutputStream *ost = output_streams[i];
3343 OutputFile *of = output_files[ost->file_index];
3344 AVFormatContext *os = output_files[ost->file_index]->ctx;
3346 if (ost->finished ||
3347 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3349 if (ost->frame_number >= ost->max_frames) {
3351 for (j = 0; j < of->ctx->nb_streams; j++)
3352 close_output_stream(output_streams[of->ost_index + j]);
3363 * Select the output stream to process.
3365 * @return selected output stream, or NULL if none available
3367 static OutputStream *choose_output(void)
3370 int64_t opts_min = INT64_MAX;
3371 OutputStream *ost_min = NULL;
3373 for (i = 0; i < nb_output_streams; i++) {
3374 OutputStream *ost = output_streams[i];
3375 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3377 if (!ost->finished && opts < opts_min) {
3379 ost_min = ost->unavailable ? NULL : ost;
3385 static int check_keyboard_interaction(int64_t cur_time)
/* Interactive keyboard handler for the console: reads at most one key and
 * performs the associated control/debug action. Returns AVERROR_EXIT to
 * request shutdown (after a signal, or presumably on the quit key in an
 * elided branch), 0 otherwise. NOTE(review): several lines of this function
 * are elided in this listing; comments describe only the visible code. */
3388 static int64_t last_time;
/* A pending signal always aborts, regardless of key input. */
3389 if (received_nb_signals)
3390 return AVERROR_EXIT;
3391 /* read_key() returns 0 on EOF */
/* Poll at most once per 100ms (cur_time is in microseconds) and never
 * when running detached as a daemon. */
3392 if(cur_time - last_time >= 100000 && !run_as_daemon){
3394 last_time = cur_time;
3398 return AVERROR_EXIT;
/* '+'/'-' adjust log verbosity in steps of one level (10). */
3399 if (key == '+') av_log_set_level(av_log_get_level()+10);
3400 if (key == '-') av_log_set_level(av_log_get_level()-10);
/* 's' toggles the QP histogram display. */
3401 if (key == 's') qp_hist ^= 1;
/* 'h' (elided above) cycles packet dump modes: off -> dump -> dump+hex. */
3404 do_hex_dump = do_pkt_dump = 0;
3405 } else if(do_pkt_dump){
3409 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a filtergraph command from stdin and send ('c') or
 * queue ('C') it to matching filters. */
3411 if (key == 'c' || key == 'C'){
3412 char buf[4096], target[64], command[256], arg[256] = {0};
3415 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Accumulate one input line key-by-key (bounded by sizeof(buf)-1). */
3417 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
/* Expect at least target, time and command; arg is optional. */
3422 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3423 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3424 target, time, command, arg);
3425 for (i = 0; i < nb_filtergraphs; i++) {
3426 FilterGraph *fg = filtergraphs[i];
/* Immediate send: 'c' stops at the first filter that handles it,
 * 'C' broadcasts to all matching filters. */
3429 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3430 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3431 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3432 } else if (key == 'c') {
/* Queued (timed) commands cannot be restricted to "first supporting
 * filter", so 'c' with a future time is rejected. */
3433 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3434 ret = AVERROR_PATCHWELCOME;
3436 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3438 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3443 av_log(NULL, AV_LOG_ERROR,
3444 "Parse error, at least 3 arguments were expected, "
3445 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle ('d') or prompt for ('D', presumably the elided branch)
 * a codec debug-flag value applied to all input and output streams. */
3448 if (key == 'd' || key == 'D'){
3451 debug = input_streams[0]->st->codec->debug<<1;
3452 if(!debug) debug = 1;
3453 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3456 if(scanf("%d", &debug)!=1)
3457 fprintf(stderr,"error parsing debug value\n");
3458 for(i=0;i<nb_input_streams;i++) {
3459 input_streams[i]->st->codec->debug = debug;
3461 for(i=0;i<nb_output_streams;i++) {
3462 OutputStream *ost = output_streams[i];
3463 ost->enc_ctx->debug = debug;
/* Any nonzero debug value also raises the global log level. */
3465 if(debug) av_log_set_level(AV_LOG_DEBUG);
3466 fprintf(stderr,"debug=%d\n", debug);
/* '?' (elided): print this key help text. */
3469 fprintf(stderr, "key function\n"
3470 "? show this help\n"
3471 "+ increase verbosity\n"
3472 "- decrease verbosity\n"
3473 "c Send command to first matching filter supporting it\n"
3474 "C Send/Que command to all matching filters\n"
3475 "D cycle through available debug modes\n"
3476 "h dump packets/hex press to cycle through the 3 states\n"
3478 "s Show QP histogram\n"
3485 static void *input_thread(void *arg)
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through a thread message queue.
 * arg is the InputFile* (assigned to f in an elided line). On any terminal
 * error the error is propagated to the receiving side of the queue.
 * NOTE(review): several lines are elided in this listing. */
3488 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3493 ret = av_read_frame(f->ctx, &pkt);
/* EAGAIN from the demuxer: back off and retry (retry logic elided). */
3495 if (ret == AVERROR(EAGAIN)) {
/* Terminal read error/EOF: tell the consumer side and stop. */
3500 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Duplicate packet data so it survives queuing across threads. */
3503 av_dup_packet(&pkt);
3504 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send failed because the queue is full: fall back to a
 * blocking send once and warn the user to raise -thread_queue_size. */
3505 if (flags && ret == AVERROR(EAGAIN)) {
3507 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3508 av_log(f->ctx, AV_LOG_WARNING,
3509 "Thread message queue blocking; consider raising the "
3510 "thread_queue_size option (current value: %d)\n",
3511 f->thread_queue_size);
/* Send failed for another reason: log it (EOF is expected, not an error),
 * free the packet and propagate the error to the receiver. */
3514 if (ret != AVERROR_EOF)
3515 av_log(f->ctx, AV_LOG_ERROR,
3516 "Unable to send packet to main thread: %s\n",
3518 av_free_packet(&pkt);
3519 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3527 static void free_input_threads(void)
/* Shut down and reap all input demuxer threads: signal EOF to the sending
 * side of each queue, drain and free any queued packets, join the thread,
 * then free the queue itself. Safe to call when threads were never started
 * (NULL queue check below). */
3531 for (i = 0; i < nb_input_files; i++) {
3532 InputFile *f = input_files[i];
/* Skip files that never got a thread/queue (e.g. single-input runs). */
3535 if (!f || !f->in_thread_queue)
/* Make the producer thread's next send fail with EOF so it exits. */
3537 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain packets still in flight so their buffers are not leaked. */
3538 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3539 av_free_packet(&pkt);
3541 pthread_join(f->thread, NULL);
3543 av_thread_message_queue_free(&f->in_thread_queue);
3547 static int init_input_threads(void)
/* Start one demuxer thread per input file (only when there is more than
 * one input file). Returns 0 on success or a negative AVERROR on failure.
 * NOTE(review): some lines (declarations, returns) are elided here. */
/* A single input needs no threading; packets are read inline. */
3551 if (nb_input_files == 1)
3554 for (i = 0; i < nb_input_files; i++) {
3555 InputFile *f = input_files[i];
/* Use non-blocking queue sends for inputs that cannot be re-read at the
 * producer's pace: non-seekable I/O, or any non-lavfi input without pb. */
3557 if (f->ctx->pb ? !f->ctx->pb->seekable :
3558 strcmp(f->ctx->iformat->name, "lavfi"))
3559 f->non_blocking = 1;
3560 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3561 f->thread_queue_size, sizeof(AVPacket));
3565 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
/* pthread_create returns an errno value directly (not via errno). */
3566 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3567 av_thread_message_queue_free(&f->in_thread_queue);
3568 return AVERROR(ret);
3574 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
/* Multithreaded packet fetch: receive one packet from this file's demuxer
 * thread queue, non-blocking when the condition on the elided line holds
 * (presumably f->non_blocking — confirm against full source). */
3576 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3578 AV_THREAD_MESSAGE_NONBLOCK : 0);
3582 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Fetch the next packet for input file f, honoring -re style rate limiting
 * (visible below): if any stream's presentation clock is ahead of wall
 * clock, report EAGAIN instead of reading. Falls back to the threaded
 * queue for multi-input runs, or a direct av_read_frame() otherwise.
 * NOTE(review): the guard enabling the rate-limit loop is elided. */
3586 for (i = 0; i < f->nb_streams; i++) {
3587 InputStream *ist = input_streams[f->ist_index + i];
/* Convert the stream's DTS to microseconds and compare against elapsed
 * wall-clock time since the stream started. */
3588 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3589 int64_t now = av_gettime_relative() - ist->start;
3591 return AVERROR(EAGAIN);
/* Multiple inputs: packets come from the per-file demuxer thread. */
3596 if (nb_input_files > 1)
3597 return get_input_packet_mt(f, pkt);
3599 return av_read_frame(f->ctx, pkt);
3602 static int got_eagain(void)
/* Report whether any output stream is currently marked unavailable
 * (i.e. a previous step hit EAGAIN somewhere). Return statements are
 * elided in this listing. */
3605 for (i = 0; i < nb_output_streams; i++)
3606 if (output_streams[i]->unavailable)
3611 static void reset_eagain(void)
/* Clear all EAGAIN bookkeeping: every input file's eagain flag and every
 * output stream's unavailable flag, so the next transcode step retries
 * everything. */
3614 for (i = 0; i < nb_input_files; i++)
3615 input_files[i]->eagain = 0;
3616 for (i = 0; i < nb_output_streams; i++)
3617 output_streams[i]->unavailable = 0;
3622 * - 0 -- one packet was read and processed
3623 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3624 * this function should be called again
3625 * - AVERROR_EOF -- this function should not be called again
3627 static int process_input(int file_index)
/* Read one packet from the given input file, fix up its timestamps
 * (start-time/wrap correction, ts_offset, ts_scale, discontinuity
 * handling), attach stream-global side data to the first packet, and hand
 * it to process_input_packet(). NOTE(review): many lines are elided in
 * this listing; comments below describe only the visible code. */
3629 InputFile *ifile = input_files[file_index];
3630 AVFormatContext *is;
3636 ret = get_input_packet(ifile, &pkt);
/* No packet available right now: caller should retry later. */
3638 if (ret == AVERROR(EAGAIN)) {
/* Real read error (not EOF): report it against the file name. */
3643 if (ret != AVERROR_EOF) {
3644 print_error(is->filename, ret);
/* EOF path: flush every decoder of this file by sending a NULL packet,
 * then finish outputs fed directly (stream copy / subtitles). */
3649 for (i = 0; i < ifile->nb_streams; i++) {
3650 ist = input_streams[ifile->ist_index + i];
3651 if (ist->decoding_needed) {
3652 ret = process_input_packet(ist, NULL);
3657 /* mark all outputs that don't go through lavfi as finished */
3658 for (j = 0; j < nb_output_streams; j++) {
3659 OutputStream *ost = output_streams[j];
3661 if (ost->source_index == ifile->ist_index + i &&
3662 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3663 finish_output_stream(ost);
/* Report EAGAIN (not EOF) so the main loop gives filters/other inputs a
 * chance to drain before concluding. */
3667 ifile->eof_reached = 1;
3668 return AVERROR(EAGAIN);
/* Optional packet dump for debugging (guard elided; controlled by
 * do_pkt_dump/do_hex_dump set in check_keyboard_interaction). */
3674 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3675 is->streams[pkt.stream_index]);
3677 /* the following test is needed in case new streams appear
3678 dynamically in stream : we ignore them */
3679 if (pkt.stream_index >= ifile->nb_streams) {
3680 report_new_stream(file_index, &pkt);
3681 goto discard_packet;
3684 ist = input_streams[ifile->ist_index + pkt.stream_index];
/* Accounting: total demuxed bytes for this stream. */
3686 ist->data_size += pkt.size;
3690 goto discard_packet;
/* Verbose demux-side timestamp trace (guard elided; debug_ts option). */
3693 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3694 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3695 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3696 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3697 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3698 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3699 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3700 av_ts2str(input_files[ist->file_index]->ts_offset),
3701 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for containers with limited pts_wrap_bits
 * (e.g. MPEG-TS 33-bit timestamps). Only attempted once per stream. */
3704 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3705 int64_t stime, stime2;
3706 // Correcting starttime based on the enabled streams
3707 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3708 // so we instead do it here as part of discontinuity handling
3709 if ( ist->next_dts == AV_NOPTS_VALUE
3710 && ifile->ts_offset == -is->start_time
3711 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
/* Recompute the effective start time as the minimum start time over all
 * non-discarded streams, in AV_TIME_BASE units. */
3712 int64_t new_start_time = INT64_MAX;
3713 for (i=0; i<is->nb_streams; i++) {
3714 AVStream *st = is->streams[i];
3715 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3717 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3719 if (new_start_time > is->start_time) {
3720 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3721 ifile->ts_offset = -new_start_time;
/* stime/stime2 bracket one wrap period in the stream's own time base. */
3725 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3726 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3727 ist->wrap_correction_done = 1;
/* If a timestamp lies more than half a wrap period past the start, assume
 * it pre-wrapped and unwrap it; keep trying on later packets in that case. */
3729 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3730 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3731 ist->wrap_correction_done = 0;
3733 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3734 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3735 ist->wrap_correction_done = 0;
3739 /* add the stream-global side data to the first packet */
3740 if (ist->nb_packets == 1) {
3741 if (ist->st->nb_side_data)
3742 av_packet_split_side_data(&pkt);
3743 for (i = 0; i < ist->st->nb_side_data; i++) {
3744 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Skip side data the packet already carries. */
3747 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Display matrix is handled by the autorotate filter instead. */
3749 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3752 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3756 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset (in AV_TIME_BASE units) ... */
3760 if (pkt.dts != AV_NOPTS_VALUE)
3761 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3762 if (pkt.pts != AV_NOPTS_VALUE)
3763 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
/* ... then the per-stream -itsscale factor. */
3765 if (pkt.pts != AV_NOPTS_VALUE)
3766 pkt.pts *= ist->ts_scale;
3767 if (pkt.dts != AV_NOPTS_VALUE)
3768 pkt.dts *= ist->ts_scale;
/* Inter-file/stream discontinuity: before the first decoded DTS of this
 * stream, compare against the file's last seen timestamp and fold large
 * jumps into ts_offset (only for AVFMT_TS_DISCONT formats, not -copyts). */
3770 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3771 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3772 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3773 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3774 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3775 int64_t delta = pkt_dts - ifile->last_ts;
3776 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3777 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3778 ifile->ts_offset -= delta;
3779 av_log(NULL, AV_LOG_DEBUG,
3780 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3781 delta, ifile->ts_offset);
3782 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3783 if (pkt.pts != AV_NOPTS_VALUE)
3784 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Intra-stream discontinuity: compare packet DTS against the predicted
 * next_dts. DISCONT formats absorb the jump into ts_offset; otherwise
 * (else-branch partly elided) wildly-off timestamps are dropped. */
3788 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3789 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3790 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3792 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3793 int64_t delta = pkt_dts - ist->next_dts;
3794 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3795 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3796 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3797 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3798 ifile->ts_offset -= delta;
3799 av_log(NULL, AV_LOG_DEBUG,
3800 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3801 delta, ifile->ts_offset);
3802 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3803 if (pkt.pts != AV_NOPTS_VALUE)
3804 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-DISCONT formats: timestamps beyond dts_error_threshold are invalid
 * and get cleared rather than corrected. */
3807 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3808 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3809 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3810 pkt.dts = AV_NOPTS_VALUE;
3812 if (pkt.pts != AV_NOPTS_VALUE){
3813 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3814 delta = pkt_pts - ist->next_dts;
3815 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3816 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3817 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3818 pkt.pts = AV_NOPTS_VALUE;
/* Remember the file's last timestamp for the inter-stream check above. */
3824 if (pkt.dts != AV_NOPTS_VALUE)
3825 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Post-correction timestamp trace (guard elided; debug_ts option). */
3828 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3829 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3830 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3831 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3832 av_ts2str(input_files[ist->file_index]->ts_offset),
3833 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video subtitle rendering in sync with this packet's PTS. */
3836 sub2video_heartbeat(ist, pkt.pts);
3838 process_input_packet(ist, &pkt);
3841 av_free_packet(&pkt);
3847 * Perform a step of transcoding for the specified filter graph.
3849 * @param[in] graph filter graph to consider
3850 * @param[out] best_ist input stream where a frame would allow to continue
3851 * @return 0 for success, <0 for error
3853 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
/* Ask the graph for output; if it instead needs input, pick the input
 * stream whose buffersrc has failed the most frame requests as *best_ist.
 * NOTE(review): some lines (e.g. *best_ist assignments) are elided. */
3856 int nb_requests, nb_requests_max = 0;
3857 InputFilter *ifilter;
3861 ret = avfilter_graph_request_oldest(graph->graph);
/* Graph produced something (or succeeded): drain available frames. */
3863 return reap_filters(0);
/* Graph is fully flushed: final reap, then close all its outputs. */
3865 if (ret == AVERROR_EOF) {
3866 ret = reap_filters(1);
3867 for (i = 0; i < graph->nb_outputs; i++)
3868 close_output_stream(graph->outputs[i]->ost);
/* Anything other than EAGAIN here is a real error. */
3871 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph is starved. Find the hungriest input whose file can
 * still deliver packets (skip files in EAGAIN or at EOF). */
3874 for (i = 0; i < graph->nb_inputs; i++) {
3875 ifilter = graph->inputs[i];
3877 if (input_files[ist->file_index]->eagain ||
3878 input_files[ist->file_index]->eof_reached)
3880 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3881 if (nb_requests > nb_requests_max) {
3882 nb_requests_max = nb_requests;
/* No feedable input found (elided branch, presumably): mark all of this
 * graph's outputs unavailable for this round. */
3888 for (i = 0; i < graph->nb_outputs; i++)
3889 graph->outputs[i]->ost->unavailable = 1;
3895 * Run a single step of transcoding.
3897 * @return 0 for success, <0 for error
3899 static int transcode_step(void)
/* One iteration of the main loop: pick the neediest output stream, decide
 * which input stream should feed it (directly or via its filtergraph),
 * read/process one packet from that input, then reap filter output.
 * NOTE(review): several lines are elided in this listing. */
3905 ost = choose_output();
/* No selectable output at all: nothing left to read, we are done. */
3912 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtered stream: let the graph tell us which input it is starving for. */
3917 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Non-filtered stream: feed directly from its source input stream. */
3922 av_assert0(ost->source_index >= 0);
3923 ist = input_streams[ost->source_index];
3926 ret = process_input(ist->file_index);
/* EAGAIN from the input: mark the output unavailable only if the file
 * itself is in EAGAIN (vs. a transient condition). */
3927 if (ret == AVERROR(EAGAIN)) {
3928 if (input_files[ist->file_index]->eagain)
3929 ost->unavailable = 1;
/* EOF is not an error at this level; other errors propagate. */
3934 return ret == AVERROR_EOF ? 0 : ret;
3936 return reap_filters(0);
3940 * The following code is the main loop of the file converter
3942 static int transcode(void)
/* Top-level conversion driver: initialize, loop over transcode_step()
 * until no output is needed or an error/signal occurs, then flush
 * decoders, write trailers, close codecs and free per-stream resources.
 * NOTE(review): many lines are elided in this listing. */
3945 AVFormatContext *os;
3948 int64_t timer_start;
3950 ret = transcode_init();
3954 if (stdin_interaction) {
3955 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3958 timer_start = av_gettime_relative();
/* Multi-input runs use per-file demuxer threads (HAVE_PTHREADS build). */
3961 if ((ret = init_input_threads()) < 0)
/* === Main loop: runs until SIGTERM, quit key, EOF or error. === */
3965 while (!received_sigterm) {
3966 int64_t cur_time= av_gettime_relative();
3968 /* if 'q' pressed, exits */
3969 if (stdin_interaction)
3970 if (check_keyboard_interaction(cur_time) < 0)
3973 /* check if there's any stream where output is still needed */
3974 if (!need_output()) {
3975 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3979 ret = transcode_step();
/* EOF/EAGAIN are handled inside the loop (elided); other negative
 * values are real filtering errors. */
3981 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3985 av_strerror(ret, errbuf, sizeof(errbuf));
3987 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3992 /* dump report by using the output first video and audio streams */
3993 print_report(0, timer_start, cur_time);
3996 free_input_threads();
3999 /* at the end of stream, we must flush the decoder buffers */
4000 for (i = 0; i < nb_input_streams; i++) {
4001 ist = input_streams[i];
4002 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4003 process_input_packet(ist, NULL);
4010 /* write the trailer if needed and close file */
4011 for (i = 0; i < nb_output_files; i++) {
4012 os = output_files[i]->ctx;
4013 av_write_trailer(os);
4016 /* dump report by using the first video and audio streams */
4017 print_report(1, timer_start, av_gettime_relative());
4019 /* close each encoder */
4020 for (i = 0; i < nb_output_streams; i++) {
4021 ost = output_streams[i];
4022 if (ost->encoding_needed) {
4023 av_freep(&ost->enc_ctx->stats_in);
4027 /* close each decoder */
4028 for (i = 0; i < nb_input_streams; i++) {
4029 ist = input_streams[i];
4030 if (ist->decoding_needed) {
4031 avcodec_close(ist->dec_ctx);
4032 if (ist->hwaccel_uninit)
4033 ist->hwaccel_uninit(ist->dec_ctx);
/* Error/cleanup path (label elided): tear down threads and per-stream
 * allocations; safe because free_input_threads() tolerates re-entry. */
4042 free_input_threads();
4045 if (output_streams) {
4046 for (i = 0; i < nb_output_streams; i++) {
4047 ost = output_streams[i];
/* Close the two-pass log file if one was open. */
4050 fclose(ost->logfile);
4051 ost->logfile = NULL;
4053 av_freep(&ost->forced_kf_pts);
4054 av_freep(&ost->apad);
4055 av_freep(&ost->disposition);
4056 av_dict_free(&ost->encoder_opts);
4057 av_dict_free(&ost->swr_opts);
4058 av_dict_free(&ost->resample_opts);
4059 av_dict_free(&ost->bsf_args);
4067 static int64_t getutime(void)
/* Return this process's consumed user CPU time in microseconds, using the
 * best available platform API; falls back to wall-clock time when neither
 * getrusage nor GetProcessTimes is available. */
4070 struct rusage rusage;
4072 getrusage(RUSAGE_SELF, &rusage);
4073 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4074 #elif HAVE_GETPROCESSTIMES
4076 FILETIME c, e, k, u;
4077 proc = GetCurrentProcess();
4078 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; combine the 64-bit value and divide by 10
 * to get microseconds. */
4079 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4081 return av_gettime_relative();
4085 static int64_t getmaxrss(void)
/* Return the process's peak memory usage in bytes for the -benchmark
 * report: max RSS via getrusage where available, peak pagefile usage via
 * psapi on Windows, otherwise (elided fallback) presumably 0. */
4087 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4088 struct rusage rusage;
4089 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes; convert to bytes. */
4090 return (int64_t)rusage.ru_maxrss * 1024;
4091 #elif HAVE_GETPROCESSMEMORYINFO
4093 PROCESS_MEMORY_COUNTERS memcounters;
4094 proc = GetCurrentProcess();
4095 memcounters.cb = sizeof(memcounters);
4096 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4097 return memcounters.PeakPagefileUsage;
/* No-op log callback used to silence libav* logging (installed when the
 * hidden "-d" daemon flag is given in main()). Body elided/empty. */
4103 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4107 int main(int argc, char **argv)
/* Program entry point: register cleanup, set up logging, register all
 * codecs/devices/filters/network, parse the command line, run transcode()
 * and report benchmark/decode-error statistics before exiting.
 * NOTE(review): several lines are elided in this listing. */
4112 register_exit(ffmpeg_cleanup);
4114 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4116 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Pre-scan argv for -loglevel so early messages honor it. */
4117 parse_loglevel(argc, argv, options);
/* Hidden "-d" flag: run detached with logging disabled. */
4119 if(argc>1 && !strcmp(argv[1], "-d")){
4121 av_log_set_callback(log_callback_null);
/* Register every component before option parsing can reference them. */
4126 avcodec_register_all();
4128 avdevice_register_all();
4130 avfilter_register_all();
4132 avformat_network_init();
4134 show_banner(argc, argv, options);
4138 /* parse options and open all input/output files */
4139 ret = ffmpeg_parse_options(argc, argv);
/* Bare invocation (no inputs and no outputs): point the user at help. */
4143 if (nb_output_files <= 0 && nb_input_files == 0) {
4145 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4149 /* file converter / grab */
4150 if (nb_output_files <= 0) {
4151 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4155 // if (nb_input_files == 0) {
4156 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Measure user CPU time around the conversion for -benchmark. */
4160 current_time = ti = getutime();
4161 if (transcode() < 0)
4163 ti = getutime() - ti;
4165 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4167 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4168 decode_error_stat[0], decode_error_stat[1]);
/* Fail the run if the decode error ratio exceeds -max_error_rate. */
4169 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit 255 when interrupted by a signal, else the accumulated status. */
4172 exit_program(received_nb_signals ? 255 : main_return_code);
4173 return main_return_code;