2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswresample/swresample.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/channel_layout.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/intreadwrite.h"
53 #include "libavutil/dict.h"
54 #include "libavutil/mathematics.h"
55 #include "libavutil/pixdesc.h"
56 #include "libavutil/avstring.h"
57 #include "libavutil/libm.h"
58 #include "libavutil/imgutils.h"
59 #include "libavutil/timestamp.h"
60 #include "libavutil/bprint.h"
61 #include "libavutil/time.h"
62 #include "libavutil/threadmessage.h"
63 #include "libavcodec/mathops.h"
64 #include "libavformat/os_support.h"
66 # include "libavfilter/avcodec.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
71 #if HAVE_SYS_RESOURCE_H
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
78 #if HAVE_GETPROCESSMEMORYINFO
82 #if HAVE_SETCONSOLECTRLHANDLER
88 #include <sys/select.h>
93 #include <sys/ioctl.h>
107 #include "cmdutils.h"
109 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for the banner/version output. */
111 const char program_name[] = "ffmpeg";
112 const int program_birth_year = 2000;
/* Output file for -vstats statistics; opened lazily in do_video_stats(). */
114 static FILE *vstats_file;
116 const char *const forced_keyframes_const_names[] = {
125 static void do_video_stats(OutputStream *ost, int frame_size);
126 static int64_t getutime(void);
127 static int64_t getmaxrss(void);
129 static int run_as_daemon = 0;
/* Global duplicate/drop counters updated by the video sync code. */
130 static int nb_frames_dup = 0;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
134 static int current_time;
135 AVIOContext *progress_avio = NULL;
/* Shared scratch buffer for encoded subtitles; freed in ffmpeg_cleanup(). */
137 static uint8_t *subtitle_out;
139 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
/* Global stream/file registries; sized by the matching nb_* counters. */
141 InputStream **input_streams = NULL;
142 int nb_input_streams = 0;
143 InputFile **input_files = NULL;
144 int nb_input_files = 0;
146 OutputStream **output_streams = NULL;
147 int nb_output_streams = 0;
148 OutputFile **output_files = NULL;
149 int nb_output_files = 0;
151 FilterGraph **filtergraphs;
156 /* init terminal so that we can grab keys */
/* Saved terminal state, restored by term_exit_sigsafe() when restore_tty is set. */
157 static struct termios oldtty;
158 static int restore_tty;
162 static void free_input_threads(void);
166 Convert subtitles to video with alpha to insert them in filter graphs.
167 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank RGB32 canvas frame matching the stream's sub2video
 * dimensions, for rendering bitmap subtitles into the filter graph.
 * NOTE(review): this extract omits some interior lines of the function. */
170 static int sub2video_get_blank_frame(InputStream *ist)
173 AVFrame *frame = ist->sub2video.frame;
/* release any previous buffer before requesting a fresh one */
175 av_frame_unref(frame);
176 ist->sub2video.frame->width = ist->sub2video.w;
177 ist->sub2video.frame->height = ist->sub2video.h;
178 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
179 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* clear the whole canvas (transparent, since RGB32 carries alpha) */
181 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle into the RGB32 canvas,
 * expanding palette indices via the rect's palette (pict.data[1]).
 * Rects that are not bitmaps or fall outside the w x h canvas are rejected
 * with a warning.  NOTE(review): extract omits some interior lines. */
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
188 uint32_t *pal, *dst2;
192 if (r->type != SUBTITLE_BITMAP) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
196 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
/* advance to the rect's top-left pixel; 4 bytes per RGB32 pixel */
201 dst += r->y * dst_linesize + r->x * 4;
202 src = r->pict.data[0];
203 pal = (uint32_t *)r->pict.data[1];
204 for (y = 0; y < r->h; y++) {
205 dst2 = (uint32_t *)dst;
207 for (x = 0; x < r->w; x++)
/* palette lookup: 8-bit index -> 32-bit RGBA */
208 *(dst2++) = pal[*(src2++)];
210 src += r->pict.linesize[0];
/* Push the current sub2video canvas (stamped with the given pts) into every
 * buffersrc feeding from this input stream; KEEP_REF so the same frame can
 * be re-sent later by the heartbeat. */
214 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 AVFrame *frame = ist->sub2video.frame;
219 av_assert1(frame->data[0]);
/* record the pts so sub2video_heartbeat() can tell if we're ahead */
220 ist->sub2video.last_pts = frame->pts = pts;
221 for (i = 0; i < ist->nb_filters; i++)
222 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
223 AV_BUFFERSRC_FLAG_KEEP_REF |
224 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto a fresh blank canvas and push it into the filter
 * graph.  A NULL sub produces an empty (clearing) frame; its pts is taken
 * from the previous subtitle's end time.  NOTE(review): extract omits some
 * interior lines (including the sub==NULL branch structure). */
227 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 int w = ist->sub2video.w, h = ist->sub2video.h;
230 AVFrame *frame = ist->sub2video.frame;
234 int64_t pts, end_pts;
/* display times are in ms relative to sub->pts (AV_TIME_BASE units) */
239 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240 AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
245 pts = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250 av_log(ist->dec_ctx, AV_LOG_ERROR,
251 "Impossible to get a blank canvas.\n");
254 dst = frame->data [0];
255 dst_linesize = frame->linesize[0];
256 for (i = 0; i < num_rects; i++)
257 sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
/* remember when this subtitle stops being displayed */
259 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame to sibling subtitle streams of the
 * same input file so video filters (e.g. overlay) are never starved waiting
 * for a subtitle frame.  NOTE(review): extract omits some interior lines. */
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 InputFile *infile = input_files[ist->file_index];
268 /* When a frame is read from a file, examine all sub2video streams in
269 the same file and send the sub2video frame again. Otherwise, decoded
270 video frames could be accumulating in the filter graph while a filter
271 (possibly overlay) is desperately waiting for a subtitle frame. */
272 for (i = 0; i < infile->nb_streams; i++) {
273 InputStream *ist2 = input_streams[infile->ist_index + i];
/* skip streams that are not sub2video-enabled */
274 if (!ist2->sub2video.frame)
276 /* subtitles seem to be usually muxed ahead of other streams;
277 if not, subtracting a larger time here is necessary */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
/* subtitle expired or canvas never initialised: push a clearing frame */
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283 sub2video_update(ist2, NULL);
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
/* At EOF: push one final clearing frame (if a subtitle is still showing)
 * and then signal EOF (NULL ref) to every attached buffersrc. */
291 static void sub2video_flush(InputStream *ist)
295 if (ist->sub2video.end_pts < INT64_MAX)
296 sub2video_update(ist, NULL);
297 for (i = 0; i < ist->nb_filters; i++)
298 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
301 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the saved termios state.
 * NOTE(review): extract omits the guard lines around tcsetattr. */
303 static void term_exit_sigsafe(void)
307 tcsetattr (0, TCSANOW, &oldtty);
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/shutdown state shared between the signal handler and main loop.
 * volatile because they are written from the async signal handler. */
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
/* Signal handler: record the signal and count occurrences; after more than
 * three signals, hard-exit (write(2) is async-signal-safe, av_log is not). */
324 sigterm_handler(int sig)
326 received_sigterm = sig;
327 received_nb_signals++;
329 if(received_nb_signals > 3) {
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331 strlen("Received > 3 system signals, hard exiting\n"));
/* Windows console control handler: map console events onto the POSIX-style
 * sigterm_handler so shutdown goes through one code path.
 * NOTE(review): extract omits the switch header and return statements. */
337 #if HAVE_SETCONSOLECTRLHANDLER
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
345 case CTRL_BREAK_EVENT:
346 sigterm_handler(SIGINT);
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352 sigterm_handler(SIGTERM);
353 /* Basically, with these 3 events, when we return from this method the
354 process is hard terminated, so stall as long as we need to
355 to try and let the main thread(s) clean up and gracefully terminate
356 (we have at most 5 seconds, but should be done far before that). */
/* busy-wait until the main thread marks ffmpeg_exited */
357 while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): these lines appear to be the interior of term_init() — the
 * definition line is omitted from this extract; confirm against the full file.
 * Puts the tty into raw-ish mode (for interactive key reading) and installs
 * the shutdown signal handlers. */
376 istty = isatty(0) && isatty(2);
378 if (istty && tcgetattr (0, &tty) == 0) {
/* disable input translation/flow control for raw key input */
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
/* no echo, no canonical (line-buffered) mode */
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/* POSIX path uses select() with a zero timeout; the Windows path below
 * distinguishes a console from a pipe (GUI wrappers feed us via a pipe).
 * NOTE(review): extract omits some interior lines. */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails when stdin is not a console, i.e. a pipe */
435 is_pipe = !GetConsoleMode(input_handle, &dw);
438 if (stdin->_cnt > 0) {
443 /* When running under a GUI, you will end here. */
444 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
445 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has arrived
 * (two signals are required after transcode init has completed, since
 * transcode_init_done is then 1). */
463 static int decode_interrupt_cb(void *ctx)
465 return received_nb_signals > transcode_init_done;
468 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown: free filtergraphs, output files/streams, input
 * files/streams and the global registries, then report exit status.
 * Registered as the exit handler; `ret` is the pending process return code.
 * NOTE(review): extract omits some interior lines. */
470 static void ffmpeg_cleanup(int ret)
475 int maxrss = getmaxrss() / 1024;
476 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graph, then per-input/per-output names --- */
479 for (i = 0; i < nb_filtergraphs; i++) {
480 FilterGraph *fg = filtergraphs[i];
481 avfilter_graph_free(&fg->graph);
482 for (j = 0; j < fg->nb_inputs; j++) {
483 av_freep(&fg->inputs[j]->name);
484 av_freep(&fg->inputs[j]);
486 av_freep(&fg->inputs);
487 for (j = 0; j < fg->nb_outputs; j++) {
488 av_freep(&fg->outputs[j]->name);
489 av_freep(&fg->outputs[j]);
491 av_freep(&fg->outputs);
492 av_freep(&fg->graph_desc);
494 av_freep(&filtergraphs[i]);
496 av_freep(&filtergraphs);
498 av_freep(&subtitle_out);
/* --- output files: close the muxer (unless AVFMT_NOFILE) and context --- */
501 for (i = 0; i < nb_output_files; i++) {
502 OutputFile *of = output_files[i];
503 AVFormatContext *s = of->ctx;
504 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
506 avformat_free_context(s);
507 av_dict_free(&of->opts);
509 av_freep(&output_files[i]);
/* --- output streams: bitstream filter chain, frames, parser, encoder --- */
511 for (i = 0; i < nb_output_streams; i++) {
512 OutputStream *ost = output_streams[i];
513 AVBitStreamFilterContext *bsfc;
518 bsfc = ost->bitstream_filters;
520 AVBitStreamFilterContext *next = bsfc->next;
521 av_bitstream_filter_close(bsfc);
524 ost->bitstream_filters = NULL;
525 av_frame_free(&ost->filtered_frame);
526 av_frame_free(&ost->last_frame);
528 av_parser_close(ost->parser);
530 av_freep(&ost->forced_keyframes);
531 av_expr_free(ost->forced_keyframes_pexpr);
532 av_freep(&ost->avfilter);
533 av_freep(&ost->logfile_prefix);
535 av_freep(&ost->audio_channels_map);
536 ost->audio_channels_mapped = 0;
538 avcodec_free_context(&ost->enc_ctx);
540 av_freep(&output_streams[i]);
/* input threads must stop before we tear down their input files */
543 free_input_threads();
545 for (i = 0; i < nb_input_files; i++) {
546 avformat_close_input(&input_files[i]->ctx);
547 av_freep(&input_files[i]);
549 for (i = 0; i < nb_input_streams; i++) {
550 InputStream *ist = input_streams[i];
552 av_frame_free(&ist->decoded_frame);
553 av_frame_free(&ist->filter_frame);
554 av_dict_free(&ist->decoder_opts);
555 avsubtitle_free(&ist->prev_sub.subtitle);
556 av_frame_free(&ist->sub2video.frame);
557 av_freep(&ist->filters);
558 av_freep(&ist->hwaccel_device);
560 avcodec_free_context(&ist->dec_ctx);
562 av_freep(&input_streams[i]);
567 av_freep(&vstats_filename);
/* finally drop the registry arrays themselves */
569 av_freep(&input_streams);
570 av_freep(&input_files);
571 av_freep(&output_streams);
572 av_freep(&output_files);
576 avformat_network_deinit();
578 if (received_sigterm) {
579 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
580 (int) received_sigterm);
581 } else if (ret && transcode_init_done) {
582 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b
 * (used to strip already-consumed options before validation). */
588 void remove_avoptions(AVDictionary **a, AVDictionary *b)
590 AVDictionaryEntry *t = NULL;
592 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
593 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed —
 * i.e. the user passed an option no component recognised. */
597 void assert_avoptions(AVDictionary *m)
599 AVDictionaryEntry *t;
600 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
601 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort when an experimental codec is selected without -strict experimental.
 * NOTE(review): body omitted from this extract. */
606 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log CPU time elapsed since the previous call,
 * labelled by the printf-style fmt (NULL just resets the timer).
 * NOTE(review): extract omits the va_list setup and current_time update. */
611 static void update_benchmark(const char *fmt, ...)
613 if (do_benchmark_all) {
614 int64_t t = getutime();
620 vsnprintf(buf, sizeof(buf), fmt, va);
622 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `this_stream` flags for ost itself,
 * `others` flags for all remaining streams. */
628 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
631 for (i = 0; i < nb_output_streams; i++) {
632 OutputStream *ost2 = output_streams[i];
633 ost2->finished |= ost == ost2 ? this_stream : others;
/* Send one encoded packet to the muxer: propagate extradata, apply
 * bitstream filters, sanitise DTS/PTS monotonicity, then interleave-write.
 * On write failure the whole output is shut down.
 * NOTE(review): extract omits some interior lines. */
637 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
639 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* stream-copy uses the stream codec context, encoding uses enc_ctx */
640 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* copy encoder extradata to the muxer-visible codec context once */
643 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
644 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
645 if (ost->st->codec->extradata) {
646 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
647 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* -vsync drop / negative async: discard timestamps entirely */
651 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
652 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
653 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
656 * Audio encoders may split the packets -- #frames in != #packets out.
657 * But there is no reordering, so we can limit the number of output packets
658 * by simply dropping them here.
659 * Counting encoded video frames needs to be done separately because of
660 * reordering, see do_video_out()
662 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
663 if (ost->frame_number >= ost->max_frames) {
671 av_packet_split_side_data(pkt);
/* --- run the bitstream filter chain on a shallow copy of the packet --- */
674 AVPacket new_pkt = *pkt;
675 AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
678 int a = av_bitstream_filter_filter(bsfc, avctx,
679 bsf_arg ? bsf_arg->value : NULL,
680 &new_pkt.data, &new_pkt.size,
681 pkt->data, pkt->size,
682 pkt->flags & AV_PKT_FLAG_KEY);
683 if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
684 uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
686 memcpy(t, new_pkt.data, new_pkt.size);
687 memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
695 pkt->side_data = NULL;
696 pkt->side_data_elems = 0;
/* wrap the filtered buffer so it is refcounted and freed correctly */
698 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
699 av_buffer_default_free, NULL, 0);
704 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
705 bsfc->filter->name, pkt->stream_index,
706 avctx->codec ? avctx->codec->name : "copy");
/* --- timestamp sanitisation for muxers that need timestamps --- */
716 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
717 if (pkt->dts != AV_NOPTS_VALUE &&
718 pkt->pts != AV_NOPTS_VALUE &&
719 pkt->dts > pkt->pts) {
720 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
722 ost->file_index, ost->st->index);
/* pick the median of pts, dts, last_mux_dts+1 as the new dts */
724 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
725 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
726 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
729 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
730 pkt->dts != AV_NOPTS_VALUE &&
731 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* strict muxers require strictly increasing dts, others only non-decreasing */
732 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
733 if (pkt->dts < max) {
734 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
735 av_log(s, loglevel, "Non-monotonous DTS in output stream "
736 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
737 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
739 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
742 av_log(s, loglevel, "changing to %"PRId64". This may result "
743 "in incorrect timestamps in the output file.\n",
745 if(pkt->pts >= pkt->dts)
746 pkt->pts = FFMAX(pkt->pts, max);
751 ost->last_mux_dts = pkt->dts;
753 ost->data_size += pkt->size;
754 ost->packets_written++;
756 pkt->stream_index = ost->index;
759 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
760 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
761 av_get_media_type_string(ost->enc_ctx->codec_type),
762 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
763 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
768 ret = av_interleaved_write_frame(s, pkt);
/* on mux failure, shut everything down but keep running to flush */
770 print_error("av_interleaved_write_frame()", ret);
771 main_return_code = 1;
772 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
/* Mark one output stream's encoder finished; shrink the file's recording
 * time so other streams stop at the same point. */
777 static void close_output_stream(OutputStream *ost)
779 OutputFile *of = output_files[ost->file_index];
781 ost->finished |= ENCODER_FINISHED;
783 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
784 of->recording_time = FFMIN(of->recording_time, end);
/* Return whether this stream may still emit frames under the file's -t
 * limit; closes the stream when the limit is reached.
 * NOTE(review): the return statements are omitted from this extract. */
788 static int check_recording_time(OutputStream *ost)
790 OutputFile *of = output_files[ost->file_index];
792 if (of->recording_time != INT64_MAX &&
793 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
794 AV_TIME_BASE_Q) >= 0) {
795 close_output_stream(ost);
/* Encode one audio frame and hand the resulting packet to write_frame().
 * Keeps sync_opts tracking the next expected sample position.
 * NOTE(review): extract omits some interior lines. */
801 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
804 AVCodecContext *enc = ost->enc_ctx;
808 av_init_packet(&pkt);
812 if (!check_recording_time(ost))
/* no usable pts (or async forced): stamp with the running sample count */
815 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
816 frame->pts = ost->sync_opts;
817 ost->sync_opts = frame->pts + frame->nb_samples;
818 ost->samples_encoded += frame->nb_samples;
819 ost->frames_encoded++;
821 av_assert0(pkt.size || !pkt.data);
822 update_benchmark(NULL);
824 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
825 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
826 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
827 enc->time_base.num, enc->time_base.den);
830 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
831 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
834 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* encoder time base -> stream time base before muxing */
837 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
840 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
841 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
842 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
843 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
846 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux it.  DVB subtitles are emitted twice
 * (draw + clear packets), hence the `nb` loop.
 * NOTE(review): extract omits some interior lines. */
850 static void do_subtitle_out(AVFormatContext *s,
855 int subtitle_out_max_size = 1024 * 1024;
856 int subtitle_out_size, nb, i;
861 if (sub->pts == AV_NOPTS_VALUE) {
862 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the shared 1 MiB encode buffer */
871 subtitle_out = av_malloc(subtitle_out_max_size);
873 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
878 /* Note: DVB subtitle need one packet to draw them and one other
879 packet to clear them */
880 /* XXX: signal it in the codec context ? */
881 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
886 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
888 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
889 pts -= output_files[ost->file_index]->start_time;
890 for (i = 0; i < nb; i++) {
/* the encoder may mutate num_rects (clear packet); restore it after */
891 unsigned save_num_rects = sub->num_rects;
893 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
894 if (!check_recording_time(ost))
898 // start_display_time is required to be 0
899 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
900 sub->end_display_time -= sub->start_display_time;
901 sub->start_display_time = 0;
905 ost->frames_encoded++;
907 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
908 subtitle_out_max_size, sub);
910 sub->num_rects = save_num_rects;
911 if (subtitle_out_size < 0) {
912 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
916 av_init_packet(&pkt);
917 pkt.data = subtitle_out;
918 pkt.size = subtitle_out_size;
919 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
/* display times are in milliseconds */
920 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
921 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
922 /* XXX: the pts correction is handled here. Maybe handling
923 it in the codec would be better */
/* 90 kHz ticks per ms: draw packet at start, clear packet at end */
925 pkt.pts += 90 * sub->start_display_time;
927 pkt.pts += 90 * sub->end_display_time;
930 write_frame(s, &pkt, ost);
/* Core video output path: decide, per the active vsync mode, how many
 * frames to emit (duplicate/drop), then encode (or raw-copy) each one and
 * mux it via write_frame().  next_picture == NULL means encoder flush.
 * NOTE(review): extract omits many interior lines. */
934 static void do_video_out(AVFormatContext *s,
936 AVFrame *next_picture,
939 int ret, format_video_sync;
941 AVCodecContext *enc = ost->enc_ctx;
/* muxer-visible codec context (field_order etc. go here) */
942 AVCodecContext *mux_enc = ost->st->codec;
943 int nb_frames, nb0_frames, i;
944 double delta, delta0;
947 InputStream *ist = NULL;
948 AVFilterContext *filter = ost->filter->filter;
950 if (ost->source_index >= 0)
951 ist = input_streams[ost->source_index];
/* frame duration in encoder time-base units, from the filter frame rate */
953 if (filter->inputs[0]->frame_rate.num > 0 &&
954 filter->inputs[0]->frame_rate.den > 0)
955 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
957 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
958 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
960 if (!ost->filters_script &&
964 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
965 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* flush: predict the dup count from the recent history (median of 3) */
970 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
971 ost->last_nb0_frames[1],
972 ost->last_nb0_frames[2]);
/* delta0: gap between wanted and actual position; delta adds one duration */
974 delta0 = sync_ipts - ost->sync_opts;
975 delta = delta0 + duration;
977 /* by default, we output a single frame */
981 format_video_sync = video_sync_method;
982 if (format_video_sync == VSYNC_AUTO) {
983 if(!strcmp(s->oformat->name, "avi")) {
984 format_video_sync = VSYNC_VFR;
986 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
988 && format_video_sync == VSYNC_CFR
989 && input_files[ist->file_index]->ctx->nb_streams == 1
990 && input_files[ist->file_index]->input_ts_offset == 0) {
991 format_video_sync = VSYNC_VSCFR;
993 if (format_video_sync == VSYNC_CFR && copy_ts) {
994 format_video_sync = VSYNC_VSCFR;
/* clip away small negative drift by shortening this frame's duration */
1000 format_video_sync != VSYNC_PASSTHROUGH &&
1001 format_video_sync != VSYNC_DROP) {
1002 double cor = FFMIN(-delta0, duration);
1003 if (delta0 < -0.6) {
1004 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1006 av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
1012 switch (format_video_sync) {
1014 if (ost->frame_number == 0 && delta - duration >= 0.5) {
1015 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1018 ost->sync_opts = lrint(sync_ipts);
1021 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1022 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1024 } else if (delta < -1.1)
1026 else if (delta > 1.1) {
/* behind by more than a frame: duplicate to catch up */
1027 nb_frames = lrintf(delta);
1029 nb0_frames = lrintf(delta0 - 0.6);
1035 else if (delta > 0.6)
1036 ost->sync_opts = lrint(sync_ipts);
1039 case VSYNC_PASSTHROUGH:
1040 ost->sync_opts = lrint(sync_ipts);
/* never exceed the -frames limit */
1047 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1048 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* shift the dup-history window and record this decision */
1050 memmove(ost->last_nb0_frames + 1,
1051 ost->last_nb0_frames,
1052 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1053 ost->last_nb0_frames[0] = nb0_frames;
1055 if (nb0_frames == 0 && ost->last_droped) {
1057 av_log(NULL, AV_LOG_VERBOSE,
1058 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1059 ost->frame_number, ost->st->index, ost->last_frame->pts);
1061 if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1062 if (nb_frames > dts_error_threshold * 30) {
1063 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1067 nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1068 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1070 ost->last_droped = nb_frames == nb0_frames && next_picture;
1072 /* duplicates frame if needed */
1073 for (i = 0; i < nb_frames; i++) {
1074 AVFrame *in_picture;
1075 av_init_packet(&pkt);
/* first nb0_frames iterations re-encode the previous frame (dup) */
1079 if (i < nb0_frames && ost->last_frame) {
1080 in_picture = ost->last_frame;
1082 in_picture = next_picture;
1087 in_picture->pts = ost->sync_opts;
1090 if (!check_recording_time(ost))
1092 if (ost->frame_number >= ost->max_frames)
/* --- raw-picture shortcut: no encode, pass the AVPicture through --- */
1096 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1097 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1098 /* raw pictures are written as AVPicture structure to
1099 avoid any copies. We support temporarily the older
1101 if (in_picture->interlaced_frame)
1102 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1104 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1105 pkt.data = (uint8_t *)in_picture;
1106 pkt.size = sizeof(AVPicture);
1107 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1108 pkt.flags |= AV_PKT_FLAG_KEY;
1110 write_frame(s, &pkt, ost);
/* --- normal encode path --- */
1112 int got_packet, forced_keyframe = 0;
1115 if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
1116 ost->top_field_first >= 0)
1117 in_picture->top_field_first = !!ost->top_field_first;
1119 if (in_picture->interlaced_frame) {
1120 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1121 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1123 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1125 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1127 in_picture->quality = enc->global_quality;
/* let the encoder pick the picture type unless we force a keyframe */
1128 in_picture->pict_type = 0;
1130 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1131 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* -force_key_frames: explicit pts list ... */
1132 if (ost->forced_kf_index < ost->forced_kf_count &&
1133 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1134 ost->forced_kf_index++;
1135 forced_keyframe = 1;
/* ... or an expression evaluated per frame ... */
1136 } else if (ost->forced_keyframes_pexpr) {
1138 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1139 res = av_expr_eval(ost->forced_keyframes_pexpr,
1140 ost->forced_keyframes_expr_const_values, NULL);
1141 av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1142 ost->forced_keyframes_expr_const_values[FKF_N],
1143 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1144 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1145 ost->forced_keyframes_expr_const_values[FKF_T],
1146 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1149 forced_keyframe = 1;
1150 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1151 ost->forced_keyframes_expr_const_values[FKF_N];
1152 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1153 ost->forced_keyframes_expr_const_values[FKF_T];
1154 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1157 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
/* ... or "source": mirror the input frame's keyframe flag */
1158 } else if ( ost->forced_keyframes
1159 && !strncmp(ost->forced_keyframes, "source", 6)
1160 && in_picture->key_frame==1) {
1161 forced_keyframe = 1;
1164 if (forced_keyframe) {
1165 in_picture->pict_type = AV_PICTURE_TYPE_I;
1166 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1169 update_benchmark(NULL);
1171 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1172 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1173 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1174 enc->time_base.num, enc->time_base.den);
1177 ost->frames_encoded++;
1179 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1180 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1182 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1188 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1189 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1190 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1191 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1194 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
1195 pkt.pts = ost->sync_opts;
/* encoder time base -> stream time base before muxing */
1197 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1200 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1201 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1202 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1203 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1206 frame_size = pkt.size;
1207 write_frame(s, &pkt, ost);
1209 /* if two pass, output log */
1210 if (ost->logfile && enc->stats_out) {
1211 fprintf(ost->logfile, "%s", enc->stats_out);
1217 * For video, number of frames in == number of packets out.
1218 * But there may be reordering, so we can't throw away frames on encoder
1219 * flush, we need to limit them here, before they go into encoder.
1221 ost->frame_number++;
1223 if (vstats_filename && frame_size)
1224 do_video_stats(ost, frame_size);
/* keep a reference to the last frame for future duplication */
1227 if (!ost->last_frame)
1228 ost->last_frame = av_frame_alloc();
1229 av_frame_unref(ost->last_frame);
1231 av_frame_ref(ost->last_frame, next_picture);
1233 av_frame_free(&ost->last_frame);
/* Convert a normalised mean squared error into PSNR in decibels. */
1236 static double psnr(double d)
1238 return -10.0 * log(d) / log(10.0);
/* Append one line of per-frame statistics (quantiser, PSNR, sizes,
 * bitrates, picture type) to the -vstats file, opening it on first use.
 * NOTE(review): extract omits some interior lines. */
1241 static void do_video_stats(OutputStream *ost, int frame_size)
1243 AVCodecContext *enc;
1245 double ti1, bitrate, avg_bitrate;
1247 /* this is executed just the first time do_video_stats is called */
1249 vstats_file = fopen(vstats_filename, "w");
1257 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1258 frame_number = ost->st->nb_frames;
1259 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
1260 if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
1261 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1263 fprintf(vstats_file,"f_size= %6d ", frame_size);
1264 /* compute pts value */
1265 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1269 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1270 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1271 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1272 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1273 fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
/* Mark a stream — and (per the loop) its file's sibling streams — fully
 * finished for both the encoder and the muxer.
 * NOTE(review): the condition gating the sibling loop is omitted here. */
1277 static void finish_output_stream(OutputStream *ost)
1279 OutputFile *of = output_files[ost->file_index];
1282 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1285 for (i = 0; i < of->ctx->nb_streams; i++)
1286 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1291 * Get and encode new output from any of the filtergraphs, without causing
1294 * @return 0 for success, <0 for severe errors
1296 static int reap_filters(int flush)
1298 AVFrame *filtered_frame = NULL;
1301 /* Reap all buffers present in the buffer sinks */
1302 for (i = 0; i < nb_output_streams; i++) {
1303 OutputStream *ost = output_streams[i];
1304 OutputFile *of = output_files[ost->file_index];
1305 AVFilterContext *filter;
1306 AVCodecContext *enc = ost->enc_ctx;
1311 filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to receive filter output */
1313 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1314 return AVERROR(ENOMEM);
1316 filtered_frame = ost->filtered_frame;
1319 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: NO_REQUEST means do not ask upstream for more frames */
1320 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1321 AV_BUFFERSINK_FLAG_NO_REQUEST);
1323 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1324 av_log(NULL, AV_LOG_WARNING,
1325 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* on EOF during flush, push a NULL frame so do_video_out() can drain */
1326 } else if (flush && ret == AVERROR_EOF) {
1327 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1328 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1332 if (ost->finished) {
1333 av_frame_unref(filtered_frame);
1336 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1337 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1338 AVRational tb = enc->time_base;
/* widen the timebase denominator so float_pts keeps extra precision
 * beyond what the integer rescale below preserves */
1339 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1341 tb.den <<= extra_bits;
1343 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1344 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1345 float_pts /= 1 << extra_bits;
1346 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1347 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1349 filtered_frame->pts =
1350 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1351 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1353 //if (ost->source_index >= 0)
1354 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1356 switch (filter->inputs[0]->type) {
1357 case AVMEDIA_TYPE_VIDEO:
1358 if (!ost->frame_aspect_ratio.num)
1359 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1362 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1363 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1365 enc->time_base.num, enc->time_base.den);
1368 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1370 case AVMEDIA_TYPE_AUDIO:
/* the aresample-based normalization should have fixed channel count;
 * if not, and the encoder cannot adapt, this frame cannot be encoded */
1371 if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
1372 enc->channels != av_frame_get_channels(filtered_frame)) {
1373 av_log(NULL, AV_LOG_ERROR,
1374 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1377 do_audio_out(of->ctx, ost, filtered_frame);
1380 // TODO support subtitle filters
1384 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type output byte totals with muxing
 * overhead, then (at verbose level) per-stream demux/decode and
 * encode/mux statistics for every input and output file. */
1391 static void print_final_stats(int64_t total_size)
1393 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1394 uint64_t subtitle_size = 0;
1395 uint64_t data_size = 0;
/* percent stays negative when overhead cannot be computed -> "unknown" */
1396 float percent = -1.0;
1400 for (i = 0; i < nb_output_streams; i++) {
1401 OutputStream *ost = output_streams[i];
1402 switch (ost->enc_ctx->codec_type) {
1403 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1404 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1405 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1406 default: other_size += ost->data_size; break;
1408 extra_size += ost->enc_ctx->extradata_size;
1409 data_size += ost->data_size;
/* NOTE(review): first-pass-only streams are special-cased here; the
 * action taken in the omitted lines is not visible in this chunk */
1410 if ( (ost->enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1411 != CODEC_FLAG_PASS1)
1415 if (data_size && total_size>0 && total_size >= data_size)
1416 percent = 100.0 * (total_size - data_size) / data_size;
1418 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1419 video_size / 1024.0,
1420 audio_size / 1024.0,
1421 subtitle_size / 1024.0,
1422 other_size / 1024.0,
1423 extra_size / 1024.0);
1425 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1427 av_log(NULL, AV_LOG_INFO, "unknown");
1428 av_log(NULL, AV_LOG_INFO, "\n");
1430 /* print verbose per-stream stats */
1431 for (i = 0; i < nb_input_files; i++) {
1432 InputFile *f = input_files[i];
/* shadows the parameter intentionally: per-file totals only */
1433 uint64_t total_packets = 0, total_size = 0;
1435 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1436 i, f->ctx->filename);
1438 for (j = 0; j < f->nb_streams; j++) {
1439 InputStream *ist = input_streams[f->ist_index + j];
1440 enum AVMediaType type = ist->dec_ctx->codec_type;
1442 total_size += ist->data_size;
1443 total_packets += ist->nb_packets;
1445 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1446 i, j, media_type_string(type));
1447 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1448 ist->nb_packets, ist->data_size);
1450 if (ist->decoding_needed) {
1451 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1452 ist->frames_decoded);
1453 if (type == AVMEDIA_TYPE_AUDIO)
1454 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1455 av_log(NULL, AV_LOG_VERBOSE, "; ");
1458 av_log(NULL, AV_LOG_VERBOSE, "\n");
1461 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1462 total_packets, total_size);
1465 for (i = 0; i < nb_output_files; i++) {
1466 OutputFile *of = output_files[i];
1467 uint64_t total_packets = 0, total_size = 0;
1469 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1470 i, of->ctx->filename);
1472 for (j = 0; j < of->ctx->nb_streams; j++) {
1473 OutputStream *ost = output_streams[of->ost_index + j];
1474 enum AVMediaType type = ost->enc_ctx->codec_type;
1476 total_size += ost->data_size;
1477 total_packets += ost->packets_written;
1479 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1480 i, j, media_type_string(type));
1481 if (ost->encoding_needed) {
1482 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1483 ost->frames_encoded);
1484 if (type == AVMEDIA_TYPE_AUDIO)
1485 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1486 av_log(NULL, AV_LOG_VERBOSE, "; ");
1489 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1490 ost->packets_written, ost->data_size);
1492 av_log(NULL, AV_LOG_VERBOSE, "\n");
1495 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1496 total_packets, total_size);
/* warn loudly when absolutely nothing was produced */
1498 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1499 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1501 av_log(NULL, AV_LOG_WARNING, "\n");
1503 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic (or final) status line: frame count, fps, quantizer,
 * optional QP histogram and PSNR, output size, time, bitrate and dup/drop
 * counters. The same data is also written in key=value form to the
 * -progress AVIO target via buf_script. Throttled to one update per 500ms
 * via the static last_time unless this is the last report. */
1508 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1511 AVBPrint buf_script;
1513 AVFormatContext *oc;
1515 AVCodecContext *enc;
1516 int frame_number, vid, i;
1518 int64_t pts = INT64_MIN;
/* static: persists across calls for rate limiting / QP accumulation */
1519 static int64_t last_time = -1;
1520 static int qp_histogram[52];
1521 int hours, mins, secs, us;
1523 if (!print_stats && !is_last_report && !progress_avio)
1526 if (!is_last_report) {
1527 if (last_time == -1) {
1528 last_time = cur_time;
/* rate-limit: at most one report per 500ms of wallclock */
1531 if ((cur_time - last_time) < 500000)
1533 last_time = cur_time;
1537 oc = output_files[0]->ctx;
1539 total_size = avio_size(oc->pb);
1540 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1541 total_size = avio_tell(oc->pb);
1545 av_bprint_init(&buf_script, 0, 1);
1546 for (i = 0; i < nb_output_streams; i++) {
1548 ost = output_streams[i];
1550 if (!ost->stream_copy && enc->coded_frame)
1551 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
/* vid tracks whether a video stream was already reported; only the
 * first video stream gets the full frame/fps line */
1552 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1553 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1554 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1555 ost->file_index, ost->index, q);
1557 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1558 float fps, t = (cur_time-timer_start) / 1000000.0;
1560 frame_number = ost->frame_number;
1561 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" picks 1 decimal for slow rates, 0 otherwise */
1562 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1563 frame_number, fps < 9.95, fps, q);
1564 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1565 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1566 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1567 ost->file_index, ost->index, q);
1569 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1573 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
/* print log2-compressed QP histogram as one hex digit per bucket */
1575 for (j = 0; j < 32; j++)
1576 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1578 if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
1580 double error, error_sum = 0;
1581 double scale, scale_sum = 0;
1583 char type[3] = { 'Y','U','V' };
1584 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1585 for (j = 0; j < 3; j++) {
/* final report: cumulative error over all frames; otherwise the
 * last coded frame's error only */
1586 if (is_last_report) {
1587 error = enc->error[j];
1588 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1590 error = enc->coded_frame->error[j];
1591 scale = enc->width * enc->height * 255.0 * 255.0;
1597 p = psnr(error / scale);
1598 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1599 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1600 ost->file_index, ost->index, type[j] | 32, p);
1602 p = psnr(error_sum / scale_sum);
1603 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1604 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1605 ost->file_index, ost->index, p);
1609 /* compute min output value */
1610 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1611 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1612 ost->st->time_base, AV_TIME_BASE_Q));
1614 nb_frames_drop += ost->last_droped;
1617 secs = FFABS(pts) / AV_TIME_BASE;
1618 us = FFABS(pts) % AV_TIME_BASE;
1624 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1626 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1628 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1629 "size=%8.0fkB time=", total_size / 1024.0);
1631 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1632 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1633 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1634 (100 * us) / AV_TIME_BASE);
1637 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1638 av_bprintf(&buf_script, "bitrate=N/A\n");
1640 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1641 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1644 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1645 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1646 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1647 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1648 hours, mins, secs, us);
1650 if (nb_frames_dup || nb_frames_drop)
1651 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1652 nb_frames_dup, nb_frames_drop);
1653 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1654 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1656 if (print_stats || is_last_report) {
/* '\r' keeps the progress line in place; '\n' finalizes it */
1657 const char end = is_last_report ? '\n' : '\r';
1658 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1659 fprintf(stderr, "%s %c", buf, end);
1661 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1666 if (progress_avio) {
1667 av_bprintf(&buf_script, "progress=%s\n",
1668 is_last_report ? "end" : "continue");
1669 avio_write(progress_avio, buf_script.str,
1670 FFMIN(buf_script.len, buf_script.size - 1));
1671 avio_flush(progress_avio);
1672 av_bprint_finalize(&buf_script, NULL);
1673 if (is_last_report) {
1674 avio_closep(&progress_avio);
1679 print_final_stats(total_size);
/* Drain every encoder at end of input: repeatedly call the per-type encode
 * function with a NULL frame until no more packets are produced, writing
 * each flushed packet to its muxer. Streams that do not need encoding, raw
 * PCM-like audio and rawvideo-to-RAWPICTURE outputs are skipped. */
1682 static void flush_encoders(void)
1686 for (i = 0; i < nb_output_streams; i++) {
1687 OutputStream *ost = output_streams[i];
1688 AVCodecContext *enc = ost->enc_ctx;
1689 AVFormatContext *os = output_files[ost->file_index]->ctx;
1690 int stop_encoding = 0;
1692 if (!ost->encoding_needed)
/* frame_size <= 1 audio encoders have no delayed data to flush */
1695 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1697 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1701 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1704 switch (enc->codec_type) {
1705 case AVMEDIA_TYPE_AUDIO:
1706 encode = avcodec_encode_audio2;
1709 case AVMEDIA_TYPE_VIDEO:
1710 encode = avcodec_encode_video2;
1721 av_init_packet(&pkt);
1725 update_benchmark(NULL);
/* NULL frame signals the encoder to emit buffered packets */
1726 ret = encode(enc, &pkt, NULL, &got_packet);
1727 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1729 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1732 if (ost->logfile && enc->stats_out) {
1733 fprintf(ost->logfile, "%s", enc->stats_out);
1739 if (ost->finished & MUXER_FINISHED) {
1740 av_free_packet(&pkt);
1743 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
/* save size before write_frame(), which consumes the packet */
1744 pkt_size = pkt.size;
1745 write_frame(os, &pkt, ost);
1746 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1747 do_video_stats(ost, pkt_size);
1758 * Check whether a packet from ist should be written into ost at this time
/* Returns 0 when the packet must be skipped (wrong source stream, or before
 * the output file's start time); NOTE(review): additional checks and the
 * success return are in lines omitted from this sampled listing. */
1760 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1762 OutputFile *of = output_files[ost->file_index];
1763 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1765 if (ost->source_index != ist_index)
1771 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding: rescale
 * pts/dts/duration into the output timebase (offset by the output start
 * time), honor -copypriorss / copy_initial_nonkeyframes, stop at the
 * recording-time limits, and run the optional parser-level bitstream fixup
 * before handing the packet to write_frame(). */
1777 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1779 OutputFile *of = output_files[ost->file_index];
1780 InputFile *f = input_files [ist->file_index];
1781 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1782 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1783 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1787 av_init_packet(&opkt);
/* drop leading non-keyframes unless explicitly asked to keep them */
1789 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1790 !ost->copy_initial_nonkeyframes)
1793 if (pkt->pts == AV_NOPTS_VALUE) {
1794 if (!ost->frame_number && ist->pts < start_time &&
1795 !ost->copy_prior_start)
1798 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1799 !ost->copy_prior_start)
/* reached -t limit for the output file: finish this stream */
1803 if (of->recording_time != INT64_MAX &&
1804 ist->pts >= of->recording_time + start_time) {
1805 close_output_stream(ost);
1809 if (f->recording_time != INT64_MAX) {
1810 start_time = f->ctx->start_time;
1811 if (f->start_time != AV_NOPTS_VALUE)
1812 start_time += f->start_time;
1813 if (ist->pts >= f->recording_time + start_time) {
1814 close_output_stream(ost);
1819 /* force the input stream PTS */
1820 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1823 if (pkt->pts != AV_NOPTS_VALUE)
1824 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1826 opkt.pts = AV_NOPTS_VALUE;
1828 if (pkt->dts == AV_NOPTS_VALUE)
1829 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1831 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1832 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps from the packet duration to
 * avoid drift from repeated timebase rounding */
1834 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1835 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1837 duration = ist->dec_ctx->frame_size;
1838 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1839 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1840 ost->st->time_base) - ost_tb_start_time;
1843 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1844 opkt.flags = pkt->flags;
1846 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1847 if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1848 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1849 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1850 && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
/* parser may allocate new data; wrap it in a refcounted buffer */
1852 if (av_parser_change(ost->parser, ost->st->codec,
1853 &opkt.data, &opkt.size,
1854 pkt->data, pkt->size,
1855 pkt->flags & AV_PKT_FLAG_KEY)) {
1856 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1861 opkt.data = pkt->data;
1862 opkt.size = pkt->size;
1864 av_copy_packet_side_data(&opkt, pkt);
1866 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1867 /* store AVPicture in AVPacket, as expected by the output format */
1868 avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1869 opkt.data = (uint8_t *)&pict;
1870 opkt.size = sizeof(AVPicture);
1871 opkt.flags |= AV_PKT_FLAG_KEY;
1874 write_frame(of->ctx, &opkt, ost);
/* If the decoder reported no channel layout, pick the default layout for its
 * channel count (bounded by -guess_layout_max) and log a warning.
 * NOTE(review): sampled listing — the return statements (presumably 0 on
 * failure, 1 on success) are in omitted lines. */
1877 int guess_input_channel_layout(InputStream *ist)
1879 AVCodecContext *dec = ist->dec_ctx;
1881 if (!dec->channel_layout) {
1882 char layout_name[256];
1884 if (dec->channels > ist->guess_layout_max)
1886 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1887 if (!dec->channel_layout)
1889 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1890 dec->channels, dec->channel_layout);
1891 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1892 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet: run avcodec_decode_audio4(), update decode
 * statistics and the stream's next pts/dts, reconfigure filtergraphs when
 * the sample format/rate/channel layout changes mid-stream, pick the best
 * available timestamp source for the decoded frame, and push the frame into
 * every attached filter input. Returns 0/positive on success, <0 on error. */
1897 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1899 AVFrame *decoded_frame, *f;
1900 AVCodecContext *avctx = ist->dec_ctx;
1901 int i, ret, err = 0, resample_changed;
1902 AVRational decoded_frame_tb;
/* lazily allocate reusable per-stream frames */
1904 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1905 return AVERROR(ENOMEM);
1906 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1907 return AVERROR(ENOMEM);
1908 decoded_frame = ist->decoded_frame;
1910 update_benchmark(NULL);
1911 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1912 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1914 if (ret >= 0 && avctx->sample_rate <= 0) {
1915 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1916 ret = AVERROR_INVALIDDATA;
1919 if (*got_output || ret<0)
1920 decode_error_stat[ret<0] ++;
1922 if (ret < 0 && exit_on_error)
1925 if (!*got_output || ret < 0)
1928 ist->samples_decoded += decoded_frame->nb_samples;
1929 ist->frames_decoded++;
1932 /* increment next_dts to use for the case where the input stream does not
1933 have timestamps or there are multiple frames in the packet */
1934 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1936 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream audio parameter changes that require filter re-init */
1940 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1941 ist->resample_channels != avctx->channels ||
1942 ist->resample_channel_layout != decoded_frame->channel_layout ||
1943 ist->resample_sample_rate != decoded_frame->sample_rate;
1944 if (resample_changed) {
1945 char layout1[64], layout2[64];
1947 if (!guess_input_channel_layout(ist)) {
1948 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1949 "layout for Input Stream #%d.%d\n", ist->file_index,
1953 decoded_frame->channel_layout = avctx->channel_layout;
1955 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1956 ist->resample_channel_layout);
1957 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1958 decoded_frame->channel_layout);
1960 av_log(NULL, AV_LOG_INFO,
1961 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1962 ist->file_index, ist->st->index,
1963 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1964 ist->resample_channels, layout1,
1965 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1966 avctx->channels, layout2);
1968 ist->resample_sample_fmt = decoded_frame->format;
1969 ist->resample_sample_rate = decoded_frame->sample_rate;
1970 ist->resample_channel_layout = decoded_frame->channel_layout;
1971 ist->resample_channels = avctx->channels;
1973 for (i = 0; i < nb_filtergraphs; i++)
1974 if (ist_in_filtergraph(filtergraphs[i], ist)) {
1975 FilterGraph *fg = filtergraphs[i];
1976 if (configure_filtergraph(fg) < 0) {
1977 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1983 /* if the decoder provides a pts, use it instead of the last packet pts.
1984 the decoder could be delaying output by a packet or more. */
1985 if (decoded_frame->pts != AV_NOPTS_VALUE) {
1986 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
1987 decoded_frame_tb = avctx->time_base;
1988 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
1989 decoded_frame->pts = decoded_frame->pkt_pts;
1990 decoded_frame_tb = ist->st->time_base;
1991 } else if (pkt->pts != AV_NOPTS_VALUE) {
1992 decoded_frame->pts = pkt->pts;
1993 decoded_frame_tb = ist->st->time_base;
1995 decoded_frame->pts = ist->dts;
1996 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so multiple frames per packet don't reuse it */
1998 pkt->pts = AV_NOPTS_VALUE;
1999 if (decoded_frame->pts != AV_NOPTS_VALUE)
2000 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2001 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2002 (AVRational){1, avctx->sample_rate});
2003 for (i = 0; i < ist->nb_filters; i++) {
/* every filter input except the last gets a reference; the last one
 * consumes the decoded frame itself */
2004 if (i < ist->nb_filters - 1) {
2005 f = ist->filter_frame;
2006 err = av_frame_ref(f, decoded_frame);
2011 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2012 AV_BUFFERSRC_FLAG_PUSH);
2013 if (err == AVERROR_EOF)
2014 err = 0; /* ignore */
2018 decoded_frame->pts = AV_NOPTS_VALUE;
2020 av_frame_unref(ist->filter_frame);
2021 av_frame_unref(decoded_frame);
2022 return err < 0 ? err : ret;
/* Decode one video packet: run avcodec_decode_video2(), propagate
 * has_b_frames fixups for H.264, update decode stats, retrieve hwaccel
 * frames back to system memory, choose the best-effort timestamp,
 * reconfigure filtergraphs on resolution/pix_fmt changes, and push the
 * frame into every attached filter input. Returns <0 on error. */
2025 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2027 AVFrame *decoded_frame, *f;
2028 int i, ret = 0, err = 0, resample_changed;
2029 int64_t best_effort_timestamp;
2030 AVRational *frame_sample_aspect;
2032 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2033 return AVERROR(ENOMEM);
2034 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2035 return AVERROR(ENOMEM);
/* feed our tracked dts back to the decoder via the packet */
2037 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2036 decoded_frame = ist->decoded_frame;
2039 update_benchmark(NULL);
2040 ret = avcodec_decode_video2(ist->dec_ctx,
2041 decoded_frame, got_output, pkt);
2042 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2044 // The following line may be required in some cases where there is no parser
2045 // or the parser does not has_b_frames correctly
2046 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2047 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2048 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2050 av_log_ask_for_sample(
2052 "has_b_frames is larger in decoder than demuxer %d > %d ",
2053 ist->dec_ctx->has_b_frames,
2054 ist->st->codec->has_b_frames
2058 if (*got_output || ret<0)
2059 decode_error_stat[ret<0] ++;
2061 if (ret < 0 && exit_on_error)
2064 if (*got_output && ret >= 0) {
2065 if (ist->dec_ctx->width != decoded_frame->width ||
2066 ist->dec_ctx->height != decoded_frame->height ||
2067 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2068 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2069 decoded_frame->width,
2070 decoded_frame->height,
2071 decoded_frame->format,
2072 ist->dec_ctx->width,
2073 ist->dec_ctx->height,
2074 ist->dec_ctx->pix_fmt);
2078 if (!*got_output || ret < 0)
2081 if(ist->top_field_first>=0)
2082 decoded_frame->top_field_first = ist->top_field_first;
2084 ist->frames_decoded++;
/* copy the frame out of GPU/hwaccel memory before filtering */
2086 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2087 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2091 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2093 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2094 if(best_effort_timestamp != AV_NOPTS_VALUE)
2095 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2098 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2099 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2100 ist->st->index, av_ts2str(decoded_frame->pts),
2101 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2102 best_effort_timestamp,
2103 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2104 decoded_frame->key_frame, decoded_frame->pict_type,
2105 ist->st->time_base.num, ist->st->time_base.den);
2110 if (ist->st->sample_aspect_ratio.num)
2111 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream size/format changes that require filter re-init */
2113 resample_changed = ist->resample_width != decoded_frame->width ||
2114 ist->resample_height != decoded_frame->height ||
2115 ist->resample_pix_fmt != decoded_frame->format;
2116 if (resample_changed) {
2117 av_log(NULL, AV_LOG_INFO,
2118 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2119 ist->file_index, ist->st->index,
2120 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2121 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2123 ist->resample_width = decoded_frame->width;
2124 ist->resample_height = decoded_frame->height;
2125 ist->resample_pix_fmt = decoded_frame->format;
2127 for (i = 0; i < nb_filtergraphs; i++) {
2128 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2129 configure_filtergraph(filtergraphs[i]) < 0) {
2130 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2136 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2137 for (i = 0; i < ist->nb_filters; i++) {
2138 if (!frame_sample_aspect->num)
2139 *frame_sample_aspect = ist->st->sample_aspect_ratio;
/* reference for all but the last filter input; move into the last */
2141 if (i < ist->nb_filters - 1) {
2142 f = ist->filter_frame;
2143 err = av_frame_ref(f, decoded_frame);
2148 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2149 if (ret == AVERROR_EOF) {
2150 ret = 0; /* ignore */
2151 } else if (ret < 0) {
2152 av_log(NULL, AV_LOG_FATAL,
2153 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2159 av_frame_unref(ist->filter_frame);
2160 av_frame_unref(decoded_frame);
2161 return err < 0 ? err : ret;
/* Decode one subtitle packet and send the result to every subtitle encoder
 * fed by this input stream, plus the sub2video machinery. With
 * -fix_sub_duration, output is delayed by one subtitle so each event's
 * display time can be clipped against the next event's start. */
2164 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2166 AVSubtitle subtitle;
2167 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2168 &subtitle, got_output, pkt);
2170 if (*got_output || ret<0)
2171 decode_error_stat[ret<0] ++;
2173 if (ret < 0 && exit_on_error)
2176 if (ret < 0 || !*got_output) {
2178 sub2video_flush(ist);
2182 if (ist->fix_sub_duration) {
2184 if (ist->prev_sub.got_output) {
/* end_display_time is in ms relative to the subtitle's own pts */
2185 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2186 1000, AV_TIME_BASE);
2187 if (end < ist->prev_sub.subtitle.end_display_time) {
2188 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2189 "Subtitle duration reduced from %d to %d%s\n",
2190 ist->prev_sub.subtitle.end_display_time, end,
2191 end <= 0 ? ", dropping it" : "");
2192 ist->prev_sub.subtitle.end_display_time = end;
/* swap current and previous: emit the previous subtitle this call */
2195 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2196 FFSWAP(int, ret, ist->prev_sub.ret);
2197 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2205 sub2video_update(ist, &subtitle);
2207 if (!subtitle.num_rects)
2210 ist->frames_decoded++;
2212 for (i = 0; i < nb_output_streams; i++) {
2213 OutputStream *ost = output_streams[i];
2215 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2216 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2219 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2223 avsubtitle_free(&subtitle);
/* Signal end-of-stream to every filter input attached to this input stream
 * by sending a NULL frame/buffer ref.
 * NOTE(review): sampled listing — the preprocessor/selection logic between
 * the av_buffersrc_add_ref and av_buffersrc_add_frame calls, and the error
 * handling plus final return, are in omitted lines. */
2227 static int send_filter_eof(InputStream *ist)
2230 for (i = 0; i < ist->nb_filters; i++) {
2232 ret = av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
2234 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2242 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet dispatcher: maintain the input stream's pts/dts clocks,
 * run the appropriate decoder (audio/video/subtitle) while the packet has
 * data (or until a flushing decoder stops producing output), send filter
 * EOF after flushing, and for non-decoded streams advance timestamps and
 * streamcopy the packet to every matching output. */
2243 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
2249 if (!ist->saw_first_ts) {
/* start the clock slightly early to account for decoder B-frame delay */
2250 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2252 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2253 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2254 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2256 ist->saw_first_ts = 1;
2259 if (ist->next_dts == AV_NOPTS_VALUE)
2260 ist->next_dts = ist->dts;
2261 if (ist->next_pts == AV_NOPTS_VALUE)
2262 ist->next_pts = ist->pts;
2266 av_init_packet(&avpkt);
2274 if (pkt->dts != AV_NOPTS_VALUE) {
2275 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2276 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2277 ist->next_pts = ist->pts = ist->dts;
2280 // while we have more to decode or while the decoder did output something on EOF
2281 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2285 ist->pts = ist->next_pts;
2286 ist->dts = ist->next_dts;
2288 if (avpkt.size && avpkt.size != pkt->size &&
2289 !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
2290 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2291 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2292 ist->showed_multi_packet_warning = 1;
2295 switch (ist->dec_ctx->codec_type) {
2296 case AVMEDIA_TYPE_AUDIO:
2297 ret = decode_audio (ist, &avpkt, &got_output);
2299 case AVMEDIA_TYPE_VIDEO:
2300 ret = decode_video (ist, &avpkt, &got_output);
/* derive next_dts increment: packet duration if present, else
 * codec framerate adjusted by repeat_pict from the parser */
2301 if (avpkt.duration) {
2302 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2303 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2304 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2305 duration = ((int64_t)AV_TIME_BASE *
2306 ist->dec_ctx->framerate.den * ticks) /
2307 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2311 if(ist->dts != AV_NOPTS_VALUE && duration) {
2312 ist->next_dts += duration;
2314 ist->next_dts = AV_NOPTS_VALUE;
2317 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2319 case AVMEDIA_TYPE_SUBTITLE:
2320 ret = transcode_subtitles(ist, &avpkt, &got_output);
2327 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2328 ist->file_index, ist->st->index, av_err2str(ret));
2335 avpkt.pts= AV_NOPTS_VALUE;
2337 // touch data and size only if not EOF
2339 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2347 if (got_output && !pkt)
2351 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2352 if (!pkt && ist->decoding_needed && !got_output) {
2353 int ret = send_filter_eof(ist);
2355 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2360 /* handle stream copy */
2361 if (!ist->decoding_needed) {
2362 ist->dts = ist->next_dts;
2363 switch (ist->dec_ctx->codec_type) {
2364 case AVMEDIA_TYPE_AUDIO:
2365 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2366 ist->dec_ctx->sample_rate;
2368 case AVMEDIA_TYPE_VIDEO:
2369 if (ist->framerate.num) {
2370 // TODO: Remove work-around for c99-to-c89 issue 7
2371 AVRational time_base_q = AV_TIME_BASE_Q;
2372 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2373 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2374 } else if (pkt->duration) {
2375 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2376 } else if(ist->dec_ctx->framerate.num != 0) {
2377 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2378 ist->next_dts += ((int64_t)AV_TIME_BASE *
2379 ist->dec_ctx->framerate.den * ticks) /
2380 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2384 ist->pts = ist->dts;
2385 ist->next_pts = ist->next_dts;
2387 for (i = 0; pkt && i < nb_output_streams; i++) {
2388 OutputStream *ost = output_streams[i];
2390 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2393 do_streamcopy(ist, ost, pkt);
2399 static void print_sdp(void)
/* Collect the AVFormatContexts of all outputs using the "rtp" muxer and
 * emit a single SDP session description for them, either to stdout (the
 * default) or to the file given with -sdp_file.
 * NOTE(review): this is a numbered excerpt with lines elided; allocation
 * failure handling and the free of 'avc' are not visible here — confirm
 * against the full file. */
2404 AVIOContext *sdp_pb;
2405 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* gather only the RTP outputs; j counts how many were collected */
2409 for (i = 0, j = 0; i < nb_output_files; i++) {
2410 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2411 avc[j] = output_files[i]->ctx;
2416 av_sdp_create(avc, j, sdp, sizeof(sdp));
/* no -sdp_file given: print the SDP to stdout */
2418 if (!sdp_filename) {
2419 printf("SDP:\n%s\n", sdp);
/* otherwise write it to the requested file through avio */
2422 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2423 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2425 avio_printf(sdp_pb, "SDP:\n%s", sdp)
2426 avio_closep(&sdp_pb);
2427 av_freep(&sdp_filename);
2434 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
/* Look up the hwaccel table entry whose pixel format matches pix_fmt.
 * The hwaccels[] array is terminated by an entry with a NULL name.
 * Returns NULL (not visible in this elided excerpt — presumably after the
 * loop) when no entry matches. */
2437 for (i = 0; hwaccels[i].name; i++)
2438 if (hwaccels[i].pix_fmt == pix_fmt)
2439 return &hwaccels[i];
2443 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * format list and pick the first acceptable one, initializing a hardware
 * acceleration backend when a hwaccel format is selected.
 * s->opaque is the InputStream (set in init_input_stream). */
2445 InputStream *ist = s->opaque;
2446 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE) */
2449 for (p = pix_fmts; *p != -1; p++) {
2450 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2451 const HWAccel *hwaccel;
/* software formats need no setup; they are accepted as-is (the accept
 * path is elided in this excerpt) */
2453 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2456 hwaccel = get_hwaccel(*p);
/* skip this candidate if it does not match an already-active hwaccel or
 * the hwaccel the user explicitly requested */
2458 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2459 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2462 ret = hwaccel->init(s);
/* init failed: fatal only if this exact hwaccel was explicitly requested;
 * with HWACCEL_AUTO we silently fall through to the next candidate */
2464 if (ist->hwaccel_id == hwaccel->id) {
2465 av_log(NULL, AV_LOG_FATAL,
2466 "%s hwaccel requested for input stream #%d:%d, "
2467 "but cannot be initialized.\n", hwaccel->name,
2468 ist->file_index, ist->st->index);
2469 return AV_PIX_FMT_NONE;
/* record the chosen hwaccel so later calls stay consistent */
2473 ist->active_hwaccel_id = hwaccel->id;
2474 ist->hwaccel_pix_fmt = *p;
2481 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
/* AVCodecContext.get_buffer2 callback: route frame allocation to the
 * active hwaccel's allocator when the frame uses the hwaccel pixel
 * format, otherwise fall back to libavcodec's default allocator. */
2483 InputStream *ist = s->opaque;
2485 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2486 return ist->hwaccel_get_buffer(s, frame, flags);
2488 return avcodec_default_get_buffer2(s, frame, flags);
2491 static int init_input_stream(int ist_index, char *error, int error_len)
/* Open the decoder for input stream ist_index (when decoding is needed)
 * and reset its timestamp bookkeeping.
 * On failure a human-readable message is written into error[error_len]
 * and a negative AVERROR code is returned. */
2494 InputStream *ist = input_streams[ist_index];
2496 if (ist->decoding_needed) {
2497 AVCodec *codec = ist->dec;
/* no decoder was found for this codec id */
2499 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2500 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2501 return AVERROR(EINVAL);
/* hook our callbacks into the decoder context; opaque carries the
 * InputStream back to get_format()/get_buffer() */
2504 ist->dec_ctx->opaque = ist;
2505 ist->dec_ctx->get_format = get_format;
2506 ist->dec_ctx->get_buffer2 = get_buffer;
2507 ist->dec_ctx->thread_safe_callbacks = 1;
2509 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles going to an output need end-display-time computation;
 * mixing that with filtering is only partially supported */
2510 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2511 (ist->decoding_needed & DECODING_FOR_OST)) {
2512 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2513 if (ist->decoding_needed & DECODING_FOR_FILTER)
2514 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* default to automatic thread count unless the user chose one */
2517 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2518 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2519 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2520 if (ret == AVERROR_EXPERIMENTAL)
2521 abort_codec_experimental(codec, 0);
2523 snprintf(error, error_len,
2524 "Error while opening decoder for input stream "
2526 ist->file_index, ist->st->index, av_err2str(ret));
/* any options left in the dict were not consumed -> report them */
2529 assert_avoptions(ist->decoder_opts);
/* timestamps are unknown until the first packet is seen */
2532 ist->next_pts = AV_NOPTS_VALUE;
2533 ist->next_dts = AV_NOPTS_VALUE;
2538 static InputStream *get_input_stream(OutputStream *ost)
/* Return the InputStream feeding this output stream, or NULL when the
 * output has no direct source (e.g. fed by a complex filtergraph; the
 * NULL return line is elided in this excerpt). */
2540 if (ost->source_index >= 0)
2541 return input_streams[ost->source_index];
2545 static int compare_int64(const void *a, const void *b)
/* qsort() comparator for int64_t values, ascending order. */
2547 int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
2548 return va < vb ? -1 : va > vb ? +1 : 0;
2551 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2552 AVCodecContext *avctx)
/* Parse the -force_key_frames specification 'kf' (a comma-separated list
 * of timestamps, where an entry "chapters[+offset]" expands to all
 * chapter start times of the output file) into a sorted array of pts in
 * avctx->time_base, stored in ost->forced_kf_pts/forced_kf_count.
 * Fatal (exits) on allocation failure. */
2555 int n = 1, i, size, index = 0;
/* count entries: one more than the number of commas (counting loop body
 * is elided in this excerpt) */
2558 for (p = kf; *p; p++)
2562 pts = av_malloc_array(size, sizeof(*pts));
2564 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2569 for (i = 0; i < n; i++) {
2570 char *next = strchr(p, ',');
/* "chapters" entry: force a keyframe at every chapter start, optionally
 * shifted by a time offset appended after the keyword */
2575 if (!memcmp(p, "chapters", 8)) {
2577 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* grow the array by (nb_chapters - 1) extra slots, guarding int overflow */
2580 if (avf->nb_chapters > INT_MAX - size ||
2581 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2583 av_log(NULL, AV_LOG_FATAL,
2584 "Could not allocate forced key frames array.\n");
/* optional offset after "chapters", e.g. "chapters+0.5" */
2587 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2588 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2590 for (j = 0; j < avf->nb_chapters; j++) {
2591 AVChapter *c = avf->chapters[j];
2592 av_assert1(index < size);
2593 pts[index++] = av_rescale_q(c->start, c->time_base,
2594 avctx->time_base) + t;
/* plain timestamp entry */
2599 t = parse_time_or_die("force_key_frames", p, 1);
2600 av_assert1(index < size);
2601 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* keyframe times must be sorted for the encoder-side lookup */
2608 av_assert0(index == size);
2609 qsort(pts, size, sizeof(*pts), compare_int64);
2610 ost->forced_kf_count = size;
2611 ost->forced_kf_pts = pts;
2614 static void report_new_stream(int input_index, AVPacket *pkt)
/* Warn (once per stream) when a packet arrives for a stream that appeared
 * after avformat_find_stream_info(), i.e. a stream ffmpeg is not mapping.
 * nb_streams_warn tracks the highest stream index already reported. */
2616 InputFile *file = input_files[input_index];
2617 AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this stream index */
2619 if (pkt->stream_index < file->nb_streams_warn)
2621 av_log(file->ctx, AV_LOG_WARNING,
2622 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2623 av_get_media_type_string(st->codec->codec_type),
2624 input_index, pkt->stream_index,
2625 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2626 file->nb_streams_warn = pkt->stream_index + 1;
2629 static void set_encoder_id(OutputFile *of, OutputStream *ost)
/* Set the "encoder" metadata tag on the output stream to identify the
 * libavcodec build and encoder used, unless the user already provided
 * one. In bitexact mode (format or codec flag) the version string is
 * replaced by a bare "Lavc" so output is reproducible. */
2631 AVDictionaryEntry *e;
2633 uint8_t *encoder_string;
2634 int encoder_string_len;
2635 int format_flags = 0;
2636 int codec_flags = 0;
/* respect a user-supplied encoder tag */
2638 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate the muxer's fflags option string to detect +bitexact */
2641 e = av_dict_get(of->opts, "fflags", NULL, 0);
2643 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2646 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* likewise for the encoder's flags option string */
2648 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2650 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2653 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* " " separator + encoder name + NUL */
2656 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2657 encoder_string = av_mallocz(encoder_string_len);
2658 if (!encoder_string)
2661 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & CODEC_FLAG_BITEXACT))
2662 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2664 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2665 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict */
2666 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2667 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2670 static int transcode_init(void)
/* One-time setup before the transcode main loop:
 *  - bind complex-filtergraph outputs to source streams,
 *  - derive encoder parameters for every output stream (stream copy vs.
 *    re-encode), open all encoders and decoders,
 *  - write all output file headers and dump the stream mapping.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): this is a numbered excerpt with many lines elided; braces
 * and several error paths are not visible here. */
2672 int ret = 0, i, j, k;
2673 AVFormatContext *oc;
2676 char error[1024] = {0};
/* attach each complex-graph output to the input stream feeding its graph
 * so -map semantics and stream dispositions can be derived from it */
2679 for (i = 0; i < nb_filtergraphs; i++) {
2680 FilterGraph *fg = filtergraphs[i];
2681 for (j = 0; j < fg->nb_outputs; j++) {
2682 OutputFilter *ofilter = fg->outputs[j];
2683 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2685 if (fg->nb_inputs != 1)
2687 for (k = nb_input_streams-1; k >= 0 ; k--)
2688 if (fg->inputs[0]->ist == input_streams[k])
2690 ofilter->ost->source_index = k;
2694 /* init framerate emulation */
2695 for (i = 0; i < nb_input_files; i++) {
2696 InputFile *ifile = input_files[i];
2697 if (ifile->rate_emu)
2698 for (j = 0; j < ifile->nb_streams; j++)
2699 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2702 /* output stream init */
2703 for (i = 0; i < nb_output_files; i++) {
2704 oc = output_files[i]->ctx;
2705 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2706 av_dump_format(oc, i, oc->filename, 1);
2707 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2708 return AVERROR(EINVAL);
2712 /* init complex filtergraphs */
2713 for (i = 0; i < nb_filtergraphs; i++)
2714 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2717 /* for each output stream, we compute the right encoding parameters */
2718 for (i = 0; i < nb_output_streams; i++) {
2719 AVCodecContext *enc_ctx;
2720 AVCodecContext *dec_ctx = NULL;
2721 ost = output_streams[i];
2722 oc = output_files[ost->file_index]->ctx;
2723 ist = get_input_stream(ost);
2725 if (ost->attachment_filename)
/* stream copy writes directly into the muxer's codec context */
2728 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2731 dec_ctx = ist->dec_ctx;
2733 ost->st->disposition = ist->st->disposition;
2734 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2735 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* mark the stream as default if it is the only one of its type */
2737 for (j=0; j<oc->nb_streams; j++) {
2738 AVStream *st = oc->streams[j];
2739 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2742 if (j == oc->nb_streams)
2743 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2744 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---- stream copy path: clone codec parameters from the decoder ---- */
2747 if (ost->stream_copy) {
2749 uint64_t extra_size;
2751 av_assert0(ist && !ost->filter);
2753 extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2755 if (extra_size > INT_MAX) {
2756 return AVERROR(EINVAL);
2759 /* if stream_copy is selected, no need to decode or encode */
2760 enc_ctx->codec_id = dec_ctx->codec_id;
2761 enc_ctx->codec_type = dec_ctx->codec_type;
/* keep the input codec tag only if the output container can accept it */
2763 if (!enc_ctx->codec_tag) {
2764 unsigned int codec_tag;
2765 if (!oc->oformat->codec_tag ||
2766 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2767 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2768 enc_ctx->codec_tag = dec_ctx->codec_tag;
2771 enc_ctx->bit_rate = dec_ctx->bit_rate;
2772 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2773 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2774 enc_ctx->field_order = dec_ctx->field_order;
/* deep-copy extradata with the required zeroed padding */
2775 if (dec_ctx->extradata_size) {
2776 enc_ctx->extradata = av_mallocz(extra_size);
2777 if (!enc_ctx->extradata) {
2778 return AVERROR(ENOMEM);
2780 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2782 enc_ctx->extradata_size= dec_ctx->extradata_size;
2783 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2785 enc_ctx->time_base = ist->st->time_base;
2787 * Avi is a special case here because it supports variable fps but
2788 * having the fps and timebase differe significantly adds quite some
/* container-specific time base fixups for copied video streams */
2791 if(!strcmp(oc->oformat->name, "avi")) {
2792 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2793 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2794 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2795 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2797 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2798 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2799 enc_ctx->ticks_per_frame = 2;
2800 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2801 && av_q2d(ist->st->time_base) < 1.0/500
2803 enc_ctx->time_base = dec_ctx->time_base;
2804 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2805 enc_ctx->time_base.den *= 2;
2806 enc_ctx->ticks_per_frame = 2;
2808 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2809 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2810 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2811 && strcmp(oc->oformat->name, "f4v")
2813 if( copy_tb<0 && dec_ctx->time_base.den
2814 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2815 && av_q2d(ist->st->time_base) < 1.0/500
2817 enc_ctx->time_base = dec_ctx->time_base;
2818 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* timecode (tmcd) tracks keep the decoder time base when plausible */
2821 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2822 && dec_ctx->time_base.num < dec_ctx->time_base.den
2823 && dec_ctx->time_base.num > 0
2824 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2825 enc_ctx->time_base = dec_ctx->time_base;
2828 if (ist && !ost->frame_rate.num)
2829 ost->frame_rate = ist->framerate;
2830 if(ost->frame_rate.num)
2831 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2833 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2834 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* copy stream-level side data (except display matrix if the rotation
 * was overridden on the command line) */
2836 if (ist->st->nb_side_data) {
2837 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2838 sizeof(*ist->st->side_data));
2839 if (!ost->st->side_data)
2840 return AVERROR(ENOMEM);
2842 ost->st->nb_side_data = 0;
2843 for (j = 0; j < ist->st->nb_side_data; j++) {
2844 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2845 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2847 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2850 sd_dst->data = av_malloc(sd_src->size);
2852 return AVERROR(ENOMEM);
2853 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2854 sd_dst->size = sd_src->size;
2855 sd_dst->type = sd_src->type;
2856 ost->st->nb_side_data++;
2860 ost->parser = av_parser_init(enc_ctx->codec_id);
/* per-media-type parameter copies for stream copy */
2862 switch (enc_ctx->codec_type) {
2863 case AVMEDIA_TYPE_AUDIO:
2864 if (audio_volume != 256) {
2865 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2868 enc_ctx->channel_layout = dec_ctx->channel_layout;
2869 enc_ctx->sample_rate = dec_ctx->sample_rate;
2870 enc_ctx->channels = dec_ctx->channels;
2871 enc_ctx->frame_size = dec_ctx->frame_size;
2872 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2873 enc_ctx->block_align = dec_ctx->block_align;
2874 enc_ctx->initial_padding = dec_ctx->delay;
2875 #if FF_API_AUDIOENC_DELAY
2876 enc_ctx->delay = dec_ctx->delay;
/* bogus block_align values confuse some muxers for MP3/AC3 */
2878 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2879 enc_ctx->block_align= 0;
2880 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2881 enc_ctx->block_align= 0;
2883 case AVMEDIA_TYPE_VIDEO:
2884 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2885 enc_ctx->width = dec_ctx->width;
2886 enc_ctx->height = dec_ctx->height;
2887 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2888 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2890 av_mul_q(ost->frame_aspect_ratio,
2891 (AVRational){ enc_ctx->height, enc_ctx->width });
2892 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2893 "with stream copy may produce invalid files\n");
2895 else if (ist->st->sample_aspect_ratio.num)
2896 sar = ist->st->sample_aspect_ratio;
2898 sar = dec_ctx->sample_aspect_ratio;
2899 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2900 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2901 ost->st->r_frame_rate = ist->st->r_frame_rate;
2903 case AVMEDIA_TYPE_SUBTITLE:
2904 enc_ctx->width = dec_ctx->width;
2905 enc_ctx->height = dec_ctx->height;
2907 case AVMEDIA_TYPE_UNKNOWN:
2908 case AVMEDIA_TYPE_DATA:
2909 case AVMEDIA_TYPE_ATTACHMENT:
/* ---- re-encode path ---- */
2916 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
2918 /* should only happen when a default codec is not present. */
2919 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2920 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2921 ret = AVERROR(EINVAL);
2926 ist->decoding_needed |= DECODING_FOR_OST;
2927 ost->encoding_needed = 1;
2929 set_encoder_id(output_files[ost->file_index], ost);
/* audio/video encoders always get a (possibly trivial) filtergraph */
2932 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2933 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
2935 fg = init_simple_filtergraph(ist, ost);
2936 if (configure_filtergraph(fg)) {
2937 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* pick the output frame rate: filter sink > -r > input metadata >
 * hard 25fps fallback */
2942 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2943 if (!ost->frame_rate.num)
2944 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2945 if (ist && !ost->frame_rate.num)
2946 ost->frame_rate = ist->framerate;
2947 if (ist && !ost->frame_rate.num)
2948 ost->frame_rate = ist->st->r_frame_rate;
2949 if (ist && !ost->frame_rate.num) {
2950 ost->frame_rate = (AVRational){25, 1};
2951 av_log(NULL, AV_LOG_WARNING,
2953 "about the input framerate is available. Falling "
2954 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
2955 "if you want a different framerate.\n",
2956 ost->file_index, ost->index);
2958 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
2959 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2960 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2961 ost->frame_rate = ost->enc->supported_framerates[idx];
2963 // reduce frame rate for mpeg4 to be within the spec limits
2964 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
2965 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
2966 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* encoder parameters come from the filter sink's negotiated link */
2970 switch (enc_ctx->codec_type) {
2971 case AVMEDIA_TYPE_AUDIO:
2972 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
2973 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2974 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2975 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
2976 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
2978 case AVMEDIA_TYPE_VIDEO:
2979 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2980 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
2981 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
2982 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
2983 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
2984 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
2985 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* rescale any parsed forced-keyframe timestamps into the encoder tb */
2987 for (j = 0; j < ost->forced_kf_count; j++)
2988 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
2990 enc_ctx->time_base);
2992 enc_ctx->width = ost->filter->filter->inputs[0]->w;
2993 enc_ctx->height = ost->filter->filter->inputs[0]->h;
2994 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2995 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
2996 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
2997 ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* warn about implicitly chosen pixel formats for common encoders */
2998 if (!strncmp(ost->enc->name, "libx264", 7) &&
2999 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3000 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3001 av_log(NULL, AV_LOG_WARNING,
3002 "No pixel format specified, %s for H.264 encoding chosen.\n"
3003 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3004 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3005 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3006 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3007 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3008 av_log(NULL, AV_LOG_WARNING,
3009 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3010 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3011 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3012 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3014 ost->st->avg_frame_rate = ost->frame_rate;
/* keep bits_per_raw_sample only when the picture geometry/format
 * is unchanged (condition head elided in this excerpt) */
3017 enc_ctx->width != dec_ctx->width ||
3018 enc_ctx->height != dec_ctx->height ||
3019 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3020 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either an expr:, the literal "source", or a
 * static timestamp list */
3023 if (ost->forced_keyframes) {
3024 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3025 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3026 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3028 av_log(NULL, AV_LOG_ERROR,
3029 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3032 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3033 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3034 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3035 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3037 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3038 // parse it only for static kf timings
3039 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3040 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3044 case AVMEDIA_TYPE_SUBTITLE:
3045 enc_ctx->time_base = (AVRational){1, 1000};
3046 if (!enc_ctx->width) {
3047 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3048 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3051 case AVMEDIA_TYPE_DATA:
/* two-pass encoding: set up the pass log file */
3058 if (enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
3059 char logfilename[1024];
3062 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
3063 ost->logfile_prefix ? ost->logfile_prefix :
3064 DEFAULT_PASS_LOGFILENAME_PREFIX,
/* x264 manages its own stats file via the "stats" private option */
3066 if (!strcmp(ost->enc->name, "libx264")) {
3067 av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
3069 if (enc_ctx->flags & CODEC_FLAG_PASS2) {
3071 size_t logbuffer_size;
3072 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
3073 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
3077 enc_ctx->stats_in = logbuffer;
3079 if (enc_ctx->flags & CODEC_FLAG_PASS1) {
3080 f = av_fopen_utf8(logfilename, "wb");
3082 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
3083 logfilename, strerror(errno));
/* parse the -disposition option string via a local flags option table */
3092 if (ost->disposition) {
3093 static const AVOption opts[] = {
3094 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3095 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3096 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3097 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3098 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3099 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3100 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3101 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3102 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3103 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3104 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3105 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3106 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3107 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3110 static const AVClass class = {
3112 .item_name = av_default_item_name,
3114 .version = LIBAVUTIL_VERSION_INT,
3116 const AVClass *pclass = &class;
3118 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3124 /* open each encoder */
3125 for (i = 0; i < nb_output_streams; i++) {
3126 ost = output_streams[i];
3127 if (ost->encoding_needed) {
3128 AVCodec *codec = ost->enc;
3129 AVCodecContext *dec = NULL;
3131 if ((ist = get_input_stream(ost)))
3133 if (dec && dec->subtitle_header) {
3134 /* ASS code assumes this buffer is null terminated so add extra byte. */
3135 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3136 if (!ost->enc_ctx->subtitle_header) {
3137 ret = AVERROR(ENOMEM);
3140 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3141 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3143 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3144 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3145 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
3147 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3148 if (ret == AVERROR_EXPERIMENTAL)
3149 abort_codec_experimental(codec, 1);
3150 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
3151 ost->file_index, ost->index);
/* fixed-frame-size audio encoders force the sink to emit exact frames */
3154 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3155 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
3156 av_buffersink_set_frame_size(ost->filter->filter,
3157 ost->enc_ctx->frame_size);
3158 assert_avoptions(ost->encoder_opts);
3159 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3160 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3161 " It takes bits/s as argument, not kbits/s\n");
/* mirror the opened encoder context into the muxer's stream context */
3163 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3165 av_log(NULL, AV_LOG_FATAL,
3166 "Error initializing the output stream codec context.\n");
3170 // copy timebase while removing common factors
3171 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3172 ost->st->codec->codec= ost->enc_ctx->codec;
3174 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3176 av_log(NULL, AV_LOG_FATAL,
3177 "Error setting up codec context options.\n");
3180 // copy timebase while removing common factors
3181 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
3185 /* init input streams */
3186 for (i = 0; i < nb_input_streams; i++)
3187 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3188 for (i = 0; i < nb_output_streams; i++) {
3189 ost = output_streams[i];
3190 avcodec_close(ost->enc_ctx);
3195 /* discard unused programs */
3196 for (i = 0; i < nb_input_files; i++) {
3197 InputFile *ifile = input_files[i];
3198 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3199 AVProgram *p = ifile->ctx->programs[j];
3200 int discard = AVDISCARD_ALL;
/* keep the program if any of its streams is still wanted */
3202 for (k = 0; k < p->nb_stream_indexes; k++)
3203 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3204 discard = AVDISCARD_DEFAULT;
3207 p->discard = discard;
3211 /* open files and write file headers */
3212 for (i = 0; i < nb_output_files; i++) {
3213 oc = output_files[i]->ctx;
3214 oc->interrupt_callback = int_cb;
3215 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3216 snprintf(error, sizeof(error),
3217 "Could not write header for output file #%d "
3218 "(incorrect codec parameters ?): %s",
3219 i, av_err2str(ret));
3220 ret = AVERROR(EINVAL);
3223 // assert_avoptions(output_files[i]->opts);
3224 if (strcmp(oc->oformat->name, "rtp")) {
3230 /* dump the file output parameters - cannot be done before in case
3232 for (i = 0; i < nb_output_files; i++) {
3233 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3236 /* dump the stream mapping */
3237 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3238 for (i = 0; i < nb_input_streams; i++) {
3239 ist = input_streams[i];
3241 for (j = 0; j < ist->nb_filters; j++) {
3242 if (ist->filters[j]->graph->graph_desc) {
3243 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3244 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3245 ist->filters[j]->name);
3246 if (nb_filtergraphs > 1)
3247 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3248 av_log(NULL, AV_LOG_INFO, "\n");
3253 for (i = 0; i < nb_output_streams; i++) {
3254 ost = output_streams[i];
3256 if (ost->attachment_filename) {
3257 /* an attached file */
3258 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3259 ost->attachment_filename, ost->file_index, ost->index);
3263 if (ost->filter && ost->filter->graph->graph_desc) {
3264 /* output from a complex graph */
3265 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3266 if (nb_filtergraphs > 1)
3267 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3269 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3270 ost->index, ost->enc ? ost->enc->name : "?");
3274 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3275 input_streams[ost->source_index]->file_index,
3276 input_streams[ost->source_index]->st->index,
3279 if (ost->sync_ist != input_streams[ost->source_index])
3280 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3281 ost->sync_ist->file_index,
3282 ost->sync_ist->st->index);
3283 if (ost->stream_copy)
3284 av_log(NULL, AV_LOG_INFO, " (copy)");
/* report decoder/encoder names; "native" means the default codec whose
 * name equals the codec descriptor's name */
3286 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3287 const AVCodec *out_codec = ost->enc;
3288 const char *decoder_name = "?";
3289 const char *in_codec_name = "?";
3290 const char *encoder_name = "?";
3291 const char *out_codec_name = "?";
3292 const AVCodecDescriptor *desc;
3295 decoder_name = in_codec->name;
3296 desc = avcodec_descriptor_get(in_codec->id);
3298 in_codec_name = desc->name;
3299 if (!strcmp(decoder_name, in_codec_name))
3300 decoder_name = "native";
3304 encoder_name = out_codec->name;
3305 desc = avcodec_descriptor_get(out_codec->id);
3307 out_codec_name = desc->name;
3308 if (!strcmp(encoder_name, out_codec_name))
3309 encoder_name = "native";
3312 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3313 in_codec_name, decoder_name,
3314 out_codec_name, encoder_name);
3316 av_log(NULL, AV_LOG_INFO, "\n");
3320 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3324 if (sdp_filename || want_sdp) {
3328 transcode_init_done = 1;
3333 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3334 static int need_output(void)
3338 for (i = 0; i < nb_output_streams; i++) {
3339 OutputStream *ost = output_streams[i];
3340 OutputFile *of = output_files[ost->file_index];
3341 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* this stream is done: already finished, or its file hit -fs limit */
3343 if (ost->finished ||
3344 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of the whole output file */
3346 if (ost->frame_number >= ost->max_frames) {
3348 for (j = 0; j < of->ctx->nb_streams; j++)
3349 close_output_stream(output_streams[of->ost_index + j]);
3360 * Select the output stream to process.
3362 * @return selected output stream, or NULL if none available
3364 static OutputStream *choose_output(void)
3367 int64_t opts_min = INT64_MAX;
3368 OutputStream *ost_min = NULL;
/* pick the unfinished stream with the smallest muxed DTS so all outputs
 * advance roughly in lockstep */
3370 for (i = 0; i < nb_output_streams; i++) {
3371 OutputStream *ost = output_streams[i];
3372 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
/* a temporarily unavailable stream still lowers opts_min but yields NULL
 * so the caller waits instead of processing another stream */
3374 if (!ost->finished && opts < opts_min) {
3376 ost_min = ost->unavailable ? NULL : ost;
/* Poll the terminal for an interactive key press and act on it.
 * Returns AVERROR_EXIT when the user asked to quit or a signal was
 * received, 0 otherwise. */
3382 static int check_keyboard_interaction(int64_t cur_time)
3385 static int64_t last_time;
3386 if (received_nb_signals)
3387 return AVERROR_EXIT;
/* Rate-limit key polling to once per 100ms; never poll when daemonized. */
3388 /* read_key() returns 0 on EOF */
3389 if(cur_time - last_time >= 100000 && !run_as_daemon){
3391 last_time = cur_time;
3395 return AVERROR_EXIT;
/* '+'/'-' adjust log verbosity in steps of 10; 's' toggles QP histogram. */
3396 if (key == '+') av_log_set_level(av_log_get_level()+10);
3397 if (key == '-') av_log_set_level(av_log_get_level()-10);
3398 if (key == 's') qp_hist ^= 1;
/* 'h' cycles through packet-dump states (off / dump / dump+hex). */
3401 do_hex_dump = do_pkt_dump = 0;
3402 } else if(do_pkt_dump){
3406 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read an interactive filter command of the form
 * "<target>|all <time>|-1 <command>[ <argument>]" from stdin. */
3408 if (key == 'c' || key == 'C'){
3409 char buf[4096], target[64], command[256], arg[256] = {0};
3412 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Collect one line of input, bounded by the buffer size. */
3414 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3419 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3420 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3421 target, time, command, arg);
/* Dispatch the command to every filtergraph. */
3422 for (i = 0; i < nb_filtergraphs; i++) {
3423 FilterGraph *fg = filtergraphs[i];
/* 'c' sends to the first matching filter only (AVFILTER_CMD_FLAG_ONE);
 * 'C' sends to all matching filters. */
3426 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3427 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3428 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3429 } else if (key == 'c') {
3430 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3431 ret = AVERROR_PATCHWELCOME;
/* Timed commands ('C' with time != -1) are queued for later execution. */
3433 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3435 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3440 av_log(NULL, AV_LOG_ERROR,
3441 "Parse error, at least 3 arguments were expected, "
3442 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles the codec debug flags; 'D' reads a numeric value from stdin. */
3445 if (key == 'd' || key == 'D'){
3448 debug = input_streams[0]->st->codec->debug<<1;
3449 if(!debug) debug = 1;
3450 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3453 if(scanf("%d", &debug)!=1)
3454 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3455 for(i=0;i<nb_input_streams;i++) {
3456 input_streams[i]->st->codec->debug = debug;
3458 for(i=0;i<nb_output_streams;i++) {
3459 OutputStream *ost = output_streams[i];
3460 ost->enc_ctx->debug = debug;
3462 if(debug) av_log_set_level(AV_LOG_DEBUG);
3463 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and unknown keys) print the interactive help text. */
3466 fprintf(stderr, "key function\n"
3467 "? show this help\n"
3468 "+ increase verbosity\n"
3469 "- decrease verbosity\n"
3470 "c Send command to first matching filter supporting it\n"
3471 "C Send/Que command to all matching filters\n"
3472 "D cycle through available debug modes\n"
3473 "h dump packets/hex press to cycle through the 3 states\n"
3475 "s Show QP histogram\n"
/* Entry point of the per-input-file reader thread: demuxes packets with
 * av_read_frame() and hands them to the main thread through the file's
 * thread message queue. 'arg' is the InputFile being read. */
3482 static void *input_thread(void *arg)
/* Non-seekable (live) inputs send in non-blocking mode so a full queue
 * does not stall the device. */
3485 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3490 ret = av_read_frame(f->ctx, &pkt);
3492 if (ret == AVERROR(EAGAIN)) {
/* Demuxer EOF or error: propagate it to the receiving side and stop. */
3497 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Duplicate the packet data so it stays valid after it leaves the
 * demuxer's ownership and crosses the thread boundary. */
3500 av_dup_packet(&pkt);
3501 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send hit a full queue: warn, then retry (the retry at
 * 3504 is issued after the non-blocking failure). */
3502 if (flags && ret == AVERROR(EAGAIN)) {
3504 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3505 av_log(f->ctx, AV_LOG_WARNING,
3506 "Thread message queue blocking; consider raising the "
3507 "thread_queue_size option (current value: %d)\n",
3508 f->thread_queue_size);
/* Send failed permanently: report it (unless it is a plain EOF), free
 * the packet and signal the error to the receiving side. */
3511 if (ret != AVERROR_EOF)
3512 av_log(f->ctx, AV_LOG_ERROR,
3513 "Unable to send packet to main thread: %s\n",
3515 av_free_packet(&pkt);
3516 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Tear down the reader threads started by init_input_threads(): signal
 * EOF to each queue, drain queued packets so the sender can exit, join
 * the thread, then free the queue. */
3524 static void free_input_threads(void)
3528 for (i = 0; i < nb_input_files; i++) {
3529 InputFile *f = input_files[i];
/* Inputs read without a thread have no queue; nothing to clean up. */
3532 if (!f->in_thread_queue)
/* Make any pending/future send in the reader thread fail with EOF. */
3534 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain packets still in flight; each one owns its own data. */
3535 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3536 av_free_packet(&pkt);
3538 pthread_join(f->thread, NULL);
3540 av_thread_message_queue_free(&f->in_thread_queue);
3544 static int init_input_threads(void)
3548 if (nb_input_files == 1)
3551 for (i = 0; i < nb_input_files; i++) {
3552 InputFile *f = input_files[i];
3554 if (f->ctx->pb ? !f->ctx->pb->seekable :
3555 strcmp(f->ctx->iformat->name, "lavfi"))
3556 f->non_blocking = 1;
3557 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3558 f->thread_queue_size, sizeof(AVPacket));
3562 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3563 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3564 av_thread_message_queue_free(&f->in_thread_queue);
3565 return AVERROR(ret);
3571 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3573 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3575 AV_THREAD_MESSAGE_NONBLOCK : 0);
3579 static int get_input_packet(InputFile *f, AVPacket *pkt)
3583 for (i = 0; i < f->nb_streams; i++) {
3584 InputStream *ist = input_streams[f->ist_index + i];
3585 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3586 int64_t now = av_gettime_relative() - ist->start;
3588 return AVERROR(EAGAIN);
3593 if (nb_input_files > 1)
3594 return get_input_packet_mt(f, pkt);
3596 return av_read_frame(f->ctx, pkt);
3599 static int got_eagain(void)
3602 for (i = 0; i < nb_output_streams; i++)
3603 if (output_streams[i]->unavailable)
3608 static void reset_eagain(void)
3611 for (i = 0; i < nb_input_files; i++)
3612 input_files[i]->eagain = 0;
3613 for (i = 0; i < nb_output_streams; i++)
3614 output_streams[i]->unavailable = 0;
3619 * - 0 -- one packet was read and processed
3620 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3621 * this function should be called again
3622 * - AVERROR_EOF -- this function should not be called again
3624 static int process_input(int file_index)
3626 InputFile *ifile = input_files[file_index];
3627 AVFormatContext *is;
/* Pull the next packet (directly or from the reader thread). */
3633 ret = get_input_packet(ifile, &pkt);
3635 if (ret == AVERROR(EAGAIN)) {
/* Real error (not EOF): report it; behavior then depends on -xerror. */
3640 if (ret != AVERROR_EOF) {
3641 print_error(is->filename, ret);
/* EOF on this file: flush each decoder with a NULL packet and finish
 * the output streams fed directly (no lavfi) from this input. */
3646 for (i = 0; i < ifile->nb_streams; i++) {
3647 ist = input_streams[ifile->ist_index + i];
3648 if (ist->decoding_needed) {
3649 ret = process_input_packet(ist, NULL);
3654 /* mark all outputs that don't go through lavfi as finished */
3655 for (j = 0; j < nb_output_streams; j++) {
3656 OutputStream *ost = output_streams[j];
3658 if (ost->source_index == ifile->ist_index + i &&
3659 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3660 finish_output_stream(ost);
/* EOF is surfaced as EAGAIN so the caller re-enters the scheduler. */
3664 ifile->eof_reached = 1;
3665 return AVERROR(EAGAIN);
/* Optional packet dump (-dump / -hex). */
3671 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3672 is->streams[pkt.stream_index]);
3674 /* the following test is needed in case new streams appear
3675 dynamically in stream : we ignore them */
3676 if (pkt.stream_index >= ifile->nb_streams) {
3677 report_new_stream(file_index, &pkt);
3678 goto discard_packet;
3681 ist = input_streams[ifile->ist_index + pkt.stream_index];
/* Demux statistics for this input stream. */
3683 ist->data_size += pkt.size;
/* Discarded streams are dropped without further processing. */
3687 goto discard_packet;
3690 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3691 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3692 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3693 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3694 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3695 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3696 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3697 av_ts2str(input_files[ist->file_index]->ts_offset),
3698 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64 pts bits. */
3701 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3702 int64_t stime, stime2;
3703 // Correcting starttime based on the enabled streams
3704 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3705 // so we instead do it here as part of discontinuity handling
3706 if ( ist->next_dts == AV_NOPTS_VALUE
3707 && ifile->ts_offset == -is->start_time
3708 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3709 int64_t new_start_time = INT64_MAX;
/* Recompute start time as the minimum over the non-discarded streams. */
3710 for (i=0; i<is->nb_streams; i++) {
3711 AVStream *st = is->streams[i];
3712 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3714 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3716 if (new_start_time > is->start_time) {
3717 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3718 ifile->ts_offset = -new_start_time;
3722 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3723 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3724 ist->wrap_correction_done = 1;
/* Unwrap dts/pts that have already wrapped past the pts_wrap_bits range;
 * if a correction is applied, wrap handling stays armed for this stream. */
3726 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3727 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3728 ist->wrap_correction_done = 0;
3730 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3731 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3732 ist->wrap_correction_done = 0;
3736 /* add the stream-global side data to the first packet */
3737 if (ist->nb_packets == 1) {
3738 if (ist->st->nb_side_data)
3739 av_packet_split_side_data(&pkt);
3740 for (i = 0; i < ist->st->nb_side_data; i++) {
3741 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Do not overwrite side data the packet already carries. */
3744 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* The display matrix is consumed by autorotate; do not forward it. */
3746 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3749 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3753 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the accumulated input-file timestamp offset, then -itsscale. */
3757 if (pkt.dts != AV_NOPTS_VALUE)
3758 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3759 if (pkt.pts != AV_NOPTS_VALUE)
3760 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3762 if (pkt.pts != AV_NOPTS_VALUE)
3763 pkt.pts *= ist->ts_scale;
3764 if (pkt.dts != AV_NOPTS_VALUE)
3765 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check: first dts of this stream compared
 * against the file's last seen dts (only for TS_DISCONT formats). */
3767 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3768 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3769 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3770 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3771 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3772 int64_t delta = pkt_dts - ifile->last_ts;
3773 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3774 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3775 ifile->ts_offset -= delta;
3776 av_log(NULL, AV_LOG_DEBUG,
3777 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3778 delta, ifile->ts_offset);
3779 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3780 if (pkt.pts != AV_NOPTS_VALUE)
3781 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Intra-stream discontinuity check against the predicted next_dts. */
3785 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3786 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3787 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3789 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3790 int64_t delta = pkt_dts - ist->next_dts;
3791 if (is->iformat->flags & AVFMT_TS_DISCONT) {
/* Discontinuous formats: fold large jumps into ts_offset. */
3792 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3793 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3794 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3795 ifile->ts_offset -= delta;
3796 av_log(NULL, AV_LOG_DEBUG,
3797 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3798 delta, ifile->ts_offset);
3799 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3800 if (pkt.pts != AV_NOPTS_VALUE)
3801 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Continuous formats: beyond the error threshold, drop the bogus
 * timestamps instead of shifting the whole file. */
3804 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3805 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3806 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3807 pkt.dts = AV_NOPTS_VALUE;
3809 if (pkt.pts != AV_NOPTS_VALUE){
3810 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3811 delta = pkt_pts - ist->next_dts;
3812 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3813 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3814 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3815 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last dts of this file for the inter-stream check above. */
3821 if (pkt.dts != AV_NOPTS_VALUE)
3822 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3825 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3826 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3827 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3828 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3829 av_ts2str(input_files[ist->file_index]->ts_offset),
3830 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video subtitle overlays advancing, then decode/copy. */
3833 sub2video_heartbeat(ist, pkt.pts);
3835 process_input_packet(ist, &pkt);
3838 av_free_packet(&pkt);
3844 * Perform a step of transcoding for the specified filter graph.
3846 * @param[in] graph filter graph to consider
3847 * @param[out] best_ist input stream where a frame would allow to continue
3848 * @return 0 for success, <0 for error
3850 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3853 int nb_requests, nb_requests_max = 0;
3854 InputFilter *ifilter;
3858 ret = avfilter_graph_request_oldest(graph->graph);
3860 return reap_filters(0);
3862 if (ret == AVERROR_EOF) {
3863 ret = reap_filters(1);
3864 for (i = 0; i < graph->nb_outputs; i++)
3865 close_output_stream(graph->outputs[i]->ost);
3868 if (ret != AVERROR(EAGAIN))
3871 for (i = 0; i < graph->nb_inputs; i++) {
3872 ifilter = graph->inputs[i];
3874 if (input_files[ist->file_index]->eagain ||
3875 input_files[ist->file_index]->eof_reached)
3877 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3878 if (nb_requests > nb_requests_max) {
3879 nb_requests_max = nb_requests;
3885 for (i = 0; i < graph->nb_outputs; i++)
3886 graph->outputs[i]->ost->unavailable = 1;
3892 * Run a single step of transcoding.
3894 * @return 0 for success, <0 for error
3896 static int transcode_step(void)
3902 ost = choose_output();
3909 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3914 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3919 av_assert0(ost->source_index >= 0);
3920 ist = input_streams[ost->source_index];
3923 ret = process_input(ist->file_index);
3924 if (ret == AVERROR(EAGAIN)) {
3925 if (input_files[ist->file_index]->eagain)
3926 ost->unavailable = 1;
3931 return ret == AVERROR_EOF ? 0 : ret;
3933 return reap_filters(0);
3937 * The following code is the main loop of the file converter
3939 static int transcode(void)
3942 AVFormatContext *os;
3945 int64_t timer_start;
/* Open/initialize all decoders, encoders and filtergraphs. */
3947 ret = transcode_init();
3951 if (stdin_interaction) {
3952 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3955 timer_start = av_gettime_relative();
/* Start per-input reader threads (no-op with a single input). */
3958 if ((ret = init_input_threads()) < 0)
/* Main loop: runs until SIGTERM, user quit, or no output is wanted. */
3962 while (!received_sigterm) {
3963 int64_t cur_time= av_gettime_relative();
3965 /* if 'q' pressed, exits */
3966 if (stdin_interaction)
3967 if (check_keyboard_interaction(cur_time) < 0)
3970 /* check if there's any stream where output is still needed */
3971 if (!need_output()) {
3972 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3976 ret = transcode_step();
/* EOF/EAGAIN from a step are not fatal; keep looping. */
3978 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3982 av_strerror(ret, errbuf, sizeof(errbuf));
3984 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3989 /* dump report by using the output first video and audio streams */
3990 print_report(0, timer_start, cur_time);
3993 free_input_threads();
3996 /* at the end of stream, we must flush the decoder buffers */
3997 for (i = 0; i < nb_input_streams; i++) {
3998 ist = input_streams[i];
3999 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4000 process_input_packet(ist, NULL);
4007 /* write the trailer if needed and close file */
4008 for (i = 0; i < nb_output_files; i++) {
4009 os = output_files[i]->ctx;
4010 av_write_trailer(os);
4013 /* dump report by using the first video and audio streams */
4014 print_report(1, timer_start, av_gettime_relative());
4016 /* close each encoder */
4017 for (i = 0; i < nb_output_streams; i++) {
4018 ost = output_streams[i];
4019 if (ost->encoding_needed) {
4020 av_freep(&ost->enc_ctx->stats_in);
4024 /* close each decoder */
4025 for (i = 0; i < nb_input_streams; i++) {
4026 ist = input_streams[i];
4027 if (ist->decoding_needed) {
4028 avcodec_close(ist->dec_ctx);
4029 if (ist->hwaccel_uninit)
4030 ist->hwaccel_uninit(ist->dec_ctx);
/* Error path also tears the reader threads down before cleanup. */
4039 free_input_threads();
/* Release per-output-stream resources (log files, options, buffers). */
4042 if (output_streams) {
4043 for (i = 0; i < nb_output_streams; i++) {
4044 ost = output_streams[i];
4047 fclose(ost->logfile);
4048 ost->logfile = NULL;
4050 av_freep(&ost->forced_kf_pts);
4051 av_freep(&ost->apad);
4052 av_freep(&ost->disposition);
4053 av_dict_free(&ost->encoder_opts);
4054 av_dict_free(&ost->swr_opts);
4055 av_dict_free(&ost->resample_opts);
4056 av_dict_free(&ost->bsf_args);
/* CPU user time consumed by this process, in microseconds.
 * Falls back to wall-clock time where no per-process clock exists. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage ru;

    getrusage(RUSAGE_SELF, &ru);
    return (ru.ru_utime.tv_sec * 1000000LL) + ru.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME counts 100ns ticks; divide by 10 for microseconds. */
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
/* Peak memory usage of this process in bytes, or 0 when the platform
 * offers no way to query it. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage ru;

    getrusage(RUSAGE_SELF, &ru);
    /* ru_maxrss is reported in kilobytes on this configuration. */
    return (int64_t)ru.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
/* Log callback that swallows every message; installed when ffmpeg is
 * launched with -d so nothing is written to the terminal. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
/* Program entry point: set up logging, register all components, parse
 * the command line, run the transcode loop and report statistics. */
4104 int main(int argc, char **argv)
4109 register_exit(ffmpeg_cleanup);
4111 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4113 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4114 parse_loglevel(argc, argv, options);
/* -d as the first argument: run detached and silence all logging. */
4116 if(argc>1 && !strcmp(argv[1], "-d")){
4118 av_log_set_callback(log_callback_null);
/* Register every codec, device, filter and (de)muxer before parsing. */
4123 avcodec_register_all();
4125 avdevice_register_all();
4127 avfilter_register_all();
4129 avformat_network_init();
4131 show_banner(argc, argv, options);
4135 /* parse options and open all input/output files */
4136 ret = ffmpeg_parse_options(argc, argv);
/* No files at all: point the user at the help instead of failing hard. */
4140 if (nb_output_files <= 0 && nb_input_files == 0) {
4142 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4146 /* file converter / grab */
4147 if (nb_output_files <= 0) {
4148 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4152 // if (nb_input_files == 0) {
4153 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the whole transcode for the -benchmark report. */
4157 current_time = ti = getutime();
4158 if (transcode() < 0)
4160 ti = getutime() - ti;
4162 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4164 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4165 decode_error_stat[0], decode_error_stat[1]);
/* Fail when the decode error ratio exceeds -max_error_rate. */
4166 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4169 exit_program(received_nb_signals ? 255 : main_return_code);
4170 return main_return_code;