2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity, consumed by cmdutils for banner/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Output file for per-frame video statistics (-vstats); NULL if unused. */
112 static FILE *vstats_file;
/* Names of the constants available in -force_key_frames expressions. */
114 const char *const forced_keyframes_const_names[] = {
/* Snapshot of user/system/real time, used by -benchmark reporting. */
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
/* Forward declarations for helpers defined later in this file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Global transcode state: frame dup/drop counters and warning threshold. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
/* Number of output files whose header/config has been dumped to the log. */
139 static unsigned nb_output_dumped = 0;
141 static int want_sdp = 1;
143 static BenchmarkTimeStamps current_time;
144 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, lazily allocated in do_subtitle_out(). */
146 static uint8_t *subtitle_out;
/* Global arrays of input/output streams and files, shared with ffmpeg_opt.c
 * and the other ffmpeg_* translation units. */
148 InputStream **input_streams = NULL;
149 int nb_input_streams = 0;
150 InputFile **input_files = NULL;
151 int nb_input_files = 0;
153 OutputStream **output_streams = NULL;
154 int nb_output_streams = 0;
155 OutputFile **output_files = NULL;
156 int nb_output_files = 0;
158 FilterGraph **filtergraphs;
163 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored on exit when restore_tty is set. */
164 static struct termios oldtty;
165 static int restore_tty;
169 static void free_input_threads(void);
173 Convert subtitles to video with alpha to insert them in filter graphs.
174 This is a temporary solution until libavfilter gets real subtitles support.
/*
 * Reset ist->sub2video.frame to a fully transparent RGB32 canvas sized to
 * the decoder dimensions (falling back to the preconfigured sub2video size).
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
 */
177 static int sub2video_get_blank_frame(InputStream *ist)
180 AVFrame *frame = ist->sub2video.frame;
182 av_frame_unref(frame);
/* Prefer decoder-reported dimensions; they may be 0 before the first frame. */
183 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
186 if ((ret = av_frame_get_buffer(frame, 0)) < 0)
/* Zero fill == transparent black in RGB32 (alpha byte is 0). */
188 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/*
 * Blit one paletted bitmap subtitle rectangle onto the w x h RGB32 canvas
 * at dst/dst_linesize, expanding each palette index through pal (r->data[1]).
 * Rectangles that are not SUBTITLE_BITMAP or that overflow the canvas are
 * warned about and skipped.
 */
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
195 uint32_t *pal, *dst2;
199 if (r->type != SUBTITLE_BITMAP) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Reject rectangles extending outside the canvas to avoid OOB writes. */
203 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel. */
210 dst += r->y * dst_linesize + r->x * 4;
212 pal = (uint32_t *)r->data[1];
213 for (y = 0; y < r->h; y++) {
214 dst2 = (uint32_t *)dst;
216 for (x = 0; x < r->w; x++)
/* Palette lookup: each source byte indexes a 32-bit ARGB palette entry. */
217 *(dst2++) = pal[*(src2++)];
219 src += r->linesize[0];
/*
 * Feed the current sub2video canvas, stamped with pts, into every buffersrc
 * filter attached to this input stream. KEEP_REF is used so the same frame
 * can be re-sent on later heartbeats; EOF from a closed filter is ignored.
 */
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
225 AVFrame *frame = ist->sub2video.frame;
229 av_assert1(frame->data[0]);
230 ist->sub2video.last_pts = frame->pts = pts;
231 for (i = 0; i < ist->nb_filters; i++) {
232 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
233 AV_BUFFERSRC_FLAG_KEEP_REF |
234 AV_BUFFERSRC_FLAG_PUSH);
/* AVERROR_EOF just means the filter no longer accepts input; not an error. */
235 if (ret != AVERROR_EOF && ret < 0)
236 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/*
 * Render the given subtitle (or a blank canvas when sub is NULL) into the
 * sub2video frame and push it into the filtergraph. Timestamps are taken
 * from the subtitle display window rescaled to the stream time base; when
 * initializing, heartbeat_pts supplies the start time instead.
 */
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
243 AVFrame *frame = ist->sub2video.frame;
247 int64_t pts, end_pts;
/* start/end_display_time are in ms relative to sub->pts (AV_TIME_BASE units). */
252 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253 AV_TIME_BASE_Q, ist->st->time_base);
254 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255 AV_TIME_BASE_Q, ist->st->time_base);
256 num_rects = sub->num_rects;
258 /* If we are initializing the system, utilize current heartbeat
259 PTS as the start time, and show until the following subpicture
260 is received. Otherwise, utilize the previous subpicture's end time
261 as the fall-back value. */
262 pts = ist->sub2video.initialize ?
263 heartbeat_pts : ist->sub2video.end_pts;
267 if (sub2video_get_blank_frame(ist) < 0) {
268 av_log(ist->dec_ctx, AV_LOG_ERROR,
269 "Impossible to get a blank canvas.\n");
/* Composite every rectangle of the subtitle onto the blank canvas. */
272 dst = frame->data [0];
273 dst_linesize = frame->linesize[0];
274 for (i = 0; i < num_rects; i++)
275 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
276 sub2video_push_ref(ist, pts);
277 ist->sub2video.end_pts = end_pts;
278 ist->sub2video.initialize = 0;
/*
 * Keep sub2video streams of the same input file fed while ordinary frames
 * are read, so filters waiting on a subtitle input do not stall the graph.
 */
281 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
283 InputFile *infile = input_files[ist->file_index];
287 /* When a frame is read from a file, examine all sub2video streams in
288 the same file and send the sub2video frame again. Otherwise, decoded
289 video frames could be accumulating in the filter graph while a filter
290 (possibly overlay) is desperately waiting for a subtitle frame. */
291 for (i = 0; i < infile->nb_streams; i++) {
292 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video (no canvas frame allocated). */
293 if (!ist2->sub2video.frame)
295 /* subtitles seem to be usually muxed ahead of other streams;
296 if not, subtracting a larger time here is necessary */
297 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298 /* do not send the heartbeat frame if the subtitle is already ahead */
299 if (pts2 <= ist2->sub2video.last_pts)
301 if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302 /* if we have hit the end of the current displayed subpicture,
303 or if we need to initialize the system, update the
304 overlayed subpicture and its start/end times */
305 sub2video_update(ist2, pts2 + 1, NULL);
/* Only re-push the existing canvas if some filter actually requested input. */
306 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
309 sub2video_push_ref(ist2, pts2);
/*
 * End-of-stream handling for a sub2video input: display the final state up
 * to INT64_MAX, then send EOF (NULL frame) to every attached buffersrc.
 */
313 static void sub2video_flush(InputStream *ist)
318 if (ist->sub2video.end_pts < INT64_MAX)
319 sub2video_update(ist, INT64_MAX, NULL);
320 for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame signals EOF to the buffer source. */
321 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322 if (ret != AVERROR_EOF && ret < 0)
323 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
327 /* end of sub2video hack */
/*
 * Async-signal-safe terminal restore: put the tty back into its saved mode.
 * Called both on normal exit and from signal context, so it must avoid
 * non-signal-safe calls.
 */
329 static void term_exit_sigsafe(void)
333 tcsetattr (0, TCSANOW, &oldtty);
339 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping. volatile because these are written from signal
 * handlers; transcode_init_done is atomic as it is also read by the
 * interrupt callback from other threads. */
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
345 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
/* First pts seen when -copyts is in effect; AV_NOPTS_VALUE until set. */
348 static int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
/*
 * Signal handler: record the signal for the main loop to act on. After more
 * than 3 signals, assume the user really wants out and hard-exit; only
 * async-signal-safe calls (write) are used here.
 */
351 sigterm_handler(int sig)
354 received_sigterm = sig;
355 received_nb_signals++;
357 if(received_nb_signals > 3) {
358 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359 strlen("Received > 3 system signals, hard exiting\n"));
/* write() result intentionally ignored; nothing useful to do on failure. */
360 if (ret < 0) { /* Do nothing */ };
365 #if HAVE_SETCONSOLECTRLHANDLER
/*
 * Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the code understands.
 */
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
368 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break map to SIGINT. */
373 case CTRL_BREAK_EVENT:
374 sigterm_handler(SIGINT);
/* Close/logoff/shutdown map to SIGTERM, then stall to allow cleanup. */
377 case CTRL_CLOSE_EVENT:
378 case CTRL_LOGOFF_EVENT:
379 case CTRL_SHUTDOWN_EVENT:
380 sigterm_handler(SIGTERM);
381 /* Basically, with these 3 events, when we return from this method the
382 process is hard terminated, so stall as long as we need to
383 to try and let the main thread(s) clean up and gracefully terminate
384 (we have at most 5 seconds, but should be done far before that). */
385 while (!ffmpeg_exited) {
391 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* SIGNAL(): install a handler via sigaction() where available (the second
 * definition below is the fallback for platforms without sigaction). */
398 #define SIGNAL(sig, func) \
400 action.sa_handler = func; \
401 sigaction(sig, &action, NULL); \
404 #define SIGNAL(sig, func) \
/* Terminal/signal setup (interior of term_init). Puts stdin into raw-ish
 * mode for interactive key handling and installs termination handlers. */
410 #if defined __linux__
411 struct sigaction action = {0};
412 action.sa_handler = sigterm_handler;
414 /* block other interrupts while processing this one */
415 sigfillset(&action.sa_mask);
417 /* restart interruptible functions (i.e. don't fail with EINTR) */
418 action.sa_flags = SA_RESTART;
422 if (!run_as_daemon && stdin_interaction) {
424 if (tcgetattr (0, &tty) == 0) {
/* Disable canonical mode, echo and signal-generating input processing so
 * single keypresses can be read; keep output post-processing (OPOST). */
428 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429 |INLCR|IGNCR|ICRNL|IXON);
430 tty.c_oflag |= OPOST;
431 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432 tty.c_cflag &= ~(CSIZE|PARENB);
437 tcsetattr (0, TCSANOW, &tty);
439 SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
443 SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444 SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
446 SIGNAL(SIGXCPU, sigterm_handler);
449 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
451 #if HAVE_SETCONSOLECTRLHANDLER
452 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
456 /* read a key without blocking */
457 static int read_key(void)
/* POSIX path: poll stdin with a zero-timeout select(). */
469 n = select(1, &rfds, NULL, NULL, &tv);
478 # if HAVE_PEEKNAMEDPIPE
480 static HANDLE input_handle;
483 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* If GetConsoleMode fails, stdin is a pipe rather than a console. */
484 is_pipe = !GetConsoleMode(input_handle, &dw);
488 /* When running under a GUI, you will end here. */
489 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
490 // input pipe may have been closed by the program that ran ffmpeg
/*
 * AVIO interrupt callback: abort blocking I/O once a signal is received.
 * Before transcode init completes one signal aborts; after, two are needed
 * (so a single Ctrl+C still allows graceful trailer writing).
 */
508 static int decode_interrupt_cb(void *ctx)
510 return received_nb_signals > atomic_load(&transcode_init_done);
513 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/*
 * Global teardown, registered as the exit handler: frees filtergraphs,
 * output files/streams, input files/streams and misc global state, then
 * reports the final status. `ret` is the process exit code being used.
 */
515 static void ffmpeg_cleanup(int ret)
520 int maxrss = getmaxrss() / 1024;
521 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graphs, queued frames/subtitles and descriptors --- */
524 for (i = 0; i < nb_filtergraphs; i++) {
525 FilterGraph *fg = filtergraphs[i];
526 avfilter_graph_free(&fg->graph);
527 for (j = 0; j < fg->nb_inputs; j++) {
528 InputFilter *ifilter = fg->inputs[j];
529 struct InputStream *ist = ifilter->ist;
/* Drain and free any frames still queued for this input filter. */
531 while (av_fifo_size(ifilter->frame_queue)) {
533 av_fifo_generic_read(ifilter->frame_queue, &frame,
534 sizeof(frame), NULL);
535 av_frame_free(&frame);
537 av_fifo_freep(&ifilter->frame_queue);
538 if (ist->sub2video.sub_queue) {
539 while (av_fifo_size(ist->sub2video.sub_queue)) {
541 av_fifo_generic_read(ist->sub2video.sub_queue,
542 &sub, sizeof(sub), NULL);
543 avsubtitle_free(&sub);
545 av_fifo_freep(&ist->sub2video.sub_queue);
547 av_buffer_unref(&ifilter->hw_frames_ctx);
548 av_freep(&ifilter->name);
549 av_freep(&fg->inputs[j]);
551 av_freep(&fg->inputs);
552 for (j = 0; j < fg->nb_outputs; j++) {
553 OutputFilter *ofilter = fg->outputs[j];
555 avfilter_inout_free(&ofilter->out_tmp);
556 av_freep(&ofilter->name);
557 av_freep(&ofilter->formats);
558 av_freep(&ofilter->channel_layouts);
559 av_freep(&ofilter->sample_rates);
560 av_freep(&fg->outputs[j]);
562 av_freep(&fg->outputs);
563 av_freep(&fg->graph_desc);
565 av_freep(&filtergraphs[i]);
567 av_freep(&filtergraphs);
569 av_freep(&subtitle_out);
/* --- output files: close muxer contexts and per-file options --- */
572 for (i = 0; i < nb_output_files; i++) {
573 OutputFile *of = output_files[i];
578 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
580 avformat_free_context(s);
581 av_dict_free(&of->opts);
583 av_freep(&output_files[i]);
/* --- output streams: bsf, cached frames, encoder contexts, mux queue --- */
585 for (i = 0; i < nb_output_streams; i++) {
586 OutputStream *ost = output_streams[i];
591 av_bsf_free(&ost->bsf_ctx);
593 av_frame_free(&ost->filtered_frame);
594 av_frame_free(&ost->last_frame);
595 av_dict_free(&ost->encoder_opts);
597 av_freep(&ost->forced_keyframes);
598 av_expr_free(ost->forced_keyframes_pexpr);
599 av_freep(&ost->avfilter);
600 av_freep(&ost->logfile_prefix);
602 av_freep(&ost->audio_channels_map);
603 ost->audio_channels_mapped = 0;
605 av_dict_free(&ost->sws_dict);
606 av_dict_free(&ost->swr_opts);
608 avcodec_free_context(&ost->enc_ctx);
609 avcodec_parameters_free(&ost->ref_par);
/* Unreference any packets still waiting in the muxing queue. */
611 if (ost->muxing_queue) {
612 while (av_fifo_size(ost->muxing_queue)) {
614 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
615 av_packet_unref(&pkt);
617 av_fifo_freep(&ost->muxing_queue);
620 av_freep(&output_streams[i]);
/* --- input side: stop reader threads, close demuxers, free decoders --- */
623 free_input_threads();
625 for (i = 0; i < nb_input_files; i++) {
626 avformat_close_input(&input_files[i]->ctx);
627 av_freep(&input_files[i]);
629 for (i = 0; i < nb_input_streams; i++) {
630 InputStream *ist = input_streams[i];
632 av_frame_free(&ist->decoded_frame);
633 av_frame_free(&ist->filter_frame);
634 av_dict_free(&ist->decoder_opts);
635 avsubtitle_free(&ist->prev_sub.subtitle);
636 av_frame_free(&ist->sub2video.frame);
637 av_freep(&ist->filters);
638 av_freep(&ist->hwaccel_device);
639 av_freep(&ist->dts_buffer);
641 avcodec_free_context(&ist->dec_ctx);
643 av_freep(&input_streams[i]);
/* fclose() checked: a failed close can lose buffered vstats data. */
647 if (fclose(vstats_file))
648 av_log(NULL, AV_LOG_ERROR,
649 "Error closing vstats file, loss of information possible: %s\n",
650 av_err2str(AVERROR(errno)));
652 av_freep(&vstats_filename);
654 av_freep(&input_streams);
655 av_freep(&input_files);
656 av_freep(&output_streams);
657 av_freep(&output_files);
661 avformat_network_deinit();
/* Final status: distinguish signal-driven exit from a conversion failure. */
663 if (received_sigterm) {
664 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
665 (int) received_sigterm);
666 } else if (ret && atomic_load(&transcode_init_done)) {
667 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/*
 * Remove from dictionary *a every key that also appears in b (case-sensitive
 * match). Used to strip already-consumed options before leftover checking.
 */
673 void remove_avoptions(AVDictionary **a, AVDictionary *b)
675 AVDictionaryEntry *t = NULL;
677 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
678 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/*
 * Abort with a fatal log message if dictionary m still contains any entry,
 * i.e. an option the user supplied was not consumed by any component.
 */
682 void assert_avoptions(AVDictionary *m)
684 AVDictionaryEntry *t;
685 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
686 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
691 static void abort_codec_experimental(const AVCodec *c, int encoder)
/*
 * -benchmark_all support: when fmt is non-NULL, log user/sys/real time
 * elapsed since the previous call (stored in current_time), labelled with
 * the printf-style message; in all cases refresh current_time.
 */
696 static void update_benchmark(const char *fmt, ...)
698 if (do_benchmark_all) {
699 BenchmarkTimeStamps t = get_benchmark_time_stamps();
705 vsnprintf(buf, sizeof(buf), fmt, va);
707 av_log(NULL, AV_LOG_INFO,
708 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
709 t.user_usec - current_time.user_usec,
710 t.sys_usec - current_time.sys_usec,
711 t.real_usec - current_time.real_usec, buf);
/*
 * Mark every output stream finished: `this_stream` flags are applied to
 * ost itself, `others` to all remaining streams. Used on fatal mux errors.
 */
717 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
720 for (i = 0; i < nb_output_streams; i++) {
721 OutputStream *ost2 = output_streams[i];
722 ost2->finished |= ost == ost2 ? this_stream : others;
/*
 * Deliver one packet to the muxer for stream ost of file of. If the muxer
 * header is not written yet the packet is buffered in ost->muxing_queue
 * (grown as needed up to max_muxing_queue_size). Otherwise timestamps are
 * rescaled/sanitized and the packet is interleaved-written; on write error
 * all output streams are closed. `unqueue` is set when re-sending a packet
 * that was previously buffered, to avoid double-counting frames.
 */
726 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
728 AVFormatContext *s = of->ctx;
729 AVStream *st = ost->st;
733 * Audio encoders may split the packets -- #frames in != #packets out.
734 * But there is no reordering, so we can limit the number of output packets
735 * by simply dropping them here.
736 * Counting encoded video frames needs to be done separately because of
737 * reordering, see do_video_out().
738 * Do not count the packet when unqueued because it has been counted when queued.
740 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
741 if (ost->frame_number >= ost->max_frames) {
742 av_packet_unref(pkt);
748 if (!of->header_written) {
749 AVPacket tmp_pkt = {0};
750 /* the muxer is not initialized yet, buffer the packet */
751 if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the FIFO: double it, but clamp to max_muxing_queue_size once the
 * buffered data size exceeds the configured threshold. */
752 unsigned int are_we_over_size =
753 (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
754 int new_size = are_we_over_size ?
755 FFMIN(2 * av_fifo_size(ost->muxing_queue),
756 ost->max_muxing_queue_size) :
757 2 * av_fifo_size(ost->muxing_queue);
759 if (new_size <= av_fifo_size(ost->muxing_queue)) {
760 av_log(NULL, AV_LOG_ERROR,
761 "Too many packets buffered for output stream %d:%d.\n",
762 ost->file_index, ost->st->index);
765 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
769 ret = av_packet_make_refcounted(pkt);
772 av_packet_move_ref(&tmp_pkt, pkt);
773 ost->muxing_queue_data_size += tmp_pkt.size;
774 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* In drop/async-compensation modes, let the muxer generate timestamps. */
778 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
779 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
780 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
/* Extract encoder quality/error stats from packet side data for reporting. */
782 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
784 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
786 ost->quality = sd ? AV_RL32(sd) : -1;
787 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
789 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
791 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* In CFR mode the duration is dictated by the frame rate, not the encoder. */
796 if (ost->frame_rate.num && ost->is_cfr) {
797 if (pkt->duration > 0)
798 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
799 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
804 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
806 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
807 if (pkt->dts != AV_NOPTS_VALUE &&
808 pkt->pts != AV_NOPTS_VALUE &&
809 pkt->dts > pkt->pts) {
810 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
812 ost->file_index, ost->st->index);
/* Replace both pts and dts by the median of the three candidate values
 * (sum minus min minus max leaves the middle one). */
814 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
815 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
816 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce monotonically increasing DTS (VP9 stream copy is exempt because
 * of its superframe structure). */
818 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
819 pkt->dts != AV_NOPTS_VALUE &&
820 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
821 ost->last_mux_dts != AV_NOPTS_VALUE) {
822 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
823 if (pkt->dts < max) {
824 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
826 loglevel = AV_LOG_ERROR;
827 av_log(s, loglevel, "Non-monotonous DTS in output stream "
828 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
829 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
831 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
834 av_log(s, loglevel, "changing to %"PRId64". This may result "
835 "in incorrect timestamps in the output file.\n",
837 if (pkt->pts >= pkt->dts)
838 pkt->pts = FFMAX(pkt->pts, max);
843 ost->last_mux_dts = pkt->dts;
845 ost->data_size += pkt->size;
846 ost->packets_written++;
848 pkt->stream_index = ost->index;
851 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
852 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
853 av_get_media_type_string(ost->enc_ctx->codec_type),
854 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
855 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
860 ret = av_interleaved_write_frame(s, pkt);
862 print_error("av_interleaved_write_frame()", ret);
863 main_return_code = 1;
864 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
866 av_packet_unref(pkt);
/*
 * Mark ost's encoder finished and, where applicable, clamp the owning
 * file's recording time to the stream's current end timestamp.
 */
869 static void close_output_stream(OutputStream *ost)
871 OutputFile *of = output_files[ost->file_index];
873 ost->finished |= ENCODER_FINISHED;
875 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
876 of->recording_time = FFMIN(of->recording_time, end);
881 * Send a single packet to the output, applying any bitstream filters
882 * associated with the output stream. This may result in any number
883 * of packets actually being written, depending on what bitstream
884 * filters are applied. The supplied packet is consumed and will be
885 * blank (as if newly-allocated) when this function returns.
887 * If eof is set, instead indicate EOF to all bitstream filters and
888 * therefore flush any delayed packets to the output. A blank packet
889 * must be supplied in this case.
/*
 * Run pkt through the stream's bitstream filter chain (if any) and write
 * every resulting packet. With eof set, NULL is sent to flush the bsf and
 * drain delayed packets. See the contract comment above this function.
 */
891 static void output_packet(OutputFile *of, AVPacket *pkt,
892 OutputStream *ost, int eof)
896 /* apply the output bitstream filters */
898 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
901 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
902 write_packet(of, pkt, ost, 0);
/* EAGAIN just means the bsf needs more input; not a failure. */
903 if (ret == AVERROR(EAGAIN))
/* No bitstream filter configured: write the packet straight through. */
906 write_packet(of, pkt, ost, 0);
909 if (ret < 0 && ret != AVERROR_EOF) {
910 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
911 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/*
 * Return whether ost may still emit output under the file's -t limit;
 * closes the stream and returns 0 once recording_time is exceeded.
 */
917 static int check_recording_time(OutputStream *ost)
919 OutputFile *of = output_files[ost->file_index];
921 if (of->recording_time != INT64_MAX &&
922 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
923 AV_TIME_BASE_Q) >= 0) {
924 close_output_stream(ost);
/*
 * Rescale a filtered frame's pts from the buffersink time base to the
 * encoder time base, subtracting the output file start time. Returns the
 * pts as a double with extra sub-tick precision (for fps/vsync decisions)
 * and also rewrites frame->pts in encoder units.
 */
930 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
933 double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
934 AVCodecContext *enc = ost->enc_ctx;
935 if (!frame || frame->pts == AV_NOPTS_VALUE ||
936 !enc || !ost->filter || !ost->filter->graph->graph)
940 AVFilterContext *filter = ost->filter->filter;
942 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
943 AVRational filter_tb = av_buffersink_get_time_base(filter);
944 AVRational tb = enc->time_base;
/* Temporarily scale the time base denominator up so the rescale keeps
 * fractional precision, then divide it back out into the double. */
945 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
947 tb.den <<= extra_bits;
949 av_rescale_q(frame->pts, filter_tb, tb) -
950 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
951 float_pts /= 1 << extra_bits;
952 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
953 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* Integer pts for the frame itself, in plain encoder time base units. */
956 av_rescale_q(frame->pts, filter_tb, enc->time_base) -
957 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
963 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
964 frame ? av_ts2str(frame->pts) : "NULL",
965 frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
967 enc ? enc->time_base.num : -1,
968 enc ? enc->time_base.den : -1);
974 static int init_output_stream(OutputStream *ost, AVFrame *frame,
975 char *error, int error_len);
/*
 * Idempotent wrapper around init_output_stream(): returns immediately if
 * the stream is already initialized, otherwise initializes it and logs any
 * failure with the stream identity.
 */
977 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
980 int ret = AVERROR_BUG;
981 char error[1024] = {0};
983 if (ost->initialized)
986 ret = init_output_stream(ost, frame, error, sizeof(error));
988 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
989 ost->file_index, ost->index, error);
/*
 * Encode one audio frame for ost and push the resulting packet(s) to the
 * muxer. Maintains ost->sync_opts as the running sample count so a frame
 * without a pts (or with async disabled) gets a synthesized timestamp.
 */
998 static void do_audio_out(OutputFile *of, OutputStream *ost,
1001 AVCodecContext *enc = ost->enc_ctx;
1005 av_init_packet(&pkt);
1009 adjust_frame_pts_to_encoder_tb(of, ost, frame);
1011 if (!check_recording_time(ost))
/* Fall back to the sample-count clock when no pts is available. */
1014 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1015 frame->pts = ost->sync_opts;
1016 ost->sync_opts = frame->pts + frame->nb_samples;
1017 ost->samples_encoded += frame->nb_samples;
1018 ost->frames_encoded++;
1020 av_assert0(pkt.size || !pkt.data);
1021 update_benchmark(NULL);
1023 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1024 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1025 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1026 enc->time_base.num, enc->time_base.den);
/* send/receive encode loop: one frame in, zero or more packets out. */
1029 ret = avcodec_send_frame(enc, frame);
1034 ret = avcodec_receive_packet(enc, &pkt);
1035 if (ret == AVERROR(EAGAIN))
1040 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1042 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1045 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1046 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1047 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1048 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1051 output_packet(of, &pkt, ost, 0);
1056 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/*
 * Encode one subtitle for ost and write the resulting packet(s). DVB
 * subtitles are encoded twice (one packet draws, the second clears), which
 * is what the `nb` loop below implements.
 */
1060 static void do_subtitle_out(OutputFile *of,
1064 int subtitle_out_max_size = 1024 * 1024;
1065 int subtitle_out_size, nb, i;
1066 AVCodecContext *enc;
1070 if (sub->pts == AV_NOPTS_VALUE) {
1071 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer (freed in ffmpeg_cleanup). */
1079 if (!subtitle_out) {
1080 subtitle_out = av_malloc(subtitle_out_max_size);
1081 if (!subtitle_out) {
1082 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1087 /* Note: DVB subtitle need one packet to draw them and one other
1088 packet to clear them */
1089 /* XXX: signal it in the codec context ? */
1090 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1095 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1097 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1098 pts -= output_files[ost->file_index]->start_time;
1099 for (i = 0; i < nb; i++) {
/* num_rects is temporarily altered for the DVB "clear" pass; keep a copy. */
1100 unsigned save_num_rects = sub->num_rects;
1102 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1103 if (!check_recording_time(ost))
1107 // start_display_time is required to be 0
1108 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1109 sub->end_display_time -= sub->start_display_time;
1110 sub->start_display_time = 0;
1114 ost->frames_encoded++;
1116 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1117 subtitle_out_max_size, sub);
1119 sub->num_rects = save_num_rects;
1120 if (subtitle_out_size < 0) {
1121 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1125 av_init_packet(&pkt);
1126 pkt.data = subtitle_out;
1127 pkt.size = subtitle_out_size;
1128 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1129 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1130 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1131 /* XXX: the pts correction is handled here. Maybe handling
1132 it in the codec would be better */
/* First (draw) packet starts at start_display_time, second (clear) at end. */
1134 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1136 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1139 output_packet(of, &pkt, ost, 0);
1143 static void do_video_out(OutputFile *of,
1145 AVFrame *next_picture)
1147 int ret, format_video_sync;
1149 AVCodecContext *enc = ost->enc_ctx;
1150 AVRational frame_rate;
1151 int nb_frames, nb0_frames, i;
1152 double delta, delta0;
1153 double duration = 0;
1154 double sync_ipts = AV_NOPTS_VALUE;
1156 InputStream *ist = NULL;
1157 AVFilterContext *filter = ost->filter->filter;
1159 init_output_stream_wrapper(ost, next_picture, 1);
1160 sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1162 if (ost->source_index >= 0)
1163 ist = input_streams[ost->source_index];
1165 frame_rate = av_buffersink_get_frame_rate(filter);
1166 if (frame_rate.num > 0 && frame_rate.den > 0)
1167 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1169 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1170 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1172 if (!ost->filters_script &&
1174 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1177 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1178 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1181 if (!next_picture) {
1183 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1184 ost->last_nb0_frames[1],
1185 ost->last_nb0_frames[2]);
1187 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1188 delta = delta0 + duration;
1190 /* by default, we output a single frame */
1191 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1194 format_video_sync = video_sync_method;
1195 if (format_video_sync == VSYNC_AUTO) {
1196 if(!strcmp(of->ctx->oformat->name, "avi")) {
1197 format_video_sync = VSYNC_VFR;
1199 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1201 && format_video_sync == VSYNC_CFR
1202 && input_files[ist->file_index]->ctx->nb_streams == 1
1203 && input_files[ist->file_index]->input_ts_offset == 0) {
1204 format_video_sync = VSYNC_VSCFR;
1206 if (format_video_sync == VSYNC_CFR && copy_ts) {
1207 format_video_sync = VSYNC_VSCFR;
1210 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1214 format_video_sync != VSYNC_PASSTHROUGH &&
1215 format_video_sync != VSYNC_DROP) {
1216 if (delta0 < -0.6) {
1217 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1219 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1220 sync_ipts = ost->sync_opts;
1225 switch (format_video_sync) {
1227 if (ost->frame_number == 0 && delta0 >= 0.5) {
1228 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1231 ost->sync_opts = llrint(sync_ipts);
1234 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1235 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1237 } else if (delta < -1.1)
1239 else if (delta > 1.1) {
1240 nb_frames = lrintf(delta);
1242 nb0_frames = llrintf(delta0 - 0.6);
1248 else if (delta > 0.6)
1249 ost->sync_opts = llrint(sync_ipts);
1252 case VSYNC_PASSTHROUGH:
1253 ost->sync_opts = llrint(sync_ipts);
1260 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1261 nb0_frames = FFMIN(nb0_frames, nb_frames);
1263 memmove(ost->last_nb0_frames + 1,
1264 ost->last_nb0_frames,
1265 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1266 ost->last_nb0_frames[0] = nb0_frames;
1268 if (nb0_frames == 0 && ost->last_dropped) {
1270 av_log(NULL, AV_LOG_VERBOSE,
1271 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1272 ost->frame_number, ost->st->index, ost->last_frame->pts);
1274 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1275 if (nb_frames > dts_error_threshold * 30) {
1276 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1280 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1281 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1282 if (nb_frames_dup > dup_warning) {
1283 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1287 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1289 /* duplicates frame if needed */
1290 for (i = 0; i < nb_frames; i++) {
1291 AVFrame *in_picture;
1292 int forced_keyframe = 0;
1294 av_init_packet(&pkt);
1298 if (i < nb0_frames && ost->last_frame) {
1299 in_picture = ost->last_frame;
1301 in_picture = next_picture;
1306 in_picture->pts = ost->sync_opts;
1308 if (!check_recording_time(ost))
1311 in_picture->quality = enc->global_quality;
1312 in_picture->pict_type = 0;
1314 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1315 in_picture->pts != AV_NOPTS_VALUE)
1316 ost->forced_kf_ref_pts = in_picture->pts;
1318 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1319 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1320 if (ost->forced_kf_index < ost->forced_kf_count &&
1321 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1322 ost->forced_kf_index++;
1323 forced_keyframe = 1;
1324 } else if (ost->forced_keyframes_pexpr) {
1326 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1327 res = av_expr_eval(ost->forced_keyframes_pexpr,
1328 ost->forced_keyframes_expr_const_values, NULL);
1329 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1330 ost->forced_keyframes_expr_const_values[FKF_N],
1331 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1332 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1333 ost->forced_keyframes_expr_const_values[FKF_T],
1334 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1337 forced_keyframe = 1;
1338 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1339 ost->forced_keyframes_expr_const_values[FKF_N];
1340 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1341 ost->forced_keyframes_expr_const_values[FKF_T];
1342 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1345 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1346 } else if ( ost->forced_keyframes
1347 && !strncmp(ost->forced_keyframes, "source", 6)
1348 && in_picture->key_frame==1
1350 forced_keyframe = 1;
1353 if (forced_keyframe) {
1354 in_picture->pict_type = AV_PICTURE_TYPE_I;
1355 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1358 update_benchmark(NULL);
1360 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1361 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1362 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1363 enc->time_base.num, enc->time_base.den);
1366 ost->frames_encoded++;
1368 ret = avcodec_send_frame(enc, in_picture);
1371 // Make sure Closed Captions will not be duplicated
1372 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1375 ret = avcodec_receive_packet(enc, &pkt);
1376 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1377 if (ret == AVERROR(EAGAIN))
1383 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1384 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1385 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1386 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1389 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1390 pkt.pts = ost->sync_opts;
1392 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1395 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1396 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1397 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1398 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1401 frame_size = pkt.size;
1402 output_packet(of, &pkt, ost, 0);
1404 /* if two pass, output log */
1405 if (ost->logfile && enc->stats_out) {
1406 fprintf(ost->logfile, "%s", enc->stats_out);
1411 * For video, number of frames in == number of packets out.
1412 * But there may be reordering, so we can't throw away frames on encoder
1413 * flush, we need to limit them here, before they go into encoder.
1415 ost->frame_number++;
1417 if (vstats_filename && frame_size)
1418 do_video_stats(ost, frame_size);
1421 if (!ost->last_frame)
1422 ost->last_frame = av_frame_alloc();
1423 av_frame_unref(ost->last_frame);
1424 if (next_picture && ost->last_frame)
1425 av_frame_ref(ost->last_frame, next_picture);
1427 av_frame_free(&ost->last_frame);
1431 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* Convert a normalized mean-squared-error ratio (error/scale, in [0,1])
 * into PSNR in decibels. Callers pass error already divided by the
 * per-pixel scale (width*height*255^2). */
1435 static double psnr(double d)
1437 return -10.0 * log10(d);
/* Append one per-frame statistics line for a video output stream to the
 * global vstats file: quality (q), optional PSNR, frame size, elapsed
 * output time, instantaneous and average bitrate, and picture type.
 * NOTE(review): the chunk is gap-sampled; the lazy fopen() of vstats_file
 * and the assignment of `enc` happen on lines not visible here. */
1440 static void do_video_stats(OutputStream *ost, int frame_size)
1442 AVCodecContext *enc;
1444 double ti1, bitrate, avg_bitrate;
1446 /* this is executed just the first time do_video_stats is called */
1448 vstats_file = fopen(vstats_filename, "w");
1456 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1457 frame_number = ost->st->nb_frames;
/* vstats_version 1 keeps the historical line format; newer versions
 * additionally print the output file and stream indices. */
1458 if (vstats_version <= 1) {
1459 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1460 ost->quality / (float)FF_QP2LAMBDA);
1462 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1463 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is derived from the accumulated squared error, normalized per pixel
 * (255^2 per sample); only printed when the encoder collected error stats. */
1466 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1467 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1469 fprintf(vstats_file,"f_size= %6d ", frame_size);
1470 /* compute pts value */
1471 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate treats this frame as lasting one encoder
 * time_base tick; avg_bitrate uses total bytes over elapsed time ti1. */
1475 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1476 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1477 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1478 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1479 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark this output stream — and every other stream belonging to the same
 * output file — as finished for both the encoder and the muxer, so no
 * further packets are produced or written for that file. */
1483 static void finish_output_stream(OutputStream *ost)
1485 OutputFile *of = output_files[ost->file_index];
1488 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1491 for (i = 0; i < of->ctx->nb_streams; i++)
1492 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1497 * Get and encode new output from any of the filtergraphs, without causing
1500 * @return 0 for success, <0 for severe errors
1502 static int reap_filters(int flush)
1504 AVFrame *filtered_frame = NULL;
1507 /* Reap all buffers present in the buffer sinks */
1508 for (i = 0; i < nb_output_streams; i++) {
1509 OutputStream *ost = output_streams[i];
1510 OutputFile *of = output_files[ost->file_index];
1511 AVFilterContext *filter;
1512 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph is not (yet) configured. */
1515 if (!ost->filter || !ost->filter->graph->graph)
1517 filter = ost->filter->filter;
1520 * Unlike video, with audio the audio frame size matters.
1521 * Currently we are fully reliant on the lavfi filter chain to
1522 * do the buffering deed for us, and thus the frame size parameter
1523 * needs to be set accordingly. Where does one get the required
1524 * frame size? From the initialized AVCodecContext of an audio
1525 * encoder. Thus, if we have gotten to an audio stream, initialize
1526 * the encoder earlier than receiving the first AVFrame.
1528 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
1529 init_output_stream_wrapper(ost, NULL, 1);
/* Lazily allocate a reusable frame for this stream's filtered output. */
1531 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1532 return AVERROR(ENOMEM);
1534 filtered_frame = ost->filtered_frame;
/* NO_REQUEST: only drain frames already buffered in the sink; do not
 * trigger more filtering work. */
1537 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1538 AV_BUFFERSINK_FLAG_NO_REQUEST);
1540 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1541 av_log(NULL, AV_LOG_WARNING,
1542 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* On EOF while flushing, push a NULL frame so video sync can emit
 * any final duplicated/buffered frames. */
1543 } else if (flush && ret == AVERROR_EOF) {
1544 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1545 do_video_out(of, ost, NULL);
1549 if (ost->finished) {
1550 av_frame_unref(filtered_frame);
1554 switch (av_buffersink_get_type(filter)) {
1555 case AVMEDIA_TYPE_VIDEO:
/* Propagate SAR from the filter output unless the user forced one. */
1556 if (!ost->frame_aspect_ratio.num)
1557 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1559 do_video_out(of, ost, filtered_frame);
1561 case AVMEDIA_TYPE_AUDIO:
1562 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1563 enc->channels != filtered_frame->channels) {
1564 av_log(NULL, AV_LOG_ERROR,
1565 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1568 do_audio_out(of, ost, filtered_frame);
1571 // TODO support subtitle filters
1575 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: total bytes per media type, muxing
 * overhead percentage, and verbose per-input / per-output stream packet
 * and frame counts. Warns when nothing was encoded at all.
 * @param total_size  total bytes written to the output (as reported by avio). */
1582 static void print_final_stats(int64_t total_size)
1584 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1585 uint64_t subtitle_size = 0;
1586 uint64_t data_size = 0;
/* percent stays -1.0 (printed as "unknown") when overhead can't be computed. */
1587 float percent = -1.0;
/* Accumulate written bytes per codec type across all output streams. */
1591 for (i = 0; i < nb_output_streams; i++) {
1592 OutputStream *ost = output_streams[i];
1593 switch (ost->enc_ctx->codec_type) {
1594 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1595 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1596 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1597 default: other_size += ost->data_size; break;
1599 extra_size += ost->enc_ctx->extradata_size;
1600 data_size += ost->data_size;
1601 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1602 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead: container bytes beyond the raw stream payload. */
1606 if (data_size && total_size>0 && total_size >= data_size)
1607 percent = 100.0 * (total_size - data_size) / data_size;
1609 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1610 video_size / 1024.0,
1611 audio_size / 1024.0,
1612 subtitle_size / 1024.0,
1613 other_size / 1024.0,
1614 extra_size / 1024.0);
1616 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1618 av_log(NULL, AV_LOG_INFO, "unknown");
1619 av_log(NULL, AV_LOG_INFO, "\n");
1621 /* print verbose per-stream stats */
1622 for (i = 0; i < nb_input_files; i++) {
1623 InputFile *f = input_files[i];
/* Note: this local total_size shadows the parameter inside the loop. */
1624 uint64_t total_packets = 0, total_size = 0;
1626 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1629 for (j = 0; j < f->nb_streams; j++) {
1630 InputStream *ist = input_streams[f->ist_index + j];
1631 enum AVMediaType type = ist->dec_ctx->codec_type;
1633 total_size += ist->data_size;
1634 total_packets += ist->nb_packets;
1636 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1637 i, j, media_type_string(type));
1638 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1639 ist->nb_packets, ist->data_size);
1641 if (ist->decoding_needed) {
1642 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1643 ist->frames_decoded);
1644 if (type == AVMEDIA_TYPE_AUDIO)
1645 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1646 av_log(NULL, AV_LOG_VERBOSE, "; ");
1649 av_log(NULL, AV_LOG_VERBOSE, "\n");
1652 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1653 total_packets, total_size);
/* Same summary, now for every output file and its streams. */
1656 for (i = 0; i < nb_output_files; i++) {
1657 OutputFile *of = output_files[i];
1658 uint64_t total_packets = 0, total_size = 0;
1660 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1663 for (j = 0; j < of->ctx->nb_streams; j++) {
1664 OutputStream *ost = output_streams[of->ost_index + j];
1665 enum AVMediaType type = ost->enc_ctx->codec_type;
1667 total_size += ost->data_size;
1668 total_packets += ost->packets_written;
1670 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1671 i, j, media_type_string(type));
1672 if (ost->encoding_needed) {
1673 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1674 ost->frames_encoded);
1675 if (type == AVMEDIA_TYPE_AUDIO)
1676 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1677 av_log(NULL, AV_LOG_VERBOSE, "; ");
1680 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1681 ost->packets_written, ost->data_size);
1683 av_log(NULL, AV_LOG_VERBOSE, "\n");
1686 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1687 total_packets, total_size);
/* Nothing at all written: warn, possibly pointing at trimming options. */
1689 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1690 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1692 av_log(NULL, AV_LOG_WARNING, "\n");
1694 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Periodic (and final) progress report: builds a human-readable status
 * line (frame/fps/q/size/time/bitrate/speed) in `buf` and a parallel
 * machine-readable key=value block in `buf_script` for -progress output.
 * Rate-limited by stats_period between reports; on the last report it
 * also emits the final statistics via print_final_stats().
 * @param is_last_report  non-zero for the final report at end of run
 * @param timer_start     wall-clock start time in microseconds
 * @param cur_time        current wall-clock time in microseconds */
1699 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1701 AVBPrint buf, buf_script;
1703 AVFormatContext *oc;
1705 AVCodecContext *enc;
1706 int frame_number, vid, i;
/* pts tracks the maximum end timestamp across streams; INT64_MIN+1
 * avoids overflow when FFABS() is applied later. */
1709 int64_t pts = INT64_MIN + 1;
/* Static state persists across calls to implement the report interval. */
1710 static int64_t last_time = -1;
1711 static int first_report = 1;
1712 static int qp_histogram[52];
1713 int hours, mins, secs, us;
1714 const char *hours_sign;
/* Nothing to do if neither console stats nor -progress output is wanted. */
1718 if (!print_stats && !is_last_report && !progress_avio)
1721 if (!is_last_report) {
1722 if (last_time == -1) {
1723 last_time = cur_time;
/* Throttle: skip until stats_period elapsed, and delay the very first
 * report until all output headers have been written. */
1725 if (((cur_time - last_time) < stats_period && !first_report) ||
1726 (first_report && nb_output_dumped < nb_output_files))
1728 last_time = cur_time;
1731 t = (cur_time-timer_start) / 1000000.0;
1734 oc = output_files[0]->ctx;
1736 total_size = avio_size(oc->pb);
1737 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1738 total_size = avio_tell(oc->pb);
1741 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1742 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1743 for (i = 0; i < nb_output_streams; i++) {
1745 ost = output_streams[i];
1747 if (!ost->stream_copy)
1748 q = ost->quality / (float) FF_QP2LAMBDA;
/* Second and later video streams only contribute their q value. */
1750 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1751 av_bprintf(&buf, "q=%2.1f ", q);
1752 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1753 ost->file_index, ost->index, q);
/* First video stream drives the frame/fps columns. */
1755 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1758 frame_number = ost->frame_number;
1759 fps = t > 1 ? frame_number / t : 0;
1760 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1761 frame_number, fps < 9.95, fps, q);
1762 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1763 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1764 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1765 ost->file_index, ost->index, q);
1767 av_bprintf(&buf, "L");
/* qp_histogram: per-QP frame counts, printed as log2-compressed hex digits. */
1771 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1773 for (j = 0; j < 32; j++)
1774 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1777 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1779 double error, error_sum = 0;
1780 double scale, scale_sum = 0;
1782 char type[3] = { 'Y','U','V' };
1783 av_bprintf(&buf, "PSNR=");
1784 for (j = 0; j < 3; j++) {
/* Final report uses cumulative encoder error over all frames;
 * periodic reports use the last frame's per-plane error. */
1785 if (is_last_report) {
1786 error = enc->error[j];
1787 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1789 error = ost->error[j];
1790 scale = enc->width * enc->height * 255.0 * 255.0;
1796 p = psnr(error / scale);
1797 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1798 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1799 ost->file_index, ost->index, type[j] | 32, p);
1801 p = psnr(error_sum / scale_sum);
1802 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1803 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1804 ost->file_index, ost->index, p);
1808 /* compute min output value */
1809 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
1810 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1811 ost->st->time_base, AV_TIME_BASE_Q));
/* With -copyts, report time relative to the first observed pts. */
1813 if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1814 copy_ts_first_pts = pts;
1815 if (copy_ts_first_pts != AV_NOPTS_VALUE)
1816 pts -= copy_ts_first_pts;
1821 nb_frames_drop += ost->last_dropped;
/* Split |pts| (microseconds, AV_TIME_BASE units) into h:m:s.us fields. */
1824 secs = FFABS(pts) / AV_TIME_BASE;
1825 us = FFABS(pts) % AV_TIME_BASE;
1830 hours_sign = (pts < 0) ? "-" : "";
/* -1 acts as the "unknown" sentinel for bitrate and speed below. */
1832 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1833 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1835 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1836 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1837 if (pts == AV_NOPTS_VALUE) {
1838 av_bprintf(&buf, "N/A ");
1840 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1841 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1845 av_bprintf(&buf, "bitrate=N/A");
1846 av_bprintf(&buf_script, "bitrate=N/A\n");
1848 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1849 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1852 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1853 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1854 if (pts == AV_NOPTS_VALUE) {
1855 av_bprintf(&buf_script, "out_time_us=N/A\n");
1856 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1857 av_bprintf(&buf_script, "out_time=N/A\n");
1859 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1860 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1861 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1862 hours_sign, hours, mins, secs, us);
1865 if (nb_frames_dup || nb_frames_drop)
1866 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1867 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1868 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1871 av_bprintf(&buf, " speed=N/A");
1872 av_bprintf(&buf_script, "speed=N/A\n");
1874 av_bprintf(&buf, " speed=%4.3gx", speed);
1875 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one console line; '\n' ends the run. */
1878 if (print_stats || is_last_report) {
1879 const char end = is_last_report ? '\n' : '\r';
1880 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1881 fprintf(stderr, "%s %c", buf.str, end);
1883 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1887 av_bprint_finalize(&buf, NULL);
/* Emit the machine-readable block to the -progress destination. */
1889 if (progress_avio) {
1890 av_bprintf(&buf_script, "progress=%s\n",
1891 is_last_report ? "end" : "continue");
1892 avio_write(progress_avio, buf_script.str,
1893 FFMIN(buf_script.len, buf_script.size - 1));
1894 avio_flush(progress_avio);
1895 av_bprint_finalize(&buf_script, NULL);
1896 if (is_last_report) {
1897 if ((ret = avio_closep(&progress_avio)) < 0)
1898 av_log(NULL, AV_LOG_ERROR,
1899 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1906 print_final_stats(total_size);
/* Seed an InputFilter's media parameters from the stream's codec
 * parameters, used when no frame was ever decoded so the real format
 * could not be probed. Copies audio fields (format/rate/channels/layout)
 * and video fields (width/height/SAR) alike; irrelevant ones are simply
 * whatever codecpar holds for the other media type. */
1909 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1911 // We never got any input. Set a fake format, which will
1912 // come from libavformat.
1913 ifilter->format = par->format;
1914 ifilter->sample_rate = par->sample_rate;
1915 ifilter->channels = par->channels;
1916 ifilter->channel_layout = par->channel_layout;
1917 ifilter->width = par->width;
1918 ifilter->height = par->height;
1919 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/* Drain all encoders at end of stream: for every encoding output stream,
 * send a NULL (flush) frame and mux the remaining delayed packets until
 * the encoder signals EOF. Streams that never received data get their
 * filtergraph configured from codecpar fallbacks so the encoder can at
 * least be initialized before finishing. */
1922 static void flush_encoders(void)
1926 for (i = 0; i < nb_output_streams; i++) {
1927 OutputStream *ost = output_streams[i];
1928 AVCodecContext *enc = ost->enc_ctx;
1929 OutputFile *of = output_files[ost->file_index];
1931 if (!ost->encoding_needed)
1934 // Try to enable encoding with no input frames.
1935 // Maybe we should just let encoding fail instead.
1936 if (!ost->initialized) {
1937 FilterGraph *fg = ost->filter->graph;
1939 av_log(NULL, AV_LOG_WARNING,
1940 "Finishing stream %d:%d without any data written to it.\n",
1941 ost->file_index, ost->st->index);
1943 if (ost->filter && !fg->graph) {
/* Fill in any still-unknown input formats from codecpar so the
 * graph can be configured without ever having seen a frame. */
1945 for (x = 0; x < fg->nb_inputs; x++) {
1946 InputFilter *ifilter = fg->inputs[x];
1947 if (ifilter->format < 0)
1948 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1951 if (!ifilter_has_all_input_formats(fg))
1954 ret = configure_filtergraph(fg);
1956 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1960 finish_output_stream(ost);
1963 init_output_stream_wrapper(ost, NULL, 1);
/* Only video and audio encoders need draining here. */
1966 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1970 const char *desc = NULL;
1974 switch (enc->codec_type) {
1975 case AVMEDIA_TYPE_AUDIO:
1978 case AVMEDIA_TYPE_VIDEO:
1985 av_init_packet(&pkt);
1989 update_benchmark(NULL);
/* EAGAIN from receive_packet means the encoder wants (flush) input:
 * send the NULL frame, then retry receiving. */
1991 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1992 ret = avcodec_send_frame(enc, NULL);
1994 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2001 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2002 if (ret < 0 && ret != AVERROR_EOF) {
2003 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Flush two-pass log data, if any. */
2008 if (ost->logfile && enc->stats_out) {
2009 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: tell the muxer this stream is done (eof flag set on output_packet). */
2011 if (ret == AVERROR_EOF) {
2012 output_packet(of, &pkt, ost, 1);
2015 if (ost->finished & MUXER_FINISHED) {
2016 av_packet_unref(&pkt);
2019 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
2020 pkt_size = pkt.size;
2021 output_packet(of, &pkt, ost, 0);
2022 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
2023 do_video_stats(ost, pkt_size);
2030 * Check whether a packet from ist should be written into ost at this time
2032 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2034 OutputFile *of = output_files[ost->file_index];
/* Global index of the input stream, across all input files. */
2035 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* Reject packets from streams this output does not copy from. */
2037 if (ost->source_index != ist_index)
/* Reject packets before the requested output start time (-ss on output). */
2043 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Stream-copy path: forward an input packet to the output without
 * re-encoding. Handles EOF flushing of bitstream filters, the initial
 * non-keyframe skip, -copypriorss / recording-time windows, and rescales
 * pts/dts/duration from the input stream time base into the muxer's.
 * @param pkt  the demuxed packet, or NULL at EOF (flush). */
2049 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2051 OutputFile *of = output_files[ost->file_index];
2052 InputFile *f = input_files [ist->file_index];
2053 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2054 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2057 // EOF: flush output bitstream filters.
2059 av_init_packet(&opkt);
2062 output_packet(of, &opkt, ost, 1);
/* Drop everything until the first keyframe unless the user asked to
 * copy initial non-keyframes (copying them would be undecodable). */
2066 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2067 !ost->copy_initial_nonkeyframes)
/* Drop packets that lie before the effective output start point. */
2070 if (!ost->frame_number && !ost->copy_prior_start) {
2071 int64_t comp_start = start_time;
2072 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2073 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2074 if (pkt->pts == AV_NOPTS_VALUE ?
2075 ist->pts < comp_start :
2076 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop the output stream once its -t / recording_time window is exceeded. */
2080 if (of->recording_time != INT64_MAX &&
2081 ist->pts >= of->recording_time + start_time) {
2082 close_output_stream(ost);
2086 if (f->recording_time != INT64_MAX) {
2087 start_time = f->ctx->start_time;
2088 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2089 start_time += f->start_time;
2090 if (ist->pts >= f->recording_time + start_time) {
2091 close_output_stream(ost);
2096 /* force the input stream PTS */
2097 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2100 if (av_packet_ref(&opkt, pkt) < 0)
2103 if (pkt->pts != AV_NOPTS_VALUE)
2104 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2106 if (pkt->dts == AV_NOPTS_VALUE) {
2107 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
/* Audio dts uses av_rescale_delta to avoid rounding drift across
 * packets of fixed sample counts. */
2108 } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2109 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2111 duration = ist->dec_ctx->frame_size;
2112 opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2113 (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2114 &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2115 /* dts will be set immediately afterwards to what pts is now */
2116 opkt.pts = opkt.dts - ost_tb_start_time;
2118 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2119 opkt.dts -= ost_tb_start_time;
2121 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2123 output_packet(of, &opkt, ost, 0);
/* If the decoder reported no channel layout, pick the default layout for
 * its channel count (capped by -guess_layout_max) and log the guess.
 * NOTE(review): the return statements fall on lines not visible in this
 * gap-sampled chunk; it appears to return success/failure to the caller. */
2126 int guess_input_channel_layout(InputStream *ist)
2128 AVCodecContext *dec = ist->dec_ctx;
2130 if (!dec->channel_layout) {
2131 char layout_name[256];
/* Refuse to guess for channel counts above the user-set limit. */
2133 if (dec->channels > ist->guess_layout_max)
2135 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2136 if (!dec->channel_layout)
2138 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2139 dec->channels, dec->channel_layout);
2140 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2141 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode success/failure statistics and react to decode errors:
 * bumps the global decode_error_stat counters, aborts on error when
 * -xerror is set, and warns (or aborts) on corrupt decoded frames. */
2146 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* Index 0 counts successful decodes, index 1 counts failures. */
2148 if (*got_output || ret<0)
2149 decode_error_stat[ret<0] ++;
2151 if (ret < 0 && exit_on_error)
2154 if (*got_output && ist) {
2155 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2156 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2157 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2164 // Filters can be configured only if the formats of all inputs are known.
/* Returns whether every audio/video input of the graph has a known format
 * (format >= 0); inputs of other media types are not required. */
2165 static int ifilter_has_all_input_formats(FilterGraph *fg)
2168 for (i = 0; i < fg->nb_inputs; i++) {
2169 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2170 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Push one decoded frame into a filtergraph input. Detects parameter
 * changes (format, audio rate/channels/layout, video size, hw frames
 * context) that require reconfiguring the graph; if the graph cannot be
 * (re)configured yet because other inputs' formats are unknown, the frame
 * is queued in the ifilter's FIFO instead.
 * @return 0 on success, a negative AVERROR otherwise. */
2176 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2178 FilterGraph *fg = ifilter->graph;
2179 int need_reinit, ret, i;
2181 /* determine if the parameters for this input changed */
2182 need_reinit = ifilter->format != frame->format;
2184 switch (ifilter->ist->st->codecpar->codec_type) {
2185 case AVMEDIA_TYPE_AUDIO:
2186 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2187 ifilter->channels != frame->channels ||
2188 ifilter->channel_layout != frame->channel_layout;
2190 case AVMEDIA_TYPE_VIDEO:
2191 need_reinit |= ifilter->width != frame->width ||
2192 ifilter->height != frame->height;
/* User disabled reinit (-reinit_filter 0): keep the existing graph. */
2196 if (!ifilter->ist->reinit_filters && fg->graph)
/* A change in (or of) the hardware frames context also forces a reinit. */
2199 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2200 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2204 ret = ifilter_parameters_from_frame(ifilter, frame);
2209 /* (re)init the graph if possible, otherwise buffer the frame and return */
2210 if (need_reinit || !fg->graph) {
2211 for (i = 0; i < fg->nb_inputs; i++) {
2212 if (!ifilter_has_all_input_formats(fg)) {
/* Clone the frame so the caller's copy can be unreffed; grow the
 * FIFO when it is full before queuing the clone. */
2213 AVFrame *tmp = av_frame_clone(frame);
2215 return AVERROR(ENOMEM);
2216 av_frame_unref(frame);
2218 if (!av_fifo_space(ifilter->frame_queue)) {
2219 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2221 av_frame_free(&tmp);
2225 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain pending output from the old graph before reconfiguring. */
2230 ret = reap_filters(1);
2231 if (ret < 0 && ret != AVERROR_EOF) {
2232 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2236 ret = configure_filtergraph(fg);
2238 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* PUSH: process the frame through the graph immediately. */
2243 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2245 if (ret != AVERROR_EOF)
2246 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/* Signal end-of-stream on a filtergraph input at timestamp pts. If the
 * graph was never configured, fall back to codecpar-derived parameters;
 * failing that for audio/video inputs, the format is unknowable and an
 * error is returned. */
2253 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2259 if (ifilter->filter) {
2260 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2264 // the filtergraph was never configured
2265 if (ifilter->format < 0)
2266 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2267 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2268 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2269 return AVERROR_INVALIDDATA;
2276 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2277 // There is the following difference: if you got a frame, you must call
2278 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2279 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin wrapper over the send/receive decode API that preserves the old
 * got_frame-style calling convention used throughout this file. */
2280 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2287 ret = avcodec_send_packet(avctx, pkt);
2288 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2289 // decoded frames with avcodec_receive_frame() until done.
/* EOF from send_packet (already flushed) is tolerated; other errors are not. */
2290 if (ret < 0 && ret != AVERROR_EOF)
2294 ret = avcodec_receive_frame(avctx, frame);
2295 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Distribute one decoded frame to every filtergraph input fed by this
 * input stream. All but the last filter receive a reference (so the frame
 * survives being consumed); EOF from a filter is ignored, other errors
 * are logged and returned. */
2303 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2308 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2309 for (i = 0; i < ist->nb_filters; i++) {
/* For every filter except the last, send a ref via the scratch frame;
 * the last filter consumes decoded_frame itself. */
2310 if (i < ist->nb_filters - 1) {
2311 f = ist->filter_frame;
2312 ret = av_frame_ref(f, decoded_frame);
2317 ret = ifilter_send_frame(ist->filters[i], f);
2318 if (ret == AVERROR_EOF)
2319 ret = 0; /* ignore */
2321 av_log(NULL, AV_LOG_ERROR,
2322 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet (or drain), validate the sample rate, advance
 * the stream's predicted next pts/dts by the decoded duration, assign a
 * pts and time base to the frame (preferring the frame's own pts, then
 * the packet pts, then the stream dts), rescale it to the decoder sample
 * rate, and feed the frame into the stream's filtergraphs.
 * @param got_output  set by decode() when a frame was produced. */
2329 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2332 AVFrame *decoded_frame;
2333 AVCodecContext *avctx = ist->dec_ctx;
2335 AVRational decoded_frame_tb;
/* Lazily allocate the per-stream reusable frames. */
2337 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2338 return AVERROR(ENOMEM);
2339 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2340 return AVERROR(ENOMEM);
2341 decoded_frame = ist->decoded_frame;
2343 update_benchmark(NULL);
2344 ret = decode(avctx, decoded_frame, got_output, pkt);
2345 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2349 if (ret >= 0 && avctx->sample_rate <= 0) {
2350 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2351 ret = AVERROR_INVALIDDATA;
2354 if (ret != AVERROR_EOF)
2355 check_decode_result(ist, got_output, ret);
2357 if (!*got_output || ret < 0)
2360 ist->samples_decoded += decoded_frame->nb_samples;
2361 ist->frames_decoded++;
2363 /* increment next_dts to use for the case where the input stream does not
2364 have timestamps or there are multiple frames in the packet */
2365 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2367 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Choose the best available timestamp and remember which time base
 * it is expressed in. */
2370 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2371 decoded_frame_tb = ist->st->time_base;
2372 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2373 decoded_frame->pts = pkt->pts;
2374 decoded_frame_tb = ist->st->time_base;
2376 decoded_frame->pts = ist->dts;
2377 decoded_frame_tb = AV_TIME_BASE_Q;
/* av_rescale_delta avoids cumulative rounding error when converting
 * successive fixed-size audio frames between time bases. */
2379 if (decoded_frame->pts != AV_NOPTS_VALUE)
2380 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2381 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2382 (AVRational){1, avctx->sample_rate});
2383 ist->nb_samples = decoded_frame->nb_samples;
2384 err = send_frame_to_filters(ist, decoded_frame);
/* Drop our references; filters hold their own. Report the first error. */
2386 av_frame_unref(ist->filter_frame);
2387 av_frame_unref(decoded_frame);
2388 return err < 0 ? err : ret;
// Decode one video packet (pkt == NULL drains the decoder) on ist->dec_ctx
// and, when a frame is produced, derive its timestamp and forward it to the
// attached filtergraphs. *got_output is set by the decoder; *duration_pts
// receives the decoded frame's pkt_duration (stream time base).
// NOTE(review): interior lines are elided in this excerpt; comments describe
// only the visible statements.
2391 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2394 AVFrame *decoded_frame;
2395 int i, ret = 0, err = 0;
2396 int64_t best_effort_timestamp;
2397 int64_t dts = AV_NOPTS_VALUE;
2400 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2401 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2403 if (!eof && pkt && pkt->size == 0)
// Lazily allocate the frames reused across decode calls for this stream.
2406 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2407 return AVERROR(ENOMEM);
2408 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2409 return AVERROR(ENOMEM);
2410 decoded_frame = ist->decoded_frame;
// Convert the global dts estimate (AV_TIME_BASE units) into stream time base.
2411 if (ist->dts != AV_NOPTS_VALUE)
2412 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2415 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2418 // The old code used to set dts on the drain packet, which does not work
2419 // with the new API anymore.
// Queue this dts so it can serve as a fallback timestamp when flushing at EOF.
2421 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2423 return AVERROR(ENOMEM);
2424 ist->dts_buffer = new;
2425 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2428 update_benchmark(NULL);
2429 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2430 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2434 // The following line may be required in some cases where there is no parser
2435 // or the parser does not has_b_frames correctly
2436 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2437 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2438 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2440 av_log(ist->dec_ctx, AV_LOG_WARNING,
2441 "video_delay is larger in decoder than demuxer %d > %d.\n"
2442 "If you want to help, upload a sample "
2443 "of this file to https://streams.videolan.org/upload/ "
2444 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2445 ist->dec_ctx->has_b_frames,
2446 ist->st->codecpar->video_delay);
2449 if (ret != AVERROR_EOF)
2450 check_decode_result(ist, got_output, ret);
// Debug aid: report when the decoder context and the frame disagree on geometry/format.
2452 if (*got_output && ret >= 0) {
2453 if (ist->dec_ctx->width != decoded_frame->width ||
2454 ist->dec_ctx->height != decoded_frame->height ||
2455 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2456 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2457 decoded_frame->width,
2458 decoded_frame->height,
2459 decoded_frame->format,
2460 ist->dec_ctx->width,
2461 ist->dec_ctx->height,
2462 ist->dec_ctx->pix_fmt);
2466 if (!*got_output || ret < 0)
// User override of field order (-top), if any, takes effect here.
2469 if(ist->top_field_first>=0)
2470 decoded_frame->top_field_first = ist->top_field_first;
2472 ist->frames_decoded++;
// Download the frame from hardware when a hwaccel produced it.
2474 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2475 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2479 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2481 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2482 *duration_pts = decoded_frame->pkt_duration;
// In forced-CFR mode the timestamp is simply the next frame counter value.
2484 if (ist->framerate.num)
2485 best_effort_timestamp = ist->cfr_next_pts++;
// At EOF fall back to the oldest queued packet dts; shift the queue down by one.
2487 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2488 best_effort_timestamp = ist->dts_buffer[0];
2490 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2491 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2492 ist->nb_dts_buffer--;
// Note: the assignment to decoded_frame->pts is intentionally embedded here.
2495 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2496 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2498 if (ts != AV_NOPTS_VALUE)
2499 ist->next_pts = ist->pts = ts;
2503 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2504 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2505 ist->st->index, av_ts2str(decoded_frame->pts),
2506 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2507 best_effort_timestamp,
2508 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2509 decoded_frame->key_frame, decoded_frame->pict_type,
2510 ist->st->time_base.num, ist->st->time_base.den);
// Container-level SAR overrides the frame's own value, matching demuxer intent.
2513 if (ist->st->sample_aspect_ratio.num)
2514 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2516 err = send_frame_to_filters(ist, decoded_frame);
2519 av_frame_unref(ist->filter_frame);
2520 av_frame_unref(decoded_frame);
2521 return err < 0 ? err : ret;
// Decode one subtitle packet and dispatch the result: render it for
// sub2video filtergraph consumers and/or re-encode it to every subtitle
// output stream fed by this input. NOTE(review): interior lines are elided
// in this excerpt; comments describe only the visible statements.
2524 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2527 AVSubtitle subtitle;
2529 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2530 &subtitle, got_output, pkt);
2532 check_decode_result(NULL, got_output, ret);
2534 if (ret < 0 || !*got_output) {
2537 sub2video_flush(ist);
// -fix_sub_duration: clamp the previous subtitle's display time so it ends
// when the current one starts, then emit subtitles with a one-packet delay
// via the FFSWAPs below.
2541 if (ist->fix_sub_duration) {
2543 if (ist->prev_sub.got_output) {
2544 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2545 1000, AV_TIME_BASE);
2546 if (end < ist->prev_sub.subtitle.end_display_time) {
2547 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2548 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2549 ist->prev_sub.subtitle.end_display_time, end,
2550 end <= 0 ? ", dropping it" : "");
2551 ist->prev_sub.subtitle.end_display_time = end;
2554 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2555 FFSWAP(int, ret, ist->prev_sub.ret);
2556 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
// sub2video path: render immediately if the frame exists, otherwise queue the
// subtitle (growing the FIFO on demand) until the filtergraph is ready.
2564 if (ist->sub2video.frame) {
2565 sub2video_update(ist, INT64_MIN, &subtitle);
2566 } else if (ist->nb_filters) {
2567 if (!ist->sub2video.sub_queue)
2568 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2569 if (!ist->sub2video.sub_queue)
2571 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2572 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2576 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2580 if (!subtitle.num_rects)
2583 ist->frames_decoded++;
// Re-encode the decoded subtitle to every eligible subtitle output stream.
2585 for (i = 0; i < nb_output_streams; i++) {
2586 OutputStream *ost = output_streams[i];
2588 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2589 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2592 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2597 avsubtitle_free(&subtitle);
// Signal EOF, stamped with the stream's last pts (converted back to stream
// time base), to every filtergraph input fed by this input stream.
2601 static int send_filter_eof(InputStream *ist)
2604 /* TODO keep pts also in stream time base to avoid converting back */
2605 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2606 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2608 for (i = 0; i < ist->nb_filters; i++) {
2609 ret = ifilter_send_eof(ist->filters[i], pts);
/* pkt = NULL means EOF (needed to flush decoder buffers) */
// Main per-packet dispatcher for an input stream: maintains the dts/pts
// estimates, runs the appropriate decoder (audio/video/subtitle) when the
// stream is decoded, or advances timestamps and copies the packet when it is
// stream-copied. Returns non-zero while there is more to process (i.e.
// !eof_reached). NOTE(review): interior lines are elided in this excerpt;
// comments describe only the visible statements.
2617 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2621 int eof_reached = 0;
// First packet: seed ist->dts/pts. For decoded video the start is shifted
// back by the decoder delay (has_b_frames) expressed in AV_TIME_BASE units.
2624 if (!ist->saw_first_ts) {
2625 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2627 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2628 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2629 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2631 ist->saw_first_ts = 1;
2634 if (ist->next_dts == AV_NOPTS_VALUE)
2635 ist->next_dts = ist->dts;
2636 if (ist->next_pts == AV_NOPTS_VALUE)
2637 ist->next_pts = ist->pts;
2641 av_init_packet(&avpkt);
// Resynchronize the running estimates to the packet's own dts when present.
2648 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2649 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2650 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2651 ist->next_pts = ist->pts = ist->dts;
2654 // while we have more to decode or while the decoder did output something on EOF
2655 while (ist->decoding_needed) {
2656 int64_t duration_dts = 0;
2657 int64_t duration_pts = 0;
2659 int decode_failed = 0;
2661 ist->pts = ist->next_pts;
2662 ist->dts = ist->next_dts;
2664 switch (ist->dec_ctx->codec_type) {
2665 case AVMEDIA_TYPE_AUDIO:
2666 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2669 case AVMEDIA_TYPE_VIDEO:
2670 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
// Estimate the frame duration: prefer the packet duration, else derive it
// from the decoder frame rate (scaled by repeat_pict via the parser).
2672 if (!repeating || !pkt || got_output) {
2673 if (pkt && pkt->duration) {
2674 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2675 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2676 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2677 duration_dts = ((int64_t)AV_TIME_BASE *
2678 ist->dec_ctx->framerate.den * ticks) /
2679 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2682 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2683 ist->next_dts += duration_dts;
2685 ist->next_dts = AV_NOPTS_VALUE;
// Advance next_pts by the actual decoded-frame duration when known,
// otherwise reuse the dts-based estimate.
2689 if (duration_pts > 0) {
2690 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2692 ist->next_pts += duration_dts;
2696 case AVMEDIA_TYPE_SUBTITLE:
2699 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2700 if (!pkt && ret >= 0)
2707 if (ret == AVERROR_EOF) {
2713 if (decode_failed) {
2714 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2715 ist->file_index, ist->st->index, av_err2str(ret));
2717 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2718 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2720 if (!decode_failed || exit_on_error)
2726 ist->got_output = 1;
2731 // During draining, we might get multiple output frames in this loop.
2732 // ffmpeg.c does not drain the filter chain on configuration changes,
2733 // which means if we send multiple frames at once to the filters, and
2734 // one of those frames changes configuration, the buffered frames will
2735 // be lost. This can upset certain FATE tests.
2736 // Decode only 1 frame per call on EOF to appease these FATE tests.
2737 // The ideal solution would be to rewrite decoding to use the new
2738 // decoding API in a better way.
2745 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2746 /* except when looping we need to flush but not to send an EOF */
2747 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2748 int ret = send_filter_eof(ist);
2750 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2755 /* handle stream copy */
2756 if (!ist->decoding_needed && pkt) {
2757 ist->dts = ist->next_dts;
2758 switch (ist->dec_ctx->codec_type) {
2759 case AVMEDIA_TYPE_AUDIO:
2760 av_assert1(pkt->duration >= 0);
2761 if (ist->dec_ctx->sample_rate) {
2762 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2763 ist->dec_ctx->sample_rate;
2765 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2768 case AVMEDIA_TYPE_VIDEO:
// For a forced input frame rate, snap next_dts onto the framerate grid.
2769 if (ist->framerate.num) {
2770 // TODO: Remove work-around for c99-to-c89 issue 7
2771 AVRational time_base_q = AV_TIME_BASE_Q;
2772 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2773 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2774 } else if (pkt->duration) {
2775 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2776 } else if(ist->dec_ctx->framerate.num != 0) {
2777 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2778 ist->next_dts += ((int64_t)AV_TIME_BASE *
2779 ist->dec_ctx->framerate.den * ticks) /
2780 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2784 ist->pts = ist->dts;
2785 ist->next_pts = ist->next_dts;
// Copy the packet unchanged to every stream-copy output fed by this input.
2787 for (i = 0; i < nb_output_streams; i++) {
2788 OutputStream *ost = output_streams[i];
2790 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2793 do_streamcopy(ist, ost, pkt);
2796 return !eof_reached;
// Build an SDP description covering all RTP output files and either print it
// to stdout or write it to -sdp_file. Bails out (via the early loop) until
// every output file has written its header. NOTE(review): interior lines are
// elided in this excerpt.
2799 static void print_sdp(void)
2804 AVIOContext *sdp_pb;
2805 AVFormatContext **avc;
2807 for (i = 0; i < nb_output_files; i++) {
2808 if (!output_files[i]->header_written)
2812 avc = av_malloc_array(nb_output_files, sizeof(*avc));
// Collect only the RTP muxer contexts; j counts how many were found.
2815 for (i = 0, j = 0; i < nb_output_files; i++) {
2816 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2817 avc[j] = output_files[i]->ctx;
2825 av_sdp_create(avc, j, sdp, sizeof(sdp));
2827 if (!sdp_filename) {
2828 printf("SDP:\n%s\n", sdp);
2831 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2832 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2834 avio_print(sdp_pb, sdp);
2835 avio_closep(&sdp_pb);
2836 av_freep(&sdp_filename);
// AVCodecContext.get_format callback: walk the decoder's offered pixel
// formats and pick/initialize a hwaccel matching the user's request
// (HWACCEL_GENERIC/AUTO via avcodec_get_hw_config, or a legacy entry from
// the hwaccels[] table). Records the chosen format in ist->hwaccel_pix_fmt.
// NOTE(review): interior lines are elided in this excerpt.
2844 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2846 InputStream *ist = s->opaque;
2847 const enum AVPixelFormat *p;
2850 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2851 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2852 const AVCodecHWConfig *config = NULL;
// Software formats are acceptable as-is; only hwaccel formats need setup.
2855 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2858 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2859 ist->hwaccel_id == HWACCEL_AUTO) {
// Search the codec's hw configs for one that offers this pixel format
// through a device context.
2861 config = avcodec_get_hw_config(s->codec, i);
2864 if (!(config->methods &
2865 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2867 if (config->pix_fmt == *p)
2872 if (config->device_type != ist->hwaccel_device_type) {
2873 // Different hwaccel offered, ignore.
2877 ret = hwaccel_decode_init(s);
// A specifically requested hwaccel that fails to initialize is fatal;
// HWACCEL_AUTO (presumably) falls through to try other formats.
2879 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2880 av_log(NULL, AV_LOG_FATAL,
2881 "%s hwaccel requested for input stream #%d:%d, "
2882 "but cannot be initialized.\n",
2883 av_hwdevice_get_type_name(config->device_type),
2884 ist->file_index, ist->st->index);
2885 return AV_PIX_FMT_NONE;
// Legacy path: look the pixel format up in the static hwaccels[] table.
2890 const HWAccel *hwaccel = NULL;
2892 for (i = 0; hwaccels[i].name; i++) {
2893 if (hwaccels[i].pix_fmt == *p) {
2894 hwaccel = &hwaccels[i];
2899 // No hwaccel supporting this pixfmt.
2902 if (hwaccel->id != ist->hwaccel_id) {
2903 // Does not match requested hwaccel.
2907 ret = hwaccel->init(s);
2909 av_log(NULL, AV_LOG_FATAL,
2910 "%s hwaccel requested for input stream #%d:%d, "
2911 "but cannot be initialized.\n", hwaccel->name,
2912 ist->file_index, ist->st->index);
2913 return AV_PIX_FMT_NONE;
// Propagate a pre-existing hw frames context to the decoder if present.
2917 if (ist->hw_frames_ctx) {
2918 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2919 if (!s->hw_frames_ctx)
2920 return AV_PIX_FMT_NONE;
2923 ist->hwaccel_pix_fmt = *p;
// AVCodecContext.get_buffer2 callback: delegate to the active hwaccel's
// buffer allocator for hw frames, otherwise fall back to the default.
2930 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2932 InputStream *ist = s->opaque;
2934 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2935 return ist->hwaccel_get_buffer(s, frame, flags);
2937 return avcodec_default_get_buffer2(s, frame, flags);
// Prepare input stream ist_index for transcoding: when decoding is needed,
// install the hw callbacks, set decoder options, configure the hw device and
// open the decoder; always reset the next_pts/next_dts estimates. On failure
// a human-readable message is written into error[error_len].
// NOTE(review): interior lines are elided in this excerpt.
2940 static int init_input_stream(int ist_index, char *error, int error_len)
2943 InputStream *ist = input_streams[ist_index];
2945 if (ist->decoding_needed) {
2946 const AVCodec *codec = ist->dec;
2948 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2949 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2950 return AVERROR(EINVAL);
// Hook the hwaccel negotiation/allocation callbacks into the decoder.
2953 ist->dec_ctx->opaque = ist;
2954 ist->dec_ctx->get_format = get_format;
2955 ist->dec_ctx->get_buffer2 = get_buffer;
2956 #if LIBAVCODEC_VERSION_MAJOR < 60
2957 ist->dec_ctx->thread_safe_callbacks = 1;
// DVB subtitles need compute_edt when written to an output; mixing that with
// filtering is only partially supported, hence the warning.
2960 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2961 (ist->decoding_needed & DECODING_FOR_OST)) {
2962 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2963 if (ist->decoding_needed & DECODING_FOR_FILTER)
2964 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2967 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2969 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2970 * audio, and video decoders such as cuvid or mediacodec */
2971 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2973 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2974 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2975 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2976 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2977 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2979 ret = hw_device_setup_for_decode(ist);
2981 snprintf(error, error_len, "Device setup failed for "
2982 "decoder on input stream #%d:%d : %s",
2983 ist->file_index, ist->st->index, av_err2str(ret));
2987 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2988 if (ret == AVERROR_EXPERIMENTAL)
2989 abort_codec_experimental(codec, 0);
2991 snprintf(error, error_len,
2992 "Error while opening decoder for input stream "
2994 ist->file_index, ist->st->index, av_err2str(ret));
// Any decoder option not consumed by avcodec_open2 is reported as unknown.
2997 assert_avoptions(ist->decoder_opts);
3000 ist->next_pts = AV_NOPTS_VALUE;
3001 ist->next_dts = AV_NOPTS_VALUE;
// Return the input stream an output stream is mapped from, if any
// (source_index < 0 means the output has no direct input source).
3006 static InputStream *get_input_stream(OutputStream *ost)
3008 if (ost->source_index >= 0)
3009 return input_streams[ost->source_index];
// qsort comparator for int64_t values (ascending, overflow-safe via FFDIFFSIGN).
3013 static int compare_int64(const void *a, const void *b)
3015 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/* open the muxer when all the streams are initialized */
// Once every stream of this output file is initialized, write the container
// header, optionally emit the SDP, and flush packets that were queued while
// the muxer was not yet open. NOTE(review): interior lines are elided in
// this excerpt.
3019 static int check_init_output_file(OutputFile *of, int file_index)
3023 for (i = 0; i < of->ctx->nb_streams; i++) {
3024 OutputStream *ost = output_streams[of->ost_index + i];
3025 if (!ost->initialized)
3029 of->ctx->interrupt_callback = int_cb;
3031 ret = avformat_write_header(of->ctx, &of->opts);
3033 av_log(NULL, AV_LOG_ERROR,
3034 "Could not write header for output file #%d "
3035 "(incorrect codec parameters ?): %s\n",
3036 file_index, av_err2str(ret));
3039 //assert_avoptions(of->opts);
3040 of->header_written = 1;
3042 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3045 if (sdp_filename || want_sdp)
3048 /* flush the muxing queues */
3049 for (i = 0; i < of->ctx->nb_streams; i++) {
3050 OutputStream *ost = output_streams[of->ost_index + i];
3052 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3053 if (!av_fifo_size(ost->muxing_queue))
3054 ost->mux_timebase = ost->st->time_base;
// Drain every packet buffered before the header was written.
3056 while (av_fifo_size(ost->muxing_queue)) {
3058 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3059 ost->muxing_queue_data_size -= pkt.size;
3060 write_packet(of, &pkt, ost, 1);
// Initialize the output stream's bitstream filter (if any): feed it the
// current stream parameters/time base, init it, then copy the (possibly
// changed) parameters and time base back to the stream. NOTE(review):
// interior lines are elided in this excerpt.
3067 static int init_output_bsfs(OutputStream *ost)
3069 AVBSFContext *ctx = ost->bsf_ctx;
3075 ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3079 ctx->time_base_in = ost->st->time_base;
3081 ret = av_bsf_init(ctx);
3083 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3088 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3091 ost->st->time_base = ctx->time_base_out;
// Set up an output stream in stream-copy mode: propagate codec parameters,
// codec tag, frame rate, time base, duration, disposition, side data and
// per-codec-type tweaks from the source input stream without re-encoding.
// NOTE(review): interior lines are elided in this excerpt; comments describe
// only the visible statements.
3096 static int init_output_stream_streamcopy(OutputStream *ost)
3098 OutputFile *of = output_files[ost->file_index];
3099 InputStream *ist = get_input_stream(ost);
3100 AVCodecParameters *par_dst = ost->st->codecpar;
3101 AVCodecParameters *par_src = ost->ref_par;
3104 uint32_t codec_tag = par_dst->codec_tag;
// Stream copy requires a mapped input and must not go through a filtergraph.
3106 av_assert0(ist && !ost->filter);
3108 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3110 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3112 av_log(NULL, AV_LOG_FATAL,
3113 "Error setting up codec context options.\n");
3117 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3119 av_log(NULL, AV_LOG_FATAL,
3120 "Error getting reference codec parameters.\n");
// Keep the source codec tag only when the output container accepts it
// (or defines no tag table / no alternative tag for this codec id).
3125 unsigned int codec_tag_tmp;
3126 if (!of->ctx->oformat->codec_tag ||
3127 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3128 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3129 codec_tag = par_src->codec_tag;
3132 ret = avcodec_parameters_copy(par_dst, par_src);
3136 par_dst->codec_tag = codec_tag;
3138 if (!ost->frame_rate.num)
3139 ost->frame_rate = ist->framerate;
3140 ost->st->avg_frame_rate = ost->frame_rate;
3142 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3146 // copy timebase while removing common factors
3147 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3148 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3150 // copy estimated duration as a hint to the muxer
3151 if (ost->st->duration <= 0 && ist->st->duration > 0)
3152 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3155 ost->st->disposition = ist->st->disposition;
// Duplicate all stream-level side data packets into the output stream.
3157 if (ist->st->nb_side_data) {
3158 for (i = 0; i < ist->st->nb_side_data; i++) {
3159 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3162 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3164 return AVERROR(ENOMEM);
3165 memcpy(dst_data, sd_src->data, sd_src->size);
// A user rotation override is stored as a fresh display matrix.
3169 if (ost->rotate_overridden) {
3170 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3171 sizeof(int32_t) * 9);
3173 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3176 switch (par_dst->codec_type) {
3177 case AVMEDIA_TYPE_AUDIO:
3178 if (audio_volume != 256) {
3179 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
// Clear bogus block_align values that some demuxers report for MP3/AC3.
3182 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3183 par_dst->block_align= 0;
3184 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3185 par_dst->block_align= 0;
3187 case AVMEDIA_TYPE_VIDEO:
3188 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3190 av_mul_q(ost->frame_aspect_ratio,
3191 (AVRational){ par_dst->height, par_dst->width });
3192 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3193 "with stream copy may produce invalid files\n");
3195 else if (ist->st->sample_aspect_ratio.num)
3196 sar = ist->st->sample_aspect_ratio;
3198 sar = par_src->sample_aspect_ratio;
3199 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3200 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3201 ost->st->r_frame_rate = ist->st->r_frame_rate;
3205 ost->mux_timebase = ist->st->time_base;
// Write the "encoder" metadata tag for an output stream (e.g. "Lavc... libx264")
// unless the user already set one. Honors bitexact mode (from either format
// or codec flags) by omitting the version string. NOTE(review): interior
// lines are elided in this excerpt.
3210 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3212 AVDictionaryEntry *e;
3214 uint8_t *encoder_string;
3215 int encoder_string_len;
3216 int format_flags = 0;
3217 int codec_flags = ost->enc_ctx->flags;
3219 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
// Evaluate user-supplied fflags/flags strings to detect +bitexact requests
// that have not been applied to the contexts yet.
3222 e = av_dict_get(of->opts, "fflags", NULL, 0);
3224 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3227 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3229 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3231 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3234 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3237 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3238 encoder_string = av_mallocz(encoder_string_len);
3239 if (!encoder_string)
3242 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3243 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3245 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3246 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
// DONT_STRDUP_VAL hands ownership of encoder_string to the dictionary.
3247 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3248 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
// Parse the -force_key_frames specification (comma-separated times, or
// "chapters[+offset]") into a sorted array of timestamps in the encoder's
// time base, stored in ost->forced_kf_pts / forced_kf_count.
// NOTE(review): interior lines are elided in this excerpt.
3251 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3252 AVCodecContext *avctx)
3255 int n = 1, i, size, index = 0;
// First pass (visible below) counts separators to size the array.
3258 for (p = kf; *p; p++)
3262 pts = av_malloc_array(size, sizeof(*pts));
3264 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3269 for (i = 0; i < n; i++) {
3270 char *next = strchr(p, ',');
// "chapters[+offset]": add one keyframe per chapter start, shifted by the
// optional offset, growing the array to fit (with overflow check).
3275 if (!memcmp(p, "chapters", 8)) {
3277 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3280 if (avf->nb_chapters > INT_MAX - size ||
3281 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3283 av_log(NULL, AV_LOG_FATAL,
3284 "Could not allocate forced key frames array.\n");
3287 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3288 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3290 for (j = 0; j < avf->nb_chapters; j++) {
3291 AVChapter *c = avf->chapters[j];
3292 av_assert1(index < size);
3293 pts[index++] = av_rescale_q(c->start, c->time_base,
3294 avctx->time_base) + t;
// Plain timestamp entry.
3299 t = parse_time_or_die("force_key_frames", p, 1);
3300 av_assert1(index < size);
3301 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3308 av_assert0(index == size);
3309 qsort(pts, size, sizeof(*pts), compare_int64);
3310 ost->forced_kf_count = size;
3311 ost->forced_kf_pts = pts;
// Choose the encoder time base: an explicit positive -enc_time_base wins;
// a negative value means "copy the input stream's time base" (falling back
// to default_time_base, with a warning, when no input stream is available);
// otherwise use the caller-supplied default. NOTE(review): interior lines
// are elided in this excerpt.
3314 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3316 InputStream *ist = get_input_stream(ost);
3317 AVCodecContext *enc_ctx = ost->enc_ctx;
3318 AVFormatContext *oc;
3320 if (ost->enc_timebase.num > 0) {
3321 enc_ctx->time_base = ost->enc_timebase;
3325 if (ost->enc_timebase.num < 0) {
3327 enc_ctx->time_base = ist->st->time_base;
3331 oc = output_files[ost->file_index]->ctx;
3332 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3335 enc_ctx->time_base = default_time_base;
// Configure the encoder context for an output stream that will be encoded:
// metadata/disposition fixups, frame-rate selection for video, and
// per-media-type parameters (sample/pixel format, dimensions, time base,
// field order, forced key frames, ...), mostly pulled from the buffersink
// feeding the encoder. `frame` (may be relied on for color properties) comes
// from the filtergraph. NOTE(review): interior lines are elided in this
// excerpt; comments describe only the visible statements.
3338 static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3340 InputStream *ist = get_input_stream(ost);
3341 AVCodecContext *enc_ctx = ost->enc_ctx;
3342 AVCodecContext *dec_ctx = NULL;
3343 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3346 set_encoder_id(output_files[ost->file_index], ost);
3348 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3349 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3350 // which have to be filtered out to prevent leaking them to output files.
3351 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3354 ost->st->disposition = ist->st->disposition;
3356 dec_ctx = ist->dec_ctx;
3358 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
// If this is the only audio/video stream of its type in the file, mark it
// as the default stream.
3360 for (j = 0; j < oc->nb_streams; j++) {
3361 AVStream *st = oc->streams[j];
3362 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3365 if (j == oc->nb_streams)
3366 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3367 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3368 ost->st->disposition = AV_DISPOSITION_DEFAULT;
// Video frame-rate selection, in priority order: -r, filter output rate,
// input -r, input r_frame_rate, then a warned 25fps default.
3371 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3372 if (!ost->frame_rate.num)
3373 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3374 if (ist && !ost->frame_rate.num)
3375 ost->frame_rate = ist->framerate;
3376 if (ist && !ost->frame_rate.num)
3377 ost->frame_rate = ist->st->r_frame_rate;
3378 if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3379 ost->frame_rate = (AVRational){25, 1};
3380 av_log(NULL, AV_LOG_WARNING,
3382 "about the input framerate is available. Falling "
3383 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3384 "if you want a different framerate.\n",
3385 ost->file_index, ost->index);
3388 if (ost->max_frame_rate.num &&
3389 (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
3390 !ost->frame_rate.den))
3391 ost->frame_rate = ost->max_frame_rate;
// Snap to the nearest rate the encoder supports, unless -force_fps is set.
3393 if (ost->enc->supported_framerates && !ost->force_fps) {
3394 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3395 ost->frame_rate = ost->enc->supported_framerates[idx];
3397 // reduce frame rate for mpeg4 to be within the spec limits
3398 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3399 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3400 ost->frame_rate.num, ost->frame_rate.den, 65535);
3404 switch (enc_ctx->codec_type) {
3405 case AVMEDIA_TYPE_AUDIO:
// Audio parameters come from the buffersink that feeds the encoder.
3406 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3408 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3409 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3410 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3411 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3412 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3414 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3417 case AVMEDIA_TYPE_VIDEO:
3418 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3420 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3421 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3422 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3423 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3424 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3425 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3428 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3429 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3430 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3431 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3432 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3433 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3435 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3437 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3438 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
// Propagate the filtered frame's color metadata to the encoder.
3441 enc_ctx->color_range = frame->color_range;
3442 enc_ctx->color_primaries = frame->color_primaries;
3443 enc_ctx->color_trc = frame->color_trc;
3444 enc_ctx->colorspace = frame->colorspace;
3445 enc_ctx->chroma_sample_location = frame->chroma_location;
3448 enc_ctx->framerate = ost->frame_rate;
3450 ost->st->avg_frame_rate = ost->frame_rate;
// Only keep the input's raw-sample depth when geometry/format are unchanged;
// otherwise fall back to the global frame_bits_per_raw_sample setting.
3453 enc_ctx->width != dec_ctx->width ||
3454 enc_ctx->height != dec_ctx->height ||
3455 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3456 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
// Field order: explicit -top option first, then the frame's own interlacing.
3459 if (ost->top_field_first == 0) {
3460 enc_ctx->field_order = AV_FIELD_BB;
3461 } else if (ost->top_field_first == 1) {
3462 enc_ctx->field_order = AV_FIELD_TT;
3466 if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
3467 ost->top_field_first >= 0)
3468 frame->top_field_first = !!ost->top_field_first;
3470 if (frame->interlaced_frame) {
3471 if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3472 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3474 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3476 enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
// -force_key_frames: either an expression ("expr:..."), "source" (handled
// elsewhere), or a static comma-separated timestamp list.
3479 if (ost->forced_keyframes) {
3480 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3481 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3482 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3484 av_log(NULL, AV_LOG_ERROR,
3485 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3488 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3489 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3490 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3491 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3493 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3494 // parse it only for static kf timings
3495 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3496 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3500 case AVMEDIA_TYPE_SUBTITLE:
3501 enc_ctx->time_base = AV_TIME_BASE_Q;
3502 if (!enc_ctx->width) {
3503 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3504 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3507 case AVMEDIA_TYPE_DATA:
3514 ost->mux_timebase = enc_ctx->time_base;
/* Fully initialize one output stream: open the encoder when transcoding,
 * or run the stream-copy path; then propagate codec parameters and
 * side data to the muxing AVStream, apply user-requested dispositions,
 * set up bitstream filters, and kick off the output file header once
 * all of its streams are ready.
 *
 * NOTE(review): this chunk is an elided dump — several original lines
 * (e.g. `int ret;`, `InputStream *ist;`, closing braces, `return ret;`
 * sites) are not visible here, so comments describe only what the
 * visible lines establish.
 *
 * @param ost        output stream to initialize
 * @param frame      first frame to be encoded (may carry parameters);
 *                   forwarded to init_output_stream_encode()
 * @param error      buffer receiving a human-readable failure message
 * @param error_len  size of 'error' in bytes
 * @return 0 on success, negative AVERROR on failure (presumably — the
 *         return statements are elided from this view; confirm upstream) */
3519 static int init_output_stream(OutputStream *ost, AVFrame *frame,
3520 char *error, int error_len)
3524 if (ost->encoding_needed) {
3525 const AVCodec *codec = ost->enc;
3526 AVCodecContext *dec = NULL;
3529 ret = init_output_stream_encode(ost, frame);
3533 if ((ist = get_input_stream(ost)))
/* Text subtitle encoders (ASS) assume the header buffer is
 * NUL-terminated, hence the extra byte on top of the decoder's size. */
3535 if (dec && dec->subtitle_header) {
3536 /* ASS code assumes this buffer is null terminated so add extra byte. */
3537 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3538 if (!ost->enc_ctx->subtitle_header)
3539 return AVERROR(ENOMEM);
3540 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3541 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default encoder options: auto threading, and a 128 kb/s audio
 * bitrate when the user specified neither "b" nor "ab". */
3543 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3544 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3545 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3547 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3548 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3549 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3551 ret = hw_device_setup_for_encode(ost);
3553 snprintf(error, error_len, "Device setup failed for "
3554 "encoder on output stream #%d:%d : %s",
3555 ost->file_index, ost->index, av_err2str(ret));
/* Subtitle conversion is only supported within the same family:
 * text->text or bitmap->bitmap, never across (props compared below). */
3559 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3560 int input_props = 0, output_props = 0;
3561 AVCodecDescriptor const *input_descriptor =
3562 avcodec_descriptor_get(dec->codec_id);
3563 AVCodecDescriptor const *output_descriptor =
3564 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3565 if (input_descriptor)
3566 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3567 if (output_descriptor)
3568 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3569 if (input_props && output_props && input_props != output_props) {
3570 snprintf(error, error_len,
3571 "Subtitle encoding currently only possible from text to text "
3572 "or bitmap to bitmap");
3573 return AVERROR_INVALIDDATA;
3577 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3578 if (ret == AVERROR_EXPERIMENTAL)
3579 abort_codec_experimental(codec, 1);
3580 snprintf(error, error_len,
3581 "Error while opening encoder for output stream #%d:%d - "
3582 "maybe incorrect parameters such as bit_rate, rate, width or height",
3583 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink configured to
 * deliver exactly frame_size samples per frame. */
3586 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3587 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3588 av_buffersink_set_frame_size(ost->filter->filter,
3589 ost->enc_ctx->frame_size);
3590 assert_avoptions(ost->encoder_opts);
/* Heuristic: bitrates below 1000 almost certainly mean the user
 * thought the option took kbit/s (Codec2's legitimate ~700 bit/s
 * modes are exempted). */
3591 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3592 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3593 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3594 " It takes bits/s as argument, not kbits/s\n");
3596 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3598 av_log(NULL, AV_LOG_FATAL,
3599 "Error initializing the output stream codec context.\n");
/* Mirror any coded side data produced by the encoder onto the
 * muxing stream so the muxer can see it. */
3603 if (ost->enc_ctx->nb_coded_side_data) {
3606 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3607 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3610 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3612 return AVERROR(ENOMEM);
3613 memcpy(dst_data, sd_src->data, sd_src->size);
3618 * Add global input side data. For now this is naive, and copies it
3619 * from the input stream's global side data. All side data should
3620 * really be funneled over AVFrame and libavfilter, then added back to
3621 * packet side data, and then potentially using the first packet for
3626 for (i = 0; i < ist->st->nb_side_data; i++) {
3627 AVPacketSideData *sd = &ist->st->side_data[i];
3628 if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3629 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3631 return AVERROR(ENOMEM);
3632 memcpy(dst, sd->data, sd->size);
/* With autorotate the frames were already rotated by the filter
 * chain, so neutralize the display matrix to avoid double rotation. */
3633 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3634 av_display_rotation_set((uint32_t *)dst, 0);
3639 // copy timebase while removing common factors
3640 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3641 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3643 // copy estimated duration as a hint to the muxer
3644 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3645 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3646 } else if (ost->stream_copy) {
3647 ret = init_output_stream_streamcopy(ost);
3652 // parse user provided disposition, and update stream values
/* A throwaway AVOption table + AVClass lets av_opt_eval_flags() parse
 * the textual -disposition value into AV_DISPOSITION_* flag bits. */
3653 if (ost->disposition) {
3654 static const AVOption opts[] = {
3655 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3656 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3657 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3658 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3659 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3660 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3661 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3662 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3663 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3664 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3665 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3666 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3667 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3668 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3669 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3670 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3673 static const AVClass class = {
3675 .item_name = av_default_item_name,
3677 .version = LIBAVUTIL_VERSION_INT,
3679 const AVClass *pclass = &class;
3681 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3686 /* initialize bitstream filters for the output stream
3687 * needs to be done here, because the codec id for streamcopy is not
3688 * known until now */
3689 ret = init_output_bsfs(ost);
3693 ost->initialized = 1;
/* May write the output file header if this was the last stream of
 * the file to become ready. */
3695 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (once per stream index) about a stream that appeared in the input
 * after demuxing started; such streams are ignored by the mapping.
 * nb_streams_warn tracks the highest index already reported so repeated
 * packets from the same new stream stay silent. */
3702 static void report_new_stream(int input_index, AVPacket *pkt)
3704 InputFile *file = input_files[input_index];
3705 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a later) stream index — nothing to do.
 * NOTE(review): the early 'return' line is elided from this dump. */
3707 if (pkt->stream_index < file->nb_streams_warn)
3709 av_log(file->ctx, AV_LOG_WARNING,
3710 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3711 av_get_media_type_string(st->codecpar->codec_type),
3712 input_index, pkt->stream_index,
3713 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3714 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to a source input stream, arm framerate emulation, open all
 * decoders, initialize the output streams that can be initialized without
 * a first frame, discard unused programs, write headers for stream-less
 * outputs, and finally dump the human-readable stream mapping.
 * NOTE(review): this dump elides many lines (gotos, braces, returns);
 * comments reflect only the visible statements.
 * Returns 0 on success, negative AVERROR on error (presumably via the
 * elided 'dump_format:'/error paths — confirm against the full source). */
3717 static int transcode_init(void)
3719 int ret = 0, i, j, k;
3720 AVFormatContext *oc;
3723 char error[1024] = {0};
/* Complex filtergraph outputs have no -map source; when the graph has a
 * single input, inherit that input stream as the nominal source. */
3725 for (i = 0; i < nb_filtergraphs; i++) {
3726 FilterGraph *fg = filtergraphs[i];
3727 for (j = 0; j < fg->nb_outputs; j++) {
3728 OutputFilter *ofilter = fg->outputs[j];
3729 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3731 if (fg->nb_inputs != 1)
3733 for (k = nb_input_streams-1; k >= 0 ; k--)
3734 if (fg->inputs[0]->ist == input_streams[k])
3736 ofilter->ost->source_index = k;
3740 /* init framerate emulation */
3741 for (i = 0; i < nb_input_files; i++) {
3742 InputFile *ifile = input_files[i];
3743 if (ifile->rate_emu)
3744 for (j = 0; j < ifile->nb_streams; j++)
3745 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3748 /* init input streams */
/* On decoder-open failure, close any encoder contexts already opened
 * before bailing out (error path partially elided in this dump). */
3749 for (i = 0; i < nb_input_streams; i++)
3750 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3751 for (i = 0; i < nb_output_streams; i++) {
3752 ost = output_streams[i];
3753 avcodec_close(ost->enc_ctx);
3759 * initialize stream copy and subtitle/data streams.
3760 * Encoded AVFrame based streams will get initialized as follows:
3761 * - when the first AVFrame is received in do_video_out
3762 * - just before the first AVFrame is received in either transcode_step
3763 * or reap_filters due to us requiring the filter chain buffer sink
3764 * to be configured with the correct audio frame size, which is only
3765 * known after the encoder is initialized.
3767 for (i = 0; i < nb_output_streams; i++) {
/* Skip encoded audio/video here — they are initialized lazily on the
 * first frame, as described in the comment above. */
3768 if (!output_streams[i]->stream_copy &&
3769 (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3770 output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3773 ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3778 /* discard unused programs */
3779 for (i = 0; i < nb_input_files; i++) {
3780 InputFile *ifile = input_files[i];
3781 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3782 AVProgram *p = ifile->ctx->programs[j];
3783 int discard = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is in use. */
3785 for (k = 0; k < p->nb_stream_indexes; k++)
3786 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3787 discard = AVDISCARD_DEFAULT;
3790 p->discard = discard;
3794 /* write headers for files with no streams */
3795 for (i = 0; i < nb_output_files; i++) {
3796 oc = output_files[i]->ctx;
3797 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3798 ret = check_init_output_file(output_files[i], i);
3805 /* dump the stream mapping */
3806 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3807 for (i = 0; i < nb_input_streams; i++) {
3808 ist = input_streams[i];
/* Inputs feeding complex (non-simple) filtergraphs are reported as
 * "stream -> graph" mappings. */
3810 for (j = 0; j < ist->nb_filters; j++) {
3811 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3812 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3813 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3814 ist->filters[j]->name);
3815 if (nb_filtergraphs > 1)
3816 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3817 av_log(NULL, AV_LOG_INFO, "\n");
3822 for (i = 0; i < nb_output_streams; i++) {
3823 ost = output_streams[i];
3825 if (ost->attachment_filename) {
3826 /* an attached file */
3827 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3828 ost->attachment_filename, ost->file_index, ost->index);
3832 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3833 /* output from a complex graph */
3834 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3835 if (nb_filtergraphs > 1)
3836 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3838 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3839 ost->index, ost->enc ? ost->enc->name : "?");
3843 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3844 input_streams[ost->source_index]->file_index,
3845 input_streams[ost->source_index]->st->index,
3848 if (ost->sync_ist != input_streams[ost->source_index])
3849 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3850 ost->sync_ist->file_index,
3851 ost->sync_ist->st->index);
3852 if (ost->stream_copy)
3853 av_log(NULL, AV_LOG_INFO, " (copy)");
/* For transcodes, show "codec (decoder) -> codec (encoder)"; when the
 * implementation name matches the codec name, print "native". */
3855 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3856 const AVCodec *out_codec = ost->enc;
3857 const char *decoder_name = "?";
3858 const char *in_codec_name = "?";
3859 const char *encoder_name = "?";
3860 const char *out_codec_name = "?";
3861 const AVCodecDescriptor *desc;
3864 decoder_name = in_codec->name;
3865 desc = avcodec_descriptor_get(in_codec->id);
3867 in_codec_name = desc->name;
3868 if (!strcmp(decoder_name, in_codec_name))
3869 decoder_name = "native";
3873 encoder_name = out_codec->name;
3874 desc = avcodec_descriptor_get(out_codec->id);
3876 out_codec_name = desc->name;
3877 if (!strcmp(encoder_name, out_codec_name))
3878 encoder_name = "native";
3881 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3882 in_codec_name, decoder_name,
3883 out_codec_name, encoder_name);
3885 av_log(NULL, AV_LOG_INFO, "\n");
3889 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Signal handlers / status printing check this flag before touching
 * stream state. */
3893 atomic_store(&transcode_init_done, 1);
3898 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3899 static int need_output(void)
3903 for (i = 0; i < nb_output_streams; i++) {
3904 OutputStream *ost = output_streams[i];
3905 OutputFile *of = output_files[ost->file_index];
3906 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* A stream that is finished, or whose file hit -fs (limit_filesize),
 * contributes no further demand for output. */
3908 if (ost->finished ||
3909 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames reached: close every stream of this output file.
 * NOTE(review): the 'continue'/'return' lines around these checks are
 * elided from this dump. */
3911 if (ost->frame_number >= ost->max_frames) {
3913 for (j = 0; j < of->ctx->nb_streams; j++)
3914 close_output_stream(output_streams[of->ost_index + j]);
3925 * Select the output stream to process.
3927 * @return selected output stream, or NULL if none available
/* Picks the unfinished output stream with the smallest current DTS
 * (rescaled to a common timebase) so muxing stays roughly interleaved.
 * Streams with no DTS yet sort first (INT64_MIN). */
3929 static OutputStream *choose_output(void)
3932 int64_t opts_min = INT64_MAX;
3933 OutputStream *ost_min = NULL;
3935 for (i = 0; i < nb_output_streams; i++) {
3936 OutputStream *ost = output_streams[i];
3937 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3938 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3940 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3941 av_log(NULL, AV_LOG_DEBUG,
3942 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3943 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
/* An uninitialized stream whose inputs are still live always wins —
 * it must be fed before interleaving decisions mean anything.
 * NOTE(review): the 'return ost;' under this test is elided here. */
3945 if (!ost->initialized && !ost->inputs_done)
3948 if (!ost->finished && opts < opts_min) {
/* 'unavailable' streams (EAGAIN on their input) yield NULL so the
 * caller retries later instead of spinning on them. */
3950 ost_min = ost->unavailable ? NULL : ost;
/* Toggle terminal echo on stdin (fd 0); used around interactive command
 * entry so typed commands are/aren't displayed. Silently does nothing if
 * stdin is not a tty (tcgetattr fails). */
3956 static void set_tty_echo(int on)
3960 if (tcgetattr(0, &tty) == 0) {
3961 if (on) tty.c_lflag |= ECHO;
3962 else tty.c_lflag &= ~ECHO;
3963 tcsetattr(0, TCSANOW, &tty);
/* Poll for interactive single-key commands (at most every 100ms when not
 * daemonized): verbosity +/-, QP histogram toggle, packet dumping, filter
 * commands (c/C), debug-mode cycling (d/D), and a help screen (?/other).
 * Returns AVERROR_EXIT when the user asked to quit or a signal arrived;
 * the success-return line is elided from this dump. */
3968 static int check_keyboard_interaction(int64_t cur_time)
3971 static int64_t last_time;
3972 if (received_nb_signals)
3973 return AVERROR_EXIT;
3974 /* read_key() returns 0 on EOF */
3975 if(cur_time - last_time >= 100000 && !run_as_daemon){
3977 last_time = cur_time;
3981 return AVERROR_EXIT;
3982 if (key == '+') av_log_set_level(av_log_get_level()+10);
3983 if (key == '-') av_log_set_level(av_log_get_level()-10);
3984 if (key == 's') qp_hist ^= 1;
/* 'h' cycles dump states: hex+pkt -> off -> pkt (visible fragments). */
3987 do_hex_dump = do_pkt_dump = 0;
3988 } else if(do_pkt_dump){
3992 av_log_set_level(AV_LOG_DEBUG);
/* 'c' sends a command to the first matching filter, 'C' queues/sends to
 * all matching filters; input format: <target>|all <time>|-1 <cmd> [arg] */
3994 if (key == 'c' || key == 'C'){
3995 char buf[4096], target[64], command[256], arg[256] = {0};
3998 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4001 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4006 fprintf(stderr, "\n");
4008 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4009 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4010 target, time, command, arg);
4011 for (i = 0; i < nb_filtergraphs; i++) {
4012 FilterGraph *fg = filtergraphs[i];
4015 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4016 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4017 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4018 } else if (key == 'c') {
4019 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4020 ret = AVERROR_PATCHWELCOME;
4022 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4024 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4029 av_log(NULL, AV_LOG_ERROR,
4030 "Parse error, at least 3 arguments were expected, "
4031 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles through decoder debug flags; 'D' lets the user type a
 * numeric debug value. FF_DEBUG_DCT_COEFF is skipped (would crash). */
4034 if (key == 'd' || key == 'D'){
4037 debug = input_streams[0]->dec_ctx->debug << 1;
4038 if(!debug) debug = 1;
4039 while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4046 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4051 fprintf(stderr, "\n");
4052 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4053 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug value to every decoder and encoder context. */
4055 for(i=0;i<nb_input_streams;i++) {
4056 input_streams[i]->dec_ctx->debug = debug;
4058 for(i=0;i<nb_output_streams;i++) {
4059 OutputStream *ost = output_streams[i];
4060 ost->enc_ctx->debug = debug;
4062 if(debug) av_log_set_level(AV_LOG_DEBUG);
4063 fprintf(stderr,"debug=%d\n", debug);
4066 fprintf(stderr, "key function\n"
4067 "? show this help\n"
4068 "+ increase verbosity\n"
4069 "- decrease verbosity\n"
4070 "c Send command to first matching filter supporting it\n"
4071 "C Send/Queue command to all matching filters\n"
4072 "D cycle through available debug modes\n"
4073 "h dump packets/hex press to cycle through the 3 states\n"
4075 "s Show QP histogram\n"
/* Demuxer thread body: reads packets from one InputFile and forwards them
 * to the main thread through a thread message queue. In non-blocking mode
 * a full queue triggers one retry in blocking mode plus a warning to raise
 * -thread_queue_size. On read/send failure, the error is propagated to the
 * receiving side via av_thread_message_queue_set_err_recv(). */
4082 static void *input_thread(void *arg)
4085 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4090 ret = av_read_frame(f->ctx, &pkt);
4092 if (ret == AVERROR(EAGAIN)) {
4097 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4100 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: retry once blocking, and warn the
 * user that the queue size may be too small. */
4101 if (flags && ret == AVERROR(EAGAIN)) {
4103 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4104 av_log(f->ctx, AV_LOG_WARNING,
4105 "Thread message queue blocking; consider raising the "
4106 "thread_queue_size option (current value: %d)\n",
4107 f->thread_queue_size);
4110 if (ret != AVERROR_EOF)
4111 av_log(f->ctx, AV_LOG_ERROR,
4112 "Unable to send packet to main thread: %s\n",
/* Packet could not be handed off — release it here to avoid a leak. */
4114 av_packet_unref(&pkt);
4115 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Tear down the demuxer thread of input file i: signal EOF to the sender,
 * drain and unref any queued packets, join the thread, and free the queue.
 * No-op when the file has no thread queue (single-input, unthreaded mode). */
4123 static void free_input_thread(int i)
4125 InputFile *f = input_files[i];
4128 if (!f || !f->in_thread_queue)
4130 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain the queue so the thread is not blocked on a full queue. */
4131 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4132 av_packet_unref(&pkt);
4134 pthread_join(f->thread, NULL);
4136 av_thread_message_queue_free(&f->in_thread_queue);
/* Tear down the demuxer threads of all input files. */
4139 static void free_input_threads(void)
4143 for (i = 0; i < nb_input_files; i++)
4144 free_input_thread(i);
/* Start a demuxer thread for input file i. Threaded reading is only used
 * when there is more than one input (default queue size 8) or the user set
 * -thread_queue_size explicitly; otherwise this is a no-op.
 * Returns 0 on success or a negative AVERROR. */
4147 static int init_input_thread(int i)
4150 InputFile *f = input_files[i];
4152 if (f->thread_queue_size < 0)
4153 f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4154 if (!f->thread_queue_size)
/* Non-seekable (live/pipe) inputs — except lavfi — read non-blocking so
 * one stalled input cannot starve the others. */
4157 if (f->ctx->pb ? !f->ctx->pb->seekable :
4158 strcmp(f->ctx->iformat->name, "lavfi"))
4159 f->non_blocking = 1;
4160 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4161 f->thread_queue_size, sizeof(AVPacket));
4165 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4166 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4167 av_thread_message_queue_free(&f->in_thread_queue);
4168 return AVERROR(ret);
/* Start demuxer threads for all input files; stops at the first failure
 * (error propagation lines elided from this dump). */
4174 static int init_input_threads(void)
4178 for (i = 0; i < nb_input_files; i++) {
4179 ret = init_input_thread(i);
/* Receive one packet from the input file's demuxer-thread queue,
 * non-blocking when the file is in non-blocking mode. */
4186 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4188 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4190 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file, honoring -re (rate emulation):
 * when any stream's rescaled DTS is still ahead of wall-clock time since
 * start, returns EAGAIN to throttle reading to realtime. Reads from the
 * demuxer thread queue when one exists, else directly from the demuxer. */
4194 static int get_input_packet(InputFile *f, AVPacket *pkt)
4198 for (i = 0; i < f->nb_streams; i++) {
4199 InputStream *ist = input_streams[f->ist_index + i];
/* Compare stream DTS (converted to microseconds) against elapsed
 * wall-clock time. NOTE(review): the enclosing rate_emu check is
 * elided from this dump. */
4200 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4201 int64_t now = av_gettime_relative() - ist->start;
4203 return AVERROR(EAGAIN);
4208 if (f->thread_queue_size)
4209 return get_input_packet_mt(f, pkt);
4211 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input returned EAGAIN this iteration). */
4214 static int got_eagain(void)
4217 for (i = 0; i < nb_output_streams; i++)
4218 if (output_streams[i]->unavailable)
/* Clear the per-iteration EAGAIN markers on all input files and the
 * 'unavailable' flags on all output streams before the next read cycle. */
4223 static void reset_eagain(void)
4226 for (i = 0; i < nb_input_files; i++)
4227 input_files[i]->eagain = 0;
4228 for (i = 0; i < nb_output_streams; i++)
4229 output_streams[i]->unavailable = 0;
4232 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares tmp (in tmp_time_base) with *duration (in time_base) via
 * av_compare_ts and keeps whichever is larger; the returned AVRational is
 * the timebase that *duration is expressed in after the update. */
4233 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4234 AVRational time_base)
4240 return tmp_time_base;
4243 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4246 return tmp_time_base;
/* Rewind an input file to its start for -stream_loop: seek to start_time,
 * then estimate the file's total duration (max_pts - min_pts plus the
 * duration of the last frame) so subsequent loop iterations can offset
 * their timestamps. When audio is present, audio sample counts are the
 * authoritative last-frame duration; video frame lengths are ignored.
 * Returns the seek result (<0 on failure; exact return path partially
 * elided from this dump). */
4252 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4255 AVCodecContext *avctx;
4256 int i, ret, has_audio = 0;
4257 int64_t duration = 0;
4259 ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
/* First pass: detect whether any decoded audio samples exist. */
4263 for (i = 0; i < ifile->nb_streams; i++) {
4264 ist = input_streams[ifile->ist_index + i];
4265 avctx = ist->dec_ctx;
4267 /* duration is the length of the last frame in a stream
4268 * when audio stream is present we don't care about
4269 * last video frame length because it's not defined exactly */
4270 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute the last-frame duration per stream and fold it
 * into the file-wide duration/timebase via duration_max(). */
4274 for (i = 0; i < ifile->nb_streams; i++) {
4275 ist = input_streams[ifile->ist_index + i];
4276 avctx = ist->dec_ctx;
4279 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4280 AVRational sample_rate = {1, avctx->sample_rate};
4282 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: derive one frame's duration from -r (user framerate) or
 * the stream's average frame rate, whichever is known. */
4287 if (ist->framerate.num) {
4288 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4289 } else if (ist->st->avg_frame_rate.num) {
4290 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4295 if (!ifile->duration)
4296 ifile->time_base = ist->st->time_base;
4297 /* the total duration of the stream, max_pts - min_pts is
4298 * the duration of the stream without the last frame */
/* Overflow guard: only add the pts span when it fits alongside the
 * last-frame duration in an int64_t. */
4299 if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4300 duration += ist->max_pts - ist->min_pts;
4301 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Positive loop counts tick down once per completed loop. */
4305 if (ifile->loop > 0)
4313 * - 0 -- one packet was read and processed
4314 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4315 * this function should be called again
4316 * - AVERROR_EOF -- this function should not be called again
/* Read one packet from the given input file and run it through the full
 * demux-side pipeline: loop handling, EOF flushing, wrap correction,
 * global side-data injection, ts_offset/ts_scale application, and the
 * timestamp discontinuity detection/correction, before handing the packet
 * to process_input_packet().
 * NOTE(review): this dump elides many connective lines (declarations,
 * braces, continues); comments below describe only the visible logic. */
4318 static int process_input(int file_index)
4320 InputFile *ifile = input_files[file_index];
4321 AVFormatContext *is;
4324 int ret, thread_ret, i, j;
4327 int disable_discontinuity_correction = copy_ts;
4330 ret = get_input_packet(ifile, &pkt);
4332 if (ret == AVERROR(EAGAIN)) {
/* EOF/error with -stream_loop pending: flush every decoder, rewind the
 * file (restarting its demuxer thread around the seek), then retry. */
4336 if (ret < 0 && ifile->loop) {
4337 AVCodecContext *avctx;
4338 for (i = 0; i < ifile->nb_streams; i++) {
4339 ist = input_streams[ifile->ist_index + i];
4340 avctx = ist->dec_ctx;
4341 if (ist->decoding_needed) {
4342 ret = process_input_packet(ist, NULL, 1);
4345 avcodec_flush_buffers(avctx);
4349 free_input_thread(file_index);
4351 ret = seek_to_start(ifile, is);
4353 thread_ret = init_input_thread(file_index);
4358 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4360 ret = get_input_packet(ifile, &pkt);
4361 if (ret == AVERROR(EAGAIN)) {
/* Genuine EOF (or error): report errors, send flush packets to all
 * decoders and finish non-lavfi outputs fed by this file. */
4367 if (ret != AVERROR_EOF) {
4368 print_error(is->url, ret);
4373 for (i = 0; i < ifile->nb_streams; i++) {
4374 ist = input_streams[ifile->ist_index + i];
4375 if (ist->decoding_needed) {
4376 ret = process_input_packet(ist, NULL, 0);
4381 /* mark all outputs that don't go through lavfi as finished */
4382 for (j = 0; j < nb_output_streams; j++) {
4383 OutputStream *ost = output_streams[j];
4385 if (ost->source_index == ifile->ist_index + i &&
4386 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4387 finish_output_stream(ost);
/* EAGAIN (not EOF) is returned so the main loop revisits other files
 * and notices eof_reached on the next pass. */
4391 ifile->eof_reached = 1;
4392 return AVERROR(EAGAIN);
4398 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4399 is->streams[pkt.stream_index]);
4401 /* the following test is needed in case new streams appear
4402 dynamically in stream : we ignore them */
4403 if (pkt.stream_index >= ifile->nb_streams) {
4404 report_new_stream(file_index, &pkt);
4405 goto discard_packet;
4408 ist = input_streams[ifile->ist_index + pkt.stream_index];
4410 ist->data_size += pkt.size;
4414 goto discard_packet;
4416 if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4417 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4418 "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4424 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4425 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4426 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4427 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4428 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4429 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4430 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4431 av_ts2str(input_files[ist->file_index]->ts_offset),
4432 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64 pts_wrap_bits:
 * timestamps that jumped past the wrap point are unwrapped by one
 * wrap period until the stream settles. */
4435 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4436 int64_t stime, stime2;
4437 // Correcting starttime based on the enabled streams
4438 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4439 // so we instead do it here as part of discontinuity handling
4440 if ( ist->next_dts == AV_NOPTS_VALUE
4441 && ifile->ts_offset == -is->start_time
4442 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4443 int64_t new_start_time = INT64_MAX;
4444 for (i=0; i<is->nb_streams; i++) {
4445 AVStream *st = is->streams[i];
4446 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4448 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4450 if (new_start_time > is->start_time) {
4451 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4452 ifile->ts_offset = -new_start_time;
4456 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4457 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4458 ist->wrap_correction_done = 1;
4460 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4461 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4462 ist->wrap_correction_done = 0;
4464 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4465 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4466 ist->wrap_correction_done = 0;
4470 /* add the stream-global side data to the first packet */
4471 if (ist->nb_packets == 1) {
4472 for (i = 0; i < ist->st->nb_side_data; i++) {
4473 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Display matrix is handled via autorotate, and existing per-packet
 * side data of the same type must not be overwritten. */
4476 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4479 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4482 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4486 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the file-level timestamp offset, then the per-stream -itsscale. */
4490 if (pkt.dts != AV_NOPTS_VALUE)
4491 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4492 if (pkt.pts != AV_NOPTS_VALUE)
4493 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4495 if (pkt.pts != AV_NOPTS_VALUE)
4496 pkt.pts *= ist->ts_scale;
4497 if (pkt.dts != AV_NOPTS_VALUE)
4498 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check: the very first DTS of this stream
 * is compared against the file's last seen timestamp. */
4500 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4501 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4502 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4503 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4504 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4505 int64_t delta = pkt_dts - ifile->last_ts;
4506 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4507 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4508 ifile->ts_offset -= delta;
4509 av_log(NULL, AV_LOG_DEBUG,
4510 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4511 delta, ifile->ts_offset);
4512 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4513 if (pkt.pts != AV_NOPTS_VALUE)
4514 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Shift timestamps by the accumulated loop duration (-stream_loop) and
 * track the pts range for the next loop's duration estimate. */
4518 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4519 if (pkt.pts != AV_NOPTS_VALUE) {
4520 pkt.pts += duration;
4521 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4522 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4525 if (pkt.dts != AV_NOPTS_VALUE)
4526 pkt.dts += duration;
4528 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* Even with -copyts, re-enable discontinuity correction when the DTS
 * looks like a wrap (the unwrapped value is much closer to next_dts). */
4530 if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4531 (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4532 int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4533 ist->st->time_base, AV_TIME_BASE_Q,
4534 AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4535 if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4536 disable_discontinuity_correction = 0;
/* Intra-stream discontinuity handling: for TS_DISCONT formats the
 * offset is adjusted; otherwise wildly off dts/pts are dropped. */
4539 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4540 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4541 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4542 !disable_discontinuity_correction) {
4543 int64_t delta = pkt_dts - ist->next_dts;
4544 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4545 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4546 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4547 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4548 ifile->ts_offset -= delta;
4549 av_log(NULL, AV_LOG_DEBUG,
4550 "timestamp discontinuity for stream #%d:%d "
4551 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4552 ist->file_index, ist->st->index, ist->st->id,
4553 av_get_media_type_string(ist->dec_ctx->codec_type),
4554 delta, ifile->ts_offset);
4555 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4556 if (pkt.pts != AV_NOPTS_VALUE)
4557 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4560 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4561 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4562 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4563 pkt.dts = AV_NOPTS_VALUE;
4565 if (pkt.pts != AV_NOPTS_VALUE){
4566 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4567 delta = pkt_pts - ist->next_dts;
4568 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4569 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4570 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4571 pkt.pts = AV_NOPTS_VALUE;
4577 if (pkt.dts != AV_NOPTS_VALUE)
4578 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4581 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4582 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4583 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4584 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4585 av_ts2str(input_files[ist->file_index]->ts_offset),
4586 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video streams alive even when no subtitle arrives. */
4589 sub2video_heartbeat(ist, pkt.pts);
4591 process_input_packet(ist, &pkt, 0);
4594 av_packet_unref(&pkt);
4600 * Perform a step of transcoding for the specified filter graph.
4602 * @param[in] graph filter graph to consider
4603 * @param[out] best_ist input stream where a frame would allow to continue
4604 * @return 0 for success, <0 for error
/* Asks lavfi for the oldest pending frame; on success reaps output
 * frames, on EOF flushes and closes the graph's outputs, and on EAGAIN
 * selects (via *best_ist) the input stream whose buffersrc has the most
 * failed requests — i.e. the one most urgently needing a frame. */
4606 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4609 int nb_requests, nb_requests_max = 0;
4610 InputFilter *ifilter;
4614 ret = avfilter_graph_request_oldest(graph->graph);
4616 return reap_filters(0);
4618 if (ret == AVERROR_EOF) {
4619 ret = reap_filters(1);
4620 for (i = 0; i < graph->nb_outputs; i++)
4621 close_output_stream(graph->outputs[i]->ost);
4624 if (ret != AVERROR(EAGAIN))
4627 for (i = 0; i < graph->nb_inputs; i++) {
4628 ifilter = graph->inputs[i];
/* Inputs whose file is stalled (eagain) or exhausted cannot supply a
 * frame, so they are not candidates for *best_ist. */
4630 if (input_files[ist->file_index]->eagain ||
4631 input_files[ist->file_index]->eof_reached)
4633 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4634 if (nb_requests > nb_requests_max) {
4635 nb_requests_max = nb_requests;
/* No feedable input found: mark all of the graph's outputs unavailable
 * so choose_output() skips them this iteration. */
4641 for (i = 0; i < graph->nb_outputs; i++)
4642 graph->outputs[i]->ost->unavailable = 1;
4648 * Run a single step of transcoding.
4650 * @return 0 for success, <0 for error
/*
 * Run a single step of transcoding: pick the neediest output stream,
 * find the input stream that can make it progress, read one packet
 * from that input and reap any filtered frames.
 * NOTE(review): interior lines (variable declarations, early returns,
 * continue statements) are missing from this excerpt.
 */
4652 static int transcode_step(void)
4655     InputStream *ist = NULL;
     /* choose_output() selects the output stream furthest behind;
      * NULL means nothing needs data (handled in the omitted lines). */
4658     ost = choose_output();
4665         av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
     /* Output goes through a filtergraph that is not configured yet:
      * configure it now if every input filter knows its format. */
4669     if (ost->filter && !ost->filter->graph->graph) {
4670         if (ifilter_has_all_input_formats(ost->filter->graph)) {
4671             ret = configure_filtergraph(ost->filter->graph);
4673                 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4679     if (ost->filter && ost->filter->graph->graph) {
4681      * Similar case to the early audio initialization in reap_filters.
4682      * Audio is special in ffmpeg.c currently as we depend on lavfi's
4683      * audio frame buffering/creation to get the output audio frame size
4684      * in samples correct. The audio frame size for the filter chain is
4685      * configured during the output stream initialization.
4687      * Apparently avfilter_graph_request_oldest (called in
4688      * transcode_from_filter just down the line) peeks. Peeking already
4689      * puts one frame "ready to be given out", which means that any
4690      * update in filter buffer sink configuration afterwards will not
4691      * help us. And yes, even if it would be utilized,
4692      * av_buffersink_get_samples is affected, as it internally utilizes
4693      * the same early exit for peeked frames.
4695      * In other words, if avfilter_graph_request_oldest would not make
4696      * further filter chain configuration or usage of
4697      * av_buffersink_get_samples useless (by just causing the return
4698      * of the peeked AVFrame as-is), we could get rid of this additional
4699      * early encoder initialization.
4701         if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4702             init_output_stream_wrapper(ost, NULL, 1);
         /* Let the graph pick which input stream to read from next. */
4704         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4708     } else if (ost->filter) {
         /* Graph exists but is unconfigured and formats are unknown:
          * if any graph input can still deliver data, keep waiting;
          * otherwise mark this output as having exhausted its inputs. */
4710         for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4711             InputFilter *ifilter = ost->filter->graph->inputs[i];
4712             if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4718             ost->inputs_done = 1;
     /* No filtergraph (streamcopy path): read from the mapped source. */
4722         av_assert0(ost->source_index >= 0);
4723         ist = input_streams[ost->source_index];
     /* Demux one packet from the chosen input file. */
4726     ret = process_input(ist->file_index);
4727     if (ret == AVERROR(EAGAIN)) {
         /* Input temporarily dry: mark the output unavailable so
          * choose_output() tries another stream this iteration. */
4728         if (input_files[ist->file_index]->eagain)
4729             ost->unavailable = 1;
     /* EOF on input is not an error for the step as a whole. */
4734         return ret == AVERROR_EOF ? 0 : ret;
4736     return reap_filters(0);
4740 * The following code is the main loop of the file converter
/*
 * Main conversion loop: initialize, iterate transcode_step() until all
 * outputs are finished or a fatal signal arrives, then flush decoders,
 * write trailers and tear everything down.
 * NOTE(review): this excerpt omits many interior lines (declarations,
 * goto fail paths, loop closers); comments describe only visible code.
 */
4742 static int transcode(void)
4745     AVFormatContext *os;
4748     int64_t timer_start;
4749     int64_t total_packets_written = 0;
     /* Open decoders/encoders, write output headers, set up graphs. */
4751     ret = transcode_init();
4755     if (stdin_interaction) {
4756         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
     /* Baseline for the progress/bench report timing. */
4759     timer_start = av_gettime_relative();
4762     if ((ret = init_input_threads()) < 0)
     /* ---- main loop: one transcode_step() per iteration ---- */
4766     while (!received_sigterm) {
4767         int64_t cur_time= av_gettime_relative();
4769         /* if 'q' pressed, exits */
4770         if (stdin_interaction)
4771             if (check_keyboard_interaction(cur_time) < 0)
4774         /* check if there's any stream where output is still needed */
4775         if (!need_output()) {
4776             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4780         ret = transcode_step();
4781         if (ret < 0 && ret != AVERROR_EOF) {
4782             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4786         /* dump report by using the output first video and audio streams */
4787         print_report(0, timer_start, cur_time);
4790     free_input_threads();
4793     /* at the end of stream, we must flush the decoder buffers */
4794     for (i = 0; i < nb_input_streams; i++) {
4795         ist = input_streams[i];
4796         if (!input_files[ist->file_index]->eof_reached) {
             /* NULL packet == flush/EOF signal to the decoder. */
4797             process_input_packet(ist, NULL, 0);
4804     /* write the trailer if needed and close file */
4805     for (i = 0; i < nb_output_files; i++) {
4806         os = output_files[i]->ctx;
4807         if (!output_files[i]->header_written) {
             /* No header means no packet ever reached the muxer; the
              * trailer cannot be written for this file. */
4808             av_log(NULL, AV_LOG_ERROR,
4809                    "Nothing was written into output file %d (%s), because "
4810                    "at least one of its streams received no packets.\n",
4814         if ((ret = av_write_trailer(os)) < 0) {
4815             av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4821     /* dump report by using the first video and audio streams */
4822     print_report(1, timer_start, av_gettime_relative());
4824     /* close each encoder */
4825     for (i = 0; i < nb_output_streams; i++) {
4826         ost = output_streams[i];
4827         if (ost->encoding_needed) {
4828             av_freep(&ost->enc_ctx->stats_in);
4830         total_packets_written += ost->packets_written;
         /* Optionally abort when a stream produced no packets at all. */
4831         if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4832             av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4837     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4838         av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4842     /* close each decoder */
4843     for (i = 0; i < nb_input_streams; i++) {
4844         ist = input_streams[i];
4845         if (ist->decoding_needed) {
4846             avcodec_close(ist->dec_ctx);
4847             if (ist->hwaccel_uninit)
4848                 ist->hwaccel_uninit(ist->dec_ctx);
4852     hw_device_free_all();
     /* fail path also stops the input threads (reached via goto above). */
4859     free_input_threads();
     /* Per-output-stream resource cleanup: pass logfiles, option dicts,
      * forced keyframe data. */
4862     if (output_streams) {
4863         for (i = 0; i < nb_output_streams; i++) {
4864             ost = output_streams[i];
4867                 if (fclose(ost->logfile))
4868                     av_log(NULL, AV_LOG_ERROR,
4869                            "Error closing logfile, loss of information possible: %s\n",
4870                            av_err2str(AVERROR(errno)));
4871                 ost->logfile = NULL;
4873             av_freep(&ost->forced_kf_pts);
4874             av_freep(&ost->apad);
4875             av_freep(&ost->disposition);
4876             av_dict_free(&ost->encoder_opts);
4877             av_dict_free(&ost->sws_dict);
4878             av_dict_free(&ost->swr_opts);
4879             av_dict_free(&ost->resample_opts);
/*
 * Sample wall-clock plus user/system CPU time for -benchmark reporting,
 * using whichever platform API is available at build time.
 * NOTE(review): the enclosing #if (presumably HAVE_GETRUSAGE) and the
 * final return are outside this excerpt.
 */
4886 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
     /* First member (real_usec) initialized from the monotonic clock. */
4888     BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4890     struct rusage rusage;
     /* POSIX path: getrusage gives per-process CPU usage in timevals;
      * convert seconds+microseconds to a single microsecond count. */
4892     getrusage(RUSAGE_SELF, &rusage);
4893     time_stamps.user_usec =
4894         (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4895     time_stamps.sys_usec =
4896         (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4897 #elif HAVE_GETPROCESSTIMES
     /* Windows path: FILETIMEs are in 100 ns units, hence the /10 to
      * reach microseconds. */
4899     FILETIME c, e, k, u;
4900     proc = GetCurrentProcess();
4901     GetProcessTimes(proc, &c, &e, &k, &u);
4902     time_stamps.user_usec =
4903         ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4904     time_stamps.sys_usec =
4905         ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
     /* Fallback: CPU times unavailable on this platform. */
4907     time_stamps.user_usec = time_stamps.sys_usec = 0;
/*
 * Return the process's peak memory usage in bytes for -benchmark,
 * or (in the omitted #else branch) a fallback value on platforms
 * without a query API.
 */
4912 static int64_t getmaxrss(void)
4914 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
     /* POSIX: ru_maxrss is reported in kilobytes, hence the *1024. */
4915     struct rusage rusage;
4916     getrusage(RUSAGE_SELF, &rusage);
4917     return (int64_t)rusage.ru_maxrss * 1024;
4918 #elif HAVE_GETPROCESSMEMORYINFO
     /* Windows: PeakPagefileUsage is already in bytes. */
4920     PROCESS_MEMORY_COUNTERS memcounters;
4921     proc = GetCurrentProcess();
4922     memcounters.cb = sizeof(memcounters);
4923     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4924     return memcounters.PeakPagefileUsage;
4930 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: set up logging and cleanup handlers, parse the
 * command line, run transcode(), then print benchmark/decode-error
 * statistics and exit.
 * NOTE(review): several lines (declarations, goto targets, exit calls)
 * are missing from this excerpt.
 */
4934 int main(int argc, char **argv)
4937     BenchmarkTimeStamps ti;
     /* Ensure ffmpeg_cleanup runs on every exit_program() path. */
4941     register_exit(ffmpeg_cleanup);
4943     setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4945     av_log_set_flags(AV_LOG_SKIP_REPEATED);
     /* Loglevel must be known before option parsing emits messages. */
4946     parse_loglevel(argc, argv, options);
     /* Hidden "-d" first argument: daemon-style silent mode. */
4948     if(argc>1 && !strcmp(argv[1], "-d")){
4950         av_log_set_callback(log_callback_null);
4956     avdevice_register_all();
4958     avformat_network_init();
4960     show_banner(argc, argv, options);
4962     /* parse options and open all input/output files */
4963     ret = ffmpeg_parse_options(argc, argv);
     /* No files at all: print a hint instead of a bare failure. */
4967     if (nb_output_files <= 0 && nb_input_files == 0) {
4969         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4973     /* file converter / grab */
4974     if (nb_output_files <= 0) {
4975         av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
     /* presumably scans for RTP outputs to toggle related behavior;
      * the loop body is outside this excerpt — verify in full source */
4979     for (i = 0; i < nb_output_files; i++) {
4980         if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
     /* Snapshot timing before the run so -benchmark can report deltas. */
4984     current_time = ti = get_benchmark_time_stamps();
4985     if (transcode() < 0)
4988         int64_t utime, stime, rtime;
4989         current_time = get_benchmark_time_stamps();
4990         utime = current_time.user_usec - ti.user_usec;
4991         stime = current_time.sys_usec - ti.sys_usec;
4992         rtime = current_time.real_usec - ti.real_usec;
4993         av_log(NULL, AV_LOG_INFO,
4994                "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4995                utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4997     av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4998            decode_error_stat[0], decode_error_stat[1]);
     /* Fail the run if the decode-error ratio exceeds -max_error_rate. */
4999     if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
     /* 255 signals termination by an external signal. */
5002     exit_program(received_nb_signals ? 255 : main_return_code);
5003     return main_return_code;