2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/*
 * Program identity and global transcoder state.
 * NOTE(review): this is an elided, line-numbered listing — source lines are
 * missing between entries, so several declarations appear truncated here.
 */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* vstats logging target; opened lazily, closed in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
/* Per-run CPU/real-time snapshot used by -benchmark reporting. */
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Frame duplication / drop statistics for video sync. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 static unsigned nb_output_dumped = 0;
141 static int want_sdp = 1;
143 static BenchmarkTimeStamps current_time;
144 AVIOContext *progress_avio = NULL;
/* Scratch buffer for avcodec_encode_subtitle(); allocated on first use. */
146 static uint8_t *subtitle_out;
/* Global tables of input/output streams and files, indexed by *_index fields. */
148 InputStream **input_streams = NULL;
149 int nb_input_streams = 0;
150 InputFile **input_files = NULL;
151 int nb_input_files = 0;
153 OutputStream **output_streams = NULL;
154 int nb_output_streams = 0;
155 OutputFile **output_files = NULL;
156 int nb_output_files = 0;
158 FilterGraph **filtergraphs;
163 /* init terminal so that we can grab keys */
164 static struct termios oldtty;
165 static int restore_tty;
169 static void free_input_threads(void);
173 Convert subtitles to video with alpha to insert them in filter graphs.
174 This is a temporary solution until libavfilter gets real subtitles support.
/*
 * Reset ist->sub2video.frame to a blank RGB32 canvas: sized from the decoder
 * dimensions when known, otherwise from the configured sub2video.w/h.
 * The memset() zeroes the pixel data, i.e. fully transparent RGB32.
 * Returns <0 on buffer-allocation failure (error path partly elided here).
 */
177 static int sub2video_get_blank_frame(InputStream *ist)
180 AVFrame *frame = ist->sub2video.frame;
182 av_frame_unref(frame);
183 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
186 if ((ret = av_frame_get_buffer(frame, 0)) < 0)
/* NOTE(review): only plane 0 is cleared; RGB32 is a packed format so one plane suffices. */
188 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/*
 * Blit one subtitle rectangle into the RGB32 canvas.
 * Only SUBTITLE_BITMAP rects are supported; the rect's 8-bit pixels are
 * expanded through its palette (r->data[1]) into 32-bit pixels.
 * Rects lying outside the w x h canvas are warned about (and, per the elided
 * control flow, presumably skipped) to avoid out-of-bounds writes.
 */
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
195 uint32_t *pal, *dst2;
199 if (r->type != SUBTITLE_BITMAP) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
203 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205 r->x, r->y, r->w, r->h, w, h
/* Advance to the rect's top-left corner; x*4 because RGB32 is 4 bytes/pixel. */
210 dst += r->y * dst_linesize + r->x * 4;
212 pal = (uint32_t *)r->data[1];
213 for (y = 0; y < r->h; y++) {
214 dst2 = (uint32_t *)dst;
216 for (x = 0; x < r->w; x++)
217 *(dst2++) = pal[*(src2++)];
219 src += r->linesize[0];
/*
 * Push the current sub2video frame, stamped with the given pts, into every
 * buffersrc this input stream feeds. KEEP_REF lets the same frame be sent to
 * multiple filters; PUSH requests immediate processing. AVERROR_EOF from a
 * closed filter input is tolerated silently; other errors only warn.
 */
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
225 AVFrame *frame = ist->sub2video.frame;
229 av_assert1(frame->data[0]);
230 ist->sub2video.last_pts = frame->pts = pts;
231 for (i = 0; i < ist->nb_filters; i++) {
232 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
233 AV_BUFFERSRC_FLAG_KEEP_REF |
234 AV_BUFFERSRC_FLAG_PUSH);
235 if (ret != AVERROR_EOF && ret < 0)
236 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/*
 * Render an AVSubtitle (or a blank/heartbeat update when sub is NULL — see
 * the callers in sub2video_heartbeat()/sub2video_flush()) onto a fresh canvas
 * and push it into the filtergraph.
 * Display times are converted from the AV_TIME_BASE domain (ms offsets) into
 * the input stream's time base. end_pts records when the rendered subpicture
 * expires; initialize is cleared after the first successful update.
 */
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
243 AVFrame *frame = ist->sub2video.frame;
247 int64_t pts, end_pts;
252 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253 AV_TIME_BASE_Q, ist->st->time_base);
254 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255 AV_TIME_BASE_Q, ist->st->time_base);
256 num_rects = sub->num_rects;
258 /* If we are initializing the system, utilize current heartbeat
259 PTS as the start time, and show until the following subpicture
260 is received. Otherwise, utilize the previous subpicture's end time
261 as the fall-back value. */
262 pts = ist->sub2video.initialize ?
263 heartbeat_pts : ist->sub2video.end_pts;
267 if (sub2video_get_blank_frame(ist) < 0) {
268 av_log(ist->dec_ctx, AV_LOG_ERROR,
269 "Impossible to get a blank canvas.\n");
272 dst = frame->data [0];
273 dst_linesize = frame->linesize[0];
274 for (i = 0; i < num_rects; i++)
275 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
276 sub2video_push_ref(ist, pts);
277 ist->sub2video.end_pts = end_pts;
278 ist->sub2video.initialize = 0;
/*
 * Called when a frame is read from ist's file: keep all sub2video streams of
 * that file "alive" so filters (e.g. overlay) waiting on a subtitle input do
 * not stall the graph. Re-pushes the last subpicture — or triggers a fresh
 * sub2video_update() when the current one has expired or the stream is still
 * initializing — but only when a downstream filter has actually failed a
 * frame request (nb_reqs > 0, per the elided condition before line 309).
 */
281 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
283 InputFile *infile = input_files[ist->file_index];
287 /* When a frame is read from a file, examine all sub2video streams in
288 the same file and send the sub2video frame again. Otherwise, decoded
289 video frames could be accumulating in the filter graph while a filter
290 (possibly overlay) is desperately waiting for a subtitle frame. */
291 for (i = 0; i < infile->nb_streams; i++) {
292 InputStream *ist2 = input_streams[infile->ist_index + i];
293 if (!ist2->sub2video.frame)
295 /* subtitles seem to be usually muxed ahead of other streams;
296 if not, subtracting a larger time here is necessary */
297 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298 /* do not send the heartbeat frame if the subtitle is already ahead */
299 if (pts2 <= ist2->sub2video.last_pts)
301 if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302 /* if we have hit the end of the current displayed subpicture,
303 or if we need to initialize the system, update the
304 overlayed subpicture and its start/end times */
305 sub2video_update(ist2, pts2 + 1, NULL);
306 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
309 sub2video_push_ref(ist2, pts2);
/*
 * End-of-stream handling for a sub2video stream: emit one final update so the
 * last subpicture is displayed to its end time, then send NULL (EOF) to every
 * buffersrc feeding this stream's filters.
 */
313 static void sub2video_flush(InputStream *ist)
318 if (ist->sub2video.end_pts < INT64_MAX)
319 sub2video_update(ist, INT64_MAX, NULL);
320 for (i = 0; i < ist->nb_filters; i++) {
321 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322 if (ret != AVERROR_EOF && ret < 0)
323 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
327 /* end of sub2video hack */
/*
 * Restore the original terminal attributes. Must stay async-signal-safe:
 * only tcsetattr() and the (no-op at AV_LOG_QUIET) av_log flush are used,
 * so it can be called from the signal-handling path.
 */
329 static void term_exit_sigsafe(void)
333 tcsetattr (0, TCSANOW, &oldtty);
339 av_log(NULL, AV_LOG_QUIET, "%s", "");
/*
 * Signal/exit bookkeeping shared between the signal handlers and the main
 * loop. volatile because they are written from async signal context;
 * transcode_init_done is a proper atomic since it is read cross-thread.
 */
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
345 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
/* First pts seen when -copyts is in effect (AV_NOPTS_VALUE until set). */
348 static int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
/*
 * Async signal handler: records which signal arrived and how many times.
 * After more than 3 signals, writes a message with write(2) — the only
 * async-signal-safe output available — and (in elided code) hard-exits.
 */
351 sigterm_handler(int sig)
354 received_sigterm = sig;
355 received_nb_signals++;
357 if(received_nb_signals > 3) {
358 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359 strlen("Received > 3 system signals, hard exiting\n"));
360 if (ret < 0) { /* Do nothing */ };
365 #if HAVE_SETCONSOLECTRLHANDLER
/*
 * Windows console control handler: maps console events onto the POSIX-style
 * sigterm_handler(). Ctrl+C/Break act like SIGINT; close/logoff/shutdown act
 * like SIGTERM, and we then spin until the main thread finishes cleanup
 * because returning from this callback hard-terminates the process.
 */
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
368 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
373 case CTRL_BREAK_EVENT:
374 sigterm_handler(SIGINT);
377 case CTRL_CLOSE_EVENT:
378 case CTRL_LOGOFF_EVENT:
379 case CTRL_SHUTDOWN_EVENT:
380 sigterm_handler(SIGTERM);
381 /* Basically, with these 3 events, when we return from this method the
382 process is hard terminated, so stall as long as we need to
383 to try and let the main thread(s) clean up and gracefully terminate
384 (we have at most 5 seconds, but should be done far before that). */
385 while (!ffmpeg_exited) {
391 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/*
 * SIGNAL(): install a handler, via sigaction() where available (Linux branch
 * below) and plain signal() otherwise (the fallback definition is elided).
 * The lines after the macro are the interior of term_init() — its signature
 * line is missing from this elided listing: it switches the tty into a
 * non-canonical, no-echo mode for interactive key handling, then installs
 * INT/TERM/QUIT/XCPU handlers, ignores SIGPIPE, and registers the Windows
 * console handler where supported.
 */
398 #define SIGNAL(sig, func) \
400 action.sa_handler = func; \
401 sigaction(sig, &action, NULL); \
404 #define SIGNAL(sig, func) \
410 #if defined __linux__
411 struct sigaction action = {0};
412 action.sa_handler = sigterm_handler;
414 /* block other interrupts while processing this one */
415 sigfillset(&action.sa_mask);
417 /* restart interruptible functions (i.e. don't fail with EINTR) */
418 action.sa_flags = SA_RESTART;
422 if (!run_as_daemon && stdin_interaction) {
424 if (tcgetattr (0, &tty) == 0) {
/* Raw-ish input: disable break/CR-NL translation, echo and canonical mode. */
428 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429 |INLCR|IGNCR|ICRNL|IXON);
430 tty.c_oflag |= OPOST;
431 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432 tty.c_cflag &= ~(CSIZE|PARENB);
437 tcsetattr (0, TCSANOW, &tty);
439 SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
443 SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444 SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
446 SIGNAL(SIGXCPU, sigterm_handler);
449 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
451 #if HAVE_SETCONSOLECTRLHANDLER
452 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
456 /* read a key without blocking */
/*
 * Poll stdin for a single keypress without blocking: select() with a zero-ish
 * timeout on POSIX; on Windows, PeekNamedPipe() when stdin is a pipe (e.g.
 * when driven by a GUI), console APIs otherwise (elided). Returns the key or
 * a negative/sentinel value when nothing is available — exact values are in
 * the elided portion.
 */
457 static int read_key(void)
469 n = select(1, &rfds, NULL, NULL, &tv);
478 # if HAVE_PEEKNAMEDPIPE
480 static HANDLE input_handle;
483 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode() failing means stdin is not a console, i.e. a pipe. */
484 is_pipe = !GetConsoleMode(input_handle, &dw);
488 /* When running under a GUI, you will end here. */
489 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
490 // input pipe may have been closed by the program that ran ffmpeg
/*
 * AVIO interrupt callback: abort blocking I/O once a signal has arrived.
 * Before transcode init completes (transcode_init_done == 0) a single signal
 * interrupts; afterwards it takes more signals than the flag value (1), so a
 * first Ctrl+C still allows graceful trailer writing.
 */
508 static int decode_interrupt_cb(void *ctx)
510 return received_nb_signals > atomic_load(&transcode_init_done);
513 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/*
 * Tear down all global transcoder state at exit: filtergraphs (draining any
 * queued frames/subtitles), output files and streams (draining the muxing
 * queues), input threads, files and streams, the vstats file, and the global
 * stream tables. `ret` is the pending exit status, used only for the final
 * "Conversion failed!" diagnostic.
 */
515 static void ffmpeg_cleanup(int ret)
520 int maxrss = getmaxrss() / 1024;
521 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graphs, then per-input and per-output state --- */
524 for (i = 0; i < nb_filtergraphs; i++) {
525 FilterGraph *fg = filtergraphs[i];
526 avfilter_graph_free(&fg->graph);
527 for (j = 0; j < fg->nb_inputs; j++) {
528 InputFilter *ifilter = fg->inputs[j];
529 struct InputStream *ist = ifilter->ist;
/* Drain queued frames; each FIFO entry is an AVFrame* by value. */
531 while (av_fifo_size(ifilter->frame_queue)) {
533 av_fifo_generic_read(ifilter->frame_queue, &frame,
534 sizeof(frame), NULL);
535 av_frame_free(&frame);
537 av_fifo_freep(&ifilter->frame_queue);
538 if (ist->sub2video.sub_queue) {
539 while (av_fifo_size(ist->sub2video.sub_queue)) {
541 av_fifo_generic_read(ist->sub2video.sub_queue,
542 &sub, sizeof(sub), NULL);
543 avsubtitle_free(&sub);
545 av_fifo_freep(&ist->sub2video.sub_queue);
547 av_buffer_unref(&ifilter->hw_frames_ctx);
548 av_freep(&ifilter->name);
549 av_freep(&fg->inputs[j]);
551 av_freep(&fg->inputs);
552 for (j = 0; j < fg->nb_outputs; j++) {
553 OutputFilter *ofilter = fg->outputs[j];
555 avfilter_inout_free(&ofilter->out_tmp);
556 av_freep(&ofilter->name);
557 av_freep(&ofilter->formats);
558 av_freep(&ofilter->channel_layouts);
559 av_freep(&ofilter->sample_rates);
560 av_freep(&fg->outputs[j]);
562 av_freep(&fg->outputs);
563 av_freep(&fg->graph_desc);
565 av_freep(&filtergraphs[i]);
567 av_freep(&filtergraphs);
569 av_freep(&subtitle_out);
/* --- output files: close non-AVFMT_NOFILE I/O (elided) and free contexts --- */
572 for (i = 0; i < nb_output_files; i++) {
573 OutputFile *of = output_files[i];
578 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
580 avformat_free_context(s);
581 av_dict_free(&of->opts);
583 av_freep(&output_files[i]);
/* --- output streams: encoder contexts, cached frames/packets, queues --- */
585 for (i = 0; i < nb_output_streams; i++) {
586 OutputStream *ost = output_streams[i];
591 av_bsf_free(&ost->bsf_ctx);
593 av_frame_free(&ost->filtered_frame);
594 av_frame_free(&ost->last_frame);
595 av_packet_free(&ost->pkt);
596 av_dict_free(&ost->encoder_opts);
598 av_freep(&ost->forced_keyframes);
599 av_expr_free(ost->forced_keyframes_pexpr);
600 av_freep(&ost->avfilter);
601 av_freep(&ost->logfile_prefix);
603 av_freep(&ost->audio_channels_map);
604 ost->audio_channels_mapped = 0;
606 av_dict_free(&ost->sws_dict);
607 av_dict_free(&ost->swr_opts);
609 avcodec_free_context(&ost->enc_ctx);
610 avcodec_parameters_free(&ost->ref_par);
612 if (ost->muxing_queue) {
613 while (av_fifo_size(ost->muxing_queue)) {
615 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
616 av_packet_free(&pkt);
618 av_fifo_freep(&ost->muxing_queue);
621 av_freep(&output_streams[i]);
624 free_input_threads();
/* --- input files and streams --- */
626 for (i = 0; i < nb_input_files; i++) {
627 avformat_close_input(&input_files[i]->ctx);
628 av_packet_free(&input_files[i]->pkt);
629 av_freep(&input_files[i]);
631 for (i = 0; i < nb_input_streams; i++) {
632 InputStream *ist = input_streams[i];
634 av_frame_free(&ist->decoded_frame);
635 av_frame_free(&ist->filter_frame);
636 av_packet_free(&ist->pkt);
637 av_dict_free(&ist->decoder_opts);
638 avsubtitle_free(&ist->prev_sub.subtitle);
639 av_frame_free(&ist->sub2video.frame);
640 av_freep(&ist->filters);
641 av_freep(&ist->hwaccel_device);
642 av_freep(&ist->dts_buffer);
644 avcodec_free_context(&ist->dec_ctx);
646 av_freep(&input_streams[i]);
/* fclose() failure can mean buffered stats were lost — report it. */
650 if (fclose(vstats_file))
651 av_log(NULL, AV_LOG_ERROR,
652 "Error closing vstats file, loss of information possible: %s\n",
653 av_err2str(AVERROR(errno)));
655 av_freep(&vstats_filename);
657 av_freep(&input_streams);
658 av_freep(&input_files);
659 av_freep(&output_streams);
660 av_freep(&output_files);
664 avformat_network_deinit();
666 if (received_sigterm) {
667 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
668 (int) received_sigterm);
669 } else if (ret && atomic_load(&transcode_init_done)) {
670 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/*
 * Remove from *a every key that also appears in b (case-sensitive match).
 * Used to strip options already consumed by one consumer from another's dict.
 */
676 void remove_avoptions(AVDictionary **a, AVDictionary *b)
678 AVDictionaryEntry *t = NULL;
680 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
681 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/*
 * Fatal-log (and, in the elided line, exit) if any option in m was not
 * consumed — i.e. the user passed an option no component recognized.
 */
685 void assert_avoptions(AVDictionary *m)
687 AVDictionaryEntry *t;
688 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
689 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
694 static void abort_codec_experimental(const AVCodec *c, int encoder)
/*
 * -benchmark_all support: when fmt is non-NULL, log the user/sys/real time
 * deltas since the previous call under the printf-formatted label; in all
 * cases (elided tail) refresh the current_time snapshot.
 */
699 static void update_benchmark(const char *fmt, ...)
701 if (do_benchmark_all) {
702 BenchmarkTimeStamps t = get_benchmark_time_stamps();
708 vsnprintf(buf, sizeof(buf), fmt, va);
710 av_log(NULL, AV_LOG_INFO,
711 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
712 t.user_usec - current_time.user_usec,
713 t.sys_usec - current_time.sys_usec,
714 t.real_usec - current_time.real_usec, buf);
/*
 * Mark every output stream finished: `ost` itself gets this_stream flags,
 * all other streams get `others`. Used after fatal muxing errors.
 */
720 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
723 for (i = 0; i < nb_output_streams; i++) {
724 OutputStream *ost2 = output_streams[i];
725 ost2->finished |= ost == ost2 ? this_stream : others;
/*
 * Deliver one packet to the muxer for `ost`.
 * Responsibilities, in order: enforce -frames limits (skipped when the packet
 * is being unqueued, since it was already counted); buffer the packet in
 * ost->muxing_queue while the muxer header is not yet written (growing the
 * FIFO up to max_muxing_queue_size); apply sync-method timestamp stripping;
 * harvest video quality/error side data; rescale timestamps to the stream
 * time base and repair non-monotonic or inverted PTS/DTS; finally hand the
 * packet to av_interleaved_write_frame(). The packet is consumed either way.
 */
729 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
731 AVFormatContext *s = of->ctx;
732 AVStream *st = ost->st;
736 * Audio encoders may split the packets -- #frames in != #packets out.
737 * But there is no reordering, so we can limit the number of output packets
738 * by simply dropping them here.
739 * Counting encoded video frames needs to be done separately because of
740 * reordering, see do_video_out().
741 * Do not count the packet when unqueued because it has been counted when queued.
743 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
744 if (ost->frame_number >= ost->max_frames) {
745 av_packet_unref(pkt);
751 if (!of->header_written) {
753 /* the muxer is not initialized yet, buffer the packet */
754 if (!av_fifo_space(ost->muxing_queue)) {
/* Grow 2x, but once over the data-size threshold cap at max_muxing_queue_size. */
755 unsigned int are_we_over_size =
756 (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
757 int new_size = are_we_over_size ?
758 FFMIN(2 * av_fifo_size(ost->muxing_queue),
759 ost->max_muxing_queue_size) :
760 2 * av_fifo_size(ost->muxing_queue);
762 if (new_size <= av_fifo_size(ost->muxing_queue)) {
763 av_log(NULL, AV_LOG_ERROR,
764 "Too many packets buffered for output stream %d:%d.\n",
765 ost->file_index, ost->st->index);
768 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
772 ret = av_packet_make_refcounted(pkt);
775 tmp_pkt = av_packet_alloc();
/* Ownership of the packet data moves into the queued tmp_pkt. */
778 av_packet_move_ref(tmp_pkt, pkt);
779 ost->muxing_queue_data_size += tmp_pkt->size;
780 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* -vsync drop / negative async: let the muxer regenerate timestamps. */
784 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
785 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
786 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
788 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
790 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
/* Side-data layout: u32 quality, byte 4 picture type, u64 errors from offset 8. */
792 ost->quality = sd ? AV_RL32(sd) : -1;
793 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
795 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
797 ost->error[i] = AV_RL64(sd + 8 + 8*i);
802 if (ost->frame_rate.num && ost->is_cfr) {
803 if (pkt->duration > 0)
804 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
805 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
810 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
812 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
813 if (pkt->dts != AV_NOPTS_VALUE &&
814 pkt->pts != AV_NOPTS_VALUE &&
815 pkt->dts > pkt->pts) {
816 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
818 ost->file_index, ost->st->index);
/* sum - min - max == the median of the three candidate timestamps. */
820 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
821 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
822 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
824 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
825 pkt->dts != AV_NOPTS_VALUE &&
826 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
827 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict muxers require strictly increasing DTS; non-strict allow equal. */
828 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
829 if (pkt->dts < max) {
830 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
832 loglevel = AV_LOG_ERROR;
833 av_log(s, loglevel, "Non-monotonous DTS in output stream "
834 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
835 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
837 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
840 av_log(s, loglevel, "changing to %"PRId64". This may result "
841 "in incorrect timestamps in the output file.\n",
843 if (pkt->pts >= pkt->dts)
844 pkt->pts = FFMAX(pkt->pts, max);
849 ost->last_mux_dts = pkt->dts;
851 ost->data_size += pkt->size;
852 ost->packets_written++;
854 pkt->stream_index = ost->index;
857 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
858 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
859 av_get_media_type_string(ost->enc_ctx->codec_type),
860 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
861 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
866 ret = av_interleaved_write_frame(s, pkt);
868 print_error("av_interleaved_write_frame()", ret);
869 main_return_code = 1;
870 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
872 av_packet_unref(pkt);
/*
 * Mark one encoder finished and (in the elided conditional) clamp the
 * owning file's recording_time to the stream's current end position, so the
 * remaining streams stop at the same point.
 */
875 static void close_output_stream(OutputStream *ost)
877 OutputFile *of = output_files[ost->file_index];
879 ost->finished |= ENCODER_FINISHED;
881 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
882 of->recording_time = FFMIN(of->recording_time, end);
887 * Send a single packet to the output, applying any bitstream filters
888 * associated with the output stream. This may result in any number
889 * of packets actually being written, depending on what bitstream
890 * filters are applied. The supplied packet is consumed and will be
891 * blank (as if newly-allocated) when this function returns.
893 * If eof is set, instead indicate EOF to all bitstream filters and
894 * therefore flush any delayed packets to the output. A blank packet
895 * must be supplied in this case.
897 static void output_packet(OutputFile *of, AVPacket *pkt,
898 OutputStream *ost, int eof)
902 /* apply the output bitstream filters */
904 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
/* Drain everything the BSF produces; EAGAIN just means "feed more later". */
907 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
908 write_packet(of, pkt, ost, 0);
909 if (ret == AVERROR(EAGAIN))
/* No BSF configured (elided else-branch): write the packet straight through. */
912 write_packet(of, pkt, ost, 0);
915 if (ret < 0 && ret != AVERROR_EOF) {
916 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
917 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/*
 * Returns 0 (after closing the stream) once the stream's current position
 * has reached the output file's -t recording_time; the elided tail returns
 * non-zero while encoding should continue.
 */
923 static int check_recording_time(OutputStream *ost)
925 OutputFile *of = output_files[ost->file_index];
927 if (of->recording_time != INT64_MAX &&
928 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
929 AV_TIME_BASE_Q) >= 0) {
930 close_output_stream(ost);
/*
 * Rewrite frame->pts from the buffersink time base into the encoder time
 * base (offset by the file's -ss start_time) and return the same value as a
 * double with extra fractional precision. The time base denominator is
 * temporarily scaled up by `extra_bits` so the double keeps sub-tick
 * precision; a tiny FFSIGN epsilon nudges values off exact midpoints so
 * later rounding is stable. Returns AV_NOPTS_VALUE (as a double) when the
 * frame/encoder/filter state needed for the conversion is missing.
 */
936 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
939 double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
940 AVCodecContext *enc = ost->enc_ctx;
941 if (!frame || frame->pts == AV_NOPTS_VALUE ||
942 !enc || !ost->filter || !ost->filter->graph->graph)
946 AVFilterContext *filter = ost->filter->filter;
948 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
949 AVRational filter_tb = av_buffersink_get_time_base(filter);
950 AVRational tb = enc->time_base;
951 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
953 tb.den <<= extra_bits;
955 av_rescale_q(frame->pts, filter_tb, tb) -
956 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
957 float_pts /= 1 << extra_bits;
958 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
959 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* Integer pts assignment (target expression elided): same rescale at normal precision. */
962 av_rescale_q(frame->pts, filter_tb, enc->time_base) -
963 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
969 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
970 frame ? av_ts2str(frame->pts) : "NULL",
971 frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
973 enc ? enc->time_base.num : -1,
974 enc ? enc->time_base.den : -1);
/* Forward declaration; defined later in the file. */
980 static int init_output_stream(OutputStream *ost, AVFrame *frame,
981 char *error, int error_len);
/*
 * Idempotent wrapper around init_output_stream(): no-op once the stream is
 * initialized, otherwise initializes it and logs (and, per the elided tail,
 * exits) on failure.
 */
983 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
986 int ret = AVERROR_BUG;
987 char error[1024] = {0};
989 if (ost->initialized)
992 ret = init_output_stream(ost, frame, error, sizeof(error));
994 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
995 ost->file_index, ost->index, error);
/*
 * Encode one audio frame: adjust its pts to the encoder time base, enforce
 * -t, advance sync_opts by nb_samples, send the frame to the encoder and
 * drain all resulting packets through output_packet(). Fatal on encoder
 * errors (elided error paths jump to the "Audio encoding failed" log).
 */
1004 static void do_audio_out(OutputFile *of, OutputStream *ost,
1007 AVCodecContext *enc = ost->enc_ctx;
1008 AVPacket *pkt = ost->pkt;
1011 adjust_frame_pts_to_encoder_tb(of, ost, frame);
1013 if (!check_recording_time(ost))
/* With no pts or negative -async, derive the pts from the running sample count. */
1016 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1017 frame->pts = ost->sync_opts;
1018 ost->sync_opts = frame->pts + frame->nb_samples;
1019 ost->samples_encoded += frame->nb_samples;
1020 ost->frames_encoded++;
1022 update_benchmark(NULL);
1024 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1025 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1026 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1027 enc->time_base.num, enc->time_base.den);
1030 ret = avcodec_send_frame(enc, frame);
/* Receive loop: EAGAIN means the encoder wants more input — not an error. */
1035 av_packet_unref(pkt);
1036 ret = avcodec_receive_packet(enc, pkt);
1037 if (ret == AVERROR(EAGAIN))
1042 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1044 av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
1047 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1048 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1049 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
1050 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
1053 output_packet(of, pkt, ost, 0);
1058 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/*
 * Encode one AVSubtitle. A pts is mandatory. The global subtitle_out scratch
 * buffer (1 MiB) is lazily allocated. DVB subtitles are encoded twice (nb=2):
 * one packet draws the subpicture, a second clears it — the second pass gets
 * its pts pushed to the end_display_time. Display times are normalized so
 * start_display_time is 0 before encoding, and sub->num_rects is saved/
 * restored around the encode call (the clear packet is encoded with 0 rects
 * in the elided code).
 */
1062 static void do_subtitle_out(OutputFile *of,
1066 int subtitle_out_max_size = 1024 * 1024;
1067 int subtitle_out_size, nb, i;
1068 AVCodecContext *enc;
1069 AVPacket *pkt = ost->pkt;
1072 if (sub->pts == AV_NOPTS_VALUE) {
1073 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1081 if (!subtitle_out) {
1082 subtitle_out = av_malloc(subtitle_out_max_size);
1083 if (!subtitle_out) {
1084 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1089 /* Note: DVB subtitle need one packet to draw them and one other
1090 packet to clear them */
1091 /* XXX: signal it in the codec context ? */
1092 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1097 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1099 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1100 pts -= output_files[ost->file_index]->start_time;
1101 for (i = 0; i < nb; i++) {
1102 unsigned save_num_rects = sub->num_rects;
1104 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1105 if (!check_recording_time(ost))
1109 // start_display_time is required to be 0
1110 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1111 sub->end_display_time -= sub->start_display_time;
1112 sub->start_display_time = 0;
1116 ost->frames_encoded++;
1118 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1119 subtitle_out_max_size, sub);
1121 sub->num_rects = save_num_rects;
1122 if (subtitle_out_size < 0) {
1123 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
/* Package the encoded payload; pkt->data points into the shared scratch buffer. */
1127 av_packet_unref(pkt);
1128 pkt->data = subtitle_out;
1129 pkt->size = subtitle_out_size;
1130 pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1131 pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1132 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1133 /* XXX: the pts correction is handled here. Maybe handling
1134 it in the codec would be better */
1136 pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1138 pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1140 pkt->dts = pkt->pts;
1141 output_packet(of, pkt, ost, 0);
1145 static void do_video_out(OutputFile *of,
1147 AVFrame *next_picture)
1149 int ret, format_video_sync;
1150 AVPacket *pkt = ost->pkt;
1151 AVCodecContext *enc = ost->enc_ctx;
1152 AVRational frame_rate;
1153 int nb_frames, nb0_frames, i;
1154 double delta, delta0;
1155 double duration = 0;
1156 double sync_ipts = AV_NOPTS_VALUE;
1158 InputStream *ist = NULL;
1159 AVFilterContext *filter = ost->filter->filter;
1161 init_output_stream_wrapper(ost, next_picture, 1);
1162 sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1164 if (ost->source_index >= 0)
1165 ist = input_streams[ost->source_index];
1167 frame_rate = av_buffersink_get_frame_rate(filter);
1168 if (frame_rate.num > 0 && frame_rate.den > 0)
1169 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1171 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1172 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1174 if (!ost->filters_script &&
1176 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1179 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1180 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1183 if (!next_picture) {
1185 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1186 ost->last_nb0_frames[1],
1187 ost->last_nb0_frames[2]);
1189 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1190 delta = delta0 + duration;
1192 /* by default, we output a single frame */
1193 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1196 format_video_sync = video_sync_method;
1197 if (format_video_sync == VSYNC_AUTO) {
1198 if(!strcmp(of->ctx->oformat->name, "avi")) {
1199 format_video_sync = VSYNC_VFR;
1201 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1203 && format_video_sync == VSYNC_CFR
1204 && input_files[ist->file_index]->ctx->nb_streams == 1
1205 && input_files[ist->file_index]->input_ts_offset == 0) {
1206 format_video_sync = VSYNC_VSCFR;
1208 if (format_video_sync == VSYNC_CFR && copy_ts) {
1209 format_video_sync = VSYNC_VSCFR;
1212 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1216 format_video_sync != VSYNC_PASSTHROUGH &&
1217 format_video_sync != VSYNC_DROP) {
1218 if (delta0 < -0.6) {
1219 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1221 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1222 sync_ipts = ost->sync_opts;
1227 switch (format_video_sync) {
1229 if (ost->frame_number == 0 && delta0 >= 0.5) {
1230 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1233 ost->sync_opts = llrint(sync_ipts);
1236 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1237 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1239 } else if (delta < -1.1)
1241 else if (delta > 1.1) {
1242 nb_frames = lrintf(delta);
1244 nb0_frames = llrintf(delta0 - 0.6);
1250 else if (delta > 0.6)
1251 ost->sync_opts = llrint(sync_ipts);
1254 case VSYNC_PASSTHROUGH:
1255 ost->sync_opts = llrint(sync_ipts);
1262 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1263 nb0_frames = FFMIN(nb0_frames, nb_frames);
1265 memmove(ost->last_nb0_frames + 1,
1266 ost->last_nb0_frames,
1267 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1268 ost->last_nb0_frames[0] = nb0_frames;
1270 if (nb0_frames == 0 && ost->last_dropped) {
1272 av_log(NULL, AV_LOG_VERBOSE,
1273 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1274 ost->frame_number, ost->st->index, ost->last_frame->pts);
1276 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1277 if (nb_frames > dts_error_threshold * 30) {
1278 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1282 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1283 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1284 if (nb_frames_dup > dup_warning) {
1285 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1289 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1291 /* duplicates frame if needed */
1292 for (i = 0; i < nb_frames; i++) {
1293 AVFrame *in_picture;
1294 int forced_keyframe = 0;
1297 if (i < nb0_frames && ost->last_frame) {
1298 in_picture = ost->last_frame;
1300 in_picture = next_picture;
1305 in_picture->pts = ost->sync_opts;
1307 if (!check_recording_time(ost))
1310 in_picture->quality = enc->global_quality;
1311 in_picture->pict_type = 0;
1313 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1314 in_picture->pts != AV_NOPTS_VALUE)
1315 ost->forced_kf_ref_pts = in_picture->pts;
1317 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1318 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1319 if (ost->forced_kf_index < ost->forced_kf_count &&
1320 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1321 ost->forced_kf_index++;
1322 forced_keyframe = 1;
1323 } else if (ost->forced_keyframes_pexpr) {
1325 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1326 res = av_expr_eval(ost->forced_keyframes_pexpr,
1327 ost->forced_keyframes_expr_const_values, NULL);
1328 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1329 ost->forced_keyframes_expr_const_values[FKF_N],
1330 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1331 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1332 ost->forced_keyframes_expr_const_values[FKF_T],
1333 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1336 forced_keyframe = 1;
1337 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1338 ost->forced_keyframes_expr_const_values[FKF_N];
1339 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1340 ost->forced_keyframes_expr_const_values[FKF_T];
1341 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1344 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1345 } else if ( ost->forced_keyframes
1346 && !strncmp(ost->forced_keyframes, "source", 6)
1347 && in_picture->key_frame==1
1349 forced_keyframe = 1;
1352 if (forced_keyframe) {
1353 in_picture->pict_type = AV_PICTURE_TYPE_I;
1354 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1357 update_benchmark(NULL);
1359 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1360 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1361 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1362 enc->time_base.num, enc->time_base.den);
1365 ost->frames_encoded++;
1367 ret = avcodec_send_frame(enc, in_picture);
1370 // Make sure Closed Captions will not be duplicated
1371 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1374 av_packet_unref(pkt);
1375 ret = avcodec_receive_packet(enc, pkt);
1376 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1377 if (ret == AVERROR(EAGAIN))
1383 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1384 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1385 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
1386 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
1389 if (pkt->pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1390 pkt->pts = ost->sync_opts;
1392 av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
1395 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1396 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1397 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->mux_timebase),
1398 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->mux_timebase));
1401 frame_size = pkt->size;
1402 output_packet(of, pkt, ost, 0);
1404 /* if two pass, output log */
1405 if (ost->logfile && enc->stats_out) {
1406 fprintf(ost->logfile, "%s", enc->stats_out);
1411 * For video, number of frames in == number of packets out.
1412 * But there may be reordering, so we can't throw away frames on encoder
1413 * flush, we need to limit them here, before they go into encoder.
1415 ost->frame_number++;
1417 if (vstats_filename && frame_size)
1418 do_video_stats(ost, frame_size);
1421 if (!ost->last_frame)
1422 ost->last_frame = av_frame_alloc();
1423 av_frame_unref(ost->last_frame);
1424 if (next_picture && ost->last_frame)
1425 av_frame_ref(ost->last_frame, next_picture);
1427 av_frame_free(&ost->last_frame);
1431 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1435 static double psnr(double d)
1437 return -10.0 * log10(d);
/*
 * Append one line of per-frame statistics for a video output stream to the
 * global vstats file (-vstats / -vstats_file options).
 * NOTE(review): several lines of this function are elided in this chunk
 * (e.g. the fopen error path and the assignment of 'enc'); comments below
 * describe only the visible code.
 */
static void do_video_stats(OutputStream *ost, int frame_size)
    AVCodecContext *enc;
    double ti1, bitrate, avg_bitrate;
    /* this is executed just the first time do_video_stats is called */
    vstats_file = fopen(vstats_filename, "w");
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* version 1 of the vstats format omits the output-file/stream columns */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        /* PSNR column only when the encoder collected squared-error stats */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        /* instantaneous bitrate assumes one frame per encoder time_base tick */
        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * Mark an output stream as finished for both the encoder and the muxer.
 * In the visible branch all sibling streams of the same output file are
 * also finished (the guarding condition is elided in this chunk).
 */
static void finish_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];
    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
    /* propagate the finished state to every stream of this output file */
    for (i = 0; i < of->ctx->nb_streams; i++)
        output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/**
 * Get and encode new output from any of the filtergraphs, without causing
 * activity in the demuxer/decoder direction (frames are pulled with
 * AV_BUFFERSINK_FLAG_NO_REQUEST, never requested).
 *
 * @param flush if nonzero, send a NULL frame to video encoders on sink EOF
 * @return 0 for success, <0 for severe errors
 */
static int reap_filters(int flush)
    AVFrame *filtered_frame = NULL;
    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        /* skip streams without a configured filtergraph */
        if (!ost->filter || !ost->filter->graph->graph)
        filter = ost->filter->filter;
        /*
         * Unlike video, with audio the audio frame size matters.
         * Currently we are fully reliant on the lavfi filter chain to
         * do the buffering deed for us, and thus the frame size parameter
         * needs to be set accordingly. Where does one get the required
         * frame size? From the initialized AVCodecContext of an audio
         * encoder. Thus, if we have gotten to an audio stream, initialize
         * the encoder earlier than receiving the first AVFrame.
         */
        if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
            init_output_stream_wrapper(ost, NULL, 1);
        /* lazily allocate the per-stream packet and frame */
        if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
            return AVERROR(ENOMEM);
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        filtered_frame = ost->filtered_frame;
        /* pull without requesting more input from upstream */
        ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                            AV_BUFFERSINK_FLAG_NO_REQUEST);
            /* EAGAIN/EOF are expected here; anything else is only warned about */
            if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_WARNING,
                       "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
            } else if (flush && ret == AVERROR_EOF) {
                /* flush the video encoder with a NULL frame on EOF */
                if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
                    do_video_out(of, ost, NULL);
        if (ost->finished) {
            av_frame_unref(filtered_frame);
        switch (av_buffersink_get_type(filter)) {
        case AVMEDIA_TYPE_VIDEO:
            /* without an explicit aspect override, take SAR from the frame */
            if (!ost->frame_aspect_ratio.num)
                enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
            do_video_out(of, ost, filtered_frame);
        case AVMEDIA_TYPE_AUDIO:
            /* channel-count changes need encoder support for param changes */
            if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                enc->channels != filtered_frame->channels) {
                av_log(NULL, AV_LOG_ERROR,
                       "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
            do_audio_out(of, ost, filtered_frame);
            // TODO support subtitle filters
        av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type byte totals, muxing overhead
 * percentage, and (at verbose level) per-stream demux/decode and
 * encode/mux statistics for every input and output file.
 * NOTE(review): several guard/close lines are elided in this chunk.
 */
static void print_final_stats(int64_t total_size)
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;           /* -1 means "unknown" and prints as such */
    /* accumulate written bytes per media type over all output streams */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
        case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
        case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
        default: other_size += ost->data_size; break;
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* pass-1-only streams are excluded (consequent line elided here) */
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
    /* muxing overhead = container bytes on top of the raw stream bytes */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;
    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");
    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;
        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;
            total_size += ist->data_size;
            total_packets += ist->nb_packets;
            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);
            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            av_log(NULL, AV_LOG_VERBOSE, "\n");
        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    /* same accounting on the output side, counting muxed packets */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;
        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;
            total_size += ost->data_size;
            total_packets += ost->packets_written;
            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);
            av_log(NULL, AV_LOG_VERBOSE, "\n");
        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    /* warn loudly when nothing at all was produced */
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        av_log(NULL, AV_LOG_WARNING, "\n");
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Build and emit the periodic (and final) progress line: frame count, fps,
 * q, size, time, bitrate, speed, dup/drop counts, and optionally PSNR.
 * Also writes the machine-readable key=value variant to -progress output.
 * Uses static locals to rate-limit reports to 'stats_period'.
 * NOTE(review): many lines of this large function are elided in this chunk;
 * comments describe only the visible code.
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
    AVBPrint buf, buf_script;
    AVFormatContext *oc;
    AVCodecContext *enc;
    int frame_number, vid, i;
    int64_t pts = INT64_MIN + 1;      /* running max of stream end timestamps */
    static int64_t last_time = -1;    /* time of the previous report */
    static int first_report = 1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    /* nothing to do when neither console stats nor -progress are active */
    if (!print_stats && !is_last_report && !progress_avio)
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
        /* rate-limit: skip until stats_period elapsed (first report waits
         * until every output header has been written) */
        if (((cur_time - last_time) < stats_period && !first_report) ||
            (first_report && nb_output_dumped < nb_output_files))
        last_time = cur_time;
    t = (cur_time-timer_start) / 1000000.0;
    oc = output_files[0]->ctx;
    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        /* q is only meaningful for encoded (non-copied) streams */
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        /* first video stream drives the frame/fps columns */
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
                av_bprintf(&buf, "L");
                /* QP histogram printed as one hex digit per bucket */
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    /* last report uses cumulative error, periodic uses per-frame */
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
            /* with -copyts, report time relative to the first seen pts */
            if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
                copy_ts_first_pts = pts;
            if (copy_ts_first_pts != AV_NOPTS_VALUE)
                pts -= copy_ts_first_pts;
        nb_frames_drop += ost->last_dropped;
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    hours_sign = (pts < 0) ? "-" : "";
    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else                av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    if (nb_frames_dup || nb_frames_drop)
        av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        /* print_stats==1: bypass av_log when the log level would hide it */
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf.str, end);
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf.str, end);
    av_bprint_finalize(&buf, NULL);
    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        print_final_stats(total_size);
/*
 * Seed an InputFilter's media parameters from the stream's codec
 * parameters when no decoded frame was ever received, so the filtergraph
 * can still be configured.
 */
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
    // We never got any input. Set a fake format, which will
    // come from libavformat.
    ifilter->format = par->format;
    /* audio parameters */
    ifilter->sample_rate         = par->sample_rate;
    ifilter->channels            = par->channels;
    ifilter->channel_layout      = par->channel_layout;
    /* video parameters */
    ifilter->width               = par->width;
    ifilter->height              = par->height;
    ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/*
 * Drain every encoder at end of stream: send a NULL frame and receive the
 * remaining packets, muxing them until AVERROR_EOF. Streams that never got
 * any data are force-initialized first so the output file is still valid.
 * NOTE(review): several control-flow lines are elided in this chunk.
 */
static void flush_encoders(void)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile *of = output_files[ost->file_index];
        if (!ost->encoding_needed)
        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            av_log(NULL, AV_LOG_WARNING,
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);
            if (ost->filter && !fg->graph) {
                /* fall back to codecpar-derived formats for unconfigured inputs */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                if (!ifilter_has_all_input_formats(fg))
                ret = configure_filtergraph(fg);
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    finish_output_stream(ost);
            init_output_stream_wrapper(ost, NULL, 1);
        /* only video and audio encoders are drained here */
        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
            const char *desc = NULL;
            AVPacket *pkt = ost->pkt;
            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
            case AVMEDIA_TYPE_VIDEO:
            update_benchmark(NULL);
            av_packet_unref(pkt);
            /* EAGAIN from receive means the encoder wants the flush frame */
            while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
            /* two-pass: append encoder stats to the log file */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            if (ret == AVERROR_EOF) {
                /* signal EOF to the muxer so bitstream filters flush too */
                output_packet(of, pkt, ost, 1);
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(pkt);
            av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt->size;
            output_packet(of, pkt, ost, 0);
            if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                do_video_stats(ost, pkt_size);
/*
 * Check whether a packet from ist should be written into ost at this time
 * (stream mapping matches and the output file's start time was reached).
 * NOTE(review): the return statements are elided in this chunk.
 */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
    /* only packets from the mapped source stream are eligible */
    if (ost->source_index != ist_index)
    /* drop packets before the requested output start time (-ss on output) */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding (-c copy):
 * clone the packet, rescale its timestamps into the muxer timebase,
 * subtract the output start time, and hand it to the muxer. Also enforces
 * recording-time limits and EOF flushing.
 * NOTE(review): some guard/return lines are elided in this chunk.
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket *opkt = ost->pkt;
    av_packet_unref(opkt);
    // EOF: flush output bitstream filters.
        output_packet(of, opkt, ost, 1);
    /* don't start a copied stream on a non-keyframe unless asked to */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        /* compare in whichever timebase has a usable timestamp */
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
    /* stop the stream once the output recording time (-t) is exceeded */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
    /* same for the per-input-file recording limit */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
    if (av_packet_ref(opkt, pkt) < 0)
    if (pkt->pts != AV_NOPTS_VALUE)
        opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
    if (pkt->dts == AV_NOPTS_VALUE) {
        /* no dts in the packet: fall back to the input stream's running dts */
        opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* audio: rescale with sample-accurate delta to avoid rounding drift */
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
            duration = ist->dec_ctx->frame_size;
        opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                     (AVRational){1, ist->dec_ctx->sample_rate}, duration,
                                     &ist->filter_in_rescale_delta_last, ost->mux_timebase);
        /* dts will be set immediately afterwards to what pts is now */
        opkt->pts = opkt->dts - ost_tb_start_time;
        opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt->dts -= ost_tb_start_time;
    opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
    output_packet(of, opkt, ost, 0);
/*
 * If the decoder reported no channel layout, pick the default layout for
 * the channel count (bounded by -guess_layout_max) and warn about the
 * guess. Return statements are elided in this chunk.
 */
int guess_input_channel_layout(InputStream *ist)
    AVCodecContext *dec = ist->dec_ctx;
    if (!dec->channel_layout) {
        char layout_name[256];
        /* too many channels to guess reliably — give up (branch body elided) */
        if (dec->channels > ist->guess_layout_max)
        dec->channel_layout = av_get_default_channel_layout(dec->channels);
        if (!dec->channel_layout)
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Update the global decode success/error counters and, when a frame was
 * produced, warn (or abort with -xerror) if the decoder flagged it as
 * corrupt.
 */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;    /* index 0 = success, 1 = error */
    if (ret < 0 && exit_on_error)
    if (*got_output && ist) {
        if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
            av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
                   "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
// Filters can be configured only if the formats of all inputs are known.
// Returns whether every audio/video input of the graph has a known format
// (the return statements themselves are elided in this chunk).
static int ifilter_has_all_input_formats(FilterGraph *fg)
    for (i = 0; i < fg->nb_inputs; i++) {
        /* format < 0 means "not yet determined" for a/v inputs */
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Push a decoded frame into one filtergraph input. If the frame's
 * parameters differ from what the graph was configured with, the graph is
 * drained and reconfigured; frames arriving before all inputs have known
 * formats are cloned and queued instead.
 * NOTE(review): some guard/cleanup lines are elided in this chunk.
 */
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;
    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;
    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
                       ifilter->channels       != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width  != frame->width ||
                       ifilter->height != frame->height;
    /* honour -reinit_filter 0: keep the existing graph despite changes */
    if (!ifilter->ist->reinit_filters && fg->graph)
    /* hw frames context changes always force a reinit */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        ret = ifilter_parameters_from_frame(ifilter, frame);
    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* not all inputs known yet: queue a clone of the frame */
                AVFrame *tmp = av_frame_clone(frame);
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);
                /* grow the FIFO when full before queueing */
                if (!av_fifo_space(ifilter->frame_queue)) {
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                        av_frame_free(&tmp);
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
        /* drain downstream before tearing the old graph down */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        ret = configure_filtergraph(fg);
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal EOF on one filtergraph input at timestamp 'pts'. If the graph was
 * never configured, fall back to codecpar-derived parameters; failing
 * that, an a/v input with unknown format is a hard error.
 */
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
    if (ifilter->filter) {
        /* graph exists: close the buffersrc at the given timestamp */
        ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
        // the filtergraph was never configured
        if (ifilter->format < 0)
            ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
        if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
            av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
            return AVERROR_INVALIDDATA;
// Wrapper around the send/receive decode API that emulates the old
// one-packet-in/one-frame-out calling convention.
// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
// There is the following difference: if you got a frame, you must call
// it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
        ret = avcodec_send_packet(avctx, pkt);
        // In particular, we don't expect AVERROR(EAGAIN), because we read all
        // decoded frames with avcodec_receive_frame() until done.
        if (ret < 0 && ret != AVERROR_EOF)
    ret = avcodec_receive_frame(avctx, frame);
    if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Distribute one decoded frame to every filtergraph input fed by this
 * stream. All but the last filter get a reference to the frame so each
 * consumer owns its own buffer refs; EOF from a filter is not an error.
 */
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
    av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            /* intermediate consumers get a ref; the last gets the frame itself */
            f = ist->filter_frame;
            ret = av_frame_ref(f, decoded_frame);
        ret = ifilter_send_frame(ist->filters[i], f);
        if (ret == AVERROR_EOF)
            ret = 0; /* ignore */
            av_log(NULL, AV_LOG_ERROR,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, fix up the decoded frame's timestamp (choosing
 * the best available timebase), advance the stream's predicted pts/dts by
 * the frame duration, and send the frame to the filtergraphs.
 * NOTE(review): several lines are elided in this chunk; comments describe
 * only the visible code.
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    AVRational decoded_frame_tb;
    /* lazily allocate the per-stream scratch frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    update_benchmark(NULL);
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    /* a decoder that reports no sample rate produced unusable output */
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);
    if (!*got_output || ret < 0)
    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
    /* choose the best available timestamp source, in order of preference:
       frame pts, packet pts, then the stream's running dts */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    /* rescale to the sample-rate timebase with sample-accurate deltas */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
/* Decode one video packet (or flush on EOF) for input stream 'ist' and send
 * any produced frame to the attached filtergraphs.
 * NOTE(review): this extract is non-contiguous (the embedded original line
 * numbers jump), so statements between the visible lines are missing here;
 * comments below describe only what is visible. */
2389 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2392 AVFrame *decoded_frame;
2393 int i, ret = 0, err = 0;
2394 int64_t best_effort_timestamp;
2395 int64_t dts = AV_NOPTS_VALUE;
2397 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2398 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2400 if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the reusable frames used for decoding and filtering. */
2403 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2404 return AVERROR(ENOMEM);
2405 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2406 return AVERROR(ENOMEM);
2407 decoded_frame = ist->decoded_frame;
/* Rescale the stream-global dts into the stream time base for this packet. */
2408 if (ist->dts != AV_NOPTS_VALUE)
2409 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2411 pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2414 // The old code used to set dts on the drain packet, which does not work
2415 // with the new API anymore.
/* Remember the dts of each fed packet so a timestamp can be recovered for
 * frames drained at EOF (see the dts_buffer consumption further down). */
2417 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2419 return AVERROR(ENOMEM);
2420 ist->dts_buffer = new;
2421 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2424 update_benchmark(NULL);
2425 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
2426 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2430 // The following line may be required in some cases where there is no parser
2431 // or the parser does not has_b_frames correctly
2432 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2433 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2434 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2436 av_log(ist->dec_ctx, AV_LOG_WARNING,
2437 "video_delay is larger in decoder than demuxer %d > %d.\n"
2438 "If you want to help, upload a sample "
2439 "of this file to https://streams.videolan.org/upload/ "
2440 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2441 ist->dec_ctx->has_b_frames,
2442 ist->st->codecpar->video_delay);
2445 if (ret != AVERROR_EOF)
2446 check_decode_result(ist, got_output, ret);
/* Diagnostic only: the decoder context and the produced frame disagreeing on
 * geometry/format is logged but not treated as an error here. */
2448 if (*got_output && ret >= 0) {
2449 if (ist->dec_ctx->width != decoded_frame->width ||
2450 ist->dec_ctx->height != decoded_frame->height ||
2451 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2452 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2453 decoded_frame->width,
2454 decoded_frame->height,
2455 decoded_frame->format,
2456 ist->dec_ctx->width,
2457 ist->dec_ctx->height,
2458 ist->dec_ctx->pix_fmt);
2462 if (!*got_output || ret < 0)
/* Honor a user-forced field order (-top option), if set. */
2465 if(ist->top_field_first>=0)
2466 decoded_frame->top_field_first = ist->top_field_first;
2468 ist->frames_decoded++;
/* Download the frame from a hwaccel surface to system memory when needed. */
2470 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2471 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2475 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2477 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2478 *duration_pts = decoded_frame->pkt_duration;
/* In forced-CFR mode, timestamps are simply a running frame counter. */
2480 if (ist->framerate.num)
2481 best_effort_timestamp = ist->cfr_next_pts++;
/* While draining, fall back to the oldest buffered packet dts and pop it. */
2483 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2484 best_effort_timestamp = ist->dts_buffer[0];
2486 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2487 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2488 ist->nb_dts_buffer--;
2491 if(best_effort_timestamp != AV_NOPTS_VALUE) {
/* Note: this also assigns best_effort_timestamp to decoded_frame->pts. */
2492 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2494 if (ts != AV_NOPTS_VALUE)
2495 ist->next_pts = ist->pts = ts;
2499 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2500 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2501 ist->st->index, av_ts2str(decoded_frame->pts),
2502 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2503 best_effort_timestamp,
2504 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2505 decoded_frame->key_frame, decoded_frame->pict_type,
2506 ist->st->time_base.num, ist->st->time_base.den);
/* Prefer the container-level aspect ratio over the codec-level one. */
2509 if (ist->st->sample_aspect_ratio.num)
2510 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2512 err = send_frame_to_filters(ist, decoded_frame);
2515 av_frame_unref(ist->filter_frame);
2516 av_frame_unref(decoded_frame);
/* Filter errors take precedence over the decode return value. */
2517 return err < 0 ? err : ret;
/* Decode one subtitle packet and either hand the result to sub2video
 * (subtitle-over-video filtering) or encode it into all matching subtitle
 * output streams.
 * NOTE(review): this extract is non-contiguous (original line numbers jump);
 * some statements between the visible lines are missing. */
2520 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2523 AVSubtitle subtitle;
2525 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2526 &subtitle, got_output, pkt);
2528 check_decode_result(NULL, got_output, ret);
2530 if (ret < 0 || !*got_output) {
2533 sub2video_flush(ist);
/* -fix_sub_duration: the real end time of a subtitle is only known once the
 * next one arrives, so output is delayed by one subtitle and the previous
 * one's duration is clipped against the current pts. */
2537 if (ist->fix_sub_duration) {
2539 if (ist->prev_sub.got_output) {
2540 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2541 1000, AV_TIME_BASE);
2542 if (end < ist->prev_sub.subtitle.end_display_time) {
2543 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2544 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2545 ist->prev_sub.subtitle.end_display_time, end,
2546 end <= 0 ? ", dropping it" : "");
2547 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current and previous results so the (now fixed) previous subtitle is
 * the one processed below. */
2550 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2551 FFSWAP(int, ret, ist->prev_sub.ret);
2552 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2560 if (ist->sub2video.frame) {
2561 sub2video_update(ist, INT64_MIN, &subtitle);
2562 } else if (ist->nb_filters) {
/* Filters not fully configured yet: queue the subtitle in a growable FIFO
 * until sub2video can consume it. */
2563 if (!ist->sub2video.sub_queue)
2564 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2565 if (!ist->sub2video.sub_queue)
2567 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2568 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2572 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2576 if (!subtitle.num_rects)
2579 ist->frames_decoded++;
/* Encode the subtitle into every eligible subtitle output stream. */
2581 for (i = 0; i < nb_output_streams; i++) {
2582 OutputStream *ost = output_streams[i];
2584 if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2586 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2587 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2590 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2595 avsubtitle_free(&subtitle);
/* Signal EOF (with the stream's final pts, converted back to the stream time
 * base) to every filtergraph input fed by this input stream. */
2599 static int send_filter_eof(InputStream *ist)
2602 /* TODO keep pts also in stream time base to avoid converting back */
2603 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2604 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2606 for (i = 0; i < ist->nb_filters; i++) {
2607 ret = ifilter_send_eof(ist->filters[i], pts);
2614 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Core per-packet driver: maintains the input stream's pts/dts bookkeeping,
 * dispatches the packet to the per-media-type decode functions (or keeps
 * looping while the decoder drains on EOF), sends filter EOF when flushing is
 * complete, and handles the stream-copy (no decoding) path.
 * Returns !eof_reached, i.e. nonzero while the stream still has data.
 * NOTE(review): this extract is non-contiguous (original line numbers jump);
 * some statements between the visible lines are missing. */
2615 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2619 int eof_reached = 0;
2623 if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
2624 return AVERROR(ENOMEM);
/* First packet: seed dts/pts. The negative offset compensates for decoder
 * delay (has_b_frames) when an average frame rate is known. */
2627 if (!ist->saw_first_ts) {
2628 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2630 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2631 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2632 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2634 ist->saw_first_ts = 1;
2637 if (ist->next_dts == AV_NOPTS_VALUE)
2638 ist->next_dts = ist->dts;
2639 if (ist->next_pts == AV_NOPTS_VALUE)
2640 ist->next_pts = ist->pts;
/* Work on a local reference of the caller's (const) packet. */
2643 av_packet_unref(avpkt);
2644 ret = av_packet_ref(avpkt, pkt);
2649 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2650 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2651 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2652 ist->next_pts = ist->pts = ist->dts;
2655 // while we have more to decode or while the decoder did output something on EOF
2656 while (ist->decoding_needed) {
2657 int64_t duration_dts = 0;
2658 int64_t duration_pts = 0;
2660 int decode_failed = 0;
2662 ist->pts = ist->next_pts;
2663 ist->dts = ist->next_dts;
2665 switch (ist->dec_ctx->codec_type) {
2666 case AVMEDIA_TYPE_AUDIO:
/* 'repeating' iterations pass NULL so the decoder is only drained. */
2667 ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2669 av_packet_unref(avpkt);
2671 case AVMEDIA_TYPE_VIDEO:
2672 ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
/* Estimate the next dts: prefer the packet duration, else derive it from the
 * decoder frame rate and repeat_pict from the parser. */
2674 if (!repeating || !pkt || got_output) {
2675 if (pkt && pkt->duration) {
2676 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2677 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2678 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2679 duration_dts = ((int64_t)AV_TIME_BASE *
2680 ist->dec_ctx->framerate.den * ticks) /
2681 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2684 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2685 ist->next_dts += duration_dts;
2687 ist->next_dts = AV_NOPTS_VALUE;
/* For pts advancement, the frame's own duration (in stream tb) wins. */
2691 if (duration_pts > 0) {
2692 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2694 ist->next_pts += duration_dts;
2697 av_packet_unref(avpkt);
2699 case AVMEDIA_TYPE_SUBTITLE:
2702 ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2703 if (!pkt && ret >= 0)
2705 av_packet_unref(avpkt);
2711 if (ret == AVERROR_EOF) {
2717 if (decode_failed) {
2718 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2719 ist->file_index, ist->st->index, av_err2str(ret));
2721 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2722 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2724 if (!decode_failed || exit_on_error)
2730 ist->got_output = 1;
2735 // During draining, we might get multiple output frames in this loop.
2736 // ffmpeg.c does not drain the filter chain on configuration changes,
2737 // which means if we send multiple frames at once to the filters, and
2738 // one of those frames changes configuration, the buffered frames will
2739 // be lost. This can upset certain FATE tests.
2740 // Decode only 1 frame per call on EOF to appease these FATE tests.
2741 // The ideal solution would be to rewrite decoding to use the new
2742 // decoding API in a better way.
2749 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2750 /* except when looping we need to flush but not to send an EOF */
2751 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2752 int ret = send_filter_eof(ist);
2754 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2759 /* handle stream copy */
2760 if (!ist->decoding_needed && pkt) {
2761 ist->dts = ist->next_dts;
2762 switch (ist->dec_ctx->codec_type) {
2763 case AVMEDIA_TYPE_AUDIO:
2764 av_assert1(pkt->duration >= 0);
2765 if (ist->dec_ctx->sample_rate) {
2766 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2767 ist->dec_ctx->sample_rate;
2769 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2772 case AVMEDIA_TYPE_VIDEO:
2773 if (ist->framerate.num) {
2774 // TODO: Remove work-around for c99-to-c89 issue 7
2775 AVRational time_base_q = AV_TIME_BASE_Q;
/* Snap next_dts onto the forced frame-rate grid, then advance one frame. */
2776 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2777 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2778 } else if (pkt->duration) {
2779 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2780 } else if(ist->dec_ctx->framerate.num != 0) {
2781 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2782 ist->next_dts += ((int64_t)AV_TIME_BASE *
2783 ist->dec_ctx->framerate.den * ticks) /
2784 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2788 ist->pts = ist->dts;
2789 ist->next_pts = ist->next_dts;
/* Copy the packet through to each stream-copy output it belongs to. */
2791 for (i = 0; i < nb_output_streams; i++) {
2792 OutputStream *ost = output_streams[i];
2794 if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2796 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2799 do_streamcopy(ist, ost, pkt);
2802 return !eof_reached;
/* Build an SDP description covering all "rtp" output muxers and either print
 * it to stdout or write it to -sdp_file. Waits until every output file has
 * written its header.
 * NOTE(review): non-contiguous extract; some lines are missing. */
2805 static void print_sdp(void)
2810 AVIOContext *sdp_pb;
2811 AVFormatContext **avc;
/* Bail out until all output headers are written. */
2813 for (i = 0; i < nb_output_files; i++) {
2814 if (!output_files[i]->header_written)
2818 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP outputs; j counts how many were found. */
2821 for (i = 0, j = 0; i < nb_output_files; i++) {
2822 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2823 avc[j] = output_files[i]->ctx;
2831 av_sdp_create(avc, j, sdp, sizeof(sdp));
2833 if (!sdp_filename) {
2834 printf("SDP:\n%s\n", sdp);
2837 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2838 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2840 avio_print(sdp_pb, sdp);
2841 avio_closep(&sdp_pb);
2842 av_freep(&sdp_filename);
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick a hwaccel format matching the user's -hwaccel request,
 * initializing the hwaccel; falls through to software otherwise.
 * NOTE(review): non-contiguous extract; loop/brace structure between the
 * visible lines is missing. */
2850 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2852 InputStream *ist = s->opaque;
2853 const enum AVPixelFormat *p;
2856 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2857 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2858 const AVCodecHWConfig *config = NULL;
/* Software formats are skipped; the decoder's own preference order rules. */
2861 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2864 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2865 ist->hwaccel_id == HWACCEL_AUTO) {
/* Generic path: search the codec's hw configs for one that supports a
 * device context and offers this pixel format. */
2867 config = avcodec_get_hw_config(s->codec, i);
2870 if (!(config->methods &
2871 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2873 if (config->pix_fmt == *p)
2878 if (config->device_type != ist->hwaccel_device_type) {
2879 // Different hwaccel offered, ignore.
2883 ret = hwaccel_decode_init(s);
/* Explicitly requested hwaccel that fails to init is fatal; AUTO just
 * falls through to the next candidate. */
2885 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2886 av_log(NULL, AV_LOG_FATAL,
2887 "%s hwaccel requested for input stream #%d:%d, "
2888 "but cannot be initialized.\n",
2889 av_hwdevice_get_type_name(config->device_type),
2890 ist->file_index, ist->st->index);
2891 return AV_PIX_FMT_NONE;
/* Legacy path: look the format up in the static hwaccels[] table. */
2896 const HWAccel *hwaccel = NULL;
2898 for (i = 0; hwaccels[i].name; i++) {
2899 if (hwaccels[i].pix_fmt == *p) {
2900 hwaccel = &hwaccels[i];
2905 // No hwaccel supporting this pixfmt.
2908 if (hwaccel->id != ist->hwaccel_id) {
2909 // Does not match requested hwaccel.
2913 ret = hwaccel->init(s);
2915 av_log(NULL, AV_LOG_FATAL,
2916 "%s hwaccel requested for input stream #%d:%d, "
2917 "but cannot be initialized.\n", hwaccel->name,
2918 ist->file_index, ist->st->index);
2919 return AV_PIX_FMT_NONE;
/* Propagate an externally supplied hw frames context to the decoder. */
2923 if (ist->hw_frames_ctx) {
2924 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2925 if (!s->hw_frames_ctx)
2926 return AV_PIX_FMT_NONE;
2929 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * active hwaccel when the frame uses its pixel format, else use the default
 * allocator. */
2936 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2938 InputStream *ist = s->opaque;
2940 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2941 return ist->hwaccel_get_buffer(s, frame, flags);
2943 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer2 callbacks, apply per-stream decoder options and
 * hw device setup, then avcodec_open2(). On failure an error message is
 * written into 'error'.
 * NOTE(review): non-contiguous extract; some lines are missing. */
2946 static int init_input_stream(int ist_index, char *error, int error_len)
2949 InputStream *ist = input_streams[ist_index];
2951 if (ist->decoding_needed) {
2952 const AVCodec *codec = ist->dec;
2954 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2955 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2956 return AVERROR(EINVAL);
2959 ist->dec_ctx->opaque = ist;
2960 ist->dec_ctx->get_format = get_format;
2961 ist->dec_ctx->get_buffer2 = get_buffer;
2962 #if LIBAVCODEC_VERSION_MAJOR < 60
2963 ist->dec_ctx->thread_safe_callbacks = 1;
/* DVB subtitles used for direct output need compute_edt for correct display
 * durations; mixing that with filtering is only partially supported. */
2966 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2967 (ist->decoding_needed & DECODING_FOR_OST)) {
2968 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2969 if (ist->decoding_needed & DECODING_FOR_FILTER)
2970 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2973 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2975 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2976 * audio, and video decoders such as cuvid or mediacodec */
2977 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2979 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2980 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2981 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2982 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2983 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2985 ret = hw_device_setup_for_decode(ist);
2987 snprintf(error, error_len, "Device setup failed for "
2988 "decoder on input stream #%d:%d : %s",
2989 ist->file_index, ist->st->index, av_err2str(ret));
2993 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2994 if (ret == AVERROR_EXPERIMENTAL)
2995 abort_codec_experimental(codec, 0);
2997 snprintf(error, error_len,
2998 "Error while opening decoder for input stream "
3000 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options not consumed by avcodec_open2 are reported. */
3003 assert_avoptions(ist->decoder_opts);
3006 ist->next_pts = AV_NOPTS_VALUE;
3007 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream an output stream is mapped from, or (presumably,
 * via a line not visible in this extract) NULL when it has no source. */
3012 static InputStream *get_input_stream(OutputStream *ost)
3014 if (ost->source_index >= 0)
3015 return input_streams[ost->source_index];
/* qsort comparator for int64_t values; FFDIFFSIGN avoids the overflow a
 * plain subtraction would risk. */
3019 static int compare_int64(const void *a, const void *b)
3021 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
3024 /* open the muxer when all the streams are initialized */
/* Once every stream of the output file is initialized: write the muxer
 * header, optionally emit the SDP, and flush packets that were queued while
 * the muxer was not yet open.
 * NOTE(review): non-contiguous extract; some lines are missing. */
3025 static int check_init_output_file(OutputFile *of, int file_index)
3029 for (i = 0; i < of->ctx->nb_streams; i++) {
3030 OutputStream *ost = output_streams[of->ost_index + i];
3031 if (!ost->initialized)
3035 of->ctx->interrupt_callback = int_cb;
3037 ret = avformat_write_header(of->ctx, &of->opts);
3039 av_log(NULL, AV_LOG_ERROR,
3040 "Could not write header for output file #%d "
3041 "(incorrect codec parameters ?): %s\n",
3042 file_index, av_err2str(ret));
3045 //assert_avoptions(of->opts);
3046 of->header_written = 1;
3048 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3051 if (sdp_filename || want_sdp)
3054 /* flush the muxing queues */
3055 for (i = 0; i < of->ctx->nb_streams; i++) {
3056 OutputStream *ost = output_streams[of->ost_index + i];
3058 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3059 if (!av_fifo_size(ost->muxing_queue))
3060 ost->mux_timebase = ost->st->time_base;
3062 while (av_fifo_size(ost->muxing_queue)) {
3064 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3065 ost->muxing_queue_data_size -= pkt->size;
3066 write_packet(of, pkt, ost, 1);
3067 av_packet_free(&pkt);
/* Initialize the output stream's bitstream filter chain and propagate the
 * filtered codec parameters and time base back to the stream. */
3074 static int init_output_bsfs(OutputStream *ost)
3076 AVBSFContext *ctx = ost->bsf_ctx;
3082 ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3086 ctx->time_base_in = ost->st->time_base;
3088 ret = av_bsf_init(ctx);
3090 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
/* The BSF may rewrite codec parameters and time base; adopt its output. */
3095 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3098 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream in stream-copy mode: derive its codec parameters,
 * codec tag, time base, disposition, side data and per-media fixups from the
 * source input stream without transcoding.
 * NOTE(review): non-contiguous extract; some lines are missing. */
3103 static int init_output_stream_streamcopy(OutputStream *ost)
3105 OutputFile *of = output_files[ost->file_index];
3106 InputStream *ist = get_input_stream(ost);
3107 AVCodecParameters *par_dst = ost->st->codecpar;
3108 AVCodecParameters *par_src = ost->ref_par;
3111 uint32_t codec_tag = par_dst->codec_tag;
3113 av_assert0(ist && !ost->filter);
/* Route input codecpar through a codec context so user -c:copy options can
 * still be applied, then read the result back into par_src. */
3115 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3117 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3119 av_log(NULL, AV_LOG_FATAL,
3120 "Error setting up codec context options.\n");
3124 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3126 av_log(NULL, AV_LOG_FATAL,
3127 "Error getting reference codec parameters.\n");
/* Keep the source codec tag only if the output container can represent it. */
3132 unsigned int codec_tag_tmp;
3133 if (!of->ctx->oformat->codec_tag ||
3134 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3135 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3136 codec_tag = par_src->codec_tag;
3139 ret = avcodec_parameters_copy(par_dst, par_src);
3143 par_dst->codec_tag = codec_tag;
3145 if (!ost->frame_rate.num)
3146 ost->frame_rate = ist->framerate;
3147 ost->st->avg_frame_rate = ost->frame_rate;
3149 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3153 // copy timebase while removing common factors
3154 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
3155 if (ost->frame_rate.num)
3156 ost->st->time_base = av_inv_q(ost->frame_rate);
/* av_add_q with 0/1 reduces the rational to lowest terms. */
3158 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3161 // copy estimated duration as a hint to the muxer
3162 if (ost->st->duration <= 0 && ist->st->duration > 0)
3163 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3166 ost->st->disposition = ist->st->disposition;
3168 if (ist->st->nb_side_data) {
3169 for (i = 0; i < ist->st->nb_side_data; i++) {
3170 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3173 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3175 return AVERROR(ENOMEM);
3176 memcpy(dst_data, sd_src->data, sd_src->size);
/* User overrode rotation: emit a display matrix with the negated angle. */
3180 if (ost->rotate_overridden) {
3181 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3182 sizeof(int32_t) * 9);
3184 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3187 switch (par_dst->codec_type) {
3188 case AVMEDIA_TYPE_AUDIO:
3189 if (audio_volume != 256) {
3190 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Certain MP3/AC3 block_align values are bogus in copy mode; clear them. */
3193 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3194 par_dst->block_align= 0;
3195 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3196 par_dst->block_align= 0;
3198 case AVMEDIA_TYPE_VIDEO:
3199 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3201 av_mul_q(ost->frame_aspect_ratio,
3202 (AVRational){ par_dst->height, par_dst->width });
3203 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3204 "with stream copy may produce invalid files\n");
3206 else if (ist->st->sample_aspect_ratio.num)
3207 sar = ist->st->sample_aspect_ratio;
3209 sar = par_src->sample_aspect_ratio;
3210 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3211 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3212 ost->st->r_frame_rate = ist->st->r_frame_rate;
3216 ost->mux_timebase = ist->st->time_base;
/* Tag the output stream's metadata with an "encoder" string (library ident
 * plus encoder name), unless the user already set one or bitexact mode is
 * requested via format or codec flags. */
3221 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3223 AVDictionaryEntry *e;
3225 uint8_t *encoder_string;
3226 int encoder_string_len;
3227 int format_flags = 0;
3228 int codec_flags = ost->enc_ctx->flags;
/* User-supplied encoder tag wins; leave it untouched. */
3230 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / -flags option strings to detect +bitexact. */
3233 e = av_dict_get(of->opts, "fflags", NULL, 0);
3235 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3238 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3240 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3242 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3245 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3248 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3249 encoder_string = av_mallocz(encoder_string_len);
3250 if (!encoder_string)
/* In bitexact mode, emit a stable "Lavc " prefix without version info. */
3253 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3254 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3256 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3257 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3258 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3259 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames argument 'kf' (comma-separated times, plus the
 * special "chapters[+offset]" form) into a sorted array of timestamps in the
 * encoder time base, stored on ost->forced_kf_pts/forced_kf_count.
 * NOTE(review): non-contiguous extract; some lines are missing. */
3262 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3263 AVCodecContext *avctx)
3266 int n = 1, i, size, index = 0;
/* First pass: count commas to size the timestamp array. */
3269 for (p = kf; *p; p++)
3273 pts = av_malloc_array(size, sizeof(*pts));
3275 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3280 for (i = 0; i < n; i++) {
3281 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at each chapter start (+offset). */
3286 if (!memcmp(p, "chapters", 8)) {
3288 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3291 if (avf->nb_chapters > INT_MAX - size ||
3292 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3294 av_log(NULL, AV_LOG_FATAL,
3295 "Could not allocate forced key frames array.\n");
3298 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3299 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3301 for (j = 0; j < avf->nb_chapters; j++) {
3302 AVChapter *c = avf->chapters[j];
3303 av_assert1(index < size);
3304 pts[index++] = av_rescale_q(c->start, c->time_base,
3305 avctx->time_base) + t;
/* Plain timestamp entry. */
3310 t = parse_time_or_die("force_key_frames", p, 1);
3311 av_assert1(index < size);
3312 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3319 av_assert0(index == size);
3320 qsort(pts, size, sizeof(*pts), compare_int64);
3321 ost->forced_kf_count = size;
3322 ost->forced_kf_pts = pts;
/* Choose the encoder time base: an explicit positive -enc_time_base wins; a
 * negative value means "copy from the input stream"; otherwise fall back to
 * the supplied per-media default. */
3325 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3327 InputStream *ist = get_input_stream(ost);
3328 AVCodecContext *enc_ctx = ost->enc_ctx;
3329 AVFormatContext *oc;
3331 if (ost->enc_timebase.num > 0) {
3332 enc_ctx->time_base = ost->enc_timebase;
3336 if (ost->enc_timebase.num < 0) {
3338 enc_ctx->time_base = ist->st->time_base;
/* "-1" requested a copy but there is no input stream to copy from. */
3342 oc = output_files[ost->file_index]->ctx;
3343 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3346 enc_ctx->time_base = default_time_base;
/* Configure the encoder context for an output stream that will be encoded:
 * disposition, frame rate selection, per-media-type parameters pulled from
 * the filtergraph sink, field order, and forced-keyframe parsing.
 * NOTE(review): non-contiguous extract (original line numbers jump); some
 * statements between the visible lines are missing. */
3349 static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3351 InputStream *ist = get_input_stream(ost);
3352 AVCodecContext *enc_ctx = ost->enc_ctx;
3353 AVCodecContext *dec_ctx = NULL;
3354 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3357 set_encoder_id(output_files[ost->file_index], ost);
3359 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3360 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3361 // which have to be filtered out to prevent leaking them to output files.
3362 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3365 ost->st->disposition = ist->st->disposition;
3367 dec_ctx = ist->dec_ctx;
3369 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the file, mark it as the
 * default audio/video stream. */
3371 for (j = 0; j < oc->nb_streams; j++) {
3372 AVStream *st = oc->streams[j];
3373 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3376 if (j == oc->nb_streams)
3377 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3378 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3379 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame-rate selection: filter sink, then input -r, then input r_frame_rate,
 * then a 25 fps last resort. */
3382 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3383 if (!ost->frame_rate.num)
3384 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3385 if (ist && !ost->frame_rate.num)
3386 ost->frame_rate = ist->framerate;
3387 if (ist && !ost->frame_rate.num)
3388 ost->frame_rate = ist->st->r_frame_rate;
3389 if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3390 ost->frame_rate = (AVRational){25, 1};
3391 av_log(NULL, AV_LOG_WARNING,
3393 "about the input framerate is available. Falling "
3394 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3395 "if you want a different framerate.\n",
3396 ost->file_index, ost->index);
3399 if (ost->max_frame_rate.num &&
3400 (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
3401 !ost->frame_rate.den))
3402 ost->frame_rate = ost->max_frame_rate;
/* Snap to the encoder's supported frame-rate list unless -force_fps. */
3404 if (ost->enc->supported_framerates && !ost->force_fps) {
3405 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3406 ost->frame_rate = ost->enc->supported_framerates[idx];
3408 // reduce frame rate for mpeg4 to be within the spec limits
3409 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3410 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3411 ost->frame_rate.num, ost->frame_rate.den, 65535);
3415 switch (enc_ctx->codec_type) {
3416 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the buffersink feeding this encoder. */
3417 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3419 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3420 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3421 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3422 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3423 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3425 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3428 case AVMEDIA_TYPE_VIDEO:
3429 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3431 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3432 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3433 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3434 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3435 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3436 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3439 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3440 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3441 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3442 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3443 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3444 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3446 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3448 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3449 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
/* Color properties are taken from the first filtered frame when available. */
3452 enc_ctx->color_range = frame->color_range;
3453 enc_ctx->color_primaries = frame->color_primaries;
3454 enc_ctx->color_trc = frame->color_trc;
3455 enc_ctx->colorspace = frame->colorspace;
3456 enc_ctx->chroma_sample_location = frame->chroma_location;
3459 enc_ctx->framerate = ost->frame_rate;
3461 ost->st->avg_frame_rate = ost->frame_rate;
3464 enc_ctx->width != dec_ctx->width ||
3465 enc_ctx->height != dec_ctx->height ||
3466 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3467 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -top option: 0 = bottom-field-first, 1 = top-field-first. */
3470 if (ost->top_field_first == 0) {
3471 enc_ctx->field_order = AV_FIELD_BB;
3472 } else if (ost->top_field_first == 1) {
3473 enc_ctx->field_order = AV_FIELD_TT;
3477 if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
3478 ost->top_field_first >= 0)
3479 frame->top_field_first = !!ost->top_field_first;
3481 if (frame->interlaced_frame) {
3482 if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3483 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3485 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3487 enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
/* -force_key_frames: either an expression ("expr:..."), "source" (handled
 * elsewhere), or a static comma-separated time list. */
3490 if (ost->forced_keyframes) {
3491 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3492 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3493 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3495 av_log(NULL, AV_LOG_ERROR,
3496 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3499 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3500 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3501 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3502 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3504 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3505 // parse it only for static kf timings
3506 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3507 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3511 case AVMEDIA_TYPE_SUBTITLE:
3512 enc_ctx->time_base = AV_TIME_BASE_Q;
3513 if (!enc_ctx->width) {
3514 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3515 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3518 case AVMEDIA_TYPE_DATA:
3525 ost->mux_timebase = enc_ctx->time_base;
/*
 * Fully initialize one output stream: run the encoder setup path
 * (encoder options, hw device, subtitle header, avcodec_open2, side data,
 * timebase/duration hints) or the streamcopy path, then apply user
 * dispositions, init bitstream filters and try to write the output header.
 * On failure a human-readable message is written into error[error_len].
 * NOTE(review): this extract is missing interior lines (error-return
 * branches, closing braces); comments below describe only what is visible.
 */
3530 static int init_output_stream(OutputStream *ost, AVFrame *frame,
3531 char *error, int error_len)
3535 if (ost->encoding_needed) {
3536 const AVCodec *codec = ost->enc;
3537 AVCodecContext *dec = NULL;
3540 ret = init_output_stream_encode(ost, frame);
3544 if ((ist = get_input_stream(ost)))
3546 if (dec && dec->subtitle_header) {
3547 /* ASS code assumes this buffer is null terminated so add extra byte. */
3548 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3549 if (!ost->enc_ctx->subtitle_header)
3550 return AVERROR(ENOMEM);
3551 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3552 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default encoder options: auto threading, and 128 kb/s for audio when the
 * user supplied neither "b" nor the legacy "ab" option. */
3554 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3555 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3556 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3558 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3559 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3560 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3562 ret = hw_device_setup_for_encode(ost);
3564 snprintf(error, error_len, "Device setup failed for "
3565 "encoder on output stream #%d:%d : %s",
3566 ost->file_index, ost->index, av_err2str(ret));
/* Subtitle transcoding sanity check: text->bitmap or bitmap->text
 * conversion is not supported, only like-to-like. */
3570 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3571 int input_props = 0, output_props = 0;
3572 AVCodecDescriptor const *input_descriptor =
3573 avcodec_descriptor_get(dec->codec_id);
3574 AVCodecDescriptor const *output_descriptor =
3575 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3576 if (input_descriptor)
3577 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3578 if (output_descriptor)
3579 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3580 if (input_props && output_props && input_props != output_props) {
3581 snprintf(error, error_len,
3582 "Subtitle encoding currently only possible from text to text "
3583 "or bitmap to bitmap");
3584 return AVERROR_INVALIDDATA;
3588 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3589 if (ret == AVERROR_EXPERIMENTAL)
3590 abort_codec_experimental(codec, 1);
3591 snprintf(error, error_len,
3592 "Error while opening encoder for output stream #%d:%d - "
3593 "maybe incorrect parameters such as bit_rate, rate, width or height",
3594 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to deliver frames
 * of exactly enc_ctx->frame_size samples. */
3597 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3598 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3599 av_buffersink_set_frame_size(ost->filter->filter,
3600 ost->enc_ctx->frame_size);
3601 assert_avoptions(ost->encoder_opts);
3602 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3603 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3604 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3605 " It takes bits/s as argument, not kbits/s\n");
/* Publish the opened encoder's parameters into the muxer-visible codecpar. */
3607 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3609 av_log(NULL, AV_LOG_FATAL,
3610 "Error initializing the output stream codec context.\n");
/* Copy encoder-produced coded side data to the output stream. */
3614 if (ost->enc_ctx->nb_coded_side_data) {
3617 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3618 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3621 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3623 return AVERROR(ENOMEM);
3624 memcpy(dst_data, sd_src->data, sd_src->size);
3629 * Add global input side data. For now this is naive, and copies it
3630 * from the input stream's global side data. All side data should
3631 * really be funneled over AVFrame and libavfilter, then added back to
3632 * packet side data, and then potentially using the first packet for
3637 for (i = 0; i < ist->st->nb_side_data; i++) {
3638 AVPacketSideData *sd = &ist->st->side_data[i];
3639 if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3640 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3642 return AVERROR(ENOMEM);
3643 memcpy(dst, sd->data, sd->size);
/* autorotate consumed the rotation in the filter chain, so zero out the
 * display matrix rotation on the output to avoid double rotation. */
3644 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3645 av_display_rotation_set((uint32_t *)dst, 0);
3650 // copy timebase while removing common factors
3651 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3652 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3654 // copy estimated duration as a hint to the muxer
3655 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3656 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3657 } else if (ost->stream_copy) {
3658 ret = init_output_stream_streamcopy(ost);
3663 // parse user provided disposition, and update stream values
3664 if (ost->disposition) {
/* Local AVOption table so av_opt_eval_flags() can parse the textual
 * "-disposition" flag list (e.g. "default+forced") into the bitmask. */
3665 static const AVOption opts[] = {
3666 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3667 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3668 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3669 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3670 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3671 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3672 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3673 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3674 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3675 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3676 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3677 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3678 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3679 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3680 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3681 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3684 static const AVClass class = {
3686 .item_name = av_default_item_name,
3688 .version = LIBAVUTIL_VERSION_INT,
3690 const AVClass *pclass = &class;
3692 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3697 /* initialize bitstream filters for the output stream
3698 * needs to be done here, because the codec id for streamcopy is not
3699 * known until now */
3700 ret = init_output_bsfs(ost);
3704 ost->initialized = 1;
/* The output file header can be written once all its streams are initialized. */
3706 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/*
 * Warn (once per stream index) about a stream that appeared mid-demux and
 * is therefore ignored. nb_streams_warn remembers the highest index already
 * reported so repeated packets from the same new stream stay quiet.
 */
3713 static void report_new_stream(int input_index, AVPacket *pkt)
3715 InputFile *file = input_files[input_index];
3716 AVStream *st = file->ctx->streams[pkt->stream_index];
3718 if (pkt->stream_index < file->nb_streams_warn
3720 av_log(file->ctx, AV_LOG_WARNING,
3721 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3722 av_get_media_type_string(st->codecpar->codec_type),
3723 input_index, pkt->stream_index,
3724 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3725 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to a source stream, set up framerate emulation, init all input
 * streams, init non-frame-driven output streams, discard unused programs,
 * write headers for streamless outputs, and dump the stream mapping.
 * Returns 0 on success, <0 on error. Sets transcode_init_done on success.
 * NOTE(review): interior lines (gotos/continues/braces) are missing from
 * this extract; control flow between the visible lines is partly inferred.
 */
3728 static int transcode_init(void)
3730 int ret = 0, i, j, k;
3731 AVFormatContext *oc;
3734 char error[1024] = {0};
/* For outputs fed by a single-input complex filtergraph, record which
 * input stream ultimately feeds them (used for mapping display below). */
3736 for (i = 0; i < nb_filtergraphs; i++) {
3737 FilterGraph *fg = filtergraphs[i];
3738 for (j = 0; j < fg->nb_outputs; j++) {
3739 OutputFilter *ofilter = fg->outputs[j];
3740 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3742 if (fg->nb_inputs != 1)
3744 for (k = nb_input_streams-1; k >= 0 ; k--)
3745 if (fg->inputs[0]->ist == input_streams[k])
3747 ofilter->ost->source_index = k;
3751 /* init framerate emulation */
3752 for (i = 0; i < nb_input_files; i++) {
3753 InputFile *ifile = input_files[i];
3754 if (ifile->rate_emu)
3755 for (j = 0; j < ifile->nb_streams; j++)
3756 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3759 /* init input streams */
3760 for (i = 0; i < nb_input_streams; i++)
3761 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close every already-opened output encoder before bailing. */
3762 for (i = 0; i < nb_output_streams; i++) {
3763 ost = output_streams[i];
3764 avcodec_close(ost->enc_ctx);
3770 * initialize stream copy and subtitle/data streams.
3771 * Encoded AVFrame based streams will get initialized as follows:
3772 * - when the first AVFrame is received in do_video_out
3773 * - just before the first AVFrame is received in either transcode_step
3774 * or reap_filters due to us requiring the filter chain buffer sink
3775 * to be configured with the correct audio frame size, which is only
3776 * known after the encoder is initialized.
3778 for (i = 0; i < nb_output_streams; i++) {
/* Skip encoded audio/video here — see the comment above. */
3779 if (!output_streams[i]->stream_copy &&
3780 (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3781 output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3784 ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3789 /* discard unused programs */
3790 for (i = 0; i < nb_input_files; i++) {
3791 InputFile *ifile = input_files[i];
3792 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3793 AVProgram *p = ifile->ctx->programs[j];
3794 int discard = AVDISCARD_ALL;
/* Keep the program if any of its member streams is used. */
3796 for (k = 0; k < p->nb_stream_indexes; k++)
3797 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3798 discard = AVDISCARD_DEFAULT;
3801 p->discard = discard;
3805 /* write headers for files with no streams */
3806 for (i = 0; i < nb_output_files; i++) {
3807 oc = output_files[i]->ctx;
3808 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3809 ret = check_init_output_file(output_files[i], i);
3816 /* dump the stream mapping */
3817 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3818 for (i = 0; i < nb_input_streams; i++) {
3819 ist = input_streams[i];
/* Inputs feeding complex (non-simple) filtergraphs. */
3821 for (j = 0; j < ist->nb_filters; j++) {
3822 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3823 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3824 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3825 ist->filters[j]->name);
3826 if (nb_filtergraphs > 1)
3827 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3828 av_log(NULL, AV_LOG_INFO, "\n");
3833 for (i = 0; i < nb_output_streams; i++) {
3834 ost = output_streams[i];
3836 if (ost->attachment_filename) {
3837 /* an attached file */
3838 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3839 ost->attachment_filename, ost->file_index, ost->index);
3843 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3844 /* output from a complex graph */
3845 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3846 if (nb_filtergraphs > 1)
3847 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3849 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3850 ost->index, ost->enc ? ost->enc->name : "?");
/* Plain input->output mapping line. */
3854 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3855 input_streams[ost->source_index]->file_index,
3856 input_streams[ost->source_index]->st->index,
3859 if (ost->sync_ist != input_streams[ost->source_index])
3860 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3861 ost->sync_ist->file_index,
3862 ost->sync_ist->st->index);
3863 if (ost->stream_copy)
3864 av_log(NULL, AV_LOG_INFO, " (copy)");
3866 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3867 const AVCodec *out_codec = ost->enc;
3868 const char *decoder_name = "?";
3869 const char *in_codec_name = "?";
3870 const char *encoder_name = "?";
3871 const char *out_codec_name = "?";
3872 const AVCodecDescriptor *desc;
/* "native" is printed when the codec implementation name matches the
 * codec descriptor name (i.e. not an external lib like libx264). */
3875 decoder_name = in_codec->name;
3876 desc = avcodec_descriptor_get(in_codec->id);
3878 in_codec_name = desc->name;
3879 if (!strcmp(decoder_name, in_codec_name))
3880 decoder_name = "native";
3884 encoder_name = out_codec->name;
3885 desc = avcodec_descriptor_get(out_codec->id);
3887 out_codec_name = desc->name;
3888 if (!strcmp(encoder_name, out_codec_name))
3889 encoder_name = "native";
3892 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3893 in_codec_name, decoder_name,
3894 out_codec_name, encoder_name);
3896 av_log(NULL, AV_LOG_INFO, "\n");
3900 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3904 atomic_store(&transcode_init_done, 1);
3909 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3910 static int need_output(void)
3914 for (i = 0; i < nb_output_streams; i++) {
3915 OutputStream *ost = output_streams[i];
3916 OutputFile *of = output_files[ost->file_index];
3917 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* A stream counts as done when it is finished or its file hit -fs. */
3919 if (ost->finished ||
3920 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of this output file. */
3922 if (ost->frame_number >= ost->max_frames) {
3924 for (j = 0; j < of->ctx->nb_streams; j++)
3925 close_output_stream(output_streams[of->ost_index + j]);
3936 * Select the output stream to process.
3938 * @return selected output stream, or NULL if none available
3940 static OutputStream *choose_output(void)
3943 int64_t opts_min = INT64_MAX;
3944 OutputStream *ost_min = NULL;
3946 for (i = 0; i < nb_output_streams; i++) {
3947 OutputStream *ost = output_streams[i];
/* Muxed-so-far position in AV_TIME_BASE units; streams with no DTS yet
 * sort first (INT64_MIN) so they get fed before the others. */
3948 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3949 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3951 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3952 av_log(NULL, AV_LOG_DEBUG,
3953 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3954 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3956 if (!ost->initialized && !ost->inputs_done)
/* Pick the least-advanced unfinished stream; an "unavailable" one yields
 * NULL so the caller retries later. */
3959 if (!ost->finished && opts < opts_min) {
3961 ost_min = ost->unavailable ? NULL : ost;
/*
 * Enable (on != 0) or disable terminal echo on stdin via termios.
 * Silently does nothing if stdin is not a tty (tcgetattr fails).
 */
3967 static void set_tty_echo(int on)
3971 if (tcgetattr(0, &tty) == 0) {
3972 if (on) tty.c_lflag |= ECHO;
3973 else tty.c_lflag &= ~ECHO;
3974 tcsetattr(0, TCSANOW, &tty);
/*
 * Poll for interactive keyboard commands during transcoding, at most once
 * every 100ms (and never in daemon mode). Handles verbosity (+/-), QP
 * histogram (s), filter commands (c/C), debug-level cycling (d/D) and the
 * help key. Returns AVERROR_EXIT when the user asked to quit or a fatal
 * signal was received, 0 otherwise.
 */
3979 static int check_keyboard_interaction(int64_t cur_time)
3982 static int64_t last_time;
3983 if (received_nb_signals)
3984 return AVERROR_EXIT;
3985 /* read_key() returns 0 on EOF */
3986 if(cur_time - last_time >= 100000 && !run_as_daemon){
3988 last_time = cur_time;
3992 return AVERROR_EXIT;
3993 if (key == '+') av_log_set_level(av_log_get_level()+10);
3994 if (key == '-') av_log_set_level(av_log_get_level()-10);
3995 if (key == 's') qp_hist ^= 1;
3998 do_hex_dump = do_pkt_dump = 0;
3999 } else if(do_pkt_dump){
4003 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read "<target> <time> <command>[ <arg>]" from the tty and send
 * or queue it to matching filters ('c' = first match only). */
4005 if (key == 'c' || key == 'C'){
4006 char buf[4096], target[64], command[256], arg[256] = {0};
4009 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4012 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4017 fprintf(stderr, "\n");
4019 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4020 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4021 target, time, command, arg);
4022 for (i = 0; i < nb_filtergraphs; i++) {
4023 FilterGraph *fg = filtergraphs[i];
4026 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4027 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4028 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4029 } else if (key == 'c') {
4030 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4031 ret = AVERROR_PATCHWELCOME;
4033 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4035 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4040 av_log(NULL, AV_LOG_ERROR,
4041 "Parse error, at least 3 arguments were expected, "
4042 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle one debug flag; 'D': prompt for an explicit numeric value.
 * The chosen value is applied to all decoder and encoder contexts. */
4045 if (key == 'd' || key == 'D'){
4048 debug = input_streams[0]->dec_ctx->debug << 1;
4049 if(!debug) debug = 1;
4050 while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4057 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4062 fprintf(stderr, "\n");
4063 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4064 fprintf(stderr,"error parsing debug value\n");
4066 for(i=0;i<nb_input_streams;i++) {
4067 input_streams[i]->dec_ctx->debug = debug;
4069 for(i=0;i<nb_output_streams;i++) {
4070 OutputStream *ost = output_streams[i];
4071 ost->enc_ctx->debug = debug;
4073 if(debug) av_log_set_level(AV_LOG_DEBUG);
4074 fprintf(stderr,"debug=%d\n", debug);
4077 fprintf(stderr, "key function\n"
4078 "? show this help\n"
4079 "+ increase verbosity\n"
4080 "- decrease verbosity\n"
4081 "c Send command to first matching filter supporting it\n"
4082 "C Send/Queue command to all matching filters\n"
4083 "D cycle through available debug modes\n"
4084 "h dump packets/hex press to cycle through the 3 states\n"
4086 "s Show QP histogram\n"
/*
 * Demuxer thread body (arg is the InputFile*): loop av_read_frame() and
 * forward each packet to the main thread over f->in_thread_queue.
 * On read error/EOF the error is published via
 * av_thread_message_queue_set_err_recv() so the receiver sees it.
 * In non-blocking mode, a full queue triggers one blocking retry plus a
 * warning suggesting a larger -thread_queue_size.
 */
4093 static void *input_thread(void *arg)
4096 AVPacket *pkt = f->pkt, *queue_pkt;
4097 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4101 ret = av_read_frame(f->ctx, pkt);
4103 if (ret == AVERROR(EAGAIN)) {
4108 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* The queue owns its packets, so each one is moved into a freshly
 * allocated AVPacket before sending. */
4111 queue_pkt = av_packet_alloc();
4113 av_packet_unref(pkt);
4114 av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
4117 av_packet_move_ref(queue_pkt, pkt);
4118 ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
4119 if (flags && ret == AVERROR(EAGAIN)) {
4121 ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
4122 av_log(f->ctx, AV_LOG_WARNING,
4123 "Thread message queue blocking; consider raising the "
4124 "thread_queue_size option (current value: %d)\n",
4125 f->thread_queue_size);
4128 if (ret != AVERROR_EOF)
4129 av_log(f->ctx, AV_LOG_ERROR,
4130 "Unable to send packet to main thread: %s\n",
4132 av_packet_free(&queue_pkt);
4133 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Stop and tear down the demuxer thread of input file i: signal EOF to the
 * sender, drain and free all queued packets, join the thread and free the
 * message queue. No-op when the file has no thread queue.
 */
4141 static void free_input_thread(int i)
4143 InputFile *f = input_files[i];
4146 if (!f || !f->in_thread_queue)
4148 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4149 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4150 av_packet_free(&pkt);
4152 pthread_join(f->thread, NULL);
4154 av_thread_message_queue_free(&f->in_thread_queue);
/* Tear down the demuxer threads of all input files. */
4157 static void free_input_threads(void)
4161 for (i = 0; i < nb_input_files; i++)
4162 free_input_thread(i);
/*
 * Start the demuxer thread for input file i. Default queue size is 8 when
 * more than one input exists, else 0 (threading disabled). Non-blocking
 * receive is used for non-seekable inputs (except lavfi) so the main loop
 * is not stalled by a slow/live source. Returns 0 or an AVERROR code.
 */
4165 static int init_input_thread(int i)
4168 InputFile *f = input_files[i];
4170 if (f->thread_queue_size < 0)
4171 f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4172 if (!f->thread_queue_size)
4175 if (f->ctx->pb ? !f->ctx->pb->seekable :
4176 strcmp(f->ctx->iformat->name, "lavfi"))
4177 f->non_blocking = 1;
4178 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4179 f->thread_queue_size, sizeof(f->pkt));
4183 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4184 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4185 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, not -1/errno. */
4186 return AVERROR(ret);
/* Start demuxer threads for all input files; stops at the first failure. */
4192 static int init_input_threads(void)
4196 for (i = 0; i < nb_input_files; i++) {
4197 ret = init_input_thread(i);
/* Receive the next packet from the file's demuxer-thread queue,
 * non-blocking when the file was flagged as such. */
4204 static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
4206 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4208 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Fetch the next packet of an input file, honouring -re rate emulation:
 * if any stream's decoded timeline (ist->dts, rescaled to microseconds) is
 * ahead of wall-clock time since start, return EAGAIN to throttle reading.
 * Otherwise read from the demuxer thread queue (if enabled) or directly.
 */
4212 static int get_input_packet(InputFile *f, AVPacket **pkt)
4216 for (i = 0; i < f->nb_streams; i++) {
4217 InputStream *ist = input_streams[f->ist_index + i];
4218 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4219 int64_t now = av_gettime_relative() - ist->start;
4221 return AVERROR(EAGAIN);
4226 if (f->thread_queue_size)
4227 return get_input_packet_mt(f, pkt);
4230 return av_read_frame(f->ctx, *pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. a previous step ended in EAGAIN somewhere). */
4233 static int got_eagain(void)
4236 for (i = 0; i < nb_output_streams; i++)
4237 if (output_streams[i]->unavailable)
/* Clear all per-file eagain and per-output-stream unavailable flags so the
 * next transcode_step considers every file/stream again. */
4242 static void reset_eagain(void)
4245 for (i = 0; i < nb_input_files; i++)
4246 input_files[i]->eagain = 0;
4247 for (i = 0; i < nb_output_streams; i++)
4248 output_streams[i]->unavailable = 0;
4251 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4252 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4253 AVRational time_base)
/* If *duration is unset, take tmp and its time base directly. */
4259 return tmp_time_base;
/* Otherwise compare across time bases and keep the larger duration
 * (av_compare_ts returns <0 when *duration < tmp). */
4262 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4265 return tmp_time_base;
/*
 * Seek an input file back to its start (for -stream_loop) and compute the
 * file's total duration including the last frame, so subsequent loops can
 * offset their timestamps. Audio streams take precedence for the
 * last-frame length estimate; video falls back to 1/framerate.
 * Returns the avformat_seek_file() result on failure, 0/positive otherwise.
 */
4271 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4274 AVCodecContext *avctx;
4275 int i, ret, has_audio = 0;
4276 int64_t duration = 0;
4278 ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4282 for (i = 0; i < ifile->nb_streams; i++) {
4283 ist = input_streams[ifile->ist_index + i];
4284 avctx = ist->dec_ctx;
4286 /* duration is the length of the last frame in a stream
4287 * when audio stream is present we don't care about
4288 * last video frame length because it's not defined exactly */
4289 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4293 for (i = 0; i < ifile->nb_streams; i++) {
4294 ist = input_streams[ifile->ist_index + i];
4295 avctx = ist->dec_ctx;
/* Audio: last-frame length = nb_samples at the stream's sample rate. */
4298 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4299 AVRational sample_rate = {1, avctx->sample_rate};
4301 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video (no audio present): last-frame length = one frame duration from
 * the forced framerate, else the average frame rate. */
4306 if (ist->framerate.num) {
4307 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4308 } else if (ist->st->avg_frame_rate.num) {
4309 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4314 if (!ifile->duration)
4315 ifile->time_base = ist->st->time_base;
4316 /* the total duration of the stream, max_pts - min_pts is
4317 * the duration of the stream without the last frame */
4318 if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4319 duration += ist->max_pts - ist->min_pts;
4320 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* A positive loop count is decremented each pass (negative = infinite —
 * TODO confirm; the decrement line is not visible in this extract). */
4324 if (ifile->loop > 0)
/*
 * Read and process one packet from input file file_index: handle looping
 * and EOF/flush, new-stream detection, timestamp wrap correction, ts_offset
 * and ts_scale application, discontinuity detection/correction, loop
 * duration offsets, and finally hand the packet to process_input_packet().
 */
4332 * - 0 -- one packet was read and processed
4333 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4334 * this function should be called again
4335 * - AVERROR_EOF -- this function should not be called again
4337 static int process_input(int file_index)
4339 InputFile *ifile = input_files[file_index];
4340 AVFormatContext *is;
4343 int ret, thread_ret, i, j;
4346 int disable_discontinuity_correction = copy_ts;
4349 ret = get_input_packet(ifile, &pkt);
4351 if (ret == AVERROR(EAGAIN)) {
/* Read error/EOF with -stream_loop pending: flush decoders, restart the
 * demuxer thread around the seek, and retry reading from the start. */
4355 if (ret < 0 && ifile->loop) {
4356 AVCodecContext *avctx;
4357 for (i = 0; i < ifile->nb_streams; i++) {
4358 ist = input_streams[ifile->ist_index + i];
4359 avctx = ist->dec_ctx;
4360 if (ist->decoding_needed) {
4361 ret = process_input_packet(ist, NULL, 1);
4364 avcodec_flush_buffers(avctx);
4368 free_input_thread(file_index);
4370 ret = seek_to_start(ifile, is);
4372 thread_ret = init_input_thread(file_index);
4377 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4379 ret = get_input_packet(ifile, &pkt);
4380 if (ret == AVERROR(EAGAIN)) {
4386 if (ret != AVERROR_EOF) {
4387 print_error(is->url, ret);
/* True EOF: send a final flush to each active decoder and finish outputs
 * that are fed directly (streamcopy/subtitles) rather than via lavfi. */
4392 for (i = 0; i < ifile->nb_streams; i++) {
4393 ist = input_streams[ifile->ist_index + i];
4394 if (ist->decoding_needed) {
4395 ret = process_input_packet(ist, NULL, 0);
4400 /* mark all outputs that don't go through lavfi as finished */
4401 for (j = 0; j < nb_output_streams; j++) {
4402 OutputStream *ost = output_streams[j];
4404 if (ost->source_index == ifile->ist_index + i &&
4405 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4406 finish_output_stream(ost);
4410 ifile->eof_reached = 1;
4411 return AVERROR(EAGAIN);
4417 av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
4418 is->streams[pkt->stream_index]);
4420 /* the following test is needed in case new streams appear
4421 dynamically in stream : we ignore them */
4422 if (pkt->stream_index >= ifile->nb_streams) {
4423 report_new_stream(file_index, pkt);
4424 goto discard_packet;
4427 ist = input_streams[ifile->ist_index + pkt->stream_index];
4429 ist->data_size += pkt->size;
4433 goto discard_packet;
4435 if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
4436 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4437 "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
4443 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4444 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4445 ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4446 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4447 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4448 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
4449 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
4450 av_ts2str(input_files[ist->file_index]->ts_offset),
4451 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64 pts_wrap_bits. */
4454 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4455 int64_t stime, stime2;
4456 // Correcting starttime based on the enabled streams
4457 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4458 // so we instead do it here as part of discontinuity handling
4459 if ( ist->next_dts == AV_NOPTS_VALUE
4460 && ifile->ts_offset == -is->start_time
4461 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4462 int64_t new_start_time = INT64_MAX;
4463 for (i=0; i<is->nb_streams; i++) {
4464 AVStream *st = is->streams[i];
4465 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4467 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4469 if (new_start_time > is->start_time) {
4470 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4471 ifile->ts_offset = -new_start_time;
4475 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4476 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4477 ist->wrap_correction_done = 1;
/* A dts/pts more than half a wrap period past the start time is assumed to
 * have wrapped; unwrap it and keep watching for more wrapped timestamps. */
4479 if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4480 pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
4481 ist->wrap_correction_done = 0;
4483 if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4484 pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
4485 ist->wrap_correction_done = 0;
4489 /* add the stream-global side data to the first packet */
4490 if (ist->nb_packets == 1) {
4491 for (i = 0; i < ist->st->nb_side_data; i++) {
4492 AVPacketSideData *src_sd = &ist->st->side_data[i];
4495 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4498 if (av_packet_get_side_data(pkt, src_sd->type, NULL))
4501 dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
4505 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset and the per-stream -itsscale. */
4509 if (pkt->dts != AV_NOPTS_VALUE)
4510 pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4511 if (pkt->pts != AV_NOPTS_VALUE)
4512 pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4514 if (pkt->pts != AV_NOPTS_VALUE)
4515 pkt->pts *= ist->ts_scale;
4516 if (pkt->dts != AV_NOPTS_VALUE)
4517 pkt->dts *= ist->ts_scale;
/* Inter-stream discontinuity check against the file's last seen ts (only
 * for the first dts of a stream in discontinuous formats, without -copyts). */
4519 pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4520 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4521 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4522 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4523 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4524 int64_t delta = pkt_dts - ifile->last_ts;
4525 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4526 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4527 ifile->ts_offset -= delta;
4528 av_log(NULL, AV_LOG_DEBUG,
4529 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4530 delta, ifile->ts_offset);
4531 pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4532 if (pkt->pts != AV_NOPTS_VALUE)
4533 pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and
 * track min/max pts for the next loop's duration estimate. */
4537 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4538 if (pkt->pts != AV_NOPTS_VALUE) {
4539 pkt->pts += duration;
4540 ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
4541 ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
4544 if (pkt->dts != AV_NOPTS_VALUE)
4545 pkt->dts += duration;
4547 pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* With -copyts, still allow correction when the dts looks like a wrap
 * (wrapped dts lands much closer to the expected next_dts). */
4549 if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4550 (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4551 int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
4552 ist->st->time_base, AV_TIME_BASE_Q,
4553 AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4554 if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4555 disable_discontinuity_correction = 0;
/* Per-stream discontinuity handling against the predicted next_dts:
 * discontinuous formats get their ts_offset adjusted; otherwise wildly
 * off dts/pts beyond dts_error_threshold are dropped (set to NOPTS). */
4558 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4559 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4560 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4561 !disable_discontinuity_correction) {
4562 int64_t delta = pkt_dts - ist->next_dts;
4563 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4564 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4565 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4566 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4567 ifile->ts_offset -= delta;
4568 av_log(NULL, AV_LOG_DEBUG,
4569 "timestamp discontinuity for stream #%d:%d "
4570 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4571 ist->file_index, ist->st->index, ist->st->id,
4572 av_get_media_type_string(ist->dec_ctx->codec_type),
4573 delta, ifile->ts_offset);
4574 pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4575 if (pkt->pts != AV_NOPTS_VALUE)
4576 pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4579 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4580 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4581 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
4582 pkt->dts = AV_NOPTS_VALUE;
4584 if (pkt->pts != AV_NOPTS_VALUE){
4585 int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
4586 delta = pkt_pts - ist->next_dts;
4587 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4588 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4589 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
4590 pkt->pts = AV_NOPTS_VALUE;
4596 if (pkt->dts != AV_NOPTS_VALUE)
4597 ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
4600 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4601 ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4602 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
4603 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
4604 av_ts2str(input_files[ist->file_index]->ts_offset),
4605 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4608 sub2video_heartbeat(ist, pkt->pts);
4610 process_input_packet(ist, pkt, 0);
/* Threaded input owns a heap packet (free it); direct input reuses the
 * caller-owned packet (just unref). */
4614 if (ifile->thread_queue_size)
4615 av_packet_free(&pkt);
4618 av_packet_unref(pkt);
4624 * Perform a step of transcoding for the specified filter graph.
4626 * @param[in] graph filter graph to consider
4627 * @param[out] best_ist input stream where a frame would allow to continue
4628 * @return 0 for success, <0 for error
/*
 * Pump one step of the given filter graph: ask the graph's oldest sink
 * for a frame, reap any produced output, and on EAGAIN select the input
 * stream that would best unblock the graph.
 *
 * NOTE(review): this excerpt is missing several original lines (braces,
 * declarations, early returns); comments below describe only what the
 * visible code shows.
 */
4630 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4633 int nb_requests, nb_requests_max = 0;
4634 InputFilter *ifilter;
/* Request a frame on the sink that has waited longest. */
4638 ret = avfilter_graph_request_oldest(graph->graph);
/* Success path: drain whatever the graph produced (non-flush mode). */
4640 return reap_filters(0);
/* EOF from the graph: flush all remaining filtered frames and close
 * every output stream fed by this graph. */
4642 if (ret == AVERROR_EOF) {
4643 ret = reap_filters(1);
4644 for (i = 0; i < graph->nb_outputs; i++)
4645 close_output_stream(graph->outputs[i]->ost);
/* Any error other than EAGAIN is propagated to the caller. */
4648 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph is starved. Pick the input whose buffersrc recorded
 * the most failed requests, skipping inputs whose file is temporarily
 * unavailable (eagain) or fully consumed (eof_reached). The chosen
 * stream is returned via *best_ist (assignment not visible in this
 * excerpt — confirm against full source). */
4651 for (i = 0; i < graph->nb_inputs; i++) {
4652 ifilter = graph->inputs[i];
4654 if (input_files[ist->file_index]->eagain ||
4655 input_files[ist->file_index]->eof_reached)
4657 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4658 if (nb_requests > nb_requests_max) {
4659 nb_requests_max = nb_requests;
/* Mark this graph's outputs unavailable for the current step;
 * NOTE(review): the guard preceding this loop (presumably "no best
 * input found") is not visible in this excerpt. */
4665 for (i = 0; i < graph->nb_outputs; i++)
4666 graph->outputs[i]->ost->unavailable = 1;
4672 * Run a single step of transcoding.
4674 * @return 0 for success, <0 for error
/*
 * Run a single step of transcoding: choose the most urgent output
 * stream, make sure its filter graph is configured, read/feed one
 * packet from the selected input, and reap filtered frames.
 *
 * NOTE(review): several original lines (returns, braces) are missing
 * from this excerpt; comments describe only the visible code.
 */
4676 static int transcode_step(void)
4679 InputStream *ist = NULL;
/* Pick the output stream that most needs data next. */
4682 ost = choose_output();
4689 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* The output has a filter chain but its graph is not yet instantiated:
 * configure it once all of its inputs know their format. */
4693 if (ost->filter && !ost->filter->graph->graph) {
4694 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4695 ret = configure_filtergraph(ost->filter->graph);
4697 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* Graph exists: pump it, possibly selecting an input to feed. */
4703 if (ost->filter && ost->filter->graph->graph) {
4705 * Similar case to the early audio initialization in reap_filters.
4706 * Audio is special in ffmpeg.c currently as we depend on lavfi's
4707 * audio frame buffering/creation to get the output audio frame size
4708 * in samples correct. The audio frame size for the filter chain is
4709 * configured during the output stream initialization.
4711 * Apparently avfilter_graph_request_oldest (called in
4712 * transcode_from_filter just down the line) peeks. Peeking already
4713 * puts one frame "ready to be given out", which means that any
4714 * update in filter buffer sink configuration afterwards will not
4715 * help us. And yes, even if it would be utilized,
4716 * av_buffersink_get_samples is affected, as it internally utilizes
4717 * the same early exit for peeked frames.
4719 * In other words, if avfilter_graph_request_oldest would not make
4720 * further filter chain configuration or usage of
4721 * av_buffersink_get_samples useless (by just causing the return
4722 * of the peeked AVFrame as-is), we could get rid of this additional
4723 * early encoder initialization.
4725 if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4726 init_output_stream_wrapper(ost, NULL, 1);
4728 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Filter chain present but graph still unconfigured: check whether any
 * input can still deliver data; if none can, the stream's inputs are
 * done and it will never be configured. */
4732 } else if (ost->filter) {
4734 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4735 InputFilter *ifilter = ost->filter->graph->inputs[i];
4736 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4742 ost->inputs_done = 1;
/* No filter chain: stream-copy / direct path, feed from source stream. */
4746 av_assert0(ost->source_index >= 0);
4747 ist = input_streams[ost->source_index];
/* Read and process one packet from the chosen input file. */
4750 ret = process_input(ist->file_index);
4751 if (ret == AVERROR(EAGAIN)) {
/* Input temporarily starved: mark the output unavailable this step. */
4752 if (input_files[ist->file_index]->eagain)
4753 ost->unavailable = 1;
/* EOF on input is not an error at this level. */
4758 return ret == AVERROR_EOF ? 0 : ret;
4760 return reap_filters(0);
4764 * The following code is the main loop of the file converter
/*
 * Main conversion loop: initialize, repeatedly run transcode_step()
 * until no output needs data (or a fatal error / SIGTERM), then flush
 * decoders, write trailers, print the final report, and tear down
 * encoders/decoders and per-stream state.
 *
 * NOTE(review): this excerpt is missing many original lines (braces,
 * gotos, returns); comments describe only the visible code.
 */
4766 static int transcode(void)
4769 AVFormatContext *os;
4772 int64_t timer_start;
4773 int64_t total_packets_written = 0;
4775 ret = transcode_init();
4779 if (stdin_interaction) {
4780 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
/* Start of the wall-clock used by progress reports and "bench". */
4783 timer_start = av_gettime_relative();
4786 if ((ret = init_input_threads()) < 0)
/* Main loop: one transcode_step() per iteration until interrupted. */
4790 while (!received_sigterm) {
4791 int64_t cur_time= av_gettime_relative();
4793 /* if 'q' pressed, exits */
4794 if (stdin_interaction)
4795 if (check_keyboard_interaction(cur_time) < 0)
4798 /* check if there's any stream where output is still needed */
4799 if (!need_output()) {
4800 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4804 ret = transcode_step();
4805 if (ret < 0 && ret != AVERROR_EOF) {
4806 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4810 /* dump report by using the output first video and audio streams */
4811 print_report(0, timer_start, cur_time);
4814 free_input_threads();
4817 /* at the end of stream, we must flush the decoder buffers */
4818 for (i = 0; i < nb_input_streams; i++) {
4819 ist = input_streams[i];
4820 if (!input_files[ist->file_index]->eof_reached) {
/* NULL packet = flush request for this input stream's decoder. */
4821 process_input_packet(ist, NULL, 0);
4828 /* write the trailer if needed and close file */
4829 for (i = 0; i < nb_output_files; i++) {
4830 os = output_files[i]->ctx;
/* A file whose header was never written received no packets at all. */
4831 if (!output_files[i]->header_written) {
4832 av_log(NULL, AV_LOG_ERROR,
4833 "Nothing was written into output file %d (%s), because "
4834 "at least one of its streams received no packets.\n",
4838 if ((ret = av_write_trailer(os)) < 0) {
4839 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4845 /* dump report by using the first video and audio streams */
4846 print_report(1, timer_start, av_gettime_relative());
4848 /* close each encoder */
4849 for (i = 0; i < nb_output_streams; i++) {
4850 ost = output_streams[i];
4851 if (ost->encoding_needed) {
4852 av_freep(&ost->enc_ctx->stats_in);
4854 total_packets_written += ost->packets_written;
/* Honor -abort_on empty_output_stream: a stream with zero packets is fatal. */
4855 if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4856 av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
/* Honor -abort_on empty_output: nothing written anywhere is fatal. */
4861 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4862 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4866 /* close each decoder */
4867 for (i = 0; i < nb_input_streams; i++) {
4868 ist = input_streams[i];
4869 if (ist->decoding_needed) {
4870 avcodec_close(ist->dec_ctx);
/* Release hwaccel state before freeing shared hw devices below. */
4871 if (ist->hwaccel_uninit)
4872 ist->hwaccel_uninit(ist->dec_ctx);
4876 hw_device_free_all();
/* Cleanup path (also reached via error gotos in the full source). */
4883 free_input_threads();
4886 if (output_streams) {
4887 for (i = 0; i < nb_output_streams; i++) {
4888 ost = output_streams[i];
/* Close the 2-pass stats logfile, if one was opened. */
4891 if (fclose(ost->logfile))
4892 av_log(NULL, AV_LOG_ERROR,
4893 "Error closing logfile, loss of information possible: %s\n",
4894 av_err2str(AVERROR(errno)));
4895 ost->logfile = NULL;
4897 av_freep(&ost->forced_kf_pts);
4898 av_freep(&ost->apad);
4899 av_freep(&ost->disposition);
4900 av_dict_free(&ost->encoder_opts);
4901 av_dict_free(&ost->sws_dict);
4902 av_dict_free(&ost->swr_opts);
4903 av_dict_free(&ost->resample_opts);
/*
 * Snapshot wall-clock plus per-process user/system CPU time, all in
 * microseconds, for the -benchmark report.
 *
 * real_usec is always filled from av_gettime_relative(); user/sys come
 * from getrusage() on POSIX, GetProcessTimes() on Windows, and are zero
 * when neither API is available. NOTE(review): the opening
 * "#if HAVE_GETRUSAGE" line is not visible in this excerpt.
 */
4910 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
/* First member (real_usec) initialized; the rest zeroed by the braces. */
4912 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4914 struct rusage rusage;
4916 getrusage(RUSAGE_SELF, &rusage);
/* timeval -> microseconds (64-bit multiply avoids 32-bit overflow). */
4917 time_stamps.user_usec =
4918 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4919 time_stamps.sys_usec =
4920 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4921 #elif HAVE_GETPROCESSTIMES
4923 FILETIME c, e, k, u;
4924 proc = GetCurrentProcess();
4925 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100-ns units; divide by 10 to get microseconds. */
4926 time_stamps.user_usec =
4927 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4928 time_stamps.sys_usec =
4929 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* No CPU-time API available on this platform. */
4931 time_stamps.user_usec = time_stamps.sys_usec = 0;
/*
 * Return the process's peak memory usage in bytes for the -benchmark
 * report. NOTE(review): the trailing "#else ... #endif" fallback of the
 * full source is not visible in this excerpt.
 */
4936 static int64_t getmaxrss(void)
4938 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4939 struct rusage rusage;
4940 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is in kilobytes on Linux, hence *1024; NOTE(review): some
 * platforms (e.g. macOS) report bytes — this follows the Linux reading. */
4941 return (int64_t)rusage.ru_maxrss * 1024;
4942 #elif HAVE_GETPROCESSMEMORYINFO
4944 PROCESS_MEMORY_COUNTERS memcounters;
4945 proc = GetCurrentProcess();
/* cb must be set to the struct size before the call. */
4946 memcounters.cb = sizeof(memcounters);
4947 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* Peak committed (pagefile-backed) memory, used as the RSS proxy here. */
4948 return memcounters.PeakPagefileUsage;
/* No-op log callback that discards all libav* messages; installed from
 * main() when the first argument is "-d". Body not visible in this
 * excerpt — presumably empty. */
4954 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: configure logging, register devices and network,
 * parse the command line into input/output files, run transcode(),
 * print benchmark/decode-error statistics, and exit via exit_program().
 *
 * NOTE(review): the closing brace and some intermediate lines are not
 * visible in this excerpt.
 */
4958 int main(int argc, char **argv)
4961 BenchmarkTimeStamps ti;
/* Ensure ffmpeg_cleanup runs on every exit_program() path. */
4965 register_exit(ffmpeg_cleanup);
4967 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4969 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Apply -loglevel before any other option parsing so later messages
 * honor it. */
4970 parse_loglevel(argc, argv, options);
/* "-d" as the very first argument: run detached/quiet — silence all
 * libav* logging via the null callback. */
4972 if(argc>1 && !strcmp(argv[1], "-d")){
4974 av_log_set_callback(log_callback_null);
4980 avdevice_register_all();
4982 avformat_network_init();
4984 show_banner(argc, argv, options);
4986 /* parse options and open all input/output files */
4987 ret = ffmpeg_parse_options(argc, argv);
/* Bare invocation (no inputs, no outputs): point the user at the docs. */
4991 if (nb_output_files <= 0 && nb_input_files == 0) {
4993 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4997 /* file converter / grab */
4998 if (nb_output_files <= 0) {
4999 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* NOTE(review): loop body not visible here; appears to special-case
 * outputs whose muxer is not "rtp" — confirm against full source. */
5003 for (i = 0; i < nb_output_files; i++) {
5004 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Snapshot CPU/wall clocks so -benchmark can report deltas afterwards. */
5008 current_time = ti = get_benchmark_time_stamps();
5009 if (transcode() < 0)
5012 int64_t utime, stime, rtime;
5013 current_time = get_benchmark_time_stamps();
5014 utime = current_time.user_usec - ti.user_usec;
5015 stime = current_time.sys_usec - ti.sys_usec;
5016 rtime = current_time.real_usec - ti.real_usec;
5017 av_log(NULL, AV_LOG_INFO,
5018 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
5019 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
5021 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
5022 decode_error_stat[0], decode_error_stat[1]);
/* Fail when the fraction of decode errors exceeds -max_error_rate. */
5023 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit code 255 signals termination by user signal. */
5026 exit_program(received_nb_signals ? 255 : main_return_code);
5027 return main_return_code;