2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* File-scope transcoding state shared across the whole program. */
134 static int run_as_daemon = 0;
/* Frame duplication/drop statistics used by the video sync code. */
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 static unsigned nb_output_dumped = 0;
141 static int want_sdp = 1;
/* Benchmark baseline; updated by update_benchmark(). */
143 static BenchmarkTimeStamps current_time;
144 AVIOContext *progress_avio = NULL;
/* Scratch buffer for avcodec_encode_subtitle(); lazily allocated in do_subtitle_out(). */
146 static uint8_t *subtitle_out;
/* Global input/output stream and file tables (owned here, freed in ffmpeg_cleanup()). */
148 InputStream **input_streams = NULL;
149 int nb_input_streams = 0;
150 InputFile **input_files = NULL;
151 int nb_input_files = 0;
153 OutputStream **output_streams = NULL;
154 int nb_output_streams = 0;
155 OutputFile **output_files = NULL;
156 int nb_output_files = 0;
158 FilterGraph **filtergraphs;
163 /* init terminal so that we can grab keys */
164 static struct termios oldtty;
165 static int restore_tty;
169 static void free_input_threads(void);
173 Convert subtitles to video with alpha to insert them in filter graphs.
174 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare ist->sub2video.frame as a fully transparent RGB32 canvas sized to the
 * decoder's dimensions (falling back to the configured sub2video w/h when the
 * decoder reports 0). Returns <0 on allocation failure. */
177 static int sub2video_get_blank_frame(InputStream *ist)
180 AVFrame *frame = ist->sub2video.frame;
182 av_frame_unref(frame);
183 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
186 if ((ret = av_frame_get_buffer(frame, 0)) < 0)
/* Zeroing plane 0 yields fully transparent pixels in RGB32 (alpha == 0). */
188 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle into the w x h RGB32 canvas at dst.
 * The rectangle carries PAL8 pixels plus a palette in r->data[1]; each source
 * byte is expanded to a 32-bit pixel via the palette. Rectangles that are not
 * bitmaps, or that do not fit inside the canvas, are skipped with a warning. */
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
195 uint32_t *pal, *dst2;
199 if (r->type != SUBTITLE_BITMAP) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check: the whole rect must lie inside the canvas. */
203 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205 r->x, r->y, r->w, r->h, w, h
/* Advance to the rect's top-left corner; 4 bytes per RGB32 pixel. */
210 dst += r->y * dst_linesize + r->x * 4;
212 pal = (uint32_t *)r->data[1];
213 for (y = 0; y < r->h; y++) {
214 dst2 = (uint32_t *)dst;
216 for (x = 0; x < r->w; x++)
/* Palette lookup: PAL8 index -> RGB32 pixel. */
217 *(dst2++) = pal[*(src2++)];
219 src += r->linesize[0];
/* Push the current sub2video frame (with the given pts) into every buffersrc
 * filter attached to this input stream. KEEP_REF lets the same frame be sent
 * to multiple filters; EOF from a closed filter input is not an error. */
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
225 AVFrame *frame = ist->sub2video.frame;
229 av_assert1(frame->data[0]);
230 ist->sub2video.last_pts = frame->pts = pts;
231 for (i = 0; i < ist->nb_filters; i++) {
232 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
233 AV_BUFFERSRC_FLAG_KEEP_REF |
234 AV_BUFFERSRC_FLAG_PUSH);
235 if (ret != AVERROR_EOF && ret < 0)
236 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render an AVSubtitle onto a blank canvas and push it into the filtergraph.
 * When sub is NULL (heartbeat/flush path), an empty canvas is pushed instead,
 * using heartbeat_pts or the previous subtitle's end time as the start pts. */
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
243 AVFrame *frame = ist->sub2video.frame;
247 int64_t pts, end_pts;
/* Convert display window from AV_TIME_BASE (+ms offsets) to stream time base. */
252 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253 AV_TIME_BASE_Q, ist->st->time_base);
254 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255 AV_TIME_BASE_Q, ist->st->time_base);
256 num_rects = sub->num_rects;
258 /* If we are initializing the system, utilize current heartbeat
259 PTS as the start time, and show until the following subpicture
260 is received. Otherwise, utilize the previous subpicture's end time
261 as the fall-back value. */
262 pts = ist->sub2video.initialize ?
263 heartbeat_pts : ist->sub2video.end_pts;
267 if (sub2video_get_blank_frame(ist) < 0) {
268 av_log(ist->dec_ctx, AV_LOG_ERROR,
269 "Impossible to get a blank canvas.\n");
272 dst = frame->data [0];
273 dst_linesize = frame->linesize[0];
/* Composite every rectangle of the subtitle onto the canvas. */
274 for (i = 0; i < num_rects; i++)
275 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
276 sub2video_push_ref(ist, pts);
277 ist->sub2video.end_pts = end_pts;
278 ist->sub2video.initialize = 0;
/* Called when a frame is read from ist's file: re-send the current sub2video
 * picture on every sibling subtitle stream so that filters (e.g. overlay)
 * waiting for a subtitle frame do not stall the graph. */
281 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
283 InputFile *infile = input_files[ist->file_index];
287 /* When a frame is read from a file, examine all sub2video streams in
288 the same file and send the sub2video frame again. Otherwise, decoded
289 video frames could be accumulating in the filter graph while a filter
290 (possibly overlay) is desperately waiting for a subtitle frame. */
291 for (i = 0; i < infile->nb_streams; i++) {
292 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video streams. */
293 if (!ist2->sub2video.frame)
295 /* subtitles seem to be usually muxed ahead of other streams;
296 if not, subtracting a larger time here is necessary */
297 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298 /* do not send the heartbeat frame if the subtitle is already ahead */
299 if (pts2 <= ist2->sub2video.last_pts)
301 if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302 /* if we have hit the end of the current displayed subpicture,
303 or if we need to initialize the system, update the
304 overlayed subpicture and its start/end times */
305 sub2video_update(ist2, pts2 + 1, NULL);
/* Only re-push if some buffersrc actually failed a frame request. */
306 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
309 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: display the final blank
 * picture (if a subtitle is still showing) and send EOF (NULL frame) to
 * every attached buffersrc so the filtergraph can drain. */
313 static void sub2video_flush(InputStream *ist)
318 if (ist->sub2video.end_pts < INT64_MAX)
319 sub2video_update(ist, INT64_MAX, NULL);
320 for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame == EOF marker for the buffer source. */
321 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322 if (ret != AVERROR_EOF && ret < 0)
323 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
327 /* end of sub2video hack */
329 static void term_exit_sigsafe(void)
333 tcsetattr (0, TCSANOW, &oldtty);
339 av_log(NULL, AV_LOG_QUIET, "%s", "");
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
345 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
348 static int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
351 sigterm_handler(int sig)
354 received_sigterm = sig;
355 received_nb_signals++;
357 if(received_nb_signals > 3) {
358 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359 strlen("Received > 3 system signals, hard exiting\n"));
360 if (ret < 0) { /* Do nothing */ };
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
368 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
373 case CTRL_BREAK_EVENT:
374 sigterm_handler(SIGINT);
377 case CTRL_CLOSE_EVENT:
378 case CTRL_LOGOFF_EVENT:
379 case CTRL_SHUTDOWN_EVENT:
380 sigterm_handler(SIGTERM);
381 /* Basically, with these 3 events, when we return from this method the
382 process is hard terminated, so stall as long as we need to
383 to try and let the main thread(s) clean up and gracefully terminate
384 (we have at most 5 seconds, but should be done far before that). */
385 while (!ffmpeg_exited) {
391 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
400 if (!run_as_daemon && stdin_interaction) {
402 if (tcgetattr (0, &tty) == 0) {
406 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
407 |INLCR|IGNCR|ICRNL|IXON);
408 tty.c_oflag |= OPOST;
409 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
410 tty.c_cflag &= ~(CSIZE|PARENB);
415 tcsetattr (0, TCSANOW, &tty);
417 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
421 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
422 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
424 signal(SIGXCPU, sigterm_handler);
427 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
429 #if HAVE_SETCONSOLECTRLHANDLER
430 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
434 /* read a key without blocking */
435 static int read_key(void)
447 n = select(1, &rfds, NULL, NULL, &tv);
456 # if HAVE_PEEKNAMEDPIPE
458 static HANDLE input_handle;
461 input_handle = GetStdHandle(STD_INPUT_HANDLE);
462 is_pipe = !GetConsoleMode(input_handle, &dw);
466 /* When running under a GUI, you will end here. */
467 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
468 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has been received.
 * Before init completes (transcode_init_done == 0) a single signal aborts;
 * after init, more than one signal is required. */
486 static int decode_interrupt_cb(void *ctx)
488 return received_nb_signals > atomic_load(&transcode_init_done);
491 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown run at program exit: frees all filtergraphs, output files/
 * streams, input files/streams and their queued frames/subtitles/packets,
 * closes the vstats file and deinits the network layer. `ret` is the pending
 * exit status, used only for the final log message. */
493 static void ffmpeg_cleanup(int ret)
498 int maxrss = getmaxrss() / 1024;
499 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graph, then each input/output filter --- */
502 for (i = 0; i < nb_filtergraphs; i++) {
503 FilterGraph *fg = filtergraphs[i];
504 avfilter_graph_free(&fg->graph);
505 for (j = 0; j < fg->nb_inputs; j++) {
506 InputFilter *ifilter = fg->inputs[j];
507 struct InputStream *ist = ifilter->ist;
/* Drain and free any frames still queued for this input filter. */
509 while (av_fifo_size(ifilter->frame_queue)) {
511 av_fifo_generic_read(ifilter->frame_queue, &frame,
512 sizeof(frame), NULL);
513 av_frame_free(&frame);
515 av_fifo_freep(&ifilter->frame_queue);
/* Drain queued subtitles for sub2video streams. */
516 if (ist->sub2video.sub_queue) {
517 while (av_fifo_size(ist->sub2video.sub_queue)) {
519 av_fifo_generic_read(ist->sub2video.sub_queue,
520 &sub, sizeof(sub), NULL);
521 avsubtitle_free(&sub);
523 av_fifo_freep(&ist->sub2video.sub_queue);
525 av_buffer_unref(&ifilter->hw_frames_ctx);
526 av_freep(&ifilter->name);
527 av_freep(&fg->inputs[j]);
529 av_freep(&fg->inputs);
530 for (j = 0; j < fg->nb_outputs; j++) {
531 OutputFilter *ofilter = fg->outputs[j];
533 avfilter_inout_free(&ofilter->out_tmp);
534 av_freep(&ofilter->name);
535 av_freep(&ofilter->formats);
536 av_freep(&ofilter->channel_layouts);
537 av_freep(&ofilter->sample_rates);
538 av_freep(&fg->outputs[j]);
540 av_freep(&fg->outputs);
541 av_freep(&fg->graph_desc);
543 av_freep(&filtergraphs[i]);
545 av_freep(&filtergraphs);
547 av_freep(&subtitle_out);
/* --- output files: close muxer I/O (unless AVFMT_NOFILE) and free ctx --- */
550 for (i = 0; i < nb_output_files; i++) {
551 OutputFile *of = output_files[i];
556 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
558 avformat_free_context(s);
559 av_dict_free(&of->opts);
561 av_freep(&output_files[i]);
/* --- output streams: free encoder contexts, filters, queued packets --- */
563 for (i = 0; i < nb_output_streams; i++) {
564 OutputStream *ost = output_streams[i];
569 av_bsf_free(&ost->bsf_ctx);
571 av_frame_free(&ost->filtered_frame);
572 av_frame_free(&ost->last_frame);
573 av_dict_free(&ost->encoder_opts);
575 av_freep(&ost->forced_keyframes);
576 av_expr_free(ost->forced_keyframes_pexpr);
577 av_freep(&ost->avfilter);
578 av_freep(&ost->logfile_prefix);
580 av_freep(&ost->audio_channels_map);
581 ost->audio_channels_mapped = 0;
583 av_dict_free(&ost->sws_dict);
584 av_dict_free(&ost->swr_opts);
586 avcodec_free_context(&ost->enc_ctx);
587 avcodec_parameters_free(&ost->ref_par);
/* Drop packets still buffered while the muxer was uninitialized. */
589 if (ost->muxing_queue) {
590 while (av_fifo_size(ost->muxing_queue)) {
592 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
593 av_packet_unref(&pkt);
595 av_fifo_freep(&ost->muxing_queue);
598 av_freep(&output_streams[i]);
601 free_input_threads();
/* --- input files and streams --- */
603 for (i = 0; i < nb_input_files; i++) {
604 avformat_close_input(&input_files[i]->ctx);
605 av_freep(&input_files[i]);
607 for (i = 0; i < nb_input_streams; i++) {
608 InputStream *ist = input_streams[i];
610 av_frame_free(&ist->decoded_frame);
611 av_frame_free(&ist->filter_frame);
612 av_dict_free(&ist->decoder_opts);
613 avsubtitle_free(&ist->prev_sub.subtitle);
614 av_frame_free(&ist->sub2video.frame);
615 av_freep(&ist->filters);
616 av_freep(&ist->hwaccel_device);
617 av_freep(&ist->dts_buffer);
619 avcodec_free_context(&ist->dec_ctx);
621 av_freep(&input_streams[i]);
/* fclose() also flushes; a failure here can mean lost vstats data. */
625 if (fclose(vstats_file))
626 av_log(NULL, AV_LOG_ERROR,
627 "Error closing vstats file, loss of information possible: %s\n",
628 av_err2str(AVERROR(errno)));
630 av_freep(&vstats_filename);
632 av_freep(&input_streams);
633 av_freep(&input_files);
634 av_freep(&output_streams);
635 av_freep(&output_files);
639 avformat_network_deinit();
641 if (received_sigterm) {
642 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
643 (int) received_sigterm);
644 } else if (ret && atomic_load(&transcode_init_done)) {
645 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b
 * (case-sensitive match). Used to strip already-consumed options. */
651 void remove_avoptions(AVDictionary **a, AVDictionary *b)
653 AVDictionaryEntry *t = NULL;
655 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
656 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort (FATAL log) if dictionary m still contains any entry, i.e. an
 * option that no component recognized/consumed. */
660 void assert_avoptions(AVDictionary *m)
662 AVDictionaryEntry *t;
663 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
664 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
669 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log user/sys/real time elapsed since the previous
 * call, labelled by the printf-style fmt. A NULL fmt (by convention at the
 * call sites) only resets the baseline without logging. */
674 static void update_benchmark(const char *fmt, ...)
676 if (do_benchmark_all) {
677 BenchmarkTimeStamps t = get_benchmark_time_stamps();
683 vsnprintf(buf, sizeof(buf), fmt, va);
685 av_log(NULL, AV_LOG_INFO,
686 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
687 t.user_usec - current_time.user_usec,
688 t.sys_usec - current_time.sys_usec,
689 t.real_usec - current_time.real_usec, buf);
/* Mark every output stream finished: `this_stream` flags for ost itself,
 * `others` flags for all remaining streams. */
695 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
698 for (i = 0; i < nb_output_streams; i++) {
699 OutputStream *ost2 = output_streams[i];
700 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer for stream ost of file of. Enforces -frames
 * limits, buffers packets while the muxer header is not yet written
 * (`unqueue` marks packets replayed from that buffer so they are not counted
 * twice), rescales timestamps to the stream time base, repairs non-monotonic
 * DTS, collects byte/packet statistics and finally interleave-writes. */
704 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
706 AVFormatContext *s = of->ctx;
707 AVStream *st = ost->st;
711 * Audio encoders may split the packets -- #frames in != #packets out.
712 * But there is no reordering, so we can limit the number of output packets
713 * by simply dropping them here.
714 * Counting encoded video frames needs to be done separately because of
715 * reordering, see do_video_out().
716 * Do not count the packet when unqueued because it has been counted when queued.
718 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
719 if (ost->frame_number >= ost->max_frames) {
720 av_packet_unref(pkt);
726 if (!of->header_written) {
727 AVPacket tmp_pkt = {0};
728 /* the muxer is not initialized yet, buffer the packet */
/* FIFO full: grow it, capped at max_muxing_queue_size once over the
 * data-size threshold. */
729 if (!av_fifo_space(ost->muxing_queue)) {
730 unsigned int are_we_over_size =
731 (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
732 int new_size = are_we_over_size ?
733 FFMIN(2 * av_fifo_size(ost->muxing_queue),
734 ost->max_muxing_queue_size) :
735 2 * av_fifo_size(ost->muxing_queue);
737 if (new_size <= av_fifo_size(ost->muxing_queue)) {
738 av_log(NULL, AV_LOG_ERROR,
739 "Too many packets buffered for output stream %d:%d.\n",
740 ost->file_index, ost->st->index);
743 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
747 ret = av_packet_make_refcounted(pkt);
750 av_packet_move_ref(&tmp_pkt, pkt);
751 ost->muxing_queue_data_size += tmp_pkt.size;
752 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* -vsync drop / negative -async: discard timestamps entirely. */
756 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
757 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
758 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
760 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* Quality/error stats attached by the encoder as packet side data. */
762 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
764 ost->quality = sd ? AV_RL32(sd) : -1;
765 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
767 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
769 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force the duration implied by the frame rate. */
774 if (ost->frame_rate.num && ost->is_cfr) {
775 if (pkt->duration > 0)
776 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
777 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
782 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
784 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
785 if (pkt->dts != AV_NOPTS_VALUE &&
786 pkt->pts != AV_NOPTS_VALUE &&
787 pkt->dts > pkt->pts) {
788 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
790 ost->file_index, ost->st->index);
/* Replace dts by the median of pts, dts and last_mux_dts+1
 * (sum minus min minus max == the middle value). */
792 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
793 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
794 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
796 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
797 pkt->dts != AV_NOPTS_VALUE &&
798 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
799 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict formats require strictly increasing DTS; NONSTRICT allows equal. */
800 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
801 if (pkt->dts < max) {
802 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
804 loglevel = AV_LOG_ERROR;
805 av_log(s, loglevel, "Non-monotonous DTS in output stream "
806 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
807 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
809 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
812 av_log(s, loglevel, "changing to %"PRId64". This may result "
813 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
815 if (pkt->pts >= pkt->dts)
816 pkt->pts = FFMAX(pkt->pts, max);
821 ost->last_mux_dts = pkt->dts;
823 ost->data_size += pkt->size;
824 ost->packets_written++;
826 pkt->stream_index = ost->index;
829 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
830 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
831 av_get_media_type_string(ost->enc_ctx->codec_type),
832 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
833 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
838 ret = av_interleaved_write_frame(s, pkt);
840 print_error("av_interleaved_write_frame()", ret);
841 main_return_code = 1;
842 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
844 av_packet_unref(pkt);
/* Mark ost's encoder finished and, when applicable, shorten the containing
 * file's recording time to the stream's current end timestamp. */
847 static void close_output_stream(OutputStream *ost)
849 OutputFile *of = output_files[ost->file_index];
851 ost->finished |= ENCODER_FINISHED;
853 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
854 of->recording_time = FFMIN(of->recording_time, end);
859 * Send a single packet to the output, applying any bitstream filters
860 * associated with the output stream. This may result in any number
861 * of packets actually being written, depending on what bitstream
862 * filters are applied. The supplied packet is consumed and will be
863 * blank (as if newly-allocated) when this function returns.
865 * If eof is set, instead indicate EOF to all bitstream filters and
866 * therefore flush any delayed packets to the output. A blank packet
867 * must be supplied in this case.
869 static void output_packet(OutputFile *of, AVPacket *pkt,
870 OutputStream *ost, int eof)
874 /* apply the output bitstream filters */
/* With a BSF: send pkt (or EOF) in, write out every packet it produces.
 * EAGAIN just means the BSF wants more input. */
876 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
879 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
880 write_packet(of, pkt, ost, 0);
881 if (ret == AVERROR(EAGAIN))
/* No bitstream filter: write the packet straight through. */
884 write_packet(of, pkt, ost, 0);
887 if (ret < 0 && ret != AVERROR_EOF) {
888 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
889 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return 0 (and close the stream) once ost's current timestamp reaches the
 * output file's -t recording limit; otherwise return nonzero to keep going. */
895 static int check_recording_time(OutputStream *ost)
897 OutputFile *of = output_files[ost->file_index];
899 if (of->recording_time != INT64_MAX &&
900 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
901 AV_TIME_BASE_Q) >= 0) {
902 close_output_stream(ost);
/* Rescale a filtered frame's pts from the buffersink time base to the
 * encoder time base (offset by the output file's start time) and return it
 * as a double with extra sub-tick precision for the fps/vsync logic.
 * Returns AV_NOPTS_VALUE when no usable pts/encoder/filter is available. */
908 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
911 double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
912 AVCodecContext *enc = ost->enc_ctx;
913 if (!frame || frame->pts == AV_NOPTS_VALUE ||
914 !enc || !ost->filter || !ost->filter->graph->graph)
918 AVFilterContext *filter = ost->filter->filter;
920 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
921 AVRational filter_tb = av_buffersink_get_time_base(filter);
922 AVRational tb = enc->time_base;
/* Widen the time base denominator (up to 16 extra bits) so the double
 * keeps fractional tick precision after the rescale. */
923 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
925 tb.den <<= extra_bits;
927 av_rescale_q(frame->pts, filter_tb, tb) -
928 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
929 float_pts /= 1 << extra_bits;
930 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
931 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* Integer pts in the encoder time base (written back to the frame). */
934 av_rescale_q(frame->pts, filter_tb, enc->time_base) -
935 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
941 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
942 frame ? av_ts2str(frame->pts) : "NULL",
943 frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
945 enc ? enc->time_base.num : -1,
946 enc ? enc->time_base.den : -1);
952 static int init_output_stream(OutputStream *ost, AVFrame *frame,
953 char *error, int error_len);
/* Idempotent wrapper around init_output_stream(): no-op when the stream is
 * already initialized, logs the error message on failure. */
955 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
958 int ret = AVERROR_BUG;
959 char error[1024] = {0};
961 if (ost->initialized)
964 ret = init_output_stream(ost, frame, error, sizeof(error));
966 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
967 ost->file_index, ost->index, error);
/* Encode one audio frame and send the resulting packet(s) to the muxer via
 * output_packet(). Maintains ost->sync_opts (next expected pts in samples)
 * and the encoded-frame/sample counters. */
976 static void do_audio_out(OutputFile *of, OutputStream *ost,
979 AVCodecContext *enc = ost->enc_ctx;
983 av_init_packet(&pkt);
987 adjust_frame_pts_to_encoder_tb(of, ost, frame);
/* Stop encoding once the -t recording limit is reached. */
989 if (!check_recording_time(ost))
/* Without a usable pts (or with negative -async) continue from sync_opts. */
992 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
993 frame->pts = ost->sync_opts;
994 ost->sync_opts = frame->pts + frame->nb_samples;
995 ost->samples_encoded += frame->nb_samples;
996 ost->frames_encoded++;
998 av_assert0(pkt.size || !pkt.data);
999 update_benchmark(NULL);
1001 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1002 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1003 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1004 enc->time_base.num, enc->time_base.den);
/* send_frame/receive_packet loop: one frame in, zero or more packets out. */
1007 ret = avcodec_send_frame(enc, frame);
1012 ret = avcodec_receive_packet(enc, &pkt);
1013 if (ret == AVERROR(EAGAIN))
1018 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1020 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1023 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1024 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1025 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1026 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1029 output_packet(of, &pkt, ost, 0);
1034 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the resulting packet(s). DVB subtitles are
 * encoded twice (one packet draws, one clears); pts handling honors -ss/-t.
 * Uses the file-scope `subtitle_out` buffer, allocated on first use. */
1038 static void do_subtitle_out(OutputFile *of,
1042 int subtitle_out_max_size = 1024 * 1024;
1043 int subtitle_out_size, nb, i;
1044 AVCodecContext *enc;
1048 if (sub->pts == AV_NOPTS_VALUE) {
1049 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1057 if (!subtitle_out) {
1058 subtitle_out = av_malloc(subtitle_out_max_size);
1059 if (!subtitle_out) {
1060 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1065 /* Note: DVB subtitle need one packet to draw them and one other
1066 packet to clear them */
1067 /* XXX: signal it in the codec context ? */
1068 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1073 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1075 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1076 pts -= output_files[ost->file_index]->start_time;
/* Encode `nb` passes (2 for DVB: draw + clear). */
1077 for (i = 0; i < nb; i++) {
1078 unsigned save_num_rects = sub->num_rects;
1080 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1081 if (!check_recording_time(ost))
1085 // start_display_time is required to be 0
1086 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1087 sub->end_display_time -= sub->start_display_time;
1088 sub->start_display_time = 0;
1092 ost->frames_encoded++;
1094 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1095 subtitle_out_max_size, sub);
/* Restore num_rects the encoder may have zeroed on the clear pass. */
1097 sub->num_rects = save_num_rects;
1098 if (subtitle_out_size < 0) {
1099 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1103 av_init_packet(&pkt);
1104 pkt.data = subtitle_out;
1105 pkt.size = subtitle_out_size;
1106 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1107 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1108 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1109 /* XXX: the pts correction is handled here. Maybe handling
1110 it in the codec would be better */
1112 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1114 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1117 output_packet(of, &pkt, ost, 0);
1121 static void do_video_out(OutputFile *of,
1123 AVFrame *next_picture)
1125 int ret, format_video_sync;
1127 AVCodecContext *enc = ost->enc_ctx;
1128 AVRational frame_rate;
1129 int nb_frames, nb0_frames, i;
1130 double delta, delta0;
1131 double duration = 0;
1132 double sync_ipts = AV_NOPTS_VALUE;
1134 InputStream *ist = NULL;
1135 AVFilterContext *filter = ost->filter->filter;
1137 init_output_stream_wrapper(ost, next_picture, 1);
1138 sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1140 if (ost->source_index >= 0)
1141 ist = input_streams[ost->source_index];
1143 frame_rate = av_buffersink_get_frame_rate(filter);
1144 if (frame_rate.num > 0 && frame_rate.den > 0)
1145 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1147 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1148 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1150 if (!ost->filters_script &&
1152 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1155 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1156 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1159 if (!next_picture) {
1161 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1162 ost->last_nb0_frames[1],
1163 ost->last_nb0_frames[2]);
1165 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1166 delta = delta0 + duration;
1168 /* by default, we output a single frame */
1169 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1172 format_video_sync = video_sync_method;
1173 if (format_video_sync == VSYNC_AUTO) {
1174 if(!strcmp(of->ctx->oformat->name, "avi")) {
1175 format_video_sync = VSYNC_VFR;
1177 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1179 && format_video_sync == VSYNC_CFR
1180 && input_files[ist->file_index]->ctx->nb_streams == 1
1181 && input_files[ist->file_index]->input_ts_offset == 0) {
1182 format_video_sync = VSYNC_VSCFR;
1184 if (format_video_sync == VSYNC_CFR && copy_ts) {
1185 format_video_sync = VSYNC_VSCFR;
1188 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1192 format_video_sync != VSYNC_PASSTHROUGH &&
1193 format_video_sync != VSYNC_DROP) {
1194 if (delta0 < -0.6) {
1195 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1197 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1198 sync_ipts = ost->sync_opts;
1203 switch (format_video_sync) {
1205 if (ost->frame_number == 0 && delta0 >= 0.5) {
1206 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1209 ost->sync_opts = llrint(sync_ipts);
1212 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1213 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1215 } else if (delta < -1.1)
1217 else if (delta > 1.1) {
1218 nb_frames = lrintf(delta);
1220 nb0_frames = llrintf(delta0 - 0.6);
1226 else if (delta > 0.6)
1227 ost->sync_opts = llrint(sync_ipts);
1230 case VSYNC_PASSTHROUGH:
1231 ost->sync_opts = llrint(sync_ipts);
1238 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1239 nb0_frames = FFMIN(nb0_frames, nb_frames);
1241 memmove(ost->last_nb0_frames + 1,
1242 ost->last_nb0_frames,
1243 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1244 ost->last_nb0_frames[0] = nb0_frames;
1246 if (nb0_frames == 0 && ost->last_dropped) {
1248 av_log(NULL, AV_LOG_VERBOSE,
1249 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1250 ost->frame_number, ost->st->index, ost->last_frame->pts);
1252 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1253 if (nb_frames > dts_error_threshold * 30) {
1254 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1258 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1259 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1260 if (nb_frames_dup > dup_warning) {
1261 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1265 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1267 /* duplicates frame if needed */
1268 for (i = 0; i < nb_frames; i++) {
1269 AVFrame *in_picture;
1270 int forced_keyframe = 0;
1272 av_init_packet(&pkt);
1276 if (i < nb0_frames && ost->last_frame) {
1277 in_picture = ost->last_frame;
1279 in_picture = next_picture;
1284 in_picture->pts = ost->sync_opts;
1286 if (!check_recording_time(ost))
1289 in_picture->quality = enc->global_quality;
1290 in_picture->pict_type = 0;
1292 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1293 in_picture->pts != AV_NOPTS_VALUE)
1294 ost->forced_kf_ref_pts = in_picture->pts;
1296 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1297 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1298 if (ost->forced_kf_index < ost->forced_kf_count &&
1299 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1300 ost->forced_kf_index++;
1301 forced_keyframe = 1;
1302 } else if (ost->forced_keyframes_pexpr) {
1304 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1305 res = av_expr_eval(ost->forced_keyframes_pexpr,
1306 ost->forced_keyframes_expr_const_values, NULL);
1307 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1308 ost->forced_keyframes_expr_const_values[FKF_N],
1309 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1310 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1311 ost->forced_keyframes_expr_const_values[FKF_T],
1312 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1315 forced_keyframe = 1;
1316 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1317 ost->forced_keyframes_expr_const_values[FKF_N];
1318 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1319 ost->forced_keyframes_expr_const_values[FKF_T];
1320 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1323 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1324 } else if ( ost->forced_keyframes
1325 && !strncmp(ost->forced_keyframes, "source", 6)
1326 && in_picture->key_frame==1
1328 forced_keyframe = 1;
1331 if (forced_keyframe) {
1332 in_picture->pict_type = AV_PICTURE_TYPE_I;
1333 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1336 update_benchmark(NULL);
1338 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1339 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1340 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1341 enc->time_base.num, enc->time_base.den);
1344 ost->frames_encoded++;
1346 ret = avcodec_send_frame(enc, in_picture);
1349 // Make sure Closed Captions will not be duplicated
1350 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1353 ret = avcodec_receive_packet(enc, &pkt);
1354 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1355 if (ret == AVERROR(EAGAIN))
1361 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1362 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1363 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1364 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1367 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1368 pkt.pts = ost->sync_opts;
1370 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1373 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1374 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1375 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1376 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1379 frame_size = pkt.size;
1380 output_packet(of, &pkt, ost, 0);
1382 /* if two pass, output log */
1383 if (ost->logfile && enc->stats_out) {
1384 fprintf(ost->logfile, "%s", enc->stats_out);
1389 * For video, number of frames in == number of packets out.
1390 * But there may be reordering, so we can't throw away frames on encoder
1391 * flush, we need to limit them here, before they go into encoder.
1393 ost->frame_number++;
1395 if (vstats_filename && frame_size)
1396 do_video_stats(ost, frame_size);
1399 if (!ost->last_frame)
1400 ost->last_frame = av_frame_alloc();
1401 av_frame_unref(ost->last_frame);
1402 if (next_picture && ost->last_frame)
1403 av_frame_ref(ost->last_frame, next_picture);
1405 av_frame_free(&ost->last_frame);
1409 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1413 static double psnr(double d)
1415 return -10.0 * log10(d);
/* Append one line of per-frame video statistics (-vstats/-vstats_file) for
 * this output stream: frame number, quantizer, optional PSNR, frame size,
 * stream time, instantaneous and average bitrate, and picture type. */
1418 static void do_video_stats(OutputStream *ost, int frame_size)
1420 AVCodecContext *enc;
1422 double ti1, bitrate, avg_bitrate;
1424 /* this is executed just the first time do_video_stats is called */
1426 vstats_file = fopen(vstats_filename, "w");
1434 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1435 frame_number = ost->st->nb_frames;
/* vstats_version selects between the legacy single-stream line format
 * and the newer format that also prints output/stream indices. */
1436 if (vstats_version <= 1) {
1437 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1438 ost->quality / (float)FF_QP2LAMBDA);
1440 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1441 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only meaningful when the encoder was asked to collect error stats. */
1444 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1445 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1447 fprintf(vstats_file,"f_size= %6d ", frame_size);
1448 /* compute pts value */
1449 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate assumes one frame per encoder time_base tick;
 * average bitrate is total bytes so far over elapsed stream time. */
1453 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1454 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1455 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1456 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1457 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark an output stream — and every stream in the same output file — as
 * completely finished for both the encoder and the muxer, so no further
 * packets are produced or written for them. */
1461 static void finish_output_stream(OutputStream *ost)
1463 OutputFile *of = output_files[ost->file_index];
1466 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* NOTE(review): this finishes ALL sibling streams of the file, not just ost —
 * presumably because the file cannot continue without this stream; confirm. */
1469 for (i = 0; i < of->ctx->nb_streams; i++)
1470 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1475 * Get and encode new output from any of the filtergraphs, without causing
1478 * @return 0 for success, <0 for severe errors
1480 static int reap_filters(int flush)
1482 AVFrame *filtered_frame = NULL;
1485 /* Reap all buffers present in the buffer sinks */
1486 for (i = 0; i < nb_output_streams; i++) {
1487 OutputStream *ost = output_streams[i];
1488 OutputFile *of = output_files[ost->file_index];
1489 AVFilterContext *filter;
1490 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph is not configured yet. */
1493 if (!ost->filter || !ost->filter->graph->graph)
1495 filter = ost->filter->filter;
1498 * Unlike video, with audio the audio frame size matters.
1499 * Currently we are fully reliant on the lavfi filter chain to
1500 * do the buffering deed for us, and thus the frame size parameter
1501 * needs to be set accordingly. Where does one get the required
1502 * frame size? From the initialized AVCodecContext of an audio
1503 * encoder. Thus, if we have gotten to an audio stream, initialize
1504 * the encoder earlier than receiving the first AVFrame.
1506 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
1507 init_output_stream_wrapper(ost, NULL, 1);
/* Lazily allocate the reusable frame that receives filter output. */
1509 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1510 return AVERROR(ENOMEM);
1512 filtered_frame = ost->filtered_frame;
/* NO_REQUEST: only drain frames already buffered in the sink; do not
 * trigger upstream filtering. */
1515 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1516 AV_BUFFERSINK_FLAG_NO_REQUEST);
1518 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1519 av_log(NULL, AV_LOG_WARNING,
1520 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1521 } else if (flush && ret == AVERROR_EOF) {
/* On EOF during a flush, push a NULL frame so video sync emits trailing frames. */
1522 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1523 do_video_out(of, ost, NULL);
1527 if (ost->finished) {
1528 av_frame_unref(filtered_frame);
1532 switch (av_buffersink_get_type(filter)) {
1533 case AVMEDIA_TYPE_VIDEO:
1534 if (!ost->frame_aspect_ratio.num)
1535 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1537 do_video_out(of, ost, filtered_frame);
1539 case AVMEDIA_TYPE_AUDIO:
1540 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1541 enc->channels != filtered_frame->channels) {
1542 av_log(NULL, AV_LOG_ERROR,
1543 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1546 do_audio_out(of, ost, filtered_frame);
1549 // TODO support subtitle filters
1553 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type output sizes and muxing overhead at
 * INFO level, then (at VERBOSE) per-input and per-output stream packet/frame
 * counts, and finally a warning if nothing at all was encoded. */
1560 static void print_final_stats(int64_t total_size)
1562 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1563 uint64_t subtitle_size = 0;
1564 uint64_t data_size = 0;
/* percent stays negative when overhead cannot be computed -> prints "unknown". */
1565 float percent = -1.0;
/* Accumulate bytes written per media type plus global-header (extradata) bytes. */
1569 for (i = 0; i < nb_output_streams; i++) {
1570 OutputStream *ost = output_streams[i];
1571 switch (ost->enc_ctx->codec_type) {
1572 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1573 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1574 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1575 default: other_size += ost->data_size; break;
1577 extra_size += ost->enc_ctx->extradata_size;
1578 data_size += ost->data_size;
1579 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1580 != AV_CODEC_FLAG_PASS1)
1584 if (data_size && total_size>0 && total_size >= data_size)
1585 percent = 100.0 * (total_size - data_size) / data_size;
1587 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1588 video_size / 1024.0,
1589 audio_size / 1024.0,
1590 subtitle_size / 1024.0,
1591 other_size / 1024.0,
1592 extra_size / 1024.0);
1594 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1596 av_log(NULL, AV_LOG_INFO, "unknown");
1597 av_log(NULL, AV_LOG_INFO, "\n");
1599 /* print verbose per-stream stats */
1600 for (i = 0; i < nb_input_files; i++) {
1601 InputFile *f = input_files[i];
/* note: this total_size shadows the parameter within this loop's scope */
1602 uint64_t total_packets = 0, total_size = 0;
1604 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1607 for (j = 0; j < f->nb_streams; j++) {
1608 InputStream *ist = input_streams[f->ist_index + j];
1609 enum AVMediaType type = ist->dec_ctx->codec_type;
1611 total_size += ist->data_size;
1612 total_packets += ist->nb_packets;
1614 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1615 i, j, media_type_string(type));
1616 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1617 ist->nb_packets, ist->data_size);
1619 if (ist->decoding_needed) {
1620 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1621 ist->frames_decoded);
1622 if (type == AVMEDIA_TYPE_AUDIO)
1623 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1624 av_log(NULL, AV_LOG_VERBOSE, "; ");
1627 av_log(NULL, AV_LOG_VERBOSE, "\n");
1630 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1631 total_packets, total_size);
1634 for (i = 0; i < nb_output_files; i++) {
1635 OutputFile *of = output_files[i];
1636 uint64_t total_packets = 0, total_size = 0;
1638 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1641 for (j = 0; j < of->ctx->nb_streams; j++) {
1642 OutputStream *ost = output_streams[of->ost_index + j];
1643 enum AVMediaType type = ost->enc_ctx->codec_type;
1645 total_size += ost->data_size;
1646 total_packets += ost->packets_written;
1648 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1649 i, j, media_type_string(type));
1650 if (ost->encoding_needed) {
1651 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1652 ost->frames_encoded);
1653 if (type == AVMEDIA_TYPE_AUDIO)
1654 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1655 av_log(NULL, AV_LOG_VERBOSE, "; ");
1658 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1659 ost->packets_written, ost->data_size);
1661 av_log(NULL, AV_LOG_VERBOSE, "\n");
1664 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1665 total_packets, total_size);
/* Nothing was written anywhere: warn, since this usually means bad -ss/-t/-frames. */
1667 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1668 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1670 av_log(NULL, AV_LOG_WARNING, "\n");
1672 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Build and emit the periodic status line ("frame= ... fps= ... time= ...") and,
 * if -progress is active, the machine-readable key=value progress report.
 * Called repeatedly during transcoding and once more with is_last_report set;
 * the last call also prints the final statistics. Uses static locals to
 * rate-limit reports to stats_period. */
1677 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1679 AVBPrint buf, buf_script;
1681 AVFormatContext *oc;
1683 AVCodecContext *enc;
1684 int frame_number, vid, i;
1687 int64_t pts = INT64_MIN + 1;
/* static: persists across calls to implement the stats_period rate limiter */
1688 static int64_t last_time = -1;
1689 static int first_report = 1;
1690 static int qp_histogram[52];
1691 int hours, mins, secs, us;
1692 const char *hours_sign;
1696 if (!print_stats && !is_last_report && !progress_avio)
1699 if (!is_last_report) {
1700 if (last_time == -1) {
1701 last_time = cur_time;
/* Throttle: skip this report if the period has not elapsed (and, on the very
 * first report, until all output headers have been written). */
1703 if (((cur_time - last_time) < stats_period && !first_report) ||
1704 (first_report && nb_output_dumped < nb_output_files))
1706 last_time = cur_time;
/* Wall-clock seconds since transcoding started. */
1709 t = (cur_time-timer_start) / 1000000.0;
1712 oc = output_files[0]->ctx;
1714 total_size = avio_size(oc->pb);
1715 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1716 total_size = avio_tell(oc->pb);
1719 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1720 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1721 for (i = 0; i < nb_output_streams; i++) {
1723 ost = output_streams[i];
1725 if (!ost->stream_copy)
1726 q = ost->quality / (float) FF_QP2LAMBDA;
/* Only the first video stream drives frame/fps reporting (vid flag). */
1728 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1729 av_bprintf(&buf, "q=%2.1f ", q);
1730 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1731 ost->file_index, ost->index, q);
1733 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1736 frame_number = ost->frame_number;
1737 fps = t > 1 ? frame_number / t : 0;
1738 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1739 frame_number, fps < 9.95, fps, q);
1740 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1741 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1742 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1743 ost->file_index, ost->index, q);
1745 av_bprintf(&buf, "L");
/* qp_hist: bucket quantizers and print a log2 histogram as hex digits. */
1749 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1751 for (j = 0; j < 32; j++)
1752 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1755 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1757 double error, error_sum = 0;
1758 double scale, scale_sum = 0;
1760 char type[3] = { 'Y','U','V' };
1761 av_bprintf(&buf, "PSNR=");
1762 for (j = 0; j < 3; j++) {
/* Final report uses the encoder's accumulated error over all frames;
 * periodic reports use the per-frame error. */
1763 if (is_last_report) {
1764 error = enc->error[j];
1765 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1767 error = ost->error[j];
1768 scale = enc->width * enc->height * 255.0 * 255.0;
1774 p = psnr(error / scale);
1775 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1776 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1777 ost->file_index, ost->index, type[j] | 32, p);
1779 p = psnr(error_sum / scale_sum);
1780 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1781 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1782 ost->file_index, ost->index, p);
1786 /* compute min output value */
1787 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
1788 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1789 ost->st->time_base, AV_TIME_BASE_Q));
1791 if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1792 copy_ts_first_pts = pts;
1793 if (copy_ts_first_pts != AV_NOPTS_VALUE)
1794 pts -= copy_ts_first_pts;
1799 nb_frames_drop += ost->last_dropped;
/* Split |pts| (in AV_TIME_BASE units) into H:M:S plus sub-second remainder. */
1802 secs = FFABS(pts) / AV_TIME_BASE;
1803 us = FFABS(pts) % AV_TIME_BASE;
1808 hours_sign = (pts < 0) ? "-" : "";
1810 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1811 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1813 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1814 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1815 if (pts == AV_NOPTS_VALUE) {
1816 av_bprintf(&buf, "N/A ");
1818 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1819 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1823 av_bprintf(&buf, "bitrate=N/A");
1824 av_bprintf(&buf_script, "bitrate=N/A\n");
1826 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1827 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1830 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1831 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1832 if (pts == AV_NOPTS_VALUE) {
1833 av_bprintf(&buf_script, "out_time_us=N/A\n");
1834 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1835 av_bprintf(&buf_script, "out_time=N/A\n");
1837 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1838 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1839 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1840 hours_sign, hours, mins, secs, us);
1843 if (nb_frames_dup || nb_frames_drop)
1844 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1845 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1846 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1849 av_bprintf(&buf, " speed=N/A");
1850 av_bprintf(&buf_script, "speed=N/A\n");
1852 av_bprintf(&buf, " speed=%4.3gx", speed);
1853 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps periodic reports on one terminal line; '\n' for the final one. */
1856 if (print_stats || is_last_report) {
1857 const char end = is_last_report ? '\n' : '\r';
1858 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1859 fprintf(stderr, "%s %c", buf.str, end);
1861 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1865 av_bprint_finalize(&buf, NULL);
1867 if (progress_avio) {
1868 av_bprintf(&buf_script, "progress=%s\n",
1869 is_last_report ? "end" : "continue");
/* Write at most the buffer's storage; len may exceed size if it overflowed. */
1870 avio_write(progress_avio, buf_script.str,
1871 FFMIN(buf_script.len, buf_script.size - 1));
1872 avio_flush(progress_avio);
1873 av_bprint_finalize(&buf_script, NULL);
1874 if (is_last_report) {
1875 if ((ret = avio_closep(&progress_avio)) < 0)
1876 av_log(NULL, AV_LOG_ERROR,
1877 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1884 print_final_stats(total_size);
1887 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1889 // We never got any input. Set a fake format, which will
1890 // come from libavformat.
1891 ifilter->format = par->format;
1892 ifilter->sample_rate = par->sample_rate;
1893 ifilter->channels = par->channels;
1894 ifilter->channel_layout = par->channel_layout;
1895 ifilter->width = par->width;
1896 ifilter->height = par->height;
1897 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/* Drain every encoder at end of stream: send a NULL frame to each encoder
 * that needs it and mux all remaining packets. Streams that never received
 * data get their filtergraph configured (with codecpar-derived parameters)
 * and encoder initialized first so a valid, if empty, stream is written. */
1900 static void flush_encoders(void)
1904 for (i = 0; i < nb_output_streams; i++) {
1905 OutputStream *ost = output_streams[i];
1906 AVCodecContext *enc = ost->enc_ctx;
1907 OutputFile *of = output_files[ost->file_index];
1909 if (!ost->encoding_needed)
1912 // Try to enable encoding with no input frames.
1913 // Maybe we should just let encoding fail instead.
1914 if (!ost->initialized) {
1915 FilterGraph *fg = ost->filter->graph;
1917 av_log(NULL, AV_LOG_WARNING,
1918 "Finishing stream %d:%d without any data written to it.\n",
1919 ost->file_index, ost->st->index);
1921 if (ost->filter && !fg->graph) {
/* Fill in any still-unknown input formats from the demuxer's codecpar. */
1923 for (x = 0; x < fg->nb_inputs; x++) {
1924 InputFilter *ifilter = fg->inputs[x];
1925 if (ifilter->format < 0)
1926 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1929 if (!ifilter_has_all_input_formats(fg))
1932 ret = configure_filtergraph(fg);
1934 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1938 finish_output_stream(ost);
1941 init_output_stream_wrapper(ost, NULL, 1);
/* Only audio and video encoders are drained here. */
1944 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1948 const char *desc = NULL;
1952 switch (enc->codec_type) {
1953 case AVMEDIA_TYPE_AUDIO:
1956 case AVMEDIA_TYPE_VIDEO:
1963 av_init_packet(&pkt);
1967 update_benchmark(NULL);
/* EAGAIN from receive means the encoder wants (NULL) input before output. */
1969 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1970 ret = avcodec_send_frame(enc, NULL);
1972 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1979 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1980 if (ret < 0 && ret != AVERROR_EOF) {
1981 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1986 if (ost->logfile && enc->stats_out) {
1987 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: tell the muxer this stream is done (eof flag set on output_packet). */
1989 if (ret == AVERROR_EOF) {
1990 output_packet(of, &pkt, ost, 1);
1993 if (ost->finished & MUXER_FINISHED) {
1994 av_packet_unref(&pkt);
1997 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1998 pkt_size = pkt.size;
1999 output_packet(of, &pkt, ost, 0);
2000 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
2001 do_video_stats(ost, pkt_size);
2008 * Check whether a packet from ist should be written into ost at this time
2010 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2012 OutputFile *of = output_files[ost->file_index];
2013 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* Reject packets from any input stream other than this output's source. */
2015 if (ost->source_index != ist_index)
/* Reject packets before the output file's requested start time (-ss). */
2021 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Stream-copy path: forward an input packet to the output without decoding.
 * Handles EOF flushing of bitstream filters, skipping of initial non-key
 * frames and pre-start packets, recording-time limits, and rescaling of
 * pts/dts/duration from the input stream time base to the mux time base. */
2027 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2029 OutputFile *of = output_files[ost->file_index];
2030 InputFile *f = input_files [ist->file_index];
2031 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2032 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2035 // EOF: flush output bitstream filters.
2037 av_init_packet(&opkt);
2040 output_packet(of, &opkt, ost, 1);
/* Drop leading non-keyframes unless the user asked to copy them. */
2044 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2045 !ost->copy_initial_nonkeyframes)
2048 if (!ost->frame_number && !ost->copy_prior_start) {
2049 int64_t comp_start = start_time;
2050 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2051 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2052 if (pkt->pts == AV_NOPTS_VALUE ?
2053 ist->pts < comp_start :
2054 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop the stream once the output's recording time (-t) is exceeded. */
2058 if (of->recording_time != INT64_MAX &&
2059 ist->pts >= of->recording_time + start_time) {
2060 close_output_stream(ost);
2064 if (f->recording_time != INT64_MAX) {
2065 start_time = f->ctx->start_time;
2066 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2067 start_time += f->start_time;
2068 if (ist->pts >= f->recording_time + start_time) {
2069 close_output_stream(ost);
2074 /* force the input stream PTS */
2075 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2078 if (av_packet_ref(&opkt, pkt) < 0)
2081 if (pkt->pts != AV_NOPTS_VALUE)
2082 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2084 if (pkt->dts == AV_NOPTS_VALUE) {
2085 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
/* Audio: rescale via av_rescale_delta to avoid accumulating rounding error
 * across packets of known sample duration. */
2086 } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2087 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2089 duration = ist->dec_ctx->frame_size;
2090 opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2091 (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2092 &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2093 /* dts will be set immediately afterwards to what pts is now */
2094 opkt.pts = opkt.dts - ost_tb_start_time;
2096 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2097 opkt.dts -= ost_tb_start_time;
2099 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2101 output_packet(of, &opkt, ost, 0);
/* If the decoder reports no channel layout, pick the default layout for its
 * channel count (capped by -guess_layout_max) and warn the user about it. */
2104 int guess_input_channel_layout(InputStream *ist)
2106 AVCodecContext *dec = ist->dec_ctx;
2108 if (!dec->channel_layout) {
2109 char layout_name[256];
/* Too many channels to guess safely -> leave the layout unset. */
2111 if (dec->channels > ist->guess_layout_max)
2113 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2114 if (!dec->channel_layout)
2116 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2117 dec->channels, dec->channel_layout);
2118 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2119 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Record decode success/failure statistics and react to errors: abort on
 * decode error when -xerror is set, and warn (or abort) on corrupt frames. */
2124 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
2126 if (*got_output || ret<0)
2127 decode_error_stat[ret<0] ++;
2129 if (ret < 0 && exit_on_error)
2132 if (*got_output && ist) {
2133 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2134 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2135 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2142 // Filters can be configured only if the formats of all inputs are known.
/* Returns whether every audio/video input of the graph has a known format
 * (format >= 0); other media types are not required to be known. */
2143 static int ifilter_has_all_input_formats(FilterGraph *fg)
2146 for (i = 0; i < fg->nb_inputs; i++) {
2147 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2148 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Push a decoded frame into one filtergraph input. If the frame's parameters
 * differ from what the graph was configured with, the graph is reconfigured
 * (draining pending output first); frames arriving before all inputs have a
 * known format are buffered in the input's FIFO instead. */
2154 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2156 FilterGraph *fg = ifilter->graph;
2157 int need_reinit, ret, i;
2159 /* determine if the parameters for this input changed */
2160 need_reinit = ifilter->format != frame->format;
2162 switch (ifilter->ist->st->codecpar->codec_type) {
2163 case AVMEDIA_TYPE_AUDIO:
2164 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2165 ifilter->channels != frame->channels ||
2166 ifilter->channel_layout != frame->channel_layout;
2168 case AVMEDIA_TYPE_VIDEO:
2169 need_reinit |= ifilter->width != frame->width ||
2170 ifilter->height != frame->height;
/* User disabled reinit and the graph already exists -> keep it as-is. */
2174 if (!ifilter->ist->reinit_filters && fg->graph)
/* A change in hw frames context also forces reinitialization. */
2177 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2178 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2182 ret = ifilter_parameters_from_frame(ifilter, frame);
2187 /* (re)init the graph if possible, otherwise buffer the frame and return */
2188 if (need_reinit || !fg->graph) {
2189 for (i = 0; i < fg->nb_inputs; i++) {
2190 if (!ifilter_has_all_input_formats(fg)) {
/* Not all formats known yet: queue a clone of the frame for later. */
2191 AVFrame *tmp = av_frame_clone(frame);
2193 return AVERROR(ENOMEM);
2194 av_frame_unref(frame);
2196 if (!av_fifo_space(ifilter->frame_queue)) {
2197 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2199 av_frame_free(&tmp);
2203 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain any output still buffered under the old configuration. */
2208 ret = reap_filters(1);
2209 if (ret < 0 && ret != AVERROR_EOF) {
2210 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2214 ret = configure_filtergraph(fg);
2216 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2221 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2223 if (ret != AVERROR_EOF)
2224 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/* Signal end-of-stream on a filtergraph input. If the graph was never
 * configured, fall back to codecpar-derived parameters; failing that for an
 * audio/video input is a hard error since the graph can never be built. */
2231 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2237 if (ifilter->filter) {
2238 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2242 // the filtergraph was never configured
2243 if (ifilter->format < 0)
2244 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2245 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2246 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2247 return AVERROR_INVALIDDATA;
2254 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2255 // There is the following difference: if you got a frame, you must call
2256 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2257 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin wrapper over the send/receive decode API that emulates the old
 * one-call decode semantics described above; sets *got_frame on success. */
2258 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2265 ret = avcodec_send_packet(avctx, pkt);
2266 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2267 // decoded frames with avcodec_receive_frame() until done.
2268 if (ret < 0 && ret != AVERROR_EOF)
2272 ret = avcodec_receive_frame(avctx, frame);
2273 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Distribute a decoded frame to every filtergraph input fed by this stream.
 * All but the last filter receive a reference; the last one consumes the
 * frame itself. AVERROR_EOF from a closed filter input is ignored. */
2281 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2286 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2287 for (i = 0; i < ist->nb_filters; i++) {
2288 if (i < ist->nb_filters - 1) {
2289 f = ist->filter_frame;
2290 ret = av_frame_ref(f, decoded_frame);
2295 ret = ifilter_send_frame(ist->filters[i], f);
2296 if (ret == AVERROR_EOF)
2297 ret = 0; /* ignore */
2299 av_log(NULL, AV_LOG_ERROR,
2300 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet, validate the sample rate, update decode counters
 * and predicted next pts/dts, derive the frame's pts (frame pts, else packet
 * pts, else stream dts) rescaled to the sample-rate time base, and forward
 * the decoded frame to the filtergraphs. */
2307 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2310 AVFrame *decoded_frame;
2311 AVCodecContext *avctx = ist->dec_ctx;
2313 AVRational decoded_frame_tb;
/* Lazily allocate the reusable decode/filter frames. */
2315 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2316 return AVERROR(ENOMEM);
2317 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2318 return AVERROR(ENOMEM);
2319 decoded_frame = ist->decoded_frame;
2321 update_benchmark(NULL);
2322 ret = decode(avctx, decoded_frame, got_output, pkt);
2323 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2327 if (ret >= 0 && avctx->sample_rate <= 0) {
2328 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2329 ret = AVERROR_INVALIDDATA;
2332 if (ret != AVERROR_EOF)
2333 check_decode_result(ist, got_output, ret);
2335 if (!*got_output || ret < 0)
2338 ist->samples_decoded += decoded_frame->nb_samples;
2339 ist->frames_decoded++;
2341 /* increment next_dts to use for the case where the input stream does not
2342 have timestamps or there are multiple frames in the packet */
2343 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2345 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Choose the pts source and matching time base: frame pts, else packet pts,
 * else fall back to the stream's dts in AV_TIME_BASE units. */
2348 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2349 decoded_frame_tb = ist->st->time_base;
2350 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2351 decoded_frame->pts = pkt->pts;
2352 decoded_frame_tb = ist->st->time_base;
2354 decoded_frame->pts = ist->dts;
2355 decoded_frame_tb = AV_TIME_BASE_Q;
2357 if (decoded_frame->pts != AV_NOPTS_VALUE)
2358 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2359 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2360 (AVRational){1, avctx->sample_rate});
2361 ist->nb_samples = decoded_frame->nb_samples;
2362 err = send_frame_to_filters(ist, decoded_frame);
2364 av_frame_unref(ist->filter_frame);
2365 av_frame_unref(decoded_frame);
2366 return err < 0 ? err : ret;
// Decode one video packet (or a drain call when pkt == NULL) on input stream
// 'ist'. On success a decoded frame is forwarded to the attached filtergraphs
// via send_frame_to_filters(). *got_output is set when a frame was produced;
// *duration_pts receives the frame's pkt_duration in stream time base.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
2369 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2372 AVFrame *decoded_frame;
2373 int i, ret = 0, err = 0;
2374 int64_t best_effort_timestamp;
2375 int64_t dts = AV_NOPTS_VALUE;
2378 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2379 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2381 if (!eof && pkt && pkt->size == 0)
// Lazily allocate the per-stream reusable frames.
2384 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2385 return AVERROR(ENOMEM);
2386 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2387 return AVERROR(ENOMEM);
2388 decoded_frame = ist->decoded_frame;
// Convert the stream-global dts (AV_TIME_BASE_Q) back into the stream's
// own time base for the packet handed to the decoder.
2389 if (ist->dts != AV_NOPTS_VALUE)
2390 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2393 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2396 // The old code used to set dts on the drain packet, which does not work
2397 // with the new API anymore.
// Remember dts values so they can be used as best-effort timestamps when
// draining at EOF (see the eof branch below).
2399 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2401 return AVERROR(ENOMEM);
2402 ist->dts_buffer = new;
2403 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2406 update_benchmark(NULL);
2407 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2408 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2412 // The following line may be required in some cases where there is no parser
2413 // or the parser does not has_b_frames correctly
2414 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2415 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2416 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2418 av_log(ist->dec_ctx, AV_LOG_WARNING,
2419 "video_delay is larger in decoder than demuxer %d > %d.\n"
2420 "If you want to help, upload a sample "
2421 "of this file to https://streams.videolan.org/upload/ "
2422 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2423 ist->dec_ctx->has_b_frames,
2424 ist->st->codecpar->video_delay);
2427 if (ret != AVERROR_EOF)
2428 check_decode_result(ist, got_output, ret);
// Debug aid: warn (at DEBUG level) when the decoder context and the frame
// disagree on geometry/pixel format.
2430 if (*got_output && ret >= 0) {
2431 if (ist->dec_ctx->width != decoded_frame->width ||
2432 ist->dec_ctx->height != decoded_frame->height ||
2433 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2434 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2435 decoded_frame->width,
2436 decoded_frame->height,
2437 decoded_frame->format,
2438 ist->dec_ctx->width,
2439 ist->dec_ctx->height,
2440 ist->dec_ctx->pix_fmt);
2444 if (!*got_output || ret < 0)
2447 if(ist->top_field_first>=0)
2448 decoded_frame->top_field_first = ist->top_field_first;
2450 ist->frames_decoded++;
// Download the frame from hardware memory if a hwaccel retrieval hook is set.
2452 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2453 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2457 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2459 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2460 *duration_pts = decoded_frame->pkt_duration;
// With a forced input framerate (-r on input), synthesize CFR timestamps.
2462 if (ist->framerate.num)
2463 best_effort_timestamp = ist->cfr_next_pts++;
// At EOF with no timestamp from the decoder, fall back to the buffered
// packet dts values recorded above (consumed FIFO-style).
2465 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2466 best_effort_timestamp = ist->dts_buffer[0];
2468 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2469 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2470 ist->nb_dts_buffer--;
2473 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2474 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2476 if (ts != AV_NOPTS_VALUE)
2477 ist->next_pts = ist->pts = ts;
2481 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2482 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2483 ist->st->index, av_ts2str(decoded_frame->pts),
2484 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2485 best_effort_timestamp,
2486 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2487 decoded_frame->key_frame, decoded_frame->pict_type,
2488 ist->st->time_base.num, ist->st->time_base.den);
2491 if (ist->st->sample_aspect_ratio.num)
2492 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2494 err = send_frame_to_filters(ist, decoded_frame);
2497 av_frame_unref(ist->filter_frame);
2498 av_frame_unref(decoded_frame);
// Decoder error takes precedence over the filter-send error.
2499 return err < 0 ? err : ret;
// Decode one subtitle packet and dispatch the result: to sub2video (for
// rendering subtitles onto video filtergraphs) and/or to every subtitle
// output stream that encodes from this input.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
2502 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2505 AVSubtitle subtitle;
2507 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2508 &subtitle, got_output, pkt);
2510 check_decode_result(NULL, got_output, ret);
2512 if (ret < 0 || !*got_output) {
2515 sub2video_flush(ist);
// -fix_sub_duration: clamp the previous subtitle's display time so it ends
// when the current one starts, delaying output by one subtitle.
2519 if (ist->fix_sub_duration) {
2521 if (ist->prev_sub.got_output) {
// Gap to the previous subtitle, converted to milliseconds.
2522 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2523 1000, AV_TIME_BASE);
2524 if (end < ist->prev_sub.subtitle.end_display_time) {
2525 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2526 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2527 ist->prev_sub.subtitle.end_display_time, end,
2528 end <= 0 ? ", dropping it" : "");
2529 ist->prev_sub.subtitle.end_display_time = end;
// Swap current and previous so the (now fixed) previous subtitle is the one
// processed below.
2532 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2533 FFSWAP(int, ret, ist->prev_sub.ret);
2534 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2542 if (ist->sub2video.frame) {
2543 sub2video_update(ist, INT64_MIN, &subtitle);
2544 } else if (ist->nb_filters) {
// Filters exist but sub2video is not ready yet: queue the subtitle in a
// growable FIFO for later.
2545 if (!ist->sub2video.sub_queue)
2546 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2547 if (!ist->sub2video.sub_queue)
2549 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2550 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2554 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2558 if (!subtitle.num_rects)
2561 ist->frames_decoded++;
// Hand the subtitle to every subtitle-encoding output fed by this input.
2563 for (i = 0; i < nb_output_streams; i++) {
2564 OutputStream *ost = output_streams[i];
2566 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2567 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2570 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2575 avsubtitle_free(&subtitle);
// Signal EOF (with the stream's final pts, converted back to the stream
// time base) to every filtergraph input attached to this input stream.
// NOTE(review): this listing is elided — the tail of the function is not
// visible here; code kept byte-identical.
2579 static int send_filter_eof(InputStream *ist)
2582 /* TODO keep pts also in stream time base to avoid converting back */
2583 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2584 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2586 for (i = 0; i < ist->nb_filters; i++) {
2587 ret = ifilter_send_eof(ist->filters[i], pts);
// Central per-packet dispatcher: maintains the input stream's dts/pts
// bookkeeping, runs the appropriate decoder (audio/video/subtitle) when
// decoding is needed, sends filter EOF after a flush, and performs the
// stream-copy timestamp advance + do_streamcopy() otherwise.
// Returns 0 once EOF has been reached, non-zero while more input remains.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
2594 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2595 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2599 int eof_reached = 0;
// First packet: seed ist->dts with a negative offset covering decoder
// delay (has_b_frames frames at the stream's average frame rate).
2602 if (!ist->saw_first_ts) {
2603 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2605 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2606 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2607 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2609 ist->saw_first_ts = 1;
2612 if (ist->next_dts == AV_NOPTS_VALUE)
2613 ist->next_dts = ist->dts;
2614 if (ist->next_pts == AV_NOPTS_VALUE)
2615 ist->next_pts = ist->pts;
2619 av_init_packet(&avpkt);
// Packet dts (rescaled to AV_TIME_BASE_Q) drives the stream clock.
2626 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2627 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2628 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2629 ist->next_pts = ist->pts = ist->dts;
2632 // while we have more to decode or while the decoder did output something on EOF
2633 while (ist->decoding_needed) {
2634 int64_t duration_dts = 0;
2635 int64_t duration_pts = 0;
2637 int decode_failed = 0;
2639 ist->pts = ist->next_pts;
2640 ist->dts = ist->next_dts;
2642 switch (ist->dec_ctx->codec_type) {
2643 case AVMEDIA_TYPE_AUDIO:
// 'repeating' passes NULL so the packet is only submitted once; later
// iterations merely drain buffered frames.
2644 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2647 case AVMEDIA_TYPE_VIDEO:
2648 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2650 if (!repeating || !pkt || got_output) {
2651 if (pkt && pkt->duration) {
2652 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2653 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
// Fall back to the codec frame rate, accounting for repeated fields
// reported by the parser (telecine etc.).
2654 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2655 duration_dts = ((int64_t)AV_TIME_BASE *
2656 ist->dec_ctx->framerate.den * ticks) /
2657 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2660 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2661 ist->next_dts += duration_dts;
2663 ist->next_dts = AV_NOPTS_VALUE;
// Prefer the decoded frame's own duration for pts advancement.
2667 if (duration_pts > 0) {
2668 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2670 ist->next_pts += duration_dts;
2674 case AVMEDIA_TYPE_SUBTITLE:
2677 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2678 if (!pkt && ret >= 0)
2685 if (ret == AVERROR_EOF) {
2691 if (decode_failed) {
2692 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2693 ist->file_index, ist->st->index, av_err2str(ret));
2695 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2696 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2698 if (!decode_failed || exit_on_error)
2704 ist->got_output = 1;
2709 // During draining, we might get multiple output frames in this loop.
2710 // ffmpeg.c does not drain the filter chain on configuration changes,
2711 // which means if we send multiple frames at once to the filters, and
2712 // one of those frames changes configuration, the buffered frames will
2713 // be lost. This can upset certain FATE tests.
2714 // Decode only 1 frame per call on EOF to appease these FATE tests.
2715 // The ideal solution would be to rewrite decoding to use the new
2716 // decoding API in a better way.
2723 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2724 /* except when looping we need to flush but not to send an EOF */
2725 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2726 int ret = send_filter_eof(ist);
2728 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2733 /* handle stream copy */
2734 if (!ist->decoding_needed && pkt) {
2735 ist->dts = ist->next_dts;
2736 switch (ist->dec_ctx->codec_type) {
2737 case AVMEDIA_TYPE_AUDIO:
2738 av_assert1(pkt->duration >= 0);
2739 if (ist->dec_ctx->sample_rate) {
2740 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2741 ist->dec_ctx->sample_rate;
2743 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2746 case AVMEDIA_TYPE_VIDEO:
2747 if (ist->framerate.num) {
2748 // TODO: Remove work-around for c99-to-c89 issue 7
2749 AVRational time_base_q = AV_TIME_BASE_Q;
// Forced input framerate: quantize to frame numbers and step by one frame.
2750 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2751 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2752 } else if (pkt->duration) {
2753 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2754 } else if(ist->dec_ctx->framerate.num != 0) {
2755 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2756 ist->next_dts += ((int64_t)AV_TIME_BASE *
2757 ist->dec_ctx->framerate.den * ticks) /
2758 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2762 ist->pts = ist->dts;
2763 ist->next_pts = ist->next_dts;
// Forward the raw packet to every stream-copy output fed by this input.
2765 for (i = 0; i < nb_output_streams; i++) {
2766 OutputStream *ost = output_streams[i];
2768 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2771 do_streamcopy(ist, ost, pkt);
2774 return !eof_reached;
// Build an SDP description covering all RTP output files and either print
// it to stdout or write it to -sdp_file. Waits until every output file has
// written its header before doing anything.
// NOTE(review): this listing is elided — some original lines (error paths,
// cleanup) are missing between the numbered lines; code kept byte-identical.
2777 static void print_sdp(void)
2782 AVIOContext *sdp_pb;
2783 AVFormatContext **avc;
// Bail out until all muxers have been initialized.
2785 for (i = 0; i < nb_output_files; i++) {
2786 if (!output_files[i]->header_written)
2790 avc = av_malloc_array(nb_output_files, sizeof(*avc));
// Collect only the RTP muxer contexts; j counts how many were found.
2793 for (i = 0, j = 0; i < nb_output_files; i++) {
2794 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2795 avc[j] = output_files[i]->ctx;
2803 av_sdp_create(avc, j, sdp, sizeof(sdp));
2805 if (!sdp_filename) {
2806 printf("SDP:\n%s\n", sdp);
2809 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2810 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2812 avio_print(sdp_pb, sdp);
2813 avio_closep(&sdp_pb);
// Free the filename so the SDP is only written once.
2814 av_freep(&sdp_filename);
// AVCodecContext.get_format callback: walk the decoder's offered pixel
// formats and pick one, initializing the requested hardware acceleration
// when a matching hwaccel format is found. Records the chosen format in
// ist->hwaccel_pix_fmt.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
2822 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2824 InputStream *ist = s->opaque;
2825 const enum AVPixelFormat *p;
2828 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2829 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2830 const AVCodecHWConfig *config = NULL;
// Software formats need no setup; only hwaccel formats are examined.
2833 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
// Generic/auto path: consult the codec's advertised hw configs for a
// device-context-based config matching this pixel format.
2836 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2837 ist->hwaccel_id == HWACCEL_AUTO) {
2839 config = avcodec_get_hw_config(s->codec, i);
2842 if (!(config->methods &
2843 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2845 if (config->pix_fmt == *p)
2850 if (config->device_type != ist->hwaccel_device_type) {
2851 // Different hwaccel offered, ignore.
2855 ret = hwaccel_decode_init(s);
// A failed init is fatal only when the user explicitly requested this
// hwaccel (HWACCEL_AUTO keeps scanning instead).
2857 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2858 av_log(NULL, AV_LOG_FATAL,
2859 "%s hwaccel requested for input stream #%d:%d, "
2860 "but cannot be initialized.\n",
2861 av_hwdevice_get_type_name(config->device_type),
2862 ist->file_index, ist->st->index);
2863 return AV_PIX_FMT_NONE;
// Legacy path: look the format up in the static hwaccels[] table.
2868 const HWAccel *hwaccel = NULL;
2870 for (i = 0; hwaccels[i].name; i++) {
2871 if (hwaccels[i].pix_fmt == *p) {
2872 hwaccel = &hwaccels[i];
2877 // No hwaccel supporting this pixfmt.
2880 if (hwaccel->id != ist->hwaccel_id) {
2881 // Does not match requested hwaccel.
2885 ret = hwaccel->init(s);
2887 av_log(NULL, AV_LOG_FATAL,
2888 "%s hwaccel requested for input stream #%d:%d, "
2889 "but cannot be initialized.\n", hwaccel->name,
2890 ist->file_index, ist->st->index);
2891 return AV_PIX_FMT_NONE;
// Propagate a pre-existing hardware frames context into the decoder.
2895 if (ist->hw_frames_ctx) {
2896 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2897 if (!s->hw_frames_ctx)
2898 return AV_PIX_FMT_NONE;
2901 ist->hwaccel_pix_fmt = *p;
2908 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2910 InputStream *ist = s->opaque;
2912 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2913 return ist->hwaccel_get_buffer(s, frame, flags);
2915 return avcodec_default_get_buffer2(s, frame, flags);
// Open the decoder for one input stream (if decoding is needed): install
// the get_format/get_buffer callbacks, set decoder options, perform hw
// device setup, call avcodec_open2(), and reset the pts/dts predictors.
// On failure, writes a human-readable message into 'error' and returns a
// negative AVERROR code.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
2918 static int init_input_stream(int ist_index, char *error, int error_len)
2921 InputStream *ist = input_streams[ist_index];
2923 if (ist->decoding_needed) {
2924 AVCodec *codec = ist->dec;
2926 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2927 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2928 return AVERROR(EINVAL);
// Hook this InputStream into the decoder callbacks via the opaque pointer.
2931 ist->dec_ctx->opaque = ist;
2932 ist->dec_ctx->get_format = get_format;
2933 ist->dec_ctx->get_buffer2 = get_buffer;
2934 #if LIBAVCODEC_VERSION_MAJOR < 60
// Deprecated field, gone in lavc 60; only set it on older versions.
2935 ist->dec_ctx->thread_safe_callbacks = 1;
2938 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2939 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2940 (ist->decoding_needed & DECODING_FOR_OST)) {
2941 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2942 if (ist->decoding_needed & DECODING_FOR_FILTER)
2943 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2946 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2948 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2949 * audio, and video decoders such as cuvid or mediacodec */
2950 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2952 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2953 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2954 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2955 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2956 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2958 ret = hw_device_setup_for_decode(ist);
2960 snprintf(error, error_len, "Device setup failed for "
2961 "decoder on input stream #%d:%d : %s",
2962 ist->file_index, ist->st->index, av_err2str(ret));
2966 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2967 if (ret == AVERROR_EXPERIMENTAL)
2968 abort_codec_experimental(codec, 0);
2970 snprintf(error, error_len,
2971 "Error while opening decoder for input stream "
2973 ist->file_index, ist->st->index, av_err2str(ret));
// Any decoder options not consumed by avcodec_open2() are reported here.
2976 assert_avoptions(ist->decoder_opts);
2979 ist->next_pts = AV_NOPTS_VALUE;
2980 ist->next_dts = AV_NOPTS_VALUE;
// Return the InputStream feeding this output stream, if it has a direct
// source (source_index >= 0).
// NOTE(review): this listing is elided — the fall-through return is not
// visible here; code kept byte-identical.
2985 static InputStream *get_input_stream(OutputStream *ost)
2987 if (ost->source_index >= 0)
2988 return input_streams[ost->source_index];
/* qsort()-style comparator for int64_t values: returns negative, zero or
 * positive as *a is less than, equal to, or greater than *b. The
 * difference-of-comparisons form avoids overflow that plain subtraction
 * of int64_t values could cause. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;
    return (va > vb) - (va < vb);
}
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
2997 /* open the muxer when all the streams are initialized */
2998 static int check_init_output_file(OutputFile *of, int file_index)
// Do nothing until every stream of this output file is initialized.
3002 for (i = 0; i < of->ctx->nb_streams; i++) {
3003 OutputStream *ost = output_streams[of->ost_index + i];
3004 if (!ost->initialized)
3008 of->ctx->interrupt_callback = int_cb;
3010 ret = avformat_write_header(of->ctx, &of->opts);
3012 av_log(NULL, AV_LOG_ERROR,
3013 "Could not write header for output file #%d "
3014 "(incorrect codec parameters ?): %s\n",
3015 file_index, av_err2str(ret));
3018 //assert_avoptions(of->opts);
3019 of->header_written = 1;
3021 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3024 if (sdp_filename || want_sdp)
3027 /* flush the muxing queues */
3028 for (i = 0; i < of->ctx->nb_streams; i++) {
3029 OutputStream *ost = output_streams[of->ost_index + i];
3031 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3032 if (!av_fifo_size(ost->muxing_queue))
3033 ost->mux_timebase = ost->st->time_base;
// Drain packets that were queued while the muxer was not yet open.
3035 while (av_fifo_size(ost->muxing_queue)) {
3037 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3038 ost->muxing_queue_data_size -= pkt.size;
3039 write_packet(of, &pkt, ost, 1);
// Initialize the output stream's bitstream filter (if any): feed it the
// stream's codec parameters and time base, then propagate the filter's
// output parameters/time base back onto the stream.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
3046 static int init_output_bsfs(OutputStream *ost)
3048 AVBSFContext *ctx = ost->bsf_ctx;
3054 ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3058 ctx->time_base_in = ost->st->time_base;
3060 ret = av_bsf_init(ctx);
3062 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
// The BSF may rewrite codec parameters and time base; adopt them.
3067 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3070 ost->st->time_base = ctx->time_base_out;
// Initialize an output stream in stream-copy mode: copy codec parameters
// from the input, pick a codec tag valid for the output container, carry
// over timing info, disposition, side data and per-codec-type fields.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
3075 static int init_output_stream_streamcopy(OutputStream *ost)
3077 OutputFile *of = output_files[ost->file_index];
3078 InputStream *ist = get_input_stream(ost);
3079 AVCodecParameters *par_dst = ost->st->codecpar;
3080 AVCodecParameters *par_src = ost->ref_par;
3083 uint32_t codec_tag = par_dst->codec_tag;
// Stream copy requires a direct input source and no filtergraph.
3085 av_assert0(ist && !ost->filter);
3087 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3089 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3091 av_log(NULL, AV_LOG_FATAL,
3092 "Error setting up codec context options.\n");
3096 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3098 av_log(NULL, AV_LOG_FATAL,
3099 "Error getting reference codec parameters.\n");
// Keep the source codec tag only when the output container either has no
// tag table, maps this tag to the same codec id, or has no tag of its own.
3104 unsigned int codec_tag_tmp;
3105 if (!of->ctx->oformat->codec_tag ||
3106 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3107 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3108 codec_tag = par_src->codec_tag;
3111 ret = avcodec_parameters_copy(par_dst, par_src);
3115 par_dst->codec_tag = codec_tag;
3117 if (!ost->frame_rate.num)
3118 ost->frame_rate = ist->framerate;
3119 ost->st->avg_frame_rate = ost->frame_rate;
3121 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3125 // copy timebase while removing common factors
3126 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3127 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3129 // copy estimated duration as a hint to the muxer
3130 if (ost->st->duration <= 0 && ist->st->duration > 0)
3131 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3134 ost->st->disposition = ist->st->disposition;
3136 if (ist->st->nb_side_data) {
3137 for (i = 0; i < ist->st->nb_side_data; i++) {
3138 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3141 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3143 return AVERROR(ENOMEM);
3144 memcpy(dst_data, sd_src->data, sd_src->size);
// A user-specified rotation override is written as display-matrix side data.
3148 if (ost->rotate_overridden) {
3149 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3150 sizeof(int32_t) * 9);
3152 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3155 switch (par_dst->codec_type) {
3156 case AVMEDIA_TYPE_AUDIO:
3157 if (audio_volume != 256) {
3158 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
// Known-bogus block_align values for MP3/AC3 are cleared.
3161 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3162 par_dst->block_align= 0;
3163 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3164 par_dst->block_align= 0;
3166 case AVMEDIA_TYPE_VIDEO:
3167 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3169 av_mul_q(ost->frame_aspect_ratio,
3170 (AVRational){ par_dst->height, par_dst->width });
3171 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3172 "with stream copy may produce invalid files\n");
3174 else if (ist->st->sample_aspect_ratio.num)
3175 sar = ist->st->sample_aspect_ratio;
3177 sar = par_src->sample_aspect_ratio;
3178 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3179 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3180 ost->st->r_frame_rate = ist->st->r_frame_rate;
3184 ost->mux_timebase = ist->st->time_base;
// Set the "encoder" metadata tag on the output stream to identify the
// encoder used (full LIBAVCODEC_IDENT unless bitexact output is requested,
// in which case just "Lavc <name>"). Respects a pre-existing user tag.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
3189 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3191 AVDictionaryEntry *e;
3193 uint8_t *encoder_string;
3194 int encoder_string_len;
3195 int format_flags = 0;
3196 int codec_flags = ost->enc_ctx->flags;
// A user-provided encoder tag wins; don't overwrite it.
3198 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
// Evaluate -fflags / -flags option strings to detect +bitexact requests
// that have not been applied to the contexts yet.
3201 e = av_dict_get(of->opts, "fflags", NULL, 0);
3203 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3206 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3208 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3210 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3213 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3216 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3217 encoder_string = av_mallocz(encoder_string_len);
3218 if (!encoder_string)
3221 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3222 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3224 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3225 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
// DONT_STRDUP_VAL transfers ownership of encoder_string to the dictionary.
3226 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3227 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
// Parse the -force_key_frames specification 'kf' (comma-separated times,
// or "chapters[+offset]") into a sorted array of keyframe timestamps in
// the encoder time base, stored in ost->forced_kf_pts/forced_kf_count.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
3230 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3231 AVCodecContext *avctx)
3234 int n = 1, i, size, index = 0;
// Count comma-separated entries to size the initial array.
3237 for (p = kf; *p; p++)
3241 pts = av_malloc_array(size, sizeof(*pts));
3243 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3248 for (i = 0; i < n; i++) {
3249 char *next = strchr(p, ',');
// "chapters[+offset]": force a keyframe at every chapter start (+offset).
3254 if (!memcmp(p, "chapters", 8)) {
3256 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3259 if (avf->nb_chapters > INT_MAX - size ||
3260 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3262 av_log(NULL, AV_LOG_FATAL,
3263 "Could not allocate forced key frames array.\n");
3266 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3267 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3269 for (j = 0; j < avf->nb_chapters; j++) {
3270 AVChapter *c = avf->chapters[j];
3271 av_assert1(index < size);
3272 pts[index++] = av_rescale_q(c->start, c->time_base,
3273 avctx->time_base) + t;
// Plain timestamp entry, converted to the encoder time base.
3278 t = parse_time_or_die("force_key_frames", p, 1);
3279 av_assert1(index < size);
3280 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3287 av_assert0(index == size);
// Keep the array sorted so lookups during encoding can scan in order.
3288 qsort(pts, size, sizeof(*pts), compare_int64);
3289 ost->forced_kf_count = size;
3290 ost->forced_kf_pts = pts;
// Choose the encoder time base: an explicit -enc_time_base value wins;
// a negative value means "copy the input stream's time base"; otherwise
// fall back to the caller-supplied default.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
3293 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3295 InputStream *ist = get_input_stream(ost);
3296 AVCodecContext *enc_ctx = ost->enc_ctx;
3297 AVFormatContext *oc;
3299 if (ost->enc_timebase.num > 0) {
3300 enc_ctx->time_base = ost->enc_timebase;
// Negative enc_timebase requests copying the input stream's time base.
3304 if (ost->enc_timebase.num < 0) {
3306 enc_ctx->time_base = ist->st->time_base;
3310 oc = output_files[ost->file_index]->ctx;
3311 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3314 enc_ctx->time_base = default_time_base;
// Configure the encoder context for an output stream about to be encoded:
// disposition, frame rate selection, per-media-type parameters taken from
// the buffersink (sample rate/format, dimensions, pixel format, SAR),
// color/interlacing properties from the first frame, and forced-keyframe
// parsing. 'frame' may carry the first filtered frame's properties.
// NOTE(review): this listing is elided — some original lines are missing
// between the numbered lines; code kept byte-identical.
3317 static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3319 InputStream *ist = get_input_stream(ost);
3320 AVCodecContext *enc_ctx = ost->enc_ctx;
3321 AVCodecContext *dec_ctx = NULL;
3322 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3325 set_encoder_id(output_files[ost->file_index], ost);
3327 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3328 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3329 // which have to be filtered out to prevent leaking them to output files.
3330 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3333 ost->st->disposition = ist->st->disposition;
3335 dec_ctx = ist->dec_ctx;
3337 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
// If this is the only stream of its media type in the file, mark audio/
// video streams as the default disposition.
3339 for (j = 0; j < oc->nb_streams; j++) {
3340 AVStream *st = oc->streams[j];
3341 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3344 if (j == oc->nb_streams)
3345 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3346 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3347 ost->st->disposition = AV_DISPOSITION_DEFAULT;
// Frame rate selection priority: -r option, filtergraph, input -r,
// input r_frame_rate, then a 25 fps last-resort default.
3350 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3351 if (!ost->frame_rate.num)
3352 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3353 if (ist && !ost->frame_rate.num)
3354 ost->frame_rate = ist->framerate;
3355 if (ist && !ost->frame_rate.num)
3356 ost->frame_rate = ist->st->r_frame_rate;
3357 if (ist && !ost->frame_rate.num) {
3358 ost->frame_rate = (AVRational){25, 1};
3359 av_log(NULL, AV_LOG_WARNING,
3361 "about the input framerate is available. Falling "
3362 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3363 "if you want a different framerate.\n",
3364 ost->file_index, ost->index);
// Snap to the nearest frame rate the encoder supports (unless -force_fps).
3367 if (ost->enc->supported_framerates && !ost->force_fps) {
3368 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3369 ost->frame_rate = ost->enc->supported_framerates[idx];
3371 // reduce frame rate for mpeg4 to be within the spec limits
3372 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3373 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3374 ost->frame_rate.num, ost->frame_rate.den, 65535);
3378 switch (enc_ctx->codec_type) {
3379 case AVMEDIA_TYPE_AUDIO:
3380 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3382 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3383 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3384 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3385 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3386 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3388 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3391 case AVMEDIA_TYPE_VIDEO:
3392 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3394 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3395 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3396 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3397 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3398 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3399 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3402 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3403 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3404 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3405 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3406 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3407 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3409 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3411 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3412 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
// Color properties are taken from the first filtered frame when available.
3415 enc_ctx->color_range = frame->color_range;
3416 enc_ctx->color_primaries = frame->color_primaries;
3417 enc_ctx->color_trc = frame->color_trc;
3418 enc_ctx->colorspace = frame->colorspace;
3419 enc_ctx->chroma_sample_location = frame->chroma_location;
3422 enc_ctx->framerate = ost->frame_rate;
3424 ost->st->avg_frame_rate = ost->frame_rate;
3427 enc_ctx->width != dec_ctx->width ||
3428 enc_ctx->height != dec_ctx->height ||
3429 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3430 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
// Explicit -top option: 0 = bottom field first, 1 = top field first.
3433 if (ost->top_field_first == 0) {
3434 enc_ctx->field_order = AV_FIELD_BB;
3435 } else if (ost->top_field_first == 1) {
3436 enc_ctx->field_order = AV_FIELD_TT;
3440 if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
3441 ost->top_field_first >= 0)
3442 frame->top_field_first = !!ost->top_field_first;
3444 if (frame->interlaced_frame) {
3445 if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3446 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3448 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3450 enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
// -force_key_frames: "expr:" is parsed lazily per frame; "source" keeps
// input keyframes; anything else is a static timestamp list.
3453 if (ost->forced_keyframes) {
3454 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3455 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3456 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3458 av_log(NULL, AV_LOG_ERROR,
3459 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3462 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3463 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3464 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3465 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3467 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3468 // parse it only for static kf timings
3469 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3470 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3474 case AVMEDIA_TYPE_SUBTITLE:
3475 enc_ctx->time_base = AV_TIME_BASE_Q;
3476 if (!enc_ctx->width) {
3477 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3478 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3481 case AVMEDIA_TYPE_DATA:
3488 ost->mux_timebase = enc_ctx->time_base;
3493 static int init_output_stream(OutputStream *ost, AVFrame *frame,
3494 char *error, int error_len)
/* Fully initialize one output stream: open the encoder (if transcoding) or
 * perform streamcopy setup, apply user disposition flags, init bitstream
 * filters, and kick the owning output file's header write once all of its
 * streams are ready. On failure a human-readable message is written into
 * 'error' (error_len bytes) and a negative AVERROR code is returned. */
3498 if (ost->encoding_needed) {
3499 AVCodec *codec = ost->enc;
3500 AVCodecContext *dec = NULL;
3503 ret = init_output_stream_encode(ost, frame);
3507 if ((ist = get_input_stream(ost)))
/* Propagate the decoder's subtitle header (e.g. ASS styles) to the encoder. */
3509 if (dec && dec->subtitle_header) {
3510 /* ASS code assumes this buffer is null terminated so add extra byte. */
3511 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3512 if (!ost->enc_ctx->subtitle_header)
3513 return AVERROR(ENOMEM);
3514 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3515 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic threading unless the user set "threads" explicitly. */
3517 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3518 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Audio default bitrate of 128 kb/s when neither "b" nor "ab" was given.
 * NOTE(review): the condition on line 3520 is not visible here — presumably
 * it also checks that the codec is bitrate-based; confirm against the file. */
3519 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3521 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3522 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3523 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3525 ret = hw_device_setup_for_encode(ost);
3527 snprintf(error, error_len, "Device setup failed for "
3528 "encoder on output stream #%d:%d : %s",
3529 ost->file_index, ost->index, av_err2str(ret));
/* Subtitle transcoding is only supported within the same representation
 * class: text->text or bitmap->bitmap, never across the two. */
3533 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3534 int input_props = 0, output_props = 0;
3535 AVCodecDescriptor const *input_descriptor =
3536 avcodec_descriptor_get(dec->codec_id);
3537 AVCodecDescriptor const *output_descriptor =
3538 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3539 if (input_descriptor)
3540 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3541 if (output_descriptor)
3542 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3543 if (input_props && output_props && input_props != output_props) {
3544 snprintf(error, error_len,
3545 "Subtitle encoding currently only possible from text to text "
3546 "or bitmap to bitmap");
3547 return AVERROR_INVALIDDATA;
3551 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3552 if (ret == AVERROR_EXPERIMENTAL)
3553 abort_codec_experimental(codec, 1);
3554 snprintf(error, error_len,
3555 "Error while opening encoder for output stream #%d:%d - "
3556 "maybe incorrect parameters such as bit_rate, rate, width or height",
3557 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit frames of
 * exactly enc_ctx->frame_size samples. */
3560 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3561 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3562 av_buffersink_set_frame_size(ost->filter->filter,
3563 ost->enc_ctx->frame_size);
3564 assert_avoptions(ost->encoder_opts);
/* Heuristic: a bitrate below 1000 almost certainly means the user typed
 * kbit/s where bit/s was expected (Codec2's legit sub-kbit modes excepted). */
3565 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3566 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3567 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3568 " It takes bits/s as argument, not kbits/s\n");
3570 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3572 av_log(NULL, AV_LOG_FATAL,
3573 "Error initializing the output stream codec context.\n");
/* Copy side data the encoder produced into the muxer-visible stream. */
3577 if (ost->enc_ctx->nb_coded_side_data) {
3580 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3581 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3584 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3586 return AVERROR(ENOMEM);
3587 memcpy(dst_data, sd_src->data, sd_src->size);
3592 * Add global input side data. For now this is naive, and copies it
3593 * from the input stream's global side data. All side data should
3594 * really be funneled over AVFrame and libavfilter, then added back to
3595 * packet side data, and then potentially using the first packet for
3600 for (i = 0; i < ist->st->nb_side_data; i++) {
3601 AVPacketSideData *sd = &ist->st->side_data[i];
3602 if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3603 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3605 return AVERROR(ENOMEM);
3606 memcpy(dst, sd->data, sd->size);
/* autorotate applies the rotation in the filter chain, so neutralize the
 * copied display matrix to avoid rotating twice downstream. */
3607 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3608 av_display_rotation_set((uint32_t *)dst, 0);
3613 // copy timebase while removing common factors
3614 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
/* av_add_q with 0/1 is used purely for its rational reduction side effect. */
3615 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3617 // copy estimated duration as a hint to the muxer
3618 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3619 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3620 } else if (ost->stream_copy) {
3621 ret = init_output_stream_streamcopy(ost);
3626 // parse user provided disposition, and update stream values
/* A throwaway AVOption table + class lets av_opt_eval_flags parse the
 * comma/plus-separated disposition string into AV_DISPOSITION_* bits. */
3627 if (ost->disposition) {
3628 static const AVOption opts[] = {
3629 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3630 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3631 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3632 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3633 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3634 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3635 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3636 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3637 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3638 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3639 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3640 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3641 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3642 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3643 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3644 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3647 static const AVClass class = {
3649 .item_name = av_default_item_name,
3651 .version = LIBAVUTIL_VERSION_INT,
3653 const AVClass *pclass = &class;
3655 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3660 /* initialize bitstream filters for the output stream
3661 * needs to be done here, because the codec id for streamcopy is not
3662 * known until now */
3663 ret = init_output_bsfs(ost);
3667 ost->initialized = 1;
/* May write the output file header if this was the last stream the file
 * was waiting for. */
3669 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (once per stream index) about a stream that appeared mid-demux and is
 * therefore ignored; remembers the highest index warned about so repeated
 * packets from the same new stream stay silent. */
3676 static void report_new_stream(int input_index, AVPacket *pkt)
3678 InputFile *file = input_files[input_index];
3679 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a higher) stream index — the early-exit body
 * on the elided line 3682 presumably returns here. */
3681 if (pkt->stream_index < file->nb_streams_warn)
3683 av_log(file->ctx, AV_LOG_WARNING,
3684 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3685 av_get_media_type_string(st->codecpar->codec_type),
3686 input_index, pkt->stream_index,
3687 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3688 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to a source stream, init framerate emulation and all input streams,
 * eagerly initialize non-AVFrame-based output streams, discard unused
 * programs, write headers for stream-less outputs, and dump the stream
 * mapping. Returns 0 on success or a negative AVERROR code. */
3691 static int transcode_init(void)
3693 int ret = 0, i, j, k;
3694 AVFormatContext *oc;
3697 char error[1024] = {0};
/* For single-input complex filtergraphs, point the output stream's
 * source_index at that lone input stream so downstream bookkeeping works. */
3699 for (i = 0; i < nb_filtergraphs; i++) {
3700 FilterGraph *fg = filtergraphs[i];
3701 for (j = 0; j < fg->nb_outputs; j++) {
3702 OutputFilter *ofilter = fg->outputs[j];
3703 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3705 if (fg->nb_inputs != 1)
3707 for (k = nb_input_streams-1; k >= 0 ; k--)
3708 if (fg->inputs[0]->ist == input_streams[k])
3710 ofilter->ost->source_index = k;
3714 /* init framerate emulation */
3715 for (i = 0; i < nb_input_files; i++) {
3716 InputFile *ifile = input_files[i];
3717 if (ifile->rate_emu)
3718 for (j = 0; j < ifile->nb_streams; j++)
/* Record the wall-clock start so -re can pace packet delivery. */
3719 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3722 /* init input streams */
3723 for (i = 0; i < nb_input_streams; i++)
3724 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoders already opened before bailing out. */
3725 for (i = 0; i < nb_output_streams; i++) {
3726 ost = output_streams[i];
3727 avcodec_close(ost->enc_ctx);
3733 * initialize stream copy and subtitle/data streams.
3734 * Encoded AVFrame based streams will get initialized as follows:
3735 * - when the first AVFrame is received in do_video_out
3736 * - just before the first AVFrame is received in either transcode_step
3737 * or reap_filters due to us requiring the filter chain buffer sink
3738 * to be configured with the correct audio frame size, which is only
3739 * known after the encoder is initialized.
3741 for (i = 0; i < nb_output_streams; i++) {
/* Skip encoded audio/video here — they are initialized lazily (see above). */
3742 if (!output_streams[i]->stream_copy &&
3743 (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3744 output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3747 ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3752 /* discard unused programs */
3753 for (i = 0; i < nb_input_files; i++) {
3754 InputFile *ifile = input_files[i];
3755 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3756 AVProgram *p = ifile->ctx->programs[j];
3757 int discard = AVDISCARD_ALL;
/* Keep a program if any of its member streams is actually used. */
3759 for (k = 0; k < p->nb_stream_indexes; k++)
3760 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3761 discard = AVDISCARD_DEFAULT;
3764 p->discard = discard;
3768 /* write headers for files with no streams */
3769 for (i = 0; i < nb_output_files; i++) {
3770 oc = output_files[i]->ctx;
3771 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3772 ret = check_init_output_file(output_files[i], i);
3779 /* dump the stream mapping */
3780 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3781 for (i = 0; i < nb_input_streams; i++) {
3782 ist = input_streams[i];
3784 for (j = 0; j < ist->nb_filters; j++) {
3785 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3786 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3787 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3788 ist->filters[j]->name);
3789 if (nb_filtergraphs > 1)
3790 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3791 av_log(NULL, AV_LOG_INFO, "\n");
3796 for (i = 0; i < nb_output_streams; i++) {
3797 ost = output_streams[i];
3799 if (ost->attachment_filename) {
3800 /* an attached file */
3801 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3802 ost->attachment_filename, ost->file_index, ost->index);
3806 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3807 /* output from a complex graph */
3808 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3809 if (nb_filtergraphs > 1)
3810 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3812 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3813 ost->index, ost->enc ? ost->enc->name : "?");
3817 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3818 input_streams[ost->source_index]->file_index,
3819 input_streams[ost->source_index]->st->index,
/* Mention sync stream only when it differs from the source stream. */
3822 if (ost->sync_ist != input_streams[ost->source_index])
3823 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3824 ost->sync_ist->file_index,
3825 ost->sync_ist->st->index);
3826 if (ost->stream_copy)
3827 av_log(NULL, AV_LOG_INFO, " (copy)");
3829 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3830 const AVCodec *out_codec = ost->enc;
3831 const char *decoder_name = "?";
3832 const char *in_codec_name = "?";
3833 const char *encoder_name = "?";
3834 const char *out_codec_name = "?";
3835 const AVCodecDescriptor *desc;
3838 decoder_name = in_codec->name;
3839 desc = avcodec_descriptor_get(in_codec->id);
3841 in_codec_name = desc->name;
/* When the implementation name equals the codec name, print "native". */
3842 if (!strcmp(decoder_name, in_codec_name))
3843 decoder_name = "native";
3847 encoder_name = out_codec->name;
3848 desc = avcodec_descriptor_get(out_codec->id);
3850 out_codec_name = desc->name;
3851 if (!strcmp(encoder_name, out_codec_name))
3852 encoder_name = "native";
3855 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3856 in_codec_name, decoder_name,
3857 out_codec_name, encoder_name);
3859 av_log(NULL, AV_LOG_INFO, "\n");
3863 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Published so signal handlers / other threads know init has finished. */
3867 atomic_store(&transcode_init_done, 1);
3872 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3873 static int need_output(void)
3877 for (i = 0; i < nb_output_streams; i++) {
3878 OutputStream *ost = output_streams[i];
3879 OutputFile *of = output_files[ost->file_index];
3880 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* A stream needs no more output once finished or once its file hit the
 * -fs size limit. */
3882 if (ost->finished ||
3883 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of the owning file. */
3885 if (ost->frame_number >= ost->max_frames) {
3887 for (j = 0; j < of->ctx->nb_streams; j++)
3888 close_output_stream(output_streams[of->ost_index + j]);
3899 * Select the output stream to process.
3901 * @return selected output stream, or NULL if none available
3903 static OutputStream *choose_output(void)
3906 int64_t opts_min = INT64_MAX;
3907 OutputStream *ost_min = NULL;
3909 for (i = 0; i < nb_output_streams; i++) {
3910 OutputStream *ost = output_streams[i];
/* Rescale the stream's current DTS to a common timebase for comparison;
 * streams with no DTS yet sort first (INT64_MIN) so they get fed. */
3911 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3912 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3914 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3915 av_log(NULL, AV_LOG_DEBUG,
3916 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3917 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
/* NOTE(review): the body of this condition (line 3920/3921) is elided —
 * presumably it returns/continues for uninitialized streams; confirm. */
3919 if (!ost->initialized && !ost->inputs_done)
/* Pick the non-finished stream that is furthest behind (smallest DTS);
 * an 'unavailable' stream yields NULL so the caller retries input. */
3922 if (!ost->finished && opts < opts_min) {
3924 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios; silently
 * does nothing if stdin is not a tty (tcgetattr fails). */
3930 static void set_tty_echo(int on)
3934 if (tcgetattr(0, &tty) == 0) {
3935 if (on) tty.c_lflag |= ECHO;
3936 else tty.c_lflag &= ~ECHO;
3937 tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms, never when daemonized) and act on
 * single-key commands: verbosity, QP histogram, packet dumping, debug modes,
 * and interactive filtergraph commands. Returns AVERROR_EXIT when the user
 * requested termination (signal or quit key), 0 otherwise. */
3942 static int check_keyboard_interaction(int64_t cur_time)
3945 static int64_t last_time;
3946 if (received_nb_signals)
3947 return AVERROR_EXIT;
3948 /* read_key() returns 0 on EOF */
3949 if(cur_time - last_time >= 100000 && !run_as_daemon){
3951 last_time = cur_time;
3955 return AVERROR_EXIT;
3956 if (key == '+') av_log_set_level(av_log_get_level()+10);
3957 if (key == '-') av_log_set_level(av_log_get_level()-10);
3958 if (key == 's') qp_hist ^= 1;
/* 'h' cycles through: no dump -> packet dump -> packet+hex dump -> off. */
3961 do_hex_dump = do_pkt_dump = 0;
3962 } else if(do_pkt_dump){
3966 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': prompt for "<target> <time> <command> [<arg>]" and send ('c',
 * first matching filter only) or queue ('C', all matching) it to every
 * filtergraph. Echo is re-enabled while the user types. */
3968 if (key == 'c' || key == 'C'){
3969 char buf[4096], target[64], command[256], arg[256] = {0};
3972 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3975 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3980 fprintf(stderr, "\n");
3982 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3983 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3984 target, time, command, arg);
3985 for (i = 0; i < nb_filtergraphs; i++) {
3986 FilterGraph *fg = filtergraphs[i];
3989 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3990 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3991 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3992 } else if (key == 'c') {
3993 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3994 ret = AVERROR_PATCHWELCOME;
3996 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3998 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4003 av_log(NULL, AV_LOG_ERROR,
4004 "Parse error, at least 3 arguments were expected, "
4005 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles decoder/encoder debug flags; 'D' (elided branch) presumably
 * prompts for an explicit numeric debug value — confirm against the file. */
4008 if (key == 'd' || key == 'D'){
4011 debug = input_streams[0]->dec_ctx->debug << 1;
4012 if(!debug) debug = 1;
4013 while(debug & (FF_DEBUG_DCT_COEFF
4015 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
4017 )) //unsupported, would just crash
4024 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4029 fprintf(stderr, "\n");
4030 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4031 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug mask to every decoder and encoder context. */
4033 for(i=0;i<nb_input_streams;i++) {
4034 input_streams[i]->dec_ctx->debug = debug;
4036 for(i=0;i<nb_output_streams;i++) {
4037 OutputStream *ost = output_streams[i];
4038 ost->enc_ctx->debug = debug;
4040 if(debug) av_log_set_level(AV_LOG_DEBUG);
4041 fprintf(stderr,"debug=%d\n", debug);
4044 fprintf(stderr, "key function\n"
4045 "? show this help\n"
4046 "+ increase verbosity\n"
4047 "- decrease verbosity\n"
4048 "c Send command to first matching filter supporting it\n"
4049 "C Send/Queue command to all matching filters\n"
4050 "D cycle through available debug modes\n"
4051 "h dump packets/hex press to cycle through the 3 states\n"
4053 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame and pushes
 * them into the file's thread message queue until EOF/error, then records the
 * terminal status on the queue so the receiver sees it. */
4060 static void *input_thread(void *arg)
4063 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4068 ret = av_read_frame(f->ctx, &pkt);
4070 if (ret == AVERROR(EAGAIN)) {
/* Fatal read error or EOF: propagate to the consumer side of the queue. */
4075 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4078 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: warn once, then retry blocking so the
 * packet is not lost. */
4079 if (flags && ret == AVERROR(EAGAIN)) {
4081 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4082 av_log(f->ctx, AV_LOG_WARNING,
4083 "Thread message queue blocking; consider raising the "
4084 "thread_queue_size option (current value: %d)\n",
4085 f->thread_queue_size);
4088 if (ret != AVERROR_EOF)
4089 av_log(f->ctx, AV_LOG_ERROR,
4090 "Unable to send packet to main thread: %s\n",
/* Send failed: drop the packet we still own and signal the receiver. */
4092 av_packet_unref(&pkt);
4093 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and reap the demuxer thread of input file i: mark the queue as closed
 * for the sender, drain any queued packets, join the thread, free the queue.
 * No-op if the file has no thread queue. */
4101 static void free_input_thread(int i)
4103 InputFile *f = input_files[i];
4106 if (!f || !f->in_thread_queue)
4108 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain so the sender unblocks and no packets leak. */
4109 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4110 av_packet_unref(&pkt);
4112 pthread_join(f->thread, NULL);
4114 av_thread_message_queue_free(&f->in_thread_queue);
/* Tear down the demuxer threads of all input files. */
4117 static void free_input_threads(void)
4121 for (i = 0; i < nb_input_files; i++)
4122 free_input_thread(i);
/* Start a demuxer thread for input file i, allocating its packet message
 * queue first. Threading is skipped (queue size 0) for a single input file
 * unless the user forced a queue size. Returns 0 or a negative AVERROR. */
4125 static int init_input_thread(int i)
4128 InputFile *f = input_files[i];
4130 if (f->thread_queue_size < 0)
4131 f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4132 if (!f->thread_queue_size)
/* Non-seekable (live/pipe) inputs — except lavfi — must not block the main
 * loop, so their queue is read in non-blocking mode. */
4135 if (f->ctx->pb ? !f->ctx->pb->seekable :
4136 strcmp(f->ctx->iformat->name, "lavfi"))
4137 f->non_blocking = 1;
4138 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4139 f->thread_queue_size, sizeof(AVPacket));
4143 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4144 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4145 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno code, not -1/errno. */
4146 return AVERROR(ret);
/* Start demuxer threads for every input file; stops at the first failure
 * (error propagation lines are elided here). */
4152 static int init_input_threads(void)
4156 for (i = 0; i < nb_input_files; i++) {
4157 ret = init_input_thread(i);
/* Receive one packet from the input file's demuxer-thread queue; uses a
 * non-blocking receive for files flagged non_blocking (condition partially
 * elided on line 4167). */
4164 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4166 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4168 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from an input file: enforces -re rate emulation
 * (returns EAGAIN while any stream of the file is ahead of wall clock),
 * then reads from the thread queue if threading is active, else directly
 * from the demuxer. */
4172 static int get_input_packet(InputFile *f, AVPacket *pkt)
4176 for (i = 0; i < f->nb_streams; i++) {
4177 InputStream *ist = input_streams[f->ist_index + i];
/* Compare the stream's decoded position (in microseconds) against elapsed
 * wall-clock time since the stream started. */
4178 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4179 int64_t now = av_gettime_relative() - ist->start;
4181 return AVERROR(EAGAIN);
4186 if (f->thread_queue_size)
4187 return get_input_packet_mt(f, pkt);
4189 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input returned EAGAIN this cycle). */
4192 static int got_eagain(void)
4195 for (i = 0; i < nb_output_streams; i++)
4196 if (output_streams[i]->unavailable)
/* Clear all per-file and per-stream EAGAIN/unavailable markers before the
 * next scheduling pass. */
4201 static void reset_eagain(void)
4204 for (i = 0; i < nb_input_files; i++)
4205 input_files[i]->eagain = 0;
4206 for (i = 0; i < nb_output_streams; i++)
4207 output_streams[i]->unavailable = 0;
4210 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4211 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4212 AVRational time_base)
/* If *duration is unset (elided check), adopt tmp and its timebase. */
4218 return tmp_time_base;
/* Otherwise keep whichever of the two durations is larger, comparing across
 * the two timebases without rescaling losses. */
4221 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4224 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to its start time and
 * accumulate the file's total duration (per stream, including the estimated
 * length of the last frame) so looped timestamps can be offset correctly. */
4230 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4233 AVCodecContext *avctx;
4234 int i, ret, has_audio = 0;
4235 int64_t duration = 0;
4237 ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
/* First pass: detect whether any audio stream produced samples. */
4241 for (i = 0; i < ifile->nb_streams; i++) {
4242 ist = input_streams[ifile->ist_index + i];
4243 avctx = ist->dec_ctx;
4245 /* duration is the length of the last frame in a stream
4246 * when audio stream is present we don't care about
4247 * last video frame length because it's not defined exactly */
4248 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4252 for (i = 0; i < ifile->nb_streams; i++) {
4253 ist = input_streams[ifile->ist_index + i];
4254 avctx = ist->dec_ctx;
/* Audio: last-frame length derived from the decoded sample count. */
4257 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4258 AVRational sample_rate = {1, avctx->sample_rate};
4260 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video (elided branch above): one frame at the forced or average rate. */
4265 if (ist->framerate.num) {
4266 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4267 } else if (ist->st->avg_frame_rate.num) {
4268 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4273 if (!ifile->duration)
4274 ifile->time_base = ist->st->time_base;
4275 /* the total duration of the stream, max_pts - min_pts is
4276 * the duration of the stream without the last frame */
/* Overflow-guarded: only add the pts span if it fits alongside duration. */
4277 if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4278 duration += ist->max_pts - ist->min_pts;
4279 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Decrement the remaining loop count (body elided) when loop > 0. */
4283 if (ifile->loop > 0)
4291 * - 0 -- one packet was read and processed
4292 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4293 * this function should be called again
4294 * - AVERROR_EOF -- this function should not be called again
4296 static int process_input(int file_index)
4298 InputFile *ifile = input_files[file_index];
4299 AVFormatContext *is;
4302 int ret, thread_ret, i, j;
4305 int disable_discontinuity_correction = copy_ts;
4308 ret = get_input_packet(ifile, &pkt);
4310 if (ret == AVERROR(EAGAIN)) {
/* EOF/error with -stream_loop pending: flush decoders, restart the demuxer
 * thread around the seek, and try reading again from the start. */
4314 if (ret < 0 && ifile->loop) {
4315 AVCodecContext *avctx;
4316 for (i = 0; i < ifile->nb_streams; i++) {
4317 ist = input_streams[ifile->ist_index + i];
4318 avctx = ist->dec_ctx;
4319 if (ist->decoding_needed) {
4320 ret = process_input_packet(ist, NULL, 1);
4323 avcodec_flush_buffers(avctx);
4327 free_input_thread(file_index);
4329 ret = seek_to_start(ifile, is);
4331 thread_ret = init_input_thread(file_index);
4336 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4338 ret = get_input_packet(ifile, &pkt);
4339 if (ret == AVERROR(EAGAIN)) {
4345 if (ret != AVERROR_EOF) {
4346 print_error(is->url, ret);
/* Definitive EOF: flush every decoder of this file and finish outputs that
 * do not go through lavfi, then report EAGAIN once so other files drain. */
4351 for (i = 0; i < ifile->nb_streams; i++) {
4352 ist = input_streams[ifile->ist_index + i];
4353 if (ist->decoding_needed) {
4354 ret = process_input_packet(ist, NULL, 0);
4359 /* mark all outputs that don't go through lavfi as finished */
4360 for (j = 0; j < nb_output_streams; j++) {
4361 OutputStream *ost = output_streams[j];
4363 if (ost->source_index == ifile->ist_index + i &&
4364 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4365 finish_output_stream(ost);
4369 ifile->eof_reached = 1;
4370 return AVERROR(EAGAIN);
4376 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4377 is->streams[pkt.stream_index]);
4379 /* the following test is needed in case new streams appear
4380 dynamically in stream : we ignore them */
4381 if (pkt.stream_index >= ifile->nb_streams) {
4382 report_new_stream(file_index, &pkt);
4383 goto discard_packet;
4386 ist = input_streams[ifile->ist_index + pkt.stream_index];
4388 ist->data_size += pkt.size;
/* Elided condition above (line 4391) — presumably ist->discard. */
4392 goto discard_packet;
4394 if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4395 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4396 "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4402 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4403 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4404 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4405 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4406 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4407 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4408 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4409 av_ts2str(input_files[ist->file_index]->ts_offset),
4410 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for formats with < 64 pts_wrap_bits (e.g. MPEG-TS). */
4413 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4414 int64_t stime, stime2;
4415 // Correcting starttime based on the enabled streams
4416 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4417 // so we instead do it here as part of discontinuity handling
4418 if ( ist->next_dts == AV_NOPTS_VALUE
4419 && ifile->ts_offset == -is->start_time
4420 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4421 int64_t new_start_time = INT64_MAX;
4422 for (i=0; i<is->nb_streams; i++) {
4423 AVStream *st = is->streams[i];
4424 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4426 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4428 if (new_start_time > is->start_time) {
4429 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4430 ifile->ts_offset = -new_start_time;
4434 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4435 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4436 ist->wrap_correction_done = 1;
/* Unwrap dts/pts that jumped past the wrap point; keep the correction flag
 * cleared until a timestamp confirms we are past the boundary. */
4438 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4439 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4440 ist->wrap_correction_done = 0;
4442 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4443 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4444 ist->wrap_correction_done = 0;
4448 /* add the stream-global side data to the first packet */
4449 if (ist->nb_packets == 1) {
4450 for (i = 0; i < ist->st->nb_side_data; i++) {
4451 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Display matrix is handled by autorotate/filters, not copied here. */
4454 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
/* Don't overwrite side data the packet already carries. */
4457 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4460 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4464 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the accumulated file-level timestamp offset, then -itsscale. */
4468 if (pkt.dts != AV_NOPTS_VALUE)
4469 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4470 if (pkt.pts != AV_NOPTS_VALUE)
4471 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4473 if (pkt.pts != AV_NOPTS_VALUE)
4474 pkt.pts *= ist->ts_scale;
4475 if (pkt.dts != AV_NOPTS_VALUE)
4476 pkt.dts *= ist->ts_scale;
4478 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* Inter-stream discontinuity: first dts of this stream is far from the last
 * timestamp seen on the file — shift the whole file offset. */
4479 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4480 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4481 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4482 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4483 int64_t delta = pkt_dts - ifile->last_ts;
4484 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4485 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4486 ifile->ts_offset -= delta;
4487 av_log(NULL, AV_LOG_DEBUG,
4488 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4489 delta, ifile->ts_offset);
4490 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4491 if (pkt.pts != AV_NOPTS_VALUE)
4492 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: push timestamps forward by the accumulated file duration. */
4496 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4497 if (pkt.pts != AV_NOPTS_VALUE) {
4498 pkt.pts += duration;
4499 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4500 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4503 if (pkt.dts != AV_NOPTS_VALUE)
4504 pkt.dts += duration;
4506 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* With copy_ts, still allow correction when the dts looks like a genuine
 * pts_wrap rather than a discontinuity. */
4508 if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4509 (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4510 int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4511 ist->st->time_base, AV_TIME_BASE_Q,
4512 AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4513 if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4514 disable_discontinuity_correction = 0;
/* Intra-stream discontinuity: dts disagrees with the predicted next_dts. */
4517 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4518 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4519 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4520 !disable_discontinuity_correction) {
4521 int64_t delta = pkt_dts - ist->next_dts;
4522 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4523 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4524 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4525 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4526 ifile->ts_offset -= delta;
4527 av_log(NULL, AV_LOG_DEBUG,
4528 "timestamp discontinuity for stream #%d:%d "
4529 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4530 ist->file_index, ist->st->index, ist->st->id,
4531 av_get_media_type_string(ist->dec_ctx->codec_type),
4532 delta, ifile->ts_offset);
4533 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4534 if (pkt.pts != AV_NOPTS_VALUE)
4535 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-DISCONT formats: beyond the error threshold, drop the bad timestamp
 * instead of shifting the file offset. */
4538 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4539 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4540 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4541 pkt.dts = AV_NOPTS_VALUE;
4543 if (pkt.pts != AV_NOPTS_VALUE){
4544 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4545 delta = pkt_pts - ist->next_dts;
4546 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4547 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4548 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4549 pkt.pts = AV_NOPTS_VALUE;
4555 if (pkt.dts != AV_NOPTS_VALUE)
4556 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4559 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4560 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4561 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4562 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4563 av_ts2str(input_files[ist->file_index]->ts_offset),
4564 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4567 sub2video_heartbeat(ist, pkt.pts);
4569 process_input_packet(ist, &pkt, 0);
4572 av_packet_unref(&pkt);
4578 * Perform a step of transcoding for the specified filter graph.
4580 * @param[in] graph filter graph to consider
4581 * @param[out] best_ist input stream where a frame would allow to continue
4582 * @return 0 for success, <0 for error
4584 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4587 int nb_requests, nb_requests_max = 0;
4588 InputFilter *ifilter;
/* Ask the graph to produce output from its oldest pending frame. */
4592 ret = avfilter_graph_request_oldest(graph->graph);
4594 return reap_filters(0);
/* Graph fully drained: flush the sinks and close all dependent outputs. */
4596 if (ret == AVERROR_EOF) {
4597 ret = reap_filters(1);
4598 for (i = 0; i < graph->nb_outputs; i++)
4599 close_output_stream(graph->outputs[i]->ost);
4602 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph is starved. Pick the input whose buffersrc has failed
 * the most requests — feeding it is most likely to unblock the graph. */
4605 for (i = 0; i < graph->nb_inputs; i++) {
4606 ifilter = graph->inputs[i];
/* Skip inputs whose file cannot currently deliver packets. */
4608 if (input_files[ist->file_index]->eagain ||
4609 input_files[ist->file_index]->eof_reached)
4611 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4612 if (nb_requests > nb_requests_max) {
4613 nb_requests_max = nb_requests;
/* No feedable input found (elided check): mark every dependent output
 * unavailable so the scheduler moves on this cycle. */
4619 for (i = 0; i < graph->nb_outputs; i++)
4620 graph->outputs[i]->ost->unavailable = 1;
4626 * Run a single step of transcoding.
4628 * @return 0 for success, <0 for error
4630 static int transcode_step(void)
/* NOTE(review): interleaved lines are missing from this excerpt (braces,
 * declarations, some continue/return statements); code is kept byte-identical
 * and only comments were added.
 *
 * Runs a single step of transcoding: chooses an output stream, ensures its
 * filter graph is configured, then either pulls a frame from the graph or
 * reads more input for the stream that needs it, and finally reaps any
 * filtered frames. Returns 0 on success, <0 on error. */
4633 InputStream *ist = NULL;
4636 ost = choose_output();
/* No output stream is available and nothing is left to read. */
4643 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* The output has a filter graph that is not yet configured; configure it
 * once the formats of all its inputs are known. */
4647 if (ost->filter && !ost->filter->graph->graph) {
4648 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4649 ret = configure_filtergraph(ost->filter->graph);
4651 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* The graph is configured: pull from it, initializing audio encoders early
 * (rationale in the original comment block below). */
4657 if (ost->filter && ost->filter->graph->graph) {
4659 * Similar case to the early audio initialization in reap_filters.
4660 * Audio is special in ffmpeg.c currently as we depend on lavfi's
4661 * audio frame buffering/creation to get the output audio frame size
4662 * in samples correct. The audio frame size for the filter chain is
4663 * configured during the output stream initialization.
4665 * Apparently avfilter_graph_request_oldest (called in
4666 * transcode_from_filter just down the line) peeks. Peeking already
4667 * puts one frame "ready to be given out", which means that any
4668 * update in filter buffer sink configuration afterwards will not
4669 * help us. And yes, even if it would be utilized,
4670 * av_buffersink_get_samples is affected, as it internally utilizes
4671 * the same early exit for peeked frames.
4673 * In other words, if avfilter_graph_request_oldest would not make
4674 * further filter chain configuration or usage of
4675 * av_buffersink_get_samples useless (by just causing the return
4676 * of the peeked AVFrame as-is), we could get rid of this additional
4677 * early encoder initialization.
4679 if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4680 init_output_stream_wrapper(ost, NULL, 1);
/* Let the graph decide which input stream (ist) needs feeding. */
4682 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Graph exists but is unconfigured: check whether any input can still
 * deliver data; if none can, the output's inputs are exhausted. */
4686 } else if (ost->filter) {
4688 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4689 InputFilter *ifilter = ost->filter->graph->inputs[i];
4690 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4696 ost->inputs_done = 1;
/* No filter graph: feed directly from the mapped source stream. */
4700 av_assert0(ost->source_index >= 0);
4701 ist = input_streams[ost->source_index];
/* Read and process one packet from the chosen input file. */
4704 ret = process_input(ist->file_index);
4705 if (ret == AVERROR(EAGAIN)) {
4706 if (input_files[ist->file_index]->eagain)
4707 ost->unavailable = 1;
/* EOF from process_input is not an error for this step. */
4712 return ret == AVERROR_EOF ? 0 : ret;
4714 return reap_filters(0);
4718 * The following code is the main loop of the file converter
4720 static int transcode(void)
/* NOTE(review): interleaved lines are missing from this excerpt (braces,
 * declarations, gotos, some cleanup); code kept byte-identical, comments
 * only added.
 *
 * Main conversion loop: initializes transcoding, repeatedly calls
 * transcode_step() until no output is needed or an error/signal occurs,
 * then flushes decoders, writes trailers, prints the final report and
 * releases per-stream resources. Returns 0 on success, <0 on error. */
4723 AVFormatContext *os;
4726 int64_t timer_start;
4727 int64_t total_packets_written = 0;
4729 ret = transcode_init();
4733 if (stdin_interaction) {
4734 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
/* Reference time for the progress report and benchmarking. */
4737 timer_start = av_gettime_relative();
4740 if ((ret = init_input_threads()) < 0)
/* Loop until a termination signal is received or output is complete. */
4744 while (!received_sigterm) {
4745 int64_t cur_time= av_gettime_relative();
4747 /* if 'q' pressed, exits */
4748 if (stdin_interaction)
4749 if (check_keyboard_interaction(cur_time) < 0)
4752 /* check if there's any stream where output is still needed */
4753 if (!need_output()) {
4754 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4758 ret = transcode_step();
/* EOF is the normal way a step ends; anything else <0 is a real error. */
4759 if (ret < 0 && ret != AVERROR_EOF) {
4760 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4764 /* dump report by using the output first video and audio streams */
4765 print_report(0, timer_start, cur_time);
4768 free_input_threads();
4771 /* at the end of stream, we must flush the decoder buffers */
4772 for (i = 0; i < nb_input_streams; i++) {
4773 ist = input_streams[i];
4774 if (!input_files[ist->file_index]->eof_reached) {
/* NULL packet = flush request for this stream's decoder. */
4775 process_input_packet(ist, NULL, 0);
4782 /* write the trailer if needed and close file */
4783 for (i = 0; i < nb_output_files; i++) {
4784 os = output_files[i]->ctx;
/* A header was never written: every stream of this file stayed empty,
 * so there is nothing valid to finalize. */
4785 if (!output_files[i]->header_written) {
4786 av_log(NULL, AV_LOG_ERROR,
4787 "Nothing was written into output file %d (%s), because "
4788 "at least one of its streams received no packets.\n",
4792 if ((ret = av_write_trailer(os)) < 0) {
4793 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4799 /* dump report by using the first video and audio streams */
4800 print_report(1, timer_start, av_gettime_relative());
4802 /* close each encoder */
4803 for (i = 0; i < nb_output_streams; i++) {
4804 ost = output_streams[i];
4805 if (ost->encoding_needed) {
4806 av_freep(&ost->enc_ctx->stats_in);
4808 total_packets_written += ost->packets_written;
/* Optionally abort when a stream produced no packets at all. */
4809 if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4810 av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4815 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4816 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4820 /* close each decoder */
4821 for (i = 0; i < nb_input_streams; i++) {
4822 ist = input_streams[i];
4823 if (ist->decoding_needed) {
4824 avcodec_close(ist->dec_ctx);
4825 if (ist->hwaccel_uninit)
4826 ist->hwaccel_uninit(ist->dec_ctx);
4830 hw_device_free_all();
/* Cleanup path (presumably also reached via goto on errors — the labels
 * are not visible in this excerpt; confirm against the full file). */
4837 free_input_threads();
4840 if (output_streams) {
4841 for (i = 0; i < nb_output_streams; i++) {
4842 ost = output_streams[i];
/* Close the 2-pass log file, if one was opened for this stream. */
4845 if (fclose(ost->logfile))
4846 av_log(NULL, AV_LOG_ERROR,
4847 "Error closing logfile, loss of information possible: %s\n",
4848 av_err2str(AVERROR(errno)));
4849 ost->logfile = NULL;
4851 av_freep(&ost->forced_kf_pts);
4852 av_freep(&ost->apad);
4853 av_freep(&ost->disposition);
4854 av_dict_free(&ost->encoder_opts);
4855 av_dict_free(&ost->sws_dict);
4856 av_dict_free(&ost->swr_opts);
4857 av_dict_free(&ost->resample_opts);
4864 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
/* NOTE(review): some lines (opening brace, #if/#else/#endif directives,
 * final return) are missing from this excerpt; code kept byte-identical,
 * comments only added.
 *
 * Samples wall-clock time (via av_gettime_relative in the initializer)
 * plus per-process user/system CPU time for the -benchmark report, using
 * getrusage() where available, GetProcessTimes() on Windows, and zeros
 * otherwise. */
4866 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4868 struct rusage rusage;
4870 getrusage(RUSAGE_SELF, &rusage);
/* Convert timeval (sec + usec) to a single microsecond count. */
4871 time_stamps.user_usec =
4872 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4873 time_stamps.sys_usec =
4874 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4875 #elif HAVE_GETPROCESSTIMES
4877 FILETIME c, e, k, u;
4878 proc = GetCurrentProcess();
4879 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100 ns units; divide by 10 to get microseconds. */
4880 time_stamps.user_usec =
4881 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4882 time_stamps.sys_usec =
4883 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* Fallback: CPU times unavailable on this platform. */
4885 time_stamps.user_usec = time_stamps.sys_usec = 0;
4890 static int64_t getmaxrss(void)
/* NOTE(review): some lines (opening brace, #else/#endif, fallback return)
 * are missing from this excerpt; code kept byte-identical, comments only
 * added.
 *
 * Returns the peak memory usage of this process in bytes, for the
 * -benchmark report: ru_maxrss (kilobytes, hence * 1024) where getrusage
 * supports it, PeakPagefileUsage on Windows. */
4892 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4893 struct rusage rusage;
4894 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is in kilobytes on the platforms that define it. */
4895 return (int64_t)rusage.ru_maxrss * 1024;
4896 #elif HAVE_GETPROCESSMEMORYINFO
4898 PROCESS_MEMORY_COUNTERS memcounters;
4899 proc = GetCurrentProcess();
/* cb must be set to the struct size before the call. */
4900 memcounters.cb = sizeof(memcounters);
4901 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4902 return memcounters.PeakPagefileUsage;
/* No-op log callback, installed in debug mode ('-d') to silence normal
 * logging (body not visible in this excerpt; presumably empty). */
4908 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4912 int main(int argc, char **argv)
/* NOTE(review): interleaved lines are missing from this excerpt (braces,
 * declarations, exit_program calls, #if guards); code kept byte-identical,
 * comments only added.
 *
 * Program entry point: sets up logging and cleanup, parses the command
 * line, opens inputs/outputs, runs transcode(), and optionally prints
 * benchmark statistics before exiting. */
4915 BenchmarkTimeStamps ti;
/* Register the cleanup handler used by exit_program(). */
4919 register_exit(ffmpeg_cleanup);
4921 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4923 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4924 parse_loglevel(argc, argv, options);
/* '-d' as the first argument: debug/daemon mode with silenced logging. */
4926 if(argc>1 && !strcmp(argv[1], "-d")){
4928 av_log_set_callback(log_callback_null);
4934 avdevice_register_all();
4936 avformat_network_init();
4938 show_banner(argc, argv, options);
4940 /* parse options and open all input/output files */
4941 ret = ffmpeg_parse_options(argc, argv);
4945 if (nb_output_files <= 0 && nb_input_files == 0) {
4947 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4951 /* file converter / grab */
4952 if (nb_output_files <= 0) {
4953 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Scan outputs for the RTP muxer (the action taken when one is found is
 * not visible in this excerpt). */
4957 for (i = 0; i < nb_output_files; i++) {
4958 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Record start timestamps, run the converter, then report CPU/wall time. */
4962 current_time = ti = get_benchmark_time_stamps();
4963 if (transcode() < 0)
4966 int64_t utime, stime, rtime;
4967 current_time = get_benchmark_time_stamps();
4968 utime = current_time.user_usec - ti.user_usec;
4969 stime = current_time.sys_usec - ti.sys_usec;
4970 rtime = current_time.real_usec - ti.real_usec;
4971 av_log(NULL, AV_LOG_INFO,
4972 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4973 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4975 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4976 decode_error_stat[0], decode_error_stat[1]);
/* Fail the run if the decode error rate exceeds -max_error_rate. */
4977 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals an interrupted run (Ctrl-C etc.) to the caller. */
4980 exit_program(received_nb_signals ? 255 : main_return_code);
4981 return main_return_code;