2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity consumed by the cmdutils banner/version machinery. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats); opened on demand. */
112 static FILE *vstats_file;
/* Constant names usable in -force_key_frames expressions.
 * NOTE(review): initializer elements elided in this extract. */
114 const char *const forced_keyframes_const_names[] = {
/* Snapshot of user/system/real time used by -benchmark reporting.
 * NOTE(review): struct members elided in this extract. */
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
/* Forward declarations for helpers defined later in the file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Global transcode state: daemon mode flag, duplicated/dropped frame
 * counters, and a threshold at which repeated-dup warnings escalate. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
/* Whether an SDP should be printed for RTP-style outputs. */
140 static int want_sdp = 1;
142 static BenchmarkTimeStamps current_time;
143 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle payloads (allocated on first use). */
145 static uint8_t *subtitle_out;
/* Global stream/file tables; non-static, so presumably shared with other
 * ffmpeg tool translation units — not visible in this extract. */
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;
162 /* init terminal so that we can grab keys */
/* Saved terminal attributes; restored at exit when restore_tty is set. */
163 static struct termios oldtty;
164 static int restore_tty;
168 static void free_input_threads(void);
/* The two lines below are the body of a block comment whose delimiters
 * were elided in this extract: */
172 Convert subtitles to video with alpha to insert them in filter graphs.
173 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a blank (all-zero, i.e. fully transparent)
 * RGB32 canvas. Dimensions come from the decoder context when known,
 * otherwise from the configured sub2video fallback size (w/h).
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
 * NOTE(review): braces, declarations and return statements elided in this
 * extract. */
176 static int sub2video_get_blank_frame(InputStream *ist)
179 AVFrame *frame = ist->sub2video.frame;
181 av_frame_unref(frame);
182 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
185 if ((ret = av_frame_get_buffer(frame, 0)) < 0)
/* Zero the pixel plane: RGB32 all-zero means transparent black. */
187 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted SUBTITLE_BITMAP rectangle into the RGB32 canvas,
 * expanding each 8-bit palette index through pal[] (r->data[1]).
 * Rectangles that are not bitmaps, or that do not fit inside the w x h
 * canvas, are logged as warnings and skipped (skip path elided here). */
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194 uint32_t *pal, *dst2;
198 if (r->type != SUBTITLE_BITMAP) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
202 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204 r->x, r->y, r->w, r->h, w, h
/* Advance dst to the rectangle's top-left pixel; 4 bytes per RGB32 pixel. */
209 dst += r->y * dst_linesize + r->x * 4;
211 pal = (uint32_t *)r->data[1];
/* Row-by-row palette expansion: each source byte indexes a 32-bit color. */
212 for (y = 0; y < r->h; y++) {
213 dst2 = (uint32_t *)dst;
215 for (x = 0; x < r->w; x++)
216 *(dst2++) = pal[*(src2++)];
218 src += r->linesize[0];
/* Stamp the current sub2video canvas with pts and feed a reference to it
 * into every buffersrc feeding this input stream's filtergraphs.
 * KEEP_REF keeps ownership of the frame here so it can be resent on the
 * next heartbeat; EOF from the buffersrc is deliberately not treated as
 * an error (the graph may already be finished). */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 AVFrame *frame = ist->sub2video.frame;
228 av_assert1(frame->data[0]);
229 ist->sub2video.last_pts = frame->pts = pts;
230 for (i = 0; i < ist->nb_filters; i++) {
231 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232 AV_BUFFERSRC_FLAG_KEEP_REF |
233 AV_BUFFERSRC_FLAG_PUSH);
234 if (ret != AVERROR_EOF && ret < 0)
235 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render the rectangles of `sub` onto a fresh blank canvas and push it to
 * the filtergraphs, recording the display window [pts, end_pts] rescaled
 * from AV_TIME_BASE to the stream time base. When sub is NULL (elided
 * branch) the previous end time / heartbeat pts is used instead so the
 * overlay can be cleared or the system primed. */
240 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 AVFrame *frame = ist->sub2video.frame;
246 int64_t pts, end_pts;
251 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252 AV_TIME_BASE_Q, ist->st->time_base);
253 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254 AV_TIME_BASE_Q, ist->st->time_base);
255 num_rects = sub->num_rects;
257 /* If we are initializing the system, utilize current heartbeat
258 PTS as the start time, and show until the following subpicture
259 is received. Otherwise, utilize the previous subpicture's end time
260 as the fall-back value. */
261 pts = ist->sub2video.initialize ?
262 heartbeat_pts : ist->sub2video.end_pts;
266 if (sub2video_get_blank_frame(ist) < 0) {
267 av_log(ist->dec_ctx, AV_LOG_ERROR,
268 "Impossible to get a blank canvas.\n");
271 dst = frame->data [0];
272 dst_linesize = frame->linesize[0];
/* Composite every rectangle, then hand the finished canvas downstream. */
273 for (i = 0; i < num_rects; i++)
274 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275 sub2video_push_ref(ist, pts);
276 ist->sub2video.end_pts = end_pts;
277 ist->sub2video.initialize = 0;
/* Called whenever a packet is read from the file containing `ist`: re-send
 * the current sub2video frame on every sub2video stream of the same file
 * so filters waiting on a subtitle input (e.g. overlay) do not stall the
 * graph while video frames accumulate. */
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282 InputFile *infile = input_files[ist->file_index];
286 /* When a frame is read from a file, examine all sub2video streams in
287 the same file and send the sub2video frame again. Otherwise, decoded
288 video frames could be accumulating in the filter graph while a filter
289 (possibly overlay) is desperately waiting for a subtitle frame. */
290 for (i = 0; i < infile->nb_streams; i++) {
291 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not running through the sub2video hack. */
292 if (!ist2->sub2video.frame)
294 /* subtitles seem to be usually muxed ahead of other streams;
295 if not, subtracting a larger time here is necessary */
296 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297 /* do not send the heartbeat frame if the subtitle is already ahead */
298 if (pts2 <= ist2->sub2video.last_pts)
300 if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301 /* if we have hit the end of the current displayed subpicture,
302 or if we need to initialize the system, update the
303 overlayed subpicture and its start/end times */
304 sub2video_update(ist2, pts2 + 1, NULL);
/* Only re-push the existing canvas if some buffersrc actually failed a
 * request, i.e. a filter is starved for subtitle input. */
305 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: push one final update so
 * the last subpicture is displayed until INT64_MAX, then send a NULL frame
 * (EOF) to every buffersrc feeding this stream. AVERROR_EOF from the
 * buffersrc is expected and not reported. */
312 static void sub2video_flush(InputStream *ist)
317 if (ist->sub2video.end_pts < INT64_MAX)
318 sub2video_update(ist, INT64_MAX, NULL);
319 for (i = 0; i < ist->nb_filters; i++) {
320 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321 if (ret != AVERROR_EOF && ret < 0)
322 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
326 /* end of sub2video hack */
/* Async-signal-safe part of terminal teardown: restore the saved tty
 * attributes (guard condition elided in this extract — presumably only
 * when restore_tty is set) and silence further logging. */
328 static void term_exit_sigsafe(void)
332 tcsetattr (0, TCSANOW, &oldtty);
338 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping. received_* are written from signal handlers
 * (hence volatile); transcode_init_done is read concurrently by the
 * interrupt callback, so it is a C11 atomic. */
342 static volatile int received_sigterm = 0;
343 static volatile int received_nb_signals = 0;
344 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
345 static volatile int ffmpeg_exited = 0;
346 static int main_return_code = 0;
/* First pts seen when -copyts is in effect; AV_NOPTS_VALUE until set. */
347 static int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
/* Signal handler: record which signal arrived and how many times. After
 * more than 3 signals, write a message straight to fd 2 (write() is
 * async-signal-safe, unlike stdio/av_log) — the hard-exit call itself is
 * elided in this extract. */
350 sigterm_handler(int sig)
353 received_sigterm = sig;
354 received_nb_signals++;
356 if(received_nb_signals > 3) {
357 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
358 strlen("Received > 3 system signals, hard exiting\n"));
/* write()'s result is checked only to placate warn_unused_result. */
359 if (ret < 0) { /* Do nothing */ };
364 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * sigterm_handler() path used on POSIX (Ctrl+C/Break -> SIGINT-like,
 * close/logoff/shutdown -> SIGTERM-like). */
365 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
372 case CTRL_BREAK_EVENT:
373 sigterm_handler(SIGINT);
376 case CTRL_CLOSE_EVENT:
377 case CTRL_LOGOFF_EVENT:
378 case CTRL_SHUTDOWN_EVENT:
379 sigterm_handler(SIGTERM);
380 /* Basically, with these 3 events, when we return from this method the
381 process is hard terminated, so stall as long as we need to
382 to try and let the main thread(s) clean up and gracefully terminate
383 (we have at most 5 seconds, but should be done far before that). */
384 while (!ffmpeg_exited) {
390 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Body of the terminal/signal initialization routine (its function header
 * is elided in this extract): put the tty into raw-ish mode for single-key
 * reads when interactive, then install the signal/console handlers. */
399 if (!run_as_daemon && stdin_interaction) {
401 if (tcgetattr (0, &tty) == 0) {
/* Classic raw-mode setup: disable input translation, canonical mode and
 * echo; keep output post-processing enabled. */
405 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
406 |INLCR|IGNCR|ICRNL|IXON);
407 tty.c_oflag |= OPOST;
408 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
409 tty.c_cflag &= ~(CSIZE|PARENB);
414 tcsetattr (0, TCSANOW, &tty);
416 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
420 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
421 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
423 signal(SIGXCPU, sigterm_handler);
426 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
428 #if HAVE_SETCONSOLECTRLHANDLER
429 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
433 /* read a key without blocking */
/* Non-blocking single-key read. POSIX path: select() on stdin with a zero
 * timeout. Windows path: PeekNamedPipe when stdin is a pipe (e.g. when run
 * under a GUI), console input otherwise. Most of both paths are elided in
 * this extract. */
434 static int read_key(void)
446 n = select(1, &rfds, NULL, NULL, &tv);
455 # if HAVE_PEEKNAMEDPIPE
457 static HANDLE input_handle;
460 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for non-console handles => stdin is a pipe. */
461 is_pipe = !GetConsoleMode(input_handle, &dw);
465 /* When running under a GUI, you will end here. */
466 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
467 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has been
 * received — one signal suffices after init is done, two are required
 * before (so a single Ctrl+C during setup still allows cleanup). */
485 static int decode_interrupt_cb(void *ctx)
487 return received_nb_signals > atomic_load(&transcode_init_done);
490 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown run at program exit: free every filtergraph, output
 * file/stream, input file/stream and associated queues, close the vstats
 * file, deinit the network layer and report the final status. `ret` is
 * the exit code being propagated. Many guard/brace lines are elided in
 * this extract. */
492 static void ffmpeg_cleanup(int ret)
497 int maxrss = getmaxrss() / 1024;
498 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: drain queued frames/subtitles, then free all filter
 * descriptions and the graphs themselves --- */
501 for (i = 0; i < nb_filtergraphs; i++) {
502 FilterGraph *fg = filtergraphs[i];
503 avfilter_graph_free(&fg->graph);
504 for (j = 0; j < fg->nb_inputs; j++) {
505 InputFilter *ifilter = fg->inputs[j];
506 struct InputStream *ist = ifilter->ist;
/* Drain and free any frames still queued for this filter input. */
508 while (av_fifo_size(ifilter->frame_queue)) {
510 av_fifo_generic_read(ifilter->frame_queue, &frame,
511 sizeof(frame), NULL);
512 av_frame_free(&frame);
514 av_fifo_freep(&ifilter->frame_queue);
/* Likewise for subtitles buffered by the sub2video hack. */
515 if (ist->sub2video.sub_queue) {
516 while (av_fifo_size(ist->sub2video.sub_queue)) {
518 av_fifo_generic_read(ist->sub2video.sub_queue,
519 &sub, sizeof(sub), NULL);
520 avsubtitle_free(&sub);
522 av_fifo_freep(&ist->sub2video.sub_queue);
524 av_buffer_unref(&ifilter->hw_frames_ctx);
525 av_freep(&ifilter->name);
526 av_freep(&fg->inputs[j]);
528 av_freep(&fg->inputs);
529 for (j = 0; j < fg->nb_outputs; j++) {
530 OutputFilter *ofilter = fg->outputs[j];
532 avfilter_inout_free(&ofilter->out_tmp);
533 av_freep(&ofilter->name);
534 av_freep(&ofilter->formats);
535 av_freep(&ofilter->channel_layouts);
536 av_freep(&ofilter->sample_rates);
537 av_freep(&fg->outputs[j]);
539 av_freep(&fg->outputs);
540 av_freep(&fg->graph_desc);
542 av_freep(&filtergraphs[i]);
544 av_freep(&filtergraphs);
546 av_freep(&subtitle_out);
/* --- output files: close muxer I/O (when the format owns a file) and
 * free the format contexts --- */
549 for (i = 0; i < nb_output_files; i++) {
550 OutputFile *of = output_files[i];
555 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
557 avformat_free_context(s);
558 av_dict_free(&of->opts);
560 av_freep(&output_files[i]);
/* --- output streams: free encoder contexts, filters, queued packets --- */
562 for (i = 0; i < nb_output_streams; i++) {
563 OutputStream *ost = output_streams[i];
568 av_bsf_free(&ost->bsf_ctx);
570 av_frame_free(&ost->filtered_frame);
571 av_frame_free(&ost->last_frame);
572 av_dict_free(&ost->encoder_opts);
574 av_freep(&ost->forced_keyframes);
575 av_expr_free(ost->forced_keyframes_pexpr);
576 av_freep(&ost->avfilter);
577 av_freep(&ost->logfile_prefix);
579 av_freep(&ost->audio_channels_map);
580 ost->audio_channels_mapped = 0;
582 av_dict_free(&ost->sws_dict);
583 av_dict_free(&ost->swr_opts);
585 avcodec_free_context(&ost->enc_ctx);
586 avcodec_parameters_free(&ost->ref_par);
/* Unref any packets that never made it to the muxer. */
588 if (ost->muxing_queue) {
589 while (av_fifo_size(ost->muxing_queue)) {
591 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
592 av_packet_unref(&pkt);
594 av_fifo_freep(&ost->muxing_queue);
597 av_freep(&output_streams[i]);
600 free_input_threads();
/* --- input files and streams --- */
602 for (i = 0; i < nb_input_files; i++) {
603 avformat_close_input(&input_files[i]->ctx);
604 av_freep(&input_files[i]);
606 for (i = 0; i < nb_input_streams; i++) {
607 InputStream *ist = input_streams[i];
609 av_frame_free(&ist->decoded_frame);
610 av_frame_free(&ist->filter_frame);
611 av_dict_free(&ist->decoder_opts);
612 avsubtitle_free(&ist->prev_sub.subtitle);
613 av_frame_free(&ist->sub2video.frame);
614 av_freep(&ist->filters);
615 av_freep(&ist->hwaccel_device);
616 av_freep(&ist->dts_buffer);
618 avcodec_free_context(&ist->dec_ctx);
620 av_freep(&input_streams[i]);
/* fclose() flushes; failure here can mean lost statistics, so report it. */
624 if (fclose(vstats_file))
625 av_log(NULL, AV_LOG_ERROR,
626 "Error closing vstats file, loss of information possible: %s\n",
627 av_err2str(AVERROR(errno)));
629 av_freep(&vstats_filename);
631 av_freep(&input_streams);
632 av_freep(&input_files);
633 av_freep(&output_streams);
634 av_freep(&output_files);
638 avformat_network_deinit();
/* Final status: signal-triggered exit vs. conversion failure. */
640 if (received_sigterm) {
641 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
642 (int) received_sigterm);
643 } else if (ret && atomic_load(&transcode_init_done)) {
644 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to strip options already consumed elsewhere before the leftover
 * check in assert_avoptions(). */
650 void remove_avoptions(AVDictionary **a, AVDictionary *b)
652 AVDictionaryEntry *t = NULL;
654 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
655 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort (fatal log; exit call elided in this extract) if any option in m
 * was not consumed — i.e. the user passed an option no component
 * recognized. */
659 void assert_avoptions(AVDictionary *m)
661 AVDictionaryEntry *t;
662 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
663 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
668 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the user/sys/real time consumed since the last
 * call, labelled by the printf-style fmt/varargs, then (in elided code)
 * refresh `current_time`. A NULL fmt only resets the reference point. */
673 static void update_benchmark(const char *fmt, ...)
675 if (do_benchmark_all) {
676 BenchmarkTimeStamps t = get_benchmark_time_stamps();
682 vsnprintf(buf, sizeof(buf), fmt, va);
684 av_log(NULL, AV_LOG_INFO,
685 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
686 t.user_usec - current_time.user_usec,
687 t.sys_usec - current_time.sys_usec,
688 t.real_usec - current_time.real_usec, buf);
/* Mark every output stream finished: `this_stream` flags are applied to
 * `ost` itself, `others` to all remaining streams (used e.g. after a
 * muxer write failure). */
694 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
697 for (i = 0; i < nb_output_streams; i++) {
698 OutputStream *ost2 = output_streams[i];
699 ost2->finished |= ost == ost2 ? this_stream : others;
/* Deliver one packet to the muxer of `of` for stream `ost`:
 *  - enforce -frames limits (skipped when `unqueue` replays a previously
 *    counted packet),
 *  - buffer the packet in ost->muxing_queue while the muxer header is not
 *    yet written, growing the FIFO within max_muxing_queue_size,
 *  - rescale timestamps to the stream time base and repair/reject
 *    non-monotonic DTS,
 *  - finally hand the packet to av_interleaved_write_frame(), which takes
 *    ownership. On write failure all streams are closed.
 * Several guard/brace/exit lines are elided in this extract. */
703 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
705 AVFormatContext *s = of->ctx;
706 AVStream *st = ost->st;
710 * Audio encoders may split the packets -- #frames in != #packets out.
711 * But there is no reordering, so we can limit the number of output packets
712 * by simply dropping them here.
713 * Counting encoded video frames needs to be done separately because of
714 * reordering, see do_video_out().
715 * Do not count the packet when unqueued because it has been counted when queued.
717 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
718 if (ost->frame_number >= ost->max_frames) {
719 av_packet_unref(pkt);
725 if (!of->header_written) {
726 AVPacket tmp_pkt = {0};
727 /* the muxer is not initialized yet, buffer the packet */
728 if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the queue: double it, but once the buffered byte total exceeds
 * the threshold, cap growth at max_muxing_queue_size. */
729 unsigned int are_we_over_size =
730 (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
731 int new_size = are_we_over_size ?
732 FFMIN(2 * av_fifo_size(ost->muxing_queue),
733 ost->max_muxing_queue_size) :
734 2 * av_fifo_size(ost->muxing_queue);
736 if (new_size <= av_fifo_size(ost->muxing_queue)) {
737 av_log(NULL, AV_LOG_ERROR,
738 "Too many packets buffered for output stream %d:%d.\n",
739 ost->file_index, ost->st->index);
742 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Make the packet refcounted so it survives in the queue, then move
 * ownership into the FIFO. */
746 ret = av_packet_make_refcounted(pkt);
749 av_packet_move_ref(&tmp_pkt, pkt);
750 ost->muxing_queue_data_size += tmp_pkt.size;
751 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* VSYNC_DROP / -async < 0: discard timestamps entirely. */
755 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
756 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
757 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
/* Harvest encoder quality/error stats from packet side data. */
759 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
761 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
763 ost->quality = sd ? AV_RL32(sd) : -1;
764 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
766 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
768 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force packet duration to exactly one frame interval. */
773 if (ost->frame_rate.num && ost->is_cfr) {
774 if (pkt->duration > 0)
775 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
776 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
781 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
783 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
784 if (pkt->dts != AV_NOPTS_VALUE &&
785 pkt->pts != AV_NOPTS_VALUE &&
786 pkt->dts > pkt->pts) {
787 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
789 ost->file_index, ost->st->index);
/* Replace pts/dts with the median of {pts, dts, last_mux_dts+1}:
 * sum minus min minus max leaves the middle value. */
791 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
792 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
793 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
795 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
796 pkt->dts != AV_NOPTS_VALUE &&
797 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
798 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict formats require strictly increasing DTS; NONSTRICT allows
 * equal consecutive DTS. */
799 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
800 if (pkt->dts < max) {
801 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
803 loglevel = AV_LOG_ERROR;
804 av_log(s, loglevel, "Non-monotonous DTS in output stream "
805 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
806 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
808 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
811 av_log(s, loglevel, "changing to %"PRId64". This may result "
812 "in incorrect timestamps in the output file.\n",
814 if (pkt->pts >= pkt->dts)
815 pkt->pts = FFMAX(pkt->pts, max);
820 ost->last_mux_dts = pkt->dts;
822 ost->data_size += pkt->size;
823 ost->packets_written++;
825 pkt->stream_index = ost->index;
/* Optional -debug_ts trace of what goes into the muxer. */
828 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
829 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
830 av_get_media_type_string(ost->enc_ctx->codec_type),
831 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
832 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
837 ret = av_interleaved_write_frame(s, pkt);
839 print_error("av_interleaved_write_frame()", ret);
840 main_return_code = 1;
841 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
843 av_packet_unref(pkt);
/* Mark the encoder for `ost` finished and (in the elided conditional,
 * presumably when the shortest-stream option applies) clamp the owning
 * file's recording_time to this stream's current end timestamp. */
846 static void close_output_stream(OutputStream *ost)
848 OutputFile *of = output_files[ost->file_index];
850 ost->finished |= ENCODER_FINISHED;
852 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
853 of->recording_time = FFMIN(of->recording_time, end);
/* (Original doxygen header; its comment delimiters are elided in this
 * extract.) */
858 * Send a single packet to the output, applying any bitstream filters
859 * associated with the output stream. This may result in any number
860 * of packets actually being written, depending on what bitstream
861 * filters are applied. The supplied packet is consumed and will be
862 * blank (as if newly-allocated) when this function returns.
864 * If eof is set, instead indicate EOF to all bitstream filters and
865 * therefore flush any delayed packets to the output. A blank packet
866 * must be supplied in this case.
868 static void output_packet(OutputFile *of, AVPacket *pkt,
869 OutputStream *ost, int eof)
873 /* apply the output bitstream filters */
/* bsf path: feed the packet (or NULL at EOF), then drain every filtered
 * packet into write_packet(). EAGAIN just means "send more input". */
875 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
878 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
879 write_packet(of, pkt, ost, 0);
880 if (ret == AVERROR(EAGAIN))
/* No bitstream filter configured: write the packet directly. */
883 write_packet(of, pkt, ost, 0);
886 if (ret < 0 && ret != AVERROR_EOF) {
887 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
888 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether `ost` may still emit frames: when the output file has a
 * -t limit and the stream's current position has reached it, close the
 * stream and (in elided code) return 0; otherwise return 1. */
894 static int check_recording_time(OutputStream *ost)
896 OutputFile *of = output_files[ost->file_index];
898 if (of->recording_time != INT64_MAX &&
899 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
900 AV_TIME_BASE_Q) >= 0) {
901 close_output_stream(ost);
/* Rescale frame->pts from the buffersink time base to the encoder time
 * base, subtracting the output file's start time, and return the same
 * value as a double with extra fractional precision (used by the video
 * sync logic). Returns AV_NOPTS_VALUE (as double) when no usable pts or
 * filter graph is available. */
907 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
910 double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
911 AVCodecContext *enc = ost->enc_ctx;
912 if (!frame || frame->pts == AV_NOPTS_VALUE ||
913 !enc || !ost->filter || !ost->filter->graph->graph)
917 AVFilterContext *filter = ost->filter->filter;
919 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
920 AVRational filter_tb = av_buffersink_get_time_base(filter);
921 AVRational tb = enc->time_base;
/* Widen the denominator by up to 16 bits so the rescale keeps sub-tick
 * precision for the floating-point pts. */
922 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
924 tb.den <<= extra_bits;
926 av_rescale_q(frame->pts, filter_tb, tb) -
927 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
928 float_pts /= 1 << extra_bits;
929 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
930 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* Integer pts for the frame itself, in the encoder time base. */
933 av_rescale_q(frame->pts, filter_tb, enc->time_base) -
934 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
/* -debug_ts trace of the filter -> encoder handoff. */
940 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
941 frame ? av_ts2str(frame->pts) : "NULL",
942 frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
944 enc ? enc->time_base.num : -1,
945 enc ? enc->time_base.den : -1);
/* Forward declaration: the real initializer is defined later in the file. */
951 static int init_output_stream(OutputStream *ost, AVFrame *frame,
952 char *error, int error_len);
/* Idempotent wrapper around init_output_stream(): a no-op once the stream
 * is initialized, otherwise initializes it and logs (and, in elided code,
 * aborts on) any failure with the collected error text. */
954 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
957 int ret = AVERROR_BUG;
958 char error[1024] = {0};
960 if (ost->initialized)
963 ret = init_output_stream(ost, frame, error, sizeof(error));
965 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
966 ost->file_index, ost->index, error);
/* Encode one audio frame for `ost` and push every produced packet to the
 * muxer: sync the frame pts against ost->sync_opts, send it to the
 * encoder, then drain avcodec_receive_packet() until EAGAIN/EOF. Error
 * exits and several braces are elided in this extract. */
975 static void do_audio_out(OutputFile *of, OutputStream *ost,
978 AVCodecContext *enc = ost->enc_ctx;
982 av_init_packet(&pkt);
986 adjust_frame_pts_to_encoder_tb(of, ost, frame);
/* Stop encoding once the -t limit for this stream is reached. */
988 if (!check_recording_time(ost))
/* Without a usable pts (or with -async < 0) fall back to the running
 * sample counter; then advance it by this frame's samples. */
991 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
992 frame->pts = ost->sync_opts;
993 ost->sync_opts = frame->pts + frame->nb_samples;
994 ost->samples_encoded += frame->nb_samples;
995 ost->frames_encoded++;
997 av_assert0(pkt.size || !pkt.data);
998 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder. */
1000 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1001 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1002 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1003 enc->time_base.num, enc->time_base.den);
1006 ret = avcodec_send_frame(enc, frame);
/* Drain loop: EAGAIN means the encoder wants more input, not an error. */
1011 ret = avcodec_receive_packet(enc, &pkt);
1012 if (ret == AVERROR(EAGAIN))
1017 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1019 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1022 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1023 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1024 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1025 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1028 output_packet(of, &pkt, ost, 0);
1033 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle for `ost` and emit the resulting packet(s).
 * DVB subtitles are encoded twice (one packet draws, one clears), other
 * codecs once; timestamps honor -ss/-t via the output file start time.
 * Error exits and several braces are elided in this extract. */
1037 static void do_subtitle_out(OutputFile *of,
1041 int subtitle_out_max_size = 1024 * 1024;
1042 int subtitle_out_size, nb, i;
1043 AVCodecContext *enc;
1047 if (sub->pts == AV_NOPTS_VALUE) {
1048 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer (freed in cleanup). */
1056 if (!subtitle_out) {
1057 subtitle_out = av_malloc(subtitle_out_max_size);
1058 if (!subtitle_out) {
1059 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1064 /* Note: DVB subtitle need one packet to draw them and one other
1065 packet to clear them */
1066 /* XXX: signal it in the codec context ? */
1067 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1072 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1074 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1075 pts -= output_files[ost->file_index]->start_time;
/* nb is 1 normally, 2 for DVB (draw + clear pass). */
1076 for (i = 0; i < nb; i++) {
1077 unsigned save_num_rects = sub->num_rects;
1079 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1080 if (!check_recording_time(ost))
1084 // start_display_time is required to be 0
/* Fold start_display_time into pts so the encoder sees a 0 start. */
1085 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1086 sub->end_display_time -= sub->start_display_time;
1087 sub->start_display_time = 0;
1091 ost->frames_encoded++;
1093 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1094 subtitle_out_max_size, sub);
/* The encoder may have zeroed num_rects for the DVB "clear" pass;
 * restore it for the next iteration. */
1096 sub->num_rects = save_num_rects;
1097 if (subtitle_out_size < 0) {
1098 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1102 av_init_packet(&pkt);
1103 pkt.data = subtitle_out;
1104 pkt.size = subtitle_out_size;
1105 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1106 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1107 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1108 /* XXX: the pts correction is handled here. Maybe handling
1109 it in the codec would be better */
/* First DVB packet draws at start time, second clears at end time. */
1111 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1113 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1116 output_packet(of, &pkt, ost, 0);
1120 static void do_video_out(OutputFile *of,
1122 AVFrame *next_picture)
1124 int ret, format_video_sync;
1126 AVCodecContext *enc = ost->enc_ctx;
1127 AVRational frame_rate;
1128 int nb_frames, nb0_frames, i;
1129 double delta, delta0;
1130 double duration = 0;
1131 double sync_ipts = AV_NOPTS_VALUE;
1133 InputStream *ist = NULL;
1134 AVFilterContext *filter = ost->filter->filter;
1136 init_output_stream_wrapper(ost, next_picture, 1);
1137 sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1139 if (ost->source_index >= 0)
1140 ist = input_streams[ost->source_index];
1142 frame_rate = av_buffersink_get_frame_rate(filter);
1143 if (frame_rate.num > 0 && frame_rate.den > 0)
1144 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1146 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1147 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1149 if (!ost->filters_script &&
1151 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1154 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1155 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1158 if (!next_picture) {
1160 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1161 ost->last_nb0_frames[1],
1162 ost->last_nb0_frames[2]);
1164 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1165 delta = delta0 + duration;
1167 /* by default, we output a single frame */
1168 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1171 format_video_sync = video_sync_method;
1172 if (format_video_sync == VSYNC_AUTO) {
1173 if(!strcmp(of->ctx->oformat->name, "avi")) {
1174 format_video_sync = VSYNC_VFR;
1176 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1178 && format_video_sync == VSYNC_CFR
1179 && input_files[ist->file_index]->ctx->nb_streams == 1
1180 && input_files[ist->file_index]->input_ts_offset == 0) {
1181 format_video_sync = VSYNC_VSCFR;
1183 if (format_video_sync == VSYNC_CFR && copy_ts) {
1184 format_video_sync = VSYNC_VSCFR;
1187 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1191 format_video_sync != VSYNC_PASSTHROUGH &&
1192 format_video_sync != VSYNC_DROP) {
1193 if (delta0 < -0.6) {
1194 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1196 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1197 sync_ipts = ost->sync_opts;
1202 switch (format_video_sync) {
1204 if (ost->frame_number == 0 && delta0 >= 0.5) {
1205 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1208 ost->sync_opts = llrint(sync_ipts);
1211 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1212 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1214 } else if (delta < -1.1)
1216 else if (delta > 1.1) {
1217 nb_frames = lrintf(delta);
1219 nb0_frames = llrintf(delta0 - 0.6);
1225 else if (delta > 0.6)
1226 ost->sync_opts = llrint(sync_ipts);
1229 case VSYNC_PASSTHROUGH:
1230 ost->sync_opts = llrint(sync_ipts);
1237 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1238 nb0_frames = FFMIN(nb0_frames, nb_frames);
1240 memmove(ost->last_nb0_frames + 1,
1241 ost->last_nb0_frames,
1242 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1243 ost->last_nb0_frames[0] = nb0_frames;
1245 if (nb0_frames == 0 && ost->last_dropped) {
1247 av_log(NULL, AV_LOG_VERBOSE,
1248 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1249 ost->frame_number, ost->st->index, ost->last_frame->pts);
1251 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1252 if (nb_frames > dts_error_threshold * 30) {
1253 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1257 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1258 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1259 if (nb_frames_dup > dup_warning) {
1260 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1264 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1266 /* duplicates frame if needed */
1267 for (i = 0; i < nb_frames; i++) {
1268 AVFrame *in_picture;
1269 int forced_keyframe = 0;
1271 av_init_packet(&pkt);
1275 if (i < nb0_frames && ost->last_frame) {
1276 in_picture = ost->last_frame;
1278 in_picture = next_picture;
1283 in_picture->pts = ost->sync_opts;
1285 if (!check_recording_time(ost))
1288 in_picture->quality = enc->global_quality;
1289 in_picture->pict_type = 0;
1291 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1292 in_picture->pts != AV_NOPTS_VALUE)
1293 ost->forced_kf_ref_pts = in_picture->pts;
1295 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1296 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1297 if (ost->forced_kf_index < ost->forced_kf_count &&
1298 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1299 ost->forced_kf_index++;
1300 forced_keyframe = 1;
1301 } else if (ost->forced_keyframes_pexpr) {
1303 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1304 res = av_expr_eval(ost->forced_keyframes_pexpr,
1305 ost->forced_keyframes_expr_const_values, NULL);
1306 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1307 ost->forced_keyframes_expr_const_values[FKF_N],
1308 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1309 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1310 ost->forced_keyframes_expr_const_values[FKF_T],
1311 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1314 forced_keyframe = 1;
1315 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1316 ost->forced_keyframes_expr_const_values[FKF_N];
1317 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1318 ost->forced_keyframes_expr_const_values[FKF_T];
1319 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1322 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1323 } else if ( ost->forced_keyframes
1324 && !strncmp(ost->forced_keyframes, "source", 6)
1325 && in_picture->key_frame==1
1327 forced_keyframe = 1;
1330 if (forced_keyframe) {
1331 in_picture->pict_type = AV_PICTURE_TYPE_I;
1332 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1335 update_benchmark(NULL);
1337 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1338 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1339 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1340 enc->time_base.num, enc->time_base.den);
1343 ost->frames_encoded++;
1345 ret = avcodec_send_frame(enc, in_picture);
1348 // Make sure Closed Captions will not be duplicated
1349 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1352 ret = avcodec_receive_packet(enc, &pkt);
1353 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1354 if (ret == AVERROR(EAGAIN))
1360 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1361 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1362 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1363 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1366 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1367 pkt.pts = ost->sync_opts;
1369 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1372 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1373 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1374 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1375 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1378 frame_size = pkt.size;
1379 output_packet(of, &pkt, ost, 0);
1381 /* if two pass, output log */
1382 if (ost->logfile && enc->stats_out) {
1383 fprintf(ost->logfile, "%s", enc->stats_out);
1388 * For video, number of frames in == number of packets out.
1389 * But there may be reordering, so we can't throw away frames on encoder
1390 * flush, we need to limit them here, before they go into encoder.
1392 ost->frame_number++;
1394 if (vstats_filename && frame_size)
1395 do_video_stats(ost, frame_size);
1398 if (!ost->last_frame)
1399 ost->last_frame = av_frame_alloc();
1400 av_frame_unref(ost->last_frame);
1401 if (next_picture && ost->last_frame)
1402 av_frame_ref(ost->last_frame, next_picture);
1404 av_frame_free(&ost->last_frame);
1408 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* Convert a normalized mean squared error into a PSNR value in dB.
 * Callers pass error/scale where scale includes the 255^2 peak term,
 * so the result is the conventional 10*log10(peak^2/MSE).
 * NOTE(review): this excerpt elides some original lines (braces). */
1412 static double psnr(double d)
1414 return -10.0 * log10(d);
/* Append one line of per-frame video statistics for 'ost' to the global
 * vstats_file (opened lazily from vstats_filename on first call).
 * frame_size is the encoded packet size in bytes for the current frame.
 * NOTE(review): this excerpt elides some original lines (declarations,
 * error handling around fopen, closing braces). */
1417 static void do_video_stats(OutputStream *ost, int frame_size)
1419 AVCodecContext *enc;
1421 double ti1, bitrate, avg_bitrate;
1423 /* this is executed just the first time do_video_stats is called */
1425 vstats_file = fopen(vstats_filename, "w");
1433 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1434 frame_number = ost->st->nb_frames;
/* vstats_version selects between the legacy one-stream format and the
 * newer format that also prints the output file/stream indices. */
1435 if (vstats_version <= 1) {
1436 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1437 ost->quality / (float)FF_QP2LAMBDA);
1439 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1440 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only printed when the encoder was asked to compute error
 * (AV_CODEC_FLAG_PSNR) and a valid luma error is available. */
1443 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1444 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1446 fprintf(vstats_file,"f_size= %6d ", frame_size);
1447 /* compute pts value */
1448 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate assumes one frame per encoder time_base tick;
 * avg_bitrate is total bytes written so far divided by elapsed time. */
1452 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1453 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1454 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1455 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1456 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark 'ost' — and every other stream of the same output file — as fully
 * finished (both encoder and muxer sides), so no further packets are
 * produced for that file.
 * NOTE(review): excerpt elides some original lines (braces, 'i' decl). */
1460 static void finish_output_stream(OutputStream *ost)
1462 OutputFile *of = output_files[ost->file_index];
1465 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1468 for (i = 0; i < of->ctx->nb_streams; i++)
1469 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1474 * Get and encode new output from any of the filtergraphs, without causing
1477 * @return 0 for success, <0 for severe errors
/* Drains every configured buffersink non-blockingly; when 'flush' is set,
 * EOF from a video sink triggers a final do_video_out(NULL) to flush the
 * encoder. NOTE(review): excerpt elides some original lines (loop braces,
 * continue/break statements, 'i'/'ret' declarations). */
1479 static int reap_filters(int flush)
1481 AVFrame *filtered_frame = NULL;
1484 /* Reap all buffers present in the buffer sinks */
1485 for (i = 0; i < nb_output_streams; i++) {
1486 OutputStream *ost = output_streams[i];
1487 OutputFile *of = output_files[ost->file_index];
1488 AVFilterContext *filter;
1489 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph is not configured yet. */
1492 if (!ost->filter || !ost->filter->graph->graph)
1494 filter = ost->filter->filter;
1497 * Unlike video, with audio the audio frame size matters.
1498 * Currently we are fully reliant on the lavfi filter chain to
1499 * do the buffering deed for us, and thus the frame size parameter
1500 * needs to be set accordingly. Where does one get the required
1501 * frame size? From the initialized AVCodecContext of an audio
1502 * encoder. Thus, if we have gotten to an audio stream, initialize
1503 * the encoder earlier than receiving the first AVFrame.
1505 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
1506 init_output_stream_wrapper(ost, NULL, 1);
/* Lazily allocate the reusable per-stream frame. */
1508 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1509 return AVERROR(ENOMEM);
1511 filtered_frame = ost->filtered_frame;
/* NO_REQUEST: only take frames already buffered in the sink; do not
 * pull more data through the graph. */
1514 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1515 AV_BUFFERSINK_FLAG_NO_REQUEST);
1517 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1518 av_log(NULL, AV_LOG_WARNING,
1519 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1520 } else if (flush && ret == AVERROR_EOF) {
1521 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1522 do_video_out(of, ost, NULL);
/* Stream already finished: drop the frame instead of encoding it. */
1526 if (ost->finished) {
1527 av_frame_unref(filtered_frame);
1531 switch (av_buffersink_get_type(filter)) {
1532 case AVMEDIA_TYPE_VIDEO:
1533 if (!ost->frame_aspect_ratio.num)
1534 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1536 do_video_out(of, ost, filtered_frame);
1538 case AVMEDIA_TYPE_AUDIO:
/* Encoders that cannot change parameters mid-stream must receive a
 * normalized channel count from the filter graph. */
1539 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1540 enc->channels != filtered_frame->channels) {
1541 av_log(NULL, AV_LOG_ERROR,
1542 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1545 do_audio_out(of, ost, filtered_frame);
1548 // TODO support subtitle filters
1552 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type byte totals, muxing overhead,
 * and (at verbose level) per-input/per-output stream packet and frame
 * statistics. total_size is the muxed output size in bytes (<0 if unknown).
 * NOTE(review): excerpt elides some original lines (loop/brace closures,
 * some conditionals around the warnings at the end). */
1559 static void print_final_stats(int64_t total_size)
1561 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1562 uint64_t subtitle_size = 0;
1563 uint64_t data_size = 0;
1564 float percent = -1.0;
/* Accumulate written bytes per media type across all output streams. */
1568 for (i = 0; i < nb_output_streams; i++) {
1569 OutputStream *ost = output_streams[i];
1570 switch (ost->enc_ctx->codec_type) {
1571 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1572 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1573 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1574 default: other_size += ost->data_size; break;
1576 extra_size += ost->enc_ctx->extradata_size;
1577 data_size += ost->data_size;
/* Detect first-pass-only runs (pass 1 writes a log, not real output). */
1578 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1579 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead = container bytes beyond the raw stream payload. */
1583 if (data_size && total_size>0 && total_size >= data_size)
1584 percent = 100.0 * (total_size - data_size) / data_size;
1586 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1587 video_size / 1024.0,
1588 audio_size / 1024.0,
1589 subtitle_size / 1024.0,
1590 other_size / 1024.0,
1591 extra_size / 1024.0);
1593 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1595 av_log(NULL, AV_LOG_INFO, "unknown");
1596 av_log(NULL, AV_LOG_INFO, "\n");
1598 /* print verbose per-stream stats */
1599 for (i = 0; i < nb_input_files; i++) {
1600 InputFile *f = input_files[i];
1601 uint64_t total_packets = 0, total_size = 0;
1603 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1606 for (j = 0; j < f->nb_streams; j++) {
1607 InputStream *ist = input_streams[f->ist_index + j];
1608 enum AVMediaType type = ist->dec_ctx->codec_type;
1610 total_size += ist->data_size;
1611 total_packets += ist->nb_packets;
1613 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1614 i, j, media_type_string(type));
1615 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1616 ist->nb_packets, ist->data_size);
1618 if (ist->decoding_needed) {
1619 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1620 ist->frames_decoded);
1621 if (type == AVMEDIA_TYPE_AUDIO)
1622 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1623 av_log(NULL, AV_LOG_VERBOSE, "; ");
1626 av_log(NULL, AV_LOG_VERBOSE, "\n");
1629 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1630 total_packets, total_size);
/* Same breakdown, mirrored for the output side. */
1633 for (i = 0; i < nb_output_files; i++) {
1634 OutputFile *of = output_files[i];
1635 uint64_t total_packets = 0, total_size = 0;
1637 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1640 for (j = 0; j < of->ctx->nb_streams; j++) {
1641 OutputStream *ost = output_streams[of->ost_index + j];
1642 enum AVMediaType type = ost->enc_ctx->codec_type;
1644 total_size += ost->data_size;
1645 total_packets += ost->packets_written;
1647 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1648 i, j, media_type_string(type));
1649 if (ost->encoding_needed) {
1650 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1651 ost->frames_encoded);
1652 if (type == AVMEDIA_TYPE_AUDIO)
1653 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1654 av_log(NULL, AV_LOG_VERBOSE, "; ");
1657 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1658 ost->packets_written, ost->data_size);
1660 av_log(NULL, AV_LOG_VERBOSE, "\n");
1663 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1664 total_packets, total_size);
/* Warn loudly when nothing at all was encoded. */
1666 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1667 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1669 av_log(NULL, AV_LOG_WARNING, "\n");
1671 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic one-line progress report ("frame= ... fps= ... time= ...")
 * to stderr/log and, when -progress is active, a key=value block to
 * progress_avio. Called throughout the run and once more with
 * is_last_report=1, which also triggers print_final_stats().
 * Rate-limited to one update per 500ms via the static last_time.
 * NOTE(review): excerpt elides many original lines (several declarations,
 * brace closures, qp-histogram setup, hours/mins computation). */
1676 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1678 AVBPrint buf, buf_script;
1680 AVFormatContext *oc;
1682 AVCodecContext *enc;
1683 int frame_number, vid, i;
1686 int64_t pts = INT64_MIN + 1;
1687 static int64_t last_time = -1;
1688 static int qp_histogram[52];
1689 int hours, mins, secs, us;
1690 const char *hours_sign;
/* Nothing to do if neither -stats nor -progress is active. */
1694 if (!print_stats && !is_last_report && !progress_avio)
1697 if (!is_last_report) {
1698 if (last_time == -1) {
1699 last_time = cur_time;
/* Throttle: at most one report per 500ms of wall clock. */
1702 if ((cur_time - last_time) < 500000)
1704 last_time = cur_time;
1707 t = (cur_time-timer_start) / 1000000.0;
1710 oc = output_files[0]->ctx;
1712 total_size = avio_size(oc->pb);
1713 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1714 total_size = avio_tell(oc->pb);
1717 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1718 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1719 for (i = 0; i < nb_output_streams; i++) {
1721 ost = output_streams[i];
1723 if (!ost->stream_copy)
1724 q = ost->quality / (float) FF_QP2LAMBDA;
/* Secondary video streams only contribute their quantizer value. */
1726 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1727 av_bprintf(&buf, "q=%2.1f ", q);
1728 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1729 ost->file_index, ost->index, q);
/* First video stream drives frame count / fps in the report. */
1731 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1734 frame_number = ost->frame_number;
1735 fps = t > 1 ? frame_number / t : 0;
1736 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1737 frame_number, fps < 9.95, fps, q);
1738 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1739 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1740 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1741 ost->file_index, ost->index, q);
1743 av_bprintf(&buf, "L");
/* qp_histogram: log2-compressed per-QP frame counts, printed as hex. */
1747 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1749 for (j = 0; j < 32; j++)
1750 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1753 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1755 double error, error_sum = 0;
1756 double scale, scale_sum = 0;
1758 char type[3] = { 'Y','U','V' };
1759 av_bprintf(&buf, "PSNR=");
1760 for (j = 0; j < 3; j++) {
/* Last report uses the encoder's accumulated error over all frames;
 * intermediate reports use the per-frame error in ost->error. */
1761 if (is_last_report) {
1762 error = enc->error[j];
1763 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1765 error = ost->error[j];
1766 scale = enc->width * enc->height * 255.0 * 255.0;
1772 p = psnr(error / scale);
1773 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1774 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1775 ost->file_index, ost->index, type[j] | 32, p);
1777 p = psnr(error_sum / scale_sum);
1778 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1779 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1780 ost->file_index, ost->index, p);
1784 /* compute min output value */
1785 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
1786 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1787 ost->st->time_base, AV_TIME_BASE_Q));
/* With -copyts, report time relative to the first observed pts. */
1789 if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1790 copy_ts_first_pts = pts;
1791 if (copy_ts_first_pts != AV_NOPTS_VALUE)
1792 pts -= copy_ts_first_pts;
1797 nb_frames_drop += ost->last_dropped;
1800 secs = FFABS(pts) / AV_TIME_BASE;
1801 us = FFABS(pts) % AV_TIME_BASE;
1806 hours_sign = (pts < 0) ? "-" : "";
/* -1 is the "N/A" sentinel for both bitrate and speed below. */
1808 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1809 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1811 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1812 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1813 if (pts == AV_NOPTS_VALUE) {
1814 av_bprintf(&buf, "N/A ");
1816 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1817 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1821 av_bprintf(&buf, "bitrate=N/A");
1822 av_bprintf(&buf_script, "bitrate=N/A\n");
1824 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1825 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1828 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1829 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1830 if (pts == AV_NOPTS_VALUE) {
1831 av_bprintf(&buf_script, "out_time_us=N/A\n");
1832 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1833 av_bprintf(&buf_script, "out_time=N/A\n");
1835 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1836 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1837 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1838 hours_sign, hours, mins, secs, us);
1841 if (nb_frames_dup || nb_frames_drop)
1842 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1843 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1844 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1847 av_bprintf(&buf, " speed=N/A");
1848 av_bprintf(&buf_script, "speed=N/A\n");
1850 av_bprintf(&buf, " speed=%4.3gx", speed);
1851 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one terminal line; final gets '\n'. */
1854 if (print_stats || is_last_report) {
1855 const char end = is_last_report ? '\n' : '\r';
1856 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1857 fprintf(stderr, "%s %c", buf.str, end);
1859 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1863 av_bprint_finalize(&buf, NULL);
1865 if (progress_avio) {
1866 av_bprintf(&buf_script, "progress=%s\n",
1867 is_last_report ? "end" : "continue");
1868 avio_write(progress_avio, buf_script.str,
1869 FFMIN(buf_script.len, buf_script.size - 1));
1870 avio_flush(progress_avio);
1871 av_bprint_finalize(&buf_script, NULL);
1872 if (is_last_report) {
1873 if ((ret = avio_closep(&progress_avio)) < 0)
1874 av_log(NULL, AV_LOG_ERROR,
1875 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1880 print_final_stats(total_size);
/* Seed an InputFilter's media parameters from demuxer-level codec
 * parameters. Used when no decoded frame was ever received, so the
 * filtergraph can still be configured with plausible values. */
1883 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1885 // We never got any input. Set a fake format, which will
1886 // come from libavformat.
1887 ifilter->format = par->format;
/* Audio side. */
1888 ifilter->sample_rate = par->sample_rate;
1889 ifilter->channels = par->channels;
1890 ifilter->channel_layout = par->channel_layout;
/* Video side. */
1891 ifilter->width = par->width;
1892 ifilter->height = par->height;
1893 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/* At end of input, drain every audio/video encoder: send a NULL frame to
 * enter flush mode, then pull and mux all remaining packets. Streams that
 * never got initialized are configured here with fake/fallback parameters
 * so a valid (if empty) stream can still be finalized.
 * NOTE(review): excerpt elides many original lines (declarations, error
 * exits, the drain loop's braces, the desc assignments per media type). */
1896 static void flush_encoders(void)
1900 for (i = 0; i < nb_output_streams; i++) {
1901 OutputStream *ost = output_streams[i];
1902 AVCodecContext *enc = ost->enc_ctx;
1903 OutputFile *of = output_files[ost->file_index];
1905 if (!ost->encoding_needed)
1908 // Try to enable encoding with no input frames.
1909 // Maybe we should just let encoding fail instead.
1910 if (!ost->initialized) {
1911 FilterGraph *fg = ost->filter->graph;
1913 av_log(NULL, AV_LOG_WARNING,
1914 "Finishing stream %d:%d without any data written to it.\n",
1915 ost->file_index, ost->st->index);
1917 if (ost->filter && !fg->graph) {
/* Fill in unset input formats from codecpar so the graph can be
 * configured without ever having seen a frame. */
1919 for (x = 0; x < fg->nb_inputs; x++) {
1920 InputFilter *ifilter = fg->inputs[x];
1921 if (ifilter->format < 0)
1922 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1925 if (!ifilter_has_all_input_formats(fg))
1928 ret = configure_filtergraph(fg);
1930 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1934 finish_output_stream(ost);
1937 init_output_stream_wrapper(ost, NULL, 1);
/* Only audio and video encoders need draining here. */
1940 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1944 const char *desc = NULL;
1948 switch (enc->codec_type) {
1949 case AVMEDIA_TYPE_AUDIO:
1952 case AVMEDIA_TYPE_VIDEO:
1959 av_init_packet(&pkt);
1963 update_benchmark(NULL);
/* EAGAIN here means the encoder wants the flush signal (NULL frame)
 * before it will emit the remaining packets. */
1965 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1966 ret = avcodec_send_frame(enc, NULL);
1968 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1975 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1976 if (ret < 0 && ret != AVERROR_EOF) {
1977 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1982 if (ost->logfile && enc->stats_out) {
1983 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: signal the muxer that this stream is done. */
1985 if (ret == AVERROR_EOF) {
1986 output_packet(of, &pkt, ost, 1);
1989 if (ost->finished & MUXER_FINISHED) {
1990 av_packet_unref(&pkt);
1993 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1994 pkt_size = pkt.size;
1995 output_packet(of, &pkt, ost, 0);
1996 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1997 do_video_stats(ost, pkt_size);
2004 * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero when the packet may be muxed: the output stream must be
 * fed from exactly this input stream, and packets before the output file's
 * start time are rejected.
 * NOTE(review): excerpt elides some original lines (the return statements
 * and braces are not visible here). */
2006 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2008 OutputFile *of = output_files[ost->file_index];
2009 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2011 if (ost->source_index != ist_index)
2017 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to the output without re-encoding (-c copy path):
 * applies start-time / recording-time gating, rescales pts/dts/duration
 * into the muxer timebase, and forwards the result to output_packet().
 * pkt==NULL (not visible in this excerpt's elided lines) flushes the
 * output bitstream filters.
 * NOTE(review): excerpt elides some original lines (returns, braces,
 * opkt declaration, the pts-forcing statement body at 2071). */
2023 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2025 OutputFile *of = output_files[ost->file_index];
2026 InputFile *f = input_files [ist->file_index];
2027 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2028 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2031 // EOF: flush output bitstream filters.
2033 av_init_packet(&opkt);
2036 output_packet(of, &opkt, ost, 1);
/* Unless requested, drop leading packets until the first keyframe. */
2040 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2041 !ost->copy_initial_nonkeyframes)
2044 if (!ost->frame_number && !ost->copy_prior_start) {
2045 int64_t comp_start = start_time;
2046 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2047 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2048 if (pkt->pts == AV_NOPTS_VALUE ?
2049 ist->pts < comp_start :
2050 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop copying once the output's -t/-to limit is reached. */
2054 if (of->recording_time != INT64_MAX &&
2055 ist->pts >= of->recording_time + start_time) {
2056 close_output_stream(ost);
2060 if (f->recording_time != INT64_MAX) {
2061 start_time = f->ctx->start_time;
2062 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2063 start_time += f->start_time;
2064 if (ist->pts >= f->recording_time + start_time) {
2065 close_output_stream(ost);
2070 /* force the input stream PTS */
2071 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2074 if (av_packet_ref(&opkt, pkt) < 0)
2077 if (pkt->pts != AV_NOPTS_VALUE)
2078 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2080 if (pkt->dts == AV_NOPTS_VALUE) {
2081 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2082 } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
/* Audio: use av_rescale_delta to avoid rounding drift across packets,
 * keyed on the frame duration in samples. */
2083 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2085 duration = ist->dec_ctx->frame_size;
2086 opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2087 (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2088 &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2089 /* dts will be set immediately afterwards to what pts is now */
2090 opkt.pts = opkt.dts - ost_tb_start_time;
2092 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2093 opkt.dts -= ost_tb_start_time;
2095 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2097 output_packet(of, &opkt, ost, 0);
/* If the decoder reports no channel layout, pick the default layout for
 * its channel count (capped by -guess_layout_max) and warn the user.
 * NOTE(review): excerpt elides some original lines (the early returns and
 * the final return value are not visible; presumably returns success/failure
 * — confirm against full source). */
2100 int guess_input_channel_layout(InputStream *ist)
2102 AVCodecContext *dec = ist->dec_ctx;
2104 if (!dec->channel_layout) {
2105 char layout_name[256];
/* Respect the user's cap on how many channels may be guessed. */
2107 if (dec->channels > ist->guess_layout_max)
2109 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2110 if (!dec->channel_layout)
2112 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2113 dec->channels, dec->channel_layout);
2114 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2115 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode success/failure statistics and enforce -xerror policy:
 * a decode error aborts when exit_on_error is set, and corrupt decoded
 * frames are reported (fatally, if exit_on_error). */
2120 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
2122 if (*got_output || ret<0)
2123 decode_error_stat[ret<0] ++;
2125 if (ret < 0 && exit_on_error)
2128 if (*got_output && ist) {
2129 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2130 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2131 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2138 // Filters can be configured only if the formats of all inputs are known.
/* Returns 0 if any audio/video input still has an unknown format (<0);
 * the non-visible tail presumably returns 1 otherwise. */
2139 static int ifilter_has_all_input_formats(FilterGraph *fg)
2142 for (i = 0; i < fg->nb_inputs; i++) {
2143 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Push one decoded frame into an input filter. If the frame's parameters
 * differ from what the graph was configured with (format, size, audio
 * layout, hw frames context), the graph is reconfigured; if other inputs
 * are still format-less, the frame is queued in ifilter->frame_queue
 * instead, to be replayed once the graph can be built.
 * NOTE(review): excerpt elides some original lines (break statements,
 * brace closures, some error-path returns). */
2150 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2152 FilterGraph *fg = ifilter->graph;
2153 int need_reinit, ret, i;
2155 /* determine if the parameters for this input changed */
2156 need_reinit = ifilter->format != frame->format;
2158 switch (ifilter->ist->st->codecpar->codec_type) {
2159 case AVMEDIA_TYPE_AUDIO:
2160 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2161 ifilter->channels != frame->channels ||
2162 ifilter->channel_layout != frame->channel_layout;
2164 case AVMEDIA_TYPE_VIDEO:
2165 need_reinit |= ifilter->width != frame->width ||
2166 ifilter->height != frame->height;
/* User disabled reinit (-reinit_filter 0) and a graph exists: keep it. */
2170 if (!ifilter->ist->reinit_filters && fg->graph)
/* A change of hw frames context also forces reconfiguration. */
2173 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2174 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2178 ret = ifilter_parameters_from_frame(ifilter, frame);
2183 /* (re)init the graph if possible, otherwise buffer the frame and return */
2184 if (need_reinit || !fg->graph) {
2185 for (i = 0; i < fg->nb_inputs; i++) {
2186 if (!ifilter_has_all_input_formats(fg)) {
/* Clone and queue: the original frame is returned to the caller
 * emptied (unref'd), matching the consumed-on-success contract. */
2187 AVFrame *tmp = av_frame_clone(frame);
2189 return AVERROR(ENOMEM);
2190 av_frame_unref(frame);
2192 if (!av_fifo_space(ifilter->frame_queue)) {
2193 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2195 av_frame_free(&tmp);
2199 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain pending output before tearing down / rebuilding the graph. */
2204 ret = reap_filters(1);
2205 if (ret < 0 && ret != AVERROR_EOF) {
2206 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2210 ret = configure_filtergraph(fg);
2212 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2217 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2219 if (ret != AVERROR_EOF)
2220 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/* Signal EOF (at timestamp 'pts') on one filtergraph input. If the graph
 * was never configured, fall back to codecpar-derived parameters; failing
 * that, an audio/video input with no known format is a hard error. */
2227 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2233 if (ifilter->filter) {
2234 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2238 // the filtergraph was never configured
2239 if (ifilter->format < 0)
2240 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2241 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2242 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2243 return AVERROR_INVALIDDATA;
2250 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2251 // There is the following difference: if you got a frame, you must call
2252 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2253 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin shim over the send/receive decode API that reports one frame per
 * call via *got_frame (set elsewhere in elided lines — confirm against
 * full source). */
2254 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2261 ret = avcodec_send_packet(avctx, pkt);
2262 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2263 // decoded frames with avcodec_receive_frame() until done.
/* AVERROR_EOF from send is tolerated: decoder is already flushing. */
2264 if (ret < 0 && ret != AVERROR_EOF)
2268 ret = avcodec_receive_frame(avctx, frame);
/* EAGAIN from receive just means "need more input" — not an error. */
2269 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Fan one decoded frame out to every filtergraph input fed by this stream.
 * All but the last consumer get a ref'd copy via ist->filter_frame; the
 * last gets the original (elided else-branch — confirm against full
 * source). EOF from a filter is treated as a benign condition. */
2277 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2282 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2283 for (i = 0; i < ist->nb_filters; i++) {
2284 if (i < ist->nb_filters - 1) {
2285 f = ist->filter_frame;
2286 ret = av_frame_ref(f, decoded_frame);
2291 ret = ifilter_send_frame(ist->filters[i], f);
2292 if (ret == AVERROR_EOF)
2293 ret = 0; /* ignore */
2295 av_log(NULL, AV_LOG_ERROR,
2296 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet for 'ist', derive a pts for the decoded frame
 * (frame pts > packet pts > stream dts, in that preference order), advance
 * the stream's predicted next pts/dts by the decoded sample count, and
 * forward the frame to the filtergraphs.
 * NOTE(review): excerpt elides some original lines (goto/return paths,
 * brace closures, the time-base denominators at 2340/2342). */
2303 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2306 AVFrame *decoded_frame;
2307 AVCodecContext *avctx = ist->dec_ctx;
2309 AVRational decoded_frame_tb;
/* Lazily allocate the reusable per-stream frames. */
2311 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2312 return AVERROR(ENOMEM);
2313 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2314 return AVERROR(ENOMEM);
2315 decoded_frame = ist->decoded_frame;
2317 update_benchmark(NULL);
2318 ret = decode(avctx, decoded_frame, got_output, pkt);
2319 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* A decoder reporting a non-positive sample rate is unusable. */
2323 if (ret >= 0 && avctx->sample_rate <= 0) {
2324 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2325 ret = AVERROR_INVALIDDATA;
2328 if (ret != AVERROR_EOF)
2329 check_decode_result(ist, got_output, ret);
2331 if (!*got_output || ret < 0)
2334 ist->samples_decoded += decoded_frame->nb_samples;
2335 ist->frames_decoded++;
2337 /* increment next_dts to use for the case where the input stream does not
2338 have timestamps or there are multiple frames in the packet */
2339 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2341 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Choose the timebase matching wherever the pts came from. */
2344 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2345 decoded_frame_tb = ist->st->time_base;
2346 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2347 decoded_frame->pts = pkt->pts;
2348 decoded_frame_tb = ist->st->time_base;
2350 decoded_frame->pts = ist->dts;
2351 decoded_frame_tb = AV_TIME_BASE_Q;
/* av_rescale_delta carries rounding state across calls to avoid drift. */
2353 if (decoded_frame->pts != AV_NOPTS_VALUE)
2354 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2355 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2356 (AVRational){1, avctx->sample_rate});
2357 ist->nb_samples = decoded_frame->nb_samples;
2358 err = send_frame_to_filters(ist, decoded_frame);
2360 av_frame_unref(ist->filter_frame);
2361 av_frame_unref(decoded_frame);
2362 return err < 0 ? err : ret;
/* Decode one video packet (or flush when pkt is NULL at EOF), reconstruct a
 * usable timestamp for the produced frame, and forward it to the attached
 * filtergraphs via send_frame_to_filters().
 * NOTE(review): this excerpt is elided; some original lines are missing. */
2365 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2368 AVFrame *decoded_frame;
2369 int i, ret = 0, err = 0;
2370 int64_t best_effort_timestamp;
2371 int64_t dts = AV_NOPTS_VALUE;
2374 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2375 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2377 if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the per-stream scratch frames; both are reused across calls. */
2380 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2381 return AVERROR(ENOMEM);
2382 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2383 return AVERROR(ENOMEM);
2384 decoded_frame = ist->decoded_frame;
/* Convert the stream-global dts (AV_TIME_BASE_Q) back into this stream's
 * time base so it can be stamped onto the packet below. */
2385 if (ist->dts != AV_NOPTS_VALUE)
2386 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2389 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2392 // The old code used to set dts on the drain packet, which does not work
2393 // with the new API anymore.
/* Instead, queue each packet's dts; it is consumed as a best-effort
 * timestamp fallback while draining (see the eof branch further down). */
2395 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2397 return AVERROR(ENOMEM);
2398 ist->dts_buffer = new;
2399 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2402 update_benchmark(NULL);
2403 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2404 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2408 // The following line may be required in some cases where there is no parser
2409 // or the parser does not has_b_frames correctly
2410 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2411 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2412 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2414 av_log(ist->dec_ctx, AV_LOG_WARNING,
2415 "video_delay is larger in decoder than demuxer %d > %d.\n"
2416 "If you want to help, upload a sample "
2417 "of this file to https://streams.videolan.org/upload/ "
2418 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2419 ist->dec_ctx->has_b_frames,
2420 ist->st->codecpar->video_delay);
2423 if (ret != AVERROR_EOF)
2424 check_decode_result(ist, got_output, ret);
/* Debug aid: the decoder context and the produced frame should agree on
 * geometry and pixel format; a mismatch is logged but not fatal here. */
2426 if (*got_output && ret >= 0) {
2427 if (ist->dec_ctx->width != decoded_frame->width ||
2428 ist->dec_ctx->height != decoded_frame->height ||
2429 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2430 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2431 decoded_frame->width,
2432 decoded_frame->height,
2433 decoded_frame->format,
2434 ist->dec_ctx->width,
2435 ist->dec_ctx->height,
2436 ist->dec_ctx->pix_fmt);
2440 if (!*got_output || ret < 0)
2443 if(ist->top_field_first>=0)
2444 decoded_frame->top_field_first = ist->top_field_first;
2446 ist->frames_decoded++;
/* Download the frame from a hwaccel surface when the frame came back in the
 * hwaccel pixel format and a retrieval callback is installed. */
2448 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2449 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2453 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2455 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2456 *duration_pts = decoded_frame->pkt_duration;
/* Forced CFR (-r on input): synthesize monotonically increasing timestamps. */
2458 if (ist->framerate.num)
2459 best_effort_timestamp = ist->cfr_next_pts++;
/* While draining, fall back to the oldest queued packet dts (FIFO pop). */
2461 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2462 best_effort_timestamp = ist->dts_buffer[0];
2464 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2465 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2466 ist->nb_dts_buffer--;
2469 if(best_effort_timestamp != AV_NOPTS_VALUE) {
/* Note the embedded assignment: the frame pts is set to the best-effort
 * timestamp while rescaling it to AV_TIME_BASE_Q. */
2470 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2472 if (ts != AV_NOPTS_VALUE)
2473 ist->next_pts = ist->pts = ts;
2477 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2478 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2479 ist->st->index, av_ts2str(decoded_frame->pts),
2480 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2481 best_effort_timestamp,
2482 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2483 decoded_frame->key_frame, decoded_frame->pict_type,
2484 ist->st->time_base.num, ist->st->time_base.den);
2487 if (ist->st->sample_aspect_ratio.num)
2488 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2490 err = send_frame_to_filters(ist, decoded_frame);
2493 av_frame_unref(ist->filter_frame);
2494 av_frame_unref(decoded_frame);
/* Filter-send errors take priority over the decode return value. */
2495 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally re-clamp its display duration
 * against the previously decoded subtitle (-fix_sub_duration), feed it to
 * sub2video filter inputs, and send it to all subtitle encoders.
 * NOTE(review): this excerpt is elided; some original lines are missing. */
2498 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2501 AVSubtitle subtitle;
2503 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2504 &subtitle, got_output, pkt);
2506 check_decode_result(NULL, got_output, ret);
2508 if (ret < 0 || !*got_output) {
2511 sub2video_flush(ist);
/* -fix_sub_duration: shorten the previous subtitle so it ends no later than
 * the current one starts; end_display_time is in milliseconds, hence the
 * rescale by 1000/AV_TIME_BASE. */
2515 if (ist->fix_sub_duration) {
2517 if (ist->prev_sub.got_output) {
2518 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2519 1000, AV_TIME_BASE);
2520 if (end < ist->prev_sub.subtitle.end_display_time) {
2521 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2522 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2523 ist->prev_sub.subtitle.end_display_time, end,
2524 end <= 0 ? ", dropping it" : "");
2525 ist->prev_sub.subtitle.end_display_time = end;
/* Delay output by one subtitle: emit the previous one, stash the current. */
2528 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2529 FFSWAP(int, ret, ist->prev_sub.ret);
2530 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video: render immediately if a frame exists, otherwise queue the
 * subtitle until the filtergraph is ready (FIFO grows by doubling). */
2538 if (ist->sub2video.frame) {
2539 sub2video_update(ist, INT64_MIN, &subtitle);
2540 } else if (ist->nb_filters) {
2541 if (!ist->sub2video.sub_queue)
2542 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2543 if (!ist->sub2video.sub_queue)
2545 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2546 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2550 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2554 if (!subtitle.num_rects)
2557 ist->frames_decoded++;
/* Fan the decoded subtitle out to every matching subtitle encoder. */
2559 for (i = 0; i < nb_output_streams; i++) {
2560 OutputStream *ost = output_streams[i];
2562 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2563 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2566 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2571 avsubtitle_free(&subtitle);
/* Signal end-of-stream to every filtergraph input fed by this input stream,
 * passing the last known pts (converted back to the stream time base). */
2575 static int send_filter_eof(InputStream *ist)
2578 /* TODO keep pts also in stream time base to avoid converting back */
2579 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2580 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2582 for (i = 0; i < ist->nb_filters; i++) {
2583 ret = ifilter_send_eof(ist->filters[i], pts);
2590 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: maintains the input stream's dts/pts
 * bookkeeping, drives the decoders (audio/video/subtitle) when decoding is
 * needed, sends filter EOF after draining, and otherwise advances timestamps
 * and forwards packets for stream copy. Returns 0 once EOF is reached,
 * non-zero while more input is expected.
 * NOTE(review): this excerpt is elided; some original lines are missing. */
2591 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2595 int eof_reached = 0;
/* First packet for this stream: seed dts with a negative B-frame-delay
 * offset (if the average frame rate is known), optionally biased by the
 * first pts in the stream-copy case. */
2598 if (!ist->saw_first_ts) {
2599 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2601 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2602 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2603 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2605 ist->saw_first_ts = 1;
2608 if (ist->next_dts == AV_NOPTS_VALUE)
2609 ist->next_dts = ist->dts;
2610 if (ist->next_pts == AV_NOPTS_VALUE)
2611 ist->next_pts = ist->pts;
2615 av_init_packet(&avpkt);
/* Sync the stream clocks to the packet dts (rescaled to AV_TIME_BASE_Q);
 * for decoded video the pts is derived from frames instead. */
2622 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2623 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2624 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2625 ist->next_pts = ist->pts = ist->dts;
2628 // while we have more to decode or while the decoder did output something on EOF
2629 while (ist->decoding_needed) {
2630 int64_t duration_dts = 0;
2631 int64_t duration_pts = 0;
2633 int decode_failed = 0;
2635 ist->pts = ist->next_pts;
2636 ist->dts = ist->next_dts;
2638 switch (ist->dec_ctx->codec_type) {
2639 case AVMEDIA_TYPE_AUDIO:
2640 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2643 case AVMEDIA_TYPE_VIDEO:
2644 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
/* Estimate the next dts: prefer the packet duration, otherwise derive it
 * from the decoder frame rate and the parser's repeat_pict. */
2646 if (!repeating || !pkt || got_output) {
2647 if (pkt && pkt->duration) {
2648 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2649 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2650 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2651 duration_dts = ((int64_t)AV_TIME_BASE *
2652 ist->dec_ctx->framerate.den * ticks) /
2653 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2656 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2657 ist->next_dts += duration_dts;
2659 ist->next_dts = AV_NOPTS_VALUE;
/* Advance pts by the decoded frame's own duration when available,
 * falling back to the dts-based estimate. */
2663 if (duration_pts > 0) {
2664 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2666 ist->next_pts += duration_dts;
2670 case AVMEDIA_TYPE_SUBTITLE:
2673 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2674 if (!pkt && ret >= 0)
2681 if (ret == AVERROR_EOF) {
2687 if (decode_failed) {
2688 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2689 ist->file_index, ist->st->index, av_err2str(ret));
2691 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2692 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2694 if (!decode_failed || exit_on_error)
2700 ist->got_output = 1;
2705 // During draining, we might get multiple output frames in this loop.
2706 // ffmpeg.c does not drain the filter chain on configuration changes,
2707 // which means if we send multiple frames at once to the filters, and
2708 // one of those frames changes configuration, the buffered frames will
2709 // be lost. This can upset certain FATE tests.
2710 // Decode only 1 frame per call on EOF to appease these FATE tests.
2711 // The ideal solution would be to rewrite decoding to use the new
2712 // decoding API in a better way.
2719 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2720 /* except when looping we need to flush but not to send an EOF */
2721 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2722 int ret = send_filter_eof(ist);
2724 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2729 /* handle stream copy */
2730 if (!ist->decoding_needed && pkt) {
2731 ist->dts = ist->next_dts;
2732 switch (ist->dec_ctx->codec_type) {
2733 case AVMEDIA_TYPE_AUDIO:
2734 av_assert1(pkt->duration >= 0);
/* Audio copy: advance by one codec frame worth of samples if the sample
 * rate is known, otherwise by the packet duration. */
2735 if (ist->dec_ctx->sample_rate) {
2736 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2737 ist->dec_ctx->sample_rate;
2739 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2742 case AVMEDIA_TYPE_VIDEO:
2743 if (ist->framerate.num) {
2744 // TODO: Remove work-around for c99-to-c89 issue 7
2745 AVRational time_base_q = AV_TIME_BASE_Q;
2746 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2747 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2748 } else if (pkt->duration) {
2749 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2750 } else if(ist->dec_ctx->framerate.num != 0) {
2751 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2752 ist->next_dts += ((int64_t)AV_TIME_BASE *
2753 ist->dec_ctx->framerate.den * ticks) /
2754 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2758 ist->pts = ist->dts;
2759 ist->next_pts = ist->next_dts;
/* Forward the packet unchanged to every stream-copy output it maps to. */
2761 for (i = 0; i < nb_output_streams; i++) {
2762 OutputStream *ost = output_streams[i];
2764 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2767 do_streamcopy(ist, ost, pkt);
2770 return !eof_reached;
/* Build an SDP description covering all RTP output files and either print it
 * to stdout or write it to -sdp_file. Only runs once every output header has
 * been written (returns early otherwise).
 * NOTE(review): this excerpt is elided; some original lines are missing. */
2773 static void print_sdp(void)
2778 AVIOContext *sdp_pb;
2779 AVFormatContext **avc;
/* Wait until all muxers have written their headers. */
2781 for (i = 0; i < nb_output_files; i++) {
2782 if (!output_files[i]->header_written)
2786 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts how many were found. */
2789 for (i = 0, j = 0; i < nb_output_files; i++) {
2790 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2791 avc[j] = output_files[i]->ctx;
2799 av_sdp_create(avc, j, sdp, sizeof(sdp));
2801 if (!sdp_filename) {
2802 printf("SDP:\n%s\n", sdp);
2805 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2806 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2808 avio_print(sdp_pb, sdp);
2809 avio_closep(&sdp_pb);
/* Clearing sdp_filename ensures the file is written only once. */
2810 av_freep(&sdp_filename);
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick/initialize a hardware acceleration method matching the
 * user's -hwaccel selection, falling back through the list otherwise.
 * NOTE(review): this excerpt is elided; some original lines are missing. */
2818 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2820 InputStream *ist = s->opaque;
2821 const enum AVPixelFormat *p;
2824 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2825 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2826 const AVCodecHWConfig *config = NULL;
/* Software formats need no setup; skip to the next candidate. */
2829 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Generic/auto hwaccel: search the codec's hw configs for one using a
 * device context whose pix_fmt matches this candidate. */
2832 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2833 ist->hwaccel_id == HWACCEL_AUTO) {
2835 config = avcodec_get_hw_config(s->codec, i);
2838 if (!(config->methods &
2839 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2841 if (config->pix_fmt == *p)
2846 if (config->device_type != ist->hwaccel_device_type) {
2847 // Different hwaccel offered, ignore.
2851 ret = hwaccel_decode_init(s);
/* An explicitly requested hwaccel that fails to init is fatal. */
2853 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2854 av_log(NULL, AV_LOG_FATAL,
2855 "%s hwaccel requested for input stream #%d:%d, "
2856 "but cannot be initialized.\n",
2857 av_hwdevice_get_type_name(config->device_type),
2858 ist->file_index, ist->st->index);
2859 return AV_PIX_FMT_NONE;
/* Legacy path: look this pixel format up in the static hwaccels[] table. */
2864 const HWAccel *hwaccel = NULL;
2866 for (i = 0; hwaccels[i].name; i++) {
2867 if (hwaccels[i].pix_fmt == *p) {
2868 hwaccel = &hwaccels[i];
2873 // No hwaccel supporting this pixfmt.
2876 if (hwaccel->id != ist->hwaccel_id) {
2877 // Does not match requested hwaccel.
2881 ret = hwaccel->init(s);
2883 av_log(NULL, AV_LOG_FATAL,
2884 "%s hwaccel requested for input stream #%d:%d, "
2885 "but cannot be initialized.\n", hwaccel->name,
2886 ist->file_index, ist->st->index);
2887 return AV_PIX_FMT_NONE;
/* Propagate a pre-existing hw frames context to the decoder (ref-counted). */
2891 if (ist->hw_frames_ctx) {
2892 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2893 if (!s->hw_frames_ctx)
2894 return AV_PIX_FMT_NONE;
/* Remember which hwaccel pixel format was chosen for later retrieval. */
2897 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator for hwaccel-format frames, otherwise use the default one. */
2904 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2906 InputStream *ist = s->opaque;
2908 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2909 return ist->hwaccel_get_buffer(s, frame, flags);
2911 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (if decoding is needed), installing
 * the get_format/get_buffer callbacks, per-codec decoder options, and the
 * hardware device. On failure, an error message is written into 'error'.
 * NOTE(review): this excerpt is elided; some original lines are missing. */
2914 static int init_input_stream(int ist_index, char *error, int error_len)
2917 InputStream *ist = input_streams[ist_index];
2919 if (ist->decoding_needed) {
2920 AVCodec *codec = ist->dec;
2922 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2923 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2924 return AVERROR(EINVAL);
/* Make the InputStream reachable from the codec callbacks below. */
2927 ist->dec_ctx->opaque = ist;
2928 ist->dec_ctx->get_format = get_format;
2929 ist->dec_ctx->get_buffer2 = get_buffer;
2930 ist->dec_ctx->thread_safe_callbacks = 1;
2932 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2933 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2934 (ist->decoding_needed & DECODING_FOR_OST)) {
2935 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2936 if (ist->decoding_needed & DECODING_FOR_FILTER)
2937 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2940 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2942 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2943 * audio, and video decoders such as cuvid or mediacodec */
2944 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2946 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2947 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2948 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2949 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2950 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2952 ret = hw_device_setup_for_decode(ist);
2954 snprintf(error, error_len, "Device setup failed for "
2955 "decoder on input stream #%d:%d : %s",
2956 ist->file_index, ist->st->index, av_err2str(ret));
2960 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2961 if (ret == AVERROR_EXPERIMENTAL)
2962 abort_codec_experimental(codec, 0);
2964 snprintf(error, error_len,
2965 "Error while opening decoder for input stream "
2967 ist->file_index, ist->st->index, av_err2str(ret));
/* Any options the decoder did not consume are reported as errors. */
2970 assert_avoptions(ist->decoder_opts);
2973 ist->next_pts = AV_NOPTS_VALUE;
2974 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream an output stream is directly mapped to, or NULL
 * when the output is fed from a filtergraph (no single source stream). */
2979 static InputStream *get_input_stream(OutputStream *ost)
2981 if (ost->source_index >= 0)
2982 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values: returns a negative, zero, or
 * positive int when *a is less than, equal to, or greater than *b.
 * Uses FFDIFFSIGN instead of subtraction to avoid signed overflow. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return FFDIFFSIGN(lhs, rhs);
}
2991 /* open the muxer when all the streams are initialized */
/* Once every stream of this output file is initialized, write the container
 * header, print the SDP if requested, and flush packets that were queued in
 * the per-stream muxing queues while the muxer was not yet open.
 * NOTE(review): this excerpt is elided; some original lines are missing. */
2992 static int check_init_output_file(OutputFile *of, int file_index)
/* Bail out while any stream is still uninitialized. */
2996 for (i = 0; i < of->ctx->nb_streams; i++) {
2997 OutputStream *ost = output_streams[of->ost_index + i];
2998 if (!ost->initialized)
3002 of->ctx->interrupt_callback = int_cb;
3004 ret = avformat_write_header(of->ctx, &of->opts);
3006 av_log(NULL, AV_LOG_ERROR,
3007 "Could not write header for output file #%d "
3008 "(incorrect codec parameters ?): %s\n",
3009 file_index, av_err2str(ret));
3012 //assert_avoptions(of->opts);
3013 of->header_written = 1;
3015 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3017 if (sdp_filename || want_sdp)
3020 /* flush the muxing queues */
3021 for (i = 0; i < of->ctx->nb_streams; i++) {
3022 OutputStream *ost = output_streams[of->ost_index + i];
3024 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3025 if (!av_fifo_size(ost->muxing_queue))
3026 ost->mux_timebase = ost->st->time_base;
3028 while (av_fifo_size(ost->muxing_queue)) {
3030 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3031 ost->muxing_queue_data_size -= pkt.size;
3032 write_packet(of, &pkt, ost, 1);
/* Initialize the output stream's bitstream filter (if any), feeding it the
 * current stream parameters/time base and copying back what it produces.
 * NOTE(review): this excerpt is elided; some original lines are missing. */
3039 static int init_output_bsfs(OutputStream *ost)
3041 AVBSFContext *ctx = ost->bsf_ctx;
3047 ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3051 ctx->time_base_in = ost->st->time_base;
3053 ret = av_bsf_init(ctx);
3055 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
/* The BSF may rewrite codec parameters and time base; adopt its output. */
3060 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3063 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream for stream copy (-c copy): duplicate the input
 * codec parameters, pick a codec tag valid for the output container, carry
 * over timing, disposition, side data, and per-codec-type fixups.
 * NOTE(review): this excerpt is elided; some original lines are missing. */
3068 static int init_output_stream_streamcopy(OutputStream *ost)
3070 OutputFile *of = output_files[ost->file_index];
3071 InputStream *ist = get_input_stream(ost);
3072 AVCodecParameters *par_dst = ost->st->codecpar;
3073 AVCodecParameters *par_src = ost->ref_par;
3076 uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy requires a direct input mapping and no filtergraph. */
3078 av_assert0(ist && !ost->filter);
/* Round-trip the input parameters through enc_ctx so user -c:copy options
 * (e.g. tag overrides) are applied, then read them back into par_src. */
3080 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3082 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3084 av_log(NULL, AV_LOG_FATAL,
3085 "Error setting up codec context options.\n");
3089 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3091 av_log(NULL, AV_LOG_FATAL,
3092 "Error getting reference codec parameters.\n");
/* Keep the source codec tag only when the output container accepts it
 * (or has no tag table / no alternative tag of its own). */
3097 unsigned int codec_tag_tmp;
3098 if (!of->ctx->oformat->codec_tag ||
3099 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3100 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3101 codec_tag = par_src->codec_tag;
3104 ret = avcodec_parameters_copy(par_dst, par_src);
3108 par_dst->codec_tag = codec_tag;
3110 if (!ost->frame_rate.num)
3111 ost->frame_rate = ist->framerate;
3112 ost->st->avg_frame_rate = ost->frame_rate;
3114 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3118 // copy timebase while removing common factors
3119 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3120 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3122 // copy estimated duration as a hint to the muxer
3123 if (ost->st->duration <= 0 && ist->st->duration > 0)
3124 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3127 ost->st->disposition = ist->st->disposition;
/* Duplicate all stream-level side data from the input. */
3129 if (ist->st->nb_side_data) {
3130 for (i = 0; i < ist->st->nb_side_data; i++) {
3131 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3134 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3136 return AVERROR(ENOMEM);
3137 memcpy(dst_data, sd_src->data, sd_src->size);
/* -metadata rotate override: emit a display matrix with the negated angle. */
3141 if (ost->rotate_overridden) {
3142 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3143 sizeof(int32_t) * 9);
3145 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3148 switch (par_dst->codec_type) {
3149 case AVMEDIA_TYPE_AUDIO:
3150 if (audio_volume != 256) {
3151 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Known-bogus block_align values for MP3/AC3 are cleared. */
3154 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3155 par_dst->block_align= 0;
3156 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3157 par_dst->block_align= 0;
3159 case AVMEDIA_TYPE_VIDEO:
3160 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3162 av_mul_q(ost->frame_aspect_ratio,
3163 (AVRational){ par_dst->height, par_dst->width });
3164 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3165 "with stream copy may produce invalid files\n");
3167 else if (ist->st->sample_aspect_ratio.num)
3168 sar = ist->st->sample_aspect_ratio;
3170 sar = par_src->sample_aspect_ratio;
3171 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3172 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3173 ost->st->r_frame_rate = ist->st->r_frame_rate;
3177 ost->mux_timebase = ist->st->time_base;
/* Write the "encoder" metadata tag for this output stream: the full
 * LIBAVCODEC_IDENT plus encoder name normally, or a shortened "Lavc <name>"
 * when bitexact output is requested. A user-supplied tag is left untouched.
 * NOTE(review): this excerpt is elided; some original lines are missing. */
3182 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3184 AVDictionaryEntry *e;
3186 uint8_t *encoder_string;
3187 int encoder_string_len;
3188 int format_flags = 0;
3189 int codec_flags = ost->enc_ctx->flags;
/* Respect an encoder tag already set by the user/demuxer. */
3191 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / codec "flags" options to detect bitexact mode even
 * before the options are applied to their contexts. */
3194 e = av_dict_get(of->opts, "fflags", NULL, 0);
3196 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3199 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3201 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3203 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3206 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2: one byte for the space/short prefix handling, one for the NUL. */
3209 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3210 encoder_string = av_mallocz(encoder_string_len);
3211 if (!encoder_string)
3214 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3215 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3217 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3218 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict. */
3219 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3220 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames specification (comma-separated times, plus the
 * "chapters[+offset]" shorthand) into a sorted array of pts values stored on
 * the output stream (ost->forced_kf_pts / forced_kf_count).
 * NOTE(review): this excerpt is elided; some original lines are missing. */
3223 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3224 AVCodecContext *avctx)
3227 int n = 1, i, size, index = 0;
/* First pass: count commas to size the pts array. */
3230 for (p = kf; *p; p++)
3234 pts = av_malloc_array(size, sizeof(*pts));
3236 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3241 for (i = 0; i < n; i++) {
3242 char *next = strchr(p, ',');
/* "chapters[+offset]": expand to one keyframe per chapter start. */
3247 if (!memcmp(p, "chapters", 8)) {
3249 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by nb_chapters-1 (this entry is replaced), with an
 * overflow guard on the addition. */
3252 if (avf->nb_chapters > INT_MAX - size ||
3253 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3255 av_log(NULL, AV_LOG_FATAL,
3256 "Could not allocate forced key frames array.\n");
3259 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3260 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3262 for (j = 0; j < avf->nb_chapters; j++) {
3263 AVChapter *c = avf->chapters[j];
3264 av_assert1(index < size);
3265 pts[index++] = av_rescale_q(c->start, c->time_base,
3266 avctx->time_base) + t;
/* Plain timestamp entry, converted to the encoder time base. */
3271 t = parse_time_or_die("force_key_frames", p, 1);
3272 av_assert1(index < size);
3273 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3280 av_assert0(index == size);
/* Keep the list sorted so lookups during encoding can scan in order. */
3281 qsort(pts, size, sizeof(*pts), compare_int64);
3282 ost->forced_kf_count = size;
3283 ost->forced_kf_pts = pts;
/* Choose the encoder time base: an explicit -enc_time_base value wins; a
 * negative value means "copy from the input stream"; otherwise fall back to
 * the codec-type default passed by the caller.
 * NOTE(review): this excerpt is elided; some original lines are missing. */
3286 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3288 InputStream *ist = get_input_stream(ost);
3289 AVCodecContext *enc_ctx = ost->enc_ctx;
3290 AVFormatContext *oc;
3292 if (ost->enc_timebase.num > 0) {
3293 enc_ctx->time_base = ost->enc_timebase;
/* num < 0 requests the input stream's time base (needs a mapped input). */
3297 if (ost->enc_timebase.num < 0) {
3299 enc_ctx->time_base = ist->st->time_base;
3303 oc = output_files[ost->file_index]->ctx;
3304 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3307 enc_ctx->time_base = default_time_base;
/* Configure the encoder context for an output stream that will be encoded:
 * derive frame rate, time base, and format/geometry from the filtergraph
 * sink, carry over metadata/disposition, and set up forced keyframes.
 * 'frame' (may be NULL) supplies color/interlacing properties.
 * NOTE(review): this excerpt is elided; some original lines are missing. */
3310 static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3312 InputStream *ist = get_input_stream(ost);
3313 AVCodecContext *enc_ctx = ost->enc_ctx;
3314 AVCodecContext *dec_ctx = NULL;
3315 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3318 set_encoder_id(output_files[ost->file_index], ost);
3320 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3321 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3322 // which have to be filtered out to prevent leaking them to output files.
3323 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3326 ost->st->disposition = ist->st->disposition;
3328 dec_ctx = ist->dec_ctx;
3330 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the file, mark audio and
 * video streams as the default disposition. */
3332 for (j = 0; j < oc->nb_streams; j++) {
3333 AVStream *st = oc->streams[j];
3334 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3337 if (j == oc->nb_streams)
3338 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3339 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3340 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate resolution order: -r, filter sink, input -r, input
 * r_frame_rate, then a hard 25 fps fallback with a warning. */
3343 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3344 if (!ost->frame_rate.num)
3345 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3346 if (ist && !ost->frame_rate.num)
3347 ost->frame_rate = ist->framerate;
3348 if (ist && !ost->frame_rate.num)
3349 ost->frame_rate = ist->st->r_frame_rate;
3350 if (ist && !ost->frame_rate.num) {
3351 ost->frame_rate = (AVRational){25, 1};
3352 av_log(NULL, AV_LOG_WARNING,
3354 "about the input framerate is available. Falling "
3355 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3356 "if you want a different framerate.\n",
3357 ost->file_index, ost->index);
/* Snap to the nearest framerate the encoder supports unless -force_fps. */
3360 if (ost->enc->supported_framerates && !ost->force_fps) {
3361 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3362 ost->frame_rate = ost->enc->supported_framerates[idx];
3364 // reduce frame rate for mpeg4 to be within the spec limits
3365 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3366 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3367 ost->frame_rate.num, ost->frame_rate.den, 65535);
3371 switch (enc_ctx->codec_type) {
3372 case AVMEDIA_TYPE_AUDIO:
/* Audio: sample format/rate/layout come from the filtergraph sink. */
3373 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3375 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3376 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3377 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3378 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3379 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3381 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3384 case AVMEDIA_TYPE_VIDEO:
3385 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3387 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3388 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3389 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3390 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3391 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3392 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Video geometry, SAR, and pixel format come from the filtergraph sink;
 * -aspect overrides the sample aspect ratio. */
3395 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3396 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3397 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3398 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3399 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3400 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3402 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3404 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3405 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
/* Propagate color and chroma-location metadata from the incoming frame. */
3408 enc_ctx->color_range = frame->color_range;
3409 enc_ctx->color_primaries = frame->color_primaries;
3410 enc_ctx->color_trc = frame->color_trc;
3411 enc_ctx->colorspace = frame->colorspace;
3412 enc_ctx->chroma_sample_location = frame->chroma_location;
3415 enc_ctx->framerate = ost->frame_rate;
3417 ost->st->avg_frame_rate = ost->frame_rate;
3420 enc_ctx->width != dec_ctx->width ||
3421 enc_ctx->height != dec_ctx->height ||
3422 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3423 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -top: 0 = bottom field first, 1 = top field first; otherwise derive the
 * field order from the frame's interlacing flags below. */
3426 if (ost->top_field_first == 0) {
3427 enc_ctx->field_order = AV_FIELD_BB;
3428 } else if (ost->top_field_first == 1) {
3429 enc_ctx->field_order = AV_FIELD_TT;
3433 if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
3434 ost->top_field_first >= 0)
3435 frame->top_field_first = !!ost->top_field_first;
3437 if (frame->interlaced_frame) {
3438 if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3439 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3441 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3443 enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
/* -force_key_frames: either a runtime expression ("expr:"), the literal
 * "source" (keep input keyframes, parsed elsewhere), or a static list. */
3446 if (ost->forced_keyframes) {
3447 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3448 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3449 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3451 av_log(NULL, AV_LOG_ERROR,
3452 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3455 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3456 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3457 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3458 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3460 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3461 // parse it only for static kf timings
3462 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3463 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3467 case AVMEDIA_TYPE_SUBTITLE:
3468 enc_ctx->time_base = AV_TIME_BASE_Q;
3469 if (!enc_ctx->width) {
3470 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3471 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3474 case AVMEDIA_TYPE_DATA:
3481 ost->mux_timebase = enc_ctx->time_base;
/*
 * Finalize one output stream: for transcoded streams set up and open the
 * encoder, propagate encoder parameters and side data to the muxer stream;
 * for stream-copy streams run the copy initialization. Afterwards apply
 * user disposition flags, initialize bitstream filters, and attempt to
 * write the output file header once all of its streams are initialized.
 *
 * Returns 0 on success or a negative AVERROR; on failure a human-readable
 * message is written into 'error' (up to error_len bytes).
 * NOTE(review): interior lines are elided in this excerpt (line-number
 * gaps), so some control flow between visible statements is not shown.
 */
3486 static int init_output_stream(OutputStream *ost, AVFrame *frame,
3487                               char *error, int error_len)
3491     if (ost->encoding_needed) {
3492         AVCodec *codec = ost->enc;
3493         AVCodecContext *dec = NULL;
/* Derive encoder parameters (resolution, rates, timebase, ...) first. */
3496         ret = init_output_stream_encode(ost, frame);
3500         if ((ist = get_input_stream(ost)))
3502         if (dec && dec->subtitle_header) {
3503             /* ASS code assumes this buffer is null terminated so add extra byte. */
3504             ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3505             if (!ost->enc_ctx->subtitle_header)
3506                 return AVERROR(ENOMEM);
3507             memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3508             ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic encoder threading unless the user chose a count. */
3510         if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3511             av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Audio with no explicit bitrate gets a 128 kb/s default. */
3512         if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3514             !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3515             !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3516             av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3518         ret = hw_device_setup_for_encode(ost);
3520             snprintf(error, error_len, "Device setup failed for "
3521                      "encoder on output stream #%d:%d : %s",
3522                 ost->file_index, ost->index, av_err2str(ret));
/* Subtitle transcoding is only supported within the same representation
 * class: text->text or bitmap->bitmap, never across. */
3526         if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3527             int input_props = 0, output_props = 0;
3528             AVCodecDescriptor const *input_descriptor =
3529                 avcodec_descriptor_get(dec->codec_id);
3530             AVCodecDescriptor const *output_descriptor =
3531                 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3532             if (input_descriptor)
3533                 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3534             if (output_descriptor)
3535                 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3536             if (input_props && output_props && input_props != output_props) {
3537                 snprintf(error, error_len,
3538                          "Subtitle encoding currently only possible from text to text "
3539                          "or bitmap to bitmap");
3540                 return AVERROR_INVALIDDATA;
/* Open the encoder with the accumulated options. */
3544         if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3545             if (ret == AVERROR_EXPERIMENTAL)
3546                 abort_codec_experimental(codec, 1);
3547             snprintf(error, error_len,
3548                      "Error while opening encoder for output stream #%d:%d - "
3549                      "maybe incorrect parameters such as bit_rate, rate, width or height",
3550                     ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit frames of
 * exactly enc_ctx->frame_size samples. */
3553         if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3554             !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3555             av_buffersink_set_frame_size(ost->filter->filter,
3556                                          ost->enc_ctx->frame_size);
3557         assert_avoptions(ost->encoder_opts);
3558         if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3559             ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3560             av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3561                                          " It takes bits/s as argument, not kbits/s\n");
/* Export the opened encoder's parameters to the muxer-visible stream. */
3563         ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3565             av_log(NULL, AV_LOG_FATAL,
3566                    "Error initializing the output stream codec context.\n");
3570          * FIXME: ost->st->codec should't be needed here anymore.
3572         ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* Copy side data the encoder attached to its context (e.g. CPB props). */
3576         if (ost->enc_ctx->nb_coded_side_data) {
3579             for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3580                 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3583                 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3585                     return AVERROR(ENOMEM);
3586                 memcpy(dst_data, sd_src->data, sd_src->size);
3591          * Add global input side data. For now this is naive, and copies it
3592          * from the input stream's global side data. All side data should
3593          * really be funneled over AVFrame and libavfilter, then added back to
3594          * packet side data, and then potentially using the first packet for
3599             for (i = 0; i < ist->st->nb_side_data; i++) {
3600                 AVPacketSideData *sd = &ist->st->side_data[i];
3601                 if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3602                     uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3604                         return AVERROR(ENOMEM);
3605                     memcpy(dst, sd->data, sd->size);
/* autorotate was applied by the filter chain, so neutralize the copied
 * display matrix to avoid double rotation downstream. */
3606                     if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3607                         av_display_rotation_set((uint32_t *)dst, 0);
3612         // copy timebase while removing common factors
3613         if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3614             ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3616         // copy estimated duration as a hint to the muxer
3617         if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3618             ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3620         ost->st->codec->codec= ost->enc_ctx->codec;
3621     } else if (ost->stream_copy) {
3622         ret = init_output_stream_streamcopy(ost);
3627     // parse user provided disposition, and update stream values
3628     if (ost->disposition) {
/* Ad-hoc AVOption flags table so av_opt_eval_flags() can parse strings
 * like "default+forced" into AV_DISPOSITION_* bits. */
3629         static const AVOption opts[] = {
3630             { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3631             { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
3632             { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
3633             { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
3634             { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
3635             { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
3636             { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
3637             { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
3638             { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
3639             { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
3640             { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
3641             { "attached_pic"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC      },    .unit = "flags" },
3642             { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
3643             { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
3644             { "dependent"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT         },    .unit = "flags" },
3645             { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
3648         static const AVClass class = {
3650             .item_name = av_default_item_name,
3652             .version   = LIBAVUTIL_VERSION_INT,
3654         const AVClass *pclass = &class;
3656         ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3661     /* initialize bitstream filters for the output stream
3662      * needs to be done here, because the codec id for streamcopy is not
3663      * known until now */
3664     ret = init_output_bsfs(ost);
3668     ost->initialized = 1;
/* The header can only be written once every stream of the file is ready;
 * this checks and writes it when this stream was the last one pending. */
3670     ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3677 static void report_new_stream(int input_index, AVPacket *pkt)
3679 InputFile *file = input_files[input_index];
3680 AVStream *st = file->ctx->streams[pkt->stream_index];
3682 if (pkt->stream_index < file->nb_streams_warn)
3684 av_log(file->ctx, AV_LOG_WARNING,
3685 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3686 av_get_media_type_string(st->codecpar->codec_type),
3687 input_index, pkt->stream_index,
3688 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3689 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * One-time setup before the main transcode loop: resolve complex-filtergraph
 * output sources, init framerate emulation clocks, open all decoders, init
 * the output streams that can be initialized without a frame (streamcopy,
 * subtitles, data), discard unused programs, write headers for stream-less
 * outputs, and finally dump the stream mapping to the log.
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): interior lines are elided (line-number gaps); some control
 * flow between visible statements is not shown.
 */
3692 static int transcode_init(void)
3694     int ret = 0, i, j, k;
3695     AVFormatContext *oc;
3698     char error[1024] = {0};
/* Give complex-filtergraph outputs a representative source_index so code
 * that reports/maps by input stream has something to point at. */
3700     for (i = 0; i < nb_filtergraphs; i++) {
3701         FilterGraph *fg = filtergraphs[i];
3702         for (j = 0; j < fg->nb_outputs; j++) {
3703             OutputFilter *ofilter = fg->outputs[j];
3704             if (!ofilter->ost || ofilter->ost->source_index >= 0)
3706             if (fg->nb_inputs != 1)
3708             for (k = nb_input_streams-1; k >= 0 ; k--)
3709                 if (fg->inputs[0]->ist == input_streams[k])
3711             ofilter->ost->source_index = k;
3715     /* init framerate emulation */
3716     for (i = 0; i < nb_input_files; i++) {
3717         InputFile *ifile = input_files[i];
3718         if (ifile->rate_emu)
3719             for (j = 0; j < ifile->nb_streams; j++)
3720                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3723     /* init input streams */
3724     for (i = 0; i < nb_input_streams; i++)
3725         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* A decoder failed to open: close every encoder before bailing out. */
3726             for (i = 0; i < nb_output_streams; i++) {
3727                 ost = output_streams[i];
3728                 avcodec_close(ost->enc_ctx);
3734      * initialize stream copy and subtitle/data streams.
3735      * Encoded AVFrame based streams will get initialized as follows:
3736      * - when the first AVFrame is received in do_video_out
3737      * - just before the first AVFrame is received in either transcode_step
3738      *   or reap_filters due to us requiring the filter chain buffer sink
3739      *   to be configured with the correct audio frame size, which is only
3740      *   known after the encoder is initialized.
3742     for (i = 0; i < nb_output_streams; i++) {
/* Skip transcoded audio/video here; they are initialized lazily (above). */
3743         if (!output_streams[i]->stream_copy &&
3744             (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3745              output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3748         ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3753     /* discard unused programs */
3754     for (i = 0; i < nb_input_files; i++) {
3755         InputFile *ifile = input_files[i];
3756         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3757             AVProgram *p = ifile->ctx->programs[j];
3758             int discard  = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is in use. */
3760             for (k = 0; k < p->nb_stream_indexes; k++)
3761                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3762                     discard = AVDISCARD_DEFAULT;
3765             p->discard = discard;
3769     /* write headers for files with no streams */
3770     for (i = 0; i < nb_output_files; i++) {
3771         oc = output_files[i]->ctx;
3772         if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3773             ret = check_init_output_file(output_files[i], i);
3780     /* dump the stream mapping */
3781     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3782     for (i = 0; i < nb_input_streams; i++) {
3783         ist = input_streams[i];
3785         for (j = 0; j < ist->nb_filters; j++) {
3786             if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3787                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3788                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3789                        ist->filters[j]->name);
3790                 if (nb_filtergraphs > 1)
3791                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3792                 av_log(NULL, AV_LOG_INFO, "\n");
3797     for (i = 0; i < nb_output_streams; i++) {
3798         ost = output_streams[i];
3800         if (ost->attachment_filename) {
3801             /* an attached file */
3802             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3803                    ost->attachment_filename, ost->file_index, ost->index);
3807         if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3808             /* output from a complex graph */
3809             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3810             if (nb_filtergraphs > 1)
3811                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3813             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3814                    ost->index, ost->enc ? ost->enc->name : "?");
3818         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3819                input_streams[ost->source_index]->file_index,
3820                input_streams[ost->source_index]->st->index,
3823         if (ost->sync_ist != input_streams[ost->source_index])
3824             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3825                    ost->sync_ist->file_index,
3826                    ost->sync_ist->st->index);
3827         if (ost->stream_copy)
3828             av_log(NULL, AV_LOG_INFO, " (copy)");
/* For transcoded streams, report "native" when the codec's own name equals
 * the descriptor name (i.e. no external decoder/encoder was chosen). */
3830             const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3831             const AVCodec *out_codec   = ost->enc;
3832             const char *decoder_name   = "?";
3833             const char *in_codec_name  = "?";
3834             const char *encoder_name   = "?";
3835             const char *out_codec_name = "?";
3836             const AVCodecDescriptor *desc;
3839                 decoder_name  = in_codec->name;
3840                 desc = avcodec_descriptor_get(in_codec->id);
3842                     in_codec_name = desc->name;
3843                 if (!strcmp(decoder_name, in_codec_name))
3844                     decoder_name = "native";
3848                 encoder_name   = out_codec->name;
3849                 desc = avcodec_descriptor_get(out_codec->id);
3851                     out_codec_name = desc->name;
3852                 if (!strcmp(encoder_name, out_codec_name))
3853                     encoder_name = "native";
3856             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3857                    in_codec_name, decoder_name,
3858                    out_codec_name, encoder_name);
3860         av_log(NULL, AV_LOG_INFO, "\n");
3864         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Published so signal/status reporting knows init completed. */
3868     atomic_store(&transcode_init_done, 1);
3873 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3874 static int need_output(void)
3878     for (i = 0; i < nb_output_streams; i++) {
3879         OutputStream *ost    = output_streams[i];
3880         OutputFile *of       = output_files[ost->file_index];
3881         AVFormatContext *os  = output_files[ost->file_index]->ctx;
/* A stream counts as "done" when it finished or its file hit -fs limit. */
3883         if (ost->finished ||
3884             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of that output file.
 * NOTE(review): the function's return statements are elided in this
 * excerpt (line-number gap after 3889). */
3886         if (ost->frame_number >= ost->max_frames) {
3888             for (j = 0; j < of->ctx->nb_streams; j++)
3889                 close_output_stream(output_streams[of->ost_index + j]);
3900  * Select the output stream to process.
3902  * @return  selected output stream, or NULL if none available
3904 static OutputStream *choose_output(void)
3907     int64_t opts_min = INT64_MAX;
3908     OutputStream *ost_min = NULL;
/* Pick the stream whose muxed DTS is furthest behind, so all outputs
 * advance roughly in lockstep. Streams with no DTS yet sort first. */
3910     for (i = 0; i < nb_output_streams; i++) {
3911         OutputStream *ost = output_streams[i];
3912         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3913                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3915         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3916             av_log(NULL, AV_LOG_DEBUG,
3917                 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3918                 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3920         if (!ost->initialized && !ost->inputs_done)
/* Temporarily unavailable streams (EAGAIN) are skipped via NULL.
 * NOTE(review): the final return of ost_min is elided in this excerpt. */
3923         if (!ost->finished && opts < opts_min) {
3925             ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin; used around
 * interactive command entry. NOTE(review): the surrounding preprocessor
 * guard (termios availability) is elided in this excerpt. */
3931 static void set_tty_echo(int on)
3935     if (tcgetattr(0, &tty) == 0) {
3936         if (on) tty.c_lflag |= ECHO;
3937         else    tty.c_lflag &= ~ECHO;
3938         tcsetattr(0, TCSANOW, &tty);
/*
 * Poll for single-key interactive commands while transcoding:
 * q quit, +/- verbosity, s QP histogram, h hex/packet dump cycling,
 * c/C send or queue filtergraph commands, d/D debug modes, ? help.
 * Returns AVERROR_EXIT when the user requested termination, 0 otherwise.
 * Polled at most every 100 ms and only when not running as a daemon.
 * NOTE(review): interior lines are elided (line-number gaps).
 */
3943 static int check_keyboard_interaction(int64_t cur_time)
3946     static int64_t last_time;
3947     if (received_nb_signals)
3948         return AVERROR_EXIT;
3949     /* read_key() returns 0 on EOF */
3950     if(cur_time - last_time >= 100000 && !run_as_daemon){
3952         last_time = cur_time;
3956         return AVERROR_EXIT;
3957     if (key == '+') av_log_set_level(av_log_get_level()+10);
3958     if (key == '-') av_log_set_level(av_log_get_level()-10);
3959     if (key == 's') qp_hist     ^= 1;
3962             do_hex_dump = do_pkt_dump = 0;
3963         } else if(do_pkt_dump){
3967             av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a filter command line of the form
 * "<target>|all <time>|-1 <command>[ <argument>]" from the terminal. */
3969     if (key == 'c' || key == 'C'){
3970         char buf[4096], target[64], command[256], arg[256] = {0};
3973         fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3976         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3981         fprintf(stderr, "\n");
3983             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3984             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3985                    target, time, command, arg);
3986             for (i = 0; i < nb_filtergraphs; i++) {
3987                 FilterGraph *fg = filtergraphs[i];
/* Lowercase 'c' sends immediately (first matching filter only);
 * uppercase 'C' queues the command for a later timestamp. */
3990                     ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3991                                                       key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3992                     fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3993                 } else if (key == 'c') {
3994                     fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3995                     ret = AVERROR_PATCHWELCOME;
3997                     ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3999                         fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4004             av_log(NULL, AV_LOG_ERROR,
4005                    "Parse error, at least 3 arguments were expected, "
4006                    "only %d given in string '%s'\n", n, buf);
/* 'd': cycle through codec debug flag values; 'D': prompt for a value. */
4009     if (key == 'd' || key == 'D'){
4012             debug = input_streams[0]->st->codec->debug<<1;
4013             if(!debug) debug = 1;
4014             while(debug & (FF_DEBUG_DCT_COEFF
4016                                              |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
4018                     )) //unsupported, would just crash
4025             while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4030             fprintf(stderr, "\n");
4031             if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4032                 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to all open decoders and encoders. */
4034         for(i=0;i<nb_input_streams;i++) {
4035             input_streams[i]->st->codec->debug = debug;
4037         for(i=0;i<nb_output_streams;i++) {
4038             OutputStream *ost = output_streams[i];
4039             ost->enc_ctx->debug = debug;
4041         if(debug) av_log_set_level(AV_LOG_DEBUG);
4042         fprintf(stderr,"debug=%d\n", debug);
4045         fprintf(stderr, "key    function\n"
4046                         "?      show this help\n"
4047                         "+      increase verbosity\n"
4048                         "-      decrease verbosity\n"
4049                         "c      Send command to first matching filter supporting it\n"
4050                         "C      Send/Queue command to all matching filters\n"
4051                         "D      cycle through available debug modes\n"
4052                         "h      dump packets/hex press to cycle through the 3 states\n"
4054                         "s      Show QP histogram\n"
/*
 * Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through a message queue. On any read
 * error (including EOF) the error is propagated to the queue's receiver
 * and the thread exits. 'arg' is the InputFile being read.
 * NOTE(review): interior lines are elided (line-number gaps).
 */
4061 static void *input_thread(void *arg)
4064     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4069         ret = av_read_frame(f->ctx, &pkt);
4071         if (ret == AVERROR(EAGAIN)) {
4076             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4079         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send hit a full queue: warn once, then retry blocking. */
4080         if (flags && ret == AVERROR(EAGAIN)) {
4082             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4083             av_log(f->ctx, AV_LOG_WARNING,
4084                    "Thread message queue blocking; consider raising the "
4085                    "thread_queue_size option (current value: %d)\n",
4086                    f->thread_queue_size);
4089             if (ret != AVERROR_EOF)
4090                 av_log(f->ctx, AV_LOG_ERROR,
4091                        "Unable to send packet to main thread: %s\n",
/* Send failed: the queue did not take ownership, so unref here. */
4093             av_packet_unref(&pkt);
4094             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Stop and join the demuxer thread of input file i and free its queue.
 * Draining the queue first unrefs any packets still in flight and lets a
 * blocked sender wake up on AVERROR_EOF. No-op when no thread was started.
 */
4102 static void free_input_thread(int i)
4104     InputFile *f = input_files[i];
4107     if (!f || !f->in_thread_queue)
4109     av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4110     while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4111         av_packet_unref(&pkt);
4113     pthread_join(f->thread, NULL);
4115     av_thread_message_queue_free(&f->in_thread_queue);
4118 static void free_input_threads(void)
4122 for (i = 0; i < nb_input_files; i++)
4123 free_input_thread(i);
/*
 * Start a demuxer thread for input file i, allocating its packet message
 * queue first. With multiple inputs the default queue depth is 8; a zero
 * queue size disables threading for this file. Non-seekable (live) inputs
 * use non-blocking queue sends so a slow consumer is detected and warned.
 * Returns 0 on success or a negative AVERROR.
 */
4126 static int init_input_thread(int i)
4129     InputFile *f = input_files[i];
4131     if (f->thread_queue_size < 0)
4132         f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4133     if (!f->thread_queue_size)
/* Live sources (unseekable, and not lavfi) must not block the reader. */
4136     if (f->ctx->pb ? !f->ctx->pb->seekable :
4137         strcmp(f->ctx->iformat->name, "lavfi"))
4138         f->non_blocking = 1;
4139     ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4140                                         f->thread_queue_size, sizeof(AVPacket));
4144     if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4145         av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4146         av_thread_message_queue_free(&f->in_thread_queue);
4147         return AVERROR(ret);
/* Start a demuxer thread for every input file; returns 0 or the first
 * failing init_input_thread() error. NOTE(review): the error check and
 * return are elided in this excerpt (line-number gap). */
4153 static int init_input_threads(void)
4157     for (i = 0; i < nb_input_files; i++) {
4158         ret = init_input_thread(i);
4165 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4167 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4169 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Fetch the next packet from an input file: via the reader thread's queue
 * when threading is enabled, otherwise by calling av_read_frame directly.
 * When -re (rate emulation) is active, returns AVERROR(EAGAIN) until wall
 * clock time has caught up with the stream's DTS, throttling the demux to
 * real time. NOTE(review): the rate_emu guard and comparison lines are
 * elided in this excerpt (line-number gaps).
 */
4173 static int get_input_packet(InputFile *f, AVPacket *pkt)
4177         for (i = 0; i < f->nb_streams; i++) {
4178             InputStream *ist = input_streams[f->ist_index + i];
/* pts: stream position in microseconds; now: elapsed wall time. */
4179             int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4180             int64_t now = av_gettime_relative() - ist->start;
4182                 return AVERROR(EAGAIN);
4187     if (f->thread_queue_size)
4188         return get_input_packet_mt(f, pkt);
4190     return av_read_frame(f->ctx, pkt);
/* Return whether any output stream is currently marked unavailable
 * (i.e. its input returned EAGAIN this round). NOTE(review): the return
 * statements are elided in this excerpt (line-number gap). */
4193 static int got_eagain(void)
4196     for (i = 0; i < nb_output_streams; i++)
4197         if (output_streams[i]->unavailable)
4202 static void reset_eagain(void)
4205 for (i = 0; i < nb_input_files; i++)
4206 input_files[i]->eagain = 0;
4207 for (i = 0; i < nb_output_streams; i++)
4208 output_streams[i]->unavailable = 0;
4211 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares 'tmp' (in tmp_time_base) against *duration (in time_base) with
 * av_compare_ts and stores the larger one, returning the time base that
 * *duration is now expressed in. NOTE(review): the branch bodies are
 * elided in this excerpt (line-number gaps). */
4212 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4213                                AVRational time_base)
4219         return tmp_time_base;
/* negative => existing *duration is smaller than tmp. */
4222     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4225         return tmp_time_base;
/*
 * Seek input file back to its start (for -stream_loop) and extend
 * ifile->duration by the length of the iteration just played, so looped
 * timestamps keep increasing monotonically. Audio streams, when present,
 * determine the last-frame duration; otherwise video frame rate is used.
 * Returns the avformat_seek_file result (negative AVERROR on failure).
 * NOTE(review): interior lines are elided (line-number gaps).
 */
4231 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4234     AVCodecContext *avctx;
4235     int i, ret, has_audio = 0;
4236     int64_t duration = 0;
4238     ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
/* First pass: detect whether any decoded audio samples exist. */
4242     for (i = 0; i < ifile->nb_streams; i++) {
4243         ist   = input_streams[ifile->ist_index + i];
4244         avctx = ist->dec_ctx;
4246         /* duration is the length of the last frame in a stream
4247          * when audio stream is present we don't care about
4248          * last video frame length because it's not defined exactly */
4249         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute the last-frame duration per stream and fold the
 * per-iteration stream duration into ifile->duration. */
4253     for (i = 0; i < ifile->nb_streams; i++) {
4254         ist   = input_streams[ifile->ist_index + i];
4255         avctx = ist->dec_ctx;
4258             if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4259                 AVRational sample_rate = {1, avctx->sample_rate};
4261                 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4266                 if (ist->framerate.num) {
4267                     duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4268                 } else if (ist->st->avg_frame_rate.num) {
4269                     duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4274         if (!ifile->duration)
4275             ifile->time_base = ist->st->time_base;
4276         /* the total duration of the stream, max_pts - min_pts is
4277          * the duration of the stream without the last frame */
4278         if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4279             duration += ist->max_pts - ist->min_pts;
4280         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Positive loop counts are decremented each pass; -1 loops forever. */
4284     if (ifile->loop > 0)
4292  * - 0 -- one packet was read and processed
4293  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4294  *   this function should be called again
4295  * - AVERROR_EOF -- this function should not be called again
/* Read one packet from the given input file, apply timestamp fixes (wrap
 * correction, ts_offset, ts_scale, loop-iteration offset, discontinuity
 * handling), then feed it to process_input_packet(). Handles -stream_loop
 * restarts and EOF flushing of decoders and stream-copy outputs.
 * NOTE(review): interior lines are elided (line-number gaps); some braces
 * and guards between visible statements are not shown. */
4297 static int process_input(int file_index)
4299     InputFile *ifile = input_files[file_index];
4300     AVFormatContext *is;
4303     int ret, thread_ret, i, j;
4306     int disable_discontinuity_correction = copy_ts;
4309     ret = get_input_packet(ifile, &pkt);
4311     if (ret == AVERROR(EAGAIN)) {
/* EOF/error with -stream_loop pending: flush decoders, seek back to the
 * start, and restart the reader thread around the seek. */
4315     if (ret < 0 && ifile->loop) {
4316         AVCodecContext *avctx;
4317         for (i = 0; i < ifile->nb_streams; i++) {
4318             ist = input_streams[ifile->ist_index + i];
4319             avctx = ist->dec_ctx;
4320             if (ist->decoding_needed) {
4321                 ret = process_input_packet(ist, NULL, 1);
4324                 avcodec_flush_buffers(avctx);
4328         free_input_thread(file_index);
4330         ret = seek_to_start(ifile, is);
4332         thread_ret = init_input_thread(file_index);
4337             av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4339             ret = get_input_packet(ifile, &pkt);
4340             if (ret == AVERROR(EAGAIN)) {
/* Genuine end of input: flush decoders, finish non-lavfi outputs, mark
 * the file EOF and report EAGAIN once so the caller reschedules. */
4346         if (ret != AVERROR_EOF) {
4347             print_error(is->url, ret);
4352         for (i = 0; i < ifile->nb_streams; i++) {
4353             ist = input_streams[ifile->ist_index + i];
4354             if (ist->decoding_needed) {
4355                 ret = process_input_packet(ist, NULL, 0);
4360             /* mark all outputs that don't go through lavfi as finished */
4361             for (j = 0; j < nb_output_streams; j++) {
4362                 OutputStream *ost = output_streams[j];
4364                 if (ost->source_index == ifile->ist_index + i &&
4365                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4366                     finish_output_stream(ost);
4370         ifile->eof_reached = 1;
4371         return AVERROR(EAGAIN);
4377         av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4378                          is->streams[pkt.stream_index]);
4380     /* the following test is needed in case new streams appear
4381        dynamically in stream : we ignore them */
4382     if (pkt.stream_index >= ifile->nb_streams) {
4383         report_new_stream(file_index, &pkt);
4384         goto discard_packet;
4387     ist = input_streams[ifile->ist_index + pkt.stream_index];
4389     ist->data_size += pkt.size;
4393         goto discard_packet;
4395     if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4396         av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4397                "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4403         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4404                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4405                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4406                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4407                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4408                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4409                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4410                av_ts2str(input_files[ist->file_index]->ts_offset),
4411                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* One-time pts-wrap correction for formats with < 64-bit timestamps. */
4414     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits<64){
4415         int64_t stime, stime2;
4416         // Correcting starttime based on the enabled streams
4417         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4418         //       so we instead do it here as part of discontinuity handling
4419         if (   ist->next_dts == AV_NOPTS_VALUE
4420             && ifile->ts_offset == -is->start_time
4421             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4422             int64_t new_start_time = INT64_MAX;
4423             for (i=0; i<is->nb_streams; i++) {
4424                 AVStream *st = is->streams[i];
4425                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4427                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4429             if (new_start_time > is->start_time) {
4430                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4431                 ifile->ts_offset = -new_start_time;
4435         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4436         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4437         ist->wrap_correction_done = 1;
/* Timestamps already past the wrap threshold get unwrapped; correction
 * stays pending until both dts and pts are below it. */
4439         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4440             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4441             ist->wrap_correction_done = 0;
4443         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4444             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4445             ist->wrap_correction_done = 0;
4449     /* add the stream-global side data to the first packet */
4450     if (ist->nb_packets == 1) {
4451         for (i = 0; i < ist->st->nb_side_data; i++) {
4452             AVPacketSideData *src_sd = &ist->st->side_data[i];
4455             if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4458             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4461             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4465             memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the file-level timestamp offset, then the -itsscale factor. */
4469     if (pkt.dts != AV_NOPTS_VALUE)
4470         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4471     if (pkt.pts != AV_NOPTS_VALUE)
4472         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4474     if (pkt.pts != AV_NOPTS_VALUE)
4475         pkt.pts *= ist->ts_scale;
4476     if (pkt.dts != AV_NOPTS_VALUE)
4477         pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check against the file's last seen DTS,
 * only before this stream's own dts prediction exists. */
4479     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4480     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4481          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4482         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4483         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4484         int64_t delta   = pkt_dts - ifile->last_ts;
4485         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4486             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
4487             ifile->ts_offset -= delta;
4488             av_log(NULL, AV_LOG_DEBUG,
4489                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4490                    delta, ifile->ts_offset);
4491             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4492             if (pkt.pts != AV_NOPTS_VALUE)
4493                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Offset by the accumulated duration of previous -stream_loop passes. */
4497     duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4498     if (pkt.pts != AV_NOPTS_VALUE) {
4499         pkt.pts += duration;
4500         ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4501         ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4504     if (pkt.dts != AV_NOPTS_VALUE)
4505         pkt.dts += duration;
4507     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* With copy_ts, re-enable discontinuity correction only if the DTS looks
 * like a genuine wraparound (wrapped value is much closer to prediction). */
4509     if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4510         (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4511         int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4512                                             ist->st->time_base, AV_TIME_BASE_Q,
4513                                             AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4514         if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4515             disable_discontinuity_correction = 0;
/* Intra-stream discontinuity handling against this stream's predicted
 * next_dts: shift offsets for discontinuous formats, or drop wildly
 * invalid timestamps for continuous ones. */
4518     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4519          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4520         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4521         !disable_discontinuity_correction) {
4522         int64_t delta   = pkt_dts - ist->next_dts;
4523         if (is->iformat->flags & AVFMT_TS_DISCONT) {
4524             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4525                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
4526                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4527                 ifile->ts_offset -= delta;
4528                 av_log(NULL, AV_LOG_DEBUG,
4529                        "timestamp discontinuity for stream #%d:%d "
4530                        "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4531                        ist->file_index, ist->st->index, ist->st->id,
4532                        av_get_media_type_string(ist->dec_ctx->codec_type),
4533                        delta, ifile->ts_offset);
4534                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4535                 if (pkt.pts != AV_NOPTS_VALUE)
4536                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4539             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4540                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4541                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4542                 pkt.dts = AV_NOPTS_VALUE;
4544             if (pkt.pts != AV_NOPTS_VALUE){
4545                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4546                 delta   = pkt_pts - ist->next_dts;
4547                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4548                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4549                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4550                     pkt.pts = AV_NOPTS_VALUE;
4556     if (pkt.dts != AV_NOPTS_VALUE)
4557         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4560         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4561                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4562                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4563                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4564                av_ts2str(input_files[ist->file_index]->ts_offset),
4565                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video streams alive, then hand the packet to the decoder or
 * stream-copy path; ownership of pkt data stays here (unref below). */
4568     sub2video_heartbeat(ist, pkt.pts);
4570     process_input_packet(ist, &pkt, 0);
4573     av_packet_unref(&pkt);
4579  * Perform a step of transcoding for the specified filter graph.
4581  * @param[in]  graph     filter graph to consider
4582  * @param[out] best_ist  input stream where a frame would allow to continue
4583  * @return  0 for success, <0 for error
/* Asks the graph for its oldest pending output; on success reaps filtered
 * frames, on EOF closes the graph's output streams, and on EAGAIN picks
 * the input filter with the most failed requests as the best stream to
 * feed next. NOTE(review): interior lines are elided (line-number gaps). */
4585 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4588     int nb_requests, nb_requests_max = 0;
4589     InputFilter *ifilter;
4593     ret = avfilter_graph_request_oldest(graph->graph);
4595         return reap_filters(0);
4597     if (ret == AVERROR_EOF) {
4598         ret = reap_filters(1);
4599         for (i = 0; i < graph->nb_outputs; i++)
4600             close_output_stream(graph->outputs[i]->ost);
4603     if (ret != AVERROR(EAGAIN))
/* The graph is starved: choose the hungriest input that can still
 * deliver data (skip inputs that are stalled or at EOF). */
4606     for (i = 0; i < graph->nb_inputs; i++) {
4607         ifilter = graph->inputs[i];
4609         if (input_files[ist->file_index]->eagain ||
4610             input_files[ist->file_index]->eof_reached)
4612         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4613         if (nb_requests > nb_requests_max) {
4614             nb_requests_max = nb_requests;
/* No feedable input found: mark all graph outputs unavailable for now. */
4620         for (i = 0; i < graph->nb_outputs; i++)
4621             graph->outputs[i]->ost->unavailable = 1;
4627  * Run a single step of transcoding.
4629  * @return  0 for success, <0 for error
/* Picks the furthest-behind output, (re)configures its filter graph when
 * all input formats are known, determines which input stream to read,
 * processes one input packet, and reaps any filtered output frames.
 * NOTE(review): interior lines are elided (line-number gaps). */
4631 static int transcode_step(void)
4634     InputStream  *ist = NULL;
4637     ost = choose_output();
4644             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Graph exists but is not yet configured: configure it once every input
 * filter has received its format. */
4648     if (ost->filter && !ost->filter->graph->graph) {
4649         if (ifilter_has_all_input_formats(ost->filter->graph)) {
4650             ret = configure_filtergraph(ost->filter->graph);
4652                 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4658     if (ost->filter && ost->filter->graph->graph) {
4660          * Similar case to the early audio initialization in reap_filters.
4661          * Audio is special in ffmpeg.c currently as we depend on lavfi's
4662          * audio frame buffering/creation to get the output audio frame size
4663          * in samples correct. The audio frame size for the filter chain is
4664          * configured during the output stream initialization.
4666          * Apparently avfilter_graph_request_oldest (called in
4667          * transcode_from_filter just down the line) peeks. Peeking already
4668          * puts one frame "ready to be given out", which means that any
4669          * update in filter buffer sink configuration afterwards will not
4670          * help us. And yes, even if it would be utilized,
4671          * av_buffersink_get_samples is affected, as it internally utilizes
4672          * the same early exit for peeked frames.
4674          * In other words, if avfilter_graph_request_oldest would not make
4675          * further filter chain configuration or usage of
4676          * av_buffersink_get_samples useless (by just causing the return
4677          * of the peeked AVFrame as-is), we could get rid of this additional
4678          * early encoder initialization.
4680         if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4681             init_output_stream_wrapper(ost, NULL, 1);
4683         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Unconfigured graph: feed any input that has not produced output and
 * whose file is not at EOF; if none remain, this output's inputs are done. */
4687     } else if (ost->filter) {
4689         for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4690             InputFilter *ifilter = ost->filter->graph->inputs[i];
4691             if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4697             ost->inputs_done = 1;
/* No filter chain: read directly from the mapped source stream. */
4701         av_assert0(ost->source_index >= 0);
4702         ist = input_streams[ost->source_index];
4705     ret = process_input(ist->file_index);
4706     if (ret == AVERROR(EAGAIN)) {
4707         if (input_files[ist->file_index]->eagain)
4708             ost->unavailable = 1;
4713         return ret == AVERROR_EOF ? 0 : ret;
4715     return reap_filters(0);
4719 * The following code is the main loop of the file converter
4721 static int transcode(void)
4724 AVFormatContext *os;
4727 int64_t timer_start;
4728 int64_t total_packets_written = 0;
4730 ret = transcode_init();
4734 if (stdin_interaction) {
4735 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4738 timer_start = av_gettime_relative();
/* Start the input reader threads (a no-op/stub when threading is not
 * compiled in — TODO confirm against init_input_threads()). */
4741 if ((ret = init_input_threads()) < 0)
/* Main loop: one transcode_step() per iteration, until a signal arrives
 * or no output stream needs more data. */
4745 while (!received_sigterm) {
4746 int64_t cur_time= av_gettime_relative();
4748 /* if 'q' pressed, exits */
4749 if (stdin_interaction)
4750 if (check_keyboard_interaction(cur_time) < 0)
4753 /* check if there's any stream where output is still needed */
4754 if (!need_output()) {
4755 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4759 ret = transcode_step();
4760 if (ret < 0 && ret != AVERROR_EOF) {
4761 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4765 /* dump report by using the output first video and audio streams */
4766 print_report(0, timer_start, cur_time);
/* Tear down the input reader threads before the final flush. */
4769 free_input_threads();
4772 /* at the end of stream, we must flush the decoder buffers */
4773 for (i = 0; i < nb_input_streams; i++) {
4774 ist = input_streams[i];
4775 if (!input_files[ist->file_index]->eof_reached) {
/* NULL packet == flush request for this input stream's decoder. */
4776 process_input_packet(ist, NULL, 0);
4783 /* write the trailer if needed and close file */
4784 for (i = 0; i < nb_output_files; i++) {
4785 os = output_files[i]->ctx;
4786 if (!output_files[i]->header_written) {
4787 av_log(NULL, AV_LOG_ERROR,
4788 "Nothing was written into output file %d (%s), because "
4789 "at least one of its streams received no packets.\n",
4793 if ((ret = av_write_trailer(os)) < 0) {
4794 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4800 /* dump report by using the first video and audio streams */
4801 print_report(1, timer_start, av_gettime_relative());
4803 /* close each encoder */
4804 for (i = 0; i < nb_output_streams; i++) {
4805 ost = output_streams[i];
4806 if (ost->encoding_needed) {
4807 av_freep(&ost->enc_ctx->stats_in);
/* Track totals so -abort_on empty_output / empty_output_stream can be
 * honoured below. */
4809 total_packets_written += ost->packets_written;
4810 if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4811 av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4816 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4817 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4821 /* close each decoder */
4822 for (i = 0; i < nb_input_streams; i++) {
4823 ist = input_streams[i];
4824 if (ist->decoding_needed) {
4825 avcodec_close(ist->dec_ctx);
4826 if (ist->hwaccel_uninit)
4827 ist->hwaccel_uninit(ist->dec_ctx);
4831 hw_device_free_all();
/* Error/cleanup path: reader threads may still be running if we bailed
 * out of the loop above via goto. */
4838 free_input_threads();
/* Free per-output-stream resources: two-pass log files, option dicts and
 * auxiliary buffers. */
4841 if (output_streams) {
4842 for (i = 0; i < nb_output_streams; i++) {
4843 ost = output_streams[i];
4846 if (fclose(ost->logfile))
4847 av_log(NULL, AV_LOG_ERROR,
4848 "Error closing logfile, loss of information possible: %s\n",
4849 av_err2str(AVERROR(errno)));
4850 ost->logfile = NULL;
4852 av_freep(&ost->forced_kf_pts);
4853 av_freep(&ost->apad);
4854 av_freep(&ost->disposition);
4855 av_dict_free(&ost->encoder_opts);
4856 av_dict_free(&ost->sws_dict);
4857 av_dict_free(&ost->swr_opts);
4858 av_dict_free(&ost->resample_opts);
/* Snapshot wall-clock, user-CPU and system-CPU time in microseconds for the
 * -benchmark option.  real_usec is always filled (first struct member, set
 * by the initializer); CPU times come from getrusage() on POSIX or
 * GetProcessTimes() on Windows, else are reported as 0. */
4865 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
4867 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4869 struct rusage rusage;
4871 getrusage(RUSAGE_SELF, &rusage);
4872 time_stamps.user_usec =
4873 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4874 time_stamps.sys_usec =
4875 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4876 #elif HAVE_GETPROCESSTIMES
4878 FILETIME c, e, k, u;
4879 proc = GetCurrentProcess();
4880 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100 ns ticks; dividing by 10 yields microseconds. */
4881 time_stamps.user_usec =
4882 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4883 time_stamps.sys_usec =
4884 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4886 time_stamps.user_usec = time_stamps.sys_usec = 0;
/* Return the process's peak memory usage in bytes (for -benchmark/report),
 * or a fallback when neither getrusage() nor GetProcessMemoryInfo() is
 * available.  ru_maxrss is in kilobytes on the platforms this branch is
 * compiled for, hence the *1024. */
4891 static int64_t getmaxrss(void)
4893 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4894 struct rusage rusage;
4895 getrusage(RUSAGE_SELF, &rusage);
4896 return (int64_t)rusage.ru_maxrss * 1024;
4897 #elif HAVE_GETPROCESSMEMORYINFO
4899 PROCESS_MEMORY_COUNTERS memcounters;
4900 proc = GetCurrentProcess();
4901 memcounters.cb = sizeof(memcounters);
4902 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* NOTE(review): this reports peak pagefile (commit) usage, not peak
 * working set — slightly different from the POSIX ru_maxrss figure. */
4903 return memcounters.PeakPagefileUsage;
/* Log callback that discards all messages; installed from main() when the
 * program is started with "-d" to silence av_log output. */
4909 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: set up logging and cleanup handlers, parse options,
 * run the transcode loop, print benchmark/decode-error statistics, and exit
 * with an appropriate status code. */
4913 int main(int argc, char **argv)
4916 BenchmarkTimeStamps ti;
/* Ensure ffmpeg_cleanup() runs on every exit_program() path. */
4920 register_exit(ffmpeg_cleanup);
4922 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4924 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4925 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: suppress all logging via the null callback
 * (presumably the detach/daemon debug mode — see log_callback_null). */
4927 if(argc>1 && !strcmp(argv[1], "-d")){
4929 av_log_set_callback(log_callback_null);
/* Register devices and enable networking before any option parsing that
 * might open inputs/outputs. */
4935 avdevice_register_all();
4937 avformat_network_init();
4939 show_banner(argc, argv, options);
4941 /* parse options and open all input/output files */
4942 ret = ffmpeg_parse_options(argc, argv);
/* Invoked with no files at all: point the user at the help. */
4946 if (nb_output_files <= 0 && nb_input_files == 0) {
4948 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4952 /* file converter / grab */
4953 if (nb_output_files <= 0) {
4954 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4958 for (i = 0; i < nb_output_files; i++) {
4959 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Take a timestamp baseline, run the whole conversion, then report. */
4963 current_time = ti = get_benchmark_time_stamps();
4964 if (transcode() < 0)
4967 int64_t utime, stime, rtime;
4968 current_time = get_benchmark_time_stamps();
4969 utime = current_time.user_usec - ti.user_usec;
4970 stime = current_time.sys_usec - ti.sys_usec;
4971 rtime = current_time.real_usec - ti.real_usec;
4972 av_log(NULL, AV_LOG_INFO,
4973 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4974 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4976 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4977 decode_error_stat[0], decode_error_stat[1]);
/* Fail if the fraction of erroring frames exceeds -max_error_rate. */
4978 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals death-by-signal to the caller; otherwise the accumulated
 * main_return_code. */
4981 exit_program(received_nb_signals ? 255 : main_return_code);
4982 return main_return_code;