2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
140 static int want_sdp = 1;
142 static BenchmarkTimeStamps current_time;
143 AVIOContext *progress_avio = NULL;
145 static uint8_t *subtitle_out;
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
168 static void free_input_threads(void);
172 Convert subtitles to video with alpha to insert them in filter graphs.
173 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a freshly allocated, fully zeroed RGB32
 * canvas.  The size comes from the decoder when it reports one, otherwise
 * from the recorded sub2video w/h.  Returns <0 on allocation failure.
 * NOTE(review): lines elided from this view likely include the return of
 * the error code — confirm against the full source. */
176 static int sub2video_get_blank_frame(InputStream *ist)
179     AVFrame *frame = ist->sub2video.frame;
/* drop any previous buffer before (re)allocating */
181     av_frame_unref(frame);
/* fall back to the configured sub2video size when the decoder has none */
182     ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
183     ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184     ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
185     if ((ret = av_frame_get_buffer(frame, 0)) < 0)
/* clear the whole canvas (RGB32 zero == transparent black) */
187     memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle into the RGB32 canvas via palette
 * lookup (r->data[1] is the palette, indexed by the 8-bit pixels in the
 * rect).  Non-bitmap rects and rects overflowing the w x h canvas are
 * rejected with a warning. */
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194     uint32_t *pal, *dst2;
198     if (r->type != SUBTITLE_BITMAP) {
199         av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* bounds check: the rectangle must lie entirely inside the canvas */
202     if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203         av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204             r->x, r->y, r->w, r->h, w, h
/* advance to the rect's top-left corner; 4 bytes per RGB32 pixel */
209     dst += r->y * dst_linesize + r->x * 4;
211     pal = (uint32_t *)r->data[1];
212     for (y = 0; y < r->h; y++) {
213         dst2 = (uint32_t *)dst;
215         for (x = 0; x < r->w; x++)
/* palettized 8-bit source pixel -> 32-bit RGBA destination pixel */
216             *(dst2++) = pal[*(src2++)];
218         src += r->linesize[0];
/* Push (a reference to) the current sub2video frame, stamped with pts,
 * into every buffersrc feeding this input stream's filtergraphs, and
 * remember the pts in sub2video.last_pts.  KEEP_REF lets the same frame
 * be sent to several filters; EOF from a closed filter is tolerated. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224     AVFrame *frame = ist->sub2video.frame;
228     av_assert1(frame->data[0]);
229     ist->sub2video.last_pts = frame->pts = pts;
230     for (i = 0; i < ist->nb_filters; i++) {
231         ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232                                            AV_BUFFERSRC_FLAG_KEEP_REF |
233                                            AV_BUFFERSRC_FLAG_PUSH);
/* EOF just means that filter input is closed; only warn on real errors */
234         if (ret != AVERROR_EOF && ret < 0)
235             av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render the given subtitle (or, with sub == NULL, a blank frame) onto the
 * sub2video canvas and push it into the filtergraphs.  Display start/end
 * times are rescaled from AV_TIME_BASE to the stream time base; the end
 * time is remembered in sub2video.end_pts so the heartbeat knows when the
 * subpicture expires. */
240 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242     AVFrame *frame = ist->sub2video.frame;
246     int64_t pts, end_pts;
/* display times are in ms relative to sub->pts (AV_TIME_BASE units) */
251         pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252                                  AV_TIME_BASE_Q, ist->st->time_base);
253         end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254                                  AV_TIME_BASE_Q, ist->st->time_base);
255         num_rects = sub->num_rects;
257         /* If we are initializing the system, utilize current heartbeat
258            PTS as the start time, and show until the following subpicture
259            is received. Otherwise, utilize the previous subpicture's end time
260            as the fall-back value. */
261         pts       = ist->sub2video.initialize ?
262                     heartbeat_pts : ist->sub2video.end_pts;
266     if (sub2video_get_blank_frame(ist) < 0) {
267         av_log(ist->dec_ctx, AV_LOG_ERROR,
268                "Impossible to get a blank canvas.\n");
271     dst          = frame->data    [0];
272     dst_linesize = frame->linesize[0];
/* composite every rectangle of the subtitle onto the blank canvas */
273     for (i = 0; i < num_rects; i++)
274         sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275     sub2video_push_ref(ist, pts);
276     ist->sub2video.end_pts = end_pts;
277     ist->sub2video.initialize = 0;
/* Called when a frame is read from the file containing ist: re-send the
 * current sub2video frame on every sub2video stream of the same file so
 * that filters (e.g. overlay) waiting for a subtitle input do not stall
 * the rest of the graph. */
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282     InputFile *infile = input_files[ist->file_index];
286     /* When a frame is read from a file, examine all sub2video streams in
287        the same file and send the sub2video frame again. Otherwise, decoded
288        video frames could be accumulating in the filter graph while a filter
289        (possibly overlay) is desperately waiting for a subtitle frame. */
290     for (i = 0; i < infile->nb_streams; i++) {
291         InputStream *ist2 = input_streams[infile->ist_index + i];
/* only streams that actually run through the sub2video hack */
292         if (!ist2->sub2video.frame)
294         /* subtitles seem to be usually muxed ahead of other streams;
295            if not, subtracting a larger time here is necessary */
296         pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297         /* do not send the heartbeat frame if the subtitle is already ahead */
298         if (pts2 <= ist2->sub2video.last_pts)
300         if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301             /* if we have hit the end of the current displayed subpicture,
302                or if we need to initialize the system, update the
303                overlaid subpicture and its start/end times */
304             sub2video_update(ist2, pts2 + 1, NULL);
/* only re-push when some buffersrc actually failed a frame request */
305         for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306             nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308             sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: clear any still-displayed
 * subpicture, then send EOF (NULL frame) to all attached buffer sources. */
312 static void sub2video_flush(InputStream *ist)
317     if (ist->sub2video.end_pts < INT64_MAX)
318         sub2video_update(ist, INT64_MAX, NULL);
319     for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame == EOF marker for the buffersrc */
320         ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321         if (ret != AVERROR_EOF && ret < 0)
322             av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
326 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the saved tty attributes.
 * Must only call signal-safe functions since it can run from a handler.
 * NOTE(review): guard conditions around these calls appear to be elided
 * from this view — confirm against the full source. */
328 static void term_exit_sigsafe(void)
332         tcsetattr (0, TCSANOW, &oldtty);
338     av_log(NULL, AV_LOG_QUIET, "%s", "");
342 static volatile int received_sigterm = 0;
343 static volatile int received_nb_signals = 0;
344 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
345 static volatile int ffmpeg_exited = 0;
346 static int main_return_code = 0;
347 static int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
/* Signal handler: record which signal arrived and how many times; after
 * more than 3 signals give up on graceful shutdown and hard-exit (the
 * write() to fd 2 is async-signal-safe, unlike stdio). */
350 sigterm_handler(int sig)
353     received_sigterm = sig;
354     received_nb_signals++;
356     if(received_nb_signals > 3) {
357         ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
358                     strlen("Received > 3 system signals, hard exiting\n"));
/* result deliberately ignored: nothing useful to do if stderr is gone */
359         if (ret < 0) { /* Do nothing */ };
364 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the
 * POSIX-style signals the rest of ffmpeg understands. */
365 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367     av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
372     case CTRL_BREAK_EVENT:
373         sigterm_handler(SIGINT);
376     case CTRL_CLOSE_EVENT:
377     case CTRL_LOGOFF_EVENT:
378     case CTRL_SHUTDOWN_EVENT:
379         sigterm_handler(SIGTERM);
380         /* Basically, with these 3 events, when we return from this method the
381            process is hard terminated, so stall as long as we need to
382            to try and let the main thread(s) clean up and gracefully terminate
383            (we have at most 5 seconds, but should be done far before that). */
384         while (!ffmpeg_exited) {
390         av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Terminal / signal setup (presumably term_init — the definition line is
 * not visible here; confirm against the full source).  Puts the tty into
 * raw-ish mode for interactive key handling and installs the signal
 * handlers used for graceful shutdown. */
399     if (!run_as_daemon && stdin_interaction) {
401         if (tcgetattr (0, &tty) == 0) {
/* raw-ish input: disable canonical mode, echo, signals-from-keys etc. */
405             tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
406                             |INLCR|IGNCR|ICRNL|IXON);
407             tty.c_oflag |= OPOST;
408             tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
409             tty.c_cflag &= ~(CSIZE|PARENB);
414             tcsetattr (0, TCSANOW, &tty);
416         signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
420     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
421     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
423     signal(SIGXCPU, sigterm_handler);
/* dying on a broken pipe would bypass our cleanup; handle write errors instead */
426     signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
428 #if HAVE_SETCONSOLECTRLHANDLER
429     SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
433 /* read a key without blocking */
434 static int read_key(void)
/* POSIX path: poll stdin with select() so the call never blocks */
446     n = select(1, &rfds, NULL, NULL, &tv);
455 #    if HAVE_PEEKNAMEDPIPE
457     static HANDLE input_handle;
460         input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails on pipes, which is how we detect redirection */
461         is_pipe = !GetConsoleMode(input_handle, &dw);
465         /* When running under a GUI, you will end here. */
466         if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
467             // input pipe may have been closed by the program that ran ffmpeg
/* avio interrupt callback: abort blocking I/O once a signal has been
 * received (after init is done, a single signal suffices; during init two
 * are required, since transcode_init_done is 0 then 1). */
485 static int decode_interrupt_cb(void *ctx)
487     return received_nb_signals > atomic_load(&transcode_init_done);
490 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown: free every filtergraph, output file/stream, input
 * file/stream and the associated queues, close the vstats file, and shut
 * down the network layer.  Called once on exit; `ret` is the exit status
 * used to decide whether to report a failed conversion. */
492 static void ffmpeg_cleanup(int ret)
497         int maxrss = getmaxrss() / 1024;
498         av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: graph, per-input queues, per-output metadata --- */
501     for (i = 0; i < nb_filtergraphs; i++) {
502         FilterGraph *fg = filtergraphs[i];
503         avfilter_graph_free(&fg->graph);
504         for (j = 0; j < fg->nb_inputs; j++) {
505             InputFilter *ifilter = fg->inputs[j];
506             struct InputStream *ist = ifilter->ist;
/* drain and free any frames still queued for this filter input */
508             while (av_fifo_size(ifilter->frame_queue)) {
510                 av_fifo_generic_read(ifilter->frame_queue, &frame,
511                                      sizeof(frame), NULL);
512                 av_frame_free(&frame);
514             av_fifo_freep(&ifilter->frame_queue);
515             if (ist->sub2video.sub_queue) {
/* likewise for buffered subtitles of the sub2video hack */
516                 while (av_fifo_size(ist->sub2video.sub_queue)) {
518                     av_fifo_generic_read(ist->sub2video.sub_queue,
519                                          &sub, sizeof(sub), NULL);
520                     avsubtitle_free(&sub);
522                 av_fifo_freep(&ist->sub2video.sub_queue);
524             av_buffer_unref(&ifilter->hw_frames_ctx);
525             av_freep(&ifilter->name);
526             av_freep(&fg->inputs[j]);
528         av_freep(&fg->inputs);
529         for (j = 0; j < fg->nb_outputs; j++) {
530             OutputFilter *ofilter = fg->outputs[j];
532             avfilter_inout_free(&ofilter->out_tmp);
533             av_freep(&ofilter->name);
534             av_freep(&ofilter->formats);
535             av_freep(&ofilter->channel_layouts);
536             av_freep(&ofilter->sample_rates);
537             av_freep(&fg->outputs[j]);
539         av_freep(&fg->outputs);
540         av_freep(&fg->graph_desc);
542         av_freep(&filtergraphs[i]);
544     av_freep(&filtergraphs);
546     av_freep(&subtitle_out);
/* --- output files: close muxer contexts and their options --- */
549     for (i = 0; i < nb_output_files; i++) {
550         OutputFile *of = output_files[i];
/* only close the underlying file when the muxer actually opened one */
555         if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
557         avformat_free_context(s);
558         av_dict_free(&of->opts);
560         av_freep(&output_files[i]);
/* --- output streams: encoder contexts, filters, muxing queues --- */
562     for (i = 0; i < nb_output_streams; i++) {
563         OutputStream *ost = output_streams[i];
568         av_bsf_free(&ost->bsf_ctx);
570         av_frame_free(&ost->filtered_frame);
571         av_frame_free(&ost->last_frame);
572         av_dict_free(&ost->encoder_opts);
574         av_freep(&ost->forced_keyframes);
575         av_expr_free(ost->forced_keyframes_pexpr);
576         av_freep(&ost->avfilter);
577         av_freep(&ost->logfile_prefix);
579         av_freep(&ost->audio_channels_map);
580         ost->audio_channels_mapped = 0;
582         av_dict_free(&ost->sws_dict);
583         av_dict_free(&ost->swr_opts);
585         avcodec_free_context(&ost->enc_ctx);
586         avcodec_parameters_free(&ost->ref_par);
588         if (ost->muxing_queue) {
/* unref packets that were buffered before the muxer header was written */
589             while (av_fifo_size(ost->muxing_queue)) {
591                 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
592                 av_packet_unref(&pkt);
594             av_fifo_freep(&ost->muxing_queue);
597         av_freep(&output_streams[i]);
600     free_input_threads();
/* --- input files and streams: demuxers, decoders, scratch frames --- */
602     for (i = 0; i < nb_input_files; i++) {
603         avformat_close_input(&input_files[i]->ctx);
604         av_freep(&input_files[i]);
606     for (i = 0; i < nb_input_streams; i++) {
607         InputStream *ist = input_streams[i];
609         av_frame_free(&ist->decoded_frame);
610         av_frame_free(&ist->filter_frame);
611         av_dict_free(&ist->decoder_opts);
612         avsubtitle_free(&ist->prev_sub.subtitle);
613         av_frame_free(&ist->sub2video.frame);
614         av_freep(&ist->filters);
615         av_freep(&ist->hwaccel_device);
616         av_freep(&ist->dts_buffer);
618         avcodec_free_context(&ist->dec_ctx);
620         av_freep(&input_streams[i]);
/* fclose() may flush buffered vstats data; report a failure loudly */
624         if (fclose(vstats_file))
625             av_log(NULL, AV_LOG_ERROR,
626                    "Error closing vstats file, loss of information possible: %s\n",
627                    av_err2str(AVERROR(errno)));
629     av_freep(&vstats_filename);
631     av_freep(&input_streams);
632     av_freep(&input_files);
633     av_freep(&output_streams);
634     av_freep(&output_files);
638     avformat_network_deinit();
640     if (received_sigterm) {
641         av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
642                (int) received_sigterm);
643     } else if (ret && atomic_load(&transcode_init_done)) {
644         av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to strip consumed options from a leftover-options dictionary. */
650 void remove_avoptions(AVDictionary **a, AVDictionary *b)
652     AVDictionaryEntry *t = NULL;
654     while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
655         av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fail hard if the dictionary still contains any entry, i.e. if some
 * user-supplied option was not consumed by any component. */
659 void assert_avoptions(AVDictionary *m)
661     AVDictionaryEntry *t;
662     if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
663         av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort when an experimental codec is selected without the required
 * opt-in; `encoder` distinguishes encoder vs decoder for the message
 * (body elided from this view). */
668 static void abort_codec_experimental(AVCodec *c, int encoder)
/* When -benchmark_all is active, log user/sys/real time deltas since the
 * previous call (labelled by the printf-style fmt) and reset the
 * current_time reference point. */
673 static void update_benchmark(const char *fmt, ...)
675     if (do_benchmark_all) {
676         BenchmarkTimeStamps t = get_benchmark_time_stamps();
682             vsnprintf(buf, sizeof(buf), fmt, va);
684             av_log(NULL, AV_LOG_INFO,
685                    "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
686                    t.user_usec - current_time.user_usec,
687                    t.sys_usec - current_time.sys_usec,
688                    t.real_usec - current_time.real_usec, buf);
/* Mark every output stream finished: `this_stream` flags are OR-ed into
 * ost itself, `others` into all remaining streams. */
694 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
697     for (i = 0; i < nb_output_streams; i++) {
698         OutputStream *ost2 = output_streams[i];
699         ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer for stream ost: enforce -frames limits,
 * buffer the packet if the muxer header is not written yet, extract
 * quality side data for video, rescale/repair timestamps (including the
 * non-monotonous-DTS fixup), then interleave-write.  `unqueue` is set when
 * re-submitting a previously buffered packet so it is not counted twice.
 * The packet is consumed in all paths. */
703 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
705     AVFormatContext *s = of->ctx;
706     AVStream *st = ost->st;
710      * Audio encoders may split the packets --  #frames in != #packets out.
711      * But there is no reordering, so we can limit the number of output packets
712      * by simply dropping them here.
713      * Counting encoded video frames needs to be done separately because of
714      * reordering, see do_video_out().
715      * Do not count the packet when unqueued because it has been counted when queued.
717     if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
718         if (ost->frame_number >= ost->max_frames) {
719             av_packet_unref(pkt);
725     if (!of->header_written) {
726         AVPacket tmp_pkt = {0};
727         /* the muxer is not initialized yet, buffer the packet */
728         if (!av_fifo_space(ost->muxing_queue)) {
/* grow the queue, but cap growth once the buffered byte count passes the
 * configured threshold */
729             unsigned int are_we_over_size =
730                 (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
731             int new_size = are_we_over_size ?
732                            FFMIN(2 * av_fifo_size(ost->muxing_queue),
733                                  ost->max_muxing_queue_size) :
734                            2 * av_fifo_size(ost->muxing_queue);
736             if (new_size <= av_fifo_size(ost->muxing_queue)) {
737                 av_log(NULL, AV_LOG_ERROR,
738                        "Too many packets buffered for output stream %d:%d.\n",
739                        ost->file_index, ost->st->index);
742             ret = av_fifo_realloc2(ost->muxing_queue, new_size);
746         ret = av_packet_make_refcounted(pkt);
749         av_packet_move_ref(&tmp_pkt, pkt);
750         ost->muxing_queue_data_size += tmp_pkt.size;
751         av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* sync methods that drop timestamps let the muxer regenerate them */
755     if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
756         (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
757         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
759     if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* pull encoder quality/PSNR stats out of the packet side data */
761         uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
763         ost->quality = sd ? AV_RL32(sd) : -1;
764         ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
766         for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
768                 ost->error[i] = AV_RL64(sd + 8 + 8*i);
773         if (ost->frame_rate.num && ost->is_cfr) {
774             if (pkt->duration > 0)
775                 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
776             pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
781     av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
783     if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
784         if (pkt->dts != AV_NOPTS_VALUE &&
785             pkt->pts != AV_NOPTS_VALUE &&
786             pkt->dts > pkt->pts) {
787             av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
789                    ost->file_index, ost->st->index);
/* replace dts by the median of pts, dts and last_mux_dts+1:
 * sum minus min minus max leaves the middle value */
791             pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
792                      - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
793                      - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
795         if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
796             pkt->dts != AV_NOPTS_VALUE &&
797             !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
798             ost->last_mux_dts != AV_NOPTS_VALUE) {
/* strict formats require strictly increasing DTS; others allow equality */
799             int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
800             if (pkt->dts < max) {
801                 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
803                     loglevel = AV_LOG_ERROR;
804                 av_log(s, loglevel, "Non-monotonous DTS in output stream "
805                        "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
806                        ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
808                     av_log(NULL, AV_LOG_FATAL, "aborting.\n");
811                 av_log(s, loglevel, "changing to %"PRId64". This may result "
812                        "in incorrect timestamps in the output file.\n",
/* keep pts >= dts after clamping dts up to the monotonicity floor */
814                 if (pkt->pts >= pkt->dts)
815                     pkt->pts = FFMAX(pkt->pts, max);
820     ost->last_mux_dts = pkt->dts;
822     ost->data_size += pkt->size;
823     ost->packets_written++;
825     pkt->stream_index = ost->index;
828         av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
829                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
830                av_get_media_type_string(ost->enc_ctx->codec_type),
831                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
832                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
837     ret = av_interleaved_write_frame(s, pkt);
/* a mux failure finishes every stream: this one fully, others encoder-side */
839         print_error("av_interleaved_write_frame()", ret);
840         main_return_code = 1;
841         close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
843     av_packet_unref(pkt);
/* Mark ost's encoder as finished and shorten the owning file's recording
 * time to the stream's current end, so other streams stop at the same
 * point. */
846 static void close_output_stream(OutputStream *ost)
848     OutputFile *of = output_files[ost->file_index];
850     ost->finished |= ENCODER_FINISHED;
852         int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
853         of->recording_time = FFMIN(of->recording_time, end);
858 * Send a single packet to the output, applying any bitstream filters
859 * associated with the output stream. This may result in any number
860 * of packets actually being written, depending on what bitstream
861 * filters are applied. The supplied packet is consumed and will be
862 * blank (as if newly-allocated) when this function returns.
864 * If eof is set, instead indicate EOF to all bitstream filters and
865 * therefore flush any delayed packets to the output. A blank packet
866 * must be supplied in this case.
/* See the block comment above: run the packet through the stream's
 * bitstream filter chain (or flush it with eof set) and hand every
 * resulting packet to write_packet(). */
868 static void output_packet(OutputFile *of, AVPacket *pkt,
869                           OutputStream *ost, int eof)
873     /* apply the output bitstream filters */
/* NULL input signals EOF to the bsf, flushing its delayed packets */
875         ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
878         while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
879             write_packet(of, pkt, ost, 0);
/* EAGAIN just means the bsf wants more input — not an error */
880         if (ret == AVERROR(EAGAIN))
/* no bitstream filter configured: pass the packet straight through */
883         write_packet(of, pkt, ost, 0);
886     if (ret < 0 && ret != AVERROR_EOF) {
887         av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
888                "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether ost may still emit data: if the owning file has a -t
 * limit and the stream's current position has reached it, close the
 * stream.  (Return statements are elided from this view.) */
894 static int check_recording_time(OutputStream *ost)
896     OutputFile *of = output_files[ost->file_index];
898     if (of->recording_time != INT64_MAX &&
899         av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
900                       AV_TIME_BASE_Q) >= 0) {
901         close_output_stream(ost);
/* Rescale a filtered frame's pts from the buffersink time base to the
 * encoder time base, offset by the output file's start time, and return
 * the value as a double with extra sub-tick precision (extra_bits of the
 * time base denominator are used for the fractional part). */
907 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
910     double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
911     AVCodecContext *enc = ost->enc_ctx;
912     if (!frame || frame->pts == AV_NOPTS_VALUE ||
913         !enc || !ost->filter || !ost->filter->graph->graph)
917         AVFilterContext *filter = ost->filter->filter;
919         int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
920         AVRational filter_tb = av_buffersink_get_time_base(filter);
921         AVRational tb = enc->time_base;
/* widen the denominator so the integer rescale keeps fractional ticks */
922         int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
924         tb.den <<= extra_bits;
926             av_rescale_q(frame->pts, filter_tb, tb) -
927             av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
928         float_pts /= 1 << extra_bits;
929         // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
930         float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* the frame itself gets the normal (unwidened) encoder-time-base pts */
933             av_rescale_q(frame->pts, filter_tb, enc->time_base) -
934             av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
940         av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
941                frame ? av_ts2str(frame->pts) : "NULL",
942                frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
944                enc ? enc->time_base.num : -1,
945                enc ? enc->time_base.den : -1);
951 static int init_output_stream(OutputStream *ost, AVFrame *frame,
952 char *error, int error_len);
/* Idempotent wrapper around init_output_stream(): no-op when the stream
 * is already initialized, otherwise initialize it and log any failure
 * with the collected error text. */
954 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
957     int ret = AVERROR_BUG;
958     char error[1024] = {0};
960     if (ost->initialized)
963     ret = init_output_stream(ost, frame, error, sizeof(error));
965         av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
966                ost->file_index, ost->index, error);
/* Encode one audio frame: adjust its pts to the encoder time base, fill
 * in missing timestamps from the running sync_opts counter, send it to
 * the encoder and mux every packet the encoder returns. */
975 static void do_audio_out(OutputFile *of, OutputStream *ost,
978     AVCodecContext *enc = ost->enc_ctx;
982     av_init_packet(&pkt);
986     adjust_frame_pts_to_encoder_tb(of, ost, frame);
988     if (!check_recording_time(ost))
/* no usable pts (or async disabled): continue from the last position */
991     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
992         frame->pts = ost->sync_opts;
993     ost->sync_opts = frame->pts + frame->nb_samples;
994     ost->samples_encoded += frame->nb_samples;
995     ost->frames_encoded++;
997     av_assert0(pkt.size || !pkt.data);
998     update_benchmark(NULL);
1000         av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1001                "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1002                av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1003                enc->time_base.num, enc->time_base.den);
1006     ret = avcodec_send_frame(enc, frame);
/* drain all packets the encoder has ready for this input */
1011         ret = avcodec_receive_packet(enc, &pkt);
1012         if (ret == AVERROR(EAGAIN))
1017         update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1019         av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1022             av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1023                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1024                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1025                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1028         output_packet(of, &pkt, ost, 0);
1033     av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one subtitle into the shared subtitle_out buffer and mux it.
 * DVB subtitles are encoded twice (one packet draws the subpicture, a
 * second clears it), with pts shifted accordingly. */
1037 static void do_subtitle_out(OutputFile *of,
1041     int subtitle_out_max_size = 1024 * 1024;
1042     int subtitle_out_size, nb, i;
1043     AVCodecContext *enc;
1047     if (sub->pts == AV_NOPTS_VALUE) {
1048         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* the 1 MiB scratch buffer is allocated lazily and reused globally */
1056     if (!subtitle_out) {
1057         subtitle_out = av_malloc(subtitle_out_max_size);
1058         if (!subtitle_out) {
1059             av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1064     /* Note: DVB subtitle need one packet to draw them and one other
1065        packet to clear them */
1066     /* XXX: signal it in the codec context ? */
1067     if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1072     /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1074     if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1075         pts -= output_files[ost->file_index]->start_time;
1076     for (i = 0; i < nb; i++) {
1077         unsigned save_num_rects = sub->num_rects;
1079         ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1080         if (!check_recording_time(ost))
1084         // start_display_time is required to be 0
1085         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1086         sub->end_display_time  -= sub->start_display_time;
1087         sub->start_display_time = 0;
1091         ost->frames_encoded++;
1093         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1094                                                     subtitle_out_max_size, sub);
/* num_rects may have been clobbered (e.g. zeroed for the clear pass);
 * restore it for the caller */
1096         sub->num_rects = save_num_rects;
1097         if (subtitle_out_size < 0) {
1098             av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1102         av_init_packet(&pkt);
1103         pkt.data = subtitle_out;
1104         pkt.size = subtitle_out_size;
1105         pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1106         pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1107         if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1108             /* XXX: the pts correction is handled here. Maybe handling
1109                it in the codec would be better */
/* first pass (draw) starts at start_display_time, second (clear) at
 * end_display_time */
1111                 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1113                 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1116         output_packet(of, &pkt, ost, 0);
1120 static void do_video_out(OutputFile *of,
1122 AVFrame *next_picture)
1124 int ret, format_video_sync;
1126 AVCodecContext *enc = ost->enc_ctx;
1127 AVRational frame_rate;
1128 int nb_frames, nb0_frames, i;
1129 double delta, delta0;
1130 double duration = 0;
1131 double sync_ipts = AV_NOPTS_VALUE;
1133 InputStream *ist = NULL;
1134 AVFilterContext *filter = ost->filter->filter;
1136 init_output_stream_wrapper(ost, next_picture, 1);
1137 sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1139 if (ost->source_index >= 0)
1140 ist = input_streams[ost->source_index];
1142 frame_rate = av_buffersink_get_frame_rate(filter);
1143 if (frame_rate.num > 0 && frame_rate.den > 0)
1144 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1146 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1147 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1149 if (!ost->filters_script &&
1151 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1154 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1155 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1158 if (!next_picture) {
1160 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1161 ost->last_nb0_frames[1],
1162 ost->last_nb0_frames[2]);
1164 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1165 delta = delta0 + duration;
1167 /* by default, we output a single frame */
1168 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1171 format_video_sync = video_sync_method;
1172 if (format_video_sync == VSYNC_AUTO) {
1173 if(!strcmp(of->ctx->oformat->name, "avi")) {
1174 format_video_sync = VSYNC_VFR;
1176 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1178 && format_video_sync == VSYNC_CFR
1179 && input_files[ist->file_index]->ctx->nb_streams == 1
1180 && input_files[ist->file_index]->input_ts_offset == 0) {
1181 format_video_sync = VSYNC_VSCFR;
1183 if (format_video_sync == VSYNC_CFR && copy_ts) {
1184 format_video_sync = VSYNC_VSCFR;
1187 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1191 format_video_sync != VSYNC_PASSTHROUGH &&
1192 format_video_sync != VSYNC_DROP) {
1193 if (delta0 < -0.6) {
1194 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1196 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1197 sync_ipts = ost->sync_opts;
1202 switch (format_video_sync) {
1204 if (ost->frame_number == 0 && delta0 >= 0.5) {
1205 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1208 ost->sync_opts = llrint(sync_ipts);
1211 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1212 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1214 } else if (delta < -1.1)
1216 else if (delta > 1.1) {
1217 nb_frames = lrintf(delta);
1219 nb0_frames = llrintf(delta0 - 0.6);
1225 else if (delta > 0.6)
1226 ost->sync_opts = llrint(sync_ipts);
1229 case VSYNC_PASSTHROUGH:
1230 ost->sync_opts = llrint(sync_ipts);
1237 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1238 nb0_frames = FFMIN(nb0_frames, nb_frames);
1240 memmove(ost->last_nb0_frames + 1,
1241 ost->last_nb0_frames,
1242 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1243 ost->last_nb0_frames[0] = nb0_frames;
1245 if (nb0_frames == 0 && ost->last_dropped) {
1247 av_log(NULL, AV_LOG_VERBOSE,
1248 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1249 ost->frame_number, ost->st->index, ost->last_frame->pts);
1251 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1252 if (nb_frames > dts_error_threshold * 30) {
1253 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1257 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1258 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1259 if (nb_frames_dup > dup_warning) {
1260 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1264 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1266 /* duplicates frame if needed */
1267 for (i = 0; i < nb_frames; i++) {
1268 AVFrame *in_picture;
1269 int forced_keyframe = 0;
1271 av_init_packet(&pkt);
1275 if (i < nb0_frames && ost->last_frame) {
1276 in_picture = ost->last_frame;
1278 in_picture = next_picture;
1283 in_picture->pts = ost->sync_opts;
1285 if (!check_recording_time(ost))
1288 in_picture->quality = enc->global_quality;
1289 in_picture->pict_type = 0;
1291 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1292 in_picture->pts != AV_NOPTS_VALUE)
1293 ost->forced_kf_ref_pts = in_picture->pts;
1295 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1296 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1297 if (ost->forced_kf_index < ost->forced_kf_count &&
1298 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1299 ost->forced_kf_index++;
1300 forced_keyframe = 1;
1301 } else if (ost->forced_keyframes_pexpr) {
1303 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1304 res = av_expr_eval(ost->forced_keyframes_pexpr,
1305 ost->forced_keyframes_expr_const_values, NULL);
1306 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1307 ost->forced_keyframes_expr_const_values[FKF_N],
1308 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1309 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1310 ost->forced_keyframes_expr_const_values[FKF_T],
1311 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1314 forced_keyframe = 1;
1315 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1316 ost->forced_keyframes_expr_const_values[FKF_N];
1317 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1318 ost->forced_keyframes_expr_const_values[FKF_T];
1319 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1322 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1323 } else if ( ost->forced_keyframes
1324 && !strncmp(ost->forced_keyframes, "source", 6)
1325 && in_picture->key_frame==1
1327 forced_keyframe = 1;
1330 if (forced_keyframe) {
1331 in_picture->pict_type = AV_PICTURE_TYPE_I;
1332 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1335 update_benchmark(NULL);
1337 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1338 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1339 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1340 enc->time_base.num, enc->time_base.den);
1343 ost->frames_encoded++;
1345 ret = avcodec_send_frame(enc, in_picture);
1348 // Make sure Closed Captions will not be duplicated
1349 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1352 ret = avcodec_receive_packet(enc, &pkt);
1353 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1354 if (ret == AVERROR(EAGAIN))
1360 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1361 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1362 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1363 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1366 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1367 pkt.pts = ost->sync_opts;
1369 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1372 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1373 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1374 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1375 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1378 frame_size = pkt.size;
1379 output_packet(of, &pkt, ost, 0);
1381 /* if two pass, output log */
1382 if (ost->logfile && enc->stats_out) {
1383 fprintf(ost->logfile, "%s", enc->stats_out);
1388 * For video, number of frames in == number of packets out.
1389 * But there may be reordering, so we can't throw away frames on encoder
1390 * flush, we need to limit them here, before they go into encoder.
1392 ost->frame_number++;
1394 if (vstats_filename && frame_size)
1395 do_video_stats(ost, frame_size);
1398 if (!ost->last_frame)
1399 ost->last_frame = av_frame_alloc();
1400 av_frame_unref(ost->last_frame);
1401 if (next_picture && ost->last_frame)
1402 av_frame_ref(ost->last_frame, next_picture);
1404 av_frame_free(&ost->last_frame);
1408 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* Convert a normalized mean-squared-error ratio d into a PSNR value in dB.
 * Callers pass error/scale where scale includes the 255*255 peak term.
 * NOTE(review): listing is elided; the function's braces are not shown. */
1412 static double psnr(double d)
1414 return -10.0 * log10(d);
/* Append one line of per-frame statistics for a video stream to the file
 * named by -vstats_file (frame number, quantizer, optional PSNR, frame and
 * stream sizes, instantaneous and average bitrates, picture type).
 * NOTE(review): this listing is elided; intermediate statements (including
 * the lazy-open guard around fopen and the enc assignment) are missing. */
1417 static void do_video_stats(OutputStream *ost, int frame_size)
1419 AVCodecContext *enc;
1421 double ti1, bitrate, avg_bitrate;
1423 /* this is executed just the first time do_video_stats is called */
1425 vstats_file = fopen(vstats_filename, "w");
1433 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1434 frame_number = ost->st->nb_frames;
/* vstats_version selects between the legacy (v1) and extended line format. */
1435 if (vstats_version <= 1) {
1436 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1437 ost->quality / (float)FF_QP2LAMBDA);
1439 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1440 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only printed when the encoder was asked to collect it. */
1443 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1444 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1446 fprintf(vstats_file,"f_size= %6d ", frame_size);
1447 /* compute pts value */
1448 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1452 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1453 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1454 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1455 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1456 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark a stream as completely finished (encoder and muxer), and do the same
 * for every other stream belonging to the same output file. */
1460 static void finish_output_stream(OutputStream *ost)
1462 OutputFile *of = output_files[ost->file_index];
1465 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* All sibling streams of the output file are finished together. */
1468 for (i = 0; i < of->ctx->nb_streams; i++)
1469 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* NOTE(review): elided listing — parts of this function's doc comment and
 * body (loop braces, continue/break statements) are missing below. */
1474 * Get and encode new output from any of the filtergraphs, without causing
1477 * @return 0 for success, <0 for severe errors
1479 static int reap_filters(int flush)
1481 AVFrame *filtered_frame = NULL;
1484 /* Reap all buffers present in the buffer sinks */
1485 for (i = 0; i < nb_output_streams; i++) {
1486 OutputStream *ost = output_streams[i];
1487 OutputFile *of = output_files[ost->file_index];
1488 AVFilterContext *filter;
1489 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph has not been configured yet. */
1492 if (!ost->filter || !ost->filter->graph->graph)
1494 filter = ost->filter->filter;
1497 * Unlike video, with audio the audio frame size matters.
1498 * Currently we are fully reliant on the lavfi filter chain to
1499 * do the buffering deed for us, and thus the frame size parameter
1500 * needs to be set accordingly. Where does one get the required
1501 * frame size? From the initialized AVCodecContext of an audio
1502 * encoder. Thus, if we have gotten to an audio stream, initialize
1503 * the encoder earlier than receiving the first AVFrame.
1505 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
1506 init_output_stream_wrapper(ost, NULL, 1);
/* Lazily allocate the reusable per-stream frame for sink output. */
1508 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1509 return AVERROR(ENOMEM);
1511 filtered_frame = ost->filtered_frame;
/* NO_REQUEST: only take frames already buffered in the sink. */
1514 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1515 AV_BUFFERSINK_FLAG_NO_REQUEST);
1517 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1518 av_log(NULL, AV_LOG_WARNING,
1519 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* On EOF during a flush, push a NULL frame so video encoders drain. */
1520 } else if (flush && ret == AVERROR_EOF) {
1521 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1522 do_video_out(of, ost, NULL);
1526 if (ost->finished) {
1527 av_frame_unref(filtered_frame);
1531 switch (av_buffersink_get_type(filter)) {
1532 case AVMEDIA_TYPE_VIDEO:
1533 if (!ost->frame_aspect_ratio.num)
1534 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1536 do_video_out(of, ost, filtered_frame);
1538 case AVMEDIA_TYPE_AUDIO:
1539 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1540 enc->channels != filtered_frame->channels) {
1541 av_log(NULL, AV_LOG_ERROR,
1542 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1545 do_audio_out(of, ost, filtered_frame);
1548 // TODO support subtitle filters
1552 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type byte totals, muxing overhead
 * percentage, and (at verbose level) per-stream demux/decode and
 * encode/mux statistics for every input and output file.
 * NOTE(review): elided listing — closing braces and some statements
 * (e.g. the data-stream case, else branches) are missing below. */
1559 static void print_final_stats(int64_t total_size)
1561 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1562 uint64_t subtitle_size = 0;
1563 uint64_t data_size = 0;
1564 float percent = -1.0;
/* Accumulate encoded byte counts per media type across all output streams. */
1568 for (i = 0; i < nb_output_streams; i++) {
1569 OutputStream *ost = output_streams[i];
1570 switch (ost->enc_ctx->codec_type) {
1571 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1572 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1573 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1574 default: other_size += ost->data_size; break;
1576 extra_size += ost->enc_ctx->extradata_size;
1577 data_size += ost->data_size;
1578 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1579 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead = container bytes beyond the raw stream payload. */
1583 if (data_size && total_size>0 && total_size >= data_size)
1584 percent = 100.0 * (total_size - data_size) / data_size;
1586 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1587 video_size / 1024.0,
1588 audio_size / 1024.0,
1589 subtitle_size / 1024.0,
1590 other_size / 1024.0,
1591 extra_size / 1024.0);
1593 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1595 av_log(NULL, AV_LOG_INFO, "unknown");
1596 av_log(NULL, AV_LOG_INFO, "\n");
1598 /* print verbose per-stream stats */
1599 for (i = 0; i < nb_input_files; i++) {
1600 InputFile *f = input_files[i];
1601 uint64_t total_packets = 0, total_size = 0;
1603 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1606 for (j = 0; j < f->nb_streams; j++) {
1607 InputStream *ist = input_streams[f->ist_index + j];
1608 enum AVMediaType type = ist->dec_ctx->codec_type;
1610 total_size += ist->data_size;
1611 total_packets += ist->nb_packets;
1613 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1614 i, j, media_type_string(type));
1615 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1616 ist->nb_packets, ist->data_size);
1618 if (ist->decoding_needed) {
1619 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1620 ist->frames_decoded);
1621 if (type == AVMEDIA_TYPE_AUDIO)
1622 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1623 av_log(NULL, AV_LOG_VERBOSE, "; ");
1626 av_log(NULL, AV_LOG_VERBOSE, "\n");
1629 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1630 total_packets, total_size);
/* Mirror of the input loop, for output files and their streams. */
1633 for (i = 0; i < nb_output_files; i++) {
1634 OutputFile *of = output_files[i];
1635 uint64_t total_packets = 0, total_size = 0;
1637 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1640 for (j = 0; j < of->ctx->nb_streams; j++) {
1641 OutputStream *ost = output_streams[of->ost_index + j];
1642 enum AVMediaType type = ost->enc_ctx->codec_type;
1644 total_size += ost->data_size;
1645 total_packets += ost->packets_written;
1647 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1648 i, j, media_type_string(type));
1649 if (ost->encoding_needed) {
1650 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1651 ost->frames_encoded);
1652 if (type == AVMEDIA_TYPE_AUDIO)
1653 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1654 av_log(NULL, AV_LOG_VERBOSE, "; ");
1657 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1658 ost->packets_written, ost->data_size);
1660 av_log(NULL, AV_LOG_VERBOSE, "\n");
1663 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1664 total_packets, total_size);
/* Warn loudly when nothing at all was encoded. */
1666 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1667 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1669 av_log(NULL, AV_LOG_WARNING, "\n");
1671 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic status line (frame/fps/q/size/time/bitrate/speed) to
 * stderr and, when -progress is active, a machine-readable key=value block
 * to progress_avio. Rate-limited to one report per 500ms unless this is
 * the final report, in which case it also triggers print_final_stats().
 * NOTE(review): elided listing — many statements (variable declarations,
 * else branches, closing braces, time decomposition into hours/mins) are
 * missing below; code kept verbatim. */
1676 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1678 AVBPrint buf, buf_script;
1680 AVFormatContext *oc;
1682 AVCodecContext *enc;
1683 int frame_number, vid, i;
1686 int64_t pts = INT64_MIN + 1;
/* static state: last report time and a QP histogram persist across calls. */
1687 static int64_t last_time = -1;
1688 static int qp_histogram[52];
1689 int hours, mins, secs, us;
1690 const char *hours_sign;
1694 if (!print_stats && !is_last_report && !progress_avio)
/* Throttle intermediate reports to at most one every 500ms. */
1697 if (!is_last_report) {
1698 if (last_time == -1) {
1699 last_time = cur_time;
1702 if ((cur_time - last_time) < 500000)
1704 last_time = cur_time;
1707 t = (cur_time-timer_start) / 1000000.0;
1710 oc = output_files[0]->ctx;
1712 total_size = avio_size(oc->pb);
1713 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1714 total_size = avio_tell(oc->pb);
1717 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1718 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1719 for (i = 0; i < nb_output_streams; i++) {
1721 ost = output_streams[i];
1723 if (!ost->stream_copy)
1724 q = ost->quality / (float) FF_QP2LAMBDA;
1726 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1727 av_bprintf(&buf, "q=%2.1f ", q);
1728 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1729 ost->file_index, ost->index, q);
/* Only the first video stream contributes the frame/fps fields. */
1731 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1734 frame_number = ost->frame_number;
1735 fps = t > 1 ? frame_number / t : 0;
1736 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1737 frame_number, fps < 9.95, fps, q);
1738 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1739 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1740 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1741 ost->file_index, ost->index, q);
1743 av_bprintf(&buf, "L");
1747 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1749 for (j = 0; j < 32; j++)
1750 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
/* Per-plane PSNR display (Y/U/V), using running or final error sums. */
1753 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1755 double error, error_sum = 0;
1756 double scale, scale_sum = 0;
1758 char type[3] = { 'Y','U','V' };
1759 av_bprintf(&buf, "PSNR=");
1760 for (j = 0; j < 3; j++) {
1761 if (is_last_report) {
1762 error = enc->error[j];
1763 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1765 error = ost->error[j];
1766 scale = enc->width * enc->height * 255.0 * 255.0;
1772 p = psnr(error / scale);
1773 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1774 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1775 ost->file_index, ost->index, type[j] | 32, p);
1777 p = psnr(error_sum / scale_sum);
1778 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1779 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1780 ost->file_index, ost->index, p);
1784 /* compute min output value */
1785 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
1786 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1787 ost->st->time_base, AV_TIME_BASE_Q));
1789 if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1790 copy_ts_first_pts = pts;
1791 if (copy_ts_first_pts != AV_NOPTS_VALUE)
1792 pts -= copy_ts_first_pts;
1797 nb_frames_drop += ost->last_dropped;
1800 secs = FFABS(pts) / AV_TIME_BASE;
1801 us = FFABS(pts) % AV_TIME_BASE;
1806 hours_sign = (pts < 0) ? "-" : "";
1808 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1809 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1811 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1812 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1813 if (pts == AV_NOPTS_VALUE) {
1814 av_bprintf(&buf, "N/A ");
1816 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1817 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1821 av_bprintf(&buf, "bitrate=N/A");
1822 av_bprintf(&buf_script, "bitrate=N/A\n");
1824 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1825 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1828 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1829 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1830 if (pts == AV_NOPTS_VALUE) {
1831 av_bprintf(&buf_script, "out_time_us=N/A\n");
1832 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1833 av_bprintf(&buf_script, "out_time=N/A\n");
1835 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1836 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1837 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1838 hours_sign, hours, mins, secs, us);
1841 if (nb_frames_dup || nb_frames_drop)
1842 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1843 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1844 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1847 av_bprintf(&buf, " speed=N/A");
1848 av_bprintf(&buf_script, "speed=N/A\n");
1850 av_bprintf(&buf, " speed=%4.3gx", speed);
1851 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* Intermediate reports end in '\r' so the line overwrites itself. */
1854 if (print_stats || is_last_report) {
1855 const char end = is_last_report ? '\n' : '\r';
1856 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1857 fprintf(stderr, "%s %c", buf.str, end);
1859 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1863 av_bprint_finalize(&buf, NULL);
1865 if (progress_avio) {
1866 av_bprintf(&buf_script, "progress=%s\n",
1867 is_last_report ? "end" : "continue");
1868 avio_write(progress_avio, buf_script.str,
1869 FFMIN(buf_script.len, buf_script.size - 1));
1870 avio_flush(progress_avio);
1871 av_bprint_finalize(&buf_script, NULL);
1872 if (is_last_report) {
1873 if ((ret = avio_closep(&progress_avio)) < 0)
1874 av_log(NULL, AV_LOG_ERROR,
1875 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1880 print_final_stats(total_size);
/* Seed an InputFilter's media parameters from the demuxer-provided codec
 * parameters, used when no decoded frame ever arrived to describe the input. */
1883 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1885 // We never got any input. Set a fake format, which will
1886 // come from libavformat.
1887 ifilter->format = par->format;
1888 ifilter->sample_rate = par->sample_rate;
1889 ifilter->channels = par->channels;
1890 ifilter->channel_layout = par->channel_layout;
1891 ifilter->width = par->width;
1892 ifilter->height = par->height;
1893 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/* Drain all audio/video encoders at end of input: send a NULL frame to each
 * encoder that needs it and mux every remaining packet. Streams that never
 * received data get their filtergraph configured (or are finished) first.
 * NOTE(review): elided listing — continue/goto statements, the inner packet
 * loop's braces and the desc assignments are missing below. */
1896 static void flush_encoders(void)
1900 for (i = 0; i < nb_output_streams; i++) {
1901 OutputStream *ost = output_streams[i];
1902 AVCodecContext *enc = ost->enc_ctx;
1903 OutputFile *of = output_files[ost->file_index];
1905 if (!ost->encoding_needed)
1908 // Try to enable encoding with no input frames.
1909 // Maybe we should just let encoding fail instead.
1910 if (!ost->initialized) {
1911 FilterGraph *fg = ost->filter->graph;
1913 av_log(NULL, AV_LOG_WARNING,
1914 "Finishing stream %d:%d without any data written to it.\n",
1915 ost->file_index, ost->st->index);
1917 if (ost->filter && !fg->graph) {
/* Fall back to codecpar-derived parameters for never-fed inputs. */
1919 for (x = 0; x < fg->nb_inputs; x++) {
1920 InputFilter *ifilter = fg->inputs[x];
1921 if (ifilter->format < 0)
1922 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1925 if (!ifilter_has_all_input_formats(fg))
1928 ret = configure_filtergraph(fg);
1930 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1934 finish_output_stream(ost);
1937 init_output_stream_wrapper(ost, NULL, 1);
/* Only audio and video encoders are drained here. */
1940 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1944 const char *desc = NULL;
1948 switch (enc->codec_type) {
1949 case AVMEDIA_TYPE_AUDIO:
1952 case AVMEDIA_TYPE_VIDEO:
1959 av_init_packet(&pkt);
1963 update_benchmark(NULL);
/* EAGAIN from receive means the encoder wants the flush frame (NULL). */
1965 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1966 ret = avcodec_send_frame(enc, NULL);
1968 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1975 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1976 if (ret < 0 && ret != AVERROR_EOF) {
1977 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1982 if (ost->logfile && enc->stats_out) {
1983 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: emit the final (EOF-flagged) packet to flush the muxer side. */
1985 if (ret == AVERROR_EOF) {
1986 output_packet(of, &pkt, ost, 1);
1989 if (ost->finished & MUXER_FINISHED) {
1990 av_packet_unref(&pkt);
1993 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1994 pkt_size = pkt.size;
1995 output_packet(of, &pkt, ost, 0);
1996 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1997 do_video_stats(ost, pkt_size);
/* NOTE(review): elided listing — part of the doc comment and several return
 * statements of this predicate are missing below. */
2004 * Check whether a packet from ist should be written into ost at this time
2006 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2008 OutputFile *of = output_files[ost->file_index];
/* Map the input stream to its global index to compare with ost's source. */
2009 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2011 if (ost->source_index != ist_index)
2017 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding: apply
 * -ss/-t style start/recording-time constraints, then rescale pts/dts/
 * duration from the input stream time base into the muxer time base and
 * hand the packet to output_packet().
 * NOTE(review): elided listing — the opkt declaration, several return
 * statements and closing braces are missing below. */
2023 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2025 OutputFile *of = output_files[ost->file_index];
2026 InputFile *f = input_files [ist->file_index];
2027 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2028 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2031 // EOF: flush output bitstream filters.
2033 av_init_packet(&opkt);
2036 output_packet(of, &opkt, ost, 1);
/* Drop leading non-keyframes unless explicitly allowed. */
2040 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2041 !ost->copy_initial_nonkeyframes)
2044 if (!ost->frame_number && !ost->copy_prior_start) {
2045 int64_t comp_start = start_time;
2046 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2047 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2048 if (pkt->pts == AV_NOPTS_VALUE ?
2049 ist->pts < comp_start :
2050 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop this stream once the output file's recording time is exceeded. */
2054 if (of->recording_time != INT64_MAX &&
2055 ist->pts >= of->recording_time + start_time) {
2056 close_output_stream(ost);
2060 if (f->recording_time != INT64_MAX) {
2061 start_time = f->ctx->start_time;
2062 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2063 start_time += f->start_time;
2064 if (ist->pts >= f->recording_time + start_time) {
2065 close_output_stream(ost);
2070 /* force the input stream PTS */
2071 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2074 if (av_packet_ref(&opkt, pkt) < 0)
2077 if (pkt->pts != AV_NOPTS_VALUE)
2078 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2080 if (pkt->dts == AV_NOPTS_VALUE) {
2081 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
/* Audio dts: rescale via sample counts to avoid drift on odd time bases. */
2082 } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2083 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2085 duration = ist->dec_ctx->frame_size;
2086 opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2087 (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2088 &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2089 /* dts will be set immediately afterwards to what pts is now */
2090 opkt.pts = opkt.dts - ost_tb_start_time;
2092 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2093 opkt.dts -= ost_tb_start_time;
2095 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2097 output_packet(of, &opkt, ost, 0);
/* If the decoder reports no channel layout, pick the default layout for its
 * channel count (bounded by -guess_layout_max) and warn about the guess.
 * NOTE(review): elided listing — return statements are missing below. */
2100 int guess_input_channel_layout(InputStream *ist)
2102 AVCodecContext *dec = ist->dec_ctx;
2104 if (!dec->channel_layout) {
2105 char layout_name[256];
2107 if (dec->channels > ist->guess_layout_max)
2109 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2110 if (!dec->channel_layout)
2112 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2113 dec->channels, dec->channel_layout);
2114 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2115 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode success/failure statistics and react to decode problems:
 * exit when -xerror is set, and report corrupt decoded frames (fatal or
 * warning depending on exit_on_error). */
2120 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
2122 if (*got_output || ret<0)
2123 decode_error_stat[ret<0] ++;
2125 if (ret < 0 && exit_on_error)
2128 if (*got_output && ist) {
2129 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2130 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2131 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2138 // Filters can be configured only if the formats of all inputs are known.
/* Returns false when any audio/video input of the graph still has an
 * undetermined format (format < 0); the return statements are elided. */
2139 static int ifilter_has_all_input_formats(FilterGraph *fg)
2142 for (i = 0; i < fg->nb_inputs; i++) {
2143 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Push one decoded frame into a filtergraph input. Detects input-parameter
 * changes (format, audio rate/channels/layout, video size, hw frames ctx);
 * on change or first use it (re)configures the graph — buffering the frame
 * in the input FIFO if other graph inputs are not yet known.
 * NOTE(review): elided listing — break statements, error-path braces and
 * the final return are missing below. */
2150 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2152 FilterGraph *fg = ifilter->graph;
2153 int need_reinit, ret, i;
2155 /* determine if the parameters for this input changed */
2156 need_reinit = ifilter->format != frame->format;
2158 switch (ifilter->ist->st->codecpar->codec_type) {
2159 case AVMEDIA_TYPE_AUDIO:
2160 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2161 ifilter->channels != frame->channels ||
2162 ifilter->channel_layout != frame->channel_layout;
2164 case AVMEDIA_TYPE_VIDEO:
2165 need_reinit |= ifilter->width != frame->width ||
2166 ifilter->height != frame->height;
/* Honor -reinit_filter 0: keep the existing graph despite changes. */
2170 if (!ifilter->ist->reinit_filters && fg->graph)
/* A change of hw frames context also forces reinitialization. */
2173 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2174 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2178 ret = ifilter_parameters_from_frame(ifilter, frame);
2183 /* (re)init the graph if possible, otherwise buffer the frame and return */
2184 if (need_reinit || !fg->graph) {
2185 for (i = 0; i < fg->nb_inputs; i++) {
2186 if (!ifilter_has_all_input_formats(fg)) {
/* Other inputs unknown: queue a clone of the frame for later. */
2187 AVFrame *tmp = av_frame_clone(frame);
2189 return AVERROR(ENOMEM);
2190 av_frame_unref(frame);
2192 if (!av_fifo_space(ifilter->frame_queue)) {
2193 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2195 av_frame_free(&tmp);
2199 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain existing graph output before tearing it down for reinit. */
2204 ret = reap_filters(1);
2205 if (ret < 0 && ret != AVERROR_EOF) {
2206 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2210 ret = configure_filtergraph(fg);
2212 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2217 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2219 if (ret != AVERROR_EOF)
2220 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/* Signal EOF (with a final pts) on one filtergraph input. If the graph was
 * never configured, fall back to codecpar parameters; failing that for
 * audio/video inputs is a hard error.
 * NOTE(review): elided listing — error handling around av_buffersrc_close
 * and the final return are missing below. */
2227 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2233 if (ifilter->filter) {
2234 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2238 // the filtergraph was never configured
2239 if (ifilter->format < 0)
2240 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2241 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2242 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2243 return AVERROR_INVALIDDATA;
2250 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2251 // There is the following difference: if you got a frame, you must call
2252 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2253 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin wrapper over the send/receive decode API; sets *got_frame when a
 * frame was produced. NOTE(review): elided listing — *got_frame handling
 * and return statements are missing below. */
2254 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2261 ret = avcodec_send_packet(avctx, pkt);
2262 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2263 // decoded frames with avcodec_receive_frame() until done.
2264 if (ret < 0 && ret != AVERROR_EOF)
2268 ret = avcodec_receive_frame(avctx, frame);
2269 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Distribute one decoded frame to every filtergraph input fed by this
 * stream. All but the last filter receive a reference to the frame so the
 * original can be reused; EOF from a filter is ignored.
 * NOTE(review): elided listing — break/return statements are missing. */
2277 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2282 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2283 for (i = 0; i < ist->nb_filters; i++) {
2284 if (i < ist->nb_filters - 1) {
2285 f = ist->filter_frame;
2286 ret = av_frame_ref(f, decoded_frame);
2291 ret = ifilter_send_frame(ist->filters[i], f);
2292 if (ret == AVERROR_EOF)
2293 ret = 0; /* ignore */
2295 av_log(NULL, AV_LOG_ERROR,
2296 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet (or drain with pkt==NULL), fix up the resulting
 * frame's pts/time base, update per-stream statistics and forward the frame
 * to the filtergraphs.
 * NOTE(review): elided listing — the parameter list continues past line
 * 2303, and several goto/brace lines are missing below. */
2303 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2306 AVFrame *decoded_frame;
2307 AVCodecContext *avctx = ist->dec_ctx;
2309 AVRational decoded_frame_tb;
/* Lazily allocate the reusable decode/filter frames. */
2311 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2312 return AVERROR(ENOMEM);
2313 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2314 return AVERROR(ENOMEM);
2315 decoded_frame = ist->decoded_frame;
2317 update_benchmark(NULL);
2318 ret = decode(avctx, decoded_frame, got_output, pkt);
2319 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2323 if (ret >= 0 && avctx->sample_rate <= 0) {
2324 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2325 ret = AVERROR_INVALIDDATA;
2328 if (ret != AVERROR_EOF)
2329 check_decode_result(ist, got_output, ret);
2331 if (!*got_output || ret < 0)
2334 ist->samples_decoded += decoded_frame->nb_samples;
2335 ist->frames_decoded++;
2337 /* increment next_dts to use for the case where the input stream does not
2338 have timestamps or there are multiple frames in the packet */
2339 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2341 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Choose a pts source and matching time base: frame pts, else packet pts,
 * else the stream's running dts in AV_TIME_BASE units. */
2344 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2345 decoded_frame_tb = ist->st->time_base;
2346 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2347 decoded_frame->pts = pkt->pts;
2348 decoded_frame_tb = ist->st->time_base;
2350 decoded_frame->pts = ist->dts;
2351 decoded_frame_tb = AV_TIME_BASE_Q;
2353 if (decoded_frame->pts != AV_NOPTS_VALUE)
2354 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2355 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2356 (AVRational){1, avctx->sample_rate});
2357 ist->nb_samples = decoded_frame->nb_samples;
2358 err = send_frame_to_filters(ist, decoded_frame);
2360 av_frame_unref(ist->filter_frame);
2361 av_frame_unref(decoded_frame);
/* Prefer the filter-injection error over the decode return code. */
2362 return err < 0 ? err : ret;
/* Decode one video packet from *ist (or flush the decoder when draining),
 * fix up timestamps/stream parameters, and hand the decoded frame to the
 * input stream's filtergraphs.  *got_output is set when a frame came out;
 * *duration_pts receives the frame duration in stream time base.
 * Returns the decoder/filter error, or ret from decode() on success. */
2365 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2368 AVFrame *decoded_frame;
2369 int i, ret = 0, err = 0;
2370 int64_t best_effort_timestamp;
2371 int64_t dts = AV_NOPTS_VALUE;
2374 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2375 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2377 if (!eof && pkt && pkt->size == 0)
// Lazily allocate the per-stream frame buffers; they are reused across calls.
2380 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2381 return AVERROR(ENOMEM);
2382 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2383 return AVERROR(ENOMEM);
2384 decoded_frame = ist->decoded_frame;
// Convert the stream-global dts (AV_TIME_BASE_Q) back into stream time base.
2385 if (ist->dts != AV_NOPTS_VALUE)
2386 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2389 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2392 // The old code used to set dts on the drain packet, which does not work
2393 // with the new API anymore.
// Queue the packet dts so it can serve as a fallback timestamp while draining.
2395 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2397 return AVERROR(ENOMEM);
2398 ist->dts_buffer = new;
2399 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2402 update_benchmark(NULL);
2403 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2404 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2408 // The following line may be required in some cases where there is no parser
2409 // or the parser does not has_b_frames correctly
2410 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2411 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2412 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2414 av_log(ist->dec_ctx, AV_LOG_WARNING,
2415 "video_delay is larger in decoder than demuxer %d > %d.\n"
2416 "If you want to help, upload a sample "
2417 "of this file to https://streams.videolan.org/upload/ "
2418 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2419 ist->dec_ctx->has_b_frames,
2420 ist->st->codecpar->video_delay);
// AVERROR_EOF while draining is expected and must not count as a decode failure.
2423 if (ret != AVERROR_EOF)
2424 check_decode_result(ist, got_output, ret);
2426 if (*got_output && ret >= 0) {
2427 if (ist->dec_ctx->width != decoded_frame->width ||
2428 ist->dec_ctx->height != decoded_frame->height ||
2429 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2430 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2431 decoded_frame->width,
2432 decoded_frame->height,
2433 decoded_frame->format,
2434 ist->dec_ctx->width,
2435 ist->dec_ctx->height,
2436 ist->dec_ctx->pix_fmt);
2440 if (!*got_output || ret < 0)
// Honor the user-forced field order (-top option), if any.
2443 if(ist->top_field_first>=0)
2444 decoded_frame->top_field_first = ist->top_field_first;
2446 ist->frames_decoded++;
// Download the frame from hardware memory when a hwaccel produced it.
2448 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2449 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2453 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2455 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2456 *duration_pts = decoded_frame->pkt_duration;
// In forced-CFR mode (-r on input) synthesize a monotonically increasing pts.
2458 if (ist->framerate.num)
2459 best_effort_timestamp = ist->cfr_next_pts++;
// While draining: fall back to the oldest queued packet dts (FIFO pop).
2461 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2462 best_effort_timestamp = ist->dts_buffer[0];
2464 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2465 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2466 ist->nb_dts_buffer--;
// Stamp the frame and advance the stream clock (AV_TIME_BASE_Q units).
2469 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2470 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2472 if (ts != AV_NOPTS_VALUE)
2473 ist->next_pts = ist->pts = ts;
2477 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2478 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2479 ist->st->index, av_ts2str(decoded_frame->pts),
2480 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2481 best_effort_timestamp,
2482 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2483 decoded_frame->key_frame, decoded_frame->pict_type,
2484 ist->st->time_base.num, ist->st->time_base.den);
// Container-level SAR overrides the codec-level one.
2487 if (ist->st->sample_aspect_ratio.num)
2488 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2490 err = send_frame_to_filters(ist, decoded_frame);
// The frames are reused on the next call, so drop their references now.
2493 av_frame_unref(ist->filter_frame);
2494 av_frame_unref(decoded_frame);
2495 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally fix its duration against the
 * previously decoded subtitle, feed it to sub2video filtering and to all
 * subtitle encoders that consume this input stream. */
2498 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2501 AVSubtitle subtitle;
2503 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2504 &subtitle, got_output, pkt);
2506 check_decode_result(NULL, got_output, ret);
2508 if (ret < 0 || !*got_output) {
2511 sub2video_flush(ist);
// -fix_sub_duration: clamp the previous subtitle's display time so it ends
// when the current one starts (end computed in milliseconds).
2515 if (ist->fix_sub_duration) {
2517 if (ist->prev_sub.got_output) {
2518 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2519 1000, AV_TIME_BASE);
2520 if (end < ist->prev_sub.subtitle.end_display_time) {
2521 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2522 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2523 ist->prev_sub.subtitle.end_display_time, end,
2524 end <= 0 ? ", dropping it" : "");
2525 ist->prev_sub.subtitle.end_display_time = end;
// Delay output by one subtitle: emit the previous one, keep the current one.
2528 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2529 FFSWAP(int, ret, ist->prev_sub.ret);
2530 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
// sub2video: render immediately if the frame exists, otherwise queue the
// subtitle until the filtergraph is configured.
2538 if (ist->sub2video.frame) {
2539 sub2video_update(ist, INT64_MIN, &subtitle);
2540 } else if (ist->nb_filters) {
2541 if (!ist->sub2video.sub_queue)
2542 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2543 if (!ist->sub2video.sub_queue)
2545 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2546 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2550 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2554 if (!subtitle.num_rects)
2557 ist->frames_decoded++;
// Fan the decoded subtitle out to every matching subtitle encoder.
2559 for (i = 0; i < nb_output_streams; i++) {
2560 OutputStream *ost = output_streams[i];
2562 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2563 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2566 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2571 avsubtitle_free(&subtitle);
/* Signal EOF (with the stream's last pts, converted back to stream time
 * base) on every filtergraph input attached to this input stream. */
2575 static int send_filter_eof(InputStream *ist)
2578 /* TODO keep pts also in stream time base to avoid converting back */
2579 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2580 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2582 for (i = 0; i < ist->nb_filters; i++) {
2583 ret = ifilter_send_eof(ist->filters[i], pts);
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Core per-packet input driver: maintains the stream's dts/pts clocks,
 * dispatches the packet to the appropriate decoder (audio/video/subtitle)
 * or handles stream copy, and on EOF flushes the filters (unless no_eof,
 * used when looping the input).  Returns 0 once EOF has been reached. */
2591 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2595 int eof_reached = 0;
// First packet: seed the clock; for streams with B-frames start negative so
// the first output dts lands at 0.
2598 if (!ist->saw_first_ts) {
2599 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2601 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2602 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2603 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2605 ist->saw_first_ts = 1;
2608 if (ist->next_dts == AV_NOPTS_VALUE)
2609 ist->next_dts = ist->dts;
2610 if (ist->next_pts == AV_NOPTS_VALUE)
2611 ist->next_pts = ist->pts;
2615 av_init_packet(&avpkt);
// A packet with a dts resynchronizes the stream clock (AV_TIME_BASE_Q units).
2622 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2623 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2624 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2625 ist->next_pts = ist->pts = ist->dts;
2628 // while we have more to decode or while the decoder did output something on EOF
2629 while (ist->decoding_needed) {
2630 int64_t duration_dts = 0;
2631 int64_t duration_pts = 0;
2633 int decode_failed = 0;
2635 ist->pts = ist->next_pts;
2636 ist->dts = ist->next_dts;
2638 switch (ist->dec_ctx->codec_type) {
2639 case AVMEDIA_TYPE_AUDIO:
// 'repeating' iterations drain the decoder, so no packet is resubmitted.
2640 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2643 case AVMEDIA_TYPE_VIDEO:
2644 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2646 if (!repeating || !pkt || got_output) {
2647 if (pkt && pkt->duration) {
2648 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2649 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
// repeat_pict from the parser accounts for e.g. soft telecine field repeats.
2650 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2651 duration_dts = ((int64_t)AV_TIME_BASE *
2652 ist->dec_ctx->framerate.den * ticks) /
2653 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2656 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2657 ist->next_dts += duration_dts;
2659 ist->next_dts = AV_NOPTS_VALUE;
// Prefer the decoded frame's own duration for advancing pts; fall back to
// the dts-derived duration when it is unavailable.
2663 if (duration_pts > 0) {
2664 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2666 ist->next_pts += duration_dts;
2670 case AVMEDIA_TYPE_SUBTITLE:
2673 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2674 if (!pkt && ret >= 0)
2681 if (ret == AVERROR_EOF) {
2687 if (decode_failed) {
2688 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2689 ist->file_index, ist->st->index, av_err2str(ret));
2691 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2692 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2694 if (!decode_failed || exit_on_error)
2700 ist->got_output = 1;
2705 // During draining, we might get multiple output frames in this loop.
2706 // ffmpeg.c does not drain the filter chain on configuration changes,
2707 // which means if we send multiple frames at once to the filters, and
2708 // one of those frames changes configuration, the buffered frames will
2709 // be lost. This can upset certain FATE tests.
2710 // Decode only 1 frame per call on EOF to appease these FATE tests.
2711 // The ideal solution would be to rewrite decoding to use the new
2712 // decoding API in a better way.
2719 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2720 /* except when looping we need to flush but not to send an EOF */
2721 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2722 int ret = send_filter_eof(ist);
2724 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2729 /* handle stream copy */
// No decoding: advance the clock using packet duration / codec parameters.
2730 if (!ist->decoding_needed && pkt) {
2731 ist->dts = ist->next_dts;
2732 switch (ist->dec_ctx->codec_type) {
2733 case AVMEDIA_TYPE_AUDIO:
2734 av_assert1(pkt->duration >= 0);
2735 if (ist->dec_ctx->sample_rate) {
2736 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2737 ist->dec_ctx->sample_rate;
2739 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2742 case AVMEDIA_TYPE_VIDEO:
2743 if (ist->framerate.num) {
2744 // TODO: Remove work-around for c99-to-c89 issue 7
2745 AVRational time_base_q = AV_TIME_BASE_Q;
// Snap to the forced frame rate grid: round to frame index, step one frame.
2746 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2747 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2748 } else if (pkt->duration) {
2749 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2750 } else if(ist->dec_ctx->framerate.num != 0) {
2751 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2752 ist->next_dts += ((int64_t)AV_TIME_BASE *
2753 ist->dec_ctx->framerate.den * ticks) /
2754 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2758 ist->pts = ist->dts;
2759 ist->next_pts = ist->next_dts;
// Forward the packet to every stream-copy output fed by this input.
2761 for (i = 0; i < nb_output_streams; i++) {
2762 OutputStream *ost = output_streams[i];
2764 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2767 do_streamcopy(ist, ost, pkt);
2770 return !eof_reached;
/* Build an SDP description covering all RTP output files and either print
 * it to stdout or write it to -sdp_file.  Waits until every output file
 * header has been written before generating the SDP. */
2773 static void print_sdp(void)
2778 AVIOContext *sdp_pb;
2779 AVFormatContext **avc;
// Do nothing until all muxers have written their headers.
2781 for (i = 0; i < nb_output_files; i++) {
2782 if (!output_files[i]->header_written)
2786 avc = av_malloc_array(nb_output_files, sizeof(*avc));
// Collect only the RTP outputs; j counts how many were found.
2789 for (i = 0, j = 0; i < nb_output_files; i++) {
2790 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2791 avc[j] = output_files[i]->ctx;
2799 av_sdp_create(avc, j, sdp, sizeof(sdp));
2801 if (!sdp_filename) {
2802 printf("SDP:\n%s\n", sdp);
2805 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2806 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2808 avio_print(sdp_pb, sdp);
2809 avio_closep(&sdp_pb);
// Free the filename so the SDP is only written once.
2810 av_freep(&sdp_filename);
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick one, initializing a hardware acceleration method when
 * the chosen format is a hwaccel format matching the user's request. */
2818 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2820 InputStream *ist = s->opaque;
2821 const enum AVPixelFormat *p;
2824 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2825 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2826 const AVCodecHWConfig *config = NULL;
// Software formats need no setup; only hwaccel formats are negotiated below.
2829 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
// Generic/auto hwaccel: look up the codec's hw config for this pixel format.
2832 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2833 ist->hwaccel_id == HWACCEL_AUTO) {
2835 config = avcodec_get_hw_config(s->codec, i);
2838 if (!(config->methods &
2839 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2841 if (config->pix_fmt == *p)
2846 if (config->device_type != ist->hwaccel_device_type) {
2847 // Different hwaccel offered, ignore.
2851 ret = hwaccel_decode_init(s);
// Failure is fatal only if the user explicitly requested this hwaccel.
2853 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2854 av_log(NULL, AV_LOG_FATAL,
2855 "%s hwaccel requested for input stream #%d:%d, "
2856 "but cannot be initialized.\n",
2857 av_hwdevice_get_type_name(config->device_type),
2858 ist->file_index, ist->st->index);
2859 return AV_PIX_FMT_NONE;
// Legacy path: search the static hwaccels[] table for this pixel format.
2864 const HWAccel *hwaccel = NULL;
2866 for (i = 0; hwaccels[i].name; i++) {
2867 if (hwaccels[i].pix_fmt == *p) {
2868 hwaccel = &hwaccels[i];
2873 // No hwaccel supporting this pixfmt.
2876 if (hwaccel->id != ist->hwaccel_id) {
2877 // Does not match requested hwaccel.
2881 ret = hwaccel->init(s);
2883 av_log(NULL, AV_LOG_FATAL,
2884 "%s hwaccel requested for input stream #%d:%d, "
2885 "but cannot be initialized.\n", hwaccel->name,
2886 ist->file_index, ist->st->index);
2887 return AV_PIX_FMT_NONE;
// Pass a pre-existing hw frames context on to the decoder, if one was set up.
2891 if (ist->hw_frames_ctx) {
2892 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2893 if (!s->hw_frames_ctx)
2894 return AV_PIX_FMT_NONE;
// Remember the negotiated hwaccel format for later retrieval/get_buffer.
2897 ist->hwaccel_pix_fmt = *p;
2904 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2906 InputStream *ist = s->opaque;
2908 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2909 return ist->hwaccel_get_buffer(s, frame, flags);
2911 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (if decoding is needed), wiring up
 * the get_format/get_buffer callbacks, per-codec option tweaks and hardware
 * device setup.  On failure an error message is written into 'error'.
 * Always resets the stream's pts/dts bookkeeping at the end. */
2914 static int init_input_stream(int ist_index, char *error, int error_len)
2917 InputStream *ist = input_streams[ist_index];
2919 if (ist->decoding_needed) {
2920 AVCodec *codec = ist->dec;
2922 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2923 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2924 return AVERROR(EINVAL);
// opaque lets the callbacks below recover the InputStream from the codec ctx.
2927 ist->dec_ctx->opaque = ist;
2928 ist->dec_ctx->get_format = get_format;
2929 ist->dec_ctx->get_buffer2 = get_buffer;
2930 #if LIBAVCODEC_VERSION_MAJOR < 60
2931 ist->dec_ctx->thread_safe_callbacks = 1;
2934 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2935 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2936 (ist->decoding_needed & DECODING_FOR_OST)) {
2937 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2938 if (ist->decoding_needed & DECODING_FOR_FILTER)
2939 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2942 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2944 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2945 * audio, and video decoders such as cuvid or mediacodec */
2946 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2948 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2949 av_dict_set(&ist->decoder_opts, "threads", "auto", 0)
2950 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2951 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2952 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2954 ret = hw_device_setup_for_decode(ist);
2956 snprintf(error, error_len, "Device setup failed for "
2957 "decoder on input stream #%d:%d : %s",
2958 ist->file_index, ist->st->index, av_err2str(ret));
2962 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2963 if (ret == AVERROR_EXPERIMENTAL)
2964 abort_codec_experimental(codec, 0);
2966 snprintf(error, error_len,
2967 "Error while opening decoder for input stream "
2969 ist->file_index, ist->st->index, av_err2str(ret));
// Any decoder option left in the dict was not consumed -> report it.
2972 assert_avoptions(ist->decoder_opts);
2975 ist->next_pts = AV_NOPTS_VALUE;
2976 ist->next_dts = AV_NOPTS_VALUE;
/* Return the InputStream feeding this output stream, based on its source
 * index.  (The no-source branch is not visible in this excerpt; presumably
 * it returns NULL for filter-fed outputs — verify against full source.) */
2981 static InputStream *get_input_stream(OutputStream *ost)
2983 if (ost->source_index >= 0)
2984 return input_streams[ost->source_index];
/* qsort()-compatible comparator for int64_t values.  Returns a negative,
 * zero or positive value for less-than, equal and greater-than; the
 * (x > y) - (x < y) form yields exactly -1/0/+1 and, unlike subtraction,
 * cannot overflow. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
/* open the muxer when all the streams are initialized */
/* Once every output stream of this file is initialized, write the muxer
 * header, optionally emit the SDP, and flush the packets that were queued
 * while the muxer was not yet open. */
2994 static int check_init_output_file(OutputFile *of, int file_index)
// Bail out while any stream is still uninitialized; called again later.
2998 for (i = 0; i < of->ctx->nb_streams; i++) {
2999 OutputStream *ost = output_streams[of->ost_index + i];
3000 if (!ost->initialized)
3004 of->ctx->interrupt_callback = int_cb;
3006 ret = avformat_write_header(of->ctx, &of->opts);
3008 av_log(NULL, AV_LOG_ERROR,
3009 "Could not write header for output file #%d "
3010 "(incorrect codec parameters ?): %s\n",
3011 file_index, av_err2str(ret));
3014 //assert_avoptions(of->opts);
3015 of->header_written = 1;
3017 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3019 if (sdp_filename || want_sdp)
3022 /* flush the muxing queues */
3023 for (i = 0; i < of->ctx->nb_streams; i++) {
3024 OutputStream *ost = output_streams[of->ost_index + i];
3026 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3027 if (!av_fifo_size(ost->muxing_queue))
3028 ost->mux_timebase = ost->st->time_base;
// Drain every packet buffered before the header was written.
3030 while (av_fifo_size(ost->muxing_queue)) {
3032 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3033 ost->muxing_queue_data_size -= pkt.size;
3034 write_packet(of, &pkt, ost, 1);
/* Initialize the output stream's bitstream filter chain (if any): feed it
 * the stream's codec parameters and time base, then copy the filter's
 * output parameters back onto the stream. */
3041 static int init_output_bsfs(OutputStream *ost)
3043 AVBSFContext *ctx = ost->bsf_ctx;
3049 ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3053 ctx->time_base_in = ost->st->time_base;
3055 ret = av_bsf_init(ctx);
3057 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
// The BSF may rewrite codec parameters and time base; propagate them.
3062 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3065 ost->st->time_base = ctx->time_base_out;
/* Initialize an output stream in stream-copy mode: derive the output codec
 * parameters from the input stream (via a temporary codec context so that
 * -c copy options are applied), transfer timing info, side data and
 * disposition, and apply per-codec-type fixups. */
3070 static int init_output_stream_streamcopy(OutputStream *ost)
3072 OutputFile *of = output_files[ost->file_index];
3073 InputStream *ist = get_input_stream(ost);
3074 AVCodecParameters *par_dst = ost->st->codecpar;
3075 AVCodecParameters *par_src = ost->ref_par;
3078 uint32_t codec_tag = par_dst->codec_tag;
3080 av_assert0(ist && !ost->filter);
// Round-trip through enc_ctx so user-supplied codec options take effect.
3082 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3084 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3086 av_log(NULL, AV_LOG_FATAL,
3087 "Error setting up codec context options.\n");
3091 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3093 av_log(NULL, AV_LOG_FATAL,
3094 "Error getting reference codec parameters.\n");
// Keep the source codec_tag only when the output container can accept it.
3099 unsigned int codec_tag_tmp;
3100 if (!of->ctx->oformat->codec_tag ||
3101 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3102 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3103 codec_tag = par_src->codec_tag;
3106 ret = avcodec_parameters_copy(par_dst, par_src);
3110 par_dst->codec_tag = codec_tag;
3112 if (!ost->frame_rate.num)
3113 ost->frame_rate = ist->framerate;
3114 ost->st->avg_frame_rate = ost->frame_rate;
3116 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3120 // copy timebase while removing common factors
3121 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3122 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3124 // copy estimated duration as a hint to the muxer
3125 if (ost->st->duration <= 0 && ist->st->duration > 0)
3126 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3129 ost->st->disposition = ist->st->disposition;
// Copy stream-level side data (display matrix, etc.) verbatim.
3131 if (ist->st->nb_side_data) {
3132 for (i = 0; i < ist->st->nb_side_data; i++) {
3133 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3136 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3138 return AVERROR(ENOMEM);
3139 memcpy(dst_data, sd_src->data, sd_src->size);
// A user rotation override replaces the copied display matrix.
3143 if (ost->rotate_overridden) {
3144 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3145 sizeof(int32_t) * 9);
3147 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3150 switch (par_dst->codec_type) {
3151 case AVMEDIA_TYPE_AUDIO:
3152 if (audio_volume != 256) {
3153 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
// block_align is meaningless/harmful for MP3 and AC3 in copy mode.
3156 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3157 par_dst->block_align= 0;
3158 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3159 par_dst->block_align= 0;
3161 case AVMEDIA_TYPE_VIDEO:
3162 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3164 av_mul_q(ost->frame_aspect_ratio,
3165 (AVRational){ par_dst->height, par_dst->width });
3166 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3167 "with stream copy may produce invalid files\n");
3169 else if (ist->st->sample_aspect_ratio.num)
3170 sar = ist->st->sample_aspect_ratio;
3172 sar = par_src->sample_aspect_ratio;
3173 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3174 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3175 ost->st->r_frame_rate = ist->st->r_frame_rate;
3179 ost->mux_timebase = ist->st->time_base;
/* Set the "encoder" metadata tag on the output stream to identify the
 * encoder used (full libavcodec ident, or just "Lavc <name>" in bitexact
 * mode).  An existing user-provided tag is left untouched. */
3184 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3186 AVDictionaryEntry *e;
3188 uint8_t *encoder_string;
3189 int encoder_string_len;
3190 int format_flags = 0;
3191 int codec_flags = ost->enc_ctx->flags;
3193 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
// Parse -fflags / -flags option strings to detect +bitexact requests that
// have not been applied to the contexts yet.
3196 e = av_dict_get(of->opts, "fflags", NULL, 0);
3198 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3201 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3203 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3205 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3208 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
// +2: one space separator plus the terminating NUL.
3211 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3212 encoder_string = av_mallocz(encoder_string_len);
3213 if (!encoder_string)
3216 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3217 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3219 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3220 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
// DONT_STRDUP_VAL transfers ownership of encoder_string to the dictionary.
3221 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3222 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames specification 'kf' (a comma-separated list of
 * timestamps, where an entry starting with "chapters" expands to all chapter
 * start times plus an optional delta) into a sorted array of pts values in
 * the encoder time base, stored on ost->forced_kf_pts/forced_kf_count. */
3225 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3226 AVCodecContext *avctx)
3229 int n = 1, i, size, index = 0;
// Count entries: one more than the number of commas in the spec.
3232 for (p = kf; *p; p++)
3236 pts = av_malloc_array(size, sizeof(*pts));
3238 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3243 for (i = 0; i < n; i++) {
3244 char *next = strchr(p, ',');
3249 if (!memcmp(p, "chapters", 8)) {
3251 AVFormatContext *avf = output_files[ost->file_index]->ctx;
// Grow the array so every chapter start fits (this entry becomes
// nb_chapters timestamps instead of one); guard against int overflow.
3254 if (avf->nb_chapters > INT_MAX - size ||
3255 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3257 av_log(NULL, AV_LOG_FATAL,
3258 "Could not allocate forced key frames array.\n");
// Optional suffix after "chapters" is a time delta added to each chapter.
3261 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3262 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3264 for (j = 0; j < avf->nb_chapters; j++) {
3265 AVChapter *c = avf->chapters[j];
3266 av_assert1(index < size);
3267 pts[index++] = av_rescale_q(c->start, c->time_base,
3268 avctx->time_base) + t;
3273 t = parse_time_or_die("force_key_frames", p, 1);
3274 av_assert1(index < size);
3275 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
// Sort so the encoder can consume the forced keyframe times in order.
3282 av_assert0(index == size);
3283 qsort(pts, size, sizeof(*pts), compare_int64);
3284 ost->forced_kf_count = size;
3285 ost->forced_kf_pts = pts;
/* Choose the encoder time base: a positive -enc_time_base value wins,
 * a negative value means "copy the input stream's time base", and
 * otherwise the caller-supplied default is used. */
3288 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3290 InputStream *ist = get_input_stream(ost);
3291 AVCodecContext *enc_ctx = ost->enc_ctx;
3292 AVFormatContext *oc;
3294 if (ost->enc_timebase.num > 0) {
3295 enc_ctx->time_base = ost->enc_timebase;
// Negative num requests copying the demuxer time base (needs an input stream).
3299 if (ost->enc_timebase.num < 0) {
3301 enc_ctx->time_base = ist->st->time_base;
3305 oc = output_files[ost->file_index]->ctx;
3306 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3309 enc_ctx->time_base = default_time_base;
/* Configure the encoder context for an output stream that will be encoded:
 * disposition, frame rate selection, per-media-type parameters taken from
 * the buffersink (format, rate, size, SAR, ...), interlacing/field order,
 * and forced-keyframe handling.  'frame' (may be NULL) supplies color and
 * interlacing properties of the first filtered frame. */
3312 static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3314 InputStream *ist = get_input_stream(ost);
3315 AVCodecContext *enc_ctx = ost->enc_ctx;
3316 AVCodecContext *dec_ctx = NULL;
3317 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3320 set_encoder_id(output_files[ost->file_index], ost);
3322 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3323 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3324 // which have to be filtered out to prevent leaking them to output files.
3325 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3328 ost->st->disposition = ist->st->disposition;
3330 dec_ctx = ist->dec_ctx;
3332 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
// If this is the only stream of its type in the file, mark it as default
// (audio/video only).
3334 for (j = 0; j < oc->nb_streams; j++) {
3335 AVStream *st = oc->streams[j];
3336 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3339 if (j == oc->nb_streams)
3340 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3341 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3342 ost->st->disposition = AV_DISPOSITION_DEFAULT;
// Frame rate: -r option, then filtergraph, then input framerate/r_frame_rate,
// finally a 25 fps default with a warning.
3345 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3346 if (!ost->frame_rate.num)
3347 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3348 if (ist && !ost->frame_rate.num)
3349 ost->frame_rate = ist->framerate;
3350 if (ist && !ost->frame_rate.num)
3351 ost->frame_rate = ist->st->r_frame_rate;
3352 if (ist && !ost->frame_rate.num) {
3353 ost->frame_rate = (AVRational){25, 1};
3354 av_log(NULL, AV_LOG_WARNING,
3356 "about the input framerate is available. Falling "
3357 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3358 "if you want a different framerate.\n",
3359 ost->file_index, ost->index);
// Snap to the encoder's supported frame rate list unless -force_fps is set.
3362 if (ost->enc->supported_framerates && !ost->force_fps) {
3363 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3364 ost->frame_rate = ost->enc->supported_framerates[idx];
3366 // reduce frame rate for mpeg4 to be within the spec limits
3367 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3368 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3369 ost->frame_rate.num, ost->frame_rate.den, 65535);
3373 switch (enc_ctx->codec_type) {
3374 case AVMEDIA_TYPE_AUDIO:
// Audio parameters come from the filtergraph's buffersink.
3375 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3377 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3378 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3379 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3380 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3381 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3383 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3386 case AVMEDIA_TYPE_VIDEO:
3387 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3389 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3390 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3391 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3392 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3393 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3394 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3397 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3398 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3399 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3400 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3401 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3402 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3404 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3406 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3407 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
// Color properties are taken from the first filtered frame when available.
3410 enc_ctx->color_range = frame->color_range;
3411 enc_ctx->color_primaries = frame->color_primaries;
3412 enc_ctx->color_trc = frame->color_trc;
3413 enc_ctx->colorspace = frame->colorspace;
3414 enc_ctx->chroma_sample_location = frame->chroma_location;
3417 enc_ctx->framerate = ost->frame_rate;
3419 ost->st->avg_frame_rate = ost->frame_rate;
3422 enc_ctx->width != dec_ctx->width ||
3423 enc_ctx->height != dec_ctx->height ||
3424 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3425 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
// Explicit -top option: 0 = bottom field first, 1 = top field first.
3428 if (ost->top_field_first == 0) {
3429 enc_ctx->field_order = AV_FIELD_BB;
3430 } else if (ost->top_field_first == 1) {
3431 enc_ctx->field_order = AV_FIELD_TT;
3435 if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
3436 ost->top_field_first >= 0)
3437 frame->top_field_first = !!ost->top_field_first;
3439 if (frame->interlaced_frame) {
3440 if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3441 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3443 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3445 enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3448 if (ost->forced_keyframes) {
// "expr:" prefix: compile the expression once, evaluated per frame later.
3449 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3450 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3451 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3453 av_log(NULL, AV_LOG_ERROR,
3454 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3457 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3458 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3459 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3460 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3462 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3463 // parse it only for static kf timings
3464 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3465 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3469 case AVMEDIA_TYPE_SUBTITLE:
3470 enc_ctx->time_base = AV_TIME_BASE_Q;
3471 if (!enc_ctx->width) {
3472 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3473 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3476 case AVMEDIA_TYPE_DATA:
3483 ost->mux_timebase = enc_ctx->time_base;
/* Fully initialize one output stream: for an encoded stream, configure and
 * open the encoder (subtitle header copy, default thread/bitrate options,
 * HW device binding, subtitle text/bitmap compatibility check), propagate
 * the codec parameters and side data to the AVStream, then handle stream
 * copy, user dispositions and bitstream filters, and finally try to write
 * the output file header. Returns 0 or a negative AVERROR; on failure a
 * message is written into 'error'.
 * NOTE(review): this excerpt elides intermediate lines (error returns,
 * closing braces); comments below describe only the visible code. */
3488 static int init_output_stream(OutputStream *ost, AVFrame *frame,
3489 char *error, int error_len)
3493 if (ost->encoding_needed) {
3494 AVCodec *codec = ost->enc;
/* 'dec' is the decoder context of the corresponding input stream, if any
 * (assigned in elided lines after get_input_stream below — TODO confirm). */
3495 AVCodecContext *dec = NULL;
3498 ret = init_output_stream_encode(ost, frame);
3502 if ((ist = get_input_stream(ost)))
3504 if (dec && dec->subtitle_header) {
3505 /* ASS code assumes this buffer is null terminated so add extra byte. */
3506 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3507 if (!ost->enc_ctx->subtitle_header)
3508 return AVERROR(ENOMEM);
3509 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3510 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic thread count unless the user chose one. */
3512 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3513 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128 kb/s when neither "b" nor "ab" was given. */
3514 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3516 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3517 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3518 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3520 ret = hw_device_setup_for_encode(ost);
3522 snprintf(error, error_len, "Device setup failed for "
3523 "encoder on output stream #%d:%d : %s",
3524 ost->file_index, ost->index, av_err2str(ret));
/* Subtitles can only be transcoded text->text or bitmap->bitmap; reject
 * mixed conversions based on the codec descriptors' property flags. */
3528 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3529 int input_props = 0, output_props = 0;
3530 AVCodecDescriptor const *input_descriptor =
3531 avcodec_descriptor_get(dec->codec_id);
3532 AVCodecDescriptor const *output_descriptor =
3533 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3534 if (input_descriptor)
3535 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3536 if (output_descriptor)
3537 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3538 if (input_props && output_props && input_props != output_props) {
3539 snprintf(error, error_len,
3540 "Subtitle encoding currently only possible from text to text "
3541 "or bitmap to bitmap");
3542 return AVERROR_INVALIDDATA;
3546 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3547 if (ret == AVERROR_EXPERIMENTAL)
3548 abort_codec_experimental(codec, 1);
3549 snprintf(error, error_len,
3550 "Error while opening encoder for output stream #%d:%d - "
3551 "maybe incorrect parameters such as bit_rate, rate, width or height",
3552 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit frames of
 * exactly enc_ctx->frame_size samples. */
3555 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3556 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3557 av_buffersink_set_frame_size(ost->filter->filter,
3558 ost->enc_ctx->frame_size);
3559 assert_avoptions(ost->encoder_opts);
/* Heuristic: bitrates below 1000 almost certainly mean the user passed
 * kbits/s; CODEC2 legitimately uses ~700 bit/s modes, so skip it. */
3560 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3561 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3562 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3563 " It takes bits/s as argument, not kbits/s\n");
3565 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3567 av_log(NULL, AV_LOG_FATAL,
3568 "Error initializing the output stream codec context.\n");
3572 * FIXME: ost->st->codec should't be needed here anymore.
3574 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* Copy any coded side data produced by the encoder onto the stream. */
3578 if (ost->enc_ctx->nb_coded_side_data) {
3581 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3582 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3585 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3587 return AVERROR(ENOMEM);
3588 memcpy(dst_data, sd_src->data, sd_src->size);
3593 * Add global input side data. For now this is naive, and copies it
3594 * from the input stream's global side data. All side data should
3595 * really be funneled over AVFrame and libavfilter, then added back to
3596 * packet side data, and then potentially using the first packet for
3601 for (i = 0; i < ist->st->nb_side_data; i++) {
3602 AVPacketSideData *sd = &ist->st->side_data[i];
3603 if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3604 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3606 return AVERROR(ENOMEM);
3607 memcpy(dst, sd->data, sd->size);
/* When autorotate applied the rotation in a filter, zero the display
 * matrix so downstream players do not rotate a second time. */
3608 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3609 av_display_rotation_set((uint32_t *)dst, 0);
3614 // copy timebase while removing common factors
3615 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3616 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3618 // copy estimated duration as a hint to the muxer
3619 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3620 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3622 ost->st->codec->codec= ost->enc_ctx->codec;
3623 } else if (ost->stream_copy) {
3624 ret = init_output_stream_streamcopy(ost);
3629 // parse user provided disposition, and update stream values
3630 if (ost->disposition) {
/* Local AVOption table so av_opt_eval_flags can parse strings such as
 * "default+forced" into AV_DISPOSITION_* bits. */
3631 static const AVOption opts[] = {
3632 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3633 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3634 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3635 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3636 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3637 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3638 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3639 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3640 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3641 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3642 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3643 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3644 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3645 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3646 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3647 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3650 static const AVClass class = {
3652 .item_name = av_default_item_name,
3654 .version = LIBAVUTIL_VERSION_INT,
3656 const AVClass *pclass = &class;
3658 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3663 /* initialize bitstream filters for the output stream
3664 * needs to be done here, because the codec id for streamcopy is not
3665 * known until now */
3666 ret = init_output_bsfs(ost);
3670 ost->initialized = 1;
/* Write the output header once all of this file's streams are ready. */
3672 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (once per stream index) about a stream that appeared mid-demux and
 * is therefore ignored; nb_streams_warn tracks the highest index already
 * reported so repeated packets from the same new stream stay silent. */
3679 static void report_new_stream(int input_index, AVPacket *pkt)
3681 InputFile *file = input_files[input_index];
3682 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream (or a later one) — nothing to do. */
3684 if (pkt->stream_index < file->nb_streams_warn)
3686 av_log(file->ctx, AV_LOG_WARNING,
3687 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3688 av_get_media_type_string(st->codecpar->codec_type),
3689 input_index, pkt->stream_index,
3690 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3691 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to a source input stream, start framerate emulation clocks, open
 * all decoders, eagerly initialize non-lavfi output streams, discard unused
 * programs, write headers for streamless outputs, and print the stream
 * mapping. Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): intermediate lines (continue/goto error paths, braces) are
 * elided in this excerpt. */
3694 static int transcode_init(void)
3696 int ret = 0, i, j, k;
3697 AVFormatContext *oc;
3700 char error[1024] = {0};
/* Give outputs fed by a single-input complex filtergraph a source_index so
 * later code can associate them with an input stream. */
3702 for (i = 0; i < nb_filtergraphs; i++) {
3703 FilterGraph *fg = filtergraphs[i];
3704 for (j = 0; j < fg->nb_outputs; j++) {
3705 OutputFilter *ofilter = fg->outputs[j];
3706 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3708 if (fg->nb_inputs != 1)
3710 for (k = nb_input_streams-1; k >= 0 ; k--)
3711 if (fg->inputs[0]->ist == input_streams[k])
3713 ofilter->ost->source_index = k;
3717 /* init framerate emulation */
3718 for (i = 0; i < nb_input_files; i++) {
3719 InputFile *ifile = input_files[i];
3720 if (ifile->rate_emu)
3721 for (j = 0; j < ifile->nb_streams; j++)
3722 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3725 /* init input streams */
3726 for (i = 0; i < nb_input_streams; i++)
3727 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoder contexts before bailing out. */
3728 for (i = 0; i < nb_output_streams; i++) {
3729 ost = output_streams[i];
3730 avcodec_close(ost->enc_ctx);
3736 * initialize stream copy and subtitle/data streams.
3737 * Encoded AVFrame based streams will get initialized as follows:
3738 * - when the first AVFrame is received in do_video_out
3739 * - just before the first AVFrame is received in either transcode_step
3740 * or reap_filters due to us requiring the filter chain buffer sink
3741 * to be configured with the correct audio frame size, which is only
3742 * known after the encoder is initialized.
3744 for (i = 0; i < nb_output_streams; i++) {
/* Skip encoded audio/video here — see the comment above. */
3745 if (!output_streams[i]->stream_copy &&
3746 (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3747 output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3750 ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3755 /* discard unused programs */
3756 for (i = 0; i < nb_input_files; i++) {
3757 InputFile *ifile = input_files[i];
3758 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3759 AVProgram *p = ifile->ctx->programs[j];
3760 int discard = AVDISCARD_ALL;
/* Keep the program if any of its streams is actually used. */
3762 for (k = 0; k < p->nb_stream_indexes; k++)
3763 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3764 discard = AVDISCARD_DEFAULT;
3767 p->discard = discard;
3771 /* write headers for files with no streams */
3772 for (i = 0; i < nb_output_files; i++) {
3773 oc = output_files[i]->ctx;
3774 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3775 ret = check_init_output_file(output_files[i], i);
3782 /* dump the stream mapping */
3783 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3784 for (i = 0; i < nb_input_streams; i++) {
3785 ist = input_streams[i];
3787 for (j = 0; j < ist->nb_filters; j++) {
3788 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3789 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3790 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3791 ist->filters[j]->name);
3792 if (nb_filtergraphs > 1)
3793 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3794 av_log(NULL, AV_LOG_INFO, "\n");
3799 for (i = 0; i < nb_output_streams; i++) {
3800 ost = output_streams[i];
3802 if (ost->attachment_filename) {
3803 /* an attached file */
3804 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3805 ost->attachment_filename, ost->file_index, ost->index);
3809 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3810 /* output from a complex graph */
3811 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3812 if (nb_filtergraphs > 1)
3813 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3815 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3816 ost->index, ost->enc ? ost->enc->name : "?");
/* Simple input->output mapping line, with optional sync stream. */
3820 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3821 input_streams[ost->source_index]->file_index,
3822 input_streams[ost->source_index]->st->index,
3825 if (ost->sync_ist != input_streams[ost->source_index])
3826 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3827 ost->sync_ist->file_index,
3828 ost->sync_ist->st->index);
3829 if (ost->stream_copy)
3830 av_log(NULL, AV_LOG_INFO, " (copy)");
3832 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3833 const AVCodec *out_codec = ost->enc;
3834 const char *decoder_name = "?";
3835 const char *in_codec_name = "?";
3836 const char *encoder_name = "?";
3837 const char *out_codec_name = "?";
3838 const AVCodecDescriptor *desc;
/* Show "native" when the implementation name equals the codec name. */
3841 decoder_name = in_codec->name;
3842 desc = avcodec_descriptor_get(in_codec->id);
3844 in_codec_name = desc->name;
3845 if (!strcmp(decoder_name, in_codec_name))
3846 decoder_name = "native";
3850 encoder_name = out_codec->name;
3851 desc = avcodec_descriptor_get(out_codec->id);
3853 out_codec_name = desc->name;
3854 if (!strcmp(encoder_name, out_codec_name))
3855 encoder_name = "native";
3858 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3859 in_codec_name, decoder_name,
3860 out_codec_name, encoder_name);
3862 av_log(NULL, AV_LOG_INFO, "\n");
3866 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Signal other threads (e.g. signal handlers/progress) that init is done. */
3870 atomic_store(&transcode_init_done, 1);
3875 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3876 static int need_output(void)
3880 for (i = 0; i < nb_output_streams; i++) {
3881 OutputStream *ost = output_streams[i];
3882 OutputFile *of = output_files[ost->file_index];
3883 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* A stream needs no more output once it is finished or its file hit the
 * user-requested size limit (-fs). */
3885 if (ost->finished ||
3886 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of this output file. */
3888 if (ost->frame_number >= ost->max_frames) {
3890 for (j = 0; j < of->ctx->nb_streams; j++)
3891 close_output_stream(output_streams[of->ost_index + j]);
3902 * Select the output stream to process.
3904 * @return selected output stream, or NULL if none available
3906 static OutputStream *choose_output(void)
3909 int64_t opts_min = INT64_MAX;
3910 OutputStream *ost_min = NULL;
3912 for (i = 0; i < nb_output_streams; i++) {
3913 OutputStream *ost = output_streams[i];
/* Muxing position in AV_TIME_BASE units; streams with no DTS yet sort
 * first (INT64_MIN) so they get a chance to start. */
3914 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3915 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3917 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3918 av_log(NULL, AV_LOG_DEBUG,
3919 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3920 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
/* Not yet initialized and still has pending input — keep it eligible. */
3922 if (!ost->initialized && !ost->inputs_done)
/* Pick the least-advanced unfinished stream; an "unavailable" one yields
 * NULL so the caller retries after more input is read. */
3925 if (!ost->finished && opts < opts_min) {
3927 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on!=0) or disable terminal echo on stdin via termios; silently a
 * no-op when stdin is not a tty (tcgetattr fails). */
3933 static void set_tty_echo(int on)
3937 if (tcgetattr(0, &tty) == 0) {
3938 if (on) tty.c_lflag |= ECHO;
3939 else tty.c_lflag &= ~ECHO;
3940 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin (at most every 100 ms, never in daemon mode) for interactive
 * single-key commands: verbosity, QP histogram, packet dumping, filter
 * commands ('c'/'C'), debug-flag cycling ('d'/'D'), and help ('?').
 * Returns AVERROR_EXIT when the user/system requested termination.
 * NOTE(review): several branches (e.g. 'q' handling, read_key calls) are
 * elided in this excerpt. */
3945 static int check_keyboard_interaction(int64_t cur_time)
3948 static int64_t last_time;
3949 if (received_nb_signals)
3950 return AVERROR_EXIT;
3951 /* read_key() returns 0 on EOF */
3952 if(cur_time - last_time >= 100000 && !run_as_daemon){
3954 last_time = cur_time;
3958 return AVERROR_EXIT;
3959 if (key == '+') av_log_set_level(av_log_get_level()+10);
3960 if (key == '-') av_log_set_level(av_log_get_level()-10);
3961 if (key == 's') qp_hist ^= 1;
3964 do_hex_dump = do_pkt_dump = 0;
3965 } else if(do_pkt_dump){
3969 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read "<target> <time> <command>[ <arg>]" from the terminal and
 * send it to matching filters ('c' = first match only). */
3971 if (key == 'c' || key == 'C'){
3972 char buf[4096], target[64], command[256], arg[256] = {0};
3975 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3978 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3983 fprintf(stderr, "\n");
3985 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3986 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3987 target, time, command, arg);
3988 for (i = 0; i < nb_filtergraphs; i++) {
3989 FilterGraph *fg = filtergraphs[i];
3992 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3993 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3994 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3995 } else if (key == 'c') {
3996 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3997 ret = AVERROR_PATCHWELCOME;
3999 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4001 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4006 av_log(NULL, AV_LOG_ERROR,
4007 "Parse error, at least 3 arguments were expected, "
4008 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle through debug modes; 'D': prompt for an explicit value. */
4011 if (key == 'd' || key == 'D'){
4014 debug = input_streams[0]->st->codec->debug<<1;
4015 if(!debug) debug = 1;
/* Skip flag values that would crash or need unsupported visualization. */
4016 while(debug & (FF_DEBUG_DCT_COEFF
4018 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
4020 )) //unsupported, would just crash
4027 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4032 fprintf(stderr, "\n");
4033 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4034 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to all decoders and encoders. */
4036 for(i=0;i<nb_input_streams;i++) {
4037 input_streams[i]->st->codec->debug = debug;
4039 for(i=0;i<nb_output_streams;i++) {
4040 OutputStream *ost = output_streams[i];
4041 ost->enc_ctx->debug = debug;
4043 if(debug) av_log_set_level(AV_LOG_DEBUG);
4044 fprintf(stderr,"debug=%d\n", debug);
4047 fprintf(stderr, "key function\n"
4048 "? show this help\n"
4049 "+ increase verbosity\n"
4050 "- decrease verbosity\n"
4051 "c Send command to first matching filter supporting it\n"
4052 "C Send/Queue command to all matching filters\n"
4053 "D cycle through available debug modes\n"
4054 "h dump packets/hex press to cycle through the 3 states\n"
4056 "s Show QP histogram\n"
/* Demuxer thread body: read packets from one InputFile and push them onto
 * its thread message queue for the main thread. In non-blocking mode a
 * full queue triggers a warning and a (blocking) retry. On any terminal
 * error the error code is propagated to the receiving side of the queue. */
4063 static void *input_thread(void *arg)
4066 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4071 ret = av_read_frame(f->ctx, &pkt);
/* Transient demuxer EAGAIN: back off and retry (retry logic elided). */
4073 if (ret == AVERROR(EAGAIN)) {
4078 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4081 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: warn, then fall back to a blocking
 * send so the packet is not dropped. */
4082 if (flags && ret == AVERROR(EAGAIN)) {
4084 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4085 av_log(f->ctx, AV_LOG_WARNING,
4086 "Thread message queue blocking; consider raising the "
4087 "thread_queue_size option (current value: %d)\n",
4088 f->thread_queue_size);
4091 if (ret != AVERROR_EOF)
4092 av_log(f->ctx, AV_LOG_ERROR,
4093 "Unable to send packet to main thread: %s\n",
/* The packet was not consumed by the queue — release our reference. */
4095 av_packet_unref(&pkt);
4096 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and reap the demuxer thread of input file i: signal EOF to the
 * sender, drain and unref queued packets, join the thread, free the queue.
 * Safe to call when no thread/queue was ever created. */
4104 static void free_input_thread(int i)
4106 InputFile *f = input_files[i];
4109 if (!f || !f->in_thread_queue)
4111 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain remaining packets so their buffers are released. */
4112 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4113 av_packet_unref(&pkt);
4115 pthread_join(f->thread, NULL);
4117 av_thread_message_queue_free(&f->in_thread_queue);
/* Tear down the demuxer threads of all input files. */
4120 static void free_input_threads(void)
4124 for (i = 0; i < nb_input_files; i++)
4125 free_input_thread(i);
/* Create the demuxer thread for input file i, if threading is enabled.
 * Default queue size is 8 with multiple inputs, 0 (disabled) otherwise.
 * Returns 0 on success or when threading is disabled, negative AVERROR
 * on allocation/thread-creation failure. */
4128 static int init_input_thread(int i)
4131 InputFile *f = input_files[i];
4133 if (f->thread_queue_size < 0)
4134 f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4135 if (!f->thread_queue_size)
/* Non-seekable (live) inputs — except lavfi — use non-blocking sends so a
 * slow consumer does not stall the demuxer. */
4138 if (f->ctx->pb ? !f->ctx->pb->seekable :
4139 strcmp(f->ctx->iformat->name, "lavfi"))
4140 f->non_blocking = 1;
4141 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4142 f->thread_queue_size, sizeof(AVPacket));
4146 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4147 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4148 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno, not -errno. */
4149 return AVERROR(ret);
/* Start a demuxer thread for every input file; returns the first error
 * (error propagation elided in this excerpt). */
4155 static int init_input_threads(void)
4159 for (i = 0; i < nb_input_files; i++) {
4160 ret = init_input_thread(i);
/* Receive one packet from the input file's demuxer-thread queue,
 * non-blocking when the file is marked non_blocking. */
4167 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4169 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4171 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from an input file. With -re (rate emulation),
 * return EAGAIN while any stream of the file is ahead of wall-clock time;
 * otherwise read from the demuxer thread queue or directly from lavf. */
4175 static int get_input_packet(InputFile *f, AVPacket *pkt)
4179 for (i = 0; i < f->nb_streams; i++) {
4180 InputStream *ist = input_streams[f->ist_index + i];
/* Compare stream DTS (microseconds) against elapsed real time. */
4181 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4182 int64_t now = av_gettime_relative() - ist->start;
4184 return AVERROR(EAGAIN);
4189 if (f->thread_queue_size)
4190 return get_input_packet_mt(f, pkt);
4192 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. the last processing round ended in EAGAIN somewhere. */
4195 static int got_eagain(void)
4198 for (i = 0; i < nb_output_streams; i++)
4199 if (output_streams[i]->unavailable)
/* Clear per-file eagain and per-stream unavailable flags before the next
 * scheduling round. */
4204 static void reset_eagain(void)
4207 for (i = 0; i < nb_input_files; i++)
4208 input_files[i]->eagain = 0;
4209 for (i = 0; i < nb_output_streams; i++)
4210 output_streams[i]->unavailable = 0;
4213 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4214 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4215 AVRational time_base)
4221 return tmp_time_base;
/* Compare the two durations across their different time bases; the larger
 * one wins (update path elided in this excerpt). */
4224 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4227 return tmp_time_base;
/* Seek an input file back to its start (for -stream_loop) and compute the
 * file's total duration so timestamps of the next iteration can be offset.
 * The last frame's length is estimated per stream: from nb_samples for
 * audio, from the (forced or average) frame rate for video. */
4233 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4236 AVCodecContext *avctx;
4237 int i, ret, has_audio = 0;
4238 int64_t duration = 0;
4240 ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
/* First pass: detect whether any decoded audio stream produced samples. */
4244 for (i = 0; i < ifile->nb_streams; i++) {
4245 ist = input_streams[ifile->ist_index + i];
4246 avctx = ist->dec_ctx;
4248 /* duration is the length of the last frame in a stream
4249 * when audio stream is present we don't care about
4250 * last video frame length because it's not defined exactly */
4251 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: estimate last-frame duration per stream and accumulate the
 * file duration in a common time base. */
4255 for (i = 0; i < ifile->nb_streams; i++) {
4256 ist = input_streams[ifile->ist_index + i];
4257 avctx = ist->dec_ctx;
4260 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4261 AVRational sample_rate = {1, avctx->sample_rate};
4263 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4268 if (ist->framerate.num) {
4269 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4270 } else if (ist->st->avg_frame_rate.num) {
4271 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4276 if (!ifile->duration)
4277 ifile->time_base = ist->st->time_base;
4278 /* the total duration of the stream, max_pts - min_pts is
4279 * the duration of the stream without the last frame */
/* Guard against signed overflow when adding the pts span. */
4280 if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4281 duration += ist->max_pts - ist->min_pts;
4282 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Finite loop count: decrement remaining iterations (elided). */
4286 if (ifile->loop > 0)
4294 * - 0 -- one packet was read and processed
4295 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4296 * this function should be called again
4297 * - AVERROR_EOF -- this function should not be called again
/* Read one packet from the given input file and run it through timestamp
 * correction (wrap, start-time, discontinuity, -stream_loop offsets, ts
 * scaling) before handing it to process_input_packet. Handles EOF by
 * flushing decoders and optionally looping the input. */
4299 static int process_input(int file_index)
4301 InputFile *ifile = input_files[file_index];
4302 AVFormatContext *is;
4305 int ret, thread_ret, i, j;
/* With -copyts, inter-packet discontinuity correction is off by default
 * (may be re-enabled below if a wrap is detected). */
4308 int disable_discontinuity_correction = copy_ts;
4311 ret = get_input_packet(ifile, &pkt);
4313 if (ret == AVERROR(EAGAIN)) {
/* EOF (or error) with -stream_loop pending: flush decoders, restart the
 * demuxer thread around the seek, and read again. */
4317 if (ret < 0 && ifile->loop) {
4318 AVCodecContext *avctx;
4319 for (i = 0; i < ifile->nb_streams; i++) {
4320 ist = input_streams[ifile->ist_index + i];
4321 avctx = ist->dec_ctx;
4322 if (ist->decoding_needed) {
4323 ret = process_input_packet(ist, NULL, 1);
4326 avcodec_flush_buffers(avctx);
4330 free_input_thread(file_index);
4332 ret = seek_to_start(ifile, is);
4334 thread_ret = init_input_thread(file_index);
4339 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4341 ret = get_input_packet(ifile, &pkt);
4342 if (ret == AVERROR(EAGAIN)) {
4348 if (ret != AVERROR_EOF) {
4349 print_error(is->url, ret);
/* Terminal EOF: flush all decoders of this file and finish outputs that
 * are fed directly (stream copy / subtitles) rather than through lavfi. */
4354 for (i = 0; i < ifile->nb_streams; i++) {
4355 ist = input_streams[ifile->ist_index + i];
4356 if (ist->decoding_needed) {
4357 ret = process_input_packet(ist, NULL, 0);
4362 /* mark all outputs that don't go through lavfi as finished */
4363 for (j = 0; j < nb_output_streams; j++) {
4364 OutputStream *ost = output_streams[j];
4366 if (ost->source_index == ifile->ist_index + i &&
4367 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4368 finish_output_stream(ost);
4372 ifile->eof_reached = 1;
4373 return AVERROR(EAGAIN);
4379 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4380 is->streams[pkt.stream_index]);
4382 /* the following test is needed in case new streams appear
4383 dynamically in stream : we ignore them */
4384 if (pkt.stream_index >= ifile->nb_streams) {
4385 report_new_stream(file_index, &pkt);
4386 goto discard_packet;
4389 ist = input_streams[ifile->ist_index + pkt.stream_index];
4391 ist->data_size += pkt.size;
4395 goto discard_packet;
4397 if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4398 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4399 "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4405 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4406 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4407 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4408 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4409 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4410 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4411 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4412 av_ts2str(input_files[ist->file_index]->ts_offset),
4413 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* One-time timestamp wrap correction for formats with narrow pts fields. */
4416 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4417 int64_t stime, stime2;
4418 // Correcting starttime based on the enabled streams
4419 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4420 // so we instead do it here as part of discontinuity handling
4421 if ( ist->next_dts == AV_NOPTS_VALUE
4422 && ifile->ts_offset == -is->start_time
4423 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4424 int64_t new_start_time = INT64_MAX;
4425 for (i=0; i<is->nb_streams; i++) {
4426 AVStream *st = is->streams[i];
4427 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4429 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4431 if (new_start_time > is->start_time) {
4432 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4433 ifile->ts_offset = -new_start_time;
4437 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4438 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4439 ist->wrap_correction_done = 1;
/* Undo a wrap if a timestamp lands more than half a wrap period past the
 * start; correction stays pending until both pts and dts are consistent. */
4441 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4442 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4443 ist->wrap_correction_done = 0;
4445 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4446 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4447 ist->wrap_correction_done = 0;
4451 /* add the stream-global side data to the first packet */
4452 if (ist->nb_packets == 1) {
4453 for (i = 0; i < ist->st->nb_side_data; i++) {
4454 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Display matrix is handled via autorotate elsewhere; skip it here. */
4457 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4460 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4463 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4467 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset, then the user -itsscale factor. */
4471 if (pkt.dts != AV_NOPTS_VALUE)
4472 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4473 if (pkt.pts != AV_NOPTS_VALUE)
4474 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4476 if (pkt.pts != AV_NOPTS_VALUE)
4477 pkt.pts *= ist->ts_scale;
4478 if (pkt.dts != AV_NOPTS_VALUE)
4479 pkt.dts *= ist->ts_scale;
4481 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* First packet of a discontinuous-ts format: align this stream's offset
 * with the file's last seen timestamp. */
4482 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4483 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4484 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4485 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4486 int64_t delta = pkt_dts - ifile->last_ts;
4487 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4488 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4489 ifile->ts_offset -= delta;
4490 av_log(NULL, AV_LOG_DEBUG,
4491 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4492 delta, ifile->ts_offset);
4493 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4494 if (pkt.pts != AV_NOPTS_VALUE)
4495 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and
 * track the pts range for the next iteration's duration estimate. */
4499 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4500 if (pkt.pts != AV_NOPTS_VALUE) {
4501 pkt.pts += duration;
4502 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4503 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4506 if (pkt.dts != AV_NOPTS_VALUE)
4507 pkt.dts += duration;
4509 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* Even under -copyts, re-enable discontinuity correction when the dts
 * looks like a genuine wrap (wrapped value much closer to expectation). */
4511 if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4512 (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4513 int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4514 ist->st->time_base, AV_TIME_BASE_Q,
4515 AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4516 if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4517 disable_discontinuity_correction = 0;
4520 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4521 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4522 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4523 !disable_discontinuity_correction) {
4524 int64_t delta = pkt_dts - ist->next_dts;
4525 if (is->iformat->flags & AVFMT_TS_DISCONT) {
/* Discontinuous formats: fold large jumps into ts_offset. */
4526 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4527 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4528 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4529 ifile->ts_offset -= delta;
4530 av_log(NULL, AV_LOG_DEBUG,
4531 "timestamp discontinuity for stream #%d:%d "
4532 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4533 ist->file_index, ist->st->index, ist->st->id,
4534 av_get_media_type_string(ist->dec_ctx->codec_type),
4535 delta, ifile->ts_offset);
4536 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4537 if (pkt.pts != AV_NOPTS_VALUE)
4538 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Continuous formats: drop wildly implausible dts/pts instead. */
4541 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4542 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4543 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4544 pkt.dts = AV_NOPTS_VALUE;
4546 if (pkt.pts != AV_NOPTS_VALUE){
4547 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4548 delta = pkt_pts - ist->next_dts;
4549 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4550 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4551 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4552 pkt.pts = AV_NOPTS_VALUE;
4558 if (pkt.dts != AV_NOPTS_VALUE)
4559 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4562 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4563 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4564 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4565 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4566 av_ts2str(input_files[ist->file_index]->ts_offset),
4567 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4570 sub2video_heartbeat(ist, pkt.pts);
4572 process_input_packet(ist, &pkt, 0);
4575 av_packet_unref(&pkt);
4581 * Perform a step of transcoding for the specified filter graph.
4583 * @param[in] graph filter graph to consider
4584 * @param[out] best_ist input stream where a frame would allow to continue
4585 * @return 0 for success, <0 for error
/* Asks the graph for its oldest pending frame; on success reaps filter
 * output, on EOF closes the graph's output streams, and on EAGAIN picks
 * the input whose buffersrc has failed the most requests as *best_ist. */
4587 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4590 int nb_requests, nb_requests_max = 0;
4591 InputFilter *ifilter;
4595 ret = avfilter_graph_request_oldest(graph->graph);
4597 return reap_filters(0);
4599 if (ret == AVERROR_EOF) {
4600 ret = reap_filters(1);
4601 for (i = 0; i < graph->nb_outputs; i++)
4602 close_output_stream(graph->outputs[i]->ost);
4605 if (ret != AVERROR(EAGAIN))
/* Graph is starved: choose the hungriest input that can still deliver. */
4608 for (i = 0; i < graph->nb_inputs; i++) {
4609 ifilter = graph->inputs[i];
4611 if (input_files[ist->file_index]->eagain ||
4612 input_files[ist->file_index]->eof_reached)
4614 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4615 if (nb_requests > nb_requests_max) {
4616 nb_requests_max = nb_requests;
/* No feedable input found: mark all outputs of this graph unavailable so
 * the scheduler moves on. */
4622 for (i = 0; i < graph->nb_outputs; i++)
4623 graph->outputs[i]->ost->unavailable = 1;
/* NOTE(review): lines are missing from this excerpt (non-contiguous
 * embedded line numbers); braces, early returns and some statements of
 * this function are not visible. */
4629 * Run a single step of transcoding.
4631 * @return 0 for success, <0 for error
4633 static int transcode_step(void)
4636 InputStream *ist = NULL;
/* Select the output stream that most needs data next. */
4639 ost = choose_output();
4646 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Output has a filtergraph object but the actual lavfi graph is not yet
 * built: configure it once every input filter knows its format. */
4650 if (ost->filter && !ost->filter->graph->graph) {
4651 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4652 ret = configure_filtergraph(ost->filter->graph);
4654 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* Graph is configured: pull from it, first working around the audio
 * peeking issue described below. */
4660 if (ost->filter && ost->filter->graph->graph) {
4662 * Similar case to the early audio initialization in reap_filters.
4663 * Audio is special in ffmpeg.c currently as we depend on lavfi's
4664 * audio frame buffering/creation to get the output audio frame size
4665 * in samples correct. The audio frame size for the filter chain is
4666 * configured during the output stream initialization.
4668 * Apparently avfilter_graph_request_oldest (called in
4669 * transcode_from_filter just down the line) peeks. Peeking already
4670 * puts one frame "ready to be given out", which means that any
4671 * update in filter buffer sink configuration afterwards will not
4672 * help us. And yes, even if it would be utilized,
4673 * av_buffersink_get_samples is affected, as it internally utilizes
4674 * the same early exit for peeked frames.
4676 * In other words, if avfilter_graph_request_oldest would not make
4677 * further filter chain configuration or usage of
4678 * av_buffersink_get_samples useless (by just causing the return
4679 * of the peeked AVFrame as-is), we could get rid of this additional
4680 * early encoder initialization.
4682 if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4683 init_output_stream_wrapper(ost, NULL, 1);
/* May set ist to the input stream that should be fed next. */
4685 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Graph exists but is not configured: check whether any input can still
 * deliver the missing format information. */
4689 } else if (ost->filter) {
4691 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4692 InputFilter *ifilter = ost->filter->graph->inputs[i];
4693 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* No input can produce more data for this graph's inputs. */
4699 ost->inputs_done = 1;
/* No filtergraph (e.g. streamcopy): read from the direct source stream. */
4703 av_assert0(ost->source_index >= 0);
4704 ist = input_streams[ost->source_index];
4707 ret = process_input(ist->file_index);
4708 if (ret == AVERROR(EAGAIN)) {
4709 if (input_files[ist->file_index]->eagain)
4710 ost->unavailable = 1;
/* EOF on the input file is not an error for a single step. */
4715 return ret == AVERROR_EOF ? 0 : ret;
4717 return reap_filters(0);
/* NOTE(review): this excerpt is missing intermediate lines (embedded line
 * numbers are non-contiguous): closing braces, gotos/labels and several
 * statements of this function are not visible here. */
4721 * The following code is the main loop of the file converter
4723 static int transcode(void)
4726 AVFormatContext *os;
4729 int64_t timer_start;
4730 int64_t total_packets_written = 0;
/* Set up all streams, filters and encoders before the loop starts. */
4732 ret = transcode_init();
4736 if (stdin_interaction) {
4737 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4740 timer_start = av_gettime_relative();
/* Guarded by HAVE_THREADS in the original (guard lines not visible). */
4743 if ((ret = init_input_threads()) < 0)
/* === Main loop: one transcode_step() per iteration until we are told to
 * stop, run out of output, or hit a fatal error. === */
4747 while (!received_sigterm) {
4748 int64_t cur_time= av_gettime_relative();
4750 /* if 'q' pressed, exits */
4751 if (stdin_interaction)
4752 if (check_keyboard_interaction(cur_time) < 0)
4755 /* check if there's any stream where output is still needed */
4756 if (!need_output()) {
4757 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4761 ret = transcode_step();
4762 if (ret < 0 && ret != AVERROR_EOF) {
4763 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4767 /* dump report by using the output first video and audio streams */
4768 print_report(0, timer_start, cur_time);
4771 free_input_threads();
4774 /* at the end of stream, we must flush the decoder buffers */
4775 for (i = 0; i < nb_input_streams; i++) {
4776 ist = input_streams[i];
4777 if (!input_files[ist->file_index]->eof_reached) {
/* NULL packet = flush request for this input stream's decoder. */
4778 process_input_packet(ist, NULL, 0);
4785 /* write the trailer if needed and close file */
4786 for (i = 0; i < nb_output_files; i++) {
4787 os = output_files[i]->ctx;
/* A file whose header was never written got no packets at all; report it
 * instead of writing a trailer. */
4788 if (!output_files[i]->header_written) {
4789 av_log(NULL, AV_LOG_ERROR,
4790 "Nothing was written into output file %d (%s), because "
4791 "at least one of its streams received no packets.\n",
4795 if ((ret = av_write_trailer(os)) < 0) {
4796 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4802 /* dump report by using the first video and audio streams */
4803 print_report(1, timer_start, av_gettime_relative());
4805 /* close each encoder */
4806 for (i = 0; i < nb_output_streams; i++) {
4807 ost = output_streams[i];
4808 if (ost->encoding_needed) {
4809 av_freep(&ost->enc_ctx->stats_in);
/* Honor -abort_on empty_output_stream: fail hard if a stream produced
 * nothing. */
4811 total_packets_written += ost->packets_written;
4812 if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4813 av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4818 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4819 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4823 /* close each decoder */
4824 for (i = 0; i < nb_input_streams; i++) {
4825 ist = input_streams[i];
4826 if (ist->decoding_needed) {
4827 avcodec_close(ist->dec_ctx);
/* Tear down hwaccel state before releasing the hw devices below. */
4828 if (ist->hwaccel_uninit)
4829 ist->hwaccel_uninit(ist->dec_ctx);
4833 hw_device_free_all();
/* Cleanup path (presumably reached via goto on errors as well — the
 * label itself is not visible in this excerpt). */
4840 free_input_threads();
4843 if (output_streams) {
4844 for (i = 0; i < nb_output_streams; i++) {
4845 ost = output_streams[i];
/* fclose() is checked: a failed close of the 2-pass logfile can lose
 * stats data, which is worth reporting. */
4848 if (fclose(ost->logfile))
4849 av_log(NULL, AV_LOG_ERROR,
4850 "Error closing logfile, loss of information possible: %s\n",
4851 av_err2str(AVERROR(errno)));
4852 ost->logfile = NULL;
/* Per-output-stream owned allocations and option dictionaries. */
4854 av_freep(&ost->forced_kf_pts);
4855 av_freep(&ost->apad);
4856 av_freep(&ost->disposition);
4857 av_dict_free(&ost->encoder_opts);
4858 av_dict_free(&ost->sws_dict);
4859 av_dict_free(&ost->swr_opts);
4860 av_dict_free(&ost->resample_opts);
/* Snapshot wall-clock time plus per-process user/system CPU time in
 * microseconds, for -benchmark reporting. The real_usec field is filled
 * from av_gettime_relative() via the brace initializer; CPU times come
 * from getrusage() or GetProcessTimes() depending on the platform, else
 * are zero. (Some #if/#endif lines are not visible in this excerpt.) */
4867 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
4869 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4871 struct rusage rusage;
4873 getrusage(RUSAGE_SELF, &rusage);
/* ru_utime/ru_stime are (seconds, microseconds) pairs; combine into a
 * single 64-bit microsecond count. */
4874 time_stamps.user_usec =
4875 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4876 time_stamps.sys_usec =
4877 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4878 #elif HAVE_GETPROCESSTIMES
4880 FILETIME c, e, k, u;
4881 proc = GetCurrentProcess();
4882 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100-nanosecond intervals; reassemble the 64-bit value
 * and divide by 10 to obtain microseconds. */
4883 time_stamps.user_usec =
4884 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4885 time_stamps.sys_usec =
4886 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* Fallback: no CPU-time API available on this platform. */
4888 time_stamps.user_usec = time_stamps.sys_usec = 0;
/* Return the process's peak resident/committed memory in bytes for the
 * -benchmark "maxrss" report; platform-specific, with a zero fallback
 * (fallback branch not visible in this excerpt). */
4893 static int64_t getmaxrss(void)
4895 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4896 struct rusage rusage;
4897 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is scaled to bytes here by *1024, i.e. the code assumes the
 * kernel reports it in kilobytes (true on Linux; note that some systems,
 * e.g. macOS, report bytes). */
4898 return (int64_t)rusage.ru_maxrss * 1024;
4899 #elif HAVE_GETPROCESSMEMORYINFO
4901 PROCESS_MEMORY_COUNTERS memcounters;
4902 proc = GetCurrentProcess();
/* The cb field must be set to the struct size before the call. */
4903 memcounters.cb = sizeof(memcounters);
4904 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4905 return memcounters.PeakPagefileUsage;
/* Discard-everything log callback; installed by main() when the first
 * argument is "-d" to silence all libav* logging. (The empty body is not
 * visible in this excerpt.) */
4911 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* NOTE(review): lines are missing from this excerpt (embedded line
 * numbers are non-contiguous); several braces and statements of main()
 * are not visible here. */
4915 int main(int argc, char **argv)
4918 BenchmarkTimeStamps ti;
/* Ensure ffmpeg_cleanup() runs on every exit_program() path. */
4922 register_exit(ffmpeg_cleanup);
4924 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4926 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Pre-scan argv for -loglevel so early messages honor it. */
4927 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: suppress all logging via the null
 * callback (presumably daemon mode; rest of the branch not visible). */
4929 if(argc>1 && !strcmp(argv[1], "-d")){
4931 av_log_set_callback(log_callback_null);
4937 avdevice_register_all();
4939 avformat_network_init();
4941 show_banner(argc, argv, options);
4943 /* parse options and open all input/output files */
4944 ret = ffmpeg_parse_options(argc, argv);
/* Invoked with no files at all: point the user at the help. */
4948 if (nb_output_files <= 0 && nb_input_files == 0) {
4950 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4954 /* file converter / grab */
4955 if (nb_output_files <= 0) {
4956 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Scan outputs for non-"rtp" muxers (the action taken on a match is on a
 * line not visible in this excerpt). */
4960 for (i = 0; i < nb_output_files; i++) {
4961 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Record start times, run the converter, then report -benchmark deltas. */
4965 current_time = ti = get_benchmark_time_stamps();
4966 if (transcode() < 0)
4969 int64_t utime, stime, rtime;
4970 current_time = get_benchmark_time_stamps();
4971 utime = current_time.user_usec - ti.user_usec;
4972 stime = current_time.sys_usec - ti.sys_usec;
4973 rtime = current_time.real_usec - ti.real_usec;
4974 av_log(NULL, AV_LOG_INFO,
4975 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4976 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4978 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4979 decode_error_stat[0], decode_error_stat[1]);
/* Fail if the observed decode-error ratio exceeds -max_error_rate. */
4980 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit code 255 signals termination by a caught signal. */
4983 exit_program(received_nb_signals ? 255 : main_return_code);
4984 return main_return_code;