2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* ---- program identity, used by cmdutils for banner/version output ---- */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* File handle for -vstats output; opened lazily, closed in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
/* Names of the constants available in -force_key_frames expressions. */
114 const char *const forced_keyframes_const_names[] = {
/* Per-process CPU/wallclock timestamps used by -benchmark / -benchmark_all. */
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
/* Forward declarations for helpers defined later in this file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* ---- global transcode state ---- */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
/* Threshold at which a "More than N frames duplicated" warning is emitted. */
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
/* Number of output files whose header/parameters have been dumped to the log. */
139 static unsigned nb_output_dumped = 0;
141 static int want_sdp = 1;
143 static BenchmarkTimeStamps current_time;
144 AVIOContext *progress_avio = NULL;
/* Scratch buffer shared by all subtitle encoders; allocated on first use. */
146 static uint8_t *subtitle_out;
/* Parallel arrays describing all opened inputs/outputs (shared with ffmpeg_opt.c). */
148 InputStream **input_streams = NULL;
149 int nb_input_streams = 0;
150 InputFile **input_files = NULL;
151 int nb_input_files = 0;
153 OutputStream **output_streams = NULL;
154 int nb_output_streams = 0;
155 OutputFile **output_files = NULL;
156 int nb_output_files = 0;
158 FilterGraph **filtergraphs;
163 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored on exit when restore_tty is set. */
164 static struct termios oldtty;
165 static int restore_tty;
169 static void free_input_threads(void);
173 Convert subtitles to video with alpha to insert them in filter graphs.
174 This is a temporary solution until libavfilter gets real subtitles support.
/*
 * Prepare ist->sub2video.frame as a blank (fully transparent) RGB32 canvas
 * sized from the decoder dimensions, falling back to the configured
 * sub2video w/h when the decoder reports 0.
 * NOTE(review): listing is elided — return statements/braces not shown.
 */
177 static int sub2video_get_blank_frame(InputStream *ist)
180 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before re-allocating at the (possibly new) size. */
182 av_frame_unref(frame);
183 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
186 if ((ret = av_frame_get_buffer(frame, 0)) < 0)
/* Zero = transparent black in RGB32, i.e. an empty overlay. */
188 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/*
 * Blit one paletted bitmap subtitle rectangle into the RGB32 canvas,
 * expanding palette indices (r->data[0]) through the palette (r->data[1]).
 * Rectangles that are not bitmaps or that overflow the canvas are skipped
 * with a warning.
 */
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
195 uint32_t *pal, *dst2;
199 if (r->type != SUBTITLE_BITMAP) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check: refuse rectangles that would write outside dst. */
203 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left corner (4 bytes per RGB32 pixel). */
210 dst += r->y * dst_linesize + r->x * 4;
212 pal = (uint32_t *)r->data[1];
213 for (y = 0; y < r->h; y++) {
214 dst2 = (uint32_t *)dst;
/* Palette lookup per pixel: 8-bit index -> 32-bit ARGB. */
216 for (x = 0; x < r->w; x++)
217 *(dst2++) = pal[*(src2++)];
219 src += r->linesize[0];
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
225 AVFrame *frame = ist->sub2video.frame;
229 av_assert1(frame->data[0]);
230 ist->sub2video.last_pts = frame->pts = pts;
231 for (i = 0; i < ist->nb_filters; i++) {
232 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
233 AV_BUFFERSRC_FLAG_KEEP_REF |
234 AV_BUFFERSRC_FLAG_PUSH);
235 if (ret != AVERROR_EOF && ret < 0)
236 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/*
 * Render an AVSubtitle onto the sub2video canvas and push it into the
 * filtergraph(s). A NULL sub clears the canvas (elided branch not shown).
 * heartbeat_pts is used as the start time while the system is initializing.
 */
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
243 AVFrame *frame = ist->sub2video.frame;
247 int64_t pts, end_pts;
/* Convert display window (AV_TIME_BASE + ms offsets) to stream time base. */
252 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253 AV_TIME_BASE_Q, ist->st->time_base);
254 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255 AV_TIME_BASE_Q, ist->st->time_base);
256 num_rects = sub->num_rects;
258 /* If we are initializing the system, utilize current heartbeat
259 PTS as the start time, and show until the following subpicture
260 is received. Otherwise, utilize the previous subpicture's end time
261 as the fall-back value. */
262 pts = ist->sub2video.initialize ?
263 heartbeat_pts : ist->sub2video.end_pts;
267 if (sub2video_get_blank_frame(ist) < 0) {
268 av_log(ist->dec_ctx, AV_LOG_ERROR,
269 "Impossible to get a blank canvas.\n");
272 dst = frame->data [0];
273 dst_linesize = frame->linesize[0];
/* Blit every rectangle of the subtitle onto the blank canvas. */
274 for (i = 0; i < num_rects; i++)
275 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
276 sub2video_push_ref(ist, pts);
/* Remember when this subpicture expires, and leave initialization mode. */
277 ist->sub2video.end_pts = end_pts;
278 ist->sub2video.initialize = 0;
/*
 * Called when a frame is read from any stream of the input file: re-send
 * the current sub2video canvas on sibling subtitle streams so filters
 * (e.g. overlay) waiting for a subtitle frame don't stall the graph.
 */
281 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
283 InputFile *infile = input_files[ist->file_index];
287 /* When a frame is read from a file, examine all sub2video streams in
288 the same file and send the sub2video frame again. Otherwise, decoded
289 video frames could be accumulating in the filter graph while a filter
290 (possibly overlay) is desperately waiting for a subtitle frame. */
291 for (i = 0; i < infile->nb_streams; i++) {
292 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video-enabled. */
293 if (!ist2->sub2video.frame)
295 /* subtitles seem to be usually muxed ahead of other streams;
296 if not, subtracting a larger time here is necessary */
297 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298 /* do not send the heartbeat frame if the subtitle is already ahead */
299 if (pts2 <= ist2->sub2video.last_pts)
301 if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302 /* if we have hit the end of the current displayed subpicture,
303 or if we need to initialize the system, update the
304 overlayed subpicture and its start/end times */
305 sub2video_update(ist2, pts2 + 1, NULL);
/* Only re-push the canvas if some buffersrc actually failed a request. */
306 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
309 sub2video_push_ref(ist2, pts2);
/*
 * Flush sub2video at end of stream: display the pending subpicture until
 * INT64_MAX, then send NULL (EOF) to every buffersrc feeding this stream.
 */
313 static void sub2video_flush(InputStream *ist)
318 if (ist->sub2video.end_pts < INT64_MAX)
319 sub2video_update(ist, INT64_MAX, NULL);
320 for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame == EOF marker for the buffer source. */
321 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322 if (ret != AVERROR_EOF && ret < 0)
323 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
327 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the saved tty attributes.
   Also called from the normal exit path. */
329 static void term_exit_sigsafe(void)
333 tcsetattr (0, TCSANOW, &oldtty);
339 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* State shared between signal handlers and the main loop.
   volatile is sufficient here because handlers run in the same thread;
   transcode_init_done is atomic as it is read from decode_interrupt_cb. */
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
345 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
/* First pts seen when -copyts is active (AV_NOPTS_VALUE until then). */
348 static int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
/* Signal handler: record the signal and request a graceful stop; after
   more than 3 signals, write a message and hard-exit (elided). Only
   async-signal-safe calls (write) are used here. */
351 sigterm_handler(int sig)
354 received_sigterm = sig;
355 received_nb_signals++;
357 if(received_nb_signals > 3) {
358 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359 strlen("Received > 3 system signals, hard exiting\n"));
/* write() result intentionally ignored — nothing useful can be done here. */
360 if (ret < 0) { /* Do nothing */ };
/* Windows console control handler: translate console events into the same
   POSIX-style signals used on other platforms. */
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
368 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
373 case CTRL_BREAK_EVENT:
374 sigterm_handler(SIGINT);
377 case CTRL_CLOSE_EVENT:
378 case CTRL_LOGOFF_EVENT:
379 case CTRL_SHUTDOWN_EVENT:
380 sigterm_handler(SIGTERM);
381 /* Basically, with these 3 events, when we return from this method the
382 process is hard terminated, so stall as long as we need to
383 to try and let the main thread(s) clean up and gracefully terminate
384 (we have at most 5 seconds, but should be done far before that). */
385 while (!ffmpeg_exited) {
391 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* SIGNAL(): install a handler via sigaction() where available, with a
   fallback definition (elided) for platforms without it. The code below
   is the interior of term_init(): raw-mode tty setup + handler install. */
398 #define SIGNAL(sig, func) \
400 action.sa_handler = func; \
401 sigaction(sig, &action, NULL); \
404 #define SIGNAL(sig, func) \
410 #if defined __linux__
411 struct sigaction action = {0};
412 action.sa_handler = sigterm_handler;
414 /* block other interrupts while processing this one */
415 sigfillset(&action.sa_mask);
417 /* restart interruptible functions (i.e. don't fail with EINTR) */
418 action.sa_flags = SA_RESTART;
/* Only grab the terminal when interactive (-stdin) and not daemonized. */
422 if (!run_as_daemon && stdin_interaction) {
424 if (tcgetattr (0, &tty) == 0) {
/* Switch stdin to a raw-ish mode so single keypresses can be read. */
428 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429 |INLCR|IGNCR|ICRNL|IXON);
430 tty.c_oflag |= OPOST;
431 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432 tty.c_cflag &= ~(CSIZE|PARENB);
437 tcsetattr (0, TCSANOW, &tty);
439 SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
443 SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444 SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
446 SIGNAL(SIGXCPU, sigterm_handler);
449 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
451 #if HAVE_SETCONSOLECTRLHANDLER
452 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
456 /* read a key without blocking */
/* POSIX path uses select() with a zero timeout; the Windows path below
   distinguishes console vs pipe stdin via GetConsoleMode/PeekNamedPipe. */
457 static int read_key(void)
469 n = select(1, &rfds, NULL, NULL, &tv);
478 # if HAVE_PEEKNAMEDPIPE
480 static HANDLE input_handle;
483 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails on non-console handles => stdin is a pipe. */
484 is_pipe = !GetConsoleMode(input_handle, &dw);
488 /* When running under a GUI, you will end here. */
489 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
490 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal arrived
   (two signals are tolerated while transcode init is still running). */
508 static int decode_interrupt_cb(void *ctx)
510 return received_nb_signals > atomic_load(&transcode_init_done);
513 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/*
 * Global teardown, registered as the exit handler: frees filtergraphs,
 * output files/streams, input files/streams, and misc global buffers,
 * then logs why we are exiting. `ret` is the process exit code.
 */
515 static void ffmpeg_cleanup(int ret)
520 int maxrss = getmaxrss() / 1024;
521 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* ---- filtergraphs: drain queued frames/subtitles, free descriptors ---- */
524 for (i = 0; i < nb_filtergraphs; i++) {
525 FilterGraph *fg = filtergraphs[i];
526 avfilter_graph_free(&fg->graph);
527 for (j = 0; j < fg->nb_inputs; j++) {
528 InputFilter *ifilter = fg->inputs[j];
529 struct InputStream *ist = ifilter->ist;
531 while (av_fifo_size(ifilter->frame_queue)) {
533 av_fifo_generic_read(ifilter->frame_queue, &frame,
534 sizeof(frame), NULL);
535 av_frame_free(&frame);
537 av_fifo_freep(&ifilter->frame_queue);
538 if (ist->sub2video.sub_queue) {
539 while (av_fifo_size(ist->sub2video.sub_queue)) {
541 av_fifo_generic_read(ist->sub2video.sub_queue,
542 &sub, sizeof(sub), NULL);
543 avsubtitle_free(&sub);
545 av_fifo_freep(&ist->sub2video.sub_queue);
547 av_buffer_unref(&ifilter->hw_frames_ctx);
548 av_freep(&ifilter->name);
549 av_freep(&fg->inputs[j]);
551 av_freep(&fg->inputs);
552 for (j = 0; j < fg->nb_outputs; j++) {
553 OutputFilter *ofilter = fg->outputs[j];
555 avfilter_inout_free(&ofilter->out_tmp);
556 av_freep(&ofilter->name);
557 av_freep(&ofilter->formats);
558 av_freep(&ofilter->channel_layouts);
559 av_freep(&ofilter->sample_rates);
560 av_freep(&fg->outputs[j]);
562 av_freep(&fg->outputs);
563 av_freep(&fg->graph_desc);
565 av_freep(&filtergraphs[i]);
567 av_freep(&filtergraphs);
569 av_freep(&subtitle_out);
/* ---- output files: close muxer I/O (unless AVFMT_NOFILE) and contexts ---- */
572 for (i = 0; i < nb_output_files; i++) {
573 OutputFile *of = output_files[i];
578 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
580 avformat_free_context(s);
581 av_dict_free(&of->opts);
583 av_freep(&output_files[i]);
/* ---- output streams: per-stream buffers, codec contexts, mux queue ---- */
585 for (i = 0; i < nb_output_streams; i++) {
586 OutputStream *ost = output_streams[i];
591 av_bsf_free(&ost->bsf_ctx);
593 av_frame_free(&ost->filtered_frame);
594 av_frame_free(&ost->last_frame);
595 av_dict_free(&ost->encoder_opts);
597 av_freep(&ost->forced_keyframes);
598 av_expr_free(ost->forced_keyframes_pexpr);
599 av_freep(&ost->avfilter);
600 av_freep(&ost->logfile_prefix);
602 av_freep(&ost->audio_channels_map);
603 ost->audio_channels_mapped = 0;
605 av_dict_free(&ost->sws_dict);
606 av_dict_free(&ost->swr_opts);
608 avcodec_free_context(&ost->enc_ctx);
609 avcodec_parameters_free(&ost->ref_par);
611 if (ost->muxing_queue) {
/* Unref any packets still waiting for the muxer header. */
612 while (av_fifo_size(ost->muxing_queue)) {
614 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
615 av_packet_unref(&pkt);
617 av_fifo_freep(&ost->muxing_queue);
620 av_freep(&output_streams[i]);
/* ---- inputs: stop reader threads first, then close demuxers/decoders ---- */
623 free_input_threads();
625 for (i = 0; i < nb_input_files; i++) {
626 avformat_close_input(&input_files[i]->ctx);
627 av_freep(&input_files[i]);
629 for (i = 0; i < nb_input_streams; i++) {
630 InputStream *ist = input_streams[i];
632 av_frame_free(&ist->decoded_frame);
633 av_frame_free(&ist->filter_frame);
634 av_dict_free(&ist->decoder_opts);
635 avsubtitle_free(&ist->prev_sub.subtitle);
636 av_frame_free(&ist->sub2video.frame);
637 av_freep(&ist->filters);
638 av_freep(&ist->hwaccel_device);
639 av_freep(&ist->dts_buffer);
641 avcodec_free_context(&ist->dec_ctx);
643 av_freep(&input_streams[i]);
/* fclose() is checked: a failed close can mean lost buffered stats. */
647 if (fclose(vstats_file))
648 av_log(NULL, AV_LOG_ERROR,
649 "Error closing vstats file, loss of information possible: %s\n",
650 av_err2str(AVERROR(errno)));
652 av_freep(&vstats_filename);
654 av_freep(&input_streams);
655 av_freep(&input_files);
656 av_freep(&output_streams);
657 av_freep(&output_files);
661 avformat_network_deinit();
663 if (received_sigterm) {
664 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
665 (int) received_sigterm);
/* Only call it a failed conversion if init had actually completed. */
666 } else if (ret && atomic_load(&transcode_init_done)) {
667 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
   Used to strip per-stream options already consumed elsewhere. */
673 void remove_avoptions(AVDictionary **a, AVDictionary *b)
675 AVDictionaryEntry *t = NULL;
677 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
678 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with AV_LOG_FATAL if any option in m was not consumed — catches
   user typos in -option names (exit call elided in this listing). */
682 void assert_avoptions(AVDictionary *m)
684 AVDictionaryEntry *t;
685 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
686 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
691 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log user/sys/real time deltas since the previous
   call, labelled by the printf-style fmt; fmt==NULL just resets the
   reference timestamps (reset line elided). */
696 static void update_benchmark(const char *fmt, ...)
698 if (do_benchmark_all) {
699 BenchmarkTimeStamps t = get_benchmark_time_stamps();
705 vsnprintf(buf, sizeof(buf), fmt, va);
707 av_log(NULL, AV_LOG_INFO,
708 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
709 t.user_usec - current_time.user_usec,
710 t.sys_usec - current_time.sys_usec,
711 t.real_usec - current_time.real_usec, buf);
/* Mark every output stream finished: `this_stream` flags for ost itself,
   `others` for all remaining streams. */
717 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
720 for (i = 0; i < nb_output_streams; i++) {
721 OutputStream *ost2 = output_streams[i];
722 ost2->finished |= ost == ost2 ? this_stream : others;
/*
 * Final step of the output path: enforce -frames limits, buffer packets
 * until the muxer header is written, fix up timestamps (DTS/PTS sanity,
 * monotonicity), then hand the packet to av_interleaved_write_frame().
 * The packet is always consumed. `unqueue` marks packets replayed from
 * the muxing queue (already counted once).
 */
726 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
728 AVFormatContext *s = of->ctx;
729 AVStream *st = ost->st;
733 * Audio encoders may split the packets -- #frames in != #packets out.
734 * But there is no reordering, so we can limit the number of output packets
735 * by simply dropping them here.
736 * Counting encoded video frames needs to be done separately because of
737 * reordering, see do_video_out().
738 * Do not count the packet when unqueued because it has been counted when queued.
740 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
741 if (ost->frame_number >= ost->max_frames) {
742 av_packet_unref(pkt);
748 if (!of->header_written) {
749 AVPacket tmp_pkt = {0};
750 /* the muxer is not initialized yet, buffer the packet */
/* Grow the queue (doubling, capped once over the data-size threshold). */
751 if (!av_fifo_space(ost->muxing_queue)) {
752 unsigned int are_we_over_size =
753 (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
754 int new_size = are_we_over_size ?
755 FFMIN(2 * av_fifo_size(ost->muxing_queue),
756 ost->max_muxing_queue_size) :
757 2 * av_fifo_size(ost->muxing_queue);
759 if (new_size <= av_fifo_size(ost->muxing_queue)) {
760 av_log(NULL, AV_LOG_ERROR,
761 "Too many packets buffered for output stream %d:%d.\n",
762 ost->file_index, ost->st->index);
765 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
769 ret = av_packet_make_refcounted(pkt);
772 av_packet_move_ref(&tmp_pkt, pkt);
773 ost->muxing_queue_data_size += tmp_pkt.size;
774 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* -vsync drop / negative -async: strip timestamps entirely. */
778 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
779 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
780 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
782 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* Pull encoder quality/PSNR stats from packet side data for -vstats. */
784 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
786 ost->quality = sd ? AV_RL32(sd) : -1;
787 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
789 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
791 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* In CFR mode the duration is dictated by the output frame rate. */
796 if (ost->frame_rate.num && ost->is_cfr) {
797 if (pkt->duration > 0)
798 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
799 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
804 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
806 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
807 if (pkt->dts != AV_NOPTS_VALUE &&
808 pkt->pts != AV_NOPTS_VALUE &&
809 pkt->dts > pkt->pts) {
810 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
812 ost->file_index, ost->st->index);
/* Replace dts by the median of {pts, dts, last_mux_dts+1}. */
814 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
815 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
816 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless AVFMT_TS_NONSTRICT) monotonically
   increasing DTS on A/V/S streams; VP9 stream copy is exempted. */
818 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
819 pkt->dts != AV_NOPTS_VALUE &&
820 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
821 ost->last_mux_dts != AV_NOPTS_VALUE) {
822 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
823 if (pkt->dts < max) {
824 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
826 loglevel = AV_LOG_ERROR;
827 av_log(s, loglevel, "Non-monotonous DTS in output stream "
828 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
829 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
831 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
834 av_log(s, loglevel, "changing to %"PRId64". This may result "
835 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
837 if (pkt->pts >= pkt->dts)
838 pkt->pts = FFMAX(pkt->pts, max);
843 ost->last_mux_dts = pkt->dts;
845 ost->data_size += pkt->size;
846 ost->packets_written++;
848 pkt->stream_index = ost->index;
851 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
852 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
853 av_get_media_type_string(ost->enc_ctx->codec_type),
854 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
855 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
/* A write failure poisons the whole output: finish all streams. */
860 ret = av_interleaved_write_frame(s, pkt);
862 print_error("av_interleaved_write_frame()", ret);
863 main_return_code = 1;
864 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
866 av_packet_unref(pkt);
/* Mark the encoder side of ost finished and, where applicable (condition
   elided), shorten the file's recording time to this stream's end. */
869 static void close_output_stream(OutputStream *ost)
871 OutputFile *of = output_files[ost->file_index];
873 ost->finished |= ENCODER_FINISHED;
875 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
876 of->recording_time = FFMIN(of->recording_time, end);
881 * Send a single packet to the output, applying any bitstream filters
882 * associated with the output stream. This may result in any number
883 * of packets actually being written, depending on what bitstream
884 * filters are applied. The supplied packet is consumed and will be
885 * blank (as if newly-allocated) when this function returns.
887 * If eof is set, instead indicate EOF to all bitstream filters and
888 * therefore flush any delayed packets to the output. A blank packet
889 * must be supplied in this case.
891 static void output_packet(OutputFile *of, AVPacket *pkt,
892 OutputStream *ost, int eof)
896 /* apply the output bitstream filters */
/* NULL to av_bsf_send_packet signals EOF / flush. */
898 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
/* Drain every packet the BSF produces; EAGAIN just means "send more". */
901 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
902 write_packet(of, pkt, ost, 0);
903 if (ret == AVERROR(EAGAIN))
/* No BSF configured (elided branch): write the packet directly. */
906 write_packet(of, pkt, ost, 0);
909 if (ret < 0 && ret != AVERROR_EOF) {
910 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
911 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether ost may still emit frames under the file's -t limit;
   closes the stream once the recording time is reached
   (return statements elided in this listing). */
917 static int check_recording_time(OutputStream *ost)
919 OutputFile *of = output_files[ost->file_index];
921 if (of->recording_time != INT64_MAX &&
922 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
923 AV_TIME_BASE_Q) >= 0) {
924 close_output_stream(ost);
/*
 * Rewrite frame->pts from the buffersink time base into the encoder time
 * base (shifted by the output -ss start time) and return the same value
 * as a double with extra fractional precision for the fps/vsync logic.
 */
930 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
933 double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
934 AVCodecContext *enc = ost->enc_ctx;
935 if (!frame || frame->pts == AV_NOPTS_VALUE ||
936 !enc || !ost->filter || !ost->filter->graph->graph)
940 AVFilterContext *filter = ost->filter->filter;
942 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
943 AVRational filter_tb = av_buffersink_get_time_base(filter);
944 AVRational tb = enc->time_base;
/* Scale the time base up so the integer rescale keeps sub-tick precision
   (up to 16 extra bits, bounded to keep tb.den within 30 bits). */
945 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
947 tb.den <<= extra_bits;
949 av_rescale_q(frame->pts, filter_tb, tb) -
950 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
951 float_pts /= 1 << extra_bits;
952 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
953 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* Integer pts: same rescale without the extra precision bits. */
956 av_rescale_q(frame->pts, filter_tb, enc->time_base) -
957 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
963 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
964 frame ? av_ts2str(frame->pts) : "NULL",
965 frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
967 enc ? enc->time_base.num : -1,
968 enc ? enc->time_base.den : -1);
/* Forward declaration; defined later in the file. */
974 static int init_output_stream(OutputStream *ost, AVFrame *frame,
975 char *error, int error_len);
/* Idempotent wrapper around init_output_stream(): no-op once the stream
   is initialized; on failure logs the error message filled in by the
   callee (fatal-exit path elided in this listing). */
977 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
980 int ret = AVERROR_BUG;
981 char error[1024] = {0};
983 if (ost->initialized)
986 ret = init_output_stream(ost, frame, error, sizeof(error));
988 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
989 ost->file_index, ost->index, error);
/*
 * Encode one audio frame: adjust its pts to the encoder time base, enforce
 * -t, feed the encoder (send/receive API) and pass every produced packet
 * to output_packet(). Error/exit paths are elided in this listing.
 */
998 static void do_audio_out(OutputFile *of, OutputStream *ost,
1001 AVCodecContext *enc = ost->enc_ctx;
1005 av_init_packet(&pkt);
1009 adjust_frame_pts_to_encoder_tb(of, ost, frame);
1011 if (!check_recording_time(ost))
/* Without a usable pts (or with negative -async), continue the running
   sample count in sync_opts. */
1014 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1015 frame->pts = ost->sync_opts;
1016 ost->sync_opts = frame->pts + frame->nb_samples;
1017 ost->samples_encoded += frame->nb_samples;
1018 ost->frames_encoded++;
1020 av_assert0(pkt.size || !pkt.data);
1021 update_benchmark(NULL);
1023 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1024 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1025 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1026 enc->time_base.num, enc->time_base.den);
1029 ret = avcodec_send_frame(enc, frame);
/* Drain loop: EAGAIN means the encoder wants more input. */
1034 ret = avcodec_receive_packet(enc, &pkt);
1035 if (ret == AVERROR(EAGAIN))
1040 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1042 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1045 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1046 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1047 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1048 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1051 output_packet(of, &pkt, ost, 0);
1056 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/*
 * Encode one AVSubtitle into up to two packets (DVB subtitles need a
 * draw packet plus a clear packet) using the legacy subtitle encode API,
 * then send each packet to output_packet().
 */
1060 static void do_subtitle_out(OutputFile *of,
1064 int subtitle_out_max_size = 1024 * 1024;
1065 int subtitle_out_size, nb, i;
1066 AVCodecContext *enc;
1070 if (sub->pts == AV_NOPTS_VALUE) {
1071 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer on first use. */
1079 if (!subtitle_out) {
1080 subtitle_out = av_malloc(subtitle_out_max_size);
1081 if (!subtitle_out) {
1082 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1087 /* Note: DVB subtitle need one packet to draw them and one other
1088 packet to clear them */
1089 /* XXX: signal it in the codec context ? */
1090 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1095 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1097 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1098 pts -= output_files[ost->file_index]->start_time;
/* nb == 2 for DVB (draw + clear), 1 otherwise (assignment elided). */
1099 for (i = 0; i < nb; i++) {
1100 unsigned save_num_rects = sub->num_rects;
1102 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1103 if (!check_recording_time(ost))
1107 // start_display_time is required to be 0
1108 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1109 sub->end_display_time -= sub->start_display_time;
1110 sub->start_display_time = 0;
1114 ost->frames_encoded++;
1116 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1117 subtitle_out_max_size, sub);
/* Restore num_rects: the second (clear) pass may have zeroed it. */
1119 sub->num_rects = save_num_rects;
1120 if (subtitle_out_size < 0) {
1121 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1125 av_init_packet(&pkt);
1126 pkt.data = subtitle_out;
1127 pkt.size = subtitle_out_size;
1128 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1129 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1130 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1131 /* XXX: the pts correction is handled here. Maybe handling
1132 it in the codec would be better */
/* Draw packet at start_display_time, clear packet at end_display_time. */
1134 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1136 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1139 output_packet(of, &pkt, ost, 0);
1143 static void do_video_out(OutputFile *of,
1145 AVFrame *next_picture)
1147 int ret, format_video_sync;
1149 AVCodecContext *enc = ost->enc_ctx;
1150 AVRational frame_rate;
1151 int nb_frames, nb0_frames, i;
1152 double delta, delta0;
1153 double duration = 0;
1154 double sync_ipts = AV_NOPTS_VALUE;
1156 InputStream *ist = NULL;
1157 AVFilterContext *filter = ost->filter->filter;
1159 init_output_stream_wrapper(ost, next_picture, 1);
1160 sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1162 if (ost->source_index >= 0)
1163 ist = input_streams[ost->source_index];
1165 frame_rate = av_buffersink_get_frame_rate(filter);
1166 if (frame_rate.num > 0 && frame_rate.den > 0)
1167 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1169 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1170 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1172 if (!ost->filters_script &&
1174 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1177 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1178 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1181 if (!next_picture) {
1183 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1184 ost->last_nb0_frames[1],
1185 ost->last_nb0_frames[2]);
1187 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1188 delta = delta0 + duration;
1190 /* by default, we output a single frame */
1191 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1194 format_video_sync = video_sync_method;
1195 if (format_video_sync == VSYNC_AUTO) {
1196 if(!strcmp(of->ctx->oformat->name, "avi")) {
1197 format_video_sync = VSYNC_VFR;
1199 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1201 && format_video_sync == VSYNC_CFR
1202 && input_files[ist->file_index]->ctx->nb_streams == 1
1203 && input_files[ist->file_index]->input_ts_offset == 0) {
1204 format_video_sync = VSYNC_VSCFR;
1206 if (format_video_sync == VSYNC_CFR && copy_ts) {
1207 format_video_sync = VSYNC_VSCFR;
1210 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1214 format_video_sync != VSYNC_PASSTHROUGH &&
1215 format_video_sync != VSYNC_DROP) {
1216 if (delta0 < -0.6) {
1217 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1219 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1220 sync_ipts = ost->sync_opts;
1225 switch (format_video_sync) {
1227 if (ost->frame_number == 0 && delta0 >= 0.5) {
1228 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1231 ost->sync_opts = llrint(sync_ipts);
1234 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1235 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1237 } else if (delta < -1.1)
1239 else if (delta > 1.1) {
1240 nb_frames = lrintf(delta);
1242 nb0_frames = llrintf(delta0 - 0.6);
1248 else if (delta > 0.6)
1249 ost->sync_opts = llrint(sync_ipts);
1252 case VSYNC_PASSTHROUGH:
1253 ost->sync_opts = llrint(sync_ipts);
1260 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1261 nb0_frames = FFMIN(nb0_frames, nb_frames);
1263 memmove(ost->last_nb0_frames + 1,
1264 ost->last_nb0_frames,
1265 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1266 ost->last_nb0_frames[0] = nb0_frames;
1268 if (nb0_frames == 0 && ost->last_dropped) {
1270 av_log(NULL, AV_LOG_VERBOSE,
1271 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1272 ost->frame_number, ost->st->index, ost->last_frame->pts);
1274 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1275 if (nb_frames > dts_error_threshold * 30) {
1276 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1280 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1281 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1282 if (nb_frames_dup > dup_warning) {
1283 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1287 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1289 /* duplicates frame if needed */
1290 for (i = 0; i < nb_frames; i++) {
1291 AVFrame *in_picture;
1292 int forced_keyframe = 0;
1294 av_init_packet(&pkt);
1298 if (i < nb0_frames && ost->last_frame) {
1299 in_picture = ost->last_frame;
1301 in_picture = next_picture;
1306 in_picture->pts = ost->sync_opts;
1308 if (!check_recording_time(ost))
1311 in_picture->quality = enc->global_quality;
1312 in_picture->pict_type = 0;
1314 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1315 in_picture->pts != AV_NOPTS_VALUE)
1316 ost->forced_kf_ref_pts = in_picture->pts;
1318 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1319 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1320 if (ost->forced_kf_index < ost->forced_kf_count &&
1321 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1322 ost->forced_kf_index++;
1323 forced_keyframe = 1;
1324 } else if (ost->forced_keyframes_pexpr) {
1326 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1327 res = av_expr_eval(ost->forced_keyframes_pexpr,
1328 ost->forced_keyframes_expr_const_values, NULL);
1329 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1330 ost->forced_keyframes_expr_const_values[FKF_N],
1331 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1332 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1333 ost->forced_keyframes_expr_const_values[FKF_T],
1334 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1337 forced_keyframe = 1;
1338 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1339 ost->forced_keyframes_expr_const_values[FKF_N];
1340 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1341 ost->forced_keyframes_expr_const_values[FKF_T];
1342 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1345 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1346 } else if ( ost->forced_keyframes
1347 && !strncmp(ost->forced_keyframes, "source", 6)
1348 && in_picture->key_frame==1
1350 forced_keyframe = 1;
1353 if (forced_keyframe) {
1354 in_picture->pict_type = AV_PICTURE_TYPE_I;
1355 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1358 update_benchmark(NULL);
1360 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1361 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1362 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1363 enc->time_base.num, enc->time_base.den);
1366 ost->frames_encoded++;
1368 ret = avcodec_send_frame(enc, in_picture);
1371 // Make sure Closed Captions will not be duplicated
1372 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1375 ret = avcodec_receive_packet(enc, &pkt);
1376 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1377 if (ret == AVERROR(EAGAIN))
1383 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1384 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1385 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1386 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1389 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1390 pkt.pts = ost->sync_opts;
1392 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1395 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1396 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1397 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1398 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1401 frame_size = pkt.size;
1402 output_packet(of, &pkt, ost, 0);
1404 /* if two pass, output log */
1405 if (ost->logfile && enc->stats_out) {
1406 fprintf(ost->logfile, "%s", enc->stats_out);
1411 * For video, number of frames in == number of packets out.
1412 * But there may be reordering, so we can't throw away frames on encoder
1413 * flush, we need to limit them here, before they go into encoder.
1415 ost->frame_number++;
1417 if (vstats_filename && frame_size)
1418 do_video_stats(ost, frame_size);
1421 if (!ost->last_frame)
1422 ost->last_frame = av_frame_alloc();
1423 av_frame_unref(ost->last_frame);
1424 if (next_picture && ost->last_frame)
1425 av_frame_ref(ost->last_frame, next_picture);
1427 av_frame_free(&ost->last_frame);
1431 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1435 static double psnr(double d)
1437 return -10.0 * log10(d);
/**
 * Append one line of per-frame statistics for a video output stream to the
 * global vstats file (lazily opened from vstats_filename on first call).
 * Fields written include frame number, quantizer, optional PSNR, encoded
 * frame size, cumulative output size, end timestamp and bitrates.
 * NOTE(review): several lines (error handling, enc assignment, closing
 * braces) are elided in this excerpt.
 */
1440 static void do_video_stats(OutputStream *ost, int frame_size)
1442 AVCodecContext *enc;
1444 double ti1, bitrate, avg_bitrate;
1446 /* this is executed just the first time do_video_stats is called */
1448 vstats_file = fopen(vstats_filename, "w");
1456 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1457 frame_number = ost->st->nb_frames;
1458 if (vstats_version <= 1) {
/* legacy (v1) vstats format omits the output file/stream indices */
1459 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1460 ost->quality / (float)FF_QP2LAMBDA);
1462 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1463 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is derived from the encoder's accumulated squared error,
 * normalized by picture area and the 8-bit sample range */
1466 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1467 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1469 fprintf(vstats_file,"f_size= %6d ", frame_size);
1470 /* compute pts value */
1471 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* per-frame bitrate assumes one frame per encoder time_base tick */
1475 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1476 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1477 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1478 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1479 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/**
 * Mark an output stream — and every other stream of the same output file —
 * as finished for both the encoder and the muxer, so no further packets
 * are produced or written for that file.
 */
1483 static void finish_output_stream(OutputStream *ost)
1485 OutputFile *of = output_files[ost->file_index];
1488 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* presumably guarded by the file's shortest/stop condition in the elided
 * lines — the loop below finishes all sibling streams of this file */
1491 for (i = 0; i < of->ctx->nb_streams; i++)
1492 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1497 * Get and encode new output from any of the filtergraphs, without causing
1500 * @return 0 for success, <0 for severe errors
/*
 * Drains every configured buffersink without requesting new frames
 * (AV_BUFFERSINK_FLAG_NO_REQUEST) and hands each filtered frame to the
 * matching audio/video encoder. When @p flush is set, an EOF from a video
 * sink triggers a final do_video_out(NULL) to flush duplicated frames.
 */
1502 static int reap_filters(int flush)
1504 AVFrame *filtered_frame = NULL;
1507 /* Reap all buffers present in the buffer sinks */
1508 for (i = 0; i < nb_output_streams; i++) {
1509 OutputStream *ost = output_streams[i];
1510 OutputFile *of = output_files[ost->file_index];
1511 AVFilterContext *filter;
1512 AVCodecContext *enc = ost->enc_ctx;
/* skip streams whose filtergraph is not configured yet */
1515 if (!ost->filter || !ost->filter->graph->graph)
1517 filter = ost->filter->filter;
1520 * Unlike video, with audio the audio frame size matters.
1521 * Currently we are fully reliant on the lavfi filter chain to
1522 * do the buffering deed for us, and thus the frame size parameter
1523 * needs to be set accordingly. Where does one get the required
1524 * frame size? From the initialized AVCodecContext of an audio
1525 * encoder. Thus, if we have gotten to an audio stream, initialize
1526 * the encoder earlier than receiving the first AVFrame.
1528 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
1529 init_output_stream_wrapper(ost, NULL, 1);
/* lazily allocate the reusable per-stream frame */
1531 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1532 return AVERROR(ENOMEM);
1534 filtered_frame = ost->filtered_frame;
1537 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1538 AV_BUFFERSINK_FLAG_NO_REQUEST);
/* EAGAIN/EOF are expected here; anything else is only warned about */
1540 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1541 av_log(NULL, AV_LOG_WARNING,
1542 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1543 } else if (flush && ret == AVERROR_EOF) {
1544 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1545 do_video_out(of, ost, NULL);
/* drop frames arriving after the stream was finished */
1549 if (ost->finished) {
1550 av_frame_unref(filtered_frame);
1554 switch (av_buffersink_get_type(filter)) {
1555 case AVMEDIA_TYPE_VIDEO:
/* propagate SAR from the filter output unless the user forced one */
1556 if (!ost->frame_aspect_ratio.num)
1557 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1559 do_video_out(of, ost, filtered_frame);
1561 case AVMEDIA_TYPE_AUDIO:
1562 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1563 enc->channels != filtered_frame->channels) {
1564 av_log(NULL, AV_LOG_ERROR,
1565 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1568 do_audio_out(of, ost, filtered_frame);
1571 // TODO support subtitle filters
1575 av_frame_unref(filtered_frame);
/**
 * Print the end-of-run summary: per-type output sizes plus muxing overhead
 * at INFO level, then verbose per-input/per-output stream packet and frame
 * counters, and a warning when nothing at all was encoded.
 *
 * @param total_size total byte size of the muxed output (from avio),
 *                   used to derive the container overhead percentage
 */
1582 static void print_final_stats(int64_t total_size)
1584 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1585 uint64_t subtitle_size = 0;
1586 uint64_t data_size = 0;
/* negative percent means "unknown" and prints as such below */
1587 float percent = -1.0;
1591 for (i = 0; i < nb_output_streams; i++) {
1592 OutputStream *ost = output_streams[i];
1593 switch (ost->enc_ctx->codec_type) {
1594 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1595 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1596 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1597 default: other_size += ost->data_size; break;
1599 extra_size += ost->enc_ctx->extradata_size;
1600 data_size += ost->data_size;
/* first-pass-only streams produce no muxed output; elided handling */
1601 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1602 != AV_CODEC_FLAG_PASS1)
/* overhead = container bytes beyond the raw elementary streams */
1606 if (data_size && total_size>0 && total_size >= data_size)
1607 percent = 100.0 * (total_size - data_size) / data_size;
1609 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1610 video_size / 1024.0,
1611 audio_size / 1024.0,
1612 subtitle_size / 1024.0,
1613 other_size / 1024.0,
1614 extra_size / 1024.0);
1616 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1618 av_log(NULL, AV_LOG_INFO, "unknown");
1619 av_log(NULL, AV_LOG_INFO, "\n");
1621 /* print verbose per-stream stats */
1622 for (i = 0; i < nb_input_files; i++) {
1623 InputFile *f = input_files[i];
1624 uint64_t total_packets = 0, total_size = 0;
1626 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1629 for (j = 0; j < f->nb_streams; j++) {
1630 InputStream *ist = input_streams[f->ist_index + j];
1631 enum AVMediaType type = ist->dec_ctx->codec_type;
1633 total_size += ist->data_size;
1634 total_packets += ist->nb_packets;
1636 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1637 i, j, media_type_string(type));
1638 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1639 ist->nb_packets, ist->data_size);
1641 if (ist->decoding_needed) {
1642 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1643 ist->frames_decoded);
1644 if (type == AVMEDIA_TYPE_AUDIO)
1645 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1646 av_log(NULL, AV_LOG_VERBOSE, "; ");
1649 av_log(NULL, AV_LOG_VERBOSE, "\n");
1652 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1653 total_packets, total_size);
1656 for (i = 0; i < nb_output_files; i++) {
1657 OutputFile *of = output_files[i];
1658 uint64_t total_packets = 0, total_size = 0;
1660 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1663 for (j = 0; j < of->ctx->nb_streams; j++) {
1664 OutputStream *ost = output_streams[of->ost_index + j];
1665 enum AVMediaType type = ost->enc_ctx->codec_type;
1667 total_size += ost->data_size;
1668 total_packets += ost->packets_written;
1670 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1671 i, j, media_type_string(type));
1672 if (ost->encoding_needed) {
1673 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1674 ost->frames_encoded);
1675 if (type == AVMEDIA_TYPE_AUDIO)
1676 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1677 av_log(NULL, AV_LOG_VERBOSE, "; ");
1680 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1681 ost->packets_written, ost->data_size);
1683 av_log(NULL, AV_LOG_VERBOSE, "\n");
1686 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1687 total_packets, total_size);
/* nothing was encoded at all — point the user at trimming options */
1689 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1690 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1692 av_log(NULL, AV_LOG_WARNING, "\n");
1694 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/**
 * Emit the periodic (and final) progress line: frame count, fps, quantizer,
 * optional QP histogram and PSNR, output size, current time, bitrate,
 * dup/drop counters and speed. The same data is also formatted as
 * key=value lines into buf_script for the -progress output (progress_avio).
 * Static locals keep state between calls (last report time, first-report
 * flag, QP histogram).
 *
 * @param is_last_report non-zero on the final call; prints '\n' instead of
 *                       '\r' and appends the final stats summary
 * @param timer_start    wall-clock start of the run, in microseconds
 * @param cur_time       current wall-clock time, in microseconds
 */
1699 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1701 AVBPrint buf, buf_script;
1703 AVFormatContext *oc;
1705 AVCodecContext *enc;
1706 int frame_number, vid, i;
1709 int64_t pts = INT64_MIN + 1;
1710 static int64_t last_time = -1;
1711 static int first_report = 1;
1712 static int qp_histogram[52];
1713 int hours, mins, secs, us;
1714 const char *hours_sign;
/* nothing to report to: no console stats and no -progress sink */
1718 if (!print_stats && !is_last_report && !progress_avio)
1721 if (!is_last_report) {
1722 if (last_time == -1) {
1723 last_time = cur_time;
/* rate-limit reports to stats_period; the very first report waits
 * until every output header has been written */
1725 if (((cur_time - last_time) < stats_period && !first_report) ||
1726 (first_report && nb_output_dumped < nb_output_files))
1728 last_time = cur_time;
1731 t = (cur_time-timer_start) / 1000000.0;
1734 oc = output_files[0]->ctx;
1736 total_size = avio_size(oc->pb);
1737 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1738 total_size = avio_tell(oc->pb);
1741 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1742 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1743 for (i = 0; i < nb_output_streams; i++) {
1745 ost = output_streams[i];
1747 if (!ost->stream_copy)
1748 q = ost->quality / (float) FF_QP2LAMBDA;
/* secondary video streams only contribute their quantizer */
1750 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1751 av_bprintf(&buf, "q=%2.1f ", q);
1752 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1753 ost->file_index, ost->index, q);
/* first video stream drives the frame/fps display */
1755 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1758 frame_number = ost->frame_number;
1759 fps = t > 1 ? frame_number / t : 0;
1760 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1761 frame_number, fps < 9.95, fps, q);
1762 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1763 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1764 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1765 ost->file_index, ost->index, q);
1767 av_bprintf(&buf, "L");
/* accumulate a per-QP histogram, printed as one hex digit per bin */
1771 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1773 for (j = 0; j < 32; j++)
1774 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1777 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1779 double error, error_sum = 0;
1780 double scale, scale_sum = 0;
1782 char type[3] = { 'Y','U','V' };
1783 av_bprintf(&buf, "PSNR=");
1784 for (j = 0; j < 3; j++) {
/* last report: cumulative encoder error; otherwise per-frame */
1785 if (is_last_report) {
1786 error = enc->error[j];
1787 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1789 error = ost->error[j];
1790 scale = enc->width * enc->height * 255.0 * 255.0;
1796 p = psnr(error / scale);
1797 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1798 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1799 ost->file_index, ost->index, type[j] | 32, p);
1801 p = psnr(error_sum / scale_sum);
1802 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1803 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1804 ost->file_index, ost->index, p);
1808 /* compute min output value */
1809 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
1810 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1811 ost->st->time_base, AV_TIME_BASE_Q));
/* with -copyts, report time relative to the first produced pts */
1813 if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1814 copy_ts_first_pts = pts;
1815 if (copy_ts_first_pts != AV_NOPTS_VALUE)
1816 pts -= copy_ts_first_pts;
1821 nb_frames_drop += ost->last_dropped;
1824 secs = FFABS(pts) / AV_TIME_BASE;
1825 us = FFABS(pts) % AV_TIME_BASE;
1830 hours_sign = (pts < 0) ? "-" : "";
1832 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1833 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1835 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1836 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1837 if (pts == AV_NOPTS_VALUE) {
1838 av_bprintf(&buf, "N/A ");
1840 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1841 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1845 av_bprintf(&buf, "bitrate=N/A");
1846 av_bprintf(&buf_script, "bitrate=N/A\n");
1848 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1849 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1852 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1853 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1854 if (pts == AV_NOPTS_VALUE) {
1855 av_bprintf(&buf_script, "out_time_us=N/A\n");
1856 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1857 av_bprintf(&buf_script, "out_time=N/A\n");
1859 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1860 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1861 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1862 hours_sign, hours, mins, secs, us);
1865 if (nb_frames_dup || nb_frames_drop)
1866 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1867 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1868 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1871 av_bprintf(&buf, " speed=N/A");
1872 av_bprintf(&buf_script, "speed=N/A\n");
1874 av_bprintf(&buf, " speed=%4.3gx", speed);
1875 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1878 if (print_stats || is_last_report) {
/* '\r' keeps rewriting the same console line between reports */
1879 const char end = is_last_report ? '\n' : '\r';
1880 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1881 fprintf(stderr, "%s %c", buf.str, end);
1883 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1887 av_bprint_finalize(&buf, NULL);
1889 if (progress_avio) {
1890 av_bprintf(&buf_script, "progress=%s\n",
1891 is_last_report ? "end" : "continue");
1892 avio_write(progress_avio, buf_script.str,
1893 FFMIN(buf_script.len, buf_script.size - 1));
1894 avio_flush(progress_avio);
1895 av_bprint_finalize(&buf_script, NULL);
1896 if (is_last_report) {
1897 if ((ret = avio_closep(&progress_avio)) < 0)
1898 av_log(NULL, AV_LOG_ERROR,
1899 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1906 print_final_stats(total_size);
1909 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1911 // We never got any input. Set a fake format, which will
1912 // come from libavformat.
1913 ifilter->format = par->format;
1914 ifilter->sample_rate = par->sample_rate;
1915 ifilter->channels = par->channels;
1916 ifilter->channel_layout = par->channel_layout;
1917 ifilter->width = par->width;
1918 ifilter->height = par->height;
1919 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/**
 * Drain every audio/video encoder at end of input: send a NULL frame to
 * each encoder and keep receiving packets until AVERROR_EOF, muxing each
 * one. Streams that were never initialized (no data reached them) get
 * their filtergraph configured from codecpar fallbacks first, or are
 * finished outright when that is impossible.
 */
1922 static void flush_encoders(void)
1926 for (i = 0; i < nb_output_streams; i++) {
1927 OutputStream *ost = output_streams[i];
1928 AVCodecContext *enc = ost->enc_ctx;
1929 OutputFile *of = output_files[ost->file_index];
1931 if (!ost->encoding_needed)
1934 // Try to enable encoding with no input frames.
1935 // Maybe we should just let encoding fail instead.
1936 if (!ost->initialized) {
1937 FilterGraph *fg = ost->filter->graph;
1939 av_log(NULL, AV_LOG_WARNING,
1940 "Finishing stream %d:%d without any data written to it.\n",
1941 ost->file_index, ost->st->index);
1943 if (ost->filter && !fg->graph) {
/* fill in missing input formats from codecpar so the graph can
 * still be configured without ever having seen a frame */
1945 for (x = 0; x < fg->nb_inputs; x++) {
1946 InputFilter *ifilter = fg->inputs[x];
1947 if (ifilter->format < 0)
1948 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1951 if (!ifilter_has_all_input_formats(fg))
1954 ret = configure_filtergraph(fg);
1956 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1960 finish_output_stream(ost);
1963 init_output_stream_wrapper(ost, NULL, 1);
/* only audio and video encoders need an explicit drain */
1966 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1970 const char *desc = NULL;
1974 switch (enc->codec_type) {
1975 case AVMEDIA_TYPE_AUDIO:
1978 case AVMEDIA_TYPE_VIDEO:
1985 av_init_packet(&pkt);
1989 update_benchmark(NULL);
/* EAGAIN means the encoder wants input: send the NULL (flush) frame */
1991 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1992 ret = avcodec_send_frame(enc, NULL);
1994 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2001 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2002 if (ret < 0 && ret != AVERROR_EOF) {
2003 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2008 if (ost->logfile && enc->stats_out) {
2009 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: flush the muxer side with an empty "eof" packet */
2011 if (ret == AVERROR_EOF) {
2012 output_packet(of, &pkt, ost, 1);
2015 if (ost->finished & MUXER_FINISHED) {
2016 av_packet_unref(&pkt);
2019 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
2020 pkt_size = pkt.size;
2021 output_packet(of, &pkt, ost, 0);
2022 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
2023 do_video_stats(ost, pkt_size);
2030 * Check whether a packet from ist should be written into ost at this time
/* Returns non-zero when ost stream-copies from this exact input stream and
 * the packet is within the output's time window (return statements are
 * elided in this excerpt). */
2032 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2034 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files, to compare with source_index */
2035 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2037 if (ost->source_index != ist_index)
/* drop packets before the requested output start time */
2043 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/**
 * Stream-copy one input packet to an output stream: apply start-time /
 * recording-time windowing, rescale pts/dts/duration from the input stream
 * time base to the output mux time base, and pass the packet to the muxer.
 * A NULL-data EOF condition (elided) flushes the output bitstream filters.
 */
2049 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2051 OutputFile *of = output_files[ost->file_index];
2052 InputFile *f = input_files [ist->file_index];
2053 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
/* output start time expressed in the muxer's time base, subtracted from
 * every outgoing timestamp so output starts at zero */
2054 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2057 // EOF: flush output bitstream filters.
2059 av_init_packet(&opkt);
2062 output_packet(of, &opkt, ost, 1);
/* by default, don't start a copied stream on a non-keyframe */
2066 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2067 !ost->copy_initial_nonkeyframes)
2070 if (!ost->frame_number && !ost->copy_prior_start) {
2071 int64_t comp_start = start_time;
2072 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2073 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2074 if (pkt->pts == AV_NOPTS_VALUE ?
2075 ist->pts < comp_start :
2076 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* past the requested recording duration: close this output stream */
2080 if (of->recording_time != INT64_MAX &&
2081 ist->pts >= of->recording_time + start_time) {
2082 close_output_stream(ost);
2086 if (f->recording_time != INT64_MAX) {
2087 start_time = f->ctx->start_time;
2088 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2089 start_time += f->start_time;
2090 if (ist->pts >= f->recording_time + start_time) {
2091 close_output_stream(ost);
2096 /* force the input stream PTS */
2097 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2100 if (av_packet_ref(&opkt, pkt) < 0)
2103 if (pkt->pts != AV_NOPTS_VALUE)
2104 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2106 if (pkt->dts == AV_NOPTS_VALUE) {
/* no packet dts: fall back to the input stream's running dts */
2107 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2108 } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
/* audio: rescale via sample count to avoid rounding drift */
2109 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2111 duration = ist->dec_ctx->frame_size;
2112 opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2113 (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2114 &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2115 /* dts will be set immediately afterwards to what pts is now */
2116 opkt.pts = opkt.dts - ost_tb_start_time;
2118 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2119 opkt.dts -= ost_tb_start_time;
2121 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2123 output_packet(of, &opkt, ost, 0);
/**
 * If the input decoder has no channel layout, guess the default one from
 * its channel count (bounded by the stream's guess_layout_max option) and
 * log the guess at WARNING level.
 *
 * @return presumably non-zero on success, 0 when no layout could be
 *         guessed — return statements are elided in this excerpt
 */
2126 int guess_input_channel_layout(InputStream *ist)
2128 AVCodecContext *dec = ist->dec_ctx;
2130 if (!dec->channel_layout) {
2131 char layout_name[256];
/* user can cap guessing via -guess_layout_max */
2133 if (dec->channels > ist->guess_layout_max)
2135 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2136 if (!dec->channel_layout)
2138 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2139 dec->channels, dec->channel_layout);
2140 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2141 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/**
 * Track decode success/failure statistics and react to errors: count the
 * outcome in decode_error_stat[], abort on error when -xerror is set, and
 * warn (or abort with -xerror) when the decoder flagged the frame corrupt.
 */
2146 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
2148 if (*got_output || ret<0)
2149 decode_error_stat[ret<0] ++;
2151 if (ret < 0 && exit_on_error)
2154 if (*got_output && ist) {
2155 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2156 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2157 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2164 // Filters can be configured only if the formats of all inputs are known.
/* Returns whether every audio/video input of the graph has a known format
 * (format >= 0); the return statements are elided in this excerpt. */
2165 static int ifilter_has_all_input_formats(FilterGraph *fg)
2168 for (i = 0; i < fg->nb_inputs; i++) {
2169 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2170 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/**
 * Push one decoded frame into an input filter. If the frame's parameters
 * (format, dimensions, sample rate/channels/layout, hw frames context)
 * differ from what the filter was configured with, the filtergraph is
 * reconfigured first; frames arriving before all inputs have known formats
 * are buffered in the ifilter's FIFO instead.
 *
 * @return 0 on success, negative AVERROR on failure
 */
2176 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2178 FilterGraph *fg = ifilter->graph;
2179 int need_reinit, ret, i;
2181 /* determine if the parameters for this input changed */
2182 need_reinit = ifilter->format != frame->format;
2184 switch (ifilter->ist->st->codecpar->codec_type) {
2185 case AVMEDIA_TYPE_AUDIO:
2186 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2187 ifilter->channels != frame->channels ||
2188 ifilter->channel_layout != frame->channel_layout;
2190 case AVMEDIA_TYPE_VIDEO:
2191 need_reinit |= ifilter->width != frame->width ||
2192 ifilter->height != frame->height;
/* honor -reinit_filter 0: keep the existing configured graph */
2196 if (!ifilter->ist->reinit_filters && fg->graph)
/* a change of hw frames context also forces reconfiguration */
2199 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2200 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2204 ret = ifilter_parameters_from_frame(ifilter, frame);
2209 /* (re)init the graph if possible, otherwise buffer the frame and return */
2210 if (need_reinit || !fg->graph) {
2211 for (i = 0; i < fg->nb_inputs; i++) {
2212 if (!ifilter_has_all_input_formats(fg)) {
/* clone the frame since the caller will unref the original */
2213 AVFrame *tmp = av_frame_clone(frame);
2215 return AVERROR(ENOMEM);
2216 av_frame_unref(frame);
2218 if (!av_fifo_space(ifilter->frame_queue)) {
2219 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2221 av_frame_free(&tmp);
2225 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the old graph's pending output before tearing it down */
2230 ret = reap_filters(1);
2231 if (ret < 0 && ret != AVERROR_EOF) {
2232 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2236 ret = configure_filtergraph(fg);
2238 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2243 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2245 if (ret != AVERROR_EOF)
2246 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/**
 * Signal end-of-stream on an input filter at the given pts. If the filter
 * was already configured, close its buffersrc; otherwise fall back to
 * codecpar-derived parameters so the graph can still be configured, and
 * fail if the input format remains unknown.
 *
 * @return 0 on success, negative AVERROR on failure
 */
2253 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2259 if (ifilter->filter) {
2260 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2264 // the filtergraph was never configured
2265 if (ifilter->format < 0)
2266 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2267 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2268 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2269 return AVERROR_INVALIDDATA;
2276 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2277 // There is the following difference: if you got a frame, you must call
2278 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2279 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin wrapper mapping the send/receive decode API onto the old
 * got_frame-style calling convention used by the callers in this file. */
2280 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2287 ret = avcodec_send_packet(avctx, pkt);
2288 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2289 // decoded frames with avcodec_receive_frame() until done.
2290 if (ret < 0 && ret != AVERROR_EOF)
2294 ret = avcodec_receive_frame(avctx, frame);
2295 if (ret < 0 && ret != AVERROR(EAGAIN))
/**
 * Distribute one decoded frame to every filtergraph input fed by this
 * input stream. All but the last filter receive a reference (so the frame
 * data is shared); the last one gets the frame itself. AVERROR_EOF from a
 * filter is ignored — the graph simply accepts no more input.
 *
 * @return 0 on success, negative AVERROR on failure
 */
2303 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2308 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2309 for (i = 0; i < ist->nb_filters; i++) {
2310 if (i < ist->nb_filters - 1) {
2311 f = ist->filter_frame;
2312 ret = av_frame_ref(f, decoded_frame);
2317 ret = ifilter_send_frame(ist->filters[i], f);
2318 if (ret == AVERROR_EOF)
2319 ret = 0; /* ignore */
2321 av_log(NULL, AV_LOG_ERROR,
2322 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/**
 * Decode one audio packet for an input stream, fix up the decoded frame's
 * pts (from the frame, the packet, or the stream's running dts), advance
 * the stream's predicted next pts/dts by the decoded sample count, and
 * send the frame into the filter network.
 *
 * @param got_output set non-zero when a frame was produced
 * @return 0/positive on success, negative AVERROR on failure
 */
2329 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2332 AVFrame *decoded_frame;
2333 AVCodecContext *avctx = ist->dec_ctx;
2335 AVRational decoded_frame_tb;
/* reusable per-stream frames, allocated lazily */
2337 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2338 return AVERROR(ENOMEM);
2339 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2340 return AVERROR(ENOMEM);
2341 decoded_frame = ist->decoded_frame;
2343 update_benchmark(NULL);
2344 ret = decode(avctx, decoded_frame, got_output, pkt);
2345 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* a decoder reporting an invalid sample rate would poison all the
 * sample-count based timestamp math below */
2349 if (ret >= 0 && avctx->sample_rate <= 0) {
2350 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2351 ret = AVERROR_INVALIDDATA;
2354 if (ret != AVERROR_EOF)
2355 check_decode_result(ist, got_output, ret);
2357 if (!*got_output || ret < 0)
2360 ist->samples_decoded += decoded_frame->nb_samples;
2361 ist->frames_decoded++;
2363 /* increment next_dts to use for the case where the input stream does not
2364 have timestamps or there are multiple frames in the packet */
2365 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2367 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* pick the best available pts source, remembering its time base */
2370 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2371 decoded_frame_tb = ist->st->time_base;
2372 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2373 decoded_frame->pts = pkt->pts;
2374 decoded_frame_tb = ist->st->time_base;
2376 decoded_frame->pts = ist->dts;
2377 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale to a 1/sample_rate time base using the delta rescaler to
 * avoid accumulating rounding errors across frames */
2379 if (decoded_frame->pts != AV_NOPTS_VALUE)
2380 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2381 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2382 (AVRational){1, avctx->sample_rate});
2383 ist->nb_samples = decoded_frame->nb_samples;
2384 err = send_frame_to_filters(ist, decoded_frame);
2386 av_frame_unref(ist->filter_frame);
2387 av_frame_unref(decoded_frame);
2388 return err < 0 ? err : ret;
2391 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2394 AVFrame *decoded_frame;
2395 int i, ret = 0, err = 0;
2396 int64_t best_effort_timestamp;
2397 int64_t dts = AV_NOPTS_VALUE;
2400 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2401 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2403 if (!eof && pkt && pkt->size == 0)
2406 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2407 return AVERROR(ENOMEM);
2408 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2409 return AVERROR(ENOMEM);
2410 decoded_frame = ist->decoded_frame;
2411 if (ist->dts != AV_NOPTS_VALUE)
2412 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2415 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2418 // The old code used to set dts on the drain packet, which does not work
2419 // with the new API anymore.
2421 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2423 return AVERROR(ENOMEM);
2424 ist->dts_buffer = new;
2425 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2428 update_benchmark(NULL);
2429 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2430 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2434 // The following line may be required in some cases where there is no parser
2435 // or the parser does not has_b_frames correctly
2436 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2437 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2438 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2440 av_log(ist->dec_ctx, AV_LOG_WARNING,
2441 "video_delay is larger in decoder than demuxer %d > %d.\n"
2442 "If you want to help, upload a sample "
2443 "of this file to https://streams.videolan.org/upload/ "
2444 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2445 ist->dec_ctx->has_b_frames,
2446 ist->st->codecpar->video_delay);
2449 if (ret != AVERROR_EOF)
2450 check_decode_result(ist, got_output, ret);
2452 if (*got_output && ret >= 0) {
2453 if (ist->dec_ctx->width != decoded_frame->width ||
2454 ist->dec_ctx->height != decoded_frame->height ||
2455 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2456 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2457 decoded_frame->width,
2458 decoded_frame->height,
2459 decoded_frame->format,
2460 ist->dec_ctx->width,
2461 ist->dec_ctx->height,
2462 ist->dec_ctx->pix_fmt);
2466 if (!*got_output || ret < 0)
2469 if(ist->top_field_first>=0)
2470 decoded_frame->top_field_first = ist->top_field_first;
2472 ist->frames_decoded++;
2474 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2475 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2479 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2481 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2482 *duration_pts = decoded_frame->pkt_duration;
2484 if (ist->framerate.num)
2485 best_effort_timestamp = ist->cfr_next_pts++;
2487 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2488 best_effort_timestamp = ist->dts_buffer[0];
2490 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2491 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2492 ist->nb_dts_buffer--;
2495 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2496 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2498 if (ts != AV_NOPTS_VALUE)
2499 ist->next_pts = ist->pts = ts;
2503 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2504 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2505 ist->st->index, av_ts2str(decoded_frame->pts),
2506 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2507 best_effort_timestamp,
2508 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2509 decoded_frame->key_frame, decoded_frame->pict_type,
2510 ist->st->time_base.num, ist->st->time_base.den);
2513 if (ist->st->sample_aspect_ratio.num)
2514 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2516 err = send_frame_to_filters(ist, decoded_frame);
2519 av_frame_unref(ist->filter_frame);
2520 av_frame_unref(decoded_frame);
2521 return err < 0 ? err : ret;
2524 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2527 AVSubtitle subtitle;
2529 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2530 &subtitle, got_output, pkt);
2532 check_decode_result(NULL, got_output, ret);
2534 if (ret < 0 || !*got_output) {
2537 sub2video_flush(ist);
2541 if (ist->fix_sub_duration) {
2543 if (ist->prev_sub.got_output) {
2544 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2545 1000, AV_TIME_BASE);
2546 if (end < ist->prev_sub.subtitle.end_display_time) {
2547 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2548 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2549 ist->prev_sub.subtitle.end_display_time, end,
2550 end <= 0 ? ", dropping it" : "");
2551 ist->prev_sub.subtitle.end_display_time = end;
2554 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2555 FFSWAP(int, ret, ist->prev_sub.ret);
2556 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2564 if (ist->sub2video.frame) {
2565 sub2video_update(ist, INT64_MIN, &subtitle);
2566 } else if (ist->nb_filters) {
2567 if (!ist->sub2video.sub_queue)
2568 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2569 if (!ist->sub2video.sub_queue)
2571 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2572 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2576 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2580 if (!subtitle.num_rects)
2583 ist->frames_decoded++;
2585 for (i = 0; i < nb_output_streams; i++) {
2586 OutputStream *ost = output_streams[i];
2588 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2589 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2592 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2597 avsubtitle_free(&subtitle);
2601 static int send_filter_eof(InputStream *ist)
2604 /* TODO keep pts also in stream time base to avoid converting back */
2605 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2606 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2608 for (i = 0; i < ist->nb_filters; i++) {
2609 ret = ifilter_send_eof(ist->filters[i], pts);
2616 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2617 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2621 int eof_reached = 0;
2624 if (!ist->saw_first_ts) {
2625 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2627 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2628 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2629 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2631 ist->saw_first_ts = 1;
2634 if (ist->next_dts == AV_NOPTS_VALUE)
2635 ist->next_dts = ist->dts;
2636 if (ist->next_pts == AV_NOPTS_VALUE)
2637 ist->next_pts = ist->pts;
2641 av_init_packet(&avpkt);
2648 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2649 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2650 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2651 ist->next_pts = ist->pts = ist->dts;
2654 // while we have more to decode or while the decoder did output something on EOF
2655 while (ist->decoding_needed) {
2656 int64_t duration_dts = 0;
2657 int64_t duration_pts = 0;
2659 int decode_failed = 0;
2661 ist->pts = ist->next_pts;
2662 ist->dts = ist->next_dts;
2664 switch (ist->dec_ctx->codec_type) {
2665 case AVMEDIA_TYPE_AUDIO:
2666 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2669 case AVMEDIA_TYPE_VIDEO:
2670 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2672 if (!repeating || !pkt || got_output) {
2673 if (pkt && pkt->duration) {
2674 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2675 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2676 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2677 duration_dts = ((int64_t)AV_TIME_BASE *
2678 ist->dec_ctx->framerate.den * ticks) /
2679 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2682 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2683 ist->next_dts += duration_dts;
2685 ist->next_dts = AV_NOPTS_VALUE;
2689 if (duration_pts > 0) {
2690 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2692 ist->next_pts += duration_dts;
2696 case AVMEDIA_TYPE_SUBTITLE:
2699 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2700 if (!pkt && ret >= 0)
2707 if (ret == AVERROR_EOF) {
2713 if (decode_failed) {
2714 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2715 ist->file_index, ist->st->index, av_err2str(ret));
2717 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2718 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2720 if (!decode_failed || exit_on_error)
2726 ist->got_output = 1;
2731 // During draining, we might get multiple output frames in this loop.
2732 // ffmpeg.c does not drain the filter chain on configuration changes,
2733 // which means if we send multiple frames at once to the filters, and
2734 // one of those frames changes configuration, the buffered frames will
2735 // be lost. This can upset certain FATE tests.
2736 // Decode only 1 frame per call on EOF to appease these FATE tests.
2737 // The ideal solution would be to rewrite decoding to use the new
2738 // decoding API in a better way.
2745 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2746 /* except when looping we need to flush but not to send an EOF */
2747 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2748 int ret = send_filter_eof(ist);
2750 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2755 /* handle stream copy */
2756 if (!ist->decoding_needed && pkt) {
2757 ist->dts = ist->next_dts;
2758 switch (ist->dec_ctx->codec_type) {
2759 case AVMEDIA_TYPE_AUDIO:
2760 av_assert1(pkt->duration >= 0);
2761 if (ist->dec_ctx->sample_rate) {
2762 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2763 ist->dec_ctx->sample_rate;
2765 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2768 case AVMEDIA_TYPE_VIDEO:
2769 if (ist->framerate.num) {
2770 // TODO: Remove work-around for c99-to-c89 issue 7
2771 AVRational time_base_q = AV_TIME_BASE_Q;
2772 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2773 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2774 } else if (pkt->duration) {
2775 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2776 } else if(ist->dec_ctx->framerate.num != 0) {
2777 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2778 ist->next_dts += ((int64_t)AV_TIME_BASE *
2779 ist->dec_ctx->framerate.den * ticks) /
2780 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2784 ist->pts = ist->dts;
2785 ist->next_pts = ist->next_dts;
2787 for (i = 0; i < nb_output_streams; i++) {
2788 OutputStream *ost = output_streams[i];
2790 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2793 do_streamcopy(ist, ost, pkt);
2796 return !eof_reached;
2799 static void print_sdp(void)
2804 AVIOContext *sdp_pb;
2805 AVFormatContext **avc;
2807 for (i = 0; i < nb_output_files; i++) {
2808 if (!output_files[i]->header_written)
2812 avc = av_malloc_array(nb_output_files, sizeof(*avc));
2815 for (i = 0, j = 0; i < nb_output_files; i++) {
2816 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2817 avc[j] = output_files[i]->ctx;
2825 av_sdp_create(avc, j, sdp, sizeof(sdp));
2827 if (!sdp_filename) {
2828 printf("SDP:\n%s\n", sdp);
2831 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2832 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2834 avio_print(sdp_pb, sdp);
2835 avio_closep(&sdp_pb);
2836 av_freep(&sdp_filename);
2844 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2846 InputStream *ist = s->opaque;
2847 const enum AVPixelFormat *p;
2850 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2851 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2852 const AVCodecHWConfig *config = NULL;
2855 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2858 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2859 ist->hwaccel_id == HWACCEL_AUTO) {
2861 config = avcodec_get_hw_config(s->codec, i);
2864 if (!(config->methods &
2865 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2867 if (config->pix_fmt == *p)
2872 if (config->device_type != ist->hwaccel_device_type) {
2873 // Different hwaccel offered, ignore.
2877 ret = hwaccel_decode_init(s);
2879 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2880 av_log(NULL, AV_LOG_FATAL,
2881 "%s hwaccel requested for input stream #%d:%d, "
2882 "but cannot be initialized.\n",
2883 av_hwdevice_get_type_name(config->device_type),
2884 ist->file_index, ist->st->index);
2885 return AV_PIX_FMT_NONE;
2890 const HWAccel *hwaccel = NULL;
2892 for (i = 0; hwaccels[i].name; i++) {
2893 if (hwaccels[i].pix_fmt == *p) {
2894 hwaccel = &hwaccels[i];
2899 // No hwaccel supporting this pixfmt.
2902 if (hwaccel->id != ist->hwaccel_id) {
2903 // Does not match requested hwaccel.
2907 ret = hwaccel->init(s);
2909 av_log(NULL, AV_LOG_FATAL,
2910 "%s hwaccel requested for input stream #%d:%d, "
2911 "but cannot be initialized.\n", hwaccel->name,
2912 ist->file_index, ist->st->index);
2913 return AV_PIX_FMT_NONE;
2917 if (ist->hw_frames_ctx) {
2918 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2919 if (!s->hw_frames_ctx)
2920 return AV_PIX_FMT_NONE;
2923 ist->hwaccel_pix_fmt = *p;
2930 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2932 InputStream *ist = s->opaque;
2934 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2935 return ist->hwaccel_get_buffer(s, frame, flags);
2937 return avcodec_default_get_buffer2(s, frame, flags);
2940 static int init_input_stream(int ist_index, char *error, int error_len)
2943 InputStream *ist = input_streams[ist_index];
2945 if (ist->decoding_needed) {
2946 AVCodec *codec = ist->dec;
2948 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2949 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2950 return AVERROR(EINVAL);
2953 ist->dec_ctx->opaque = ist;
2954 ist->dec_ctx->get_format = get_format;
2955 ist->dec_ctx->get_buffer2 = get_buffer;
2956 #if LIBAVCODEC_VERSION_MAJOR < 60
2957 ist->dec_ctx->thread_safe_callbacks = 1;
2960 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2961 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2962 (ist->decoding_needed & DECODING_FOR_OST)) {
2963 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2964 if (ist->decoding_needed & DECODING_FOR_FILTER)
2965 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2968 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2970 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2971 * audio, and video decoders such as cuvid or mediacodec */
2972 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2974 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2975 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2976 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2977 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2978 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2980 ret = hw_device_setup_for_decode(ist);
2982 snprintf(error, error_len, "Device setup failed for "
2983 "decoder on input stream #%d:%d : %s",
2984 ist->file_index, ist->st->index, av_err2str(ret));
2988 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2989 if (ret == AVERROR_EXPERIMENTAL)
2990 abort_codec_experimental(codec, 0);
2992 snprintf(error, error_len,
2993 "Error while opening decoder for input stream "
2995 ist->file_index, ist->st->index, av_err2str(ret));
2998 assert_avoptions(ist->decoder_opts);
3001 ist->next_pts = AV_NOPTS_VALUE;
3002 ist->next_dts = AV_NOPTS_VALUE;
3007 static InputStream *get_input_stream(OutputStream *ost)
3009 if (ost->source_index >= 0)
3010 return input_streams[ost->source_index];
3014 static int compare_int64(const void *a, const void *b)
3016 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
3019 /* open the muxer when all the streams are initialized */
3020 static int check_init_output_file(OutputFile *of, int file_index)
3024 for (i = 0; i < of->ctx->nb_streams; i++) {
3025 OutputStream *ost = output_streams[of->ost_index + i];
3026 if (!ost->initialized)
3030 of->ctx->interrupt_callback = int_cb;
3032 ret = avformat_write_header(of->ctx, &of->opts);
3034 av_log(NULL, AV_LOG_ERROR,
3035 "Could not write header for output file #%d "
3036 "(incorrect codec parameters ?): %s\n",
3037 file_index, av_err2str(ret));
3040 //assert_avoptions(of->opts);
3041 of->header_written = 1;
3043 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3046 if (sdp_filename || want_sdp)
3049 /* flush the muxing queues */
3050 for (i = 0; i < of->ctx->nb_streams; i++) {
3051 OutputStream *ost = output_streams[of->ost_index + i];
3053 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3054 if (!av_fifo_size(ost->muxing_queue))
3055 ost->mux_timebase = ost->st->time_base;
3057 while (av_fifo_size(ost->muxing_queue)) {
3059 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3060 ost->muxing_queue_data_size -= pkt.size;
3061 write_packet(of, &pkt, ost, 1);
3068 static int init_output_bsfs(OutputStream *ost)
3070 AVBSFContext *ctx = ost->bsf_ctx;
3076 ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3080 ctx->time_base_in = ost->st->time_base;
3082 ret = av_bsf_init(ctx);
3084 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3089 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3092 ost->st->time_base = ctx->time_base_out;
3097 static int init_output_stream_streamcopy(OutputStream *ost)
3099 OutputFile *of = output_files[ost->file_index];
3100 InputStream *ist = get_input_stream(ost);
3101 AVCodecParameters *par_dst = ost->st->codecpar;
3102 AVCodecParameters *par_src = ost->ref_par;
3105 uint32_t codec_tag = par_dst->codec_tag;
3107 av_assert0(ist && !ost->filter);
3109 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3111 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3113 av_log(NULL, AV_LOG_FATAL,
3114 "Error setting up codec context options.\n");
3118 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3120 av_log(NULL, AV_LOG_FATAL,
3121 "Error getting reference codec parameters.\n");
3126 unsigned int codec_tag_tmp;
3127 if (!of->ctx->oformat->codec_tag ||
3128 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3129 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3130 codec_tag = par_src->codec_tag;
3133 ret = avcodec_parameters_copy(par_dst, par_src);
3137 par_dst->codec_tag = codec_tag;
3139 if (!ost->frame_rate.num)
3140 ost->frame_rate = ist->framerate;
3141 ost->st->avg_frame_rate = ost->frame_rate;
3143 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3147 // copy timebase while removing common factors
3148 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3149 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3151 // copy estimated duration as a hint to the muxer
3152 if (ost->st->duration <= 0 && ist->st->duration > 0)
3153 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3156 ost->st->disposition = ist->st->disposition;
3158 if (ist->st->nb_side_data) {
3159 for (i = 0; i < ist->st->nb_side_data; i++) {
3160 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3163 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3165 return AVERROR(ENOMEM);
3166 memcpy(dst_data, sd_src->data, sd_src->size);
3170 if (ost->rotate_overridden) {
3171 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3172 sizeof(int32_t) * 9);
3174 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3177 switch (par_dst->codec_type) {
3178 case AVMEDIA_TYPE_AUDIO:
3179 if (audio_volume != 256) {
3180 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3183 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3184 par_dst->block_align= 0;
3185 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3186 par_dst->block_align= 0;
3188 case AVMEDIA_TYPE_VIDEO:
3189 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3191 av_mul_q(ost->frame_aspect_ratio,
3192 (AVRational){ par_dst->height, par_dst->width });
3193 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3194 "with stream copy may produce invalid files\n");
3196 else if (ist->st->sample_aspect_ratio.num)
3197 sar = ist->st->sample_aspect_ratio;
3199 sar = par_src->sample_aspect_ratio;
3200 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3201 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3202 ost->st->r_frame_rate = ist->st->r_frame_rate;
3206 ost->mux_timebase = ist->st->time_base;
3211 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3213 AVDictionaryEntry *e;
3215 uint8_t *encoder_string;
3216 int encoder_string_len;
3217 int format_flags = 0;
3218 int codec_flags = ost->enc_ctx->flags;
3220 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3223 e = av_dict_get(of->opts, "fflags", NULL, 0);
3225 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3228 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3230 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3232 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3235 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3238 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3239 encoder_string = av_mallocz(encoder_string_len);
3240 if (!encoder_string)
3243 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3244 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3246 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3247 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3248 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3249 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3252 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3253 AVCodecContext *avctx)
3256 int n = 1, i, size, index = 0;
3259 for (p = kf; *p; p++)
3263 pts = av_malloc_array(size, sizeof(*pts));
3265 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3270 for (i = 0; i < n; i++) {
3271 char *next = strchr(p, ',');
3276 if (!memcmp(p, "chapters", 8)) {
3278 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3281 if (avf->nb_chapters > INT_MAX - size ||
3282 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3284 av_log(NULL, AV_LOG_FATAL,
3285 "Could not allocate forced key frames array.\n");
3288 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3289 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3291 for (j = 0; j < avf->nb_chapters; j++) {
3292 AVChapter *c = avf->chapters[j];
3293 av_assert1(index < size);
3294 pts[index++] = av_rescale_q(c->start, c->time_base,
3295 avctx->time_base) + t;
3300 t = parse_time_or_die("force_key_frames", p, 1);
3301 av_assert1(index < size);
3302 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3309 av_assert0(index == size);
3310 qsort(pts, size, sizeof(*pts), compare_int64);
3311 ost->forced_kf_count = size;
3312 ost->forced_kf_pts = pts;
3315 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3317 InputStream *ist = get_input_stream(ost);
3318 AVCodecContext *enc_ctx = ost->enc_ctx;
3319 AVFormatContext *oc;
3321 if (ost->enc_timebase.num > 0) {
3322 enc_ctx->time_base = ost->enc_timebase;
3326 if (ost->enc_timebase.num < 0) {
3328 enc_ctx->time_base = ist->st->time_base;
3332 oc = output_files[ost->file_index]->ctx;
3333 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3336 enc_ctx->time_base = default_time_base;
3339 static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3341 InputStream *ist = get_input_stream(ost);
3342 AVCodecContext *enc_ctx = ost->enc_ctx;
3343 AVCodecContext *dec_ctx = NULL;
3344 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3347 set_encoder_id(output_files[ost->file_index], ost);
3349 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3350 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3351 // which have to be filtered out to prevent leaking them to output files.
3352 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3355 ost->st->disposition = ist->st->disposition;
3357 dec_ctx = ist->dec_ctx;
3359 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3361 for (j = 0; j < oc->nb_streams; j++) {
3362 AVStream *st = oc->streams[j];
3363 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3366 if (j == oc->nb_streams)
3367 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3368 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3369 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3372 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3373 if (!ost->frame_rate.num)
3374 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3375 if (ist && !ost->frame_rate.num)
3376 ost->frame_rate = ist->framerate;
3377 if (ist && !ost->frame_rate.num)
3378 ost->frame_rate = ist->st->r_frame_rate;
3379 if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3380 ost->frame_rate = (AVRational){25, 1};
3381 av_log(NULL, AV_LOG_WARNING,
3383 "about the input framerate is available. Falling "
3384 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3385 "if you want a different framerate.\n",
3386 ost->file_index, ost->index);
3389 if (ost->max_frame_rate.num &&
3390 (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
3391 !ost->frame_rate.den))
3392 ost->frame_rate = ost->max_frame_rate;
3394 if (ost->enc->supported_framerates && !ost->force_fps) {
3395 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3396 ost->frame_rate = ost->enc->supported_framerates[idx];
3398 // reduce frame rate for mpeg4 to be within the spec limits
3399 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3400 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3401 ost->frame_rate.num, ost->frame_rate.den, 65535);
3405 switch (enc_ctx->codec_type) {
3406 case AVMEDIA_TYPE_AUDIO:
3407 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3409 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3410 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3411 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3412 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3413 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3415 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3418 case AVMEDIA_TYPE_VIDEO:
3419 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3421 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3422 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3423 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3424 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3425 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3426 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3429 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3430 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3431 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3432 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3433 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3434 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3436 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3438 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3439 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3442 enc_ctx->color_range = frame->color_range;
3443 enc_ctx->color_primaries = frame->color_primaries;
3444 enc_ctx->color_trc = frame->color_trc;
3445 enc_ctx->colorspace = frame->colorspace;
3446 enc_ctx->chroma_sample_location = frame->chroma_location;
3449 enc_ctx->framerate = ost->frame_rate;
3451 ost->st->avg_frame_rate = ost->frame_rate;
3454 enc_ctx->width != dec_ctx->width ||
3455 enc_ctx->height != dec_ctx->height ||
3456 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3457 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3460 if (ost->top_field_first == 0) {
3461 enc_ctx->field_order = AV_FIELD_BB;
3462 } else if (ost->top_field_first == 1) {
3463 enc_ctx->field_order = AV_FIELD_TT;
3467 if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
3468 ost->top_field_first >= 0)
3469 frame->top_field_first = !!ost->top_field_first;
3471 if (frame->interlaced_frame) {
3472 if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3473 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3475 enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3477 enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3480 if (ost->forced_keyframes) {
3481 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3482 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3483 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3485 av_log(NULL, AV_LOG_ERROR,
3486 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3489 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3490 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3491 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3492 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3494 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3495 // parse it only for static kf timings
3496 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3497 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3501 case AVMEDIA_TYPE_SUBTITLE:
3502 enc_ctx->time_base = AV_TIME_BASE_Q;
3503 if (!enc_ctx->width) {
3504 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3505 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3508 case AVMEDIA_TYPE_DATA:
3515 ost->mux_timebase = enc_ctx->time_base;
/*
 * Finish setting up one output stream before muxing starts.
 * For encoded streams: runs the per-codec-type encode init, applies
 * default encoder options ("threads"=auto, audio "b"=128k), sets up a
 * HW device, rejects subtitle transcoding across text/bitmap kinds,
 * opens the encoder, then copies codec parameters and side data onto
 * the muxer's AVStream.  Streamcopy goes through
 * init_output_stream_streamcopy() instead.  In all cases the user
 * "disposition" string is parsed, bitstream filters are initialized
 * and the output file header is attempted.
 * Returns 0 or a negative AVERROR; on failure a human-readable message
 * is written into 'error' (up to error_len bytes).
 * NOTE(review): this extract is fragmentary -- interior lines are
 * elided -- so the comments below cover only what is visible here.
 */
3520 static int init_output_stream(OutputStream *ost, AVFrame *frame,
3521 char *error, int error_len)
3525 if (ost->encoding_needed) {
3526 AVCodec *codec = ost->enc;
3527 AVCodecContext *dec = NULL;
3530 ret = init_output_stream_encode(ost, frame);
3534 if ((ist = get_input_stream(ost)))
/* Carry the decoder's subtitle header (e.g. ASS styles) to the encoder. */
3536 if (dec && dec->subtitle_header) {
3537 /* ASS code assumes this buffer is null terminated so add extra byte. */
3538 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3539 if (!ost->enc_ctx->subtitle_header)
3540 return AVERROR(ENOMEM);
3541 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3542 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic thread count unless the user chose one. */
3544 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3545 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128 kb/s when neither "b" nor "ab" was set. */
3546 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3548 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3549 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3550 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3552 ret = hw_device_setup_for_encode(ost);
3554 snprintf(error, error_len, "Device setup failed for "
3555 "encoder on output stream #%d:%d : %s",
3556 ost->file_index, ost->index, av_err2str(ret));
/* Subtitle transcoding is only possible within the same kind:
 * text->text or bitmap->bitmap, never across. */
3560 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3561 int input_props = 0, output_props = 0;
3562 AVCodecDescriptor const *input_descriptor =
3563 avcodec_descriptor_get(dec->codec_id);
3564 AVCodecDescriptor const *output_descriptor =
3565 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3566 if (input_descriptor)
3567 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3568 if (output_descriptor)
3569 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3570 if (input_props && output_props && input_props != output_props) {
3571 snprintf(error, error_len,
3572 "Subtitle encoding currently only possible from text to text "
3573 "or bitmap to bitmap");
3574 return AVERROR_INVALIDDATA;
3578 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3579 if (ret == AVERROR_EXPERIMENTAL)
3580 abort_codec_experimental(codec, 1);
3581 snprintf(error, error_len,
3582 "Error while opening encoder for output stream #%d:%d - "
3583 "maybe incorrect parameters such as bit_rate, rate, width or height",
3584 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit frames
 * of exactly enc_ctx->frame_size samples. */
3587 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3588 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3589 av_buffersink_set_frame_size(ost->filter->filter,
3590 ost->enc_ctx->frame_size);
3591 assert_avoptions(ost->encoder_opts);
/* Warn about suspiciously low bitrates: the option is bits/s, a common
 * user mistake is passing kbits/s. */
3592 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3593 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3594 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3595 " It takes bits/s as argument, not kbits/s\n");
3597 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3599 av_log(NULL, AV_LOG_FATAL,
3600 "Error initializing the output stream codec context.\n");
/* Propagate the encoder's coded side data to the muxer stream. */
3604 if (ost->enc_ctx->nb_coded_side_data) {
3607 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3608 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3611 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3613 return AVERROR(ENOMEM);
3614 memcpy(dst_data, sd_src->data, sd_src->size);
3619 * Add global input side data. For now this is naive, and copies it
3620 * from the input stream's global side data. All side data should
3621 * really be funneled over AVFrame and libavfilter, then added back to
3622 * packet side data, and then potentially using the first packet for
3627 for (i = 0; i < ist->st->nb_side_data; i++) {
3628 AVPacketSideData *sd = &ist->st->side_data[i];
3629 if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3630 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3632 return AVERROR(ENOMEM);
3633 memcpy(dst, sd->data, sd->size);
/* When autorotation is applied by a filter, reset the display matrix
 * rotation on the output so players do not rotate twice. */
3634 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3635 av_display_rotation_set((uint32_t *)dst, 0);
3640 // copy timebase while removing common factors
3641 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3642 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3644 // copy estimated duration as a hint to the muxer
3645 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3646 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3647 } else if (ost->stream_copy) {
3648 ret = init_output_stream_streamcopy(ost);
3653 // parse user provided disposition, and update stream values
/* Ad-hoc AVOption flag table so av_opt_eval_flags() can parse strings
 * like "default+forced" into AV_DISPOSITION_* bits. */
3654 if (ost->disposition) {
3655 static const AVOption opts[] = {
3656 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3657 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3658 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3659 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3660 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3661 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3662 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3663 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3664 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3665 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3666 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3667 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3668 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3669 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3670 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3671 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3674 static const AVClass class = {
3676 .item_name = av_default_item_name,
3678 .version = LIBAVUTIL_VERSION_INT,
3680 const AVClass *pclass = &class;
3682 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3687 /* initialize bitstream filters for the output stream
3688 * needs to be done here, because the codec id for streamcopy is not
3689 * known until now */
3690 ret = init_output_bsfs(ost);
3694 ost->initialized = 1;
/* Write the file header once all of this file's streams are ready. */
3696 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/*
 * Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the initial probe; such streams are ignored by the
 * transcode loop.  nb_streams_warn records the highest index already
 * reported so the warning is not repeated.
 */
3703 static void report_new_stream(int input_index, AVPacket *pkt)
3705 InputFile *file = input_files[input_index];
3706 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a higher) stream index. */
3708 if (pkt->stream_index < file->nb_streams_warn)
3710 av_log(file->ctx, AV_LOG_WARNING,
3711 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3712 av_get_media_type_string(st->codecpar->codec_type),
3713 input_index, pkt->stream_index,
3714 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3715 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * One-time setup before the main transcode loop: binds complex-filter
 * outputs to a source input stream, arms framerate emulation, opens all
 * input decoders, initializes streamcopy/subtitle/data output streams
 * (encoded A/V streams are initialized lazily -- see comment at 3760),
 * discards unused programs, writes headers for stream-less outputs and
 * dumps the stream mapping.  Returns 0 or a negative AVERROR.
 * NOTE(review): interior lines of this extract are elided; comments
 * cover only what is visible.
 */
3718 static int transcode_init(void)
3720 int ret = 0, i, j, k;
3721 AVFormatContext *oc;
3724 char error[1024] = {0};
/* Give outputs fed by a single-input complex filtergraph a source_index
 * pointing at that graph's input stream (searched from the end). */
3726 for (i = 0; i < nb_filtergraphs; i++) {
3727 FilterGraph *fg = filtergraphs[i];
3728 for (j = 0; j < fg->nb_outputs; j++) {
3729 OutputFilter *ofilter = fg->outputs[j];
3730 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3732 if (fg->nb_inputs != 1)
3734 for (k = nb_input_streams-1; k >= 0 ; k--)
3735 if (fg->inputs[0]->ist == input_streams[k])
3737 ofilter->ost->source_index = k;
3741 /* init framerate emulation */
3742 for (i = 0; i < nb_input_files; i++) {
3743 InputFile *ifile = input_files[i];
3744 if (ifile->rate_emu)
3745 for (j = 0; j < ifile->nb_streams; j++)
3746 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3749 /* init input streams */
3750 for (i = 0; i < nb_input_streams; i++)
3751 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoders already opened before bailing out. */
3752 for (i = 0; i < nb_output_streams; i++) {
3753 ost = output_streams[i];
3754 avcodec_close(ost->enc_ctx);
3760 * initialize stream copy and subtitle/data streams.
3761 * Encoded AVFrame based streams will get initialized as follows:
3762 * - when the first AVFrame is received in do_video_out
3763 * - just before the first AVFrame is received in either transcode_step
3764 * or reap_filters due to us requiring the filter chain buffer sink
3765 * to be configured with the correct audio frame size, which is only
3766 * known after the encoder is initialized.
3768 for (i = 0; i < nb_output_streams; i++) {
/* Skip encoded audio/video here -- they initialize lazily (see above). */
3769 if (!output_streams[i]->stream_copy &&
3770 (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3771 output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3774 ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3779 /* discard unused programs */
3780 for (i = 0; i < nb_input_files; i++) {
3781 InputFile *ifile = input_files[i];
3782 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3783 AVProgram *p = ifile->ctx->programs[j];
3784 int discard = AVDISCARD_ALL;
/* Keep the program if any of its streams is actually used. */
3786 for (k = 0; k < p->nb_stream_indexes; k++)
3787 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3788 discard = AVDISCARD_DEFAULT;
3791 p->discard = discard;
3795 /* write headers for files with no streams */
3796 for (i = 0; i < nb_output_files; i++) {
3797 oc = output_files[i]->ctx;
3798 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3799 ret = check_init_output_file(output_files[i], i);
3806 /* dump the stream mapping */
3807 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
/* First list inputs that feed complex (non-simple) filtergraphs. */
3808 for (i = 0; i < nb_input_streams; i++) {
3809 ist = input_streams[i];
3811 for (j = 0; j < ist->nb_filters; j++) {
3812 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3813 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3814 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3815 ist->filters[j]->name);
3816 if (nb_filtergraphs > 1)
3817 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3818 av_log(NULL, AV_LOG_INFO, "\n");
/* Then list each output stream: attachments, complex-graph outputs, and
 * plain input->output mappings (with codec names when transcoding). */
3823 for (i = 0; i < nb_output_streams; i++) {
3824 ost = output_streams[i];
3826 if (ost->attachment_filename) {
3827 /* an attached file */
3828 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3829 ost->attachment_filename, ost->file_index, ost->index);
3833 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3834 /* output from a complex graph */
3835 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3836 if (nb_filtergraphs > 1)
3837 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3839 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3840 ost->index, ost->enc ? ost->enc->name : "?");
3844 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3845 input_streams[ost->source_index]->file_index,
3846 input_streams[ost->source_index]->st->index,
3849 if (ost->sync_ist != input_streams[ost->source_index])
3850 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3851 ost->sync_ist->file_index,
3852 ost->sync_ist->st->index);
3853 if (ost->stream_copy)
3854 av_log(NULL, AV_LOG_INFO, " (copy)");
3856 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3857 const AVCodec *out_codec = ost->enc;
3858 const char *decoder_name = "?";
3859 const char *in_codec_name = "?";
3860 const char *encoder_name = "?";
3861 const char *out_codec_name = "?";
3862 const AVCodecDescriptor *desc;
/* Show "(native)" when the codec implementation name matches the
 * descriptor name, i.e. no external library was selected. */
3865 decoder_name = in_codec->name;
3866 desc = avcodec_descriptor_get(in_codec->id);
3868 in_codec_name = desc->name;
3869 if (!strcmp(decoder_name, in_codec_name))
3870 decoder_name = "native";
3874 encoder_name = out_codec->name;
3875 desc = avcodec_descriptor_get(out_codec->id);
3877 out_codec_name = desc->name;
3878 if (!strcmp(encoder_name, out_codec_name))
3879 encoder_name = "native";
3882 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3883 in_codec_name, decoder_name,
3884 out_codec_name, encoder_name);
3886 av_log(NULL, AV_LOG_INFO, "\n");
3890 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3894 atomic_store(&transcode_init_done, 1);
3899 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3900 static int need_output(void)
3904 for (i = 0; i < nb_output_streams; i++) {
3905 OutputStream *ost = output_streams[i];
3906 OutputFile *of = output_files[ost->file_index];
3907 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are finished or whose file hit -fs limit_filesize. */
3909 if (ost->finished ||
3910 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Reaching -frames for one stream closes every stream of that file. */
3912 if (ost->frame_number >= ost->max_frames) {
3914 for (j = 0; j < of->ctx->nb_streams; j++)
3915 close_output_stream(output_streams[of->ost_index + j]);
3926 * Select the output stream to process.
3928 * @return selected output stream, or NULL if none available
3930 static OutputStream *choose_output(void)
3933 int64_t opts_min = INT64_MAX;
3934 OutputStream *ost_min = NULL;
3936 for (i = 0; i < nb_output_streams; i++) {
3937 OutputStream *ost = output_streams[i];
/* Compare streams by current dts rescaled to a common timebase; a
 * stream with no dts yet sorts first (INT64_MIN). */
3938 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3939 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3941 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3942 av_log(NULL, AV_LOG_DEBUG,
3943 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3944 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3946 if (!ost->initialized && !ost->inputs_done)
/* Pick the least-advanced unfinished stream; an "unavailable" stream
 * yields NULL so the caller retries later. */
3949 if (!ost->finished && opts < opts_min) {
3951 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios;
 * silently does nothing if stdin is not a tty. */
3957 static void set_tty_echo(int on)
3961 if (tcgetattr(0, &tty) == 0) {
3962 if (on) tty.c_lflag |= ECHO;
3963 else tty.c_lflag &= ~ECHO;
3964 tcsetattr(0, TCSANOW, &tty);
/*
 * Poll the keyboard (at most every 100ms, never in daemon mode) and act
 * on interactive single-key commands: verbosity (+/-), QP histogram (s),
 * filter commands (c/C), debug modes (d/D), help (?).  Returns
 * AVERROR_EXIT when the user asked to quit or a signal was received,
 * 0 otherwise.
 * NOTE(review): interior lines of this extract are elided; comments
 * cover only what is visible.
 */
3969 static int check_keyboard_interaction(int64_t cur_time)
3972 static int64_t last_time;
3973 if (received_nb_signals)
3974 return AVERROR_EXIT;
3975 /* read_key() returns 0 on EOF */
/* Rate-limit key polling to once per 100ms. */
3976 if(cur_time - last_time >= 100000 && !run_as_daemon){
3978 last_time = cur_time;
3982 return AVERROR_EXIT;
3983 if (key == '+') av_log_set_level(av_log_get_level()+10);
3984 if (key == '-') av_log_set_level(av_log_get_level()-10);
3985 if (key == 's') qp_hist ^= 1;
3988 do_hex_dump = do_pkt_dump = 0;
3989 } else if(do_pkt_dump){
3993 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a "<target> <time> <command> [<arg>]" line and send
 * ('c' -> first matching filter only) or queue it to filtergraphs. */
3995 if (key == 'c' || key == 'C'){
3996 char buf[4096], target[64], command[256], arg[256] = {0};
3999 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4002 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4007 fprintf(stderr, "\n");
4009 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4010 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4011 target, time, command, arg);
4012 for (i = 0; i < nb_filtergraphs; i++) {
4013 FilterGraph *fg = filtergraphs[i];
4016 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4017 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4018 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4019 } else if (key == 'c') {
4020 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4021 ret = AVERROR_PATCHWELCOME;
4023 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4025 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4030 av_log(NULL, AV_LOG_ERROR,
4031 "Parse error, at least 3 arguments were expected, "
4032 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle to next debug mode; 'D': prompt for a numeric value. */
4035 if (key == 'd' || key == 'D'){
4038 debug = input_streams[0]->dec_ctx->debug << 1;
4039 if(!debug) debug = 1;
4040 while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4047 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4052 fprintf(stderr, "\n");
4053 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4054 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
4056 for(i=0;i<nb_input_streams;i++) {
4057 input_streams[i]->dec_ctx->debug = debug;
4059 for(i=0;i<nb_output_streams;i++) {
4060 OutputStream *ost = output_streams[i];
4061 ost->enc_ctx->debug = debug;
4063 if(debug) av_log_set_level(AV_LOG_DEBUG);
4064 fprintf(stderr,"debug=%d\n", debug);
4067 fprintf(stderr, "key function\n"
4068 "? show this help\n"
4069 "+ increase verbosity\n"
4070 "- decrease verbosity\n"
4071 "c Send command to first matching filter supporting it\n"
4072 "C Send/Queue command to all matching filters\n"
4073 "D cycle through available debug modes\n"
4074 "h dump packets/hex press to cycle through the 3 states\n"
4076 "s Show QP histogram\n"
/*
 * Per-input-file reader thread: pulls packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue.  On
 * read error/EOF the error is stored on the queue's receive side so
 * the main thread sees it.  'arg' is the InputFile*.
 */
4083 static void *input_thread(void *arg)
4086 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4091 ret = av_read_frame(f->ctx, &pkt);
4093 if (ret == AVERROR(EAGAIN)) {
4098 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4101 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send hit a full queue: warn once, then retry blocking
 * so the packet is not lost. */
4102 if (flags && ret == AVERROR(EAGAIN)) {
4104 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4105 av_log(f->ctx, AV_LOG_WARNING,
4106 "Thread message queue blocking; consider raising the "
4107 "thread_queue_size option (current value: %d)\n",
4108 f->thread_queue_size);
4111 if (ret != AVERROR_EOF)
4112 av_log(f->ctx, AV_LOG_ERROR,
4113 "Unable to send packet to main thread: %s\n",
4115 av_packet_unref(&pkt);
4116 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Stop and tear down input file i's reader thread: signal EOF to the
 * sender, drain and unref all queued packets, join the thread and free
 * the message queue.  No-op when no thread/queue exists.
 */
4124 static void free_input_thread(int i)
4126 InputFile *f = input_files[i];
4129 if (!f || !f->in_thread_queue)
4131 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain the queue so the thread is not blocked on a full queue. */
4132 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4133 av_packet_unref(&pkt);
4135 pthread_join(f->thread, NULL);
4137 av_thread_message_queue_free(&f->in_thread_queue);
/* Tear down the reader threads of all input files. */
4140 static void free_input_threads(void)
4144 for (i = 0; i < nb_input_files; i++)
4145 free_input_thread(i);
/*
 * Start the reader thread for input file i.  The queue size defaults
 * to 8 when there are multiple inputs, 0 (no thread) otherwise; a
 * size of 0 skips threading entirely.  Returns 0 or a negative
 * AVERROR.
 */
4148 static int init_input_thread(int i)
4151 InputFile *f = input_files[i];
4153 if (f->thread_queue_size < 0)
4154 f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4155 if (!f->thread_queue_size)
/* Use non-blocking sends for non-seekable (live-ish) inputs, except
 * the lavfi pseudo-demuxer. */
4158 if (f->ctx->pb ? !f->ctx->pb->seekable :
4159 strcmp(f->ctx->iformat->name, "lavfi"))
4160 f->non_blocking = 1;
4161 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4162 f->thread_queue_size, sizeof(AVPacket));
4166 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
/* pthread_create returns a positive errno code, not -1/errno. */
4167 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4168 av_thread_message_queue_free(&f->in_thread_queue);
4169 return AVERROR(ret);
/* Start reader threads for all input files; stops at the first error. */
4175 static int init_input_threads(void)
4179 for (i = 0; i < nb_input_files; i++) {
4180 ret = init_input_thread(i);
/* Multi-threaded packet fetch: receive one packet from the input
 * file's reader-thread queue (non-blocking when f is non-blocking). */
4187 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4189 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4191 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Fetch the next packet from an input file, honoring -re rate
 * emulation: when any stream of the file is ahead of wall-clock time,
 * return EAGAIN instead of reading.  Falls through to the threaded
 * queue when one exists, otherwise reads directly from the demuxer.
 */
4195 static int get_input_packet(InputFile *f, AVPacket *pkt)
4199 for (i = 0; i < f->nb_streams; i++) {
4200 InputStream *ist = input_streams[f->ist_index + i];
/* Compare stream dts (in microseconds) with elapsed wall-clock time. */
4201 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4202 int64_t now = av_gettime_relative() - ist->start;
4204 return AVERROR(EAGAIN);
4209 if (f->thread_queue_size)
4210 return get_input_packet_mt(f, pkt);
4212 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. some input returned EAGAIN this iteration). */
4215 static int got_eagain(void)
4218 for (i = 0; i < nb_output_streams; i++)
4219 if (output_streams[i]->unavailable)
/* Clear the per-iteration EAGAIN markers on all input files and the
 * "unavailable" flag on all output streams. */
4224 static void reset_eagain(void)
4227 for (i = 0; i < nb_input_files; i++)
4228 input_files[i]->eagain = 0;
4229 for (i = 0; i < nb_output_streams; i++)
4230 output_streams[i]->unavailable = 0;
4233 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Used by the -stream_loop machinery to keep the longest per-stream
 * duration; av_compare_ts handles the differing time bases. */
4234 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4235 AVRational time_base)
4241 return tmp_time_base;
4244 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4247 return tmp_time_base;
/*
 * Seek an input file back to its start for -stream_loop and update
 * ifile->duration/time_base with the length of the iteration just
 * played, so looped timestamps can be offset correctly.  Audio stream
 * durations (exact, from sample counts) take precedence over video
 * frame-duration estimates.  Returns 0 or a negative AVERROR.
 * NOTE(review): interior lines of this extract are elided.
 */
4253 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4256 AVCodecContext *avctx;
4257 int i, ret, has_audio = 0;
4258 int64_t duration = 0;
4260 ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
/* First pass: detect whether any decoded audio stream exists. */
4264 for (i = 0; i < ifile->nb_streams; i++) {
4265 ist = input_streams[ifile->ist_index + i];
4266 avctx = ist->dec_ctx;
4268 /* duration is the length of the last frame in a stream
4269 * when audio stream is present we don't care about
4270 * last video frame length because it's not defined exactly */
4271 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute the last-frame duration per stream. */
4275 for (i = 0; i < ifile->nb_streams; i++) {
4276 ist = input_streams[ifile->ist_index + i];
4277 avctx = ist->dec_ctx;
4280 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4281 AVRational sample_rate = {1, avctx->sample_rate};
4283 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: approximate one frame from -r, else the average frame rate. */
4288 if (ist->framerate.num) {
4289 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4290 } else if (ist->st->avg_frame_rate.num) {
4291 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4296 if (!ifile->duration)
4297 ifile->time_base = ist->st->time_base;
4298 /* the total duration of the stream, max_pts - min_pts is
4299 * the duration of the stream without the last frame */
/* Guard against signed overflow before adding the pts span. */
4300 if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4301 duration += ist->max_pts - ist->min_pts;
4302 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* A positive loop counter counts down one iteration per seek. */
4306 if (ifile->loop > 0)
4314 * - 0 -- one packet was read and processed
4315 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4316 * this function should be called again
4317 * - AVERROR_EOF -- this function should not be called again
/*
 * Read one packet from input file 'file_index', apply timestamp
 * corrections (wrap, start-time offset, ts_scale, discontinuity and
 * loop offsets), attach stream-global side data to the first packet,
 * and hand the packet to process_input_packet().  Handles -stream_loop
 * (flush decoders, seek back, restart the reader thread) and EOF
 * (flush decoders, finish non-lavfi outputs).
 * NOTE(review): this extract is fragmentary -- interior lines are
 * elided -- so comments below cover only what is visible.
 */
4319 static int process_input(int file_index)
4321 InputFile *ifile = input_files[file_index];
4322 AVFormatContext *is;
4325 int ret, thread_ret, i, j;
4328 int disable_discontinuity_correction = copy_ts;
4331 ret = get_input_packet(ifile, &pkt);
4333 if (ret == AVERROR(EAGAIN)) {
/* Read failed and looping is requested: flush decoders and seek back. */
4337 if (ret < 0 && ifile->loop) {
4338 AVCodecContext *avctx;
4339 for (i = 0; i < ifile->nb_streams; i++) {
4340 ist = input_streams[ifile->ist_index + i];
4341 avctx = ist->dec_ctx;
4342 if (ist->decoding_needed) {
4343 ret = process_input_packet(ist, NULL, 1);
4346 avcodec_flush_buffers(avctx);
/* Reader thread must be stopped across the seek and restarted after. */
4350 free_input_thread(file_index);
4352 ret = seek_to_start(ifile, is);
4354 thread_ret = init_input_thread(file_index);
4359 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4361 ret = get_input_packet(ifile, &pkt);
4362 if (ret == AVERROR(EAGAIN)) {
4368 if (ret != AVERROR_EOF) {
4369 print_error(is->url, ret);
/* EOF: flush every decoder and finish outputs fed directly (not via
 * lavfi) by this file's streams. */
4374 for (i = 0; i < ifile->nb_streams; i++) {
4375 ist = input_streams[ifile->ist_index + i];
4376 if (ist->decoding_needed) {
4377 ret = process_input_packet(ist, NULL, 0);
4382 /* mark all outputs that don't go through lavfi as finished */
4383 for (j = 0; j < nb_output_streams; j++) {
4384 OutputStream *ost = output_streams[j];
4386 if (ost->source_index == ifile->ist_index + i &&
4387 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4388 finish_output_stream(ost);
4392 ifile->eof_reached = 1;
4393 return AVERROR(EAGAIN);
4399 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4400 is->streams[pkt.stream_index]);
4402 /* the following test is needed in case new streams appear
4403 dynamically in stream : we ignore them */
4404 if (pkt.stream_index >= ifile->nb_streams) {
4405 report_new_stream(file_index, &pkt);
4406 goto discard_packet;
4409 ist = input_streams[ifile->ist_index + pkt.stream_index];
4411 ist->data_size += pkt.size;
4415 goto discard_packet;
4417 if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4418 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4419 "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4425 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4426 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4427 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4428 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4429 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4430 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4431 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4432 av_ts2str(input_files[ist->file_index]->ts_offset),
4433 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp-wrap handling for containers with <64-bit pts fields. */
4436 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4437 int64_t stime, stime2;
4438 // Correcting starttime based on the enabled streams
4439 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4440 // so we instead do it here as part of discontinuity handling
4441 if ( ist->next_dts == AV_NOPTS_VALUE
4442 && ifile->ts_offset == -is->start_time
4443 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4444 int64_t new_start_time = INT64_MAX;
4445 for (i=0; i<is->nb_streams; i++) {
4446 AVStream *st = is->streams[i];
4447 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4449 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4451 if (new_start_time > is->start_time) {
4452 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4453 ifile->ts_offset = -new_start_time;
4457 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4458 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4459 ist->wrap_correction_done = 1;
/* Unwrap dts/pts that already wrapped past the pts_wrap_bits boundary. */
4461 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4462 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4463 ist->wrap_correction_done = 0;
4465 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4466 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4467 ist->wrap_correction_done = 0;
4471 /* add the stream-global side data to the first packet */
4472 if (ist->nb_packets == 1) {
4473 for (i = 0; i < ist->st->nb_side_data; i++) {
4474 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Display matrix is handled via the autorotate path, not copied here. */
4477 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
/* Don't clobber side data already present on the packet itself. */
4480 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4483 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4487 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the file's global timestamp offset, then per-stream ts_scale. */
4491 if (pkt.dts != AV_NOPTS_VALUE)
4492 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4493 if (pkt.pts != AV_NOPTS_VALUE)
4494 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4496 if (pkt.pts != AV_NOPTS_VALUE)
4497 pkt.pts *= ist->ts_scale;
4498 if (pkt.dts != AV_NOPTS_VALUE)
4499 pkt.dts *= ist->ts_scale;
4501 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* Inter-stream discontinuity: first dts of this stream is far from the
 * file's last seen ts in a TS_DISCONT container -> shift ts_offset. */
4502 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4503 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4504 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4505 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4506 int64_t delta = pkt_dts - ifile->last_ts;
4507 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4508 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4509 ifile->ts_offset -= delta;
4510 av_log(NULL, AV_LOG_DEBUG,
4511 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4512 delta, ifile->ts_offset);
4513 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4514 if (pkt.pts != AV_NOPTS_VALUE)
4515 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Offset timestamps by the accumulated -stream_loop duration and track
 * the pts range for the next loop iteration. */
4519 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4520 if (pkt.pts != AV_NOPTS_VALUE) {
4521 pkt.pts += duration;
4522 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4523 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4526 if (pkt.dts != AV_NOPTS_VALUE)
4527 pkt.dts += duration;
4529 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* With -copyts, still allow correction when the dts looks like a
 * genuine wrap (the wrapped value is much closer to the prediction). */
4531 if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4532 (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4533 int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4534 ist->st->time_base, AV_TIME_BASE_Q,
4535 AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4536 if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4537 disable_discontinuity_correction = 0;
/* Intra-stream discontinuity: dts far from the predicted next_dts. */
4540 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4541 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4542 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4543 !disable_discontinuity_correction) {
4544 int64_t delta = pkt_dts - ist->next_dts;
4545 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4546 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4547 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4548 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4549 ifile->ts_offset -= delta;
4550 av_log(NULL, AV_LOG_DEBUG,
4551 "timestamp discontinuity for stream #%d:%d "
4552 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4553 ist->file_index, ist->st->index, ist->st->id,
4554 av_get_media_type_string(ist->dec_ctx->codec_type),
4555 delta, ifile->ts_offset);
4556 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4557 if (pkt.pts != AV_NOPTS_VALUE)
4558 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-TS_DISCONT containers: drop wildly-off dts/pts instead of
 * shifting the whole file's offset. */
4561 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4562 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4563 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4564 pkt.dts = AV_NOPTS_VALUE;
4566 if (pkt.pts != AV_NOPTS_VALUE){
4567 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4568 delta = pkt_pts - ist->next_dts;
4569 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4570 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4571 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4572 pkt.pts = AV_NOPTS_VALUE;
4578 if (pkt.dts != AV_NOPTS_VALUE)
4579 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4582 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4583 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4584 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4585 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4586 av_ts2str(input_files[ist->file_index]->ts_offset),
4587 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4590 sub2video_heartbeat(ist, pkt.pts);
4592 process_input_packet(ist, &pkt, 0);
4595 av_packet_unref(&pkt);
4601 * Perform a step of transcoding for the specified filter graph.
4603 * @param[in] graph filter graph to consider
4604 * @param[out] best_ist input stream where a frame would allow to continue
4605 * @return 0 for success, <0 for error
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4610 int nb_requests, nb_requests_max = 0;
4611 InputFilter *ifilter;
4615 ret = avfilter_graph_request_oldest(graph->graph);
4617 return reap_filters(0);
/* Graph drained: flush the sinks and close its output streams. */
4619 if (ret == AVERROR_EOF) {
4620 ret = reap_filters(1);
4621 for (i = 0; i < graph->nb_outputs; i++)
4622 close_output_stream(graph->outputs[i]->ost);
4625 if (ret != AVERROR(EAGAIN))
/* Graph is starved: pick the input whose buffersrc reported the most
 * failed requests as the best stream to feed next. */
4628 for (i = 0; i < graph->nb_inputs; i++) {
4629 ifilter = graph->inputs[i];
4631 if (input_files[ist->file_index]->eagain ||
4632 input_files[ist->file_index]->eof_reached)
4634 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4635 if (nb_requests > nb_requests_max) {
4636 nb_requests_max = nb_requests;
/* No feedable input found: mark the graph's outputs unavailable. */
4642 for (i = 0; i < graph->nb_outputs; i++)
4643 graph->outputs[i]->ost->unavailable = 1;
4649 * Run a single step of transcoding.
4651 * @return 0 for success, <0 for error
/**
 * Run a single step of transcoding: choose the most urgent output stream,
 * pull input for it (directly or through its filter graph), decode/filter,
 * and flush any resulting frames to the encoders.
 *
 * @return 0 for success, <0 for error
 */
4653 static int transcode_step(void)
4656 InputStream *ist = NULL;
/* Pick the output stream that is furthest behind (by dts). */
4659 ost = choose_output();
4666 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtered output whose graph is not yet configured: configure it as soon
 * as every input of the graph knows its format. */
4670 if (ost->filter && !ost->filter->graph->graph) {
4671 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4672 ret = configure_filtergraph(ost->filter->graph);
4674 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* Filtered output with a live graph: let the graph decide which input
 * stream to read next. */
4680 if (ost->filter && ost->filter->graph->graph) {
4682 * Similar case to the early audio initialization in reap_filters.
4683 * Audio is special in ffmpeg.c currently as we depend on lavfi's
4684 * audio frame buffering/creation to get the output audio frame size
4685 * in samples correct. The audio frame size for the filter chain is
4686 * configured during the output stream initialization.
4688 * Apparently avfilter_graph_request_oldest (called in
4689 * transcode_from_filter just down the line) peeks. Peeking already
4690 * puts one frame "ready to be given out", which means that any
4691 * update in filter buffer sink configuration afterwards will not
4692 * help us. And yes, even if it would be utilized,
4693 * av_buffersink_get_samples is affected, as it internally utilizes
4694 * the same early exit for peeked frames.
4696 * In other words, if avfilter_graph_request_oldest would not make
4697 * further filter chain configuration or usage of
4698 * av_buffersink_get_samples useless (by just causing the return
4699 * of the peeked AVFrame as-is), we could get rid of this additional
4700 * early encoder initialization.
4702 if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4703 init_output_stream_wrapper(ost, NULL, 1);
4705 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Filtered output whose graph is still unconfigured: if any of its inputs
 * can still produce data, read from that one; otherwise all inputs are
 * exhausted and the output is marked done. */
4709 } else if (ost->filter) {
4711 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4712 InputFilter *ifilter = ost->filter->graph->inputs[i];
4713 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4719 ost->inputs_done = 1;
/* Unfiltered (streamcopy or direct) output: read from its source stream. */
4723 av_assert0(ost->source_index >= 0);
4724 ist = input_streams[ost->source_index];
4727 ret = process_input(ist->file_index);
/* EAGAIN from the demuxer is not an error: if the whole file is stalled,
 * mark this output unavailable so the scheduler tries another stream. */
4728 if (ret == AVERROR(EAGAIN)) {
4729 if (input_files[ist->file_index]->eagain)
4730 ost->unavailable = 1;
/* EOF on input is a normal condition here, not an error. */
4735 return ret == AVERROR_EOF ? 0 : ret;
/* Drain any frames the step produced into the encoders/muxers. */
4737 return reap_filters(0);
4741 * The following code is the main loop of the file converter
/*
 * Main conversion loop: repeatedly runs transcode_step() until all outputs
 * are finished or the user/a signal stops us, then flushes decoders, writes
 * trailers, and tears down per-stream state.
 * Returns 0 on success, a negative AVERROR on failure.
 */
4743 static int transcode(void)
4746 AVFormatContext *os;
4749 int64_t timer_start;
4750 int64_t total_packets_written = 0;
4752 ret = transcode_init();
4756 if (stdin_interaction) {
4757 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
/* Wall-clock start used by the periodic progress report. */
4760 timer_start = av_gettime_relative();
4763 if ((ret = init_input_threads()) < 0)
/* --- main loop: one scheduling step per iteration --- */
4767 while (!received_sigterm) {
4768 int64_t cur_time= av_gettime_relative();
4770 /* if 'q' was pressed, exit */
4771 if (stdin_interaction)
4772 if (check_keyboard_interaction(cur_time) < 0)
4775 /* check if there's any stream where output is still needed */
4776 if (!need_output()) {
4777 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4781 ret = transcode_step();
/* EOF from a step is normal; any other negative value aborts the loop. */
4782 if (ret < 0 && ret != AVERROR_EOF) {
4783 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4787 /* dump report by using the output first video and audio streams */
4788 print_report(0, timer_start, cur_time);
4791 free_input_threads();
4794 /* at the end of stream, we must flush the decoder buffers */
4795 for (i = 0; i < nb_input_streams; i++) {
4796 ist = input_streams[i];
4797 if (!input_files[ist->file_index]->eof_reached) {
/* NULL packet == flush request for this stream's decoder. */
4798 process_input_packet(ist, NULL, 0);
4805 /* write the trailer if needed and close file */
4806 for (i = 0; i < nb_output_files; i++) {
4807 os = output_files[i]->ctx;
/* A file whose header was never written received no packets at all;
 * writing a trailer would be invalid, so report and skip it. */
4808 if (!output_files[i]->header_written) {
4809 av_log(NULL, AV_LOG_ERROR,
4810 "Nothing was written into output file %d (%s), because "
4811 "at least one of its streams received no packets.\n",
4815 if ((ret = av_write_trailer(os)) < 0) {
4816 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4822 /* dump report by using the first video and audio streams */
4823 print_report(1, timer_start, av_gettime_relative());
4825 /* close each encoder */
4826 for (i = 0; i < nb_output_streams; i++) {
4827 ost = output_streams[i];
4828 if (ost->encoding_needed) {
4829 av_freep(&ost->enc_ctx->stats_in);
/* Optionally abort when a stream (or the whole run) produced nothing,
 * per the -abort_on flags. */
4831 total_packets_written += ost->packets_written;
4832 if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4833 av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4838 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4839 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4843 /* close each decoder */
4844 for (i = 0; i < nb_input_streams; i++) {
4845 ist = input_streams[i];
4846 if (ist->decoding_needed) {
4847 avcodec_close(ist->dec_ctx);
/* Release any hwaccel state before the codec context goes away. */
4848 if (ist->hwaccel_uninit)
4849 ist->hwaccel_uninit(ist->dec_ctx);
4853 hw_device_free_all();
/* Cleanup path (also reached on error): free per-output-stream state. */
4860 free_input_threads();
4863 if (output_streams) {
4864 for (i = 0; i < nb_output_streams; i++) {
4865 ost = output_streams[i];
/* fclose may flush buffered pass-log data; report failure loudly. */
4868 if (fclose(ost->logfile))
4869 av_log(NULL, AV_LOG_ERROR,
4870 "Error closing logfile, loss of information possible: %s\n",
4871 av_err2str(AVERROR(errno)));
4872 ost->logfile = NULL;
4874 av_freep(&ost->forced_kf_pts);
4875 av_freep(&ost->apad);
4876 av_freep(&ost->disposition);
4877 av_dict_free(&ost->encoder_opts);
4878 av_dict_free(&ost->sws_dict);
4879 av_dict_free(&ost->swr_opts);
4880 av_dict_free(&ost->resample_opts);
/*
 * Snapshot wall-clock, user-CPU and system-CPU time for the -benchmark
 * report.  Real time always comes from av_gettime_relative(); CPU times use
 * getrusage() on POSIX, GetProcessTimes() on Windows, and are 0 elsewhere.
 */
4887 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
4889 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
/* POSIX branch: rusage times are (seconds, microseconds) pairs. */
4891 struct rusage rusage;
4893 getrusage(RUSAGE_SELF, &rusage);
4894 time_stamps.user_usec =
4895 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4896 time_stamps.sys_usec =
4897 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4898 #elif HAVE_GETPROCESSTIMES
4900 FILETIME c, e, k, u;
4901 proc = GetCurrentProcess();
4902 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is a 64-bit count of 100ns ticks split across two DWORDs;
 * dividing by 10 converts to microseconds. */
4903 time_stamps.user_usec =
4904 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4905 time_stamps.sys_usec =
4906 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* Fallback when no CPU-time API is available. */
4908 time_stamps.user_usec = time_stamps.sys_usec = 0;
/*
 * Return the peak memory usage of this process in bytes (for the final
 * -benchmark report), or a platform-specific approximation of it.
 */
4913 static int64_t getmaxrss(void)
4915 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4916 struct rusage rusage;
4917 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; scale to bytes.
 * NOTE(review): macOS reports ru_maxrss in bytes — verify whether the
 * configure-time checks account for that. */
4918 return (int64_t)rusage.ru_maxrss * 1024;
4919 #elif HAVE_GETPROCESSMEMORYINFO
4921 PROCESS_MEMORY_COUNTERS memcounters;
4922 proc = GetCurrentProcess();
4923 memcounters.cb = sizeof(memcounters);
4924 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* Windows has no direct maxrss equivalent; peak pagefile usage (bytes)
 * is used as the closest available metric. */
4925 return memcounters.PeakPagefileUsage;
/* av_log callback installed in main() when ffmpeg is started with "-d"
 * (daemon mode); presumably discards all messages — the body is not
 * visible in this extract. */
4931 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: set up logging and cleanup handlers, parse the
 * command line into input/output files, run the transcode loop, and print
 * the optional -benchmark summary before exiting.
 */
4935 int main(int argc, char **argv)
4938 BenchmarkTimeStamps ti;
/* ffmpeg_cleanup runs on every exit_program() path. */
4942 register_exit(ffmpeg_cleanup);
4944 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4946 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Apply -loglevel/-v before any other option so early messages honor it. */
4947 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: daemon mode — silence the log callback. */
4949 if(argc>1 && !strcmp(argv[1], "-d")){
4951 av_log_set_callback(log_callback_null);
4957 avdevice_register_all();
4959 avformat_network_init();
4961 show_banner(argc, argv, options);
4963 /* parse options and open all input/output files */
4964 ret = ffmpeg_parse_options(argc, argv);
/* Invoked with no files at all: point the user at the help. */
4968 if (nb_output_files <= 0 && nb_input_files == 0) {
4970 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4974 /* file converter / grab */
4975 if (nb_output_files <= 0) {
4976 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Scan outputs for the "rtp" muxer (strcmp != 0 means NOT rtp). */
4980 for (i = 0; i < nb_output_files; i++) {
4981 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Take a benchmark baseline, run the whole conversion, then report the
 * user/system/real time deltas when -benchmark is enabled. */
4985 current_time = ti = get_benchmark_time_stamps();
4986 if (transcode() < 0)
4989 int64_t utime, stime, rtime;
4990 current_time = get_benchmark_time_stamps();
4991 utime = current_time.user_usec - ti.user_usec;
4992 stime = current_time.sys_usec - ti.sys_usec;
4993 rtime = current_time.real_usec - ti.real_usec;
4994 av_log(NULL, AV_LOG_INFO,
4995 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4996 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4998 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4999 decode_error_stat[0], decode_error_stat[1]);
/* Fail the run when the decode-error ratio exceeds -max_error_rate. */
5000 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals interruption by a signal; otherwise the accumulated code. */
5003 exit_program(received_nb_signals ? 255 : main_return_code);
5004 return main_return_code;