2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* ---------------------------------------------------------------------------
 * Global transcoder state shared across the ffmpeg CLI translation units:
 * program identity, the input/output stream and file tables, the filtergraph
 * list, and bookkeeping counters.
 * NOTE(review): this extract elides several lines (e.g. the bodies of
 * forced_keyframes_const_names[] and struct BenchmarkTimeStamps).
 * ------------------------------------------------------------------------- */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* two-pass video statistics log; closed (with error check) in ffmpeg_cleanup() */
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
134 static int run_as_daemon = 0;
/* frame duplication/drop accounting used by the video-sync code */
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
140 static int want_sdp = 1;
/* snapshot taken by update_benchmark() to report per-step user/sys/real time */
142 static BenchmarkTimeStamps current_time;
143 AVIOContext *progress_avio = NULL;
/* scratch buffer for avcodec_encode_subtitle(); allocated in do_subtitle_out() */
145 static uint8_t *subtitle_out;
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
168 static void free_input_threads(void);
172 Convert subtitles to video with alpha to insert them in filter graphs.
173 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset the per-stream sub2video frame to a blank, fully transparent RGB32
 * canvas sized from the decoder, falling back to the configured sub2video
 * dimensions when the decoder reports 0.
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
 * NOTE(review): braces and return statements are elided in this extract. */
176 static int sub2video_get_blank_frame(InputStream *ist)
179 AVFrame *frame = ist->sub2video.frame;
181 av_frame_unref(frame);
182 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
185 if ((ret = av_frame_get_buffer(frame, 0)) < 0)
/* zero pixel data: 0x00000000 in RGB32 is fully transparent black */
187 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle onto the dst RGB32 canvas, expanding
 * 8-bit paletted pixels (palette in r->data[1]) to 32-bit values.
 * Non-bitmap rects and rects overflowing the w x h canvas are rejected with
 * a warning (the early returns are elided in this extract). */
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194 uint32_t *pal, *dst2;
198 if (r->type != SUBTITLE_BITMAP) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
202 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204 r->x, r->y, r->w, r->h, w, h
/* advance to the rect's top-left corner; 4 bytes per RGB32 pixel */
209 dst += r->y * dst_linesize + r->x * 4;
211 pal = (uint32_t *)r->data[1];
212 for (y = 0; y < r->h; y++) {
213 dst2 = (uint32_t *)dst;
/* palette lookup per source pixel (src/src2 setup elided in this extract) */
215 for (x = 0; x < r->w; x++)
216 *(dst2++) = pal[*(src2++)];
218 src += r->linesize[0];
/* Send the current sub2video frame, stamped with pts, to every buffersrc
 * feeding this input stream.  KEEP_REF leaves frame ownership with the
 * stream so the same frame can be re-sent on later heartbeats; PUSH asks
 * the filtergraph to process it immediately. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 AVFrame *frame = ist->sub2video.frame;
228 av_assert1(frame->data[0]);
/* remember the last pts pushed so heartbeats never go backwards */
229 ist->sub2video.last_pts = frame->pts = pts;
230 for (i = 0; i < ist->nb_filters; i++) {
231 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232 AV_BUFFERSRC_FLAG_KEEP_REF |
233 AV_BUFFERSRC_FLAG_PUSH);
234 if (ret != AVERROR_EOF && ret < 0)
235 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render an AVSubtitle onto the sub2video canvas and push the result into
 * the filtergraph (when sub is NULL the canvas is pushed blank/cleared —
 * the NULL branch is elided in this extract).  Display start/end times are
 * rescaled from AV_TIME_BASE to the input stream time base; on the very
 * first update (sub2video.initialize set) heartbeat_pts seeds the start
 * time, otherwise the previous subpicture's end time is the fallback. */
240 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 AVFrame *frame = ist->sub2video.frame;
246 int64_t pts, end_pts;
251 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252 AV_TIME_BASE_Q, ist->st->time_base);
253 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254 AV_TIME_BASE_Q, ist->st->time_base);
255 num_rects = sub->num_rects;
257 /* If we are initializing the system, utilize current heartbeat
258 PTS as the start time, and show until the following subpicture
259 is received. Otherwise, utilize the previous subpicture's end time
260 as the fall-back value. */
261 pts = ist->sub2video.initialize ?
262 heartbeat_pts : ist->sub2video.end_pts;
266 if (sub2video_get_blank_frame(ist) < 0) {
267 av_log(ist->dec_ctx, AV_LOG_ERROR,
268 "Impossible to get a blank canvas.\n");
271 dst = frame->data [0];
272 dst_linesize = frame->linesize[0];
/* composite every rect of the subtitle onto the blank canvas */
273 for (i = 0; i < num_rects; i++)
274 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275 sub2video_push_ref(ist, pts);
276 ist->sub2video.end_pts = end_pts;
277 ist->sub2video.initialize = 0;
/* Called when a frame is read from the same input file: re-push the current
 * sub2video frame to every sub2video stream of that file so video frames do
 * not pile up in the filtergraph while a filter (e.g. overlay) waits for a
 * subtitle frame that may never arrive. */
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282 InputFile *infile = input_files[ist->file_index];
286 /* When a frame is read from a file, examine all sub2video streams in
287 the same file and send the sub2video frame again. Otherwise, decoded
288 video frames could be accumulating in the filter graph while a filter
289 (possibly overlay) is desperately waiting for a subtitle frame. */
290 for (i = 0; i < infile->nb_streams; i++) {
291 InputStream *ist2 = input_streams[infile->ist_index + i];
/* skip streams that are not sub2video (no canvas allocated) */
292 if (!ist2->sub2video.frame)
294 /* subtitles seem to be usually muxed ahead of other streams;
295 if not, subtracting a larger time here is necessary */
296 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297 /* do not send the heartbeat frame if the subtitle is already ahead */
298 if (pts2 <= ist2->sub2video.last_pts)
300 if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301 /* if we have hit the end of the current displayed subpicture,
302 or if we need to initialize the system, update the
303 overlayed subpicture and its start/end times */
304 sub2video_update(ist2, pts2 + 1, NULL);
/* only re-push when some buffersrc actually failed a request */
305 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308 sub2video_push_ref(ist2, pts2);
/* End of stream for a sub2video input: display the final state once, then
 * send a NULL frame (EOF) to every buffersrc attached to this stream. */
312 static void sub2video_flush(InputStream *ist)
317 if (ist->sub2video.end_pts < INT64_MAX)
318 sub2video_update(ist, INT64_MAX, NULL);
319 for (i = 0; i < ist->nb_filters; i++) {
320 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321 if (ret != AVERROR_EOF && ret < 0)
322 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
326 /* end of sub2video hack */
/* Restore the terminal attributes saved in oldtty.  Kept async-signal-safe
 * so it can be called from sigterm_handler().
 * NOTE(review): the restore_tty guard around tcsetattr() appears elided in
 * this extract — confirm against the full source. */
328 static void term_exit_sigsafe(void)
332 tcsetattr (0, TCSANOW, &oldtty);
338 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal / process-lifecycle state shared with the handlers below. */
342 static volatile int received_sigterm = 0;
343 static volatile int received_nb_signals = 0;
/* set once transcoding setup is complete; read by decode_interrupt_cb() */
344 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
345 static volatile int ffmpeg_exited = 0;
346 static int main_return_code = 0;
/* Async signal handler: record which signal arrived and count it.  After
 * more than three signals, assume the process is stuck and report via
 * write(2) — the only async-signal-safe way to emit the message (the hard
 * exit that follows is elided in this extract). */
349 sigterm_handler(int sig)
352 received_sigterm = sig;
353 received_nb_signals++;
355 if(received_nb_signals > 3) {
356 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
357 strlen("Received > 3 system signals, hard exiting\n"));
/* write() result checked only to silence warn_unused_result */
358 if (ret < 0) { /* Do nothing */ };
363 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the same POSIX
 * signal path used elsewhere (SIGINT for Ctrl+C/Break, SIGTERM for
 * close/logoff/shutdown), then stall until the main thread finishes. */
364 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
366 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
371 case CTRL_BREAK_EVENT:
372 sigterm_handler(SIGINT);
375 case CTRL_CLOSE_EVENT:
376 case CTRL_LOGOFF_EVENT:
377 case CTRL_SHUTDOWN_EVENT:
378 sigterm_handler(SIGTERM);
379 /* Basically, with these 3 events, when we return from this method the
380 process is hard terminated, so stall as long as we need to
381 to try and let the main thread(s) clean up and gracefully terminate
382 (we have at most 5 seconds, but should be done far before that). */
383 while (!ffmpeg_exited) {
389 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* (term_init body — the function signature is elided in this extract.)
 * Puts the controlling terminal into a raw-ish mode for single-key reads
 * when stdin interaction is enabled, then installs the signal handlers. */
398 if (!run_as_daemon && stdin_interaction) {
400 if (tcgetattr (0, &tty) == 0) {
/* disable input translation, canonical mode and echo; keep output
 * post-processing (OPOST) so log lines still render normally */
404 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
405 |INLCR|IGNCR|ICRNL|IXON);
406 tty.c_oflag |= OPOST;
407 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
408 tty.c_cflag &= ~(CSIZE|PARENB);
413 tcsetattr (0, TCSANOW, &tty);
415 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
419 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
420 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
422 signal(SIGXCPU, sigterm_handler);
425 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
427 #if HAVE_SETCONSOLECTRLHANDLER
428 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
432 /* read a key without blocking */
/* Returns the next pending key from stdin without blocking, using select()
 * on POSIX and PeekNamedPipe/console APIs on Windows.
 * NOTE(review): most of the body (fd_set setup, reads, returns) is elided
 * in this extract. */
433 static int read_key(void)
445 n = select(1, &rfds, NULL, NULL, &tv);
454 # if HAVE_PEEKNAMEDPIPE
456 static HANDLE input_handle;
459 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* a console handle supports GetConsoleMode; anything else is a pipe */
460 is_pipe = !GetConsoleMode(input_handle, &dw);
464 /* When running under a GUI, you will end here. */
465 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
466 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once the number of received
 * signals exceeds transcode_init_done — i.e. one signal interrupts during
 * initialization, two are required after init has completed. */
484 static int decode_interrupt_cb(void *ctx)
486 return received_nb_signals > atomic_load(&transcode_init_done);
489 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down all global transcoder state on exit: filtergraphs (draining
 * queued frames/subtitles), output files and streams (draining muxing
 * queues), input threads/files/streams, the vstats log, and the global
 * stream tables.  ret is the process exit status being propagated.
 * NOTE(review): many closing braces and some statements are elided in this
 * extract; order of teardown below reflects only the visible lines. */
491 static void ffmpeg_cleanup(int ret)
496 int maxrss = getmaxrss() / 1024;
497 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* free every filtergraph, including frames/subtitles still queued on its
 * inputs */
500 for (i = 0; i < nb_filtergraphs; i++) {
501 FilterGraph *fg = filtergraphs[i];
502 avfilter_graph_free(&fg->graph);
503 for (j = 0; j < fg->nb_inputs; j++) {
504 InputFilter *ifilter = fg->inputs[j];
505 struct InputStream *ist = ifilter->ist;
507 while (av_fifo_size(ifilter->frame_queue)) {
509 av_fifo_generic_read(ifilter->frame_queue, &frame,
510 sizeof(frame), NULL);
511 av_frame_free(&frame);
513 av_fifo_freep(&ifilter->frame_queue);
514 if (ist->sub2video.sub_queue) {
515 while (av_fifo_size(ist->sub2video.sub_queue)) {
517 av_fifo_generic_read(ist->sub2video.sub_queue,
518 &sub, sizeof(sub), NULL);
519 avsubtitle_free(&sub);
521 av_fifo_freep(&ist->sub2video.sub_queue);
523 av_buffer_unref(&ifilter->hw_frames_ctx);
524 av_freep(&ifilter->name);
525 av_freep(&fg->inputs[j]);
527 av_freep(&fg->inputs);
528 for (j = 0; j < fg->nb_outputs; j++) {
529 OutputFilter *ofilter = fg->outputs[j];
531 av_freep(&ofilter->name);
532 av_freep(&ofilter->formats);
533 av_freep(&ofilter->channel_layouts);
534 av_freep(&ofilter->sample_rates);
535 av_freep(&fg->outputs[j]);
537 av_freep(&fg->outputs);
538 av_freep(&fg->graph_desc);
540 av_freep(&filtergraphs[i]);
542 av_freep(&filtergraphs);
544 av_freep(&subtitle_out);
/* close output files (closing the underlying AVIO only for real-file
 * muxers) and free their contexts */
547 for (i = 0; i < nb_output_files; i++) {
548 OutputFile *of = output_files[i];
553 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
555 avformat_free_context(s);
556 av_dict_free(&of->opts);
558 av_freep(&output_files[i]);
560 for (i = 0; i < nb_output_streams; i++) {
561 OutputStream *ost = output_streams[i];
566 av_bsf_free(&ost->bsf_ctx);
568 av_frame_free(&ost->filtered_frame);
569 av_frame_free(&ost->last_frame);
570 av_dict_free(&ost->encoder_opts);
572 av_freep(&ost->forced_keyframes);
573 av_expr_free(ost->forced_keyframes_pexpr);
574 av_freep(&ost->avfilter);
575 av_freep(&ost->logfile_prefix);
577 av_freep(&ost->audio_channels_map);
578 ost->audio_channels_mapped = 0;
580 av_dict_free(&ost->sws_dict);
581 av_dict_free(&ost->swr_opts);
583 avcodec_free_context(&ost->enc_ctx);
584 avcodec_parameters_free(&ost->ref_par);
/* unref any packets still buffered before the muxer header was written */
586 if (ost->muxing_queue) {
587 while (av_fifo_size(ost->muxing_queue)) {
589 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
590 av_packet_unref(&pkt);
592 av_fifo_freep(&ost->muxing_queue);
595 av_freep(&output_streams[i]);
598 free_input_threads();
600 for (i = 0; i < nb_input_files; i++) {
601 avformat_close_input(&input_files[i]->ctx);
602 av_freep(&input_files[i]);
604 for (i = 0; i < nb_input_streams; i++) {
605 InputStream *ist = input_streams[i];
607 av_frame_free(&ist->decoded_frame);
608 av_frame_free(&ist->filter_frame);
609 av_dict_free(&ist->decoder_opts);
610 avsubtitle_free(&ist->prev_sub.subtitle);
611 av_frame_free(&ist->sub2video.frame);
612 av_freep(&ist->filters);
613 av_freep(&ist->hwaccel_device);
614 av_freep(&ist->dts_buffer);
616 avcodec_free_context(&ist->dec_ctx);
618 av_freep(&input_streams[i]);
/* fclose() is checked: a failed flush here could lose two-pass stats */
622 if (fclose(vstats_file))
623 av_log(NULL, AV_LOG_ERROR,
624 "Error closing vstats file, loss of information possible: %s\n",
625 av_err2str(AVERROR(errno)));
627 av_freep(&vstats_filename);
629 av_freep(&input_streams);
630 av_freep(&input_files);
631 av_freep(&output_streams);
632 av_freep(&output_files);
636 avformat_network_deinit();
638 if (received_sigterm) {
639 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
640 (int) received_sigterm);
641 } else if (ret && atomic_load(&transcode_init_done)) {
642 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to drop options that another context has already consumed. */
648 void remove_avoptions(AVDictionary **a, AVDictionary *b)
650 AVDictionaryEntry *t = NULL;
652 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
653 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fatal-log if any option in m was left unconsumed, i.e. the user supplied
 * an option nothing recognized (the exit call is elided in this extract). */
657 void assert_avoptions(AVDictionary *m)
659 AVDictionaryEntry *t;
660 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
661 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort after a codec was rejected for being experimental; encoder selects
 * the message wording (body elided in this extract). */
666 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, log the user/sys/real time consumed since the last
 * call (delta against the global current_time snapshot), labelled by the
 * printf-style fmt; a NULL fmt only resets the snapshot.
 * NOTE(review): the va_start/va_end and snapshot-update lines are elided in
 * this extract. */
671 static void update_benchmark(const char *fmt, ...)
673 if (do_benchmark_all) {
674 BenchmarkTimeStamps t = get_benchmark_time_stamps();
680 vsnprintf(buf, sizeof(buf), fmt, va);
682 av_log(NULL, AV_LOG_INFO,
683 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
684 t.user_usec - current_time.user_usec,
685 t.sys_usec - current_time.sys_usec,
686 t.real_usec - current_time.real_usec, buf);
/* Mark every output stream finished: the stream that triggered the close
 * gets this_stream's flags, all the rest get others' flags. */
692 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
695 for (i = 0; i < nb_output_streams; i++) {
696 OutputStream *ost2 = output_streams[i];
697 ost2->finished |= ost == ost2 ? this_stream : others;
/* Deliver one packet to the muxer for stream ost.  Enforces -frames limits
 * for non-reordered streams, buffers packets while the muxer header is not
 * yet written (bounded by max_muxing_queue_size), rescales timestamps to
 * the stream time base, sanitizes non-monotonic/invalid DTS, and finally
 * interleave-writes the packet.  The packet is always consumed.  unqueue is
 * non-zero when the packet is being replayed from the muxing queue so it is
 * not counted a second time.
 * NOTE(review): several closing braces / early returns are elided in this
 * extract. */
701 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
703 AVFormatContext *s = of->ctx;
704 AVStream *st = ost->st;
708 * Audio encoders may split the packets -- #frames in != #packets out.
709 * But there is no reordering, so we can limit the number of output packets
710 * by simply dropping them here.
711 * Counting encoded video frames needs to be done separately because of
712 * reordering, see do_video_out().
713 * Do not count the packet when unqueued because it has been counted when queued.
715 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
716 if (ost->frame_number >= ost->max_frames) {
717 av_packet_unref(pkt);
723 if (!of->header_written) {
724 AVPacket tmp_pkt = {0};
725 /* the muxer is not initialized yet, buffer the packet */
726 if (!av_fifo_space(ost->muxing_queue)) {
/* grow geometrically, capped at max_muxing_queue_size */
727 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
728 ost->max_muxing_queue_size);
729 if (new_size <= av_fifo_size(ost->muxing_queue)) {
730 av_log(NULL, AV_LOG_ERROR,
731 "Too many packets buffered for output stream %d:%d.\n",
732 ost->file_index, ost->st->index);
735 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
739 ret = av_packet_make_refcounted(pkt);
742 av_packet_move_ref(&tmp_pkt, pkt);
743 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* -vsync drop / negative -async: strip timestamps entirely */
747 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
748 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
749 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
751 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* harvest encoder quality/error stats from packet side data */
753 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
755 ost->quality = sd ? AV_RL32(sd) : -1;
756 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
758 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
760 ost->error[i] = AV_RL64(sd + 8 + 8*i);
765 if (ost->frame_rate.num && ost->is_cfr) {
766 if (pkt->duration > 0)
767 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
768 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
773 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
775 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
776 if (pkt->dts != AV_NOPTS_VALUE &&
777 pkt->pts != AV_NOPTS_VALUE &&
778 pkt->dts > pkt->pts) {
779 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
781 ost->file_index, ost->st->index);
/* pick the median of (pts, dts, last_mux_dts+1) as the guessed DTS:
 * sum minus min minus max leaves the middle value */
783 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
784 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
785 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
787 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
788 pkt->dts != AV_NOPTS_VALUE &&
789 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
790 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* strict muxers require strictly increasing DTS, non-strict allow equal */
791 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
792 if (pkt->dts < max) {
793 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
795 loglevel = AV_LOG_ERROR;
796 av_log(s, loglevel, "Non-monotonous DTS in output stream "
797 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
798 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
800 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
803 av_log(s, loglevel, "changing to %"PRId64". This may result "
804 "in incorrect timestamps in the output file.\n",
806 if (pkt->pts >= pkt->dts)
807 pkt->pts = FFMAX(pkt->pts, max);
812 ost->last_mux_dts = pkt->dts;
814 ost->data_size += pkt->size;
815 ost->packets_written++;
817 pkt->stream_index = ost->index;
820 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
821 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
822 av_get_media_type_string(ost->enc_ctx->codec_type),
823 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
824 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
829 ret = av_interleaved_write_frame(s, pkt);
/* a write error finishes all output streams; error branch partly elided */
831 print_error("av_interleaved_write_frame()", ret);
832 main_return_code = 1;
833 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
835 av_packet_unref(pkt);
/* Mark one output stream's encoder as finished and clamp the owning file's
 * recording time to the stream's current end position so the other streams
 * stop at the same point. */
838 static void close_output_stream(OutputStream *ost)
840 OutputFile *of = output_files[ost->file_index];
842 ost->finished |= ENCODER_FINISHED;
844 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
845 of->recording_time = FFMIN(of->recording_time, end);
850 * Send a single packet to the output, applying any bitstream filters
851 * associated with the output stream. This may result in any number
852 * of packets actually being written, depending on what bitstream
853 * filters are applied. The supplied packet is consumed and will be
854 * blank (as if newly-allocated) when this function returns.
856 * If eof is set, instead indicate EOF to all bitstream filters and
857 * therefore flush any delayed packets to the output. A blank packet
858 * must be supplied in this case.
/* See the contract in the comment block above: run the packet through the
 * stream's bitstream filter chain (or pass it straight to write_packet()
 * when there is none — that branch's condition is elided in this extract);
 * eof flushes the bsf chain instead. */
860 static void output_packet(OutputFile *of, AVPacket *pkt,
861 OutputStream *ost, int eof)
865 /* apply the output bitstream filters */
867 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
/* drain every packet the bsf produces for this input */
870 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
871 write_packet(of, pkt, ost, 0);
872 if (ret == AVERROR(EAGAIN))
875 write_packet(of, pkt, ost, 0);
878 if (ret < 0 && ret != AVERROR_EOF) {
879 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
880 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether the stream is still within the file's -t recording window;
 * when the limit is reached the stream is closed (the return statements are
 * elided in this extract). */
886 static int check_recording_time(OutputStream *ost)
888 OutputFile *of = output_files[ost->file_index];
890 if (of->recording_time != INT64_MAX &&
891 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
892 AV_TIME_BASE_Q) >= 0) {
893 close_output_stream(ost);
/* Encode one audio frame with the send-frame / receive-packet API and hand
 * each produced packet to output_packet().  Missing or sync-disabled frame
 * pts is replaced by the running sample counter (sync_opts), which then
 * advances by nb_samples.
 * NOTE(review): the receive loop's braces, error branches and the function
 * tail are elided in this extract. */
899 static void do_audio_out(OutputFile *of, OutputStream *ost,
902 AVCodecContext *enc = ost->enc_ctx;
906 av_init_packet(&pkt);
910 if (!check_recording_time(ost))
913 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
914 frame->pts = ost->sync_opts;
915 ost->sync_opts = frame->pts + frame->nb_samples;
916 ost->samples_encoded += frame->nb_samples;
917 ost->frames_encoded++;
919 av_assert0(pkt.size || !pkt.data);
920 update_benchmark(NULL);
922 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
923 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
924 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
925 enc->time_base.num, enc->time_base.den);
928 ret = avcodec_send_frame(enc, frame);
933 ret = avcodec_receive_packet(enc, &pkt);
934 if (ret == AVERROR(EAGAIN))
939 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* encoder runs in enc->time_base; muxer side expects mux_timebase */
941 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
944 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
945 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
946 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
947 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
950 output_packet(of, &pkt, ost, 0);
955 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle with avcodec_encode_subtitle() into the shared
 * subtitle_out scratch buffer and emit the resulting packet(s).  DVB
 * subtitles are encoded twice (draw packet + clear packet), hence the
 * nb-iteration loop; timestamps are shifted by the file start_time so
 * -ss/-t accounting works.
 * NOTE(review): parts of the signature, error exits and closing braces are
 * elided in this extract. */
959 static void do_subtitle_out(OutputFile *of,
963 int subtitle_out_max_size = 1024 * 1024;
964 int subtitle_out_size, nb, i;
969 if (sub->pts == AV_NOPTS_VALUE) {
970 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the 1 MiB scratch buffer, freed in ffmpeg_cleanup() */
979 subtitle_out = av_malloc(subtitle_out_max_size);
981 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
986 /* Note: DVB subtitle need one packet to draw them and one other
987 packet to clear them */
988 /* XXX: signal it in the codec context ? */
989 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
994 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
996 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
997 pts -= output_files[ost->file_index]->start_time;
998 for (i = 0; i < nb; i++) {
999 unsigned save_num_rects = sub->num_rects;
1001 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1002 if (!check_recording_time(ost))
1006 // start_display_time is required to be 0
1007 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1008 sub->end_display_time -= sub->start_display_time;
1009 sub->start_display_time = 0;
1013 ost->frames_encoded++;
1015 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1016 subtitle_out_max_size, sub);
/* the encoder may clobber num_rects (e.g. for the DVB clear packet) */
1018 sub->num_rects = save_num_rects;
1019 if (subtitle_out_size < 0) {
1020 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1024 av_init_packet(&pkt);
1025 pkt.data = subtitle_out;
1026 pkt.size = subtitle_out_size;
1027 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1028 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1029 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1030 /* XXX: the pts correction is handled here. Maybe handling
1031 it in the codec would be better */
1033 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1035 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1038 output_packet(of, &pkt, ost, 0);
1042 static void do_video_out(OutputFile *of,
1044 AVFrame *next_picture,
1047 int ret, format_video_sync;
1049 AVCodecContext *enc = ost->enc_ctx;
1050 AVCodecParameters *mux_par = ost->st->codecpar;
1051 AVRational frame_rate;
1052 int nb_frames, nb0_frames, i;
1053 double delta, delta0;
1054 double duration = 0;
1056 InputStream *ist = NULL;
1057 AVFilterContext *filter = ost->filter->filter;
1059 if (ost->source_index >= 0)
1060 ist = input_streams[ost->source_index];
1062 frame_rate = av_buffersink_get_frame_rate(filter);
1063 if (frame_rate.num > 0 && frame_rate.den > 0)
1064 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1066 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1067 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1069 if (!ost->filters_script &&
1071 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1074 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1075 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1078 if (!next_picture) {
1080 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1081 ost->last_nb0_frames[1],
1082 ost->last_nb0_frames[2]);
1084 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1085 delta = delta0 + duration;
1087 /* by default, we output a single frame */
1088 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1091 format_video_sync = video_sync_method;
1092 if (format_video_sync == VSYNC_AUTO) {
1093 if(!strcmp(of->ctx->oformat->name, "avi")) {
1094 format_video_sync = VSYNC_VFR;
1096 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1098 && format_video_sync == VSYNC_CFR
1099 && input_files[ist->file_index]->ctx->nb_streams == 1
1100 && input_files[ist->file_index]->input_ts_offset == 0) {
1101 format_video_sync = VSYNC_VSCFR;
1103 if (format_video_sync == VSYNC_CFR && copy_ts) {
1104 format_video_sync = VSYNC_VSCFR;
1107 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1111 format_video_sync != VSYNC_PASSTHROUGH &&
1112 format_video_sync != VSYNC_DROP) {
1113 if (delta0 < -0.6) {
1114 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1116 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1117 sync_ipts = ost->sync_opts;
1122 switch (format_video_sync) {
1124 if (ost->frame_number == 0 && delta0 >= 0.5) {
1125 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1128 ost->sync_opts = llrint(sync_ipts);
1131 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1132 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1134 } else if (delta < -1.1)
1136 else if (delta > 1.1) {
1137 nb_frames = lrintf(delta);
1139 nb0_frames = llrintf(delta0 - 0.6);
1145 else if (delta > 0.6)
1146 ost->sync_opts = llrint(sync_ipts);
1149 case VSYNC_PASSTHROUGH:
1150 ost->sync_opts = llrint(sync_ipts);
1157 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1158 nb0_frames = FFMIN(nb0_frames, nb_frames);
1160 memmove(ost->last_nb0_frames + 1,
1161 ost->last_nb0_frames,
1162 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1163 ost->last_nb0_frames[0] = nb0_frames;
1165 if (nb0_frames == 0 && ost->last_dropped) {
1167 av_log(NULL, AV_LOG_VERBOSE,
1168 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1169 ost->frame_number, ost->st->index, ost->last_frame->pts);
1171 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1172 if (nb_frames > dts_error_threshold * 30) {
1173 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1177 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1178 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1179 if (nb_frames_dup > dup_warning) {
1180 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1184 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1186 /* duplicates frame if needed */
1187 for (i = 0; i < nb_frames; i++) {
1188 AVFrame *in_picture;
1189 int forced_keyframe = 0;
1191 av_init_packet(&pkt);
1195 if (i < nb0_frames && ost->last_frame) {
1196 in_picture = ost->last_frame;
1198 in_picture = next_picture;
1203 in_picture->pts = ost->sync_opts;
1205 if (!check_recording_time(ost))
1208 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1209 ost->top_field_first >= 0)
1210 in_picture->top_field_first = !!ost->top_field_first;
1212 if (in_picture->interlaced_frame) {
1213 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1214 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1216 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1218 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1220 in_picture->quality = enc->global_quality;
1221 in_picture->pict_type = 0;
1223 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1224 in_picture->pts != AV_NOPTS_VALUE)
1225 ost->forced_kf_ref_pts = in_picture->pts;
1227 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1228 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1229 if (ost->forced_kf_index < ost->forced_kf_count &&
1230 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1231 ost->forced_kf_index++;
1232 forced_keyframe = 1;
1233 } else if (ost->forced_keyframes_pexpr) {
1235 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1236 res = av_expr_eval(ost->forced_keyframes_pexpr,
1237 ost->forced_keyframes_expr_const_values, NULL);
1238 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1239 ost->forced_keyframes_expr_const_values[FKF_N],
1240 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1241 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1242 ost->forced_keyframes_expr_const_values[FKF_T],
1243 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1246 forced_keyframe = 1;
1247 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1248 ost->forced_keyframes_expr_const_values[FKF_N];
1249 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1250 ost->forced_keyframes_expr_const_values[FKF_T];
1251 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1254 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1255 } else if ( ost->forced_keyframes
1256 && !strncmp(ost->forced_keyframes, "source", 6)
1257 && in_picture->key_frame==1
1259 forced_keyframe = 1;
1262 if (forced_keyframe) {
1263 in_picture->pict_type = AV_PICTURE_TYPE_I;
1264 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1267 update_benchmark(NULL);
1269 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1270 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1271 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1272 enc->time_base.num, enc->time_base.den);
1275 ost->frames_encoded++;
1277 ret = avcodec_send_frame(enc, in_picture);
1280 // Make sure Closed Captions will not be duplicated
1281 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1284 ret = avcodec_receive_packet(enc, &pkt);
1285 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1286 if (ret == AVERROR(EAGAIN))
1292 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1293 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1294 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1295 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1298 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1299 pkt.pts = ost->sync_opts;
1301 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1304 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1305 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1306 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1307 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1310 frame_size = pkt.size;
1311 output_packet(of, &pkt, ost, 0);
1313 /* if two pass, output log */
1314 if (ost->logfile && enc->stats_out) {
1315 fprintf(ost->logfile, "%s", enc->stats_out);
1320 * For video, number of frames in == number of packets out.
1321 * But there may be reordering, so we can't throw away frames on encoder
1322 * flush, we need to limit them here, before they go into encoder.
1324 ost->frame_number++;
1326 if (vstats_filename && frame_size)
1327 do_video_stats(ost, frame_size);
1330 if (!ost->last_frame)
1331 ost->last_frame = av_frame_alloc();
1332 av_frame_unref(ost->last_frame);
1333 if (next_picture && ost->last_frame)
1334 av_frame_ref(ost->last_frame, next_picture);
1336 av_frame_free(&ost->last_frame);
1340 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1344 static double psnr(double d)
1346 return -10.0 * log10(d);
/*
 * Append one line of per-frame statistics for a video output stream to the
 * -vstats file (frame number, quality, PSNR, sizes, bitrates, picture type).
 * NOTE(review): extraction dropped some lines of this function (opening brace,
 * several declarations and error checks); comments below describe only the
 * visible code.
 */
1349 static void do_video_stats(OutputStream *ost, int frame_size)
1351 AVCodecContext *enc;
1353 double ti1, bitrate, avg_bitrate;
1355 /* this is executed just the first time do_video_stats is called */
1357 vstats_file = fopen(vstats_filename, "w");
1365 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1366 frame_number = ost->st->nb_frames;
/* vstats_version selects between the legacy and the extended line format */
1367 if (vstats_version <= 1) {
1368 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1369 ost->quality / (float)FF_QP2LAMBDA);
1371 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1372 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only meaningful when the encoder was asked to compute error stats */
1375 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1376 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1378 fprintf(vstats_file,"f_size= %6d ", frame_size);
1379 /* compute pts value */
1380 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate from this frame, average from total bytes so far */
1384 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1385 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1386 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1387 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1388 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Forward declaration; the definition appears later in this file. */
1392 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark a stream — and every other stream of the same output file — as fully
 * finished for both the encoder and the muxer, so no more data is produced
 * or written for that file.
 */
1394 static void finish_output_stream(OutputStream *ost)
1396 OutputFile *of = output_files[ost->file_index];
1399 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* all sibling streams of this output file are closed together */
1402 for (i = 0; i < of->ctx->nb_streams; i++)
1403 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
 * Drain every configured buffersink: pull filtered frames from the graphs
 * and hand them to do_video_out()/do_audio_out(). With flush != 0, an EOF
 * from a video sink triggers a final flush of the encoder.
 */
1408 * Get and encode new output from any of the filtergraphs, without causing
1411 * @return 0 for success, <0 for severe errors
1413 static int reap_filters(int flush)
1415 AVFrame *filtered_frame = NULL;
1418 /* Reap all buffers present in the buffer sinks */
1419 for (i = 0; i < nb_output_streams; i++) {
1420 OutputStream *ost = output_streams[i];
1421 OutputFile *of = output_files[ost->file_index];
1422 AVFilterContext *filter;
1423 AVCodecContext *enc = ost->enc_ctx;
/* skip streams whose filtergraph is not (yet) configured */
1426 if (!ost->filter || !ost->filter->graph->graph)
1428 filter = ost->filter->filter;
/* lazily initialize the output stream the first time we see data for it */
1430 if (!ost->initialized) {
1431 char error[1024] = "";
1432 ret = init_output_stream(ost, error, sizeof(error));
1434 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1435 ost->file_index, ost->index, error);
/* the reusable per-stream frame is allocated on first use */
1440 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1441 return AVERROR(ENOMEM);
1443 filtered_frame = ost->filtered_frame;
1446 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: NO_REQUEST means do not ask upstream for more data */
1447 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1448 AV_BUFFERSINK_FLAG_NO_REQUEST);
1450 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1451 av_log(NULL, AV_LOG_WARNING,
1452 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1453 } else if (flush && ret == AVERROR_EOF) {
/* on EOF during flush, push a NULL frame so the video encoder drains */
1454 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1455 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1459 if (ost->finished) {
1460 av_frame_unref(filtered_frame);
1463 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1464 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1465 AVRational filter_tb = av_buffersink_get_time_base(filter);
1466 AVRational tb = enc->time_base;
/* widen the timebase denominator to keep extra fractional precision
 * when computing float_pts */
1467 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1469 tb.den <<= extra_bits;
1471 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1472 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1473 float_pts /= 1 << extra_bits;
1474 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1475 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* rescale the integer pts into the encoder timebase, offset by start_time */
1477 filtered_frame->pts =
1478 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1479 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1482 switch (av_buffersink_get_type(filter)) {
1483 case AVMEDIA_TYPE_VIDEO:
/* propagate the filter's SAR unless the user forced an aspect ratio */
1484 if (!ost->frame_aspect_ratio.num)
1485 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1488 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1489 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1491 enc->time_base.num, enc->time_base.den);
1494 do_video_out(of, ost, filtered_frame, float_pts);
1496 case AVMEDIA_TYPE_AUDIO:
/* reject mid-stream channel-count changes the encoder cannot handle */
1497 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1498 enc->channels != filtered_frame->channels) {
1499 av_log(NULL, AV_LOG_ERROR,
1500 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1503 do_audio_out(of, ost, filtered_frame);
1506 // TODO support subtitle filters
1510 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type byte totals and muxing overhead at
 * AV_LOG_INFO, then verbose per-input/per-output stream packet and frame
 * counts, and finally a warning if nothing at all was encoded.
 */
1517 static void print_final_stats(int64_t total_size)
1519 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1520 uint64_t subtitle_size = 0;
1521 uint64_t data_size = 0;
/* percent < 0 means "overhead unknown" and prints as "unknown" below */
1522 float percent = -1.0;
1526 for (i = 0; i < nb_output_streams; i++) {
1527 OutputStream *ost = output_streams[i];
1528 switch (ost->enc_ctx->codec_type) {
1529 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1530 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1531 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1532 default: other_size += ost->data_size; break;
1534 extra_size += ost->enc_ctx->extradata_size;
1535 data_size += ost->data_size;
1536 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1537 != AV_CODEC_FLAG_PASS1)
/* overhead = container bytes beyond the raw stream payload */
1541 if (data_size && total_size>0 && total_size >= data_size)
1542 percent = 100.0 * (total_size - data_size) / data_size;
1544 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1545 video_size / 1024.0,
1546 audio_size / 1024.0,
1547 subtitle_size / 1024.0,
1548 other_size / 1024.0,
1549 extra_size / 1024.0);
1551 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1553 av_log(NULL, AV_LOG_INFO, "unknown");
1554 av_log(NULL, AV_LOG_INFO, "\n");
1556 /* print verbose per-stream stats */
1557 for (i = 0; i < nb_input_files; i++) {
1558 InputFile *f = input_files[i];
/* note: this local total_size shadows the parameter on purpose */
1559 uint64_t total_packets = 0, total_size = 0;
1561 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1564 for (j = 0; j < f->nb_streams; j++) {
1565 InputStream *ist = input_streams[f->ist_index + j];
1566 enum AVMediaType type = ist->dec_ctx->codec_type;
1568 total_size += ist->data_size;
1569 total_packets += ist->nb_packets;
1571 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1572 i, j, media_type_string(type));
1573 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1574 ist->nb_packets, ist->data_size);
1576 if (ist->decoding_needed) {
1577 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1578 ist->frames_decoded);
1579 if (type == AVMEDIA_TYPE_AUDIO)
1580 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1581 av_log(NULL, AV_LOG_VERBOSE, "; ");
1584 av_log(NULL, AV_LOG_VERBOSE, "\n");
1587 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1588 total_packets, total_size);
/* same report for the output side, counting muxed packets */
1591 for (i = 0; i < nb_output_files; i++) {
1592 OutputFile *of = output_files[i];
1593 uint64_t total_packets = 0, total_size = 0;
1595 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1598 for (j = 0; j < of->ctx->nb_streams; j++) {
1599 OutputStream *ost = output_streams[of->ost_index + j];
1600 enum AVMediaType type = ost->enc_ctx->codec_type;
1602 total_size += ost->data_size;
1603 total_packets += ost->packets_written;
1605 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1606 i, j, media_type_string(type));
1607 if (ost->encoding_needed) {
1608 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1609 ost->frames_encoded);
1610 if (type == AVMEDIA_TYPE_AUDIO)
1611 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1612 av_log(NULL, AV_LOG_VERBOSE, "; ");
1615 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1616 ost->packets_written, ost->data_size);
1618 av_log(NULL, AV_LOG_VERBOSE, "\n");
1621 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1622 total_packets, total_size);
/* warn loudly when the whole run produced zero payload bytes */
1624 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1625 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1627 av_log(NULL, AV_LOG_WARNING, "\n");
1629 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic one-line progress report (frame/fps/q/size/time/bitrate/
 * speed) to stderr and, when -progress is active, a machine-readable version
 * to progress_avio. Rate-limited to one update per 500 ms unless this is the
 * final report, in which case print_final_stats() is also invoked.
 */
1634 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1636 AVBPrint buf, buf_script;
1638 AVFormatContext *oc;
1640 AVCodecContext *enc;
1641 int frame_number, vid, i;
1644 int64_t pts = INT64_MIN + 1;
/* static state: remembers when the previous report was printed */
1645 static int64_t last_time = -1;
1646 static int qp_histogram[52];
1647 int hours, mins, secs, us;
1648 const char *hours_sign;
1652 if (!print_stats && !is_last_report && !progress_avio)
1655 if (!is_last_report) {
1656 if (last_time == -1) {
1657 last_time = cur_time;
/* throttle: at most one progress line every 500 ms */
1660 if ((cur_time - last_time) < 500000)
1662 last_time = cur_time;
1665 t = (cur_time-timer_start) / 1000000.0;
1668 oc = output_files[0]->ctx;
1670 total_size = avio_size(oc->pb);
1671 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1672 total_size = avio_tell(oc->pb);
1675 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1676 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1677 for (i = 0; i < nb_output_streams; i++) {
1679 ost = output_streams[i];
1681 if (!ost->stream_copy)
1682 q = ost->quality / (float) FF_QP2LAMBDA;
/* only one video stream drives the frame/fps fields; 'vid' latches it */
1684 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1685 av_bprintf(&buf, "q=%2.1f ", q);
1686 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1687 ost->file_index, ost->index, q);
1689 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1692 frame_number = ost->frame_number;
1693 fps = t > 1 ? frame_number / t : 0;
1694 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1695 frame_number, fps < 9.95, fps, q);
1696 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1697 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1698 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1699 ost->file_index, ost->index, q);
1701 av_bprintf(&buf, "L");
/* qp histogram: one hex digit per bucket, log2-compressed counts */
1705 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1707 for (j = 0; j < 32; j++)
1708 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1711 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1713 double error, error_sum = 0;
1714 double scale, scale_sum = 0;
1716 char type[3] = { 'Y','U','V' };
1717 av_bprintf(&buf, "PSNR=");
1718 for (j = 0; j < 3; j++) {
/* final report uses the codec's accumulated error, periodic
 * reports use the per-frame error */
1719 if (is_last_report) {
1720 error = enc->error[j];
1721 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1723 error = ost->error[j];
1724 scale = enc->width * enc->height * 255.0 * 255.0;
1730 p = psnr(error / scale);
1731 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1732 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1733 ost->file_index, ost->index, type[j] | 32, p);
1735 p = psnr(error_sum / scale_sum);
1736 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1737 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1738 ost->file_index, ost->index, p);
1742 /* compute min output value */
1743 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1744 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1745 ost->st->time_base, AV_TIME_BASE_Q));
1747 nb_frames_drop += ost->last_dropped;
/* split |pts| (microseconds) into h:m:s.us for display */
1750 secs = FFABS(pts) / AV_TIME_BASE;
1751 us = FFABS(pts) % AV_TIME_BASE;
1756 hours_sign = (pts < 0) ? "-" : "";
1758 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1759 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1761 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1762 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1763 if (pts == AV_NOPTS_VALUE) {
1764 av_bprintf(&buf, "N/A ");
1766 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1767 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1771 av_bprintf(&buf, "bitrate=N/A");
1772 av_bprintf(&buf_script, "bitrate=N/A\n");
1774 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1775 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
/* the buf_script mirror is the key=value stream consumed by -progress */
1778 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1779 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1780 if (pts == AV_NOPTS_VALUE) {
1781 av_bprintf(&buf_script, "out_time_us=N/A\n");
1782 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1783 av_bprintf(&buf_script, "out_time=N/A\n");
1785 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1786 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1787 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1788 hours_sign, hours, mins, secs, us);
1791 if (nb_frames_dup || nb_frames_drop)
1792 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1793 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1794 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1797 av_bprintf(&buf, " speed=N/A");
1798 av_bprintf(&buf_script, "speed=N/A\n");
1800 av_bprintf(&buf, " speed=%4.3gx", speed);
1801 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1804 if (print_stats || is_last_report) {
/* '\r' overwrites the previous line; final report ends with '\n' */
1805 const char end = is_last_report ? '\n' : '\r';
1806 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1807 fprintf(stderr, "%s %c", buf.str, end);
1809 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1813 av_bprint_finalize(&buf, NULL);
1815 if (progress_avio) {
1816 av_bprintf(&buf_script, "progress=%s\n",
1817 is_last_report ? "end" : "continue");
1818 avio_write(progress_avio, buf_script.str,
1819 FFMIN(buf_script.len, buf_script.size - 1));
1820 avio_flush(progress_avio);
1821 av_bprint_finalize(&buf_script, NULL);
1822 if (is_last_report) {
1823 if ((ret = avio_closep(&progress_avio)) < 0)
1824 av_log(NULL, AV_LOG_ERROR,
1825 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1830 print_final_stats(total_size);
1833 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1835 // We never got any input. Set a fake format, which will
1836 // come from libavformat.
1837 ifilter->format = par->format;
1838 ifilter->sample_rate = par->sample_rate;
1839 ifilter->channels = par->channels;
1840 ifilter->channel_layout = par->channel_layout;
1841 ifilter->width = par->width;
1842 ifilter->height = par->height;
1843 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/*
 * Drain every audio/video encoder at end of input: send a NULL frame and
 * mux all remaining packets. Streams that never received data are first
 * lazily initialized (configuring their filtergraph from codec parameters
 * if needed) so a syntactically valid, if empty, stream is written.
 * NOTE(review): extraction dropped interior lines (loop/brace structure);
 * comments describe only the visible statements.
 */
1846 static void flush_encoders(void)
1850 for (i = 0; i < nb_output_streams; i++) {
1851 OutputStream *ost = output_streams[i];
1852 AVCodecContext *enc = ost->enc_ctx;
1853 OutputFile *of = output_files[ost->file_index];
/* stream-copy outputs have nothing to flush */
1855 if (!ost->encoding_needed)
1858 // Try to enable encoding with no input frames.
1859 // Maybe we should just let encoding fail instead.
1860 if (!ost->initialized) {
1861 FilterGraph *fg = ost->filter->graph;
1862 char error[1024] = "";
1864 av_log(NULL, AV_LOG_WARNING,
1865 "Finishing stream %d:%d without any data written to it.\n",
1866 ost->file_index, ost->st->index);
1868 if (ost->filter && !fg->graph) {
/* fall back to codecpar-derived parameters for inputs that never
 * produced a decoded frame */
1870 for (x = 0; x < fg->nb_inputs; x++) {
1871 InputFilter *ifilter = fg->inputs[x];
1872 if (ifilter->format < 0)
1873 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1876 if (!ifilter_has_all_input_formats(fg))
1879 ret = configure_filtergraph(fg);
1881 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1885 finish_output_stream(ost);
1888 ret = init_output_stream(ost, error, sizeof(error));
1890 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1891 ost->file_index, ost->index, error);
/* only audio and video encoders are drained here */
1896 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1900 const char *desc = NULL;
1904 switch (enc->codec_type) {
1905 case AVMEDIA_TYPE_AUDIO:
1908 case AVMEDIA_TYPE_VIDEO:
1915 av_init_packet(&pkt);
1919 update_benchmark(NULL);
/* EAGAIN from receive_packet means the encoder wants (NULL) input first */
1921 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1922 ret = avcodec_send_frame(enc, NULL);
1924 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1931 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1932 if (ret < 0 && ret != AVERROR_EOF) {
1933 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass stats keep flowing during the flush as well */
1938 if (ost->logfile && enc->stats_out) {
1939 fprintf(ost->logfile, "%s", enc->stats_out);
1941 if (ret == AVERROR_EOF) {
/* eof=1 tells output_packet() this stream is finished */
1942 output_packet(of, &pkt, ost, 1);
1945 if (ost->finished & MUXER_FINISHED) {
1946 av_packet_unref(&pkt);
1949 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1950 pkt_size = pkt.size;
1951 output_packet(of, &pkt, ost, 0);
1952 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1953 do_video_stats(ost, pkt_size);
1960 * Check whether a packet from ist should be written into ost at this time
1962 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1964 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files, to match ost->source_index */
1965 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* ost is not fed by this input stream */
1967 if (ost->source_index != ist_index)
/* drop data that precedes the output file's requested start time */
1973 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one packet from an input stream to an output stream without
 * re-encoding: apply -ss/-t trimming, rescale timestamps into the muxer
 * timebase, and pass the result to output_packet(). A NULL/EOF packet
 * flushes the output bitstream filters.
 * NOTE(review): extraction dropped interior lines (EOF test, returns);
 * comments describe only the visible statements.
 */
1979 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1981 OutputFile *of = output_files[ost->file_index];
1982 InputFile *f = input_files [ist->file_index];
1983 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1984 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1987 // EOF: flush output bitstream filters.
1989 av_init_packet(&opkt);
1992 output_packet(of, &opkt, ost, 1);
/* without -copyinkf, drop leading non-keyframe packets */
1996 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1997 !ost->copy_initial_nonkeyframes
2000 if (!ost->frame_number && !ost->copy_prior_start) {
2001 int64_t comp_start = start_time;
2002 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2003 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
/* compare in whichever timebase has a defined timestamp */
2004 if (pkt->pts == AV_NOPTS_VALUE ?
2005 ist->pts < comp_start :
2006 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* -t / recording_time reached for the output file: close the stream */
2010 if (of->recording_time != INT64_MAX &&
2011 ist->pts >= of->recording_time + start_time) {
2012 close_output_stream(ost);
2016 if (f->recording_time != INT64_MAX) {
2017 start_time = f->ctx->start_time;
2018 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2019 start_time += f->start_time;
2020 if (ist->pts >= f->recording_time + start_time) {
2021 close_output_stream(ost);
2026 /* force the input stream PTS */
2027 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2030 if (av_packet_ref(&opkt, pkt) < 0)
2033 if (pkt->pts != AV_NOPTS_VALUE)
2034 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2036 if (pkt->dts == AV_NOPTS_VALUE) {
/* no packet dts: fall back to the input stream's running dts */
2037 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2038 } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
/* audio: rescale with duration awareness to avoid rounding drift */
2039 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2041 duration = ist->dec_ctx->frame_size;
2042 opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2043 (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2044 &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2045 /* dts will be set immediately afterwards to what pts is now */
2046 opkt.pts = opkt.dts - ost_tb_start_time;
2048 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2049 opkt.dts -= ost_tb_start_time;
2051 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2053 output_packet(of, &opkt, ost, 0);
/*
 * If the decoder reports no channel layout, pick the default layout for its
 * channel count (subject to -guess_layout_max) and warn that it was guessed.
 * NOTE(review): the return statements were dropped by extraction; visible
 * code only shows the guessing path.
 */
2056 int guess_input_channel_layout(InputStream *ist)
2058 AVCodecContext *dec = ist->dec_ctx;
2060 if (!dec->channel_layout) {
2061 char layout_name[256];
/* refuse to guess for very high channel counts (user-configurable cap) */
2063 if (dec->channels > ist->guess_layout_max)
2065 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2066 if (!dec->channel_layout)
2068 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2069 dec->channels, dec->channel_layout);
2070 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2071 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Book-keeping after a decode attempt: count successes/failures in
 * decode_error_stat[], optionally abort on error (-xerror), and flag
 * corrupt decoded frames (fatal when exit_on_error is set).
 */
2076 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
2078 if (*got_output || ret<0)
2079 decode_error_stat[ret<0] ++;
2081 if (ret < 0 && exit_on_error)
2084 if (*got_output && ist) {
2085 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2086 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2087 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2094 // Filters can be configured only if the formats of all inputs are known.
2095 static int ifilter_has_all_input_formats(FilterGraph *fg)
2098 for (i = 0; i < fg->nb_inputs; i++) {
2099 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2100 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Feed one decoded frame into a filtergraph input. If the frame's parameters
 * differ from what the graph was configured with (and reinit is allowed),
 * the graph is reconfigured; frames arriving before all inputs have known
 * formats are buffered in the input's frame_queue FIFO.
 */
2106 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2108 FilterGraph *fg = ifilter->graph;
2109 int need_reinit, ret, i;
2111 /* determine if the parameters for this input changed */
2112 need_reinit = ifilter->format != frame->format;
2114 switch (ifilter->ist->st->codecpar->codec_type) {
2115 case AVMEDIA_TYPE_AUDIO:
2116 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2117 ifilter->channels != frame->channels ||
2118 ifilter->channel_layout != frame->channel_layout;
2120 case AVMEDIA_TYPE_VIDEO:
2121 need_reinit |= ifilter->width != frame->width ||
2122 ifilter->height != frame->height;
/* user disabled reinit and the graph already exists: keep it as-is */
2126 if (!ifilter->ist->reinit_filters && fg->graph)
/* a change of hw frames context also forces a reinit */
2129 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2130 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2134 ret = ifilter_parameters_from_frame(ifilter, frame);
2139 /* (re)init the graph if possible, otherwise buffer the frame and return */
2140 if (need_reinit || !fg->graph) {
2141 for (i = 0; i < fg->nb_inputs; i++) {
2142 if (!ifilter_has_all_input_formats(fg)) {
/* not all inputs known yet: clone and queue the frame for later */
2143 AVFrame *tmp = av_frame_clone(frame);
2145 return AVERROR(ENOMEM);
2146 av_frame_unref(frame);
/* grow the FIFO when full before enqueueing the clone */
2148 if (!av_fifo_space(ifilter->frame_queue)) {
2149 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2151 av_frame_free(&tmp);
2155 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain pending output of the old graph before reconfiguring */
2160 ret = reap_filters(1);
2161 if (ret < 0 && ret != AVERROR_EOF) {
2162 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2166 ret = configure_filtergraph(fg);
2168 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2173 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2175 if (ret != AVERROR_EOF)
2176 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal EOF on a filtergraph input. If the graph was already configured,
 * close the buffersrc at the given pts; otherwise fall back to codecpar
 * parameters, and fail if an audio/video input still has no known format.
 */
2183 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2189 if (ifilter->filter) {
2190 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2194 // the filtergraph was never configured
2195 if (ifilter->format < 0)
2196 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2197 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2198 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2199 return AVERROR_INVALIDDATA;
2206 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2207 // There is the following difference: if you got a frame, you must call
2208 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2209 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2210 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2217 ret = avcodec_send_packet(avctx, pkt);
2218 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2219 // decoded frames with avcodec_receive_frame() until done.
2220 if (ret < 0 && ret != AVERROR_EOF)
2224 ret = avcodec_receive_frame(avctx, frame);
2225 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Distribute one decoded frame to every filtergraph input fed by this input
 * stream. All but the last filter get a reference copy (via ist->filter_frame)
 * so the original frame can be handed to the final one; AVERROR_EOF from a
 * filter is deliberately ignored.
 */
2233 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2238 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2239 for (i = 0; i < ist->nb_filters; i++) {
/* every filter except the last receives a ref, not the frame itself */
2240 if (i < ist->nb_filters - 1) {
2241 f = ist->filter_frame;
2242 ret = av_frame_ref(f, decoded_frame);
2247 ret = ifilter_send_frame(ist->filters[i], f);
2248 if (ret == AVERROR_EOF)
2249 ret = 0; /* ignore */
2251 av_log(NULL, AV_LOG_ERROR,
2252 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet for an input stream: run the decoder, validate the
 * sample rate, advance the stream's predicted next_pts/next_dts by the frame
 * duration, derive a pts for the decoded frame (frame pts, packet pts, or the
 * stream dts as fallback), then forward the frame to the filtergraphs.
 * NOTE(review): extraction dropped some interior lines (goto/return paths);
 * comments describe only the visible statements.
 */
2259 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2262 AVFrame *decoded_frame;
2263 AVCodecContext *avctx = ist->dec_ctx;
2265 AVRational decoded_frame_tb;
/* lazily allocate the per-stream reusable frames */
2267 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2268 return AVERROR(ENOMEM);
2269 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2270 return AVERROR(ENOMEM);
2271 decoded_frame = ist->decoded_frame;
2273 update_benchmark(NULL);
2274 ret = decode(avctx, decoded_frame, got_output, pkt);
2275 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* a decoder reporting a non-positive sample rate is broken input */
2279 if (ret >= 0 && avctx->sample_rate <= 0) {
2280 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2281 ret = AVERROR_INVALIDDATA;
2284 if (ret != AVERROR_EOF)
2285 check_decode_result(ist, got_output, ret);
2287 if (!*got_output || ret < 0)
2290 ist->samples_decoded += decoded_frame->nb_samples;
2291 ist->frames_decoded++;
2293 /* increment next_dts to use for the case where the input stream does not
2294 have timestamps or there are multiple frames in the packet */
2295 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2297 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* choose a pts source for the frame, in order of preference */
2300 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2301 decoded_frame_tb = ist->st->time_base;
2302 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2303 decoded_frame->pts = pkt->pts;
2304 decoded_frame_tb = ist->st->time_base;
2306 decoded_frame->pts = ist->dts;
2307 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale into sample units, tracking rounding drift across frames */
2309 if (decoded_frame->pts != AV_NOPTS_VALUE)
2310 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2311 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2312 (AVRational){1, avctx->sample_rate});
2313 ist->nb_samples = decoded_frame->nb_samples;
2314 err = send_frame_to_filters(ist, decoded_frame);
2316 av_frame_unref(ist->filter_frame);
2317 av_frame_unref(decoded_frame);
2318 return err < 0 ? err : ret;
/*
 * Decode one video packet for an input stream: run the decoder, reconcile
 * video_delay with the demuxer, retrieve hwaccel output if needed, select a
 * best-effort timestamp (falling back to buffered dts values at EOF), update
 * the stream clock, and forward the frame to the filtergraphs.
 * NOTE(review): extraction dropped some interior lines (returns, avpkt
 * setup); comments describe only the visible statements.
 */
2321 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2324 AVFrame *decoded_frame;
2325 int i, ret = 0, err = 0;
2326 int64_t best_effort_timestamp;
2327 int64_t dts = AV_NOPTS_VALUE;
2330 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2331 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2333 if (!eof && pkt && pkt->size == 0)
/* lazily allocate the per-stream reusable frames */
2336 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2337 return AVERROR(ENOMEM);
2338 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2339 return AVERROR(ENOMEM);
2340 decoded_frame = ist->decoded_frame;
2341 if (ist->dts != AV_NOPTS_VALUE)
2342 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2345 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2348 // The old code used to set dts on the drain packet, which does not work
2349 // with the new API anymore.
/* remember dts values so a timestamp can be recovered at EOF */
2351 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2353 return AVERROR(ENOMEM);
2354 ist->dts_buffer = new;
2355 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2358 update_benchmark(NULL);
2359 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2360 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2364 // The following line may be required in some cases where there is no parser
2365 // or the parser does not has_b_frames correctly
2366 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2367 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2368 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2370 av_log(ist->dec_ctx, AV_LOG_WARNING,
2371 "video_delay is larger in decoder than demuxer %d > %d.\n"
2372 "If you want to help, upload a sample "
2373 "of this file to https://streams.videolan.org/upload/ "
2374 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2375 ist->dec_ctx->has_b_frames,
2376 ist->st->codecpar->video_delay);
2379 if (ret != AVERROR_EOF)
2380 check_decode_result(ist, got_output, ret);
/* frame parameters diverging from the decoder context is only logged */
2382 if (*got_output && ret >= 0) {
2383 if (ist->dec_ctx->width != decoded_frame->width ||
2384 ist->dec_ctx->height != decoded_frame->height ||
2385 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2386 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2387 decoded_frame->width,
2388 decoded_frame->height,
2389 decoded_frame->format,
2390 ist->dec_ctx->width,
2391 ist->dec_ctx->height,
2392 ist->dec_ctx->pix_fmt);
2396 if (!*got_output || ret < 0)
/* -top option override, when the user forced a field order */
2399 if(ist->top_field_first>=0)
2400 decoded_frame->top_field_first = ist->top_field_first;
2402 ist->frames_decoded++;
/* download the frame from hw memory when a hwaccel produced it */
2404 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2405 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2409 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2411 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2412 *duration_pts = decoded_frame->pkt_duration;
/* -r on input: synthesize CFR timestamps instead of using stream ones */
2414 if (ist->framerate.num)
2415 best_effort_timestamp = ist->cfr_next_pts++;
/* at EOF, recover a timestamp from the buffered dts queue */
2417 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2418 best_effort_timestamp = ist->dts_buffer[0];
2420 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2421 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2422 ist->nb_dts_buffer--;
2425 if(best_effort_timestamp != AV_NOPTS_VALUE) {
/* also assigns best_effort_timestamp to decoded_frame->pts */
2426 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2428 if (ts != AV_NOPTS_VALUE)
2429 ist->next_pts = ist->pts = ts;
2433 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2434 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2435 ist->st->index, av_ts2str(decoded_frame->pts),
2436 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2437 best_effort_timestamp,
2438 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2439 decoded_frame->key_frame, decoded_frame->pict_type,
2440 ist->st->time_base.num, ist->st->time_base.den);
2443 if (ist->st->sample_aspect_ratio.num)
2444 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2446 err = send_frame_to_filters(ist, decoded_frame);
2449 av_frame_unref(ist->filter_frame);
2450 av_frame_unref(decoded_frame);
2451 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet and dispatch the result: feed sub2video filter
 * inputs and/or every subtitle encoder attached to this input stream.
 * Returns the (possibly swapped, see -fix_sub_duration below) decode result.
 * NB: this excerpt elides some original source lines between the visible ones.
 */
2454 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2457 AVSubtitle subtitle;
2459 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2460 &subtitle, got_output, pkt);
2462 check_decode_result(NULL, got_output, ret);
2464 if (ret < 0 || !*got_output) {
/* decode failed or produced nothing: flush pending sub2video state */
2467 sub2video_flush(ist);
/*
 * -fix_sub_duration: clamp the PREVIOUS subtitle's display time so it ends
 * when the current one starts. 'end' is the gap converted to milliseconds
 * (end_display_time is in ms), hence av_rescale(..., 1000, AV_TIME_BASE).
 */
2471 if (ist->fix_sub_duration) {
2473 if (ist->prev_sub.got_output) {
2474 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2475 1000, AV_TIME_BASE);
2476 if (end < ist->prev_sub.subtitle.end_display_time) {
2477 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2478 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2479 ist->prev_sub.subtitle.end_display_time, end,
2480 end <= 0 ? ", dropping it" : "");
2481 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current and previous: we emit the previous (now duration-fixed)
 * subtitle and buffer the current one for the next call. */
2484 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2485 FFSWAP(int, ret, ist->prev_sub.ret);
2486 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video path: render directly if a frame exists, otherwise queue the
 * subtitle until the filter graph is configured. */
2494 if (ist->sub2video.frame) {
2495 sub2video_update(ist, INT64_MIN, &subtitle);
2496 } else if (ist->nb_filters) {
2497 if (!ist->sub2video.sub_queue)
2498 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2499 if (!ist->sub2video.sub_queue)
/* grow the FIFO (doubling) when full before enqueueing */
2501 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2502 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2506 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
/* nothing to encode if the subtitle has no rectangles */
2510 if (!subtitle.num_rects)
2513 ist->frames_decoded++;
/* forward the decoded subtitle to every matching subtitle encoder */
2515 for (i = 0; i < nb_output_streams; i++) {
2516 OutputStream *ost = output_streams[i];
2518 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2519 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2522 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2527 avsubtitle_free(&subtitle);
/*
 * Signal EOF to every filter input fed by this input stream, passing the
 * last known pts (rescaled from AV_TIME_BASE_Q back to the stream time base)
 * so the filters can flush with a correct end timestamp.
 * NB: this excerpt elides some original source lines (error handling in the loop).
 */
2531 static int send_filter_eof(InputStream *ist)
2534 /* TODO keep pts also in stream time base to avoid converting back */
2535 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2536 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2538 for (i = 0; i < ist->nb_filters; i++) {
2539 ret = ifilter_send_eof(ist->filters[i], pts);
2546 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central per-packet driver: maintains the input stream's dts/pts clocks
 * (in AV_TIME_BASE_Q), runs the decode loop for streams that need decoding,
 * and advances timestamps + forwards packets for stream-copy outputs.
 * Returns !eof_reached, i.e. non-zero while the stream still produces data.
 * NB: this excerpt elides some original source lines between the visible ones.
 */
2547 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2551 int eof_reached = 0;
/* first packet seen on this stream: seed the dts clock; start it slightly
 * negative to account for decoder reordering delay (has_b_frames) */
2554 if (!ist->saw_first_ts) {
2555 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2557 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2558 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2559 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2561 ist->saw_first_ts = 1;
2564 if (ist->next_dts == AV_NOPTS_VALUE)
2565 ist->next_dts = ist->dts;
2566 if (ist->next_pts == AV_NOPTS_VALUE)
2567 ist->next_pts = ist->pts;
2571 av_init_packet(&avpkt);
/* packet dts (converted to AV_TIME_BASE_Q) resynchronizes our clocks */
2578 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2579 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2580 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2581 ist->next_pts = ist->pts = ist->dts;
2584 // while we have more to decode or while the decoder did output something on EOF
2585 while (ist->decoding_needed) {
2586 int64_t duration_dts = 0;
2587 int64_t duration_pts = 0;
2589 int decode_failed = 0;
2591 ist->pts = ist->next_pts;
2592 ist->dts = ist->next_dts;
2594 switch (ist->dec_ctx->codec_type) {
2595 case AVMEDIA_TYPE_AUDIO:
/* 'repeating' iterations pass NULL: the packet was already submitted */
2596 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2599 case AVMEDIA_TYPE_VIDEO:
2600 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2602 if (!repeating || !pkt || got_output) {
/* estimate frame duration: prefer the packet duration, otherwise derive
 * it from the decoder frame rate and the parser's repeat_pict */
2603 if (pkt && pkt->duration) {
2604 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2605 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2606 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2607 duration_dts = ((int64_t)AV_TIME_BASE *
2608 ist->dec_ctx->framerate.den * ticks) /
2609 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2612 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2613 ist->next_dts += duration_dts;
2615 ist->next_dts = AV_NOPTS_VALUE;
/* pts advances by the decoded frame's own duration when known,
 * falling back to the dts-based estimate */
2619 if (duration_pts > 0) {
2620 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2622 ist->next_pts += duration_dts;
2626 case AVMEDIA_TYPE_SUBTITLE:
2629 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2630 if (!pkt && ret >= 0)
2637 if (ret == AVERROR_EOF) {
2643 if (decode_failed) {
2644 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2645 ist->file_index, ist->st->index, av_err2str(ret));
2647 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2648 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2650 if (!decode_failed || exit_on_error)
2656 ist->got_output = 1;
2661 // During draining, we might get multiple output frames in this loop.
2662 // ffmpeg.c does not drain the filter chain on configuration changes,
2663 // which means if we send multiple frames at once to the filters, and
2664 // one of those frames changes configuration, the buffered frames will
2665 // be lost. This can upset certain FATE tests.
2666 // Decode only 1 frame per call on EOF to appease these FATE tests.
2667 // The ideal solution would be to rewrite decoding to use the new
2668 // decoding API in a better way.
2675 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2676 /* except when looping we need to flush but not to send an EOF */
2677 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2678 int ret = send_filter_eof(ist);
2680 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2685 /* handle stream copy */
2686 if (!ist->decoding_needed && pkt) {
2687 ist->dts = ist->next_dts;
2688 switch (ist->dec_ctx->codec_type) {
2689 case AVMEDIA_TYPE_AUDIO:
2690 av_assert1(pkt->duration >= 0);
/* advance dts by one frame of audio if the decoder knows its frame size,
 * else by the packet duration */
2691 if (ist->dec_ctx->sample_rate) {
2692 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2693 ist->dec_ctx->sample_rate;
2695 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2698 case AVMEDIA_TYPE_VIDEO:
/* with a forced input frame rate (-r), snap dts to the frame grid */
2699 if (ist->framerate.num) {
2700 // TODO: Remove work-around for c99-to-c89 issue 7
2701 AVRational time_base_q = AV_TIME_BASE_Q;
2702 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2703 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2704 } else if (pkt->duration) {
2705 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2706 } else if(ist->dec_ctx->framerate.num != 0) {
2707 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2708 ist->next_dts += ((int64_t)AV_TIME_BASE *
2709 ist->dec_ctx->framerate.den * ticks) /
2710 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2714 ist->pts = ist->dts;
2715 ist->next_pts = ist->next_dts;
/* hand the packet to every stream-copy output fed by this input */
2717 for (i = 0; i < nb_output_streams; i++) {
2718 OutputStream *ost = output_streams[i];
2720 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2723 do_streamcopy(ist, ost, pkt);
2726 return !eof_reached;
/*
 * Generate an SDP description covering all RTP output muxers and either print
 * it to stdout or write it to -sdp_file. Only runs once every output file has
 * written its header (returns early otherwise).
 * NB: this excerpt elides some original source lines (alloc checks, cleanup).
 */
2729 static void print_sdp(void)
2734 AVIOContext *sdp_pb;
2735 AVFormatContext **avc;
/* wait until all muxers have written their headers */
2737 for (i = 0; i < nb_output_files; i++) {
2738 if (!output_files[i]->header_written)
2742 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* collect only the "rtp" muxer contexts; j counts how many were found */
2745 for (i = 0, j = 0; i < nb_output_files; i++) {
2746 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2747 avc[j] = output_files[i]->ctx;
2755 av_sdp_create(avc, j, sdp, sizeof(sdp));
2757 if (!sdp_filename) {
2758 printf("SDP:\n%s\n", sdp);
2761 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2762 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2764 avio_print(sdp_pb, sdp);
2765 avio_closep(&sdp_pb);
2766 av_freep(&sdp_filename);
/*
 * AVCodecContext.get_format callback: negotiate the decoder's pixel format.
 * Walks the decoder-offered list and, for hwaccel formats, tries to set up the
 * matching hardware acceleration via two mechanisms: the generic
 * AVCodecHWConfig path (HWACCEL_GENERIC / HWACCEL_AUTO) or the legacy static
 * hwaccels[] table. On success records the chosen hwaccel pixel format.
 * NB: this excerpt elides some original source lines (loop plumbing, breaks).
 */
2774 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2776 InputStream *ist = s->opaque;
2777 const enum AVPixelFormat *p;
2780 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2781 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2782 const AVCodecHWConfig *config = NULL;
/* software formats need no setup; accept as offered */
2785 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* path 1: generic hw config enumeration (hw_device_ctx method) */
2788 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2789 ist->hwaccel_id == HWACCEL_AUTO) {
2791 config = avcodec_get_hw_config(s->codec, i);
2794 if (!(config->methods &
2795 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2797 if (config->pix_fmt == *p)
2802 if (config->device_type != ist->hwaccel_device_type) {
2803 // Different hwaccel offered, ignore.
2807 ret = hwaccel_decode_init(s);
/* for an explicitly requested hwaccel, failure to init is fatal;
 * HWACCEL_AUTO silently falls through to the next format */
2809 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2810 av_log(NULL, AV_LOG_FATAL,
2811 "%s hwaccel requested for input stream #%d:%d, "
2812 "but cannot be initialized.\n",
2813 av_hwdevice_get_type_name(config->device_type),
2814 ist->file_index, ist->st->index);
2815 return AV_PIX_FMT_NONE;
/* path 2: legacy static hwaccels[] table keyed by pixel format */
2820 const HWAccel *hwaccel = NULL;
2822 for (i = 0; hwaccels[i].name; i++) {
2823 if (hwaccels[i].pix_fmt == *p) {
2824 hwaccel = &hwaccels[i];
2829 // No hwaccel supporting this pixfmt.
2832 if (hwaccel->id != ist->hwaccel_id) {
2833 // Does not match requested hwaccel.
2837 ret = hwaccel->init(s);
2839 av_log(NULL, AV_LOG_FATAL,
2840 "%s hwaccel requested for input stream #%d:%d, "
2841 "but cannot be initialized.\n", hwaccel->name,
2842 ist->file_index, ist->st->index);
2843 return AV_PIX_FMT_NONE;
/* propagate a pre-existing hw frames context to the decoder */
2847 if (ist->hw_frames_ctx) {
2848 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2849 if (!s->hw_frames_ctx)
2850 return AV_PIX_FMT_NONE;
2853 ist->hwaccel_pix_fmt = *p;
2860 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2862 InputStream *ist = s->opaque;
2864 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2865 return ist->hwaccel_get_buffer(s, frame, flags);
2867 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Prepare and open the decoder for one input stream: install callbacks,
 * apply decoder options, set up hardware devices, then avcodec_open2().
 * On failure writes a human-readable message into 'error' and returns a
 * negative AVERROR; on success resets the stream's pts/dts clocks.
 * NB: this excerpt elides some original source lines (error returns, braces).
 */
2870 static int init_input_stream(int ist_index, char *error, int error_len)
2873 InputStream *ist = input_streams[ist_index];
2875 if (ist->decoding_needed) {
2876 AVCodec *codec = ist->dec;
2878 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2879 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2880 return AVERROR(EINVAL);
/* hook our format/buffer negotiation callbacks into the decoder */
2883 ist->dec_ctx->opaque = ist;
2884 ist->dec_ctx->get_format = get_format;
2885 ist->dec_ctx->get_buffer2 = get_buffer;
2886 ist->dec_ctx->thread_safe_callbacks = 1;
2888 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2889 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2890 (ist->decoding_needed & DECODING_FOR_OST)) {
2891 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2892 if (ist->decoding_needed & DECODING_FOR_FILTER)
2893 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2896 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2898 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2899 * audio, and video decoders such as cuvid or mediacodec */
2900 ist->dec_ctx->pkt_timebase = ist->st->time_base;
/* default to automatic threading unless the user chose a thread count */
2902 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2903 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2904 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2905 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2906 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2908 ret = hw_device_setup_for_decode(ist);
2910 snprintf(error, error_len, "Device setup failed for "
2911 "decoder on input stream #%d:%d : %s",
2912 ist->file_index, ist->st->index, av_err2str(ret));
2916 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2917 if (ret == AVERROR_EXPERIMENTAL)
2918 abort_codec_experimental(codec, 0);
2920 snprintf(error, error_len,
2921 "Error while opening decoder for input stream "
2923 ist->file_index, ist->st->index, av_err2str(ret));
/* any decoder option left in the dict was not consumed -> abort */
2926 assert_avoptions(ist->decoder_opts);
2929 ist->next_pts = AV_NOPTS_VALUE;
2930 ist->next_dts = AV_NOPTS_VALUE;
2935 static InputStream *get_input_stream(OutputStream *ost)
2937 if (ost->source_index >= 0)
2938 return input_streams[ost->source_index];
/**
 * qsort() comparator for int64_t values.
 * Returns -1, 0 or 1 as *a is less than, equal to, or greater than *b;
 * the sign trick avoids the overflow a plain subtraction would risk.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2947 /* open the muxer when all the streams are initialized */
/*
 * Once every stream of this output file is initialized, write the container
 * header, optionally emit the SDP, and drain packets that were queued in the
 * per-stream muxing FIFOs while the muxer was not yet open.
 * NB: this excerpt elides some original source lines (early return, braces).
 */
2948 static int check_init_output_file(OutputFile *of, int file_index)
/* bail out until all streams belonging to this file are ready */
2952 for (i = 0; i < of->ctx->nb_streams; i++) {
2953 OutputStream *ost = output_streams[of->ost_index + i];
2954 if (!ost->initialized)
2958 of->ctx->interrupt_callback = int_cb;
2960 ret = avformat_write_header(of->ctx, &of->opts);
2962 av_log(NULL, AV_LOG_ERROR,
2963 "Could not write header for output file #%d "
2964 "(incorrect codec parameters ?): %s\n",
2965 file_index, av_err2str(ret));
2968 //assert_avoptions(of->opts);
2969 of->header_written = 1;
2971 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2973 if (sdp_filename || want_sdp)
2976 /* flush the muxing queues */
2977 for (i = 0; i < of->ctx->nb_streams; i++) {
2978 OutputStream *ost = output_streams[of->ost_index + i];
2980 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2981 if (!av_fifo_size(ost->muxing_queue))
2982 ost->mux_timebase = ost->st->time_base;
2984 while (av_fifo_size(ost->muxing_queue)) {
2986 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2987 write_packet(of, &pkt, ost, 1);
/*
 * Initialize the output stream's bitstream filter (if any): copy the stream
 * codec parameters and time base into the BSF, init it, then copy the
 * (possibly modified) parameters and time base back onto the stream.
 * NB: this excerpt elides some original source lines (null check, returns).
 */
2994 static int init_output_bsfs(OutputStream *ost)
2996 AVBSFContext *ctx = ost->bsf_ctx;
2998 ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
/*
 * Set up an output stream in stream-copy (-c copy) mode: derive the output
 * codec parameters from the input stream (routed through ost->enc_ctx so
 * user codec options can still apply), pick a compatible codec_tag for the
 * target container, and carry over timing, disposition and side data.
 * NB: this excerpt elides some original source lines (returns, braces).
 */
3023 static int init_output_stream_streamcopy(OutputStream *ost)
3025 OutputFile *of = output_files[ost->file_index];
3026 InputStream *ist = get_input_stream(ost);
3027 AVCodecParameters *par_dst = ost->st->codecpar;
3028 AVCodecParameters *par_src = ost->ref_par;
3031 uint32_t codec_tag = par_dst->codec_tag;
3033 av_assert0(ist && !ost->filter);
3035 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3037 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3039 av_log(NULL, AV_LOG_FATAL,
3040 "Error setting up codec context options.\n");
3044 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3046 av_log(NULL, AV_LOG_FATAL,
3047 "Error getting reference codec parameters.\n");
/* keep the source codec_tag only when the target container maps it back to
 * the same codec id, or has no tag of its own for this codec */
3052 unsigned int codec_tag_tmp;
3053 if (!of->ctx->oformat->codec_tag ||
3054 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3055 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3056 codec_tag = par_src->codec_tag;
3059 ret = avcodec_parameters_copy(par_dst, par_src);
3063 par_dst->codec_tag = codec_tag;
3065 if (!ost->frame_rate.num)
3066 ost->frame_rate = ist->framerate;
3067 ost->st->avg_frame_rate = ost->frame_rate;
3069 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3073 // copy timebase while removing common factors
3074 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3075 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3077 // copy estimated duration as a hint to the muxer
3078 if (ost->st->duration <= 0 && ist->st->duration > 0)
3079 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3082 ost->st->disposition = ist->st->disposition;
/* duplicate all stream-level packet side data onto the output stream */
3084 if (ist->st->nb_side_data) {
3085 for (i = 0; i < ist->st->nb_side_data; i++) {
3086 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3089 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3091 return AVERROR(ENOMEM);
3092 memcpy(dst_data, sd_src->data, sd_src->size);
/* a user-forced rotation (-metadata:s rotate / autorotate override) is
 * expressed as a fresh display matrix side data entry */
3096 if (ost->rotate_overridden) {
3097 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3098 sizeof(int32_t) * 9);
3100 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3103 switch (par_dst->codec_type) {
3104 case AVMEDIA_TYPE_AUDIO:
3105 if (audio_volume != 256) {
3106 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* clear bogus block_align values known to break MP3/AC3 remuxing */
3109 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3110 par_dst->block_align= 0;
3111 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3112 par_dst->block_align= 0;
3114 case AVMEDIA_TYPE_VIDEO:
3115 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3117 av_mul_q(ost->frame_aspect_ratio,
3118 (AVRational){ par_dst->height, par_dst->width });
3119 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3120 "with stream copy may produce invalid files\n");
3122 else if (ist->st->sample_aspect_ratio.num)
3123 sar = ist->st->sample_aspect_ratio;
3125 sar = par_src->sample_aspect_ratio;
3126 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3127 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3128 ost->st->r_frame_rate = ist->st->r_frame_rate;
3132 ost->mux_timebase = ist->st->time_base;
/*
 * Fill the output stream's "encoder" metadata tag with the encoder name,
 * prefixed by the libavcodec version string unless bitexact output was
 * requested via format or codec flags. Existing user-set tags are kept.
 * NB: this excerpt elides some original source lines (returns, braces).
 */
3137 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3139 AVDictionaryEntry *e;
3141 uint8_t *encoder_string;
3142 int encoder_string_len;
3143 int format_flags = 0;
3144 int codec_flags = ost->enc_ctx->flags;
/* respect an encoder tag the user already set */
3146 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate -fflags / -flags option strings to detect +bitexact */
3149 e = av_dict_get(of->opts, "fflags", NULL, 0);
3151 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3154 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3156 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3158 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3161 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3164 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3165 encoder_string = av_mallocz(encoder_string_len);
3166 if (!encoder_string)
3169 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3170 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3172 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3173 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL: the dict takes ownership of encoder_string */
3174 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3175 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse the -force_key_frames time list ("t1,t2,..." where an entry may be
 * "chapters[+offset]" to expand into all chapter start times) into a sorted
 * array of pts values in the encoder time base, stored on the OutputStream.
 * NB: this excerpt elides some original source lines (counting loop, braces).
 */
3178 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3179 AVCodecContext *avctx)
3182 int n = 1, i, size, index = 0;
/* count comma-separated entries to size the pts array */
3185 for (p = kf; *p; p++)
3189 pts = av_malloc_array(size, sizeof(*pts));
3191 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3196 for (i = 0; i < n; i++) {
3197 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at every chapter start */
3202 if (!memcmp(p, "chapters", 8)) {
3204 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* grow the array by nb_chapters-1 (the "chapters" entry itself is
 * replaced), guarding against integer overflow of 'size' */
3207 if (avf->nb_chapters > INT_MAX - size ||
3208 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3210 av_log(NULL, AV_LOG_FATAL,
3211 "Could not allocate forced key frames array.\n");
3214 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3215 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3217 for (j = 0; j < avf->nb_chapters; j++) {
3218 AVChapter *c = avf->chapters[j];
3219 av_assert1(index < size);
3220 pts[index++] = av_rescale_q(c->start, c->time_base,
3221 avctx->time_base) + t;
/* plain timestamp entry */
3226 t = parse_time_or_die("force_key_frames", p, 1);
3227 av_assert1(index < size);
3228 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3235 av_assert0(index == size);
3236 qsort(pts, size, sizeof(*pts), compare_int64);
3237 ost->forced_kf_count = size;
3238 ost->forced_kf_pts = pts;
/*
 * Choose the encoder time base: an explicit -enc_time_base value wins;
 * a negative value means "copy the input stream's time base" (falling back
 * to default_time_base with a warning when there is no input stream);
 * otherwise the caller-provided default is used.
 * NB: this excerpt elides some original source lines (returns, braces).
 */
3241 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3243 InputStream *ist = get_input_stream(ost);
3244 AVCodecContext *enc_ctx = ost->enc_ctx;
3245 AVFormatContext *oc;
3247 if (ost->enc_timebase.num > 0) {
3248 enc_ctx->time_base = ost->enc_timebase;
3252 if (ost->enc_timebase.num < 0) {
3254 enc_ctx->time_base = ist->st->time_base;
3258 oc = output_files[ost->file_index]->ctx;
3259 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3262 enc_ctx->time_base = default_time_base;
/*
 * Configure the encoder context for one output stream before avcodec_open2():
 * encoder metadata, disposition, frame rate selection, and per-media-type
 * parameters (format, rate, channels, geometry, time base, field order,
 * forced keyframes) taken from the filter sink / input stream.
 * NB: this excerpt elides some original source lines (returns, braces).
 */
3265 static int init_output_stream_encode(OutputStream *ost)
3267 InputStream *ist = get_input_stream(ost);
3268 AVCodecContext *enc_ctx = ost->enc_ctx;
3269 AVCodecContext *dec_ctx = NULL;
3270 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3273 set_encoder_id(output_files[ost->file_index], ost);
3275 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3276 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3277 // which have to be filtered out to prevent leaking them to output files.
3278 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3281 ost->st->disposition = ist->st->disposition;
3283 dec_ctx = ist->dec_ctx;
3285 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* if this is the only stream of its media type in the file, mark it as
 * the default audio/video stream */
3287 for (j = 0; j < oc->nb_streams; j++) {
3288 AVStream *st = oc->streams[j];
3289 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3292 if (j == oc->nb_streams)
3293 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3294 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3295 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* frame rate: filter sink -> -r -> input r_frame_rate -> 25 fps fallback */
3298 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3299 if (!ost->frame_rate.num)
3300 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3301 if (ist && !ost->frame_rate.num)
3302 ost->frame_rate = ist->framerate;
3303 if (ist && !ost->frame_rate.num)
3304 ost->frame_rate = ist->st->r_frame_rate;
3305 if (ist && !ost->frame_rate.num) {
3306 ost->frame_rate = (AVRational){25, 1};
3307 av_log(NULL, AV_LOG_WARNING,
3309 "about the input framerate is available. Falling "
3310 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3311 "if you want a different framerate.\n",
3312 ost->file_index, ost->index);
/* snap to the nearest rate the encoder supports (unless -force_fps) */
3315 if (ost->enc->supported_framerates && !ost->force_fps) {
3316 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3317 ost->frame_rate = ost->enc->supported_framerates[idx];
3319 // reduce frame rate for mpeg4 to be within the spec limits
3320 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3321 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3322 ost->frame_rate.num, ost->frame_rate.den, 65535);
3326 switch (enc_ctx->codec_type) {
3327 case AVMEDIA_TYPE_AUDIO:
/* audio parameters come from the buffersink at the end of the graph */
3328 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3330 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3331 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3332 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3333 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3334 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3336 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3339 case AVMEDIA_TYPE_VIDEO:
3340 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3342 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3343 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3344 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3345 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3346 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3347 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3350 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3351 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3352 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3353 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3354 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3355 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3357 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3359 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3360 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3362 enc_ctx->framerate = ost->frame_rate;
3364 ost->st->avg_frame_rate = ost->frame_rate;
/* raw-sample depth hint only survives unchanged geometry/pix_fmt */
3367 enc_ctx->width != dec_ctx->width ||
3368 enc_ctx->height != dec_ctx->height ||
3369 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3370 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -top: 0 = bottom field first, 1 = top field first */
3373 if (ost->top_field_first == 0) {
3374 enc_ctx->field_order = AV_FIELD_BB;
3375 } else if (ost->top_field_first == 1) {
3376 enc_ctx->field_order = AV_FIELD_TT;
3379 if (ost->forced_keyframes) {
3380 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3381 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3382 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3384 av_log(NULL, AV_LOG_ERROR,
3385 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3388 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3389 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3390 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3391 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3393 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3394 // parse it only for static kf timings
3395 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3396 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3400 case AVMEDIA_TYPE_SUBTITLE:
3401 enc_ctx->time_base = AV_TIME_BASE_Q;
3402 if (!enc_ctx->width) {
3403 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3404 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3407 case AVMEDIA_TYPE_DATA:
3414 ost->mux_timebase = enc_ctx->time_base;
3419 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3423 if (ost->encoding_needed) {
3424 AVCodec *codec = ost->enc;
3425 AVCodecContext *dec = NULL;
3428 ret = init_output_stream_encode(ost);
3432 if ((ist = get_input_stream(ost)))
3434 if (dec && dec->subtitle_header) {
3435 /* ASS code assumes this buffer is null terminated so add extra byte. */
3436 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3437 if (!ost->enc_ctx->subtitle_header)
3438 return AVERROR(ENOMEM);
3439 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3440 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3442 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3443 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3444 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3446 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3447 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3448 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3450 ret = hw_device_setup_for_encode(ost);
3452 snprintf(error, error_len, "Device setup failed for "
3453 "encoder on output stream #%d:%d : %s",
3454 ost->file_index, ost->index, av_err2str(ret));
3458 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3459 int input_props = 0, output_props = 0;
3460 AVCodecDescriptor const *input_descriptor =
3461 avcodec_descriptor_get(dec->codec_id);
3462 AVCodecDescriptor const *output_descriptor =
3463 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3464 if (input_descriptor)
3465 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3466 if (output_descriptor)
3467 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3468 if (input_props && output_props && input_props != output_props) {
3469 snprintf(error, error_len,
3470 "Subtitle encoding currently only possible from text to text "
3471 "or bitmap to bitmap");
3472 return AVERROR_INVALIDDATA;
3476 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3477 if (ret == AVERROR_EXPERIMENTAL)
3478 abort_codec_experimental(codec, 1);
3479 snprintf(error, error_len,
3480 "Error while opening encoder for output stream #%d:%d - "
3481 "maybe incorrect parameters such as bit_rate, rate, width or height",
3482 ost->file_index, ost->index);
3485 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3486 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3487 av_buffersink_set_frame_size(ost->filter->filter,
3488 ost->enc_ctx->frame_size);
3489 assert_avoptions(ost->encoder_opts);
3490 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3491 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3492 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3493 " It takes bits/s as argument, not kbits/s\n");
3495 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3497 av_log(NULL, AV_LOG_FATAL,
3498 "Error initializing the output stream codec context.\n");
3502 * FIXME: ost->st->codec should't be needed here anymore.
3504 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3508 if (ost->enc_ctx->nb_coded_side_data) {
3511 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3512 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3515 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3517 return AVERROR(ENOMEM);
3518 memcpy(dst_data, sd_src->data, sd_src->size);
3523 * Add global input side data. For now this is naive, and copies it
3524 * from the input stream's global side data. All side data should
3525 * really be funneled over AVFrame and libavfilter, then added back to
3526 * packet side data, and then potentially using the first packet for
3531 for (i = 0; i < ist->st->nb_side_data; i++) {
3532 AVPacketSideData *sd = &ist->st->side_data[i];
3533 if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3534 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3536 return AVERROR(ENOMEM);
3537 memcpy(dst, sd->data, sd->size);
3538 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3539 av_display_rotation_set((uint32_t *)dst, 0);
3544 // copy timebase while removing common factors
3545 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3546 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3548 // copy estimated duration as a hint to the muxer
3549 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3550 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3552 ost->st->codec->codec= ost->enc_ctx->codec;
3553 } else if (ost->stream_copy) {
3554 ret = init_output_stream_streamcopy(ost);
3559 // parse user provided disposition, and update stream values
3560 if (ost->disposition) {
3561 static const AVOption opts[] = {
3562 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3563 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3564 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3565 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3566 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3567 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3568 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3569 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3570 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3571 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3572 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3573 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3574 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3575 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3576 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3577 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3580 static const AVClass class = {
3582 .item_name = av_default_item_name,
3584 .version = LIBAVUTIL_VERSION_INT,
3586 const AVClass *pclass = &class;
3588 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3593 /* initialize bitstream filters for the output stream
3594 * needs to be done here, because the codec id for streamcopy is not
3595 * known until now */
3596 ret = init_output_bsfs(ost);
3600 ost->initialized = 1;
3602 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (once per stream index) about a stream that appeared dynamically
 * during demuxing. A high-water mark (nb_streams_warn) on the input file
 * suppresses repeated warnings for already-reported stream indices. */
3609 static void report_new_stream(int input_index, AVPacket *pkt)
3611 InputFile *file = input_files[input_index];
3612 AVStream *st = file->ctx->streams[pkt->stream_index];
/* stream index already reported earlier -> skip the warning */
3614 if (pkt->stream_index < file->nb_streams_warn)
3616 av_log(file->ctx, AV_LOG_WARNING,
3617 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3618 av_get_media_type_string(st->codecpar->codec_type),
3619 input_index, pkt->stream_index,
3620 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* raise the high-water mark so this stream is not warned about again */
3621 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to a source input stream, initialize input streams and (non-filter)
 * encoders, discard unused programs, write headers for stream-less outputs,
 * and print the "Stream mapping" report. Returns 0 on success or a negative
 * AVERROR code (the error[] buffer carries a human-readable message). */
3624 static int transcode_init(void)
3626 int ret = 0, i, j, k;
3627 AVFormatContext *oc;
3630 char error[1024] = {0};
/* For single-input filtergraph outputs that have no explicit source yet,
 * record the filtergraph's input stream as the output's source_index. */
3632 for (i = 0; i < nb_filtergraphs; i++) {
3633 FilterGraph *fg = filtergraphs[i];
3634 for (j = 0; j < fg->nb_outputs; j++) {
3635 OutputFilter *ofilter = fg->outputs[j];
3636 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3638 if (fg->nb_inputs != 1)
3640 for (k = nb_input_streams-1; k >= 0 ; k--)
3641 if (fg->inputs[0]->ist == input_streams[k])
3643 ofilter->ost->source_index = k;
3647 /* init framerate emulation */
3648 for (i = 0; i < nb_input_files; i++) {
3649 InputFile *ifile = input_files[i];
3650 if (ifile->rate_emu)
3651 for (j = 0; j < ifile->nb_streams; j++)
3652 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3655 /* init input streams */
3656 for (i = 0; i < nb_input_streams; i++)
3657 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close any encoders opened so far before bailing out */
3658 for (i = 0; i < nb_output_streams; i++) {
3659 ost = output_streams[i];
3660 avcodec_close(ost->enc_ctx);
3665 /* open each encoder */
3666 for (i = 0; i < nb_output_streams; i++) {
3667 // skip streams fed from filtergraphs until we have a frame for them
3668 if (output_streams[i]->filter)
3671 ret = init_output_stream(output_streams[i], error, sizeof(error));
3676 /* discard unused programs */
3677 for (i = 0; i < nb_input_files; i++) {
3678 InputFile *ifile = input_files[i];
3679 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3680 AVProgram *p = ifile->ctx->programs[j];
3681 int discard = AVDISCARD_ALL;
/* keep a program if at least one of its streams is actually used */
3683 for (k = 0; k < p->nb_stream_indexes; k++)
3684 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3685 discard = AVDISCARD_DEFAULT;
3688 p->discard = discard;
3692 /* write headers for files with no streams */
3693 for (i = 0; i < nb_output_files; i++) {
3694 oc = output_files[i]->ctx;
3695 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3696 ret = check_init_output_file(output_files[i], i);
3703 /* dump the stream mapping */
3704 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3705 for (i = 0; i < nb_input_streams; i++) {
3706 ist = input_streams[i];
/* report inputs feeding complex (non-simple) filtergraphs */
3708 for (j = 0; j < ist->nb_filters; j++) {
3709 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3710 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3711 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3712 ist->filters[j]->name);
3713 if (nb_filtergraphs > 1)
3714 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3715 av_log(NULL, AV_LOG_INFO, "\n");
3720 for (i = 0; i < nb_output_streams; i++) {
3721 ost = output_streams[i];
3723 if (ost->attachment_filename) {
3724 /* an attached file */
3725 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3726 ost->attachment_filename, ost->file_index, ost->index);
3730 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3731 /* output from a complex graph */
3732 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3733 if (nb_filtergraphs > 1)
3734 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3736 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3737 ost->index, ost->enc ? ost->enc->name : "?");
/* plain input->output mapping (stream copy or simple transcode) */
3741 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3742 input_streams[ost->source_index]->file_index,
3743 input_streams[ost->source_index]->st->index,
3746 if (ost->sync_ist != input_streams[ost->source_index])
3747 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3748 ost->sync_ist->file_index,
3749 ost->sync_ist->st->index);
3750 if (ost->stream_copy)
3751 av_log(NULL, AV_LOG_INFO, " (copy)");
/* when transcoding, show codec and implementation names; if the
 * implementation name equals the codec name, report it as "native" */
3753 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3754 const AVCodec *out_codec = ost->enc;
3755 const char *decoder_name = "?";
3756 const char *in_codec_name = "?";
3757 const char *encoder_name = "?";
3758 const char *out_codec_name = "?";
3759 const AVCodecDescriptor *desc;
3762 decoder_name = in_codec->name;
3763 desc = avcodec_descriptor_get(in_codec->id);
3765 in_codec_name = desc->name;
3766 if (!strcmp(decoder_name, in_codec_name))
3767 decoder_name = "native";
3771 encoder_name = out_codec->name;
3772 desc = avcodec_descriptor_get(out_codec->id);
3774 out_codec_name = desc->name;
3775 if (!strcmp(encoder_name, out_codec_name))
3776 encoder_name = "native";
3779 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3780 in_codec_name, decoder_name,
3781 out_codec_name, encoder_name);
3783 av_log(NULL, AV_LOG_INFO, "\n");
3787 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* let other threads (e.g. signal handlers / reporters) know init finished */
3791 atomic_store(&transcode_init_done, 1);
3796 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* Scan all output streams and decide whether any still wants more data.
 * A stream that is finished, or whose file hit the size limit, does not
 * count; a stream that reached max_frames closes its whole file's streams. */
3797 static int need_output(void)
3801 for (i = 0; i < nb_output_streams; i++) {
3802 OutputStream *ost = output_streams[i];
3803 OutputFile *of = output_files[ost->file_index];
3804 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* stream done, or output file already reached -fs size limit */
3806 if (ost->finished ||
3807 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* frame-count limit (-frames) reached: close every stream of the file */
3809 if (ost->frame_number >= ost->max_frames) {
3811 for (j = 0; j < of->ctx->nb_streams; j++)
3812 close_output_stream(output_streams[of->ost_index + j]);
3823 * Select the output stream to process.
3825 * @return selected output stream, or NULL if none available
/* Pick the output stream with the smallest current DTS (rescaled to a
 * common time base) among unfinished streams; streams with no DTS yet
 * sort first (INT64_MIN). An unavailable winner yields NULL so the caller
 * retries later. */
3827 static OutputStream *choose_output(void)
3830 int64_t opts_min = INT64_MAX;
3831 OutputStream *ost_min = NULL;
3833 for (i = 0; i < nb_output_streams; i++) {
3834 OutputStream *ost = output_streams[i];
/* streams without a valid cur_dts compare as INT64_MIN so they win */
3835 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3836 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3838 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3839 av_log(NULL, AV_LOG_DEBUG,
3840 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3841 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
/* not-yet-initialized streams with pending inputs take priority */
3843 if (!ost->initialized && !ost->inputs_done)
3846 if (!ost->finished && opts < opts_min) {
3848 ost_min = ost->unavailable ? NULL : ost;
/* Enable or disable terminal echo on stdin (fd 0) via termios.
 * NOTE(review): platform guards (#if HAVE_TERMIOS_H) and the termios
 * declaration are elided in this extract — confirm against full source. */
3854 static void set_tty_echo(int on)
3858 if (tcgetattr(0, &tty) == 0) {
3859 if (on) tty.c_lflag |= ECHO;
3860 else tty.c_lflag &= ~ECHO;
3861 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin (at most every 100ms, never when daemonized) for interactive
 * keys: verbosity (+/-), QP histogram (s), packet dumping (h/d paths),
 * filter commands (c/C), debug modes (d/D), and help (?). Returns
 * AVERROR_EXIT when the user asked to quit or a signal was received,
 * 0 otherwise. */
3866 static int check_keyboard_interaction(int64_t cur_time)
3869 static int64_t last_time;
3870 if (received_nb_signals)
3871 return AVERROR_EXIT;
3872 /* read_key() returns 0 on EOF */
/* rate-limit the stdin poll to once per 100ms of wall clock */
3873 if(cur_time - last_time >= 100000 && !run_as_daemon){
3875 last_time = cur_time;
3879 return AVERROR_EXIT;
3880 if (key == '+') av_log_set_level(av_log_get_level()+10);
3881 if (key == '-') av_log_set_level(av_log_get_level()-10);
3882 if (key == 's') qp_hist ^= 1;
3885 do_hex_dump = do_pkt_dump = 0;
3886 } else if(do_pkt_dump){
3890 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read one line from stdin and send/queue a libavfilter command;
 * 'c' targets the first matching filter (AVFILTER_CMD_FLAG_ONE), 'C' all */
3892 if (key == 'c' || key == 'C'){
3893 char buf[4096], target[64], command[256], arg[256] = {0};
3896 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3899 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3904 fprintf(stderr, "\n");
3906 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3907 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3908 target, time, command, arg);
3909 for (i = 0; i < nb_filtergraphs; i++) {
3910 FilterGraph *fg = filtergraphs[i];
3913 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3914 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3915 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3916 } else if (key == 'c') {
3917 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3918 ret = AVERROR_PATCHWELCOME;
3920 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3922 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3927 av_log(NULL, AV_LOG_ERROR,
3928 "Parse error, at least 3 arguments were expected, "
3929 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle to the next supported codec debug flag, or ('d', elided
 * branch) read an explicit numeric debug value from stdin */
3932 if (key == 'd' || key == 'D'){
3935 debug = input_streams[0]->st->codec->debug<<1;
3936 if(!debug) debug = 1;
3937 while(debug & (FF_DEBUG_DCT_COEFF
3939 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3941 )) //unsupported, would just crash
3948 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3953 fprintf(stderr, "\n");
3954 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3955 fprintf(stderr,"error parsing debug value\n");
/* propagate the chosen debug flags to every decoder and encoder context */
3957 for(i=0;i<nb_input_streams;i++) {
3958 input_streams[i]->st->codec->debug = debug;
3960 for(i=0;i<nb_output_streams;i++) {
3961 OutputStream *ost = output_streams[i];
3962 ost->enc_ctx->debug = debug;
3964 if(debug) av_log_set_level(AV_LOG_DEBUG);
3965 fprintf(stderr,"debug=%d\n", debug);
/* '?' (or unknown key): print the interactive key help */
3968 fprintf(stderr, "key function\n"
3969 "? show this help\n"
3970 "+ increase verbosity\n"
3971 "- decrease verbosity\n"
3972 "c Send command to first matching filter supporting it\n"
3973 "C Send/Queue command to all matching filters\n"
3974 "D cycle through available debug modes\n"
3975 "h dump packets/hex press to cycle through the 3 states\n"
3977 "s Show QP histogram\n"
/* Demuxer thread body (one per input file when several inputs are used):
 * reads packets with av_read_frame() and forwards them to the main thread
 * through the file's thread message queue. On read error/EOF the error is
 * propagated to the receiving side via set_err_recv. */
3984 static void *input_thread(void *arg)
/* non-blocking sends are used for non-seekable (live) inputs */
3987 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3992 ret = av_read_frame(f->ctx, &pkt);
3994 if (ret == AVERROR(EAGAIN)) {
3999 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4002 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: retry (blocking, per elided flag reset)
 * and warn that thread_queue_size may be too small */
4003 if (flags && ret == AVERROR(EAGAIN)) {
4005 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4006 av_log(f->ctx, AV_LOG_WARNING,
4007 "Thread message queue blocking; consider raising the "
4008 "thread_queue_size option (current value: %d)\n",
4009 f->thread_queue_size);
4012 if (ret != AVERROR_EOF)
4013 av_log(f->ctx, AV_LOG_ERROR,
4014 "Unable to send packet to main thread: %s\n",
/* the packet could not be handed off: release it and signal the error */
4016 av_packet_unref(&pkt);
4017 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down the demuxer thread of input file i: signal EOF to the sender,
 * drain and free any packets still queued, join the thread, and free the
 * message queue. Safe no-op if no thread/queue was created. */
4025 static void free_input_thread(int i)
4027 InputFile *f = input_files[i];
4030 if (!f || !f->in_thread_queue)
4032 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain remaining packets so their buffers are released */
4033 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4034 av_packet_unref(&pkt);
4036 pthread_join(f->thread, NULL);
4038 av_thread_message_queue_free(&f->in_thread_queue);
/* Tear down the demuxer threads of all input files. */
4041 static void free_input_threads(void)
4045 for (i = 0; i < nb_input_files; i++)
4046 free_input_thread(i);
/* Create the demuxer thread for input file i (skipped when there is only
 * one input file). Non-seekable inputs other than lavfi get non-blocking
 * queue sends. Returns 0 or a negative AVERROR on allocation/thread
 * creation failure. */
4049 static int init_input_thread(int i)
4052 InputFile *f = input_files[i];
/* single input: packets are read directly, no thread needed */
4054 if (nb_input_files == 1)
4057 if (f->ctx->pb ? !f->ctx->pb->seekable :
4058 strcmp(f->ctx->iformat->name, "lavfi"))
4059 f->non_blocking = 1;
4060 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4061 f->thread_queue_size, sizeof(AVPacket));
4065 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4066 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4067 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, not -errno */
4068 return AVERROR(ret);
/* Start a demuxer thread for every input file; stops at the first error. */
4074 static int init_input_threads(void)
4078 for (i = 0; i < nb_input_files; i++) {
4079 ret = init_input_thread(i);
/* Receive one packet from the file's demuxer-thread message queue;
 * non-blocking when the file was flagged as such (elided ternary line). */
4086 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4088 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4090 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from an input file. Under rate emulation (-re),
 * returns EAGAIN while any stream's DTS is ahead of wall-clock time since
 * the stream started. With multiple inputs the packet comes from the
 * demuxer thread's queue, otherwise straight from av_read_frame(). */
4094 static int get_input_packet(InputFile *f, AVPacket *pkt)
4098 for (i = 0; i < f->nb_streams; i++) {
4099 InputStream *ist = input_streams[f->ist_index + i];
/* compare stream DTS (in microseconds) against elapsed real time */
4100 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4101 int64_t now = av_gettime_relative() - ist->start;
4103 return AVERROR(EAGAIN);
4108 if (nb_input_files > 1)
4109 return get_input_packet_mt(f, pkt);
4111 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input recently returned EAGAIN). */
4114 static int got_eagain(void)
4117 for (i = 0; i < nb_output_streams; i++)
4118 if (output_streams[i]->unavailable)
/* Clear the transient EAGAIN/unavailable flags on all input files and
 * output streams so the next scheduling pass starts fresh. */
4123 static void reset_eagain(void)
4126 for (i = 0; i < nb_input_files; i++)
4127 input_files[i]->eagain = 0;
4128 for (i = 0; i < nb_output_streams; i++)
4129 output_streams[i]->unavailable = 0;
4132 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Keep *duration at the maximum of its current value and tmp, comparing
 * across the two (possibly different) time bases with av_compare_ts();
 * returns the time base that the stored maximum is expressed in. */
4133 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4134 AVRational time_base)
4140 return tmp_time_base;
4143 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4146 return tmp_time_base;
/* For -stream_loop: seek the input file back to its start and compute the
 * duration of the pass just finished (used to offset timestamps of the
 * next pass). When an audio stream with a known sample count exists, video
 * last-frame length is ignored because audio is exact. */
4152 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4155 AVCodecContext *avctx;
4156 int i, ret, has_audio = 0;
4157 int64_t duration = 0;
4159 ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
/* first pass: detect whether a usable audio stream is present */
4163 for (i = 0; i < ifile->nb_streams; i++) {
4164 ist = input_streams[ifile->ist_index + i];
4165 avctx = ist->dec_ctx;
4167 /* duration is the length of the last frame in a stream
4168 * when audio stream is present we don't care about
4169 * last video frame length because it's not defined exactly */
4170 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* second pass: compute each stream's total duration including the
 * length of its last frame, and keep the maximum across streams */
4174 for (i = 0; i < ifile->nb_streams; i++) {
4175 ist = input_streams[ifile->ist_index + i];
4176 avctx = ist->dec_ctx;
4179 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
/* last-frame length from the exact decoded sample count */
4180 AVRational sample_rate = {1, avctx->sample_rate};
4182 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: approximate last-frame length from the (forced or average)
 * frame rate when available */
4187 if (ist->framerate.num) {
4188 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4189 } else if (ist->st->avg_frame_rate.num) {
4190 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4195 if (!ifile->duration)
4196 ifile->time_base = ist->st->time_base;
4197 /* the total duration of the stream, max_pts - min_pts is
4198 * the duration of the stream without the last frame */
4199 if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4200 duration += ist->max_pts - ist->min_pts;
4201 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* positive loop count: one fewer iteration remains */
4205 if (ifile->loop > 0)
4213 * - 0 -- one packet was read and processed
4214 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4215 * this function should be called again
4216 * - AVERROR_EOF -- this function should not be called again
/* Read and process one packet from the given input file.
 * Returns 0 when a packet was consumed, AVERROR(EAGAIN) when none was
 * available (or on EOF, after flushing), AVERROR_EOF / other negative
 * codes per the header comment above. Handles -stream_loop rewinding,
 * timestamp wrap correction, side-data injection, ts_offset/ts_scale
 * application, and DTS discontinuity detection before dispatching the
 * packet to process_input_packet(). */
4218 static int process_input(int file_index)
4220 InputFile *ifile = input_files[file_index];
4221 AVFormatContext *is;
4224 int ret, thread_ret, i, j;
4227 int disable_discontinuity_correction = copy_ts;
4230 ret = get_input_packet(ifile, &pkt);
4232 if (ret == AVERROR(EAGAIN)) {
/* EOF/error with -stream_loop active: flush decoders, rewind the file
 * (restarting its demuxer thread around the seek) and read again */
4236 if (ret < 0 && ifile->loop) {
4237 AVCodecContext *avctx;
4238 for (i = 0; i < ifile->nb_streams; i++) {
4239 ist = input_streams[ifile->ist_index + i];
4240 avctx = ist->dec_ctx;
4241 if (ist->decoding_needed) {
4242 ret = process_input_packet(ist, NULL, 1);
4245 avcodec_flush_buffers(avctx);
4249 free_input_thread(file_index);
4251 ret = seek_to_start(ifile, is);
4253 thread_ret = init_input_thread(file_index);
4258 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4260 ret = get_input_packet(ifile, &pkt);
4261 if (ret == AVERROR(EAGAIN)) {
/* genuine end of input: report non-EOF errors, flush each decoder and
 * finish outputs that do not go through libavfilter */
4267 if (ret != AVERROR_EOF) {
4268 print_error(is->url, ret);
4273 for (i = 0; i < ifile->nb_streams; i++) {
4274 ist = input_streams[ifile->ist_index + i];
4275 if (ist->decoding_needed) {
4276 ret = process_input_packet(ist, NULL, 0);
4281 /* mark all outputs that don't go through lavfi as finished */
4282 for (j = 0; j < nb_output_streams; j++) {
4283 OutputStream *ost = output_streams[j];
4285 if (ost->source_index == ifile->ist_index + i &&
4286 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4287 finish_output_stream(ost);
4291 ifile->eof_reached = 1;
4292 return AVERROR(EAGAIN);
4298 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4299 is->streams[pkt.stream_index]);
4301 /* the following test is needed in case new streams appear
4302 dynamically in stream : we ignore them */
4303 if (pkt.stream_index >= ifile->nb_streams) {
4304 report_new_stream(file_index, &pkt);
4305 goto discard_packet;
4308 ist = input_streams[ifile->ist_index + pkt.stream_index];
4310 ist->data_size += pkt.size;
4314 goto discard_packet;
4316 if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4317 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4318 "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4324 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4325 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4326 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4327 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4328 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4329 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4330 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4331 av_ts2str(input_files[ist->file_index]->ts_offset),
4332 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for containers with limited pts_wrap_bits */
4335 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4336 int64_t stime, stime2;
4337 // Correcting starttime based on the enabled streams
4338 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4339 // so we instead do it here as part of discontinuity handling
4340 if ( ist->next_dts == AV_NOPTS_VALUE
4341 && ifile->ts_offset == -is->start_time
4342 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4343 int64_t new_start_time = INT64_MAX;
4344 for (i=0; i<is->nb_streams; i++) {
4345 AVStream *st = is->streams[i];
4346 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4348 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4350 if (new_start_time > is->start_time) {
4351 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4352 ifile->ts_offset = -new_start_time;
4356 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4357 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4358 ist->wrap_correction_done = 1;
/* a DTS/PTS beyond the wrap midpoint is treated as pre-wrap: unwrap it
 * and keep the correction pending for the other field */
4360 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4361 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4362 ist->wrap_correction_done = 0;
4364 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4365 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4366 ist->wrap_correction_done = 0;
4370 /* add the stream-global side data to the first packet */
4371 if (ist->nb_packets == 1) {
4372 for (i = 0; i < ist->st->nb_side_data; i++) {
4373 AVPacketSideData *src_sd = &ist->st->side_data[i];
4376 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4379 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4382 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4386 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file timestamp offset, then the per-stream -itsscale */
4390 if (pkt.dts != AV_NOPTS_VALUE)
4391 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4392 if (pkt.pts != AV_NOPTS_VALUE)
4393 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4395 if (pkt.pts != AV_NOPTS_VALUE)
4396 pkt.pts *= ist->ts_scale;
4397 if (pkt.dts != AV_NOPTS_VALUE)
4398 pkt.dts *= ist->ts_scale;
4400 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* inter-stream discontinuity: first DTS of this stream jumps far from
 * the file's last seen timestamp -> fold the jump into ts_offset */
4401 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4402 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4403 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4404 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4405 int64_t delta = pkt_dts - ifile->last_ts;
4406 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4407 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4408 ifile->ts_offset -= delta;
4409 av_log(NULL, AV_LOG_DEBUG,
4410 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4411 delta, ifile->ts_offset);
4412 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4413 if (pkt.pts != AV_NOPTS_VALUE)
4414 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* when looping, shift timestamps by the accumulated file duration and
 * track the observed pts range for the next pass */
4418 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4419 if (pkt.pts != AV_NOPTS_VALUE) {
4420 pkt.pts += duration;
4421 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4422 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4425 if (pkt.dts != AV_NOPTS_VALUE)
4426 pkt.dts += duration;
4428 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* with -copyts, still allow correction when the DTS looks like a wrap
 * (the unwrapped value lands much closer to the expected next DTS) */
4430 if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4431 (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4432 int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4433 ist->st->time_base, AV_TIME_BASE_Q,
4434 AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4435 if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4436 disable_discontinuity_correction = 0;
/* intra-stream discontinuity: DTS far from the predicted next_dts */
4439 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4440 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4441 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4442 !disable_discontinuity_correction) {
4443 int64_t delta = pkt_dts - ist->next_dts;
4444 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4445 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4446 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4447 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4448 ifile->ts_offset -= delta;
4449 av_log(NULL, AV_LOG_DEBUG,
4450 "timestamp discontinuity for stream #%d:%d "
4451 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4452 ist->file_index, ist->st->index, ist->st->id,
4453 av_get_media_type_string(ist->dec_ctx->codec_type),
4454 delta, ifile->ts_offset);
4455 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4456 if (pkt.pts != AV_NOPTS_VALUE)
4457 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: drop wildly out-of-range DTS/PTS instead */
4460 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4461 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4462 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4463 pkt.dts = AV_NOPTS_VALUE;
4465 if (pkt.pts != AV_NOPTS_VALUE){
4466 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4467 delta = pkt_pts - ist->next_dts;
4468 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4469 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4470 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4471 pkt.pts = AV_NOPTS_VALUE;
/* remember the file's last DTS for inter-stream discontinuity checks */
4477 if (pkt.dts != AV_NOPTS_VALUE)
4478 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4481 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4482 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4483 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4484 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4485 av_ts2str(input_files[ist->file_index]->ts_offset),
4486 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4489 sub2video_heartbeat(ist, pkt.pts);
4491 process_input_packet(ist, &pkt, 0);
4494 av_packet_unref(&pkt);
4500 * Perform a step of transcoding for the specified filter graph.
4502 * @param[in] graph filter graph to consider
4503 * @param[out] best_ist input stream where a frame would allow to continue
4504 * @return 0 for success, <0 for error
/* Pump the given filtergraph once. On a produced frame, reap it; on graph
 * EOF, reap remaining frames and close all the graph's output streams; on
 * EAGAIN, pick as *best_ist the input whose buffersrc reported the most
 * failed requests (i.e. the input the graph is starving for) among inputs
 * that are not blocked/finished, and mark outputs unavailable if none. */
4506 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4509 int nb_requests, nb_requests_max = 0;
4510 InputFilter *ifilter;
4514 ret = avfilter_graph_request_oldest(graph->graph);
4516 return reap_filters(0);
4518 if (ret == AVERROR_EOF) {
4519 ret = reap_filters(1);
4520 for (i = 0; i < graph->nb_outputs; i++)
4521 close_output_stream(graph->outputs[i]->ost);
4524 if (ret != AVERROR(EAGAIN))
/* graph starved: find the hungriest usable input */
4527 for (i = 0; i < graph->nb_inputs; i++) {
4528 ifilter = graph->inputs[i];
4530 if (input_files[ist->file_index]->eagain ||
4531 input_files[ist->file_index]->eof_reached)
4533 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4534 if (nb_requests > nb_requests_max) {
4535 nb_requests_max = nb_requests;
/* no feedable input found: outputs cannot make progress right now */
4541 for (i = 0; i < graph->nb_outputs; i++)
4542 graph->outputs[i]->ost->unavailable = 1;
4548 * Run a single step of transcoding.
4550 * @return 0 for success, <0 for error
/* Run one scheduling step: choose an output stream, (re)configure and
 * initialize its filtergraph/encoder as needed, determine which input
 * stream to feed, read one packet from that input, and reap filtered
 * frames. Returns 0 on success or a negative AVERROR. */
4552 static int transcode_step(void)
4555 InputStream *ist = NULL;
4558 ost = choose_output();
4565 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* filtergraph not configured yet: configure once all inputs know formats */
4569 if (ost->filter && !ost->filter->graph->graph) {
4570 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4571 ret = configure_filtergraph(ost->filter->graph);
4573 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4579 if (ost->filter && ost->filter->graph->graph) {
/* encoder init was deferred until the filter delivered a frame */
4580 if (!ost->initialized) {
4581 char error[1024] = {0};
4582 ret = init_output_stream(ost, error, sizeof(error));
4584 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4585 ost->file_index, ost->index, error);
4589 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4593 } else if (ost->filter) {
/* unconfigured graph: feed any input that has not produced output yet */
4595 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4596 InputFilter *ifilter = ost->filter->graph->inputs[i];
4597 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4603 ost->inputs_done = 1;
/* non-filter path: read from the stream's direct source */
4607 av_assert0(ost->source_index >= 0);
4608 ist = input_streams[ost->source_index];
4611 ret = process_input(ist->file_index);
4612 if (ret == AVERROR(EAGAIN)) {
4613 if (input_files[ist->file_index]->eagain)
4614 ost->unavailable = 1;
4619 return ret == AVERROR_EOF ? 0 : ret;
4621 return reap_filters(0);
4625 * The following code is the main loop of the file converter
/* Main conversion driver: initialize, loop transcode_step() until all
 * outputs are satisfied or the user/a signal stops us, then flush
 * decoders, write trailers, print the final report, and release encoder,
 * decoder and per-stream resources. Returns 0 or a negative AVERROR. */
4627 static int transcode(void)
4630 AVFormatContext *os;
4633 int64_t timer_start;
4634 int64_t total_packets_written = 0;
4636 ret = transcode_init();
4640 if (stdin_interaction) {
4641 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4644 timer_start = av_gettime_relative();
4647 if ((ret = init_input_threads()) < 0)
/* main loop: one scheduling step per iteration until stopped */
4651 while (!received_sigterm) {
4652 int64_t cur_time= av_gettime_relative();
4654 /* if 'q' pressed, exits */
4655 if (stdin_interaction)
4656 if (check_keyboard_interaction(cur_time) < 0)
4659 /* check if there's any stream where output is still needed */
4660 if (!need_output()) {
4661 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4665 ret = transcode_step();
4666 if (ret < 0 && ret != AVERROR_EOF) {
4667 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4671 /* dump report by using the output first video and audio streams */
4672 print_report(0, timer_start, cur_time);
4675 free_input_threads();
4678 /* at the end of stream, we must flush the decoder buffers */
4679 for (i = 0; i < nb_input_streams; i++) {
4680 ist = input_streams[i];
4681 if (!input_files[ist->file_index]->eof_reached) {
4682 process_input_packet(ist, NULL, 0);
4689 /* write the trailer if needed and close file */
4690 for (i = 0; i < nb_output_files; i++) {
4691 os = output_files[i]->ctx;
4692 if (!output_files[i]->header_written) {
4693 av_log(NULL, AV_LOG_ERROR,
4694 "Nothing was written into output file %d (%s), because "
4695 "at least one of its streams received no packets.\n",
4699 if ((ret = av_write_trailer(os)) < 0) {
4700 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4706 /* dump report by using the first video and audio streams */
4707 print_report(1, timer_start, av_gettime_relative());
4709 /* close each encoder */
4710 for (i = 0; i < nb_output_streams; i++) {
4711 ost = output_streams[i];
4712 if (ost->encoding_needed) {
4713 av_freep(&ost->enc_ctx->stats_in);
/* enforce -abort_on empty_output_stream / empty_output policies */
4715 total_packets_written += ost->packets_written;
4716 if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4717 av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4722 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4723 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4727 /* close each decoder */
4728 for (i = 0; i < nb_input_streams; i++) {
4729 ist = input_streams[i];
4730 if (ist->decoding_needed) {
4731 avcodec_close(ist->dec_ctx);
4732 if (ist->hwaccel_uninit)
4733 ist->hwaccel_uninit(ist->dec_ctx);
4737 hw_device_free_all();
/* cleanup path (also reached on error): threads and per-stream state */
4744 free_input_threads();
4747 if (output_streams) {
4748 for (i = 0; i < nb_output_streams; i++) {
4749 ost = output_streams[i];
/* fclose can fail on buffered writes; report possible data loss */
4752 if (fclose(ost->logfile))
4753 av_log(NULL, AV_LOG_ERROR,
4754 "Error closing logfile, loss of information possible: %s\n",
4755 av_err2str(AVERROR(errno)));
4756 ost->logfile = NULL;
4758 av_freep(&ost->forced_kf_pts);
4759 av_freep(&ost->apad);
4760 av_freep(&ost->disposition);
4761 av_dict_free(&ost->encoder_opts);
4762 av_dict_free(&ost->sws_dict);
4763 av_dict_free(&ost->swr_opts);
4764 av_dict_free(&ost->resample_opts);
4771 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
/* Samples wall-clock time plus per-process user and system CPU time for
 * the -benchmark report.  Wall clock always comes from
 * av_gettime_relative(); CPU times use getrusage() where available,
 * GetProcessTimes() on Windows, and are zero elsewhere.
 * NOTE(review): the #if HAVE_GETRUSAGE guard and closing lines are
 * elided from this view. */
4773     BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4775     struct rusage rusage;
4777     getrusage(RUSAGE_SELF, &rusage);
     /* Convert struct timeval (sec + usec) to a single microsecond count. */
4778     time_stamps.user_usec =
4779         (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4780     time_stamps.sys_usec =
4781         (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4782 #elif HAVE_GETPROCESSTIMES
4784     FILETIME c, e, k, u;
4785     proc = GetCurrentProcess();
4786     GetProcessTimes(proc, &c, &e, &k, &u);
     /* FILETIME is a 64-bit count of 100ns ticks split across two 32-bit
      * words; recombine and divide by 10 to get microseconds. */
4787     time_stamps.user_usec =
4788         ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4789     time_stamps.sys_usec =
4790         ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
     /* Fallback: no way to measure CPU time on this platform. */
4792     time_stamps.user_usec = time_stamps.sys_usec = 0;
4797 static int64_t getmaxrss(void)
/* Returns the process peak memory usage in bytes for the -benchmark
 * report: ru_maxrss (kilobytes, hence *1024) via getrusage() where
 * available, PeakPagefileUsage via GetProcessMemoryInfo() on Windows.
 * NOTE(review): the final #else fallback and closing brace are elided
 * from this view. */
4799 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4800     struct rusage rusage;
4801     getrusage(RUSAGE_SELF, &rusage);
4802     return (int64_t)rusage.ru_maxrss * 1024;
4803 #elif HAVE_GETPROCESSMEMORYINFO
4805     PROCESS_MEMORY_COUNTERS memcounters;
4806     proc = GetCurrentProcess();
     /* cb must be set to the struct size before the call. */
4807     memcounters.cb = sizeof(memcounters);
4808     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4809     return memcounters.PeakPagefileUsage;
/* No-op av_log callback: installed by the hidden -d (daemon) option to
 * silence all library logging.  Body is empty (elided from this view). */
4815 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4819 int main(int argc, char **argv)
/* ffmpeg entry point: registers cleanup, parses options, opens all
 * inputs/outputs, runs transcode(), then prints -benchmark and decode
 * error statistics before exiting.
 * NOTE(review): this view is elided — declarations, error gotos and the
 * closing brace are missing; comments describe only the visible lines. */
4822     BenchmarkTimeStamps ti;
     /* ffmpeg_cleanup runs on every exit_program() path. */
4826     register_exit(ffmpeg_cleanup);
4828     setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4830     av_log_set_flags(AV_LOG_SKIP_REPEATED);
     /* Loglevel is parsed before full option parsing so that early
      * messages already honour -loglevel. */
4831     parse_loglevel(argc, argv, options);
     /* Hidden daemon mode: "-d" as the first argument silences logging. */
4833     if(argc>1 && !strcmp(argv[1], "-d")){
4835         av_log_set_callback(log_callback_null);
4841     avdevice_register_all();
4843     avformat_network_init();
4845     show_banner(argc, argv, options);
4847     /* parse options and open all input/output files */
4848     ret = ffmpeg_parse_options(argc, argv);
     /* Invoked with no files at all: point the user to the help. */
4852     if (nb_output_files <= 0 && nb_input_files == 0) {
4854         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4858     /* file converter / grab */
4859     if (nb_output_files <= 0) {
4860         av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
     /* Scan outputs for the "rtp" muxer; the action taken on a match is
      * in an elided line — presumably setting want_sdp — TODO confirm. */
4864     for (i = 0; i < nb_output_files; i++) {
4865         if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
     /* Snapshot start times for the -benchmark report. */
4869     current_time = ti = get_benchmark_time_stamps();
4870     if (transcode() < 0)
4873         int64_t utime, stime, rtime;
4874         current_time = get_benchmark_time_stamps();
4875         utime = current_time.user_usec - ti.user_usec;
4876         stime = current_time.sys_usec - ti.sys_usec;
4877         rtime = current_time.real_usec - ti.real_usec;
4878         av_log(NULL, AV_LOG_INFO,
4879                "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4880                utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4882     av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4883            decode_error_stat[0], decode_error_stat[1]);
     /* Fail the run when the fraction of decode errors exceeds
      * -max_error_rate (errors/total > max_error_rate, rearranged to
      * avoid division). */
4884     if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
     /* 255 signals death-by-signal to the caller. */
4887     exit_program(received_nb_signals ? 255 : main_return_code);
4888     return main_return_code;