2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by cmdutils (banner, version output). */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Open handle for -vstats logging; NULL until first use. */
112 static FILE *vstats_file;
/* Variable names available inside -force_key_frames expressions. */
114 const char *const forced_keyframes_const_names[] = {
/* Per-process CPU/real-time snapshot used by -benchmark / -benchmark_all. */
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
/* Forward declarations for helpers defined later in this file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Runtime state shared across the transcoding loop. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
/* Threshold at which a "More than N frames duplicated" warning is emitted. */
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
140 static int want_sdp = 1;
/* Benchmark baseline updated by update_benchmark(). */
142 static BenchmarkTimeStamps current_time;
143 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets; lazily allocated. */
145 static uint8_t *subtitle_out;
/* Global input/output stream and file tables (owned by this file,
 * freed in ffmpeg_cleanup()). */
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
168 static void free_input_threads(void);
/* --- sub2video hack ---
172 Convert subtitles to video with alpha to insert them in filter graphs.
173 This is a temporary solution until libavfilter gets real subtitles support.
 */
/* Reset ist->sub2video.frame to a fully transparent RGB32 canvas sized
 * from the decoder dimensions (falling back to the configured sub2video
 * size when the decoder reports 0). Returns <0 on allocation failure. */
176 static int sub2video_get_blank_frame(InputStream *ist)
179 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before re-configuring the frame. */
181 av_frame_unref(frame);
182 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
185 if ((ret = av_frame_get_buffer(frame, 0)) < 0)
/* Zero-fill: RGB32 zero bytes are fully transparent black. */
187 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle into the w x h RGB32
 * destination canvas, converting each 8-bit palette index to a 32-bit
 * pixel via the rect's palette (r->data[1]). Rectangles that are not
 * SUBTITLE_BITMAP or that fall outside the canvas are warned about and
 * skipped. */
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194 uint32_t *pal, *dst2;
198 if (r->type != SUBTITLE_BITMAP) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check against the destination canvas before writing. */
202 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204 r->x, r->y, r->w, r->h, w, h
/* Advance to the rect's top-left corner; 4 bytes per RGB32 pixel. */
209 dst += r->y * dst_linesize + r->x * 4;
211 pal = (uint32_t *)r->data[1];
212 for (y = 0; y < r->h; y++) {
213 dst2 = (uint32_t *)dst;
/* Palette lookup: expand each index byte to a full RGB32 pixel. */
215 for (x = 0; x < r->w; x++)
216 *(dst2++) = pal[*(src2++)];
218 src += r->linesize[0];
/* Push the current sub2video frame (stamped with pts) into every filter
 * input fed by this subtitle stream. KEEP_REF keeps ownership of the
 * frame here so it can be re-sent on later heartbeats. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
228 av_assert1(frame->data[0]);
229 ist->sub2video.last_pts = frame->pts = pts;
230 for (i = 0; i < ist->nb_filters; i++) {
231 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232 AV_BUFFERSRC_FLAG_KEEP_REF |
233 AV_BUFFERSRC_FLAG_PUSH);
/* EOF from a closed buffersrc is expected during teardown; only warn
 * on other errors. */
234 if (ret != AVERROR_EOF && ret < 0)
235 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render a decoded AVSubtitle (or a blank frame when sub == NULL) onto
 * the sub2video canvas and push it into the filtergraph. Display start/
 * end times are rescaled from AV_TIME_BASE to the stream time base;
 * heartbeat_pts seeds the start time on the very first update. */
240 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 AVFrame *frame = ist->sub2video.frame;
246 int64_t pts, end_pts;
/* start/end_display_time are in ms relative to sub->pts (AV_TIME_BASE). */
251 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252 AV_TIME_BASE_Q, ist->st->time_base);
253 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254 AV_TIME_BASE_Q, ist->st->time_base);
255 num_rects = sub->num_rects;
257 /* If we are initializing the system, utilize current heartbeat
258 PTS as the start time, and show until the following subpicture
259 is received. Otherwise, utilize the previous subpicture's end time
260 as the fall-back value. */
261 pts = ist->sub2video.initialize ?
262 heartbeat_pts : ist->sub2video.end_pts;
266 if (sub2video_get_blank_frame(ist) < 0) {
267 av_log(ist->dec_ctx, AV_LOG_ERROR,
268 "Impossible to get a blank canvas.\n");
271 dst = frame->data [0];
272 dst_linesize = frame->linesize[0];
/* Composite every subtitle rectangle onto the blank canvas. */
273 for (i = 0; i < num_rects; i++)
274 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275 sub2video_push_ref(ist, pts);
276 ist->sub2video.end_pts = end_pts;
/* First update done; later heartbeats use end_pts as fallback. */
277 ist->sub2video.initialize = 0;
/* Called whenever a packet is read from the input file: re-sends the
 * current sub2video frame on every sub2video stream of the same file so
 * filters waiting on a subtitle input do not stall video frames. */
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282 InputFile *infile = input_files[ist->file_index];
286 /* When a frame is read from a file, examine all sub2video streams in
287 the same file and send the sub2video frame again. Otherwise, decoded
288 video frames could be accumulating in the filter graph while a filter
289 (possibly overlay) is desperately waiting for a subtitle frame. */
290 for (i = 0; i < infile->nb_streams; i++) {
291 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video streams. */
292 if (!ist2->sub2video.frame)
294 /* subtitles seem to be usually muxed ahead of other streams;
295 if not, subtracting a larger time here is necessary */
296 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297 /* do not send the heartbeat frame if the subtitle is already ahead */
298 if (pts2 <= ist2->sub2video.last_pts)
300 if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301 /* if we have hit the end of the current displayed subpicture,
302 or if we need to initialize the system, update the
303 overlayed subpicture and its start/end times */
304 sub2video_update(ist2, pts2 + 1, NULL);
/* Only re-push when some buffersrc actually failed a request. */
305 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308 sub2video_push_ref(ist2, pts2);
/* At end of stream: display the last subpicture until INT64_MAX and
 * send EOF (NULL frame) to every buffersrc fed by this stream. */
312 static void sub2video_flush(InputStream *ist)
317 if (ist->sub2video.end_pts < INT64_MAX)
318 sub2video_update(ist, INT64_MAX, NULL);
319 for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame == EOF marker for av_buffersrc_add_frame(). */
320 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321 if (ret != AVERROR_EOF && ret < 0)
322 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
326 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the tty attributes saved
 * in term_init(). Must stay signal-safe (called from signal handlers). */
328 static void term_exit_sigsafe(void)
332 tcsetattr (0, TCSANOW, &oldtty);
338 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared between signal handlers and the main
 * loop. volatile because they are written from async signal context. */
342 static volatile int received_sigterm = 0;
343 static volatile int received_nb_signals = 0;
/* Flipped once transcode init completes; read by decode_interrupt_cb(). */
344 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
345 static volatile int ffmpeg_exited = 0;
346 static int main_return_code = 0;
/* Signal handler: record which signal arrived and count repeats; after
 * more than 3 signals, write() a note to stderr (async-signal-safe) and
 * hard-exit. */
349 sigterm_handler(int sig)
352 received_sigterm = sig;
353 received_nb_signals++;
355 if(received_nb_signals > 3) {
/* Use raw write(2), not stdio/av_log: only write() is signal-safe. */
356 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
357 strlen("Received > 3 system signals, hard exiting\n"));
358 if (ret < 0) { /* Do nothing */ };
/* Windows console control handler: translate console events into the
 * same path as POSIX signals (SIGINT/SIGTERM via sigterm_handler). */
363 #if HAVE_SETCONSOLECTRLHANDLER
364 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
366 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
371 case CTRL_BREAK_EVENT:
372 sigterm_handler(SIGINT);
375 case CTRL_CLOSE_EVENT:
376 case CTRL_LOGOFF_EVENT:
377 case CTRL_SHUTDOWN_EVENT:
378 sigterm_handler(SIGTERM);
379 /* Basically, with these 3 events, when we return from this method the
380 process is hard terminated, so stall as long as we need to
381 to try and let the main thread(s) clean up and gracefully terminate
382 (we have at most 5 seconds, but should be done far before that). */
/* Busy-wait until the main thread signals it has finished cleanup. */
383 while (!ffmpeg_exited) {
389 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): this span is the interior of term_init() — the signature
 * line is elided in this view. It puts the tty into raw-ish mode for key
 * grabbing and installs the signal handlers. */
398 if (!run_as_daemon && stdin_interaction) {
400 if (tcgetattr (0, &tty) == 0) {
/* Disable canonical mode, echo and input translation so single
 * keypresses are delivered immediately to read_key(). */
404 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
405 |INLCR|IGNCR|ICRNL|IXON);
406 tty.c_oflag |= OPOST;
407 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
408 tty.c_cflag &= ~(CSIZE|PARENB);
413 tcsetattr (0, TCSANOW, &tty);
415 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
419 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
420 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
422 signal(SIGXCPU, sigterm_handler);
425 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
427 #if HAVE_SETCONSOLECTRLHANDLER
428 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
432 /* read a key without blocking */
433 static int read_key(void)
/* POSIX path: poll stdin with a zero-timeout select(). */
445 n = select(1, &rfds, NULL, NULL, &tv);
454 # if HAVE_PEEKNAMEDPIPE
456 static HANDLE input_handle;
459 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes, so use it to detect pipe vs console. */
460 is_pipe = !GetConsoleMode(input_handle, &dw);
464 /* When running under a GUI, you will end here. */
465 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
466 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O after the first signal
 * once init is done, or after the second signal during init. */
484 static int decode_interrupt_cb(void *ctx)
486 return received_nb_signals > atomic_load(&transcode_init_done);
489 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down all global state on exit: filtergraphs (including queued
 * frames/subtitles), output files and streams, input threads/files/
 * streams, the vstats file, and the global stream tables. Also prints
 * the final benchmark/exit diagnostics. */
491 static void ffmpeg_cleanup(int ret)
496 int maxrss = getmaxrss() / 1024;
497 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graphs, drain per-input frame queues --- */
500 for (i = 0; i < nb_filtergraphs; i++) {
501 FilterGraph *fg = filtergraphs[i];
502 avfilter_graph_free(&fg->graph);
503 for (j = 0; j < fg->nb_inputs; j++) {
504 InputFilter *ifilter = fg->inputs[j];
505 struct InputStream *ist = ifilter->ist;
/* Drain and free any frames still queued for this filter input. */
507 while (av_fifo_size(ifilter->frame_queue)) {
509 av_fifo_generic_read(ifilter->frame_queue, &frame,
510 sizeof(frame), NULL);
511 av_frame_free(&frame);
513 av_fifo_freep(&ifilter->frame_queue);
/* Drain queued-but-undisplayed sub2video subtitles as well. */
514 if (ist->sub2video.sub_queue) {
515 while (av_fifo_size(ist->sub2video.sub_queue)) {
517 av_fifo_generic_read(ist->sub2video.sub_queue,
518 &sub, sizeof(sub), NULL);
519 avsubtitle_free(&sub);
521 av_fifo_freep(&ist->sub2video.sub_queue);
523 av_buffer_unref(&ifilter->hw_frames_ctx);
524 av_freep(&ifilter->name);
525 av_freep(&fg->inputs[j]);
527 av_freep(&fg->inputs);
528 for (j = 0; j < fg->nb_outputs; j++) {
529 OutputFilter *ofilter = fg->outputs[j];
531 avfilter_inout_free(&ofilter->out_tmp);
532 av_freep(&ofilter->name);
533 av_freep(&ofilter->formats);
534 av_freep(&ofilter->channel_layouts);
535 av_freep(&ofilter->sample_rates);
536 av_freep(&fg->outputs[j]);
538 av_freep(&fg->outputs);
539 av_freep(&fg->graph_desc);
541 av_freep(&filtergraphs[i]);
543 av_freep(&filtergraphs);
545 av_freep(&subtitle_out);
/* --- output files: close muxer I/O (for file-backed formats) --- */
548 for (i = 0; i < nb_output_files; i++) {
549 OutputFile *of = output_files[i];
554 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
556 avformat_free_context(s);
557 av_dict_free(&of->opts);
559 av_freep(&output_files[i]);
/* --- output streams: encoders, BSFs, queued packets, options --- */
561 for (i = 0; i < nb_output_streams; i++) {
562 OutputStream *ost = output_streams[i];
567 av_bsf_free(&ost->bsf_ctx);
569 av_frame_free(&ost->filtered_frame);
570 av_frame_free(&ost->last_frame);
571 av_dict_free(&ost->encoder_opts);
573 av_freep(&ost->forced_keyframes);
574 av_expr_free(ost->forced_keyframes_pexpr);
575 av_freep(&ost->avfilter);
576 av_freep(&ost->logfile_prefix);
578 av_freep(&ost->audio_channels_map);
579 ost->audio_channels_mapped = 0;
581 av_dict_free(&ost->sws_dict);
582 av_dict_free(&ost->swr_opts);
584 avcodec_free_context(&ost->enc_ctx);
585 avcodec_parameters_free(&ost->ref_par);
/* Unreference packets that never reached the muxer. */
587 if (ost->muxing_queue) {
588 while (av_fifo_size(ost->muxing_queue)) {
590 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
591 av_packet_unref(&pkt);
593 av_fifo_freep(&ost->muxing_queue);
596 av_freep(&output_streams[i]);
/* Input threads must stop before their files/streams are freed. */
599 free_input_threads();
601 for (i = 0; i < nb_input_files; i++) {
602 avformat_close_input(&input_files[i]->ctx);
603 av_freep(&input_files[i]);
605 for (i = 0; i < nb_input_streams; i++) {
606 InputStream *ist = input_streams[i];
608 av_frame_free(&ist->decoded_frame);
609 av_frame_free(&ist->filter_frame);
610 av_dict_free(&ist->decoder_opts);
611 avsubtitle_free(&ist->prev_sub.subtitle);
612 av_frame_free(&ist->sub2video.frame);
613 av_freep(&ist->filters);
614 av_freep(&ist->hwaccel_device);
615 av_freep(&ist->dts_buffer);
617 avcodec_free_context(&ist->dec_ctx);
619 av_freep(&input_streams[i]);
/* fclose() flushes; a failure here can mean lost vstats data. */
623 if (fclose(vstats_file))
624 av_log(NULL, AV_LOG_ERROR,
625 "Error closing vstats file, loss of information possible: %s\n",
626 av_err2str(AVERROR(errno)));
628 av_freep(&vstats_filename);
630 av_freep(&input_streams);
631 av_freep(&input_files);
632 av_freep(&output_streams);
633 av_freep(&output_files);
637 avformat_network_deinit();
639 if (received_sigterm) {
640 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
641 (int) received_sigterm);
642 } else if (ret && atomic_load(&transcode_init_done)) {
643 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive).
 * Used to strip already-consumed options from a leftover dictionary. */
649 void remove_avoptions(AVDictionary **a, AVDictionary *b)
651 AVDictionaryEntry *t = NULL;
653 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
654 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fatal-error if any option in m was not consumed by the (de)muxer or
 * codec — i.e. the user passed an unknown option. */
658 void assert_avoptions(AVDictionary *m)
660 AVDictionaryEntry *t;
661 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
662 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort with a diagnostic when an experimental codec is selected without
 * the required opt-in (body elided in this view). */
667 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log user/sys/real time deltas since the previous
 * call, labelled by the printf-style fmt, then reset the baseline.
 * Called with fmt == NULL to only (re)set the baseline. */
672 static void update_benchmark(const char *fmt, ...)
674 if (do_benchmark_all) {
675 BenchmarkTimeStamps t = get_benchmark_time_stamps();
681 vsnprintf(buf, sizeof(buf), fmt, va);
683 av_log(NULL, AV_LOG_INFO,
684 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
685 t.user_usec - current_time.user_usec,
686 t.sys_usec - current_time.sys_usec,
687 t.real_usec - current_time.real_usec, buf);
/* Mark every output stream finished: the given stream gets this_stream,
 * all other streams get others (OSTFinished bit flags). */
693 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
696 for (i = 0; i < nb_output_streams; i++) {
697 OutputStream *ost2 = output_streams[i];
698 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer for stream ost in file of. Handles:
 * max_frames limiting, buffering in the muxing queue while the muxer
 * header is not yet written, timestamp rescaling/sanitizing (including
 * non-monotonous DTS repair), and stats accounting. The packet is
 * consumed. `unqueue` is nonzero when the packet was previously queued
 * (so it must not be counted twice). */
702 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
704 AVFormatContext *s = of->ctx;
705 AVStream *st = ost->st;
709 * Audio encoders may split the packets -- #frames in != #packets out.
710 * But there is no reordering, so we can limit the number of output packets
711 * by simply dropping them here.
712 * Counting encoded video frames needs to be done separately because of
713 * reordering, see do_video_out().
714 * Do not count the packet when unqueued because it has been counted when queued.
716 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
717 if (ost->frame_number >= ost->max_frames) {
718 av_packet_unref(pkt);
724 if (!of->header_written) {
725 AVPacket tmp_pkt = {0};
726 /* the muxer is not initialized yet, buffer the packet */
/* Grow the queue: double it, but clamp at max_muxing_queue_size once
 * the buffered byte count exceeds the configured threshold. */
727 if (!av_fifo_space(ost->muxing_queue)) {
728 unsigned int are_we_over_size =
729 (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
730 int new_size = are_we_over_size ?
731 FFMIN(2 * av_fifo_size(ost->muxing_queue),
732 ost->max_muxing_queue_size) :
733 2 * av_fifo_size(ost->muxing_queue);
735 if (new_size <= av_fifo_size(ost->muxing_queue)) {
736 av_log(NULL, AV_LOG_ERROR,
737 "Too many packets buffered for output stream %d:%d.\n",
738 ost->file_index, ost->st->index);
741 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Queued packets must own their data; then move into the fifo. */
745 ret = av_packet_make_refcounted(pkt);
748 av_packet_move_ref(&tmp_pkt, pkt);
749 ost->muxing_queue_data_size += tmp_pkt.size;
750 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* VSYNC_DROP video / async audio: discard timestamps entirely. */
754 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
755 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
756 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
758 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* Pull encoder quality/picture-type/error stats from side data. */
760 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
762 ost->quality = sd ? AV_RL32(sd) : -1;
763 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
765 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
767 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: duration is dictated by the frame rate, not the input. */
772 if (ost->frame_rate.num && ost->is_cfr) {
773 if (pkt->duration > 0)
774 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
775 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
780 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
782 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts must never exceed pts; replace by the median of pts, dts and
 * last_mux_dts+1 (the FFMIN3/FFMAX3 pair below computes the median). */
783 if (pkt->dts != AV_NOPTS_VALUE &&
784 pkt->pts != AV_NOPTS_VALUE &&
785 pkt->dts > pkt->pts) {
786 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
788 ost->file_index, ost->st->index);
790 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
791 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
792 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing DTS per stream.
 * VP9 stream copy is exempted (it can carry duplicate DTS). */
794 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
795 pkt->dts != AV_NOPTS_VALUE &&
796 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
797 ost->last_mux_dts != AV_NOPTS_VALUE) {
798 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
799 if (pkt->dts < max) {
800 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
802 loglevel = AV_LOG_ERROR;
803 av_log(s, loglevel, "Non-monotonous DTS in output stream "
804 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
805 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
807 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
810 av_log(s, loglevel, "changing to %"PRId64". This may result "
811 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
813 if (pkt->pts >= pkt->dts)
814 pkt->pts = FFMAX(pkt->pts, max);
819 ost->last_mux_dts = pkt->dts;
821 ost->data_size += pkt->size;
822 ost->packets_written++;
824 pkt->stream_index = ost->index;
827 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
828 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
829 av_get_media_type_string(ost->enc_ctx->codec_type),
830 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
831 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
/* A write failure finishes all streams; ffmpeg keeps draining. */
836 ret = av_interleaved_write_frame(s, pkt);
838 print_error("av_interleaved_write_frame()", ret);
839 main_return_code = 1;
840 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
842 av_packet_unref(pkt);
/* Mark the encoder for ost finished and, where applicable, shrink the
 * containing file's recording_time to the stream's current end time. */
845 static void close_output_stream(OutputStream *ost)
847 OutputFile *of = output_files[ost->file_index];
849 ost->finished |= ENCODER_FINISHED;
851 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
852 of->recording_time = FFMIN(of->recording_time, end);
857 * Send a single packet to the output, applying any bitstream filters
858 * associated with the output stream. This may result in any number
859 * of packets actually being written, depending on what bitstream
860 * filters are applied. The supplied packet is consumed and will be
861 * blank (as if newly-allocated) when this function returns.
863 * If eof is set, instead indicate EOF to all bitstream filters and
864 * therefore flush any delayed packets to the output. A blank packet
865 * must be supplied in this case.
867 static void output_packet(OutputFile *of, AVPacket *pkt,
868 OutputStream *ost, int eof)
872 /* apply the output bitstream filters */
/* NULL input packet signals EOF/flush to the BSF. */
874 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
/* Drain every packet the BSF produces for this input. */
877 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
878 write_packet(of, pkt, ost, 0);
/* EAGAIN just means the BSF wants more input — not an error. */
879 if (ret == AVERROR(EAGAIN))
/* No-BSF path: write the packet straight through. */
882 write_packet(of, pkt, ost, 0);
885 if (ret < 0 && ret != AVERROR_EOF) {
886 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
887 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return 0 (and close the stream) once the stream's current position
 * has reached the output file's -t recording_time; nonzero otherwise. */
893 static int check_recording_time(OutputStream *ost)
895 OutputFile *of = output_files[ost->file_index];
897 if (of->recording_time != INT64_MAX &&
898 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
899 AV_TIME_BASE_Q) >= 0) {
900 close_output_stream(ost);
/* Rescale a filtered frame's pts from the buffersink time base to the
 * encoder time base, subtracting the output file start time, and return
 * the same timestamp as a double with extra sub-tick precision (used by
 * the fps/vsync logic in do_video_out()). */
906 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
909 double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
910 AVCodecContext *enc = ost->enc_ctx;
911 if (!frame || frame->pts == AV_NOPTS_VALUE ||
912 !enc || !ost->filter || !ost->filter->graph->graph)
916 AVFilterContext *filter = ost->filter->filter;
918 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
919 AVRational filter_tb = av_buffersink_get_time_base(filter);
920 AVRational tb = enc->time_base;
/* Widen the denominator (up to 16 extra bits) so the float conversion
 * keeps sub-tick precision without overflowing the rescale. */
921 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
923 tb.den <<= extra_bits;
925 av_rescale_q(frame->pts, filter_tb, tb) -
926 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
927 float_pts /= 1 << extra_bits;
928 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
929 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* Integer pts rescaled directly into the encoder time base. */
932 av_rescale_q(frame->pts, filter_tb, enc->time_base) -
933 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
939 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
940 frame ? av_ts2str(frame->pts) : "NULL",
941 frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
943 enc ? enc->time_base.num : -1,
944 enc ? enc->time_base.den : -1);
/* Encode one audio frame with ost's encoder and push every resulting
 * packet to the muxer via output_packet(). Also advances sync_opts by
 * the frame's sample count and updates encode statistics. */
950 static void do_audio_out(OutputFile *of, OutputStream *ost,
953 AVCodecContext *enc = ost->enc_ctx;
957 av_init_packet(&pkt);
/* Stop encoding once the -t limit is reached. */
961 if (!check_recording_time(ost))
/* Without a valid pts (or with async disabled), stamp the frame at the
 * stream's running sample position. */
964 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
965 frame->pts = ost->sync_opts;
966 ost->sync_opts = frame->pts + frame->nb_samples;
967 ost->samples_encoded += frame->nb_samples;
968 ost->frames_encoded++;
970 av_assert0(pkt.size || !pkt.data);
971 update_benchmark(NULL);
973 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
974 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
975 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
976 enc->time_base.num, enc->time_base.den);
/* send/receive loop of the AVCodec encode API. */
979 ret = avcodec_send_frame(enc, frame);
984 ret = avcodec_receive_packet(enc, &pkt);
/* EAGAIN: encoder needs more input before producing a packet. */
985 if (ret == AVERROR(EAGAIN))
990 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
992 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
995 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
996 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
997 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
998 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1001 output_packet(of, &pkt, ost, 0);
1006 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded
 * twice (one packet draws the subpicture, a second clears it); other
 * codecs produce a single packet. Timestamps are shifted for -ss and
 * normalized so start_display_time is 0. */
1010 static void do_subtitle_out(OutputFile *of,
1014 int subtitle_out_max_size = 1024 * 1024;
1015 int subtitle_out_size, nb, i;
1016 AVCodecContext *enc;
1020 if (sub->pts == AV_NOPTS_VALUE) {
1021 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer (freed in cleanup). */
1029 if (!subtitle_out) {
1030 subtitle_out = av_malloc(subtitle_out_max_size);
1031 if (!subtitle_out) {
1032 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1037 /* Note: DVB subtitle need one packet to draw them and one other
1038 packet to clear them */
1039 /* XXX: signal it in the codec context ? */
1040 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1045 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1047 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1048 pts -= output_files[ost->file_index]->start_time;
/* nb == 2 for DVB (draw + clear), 1 otherwise. */
1049 for (i = 0; i < nb; i++) {
1050 unsigned save_num_rects = sub->num_rects;
1052 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1053 if (!check_recording_time(ost))
1057 // start_display_time is required to be 0
1058 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1059 sub->end_display_time -= sub->start_display_time;
1060 sub->start_display_time = 0;
1064 ost->frames_encoded++;
1066 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1067 subtitle_out_max_size, sub);
/* Restore num_rects (the encoder call may have zeroed it for the
 * DVB "clear" pass). */
1069 sub->num_rects = save_num_rects;
1070 if (subtitle_out_size < 0) {
1071 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1075 av_init_packet(&pkt);
1076 pkt.data = subtitle_out;
1077 pkt.size = subtitle_out_size;
1078 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1079 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1080 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1081 /* XXX: the pts correction is handled here. Maybe handling
1082 it in the codec would be better */
/* First DVB packet is stamped at display start, second at display end. */
1084 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1086 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1089 output_packet(of, &pkt, ost, 0);
1093 static void do_video_out(OutputFile *of,
1095 AVFrame *next_picture,
1098 int ret, format_video_sync;
1100 AVCodecContext *enc = ost->enc_ctx;
1101 AVCodecParameters *mux_par = ost->st->codecpar;
1102 AVRational frame_rate;
1103 int nb_frames, nb0_frames, i;
1104 double delta, delta0;
1105 double duration = 0;
1107 InputStream *ist = NULL;
1108 AVFilterContext *filter = ost->filter->filter;
1110 if (ost->source_index >= 0)
1111 ist = input_streams[ost->source_index];
1113 frame_rate = av_buffersink_get_frame_rate(filter);
1114 if (frame_rate.num > 0 && frame_rate.den > 0)
1115 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1117 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1118 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1120 if (!ost->filters_script &&
1122 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1125 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1126 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1129 if (!next_picture) {
1131 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1132 ost->last_nb0_frames[1],
1133 ost->last_nb0_frames[2]);
1135 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1136 delta = delta0 + duration;
1138 /* by default, we output a single frame */
1139 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1142 format_video_sync = video_sync_method;
1143 if (format_video_sync == VSYNC_AUTO) {
1144 if(!strcmp(of->ctx->oformat->name, "avi")) {
1145 format_video_sync = VSYNC_VFR;
1147 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1149 && format_video_sync == VSYNC_CFR
1150 && input_files[ist->file_index]->ctx->nb_streams == 1
1151 && input_files[ist->file_index]->input_ts_offset == 0) {
1152 format_video_sync = VSYNC_VSCFR;
1154 if (format_video_sync == VSYNC_CFR && copy_ts) {
1155 format_video_sync = VSYNC_VSCFR;
1158 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1162 format_video_sync != VSYNC_PASSTHROUGH &&
1163 format_video_sync != VSYNC_DROP) {
1164 if (delta0 < -0.6) {
1165 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1167 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1168 sync_ipts = ost->sync_opts;
1173 switch (format_video_sync) {
1175 if (ost->frame_number == 0 && delta0 >= 0.5) {
1176 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1179 ost->sync_opts = llrint(sync_ipts);
1182 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1183 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1185 } else if (delta < -1.1)
1187 else if (delta > 1.1) {
1188 nb_frames = lrintf(delta);
1190 nb0_frames = llrintf(delta0 - 0.6);
1196 else if (delta > 0.6)
1197 ost->sync_opts = llrint(sync_ipts);
1200 case VSYNC_PASSTHROUGH:
1201 ost->sync_opts = llrint(sync_ipts);
1208 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1209 nb0_frames = FFMIN(nb0_frames, nb_frames);
1211 memmove(ost->last_nb0_frames + 1,
1212 ost->last_nb0_frames,
1213 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1214 ost->last_nb0_frames[0] = nb0_frames;
1216 if (nb0_frames == 0 && ost->last_dropped) {
1218 av_log(NULL, AV_LOG_VERBOSE,
1219 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1220 ost->frame_number, ost->st->index, ost->last_frame->pts);
1222 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1223 if (nb_frames > dts_error_threshold * 30) {
1224 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1228 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1229 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1230 if (nb_frames_dup > dup_warning) {
1231 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1235 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1237 /* duplicates frame if needed */
1238 for (i = 0; i < nb_frames; i++) {
1239 AVFrame *in_picture;
1240 int forced_keyframe = 0;
1242 av_init_packet(&pkt);
1246 if (i < nb0_frames && ost->last_frame) {
1247 in_picture = ost->last_frame;
1249 in_picture = next_picture;
1254 in_picture->pts = ost->sync_opts;
1256 if (!check_recording_time(ost))
1259 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1260 ost->top_field_first >= 0)
1261 in_picture->top_field_first = !!ost->top_field_first;
1263 if (in_picture->interlaced_frame) {
1264 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1265 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1267 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1269 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1271 in_picture->quality = enc->global_quality;
1272 in_picture->pict_type = 0;
1274 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1275 in_picture->pts != AV_NOPTS_VALUE)
1276 ost->forced_kf_ref_pts = in_picture->pts;
1278 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1279 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1280 if (ost->forced_kf_index < ost->forced_kf_count &&
1281 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1282 ost->forced_kf_index++;
1283 forced_keyframe = 1;
1284 } else if (ost->forced_keyframes_pexpr) {
1286 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1287 res = av_expr_eval(ost->forced_keyframes_pexpr,
1288 ost->forced_keyframes_expr_const_values, NULL);
1289 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1290 ost->forced_keyframes_expr_const_values[FKF_N],
1291 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1292 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1293 ost->forced_keyframes_expr_const_values[FKF_T],
1294 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1297 forced_keyframe = 1;
1298 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1299 ost->forced_keyframes_expr_const_values[FKF_N];
1300 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1301 ost->forced_keyframes_expr_const_values[FKF_T];
1302 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1305 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1306 } else if ( ost->forced_keyframes
1307 && !strncmp(ost->forced_keyframes, "source", 6)
1308 && in_picture->key_frame==1
1310 forced_keyframe = 1;
1313 if (forced_keyframe) {
1314 in_picture->pict_type = AV_PICTURE_TYPE_I;
1315 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1318 update_benchmark(NULL);
1320 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1321 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1322 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1323 enc->time_base.num, enc->time_base.den);
1326 ost->frames_encoded++;
1328 ret = avcodec_send_frame(enc, in_picture);
1331 // Make sure Closed Captions will not be duplicated
1332 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1335 ret = avcodec_receive_packet(enc, &pkt);
1336 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1337 if (ret == AVERROR(EAGAIN))
1343 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1344 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1345 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1346 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1349 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1350 pkt.pts = ost->sync_opts;
1352 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1355 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1356 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1357 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1358 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1361 frame_size = pkt.size;
1362 output_packet(of, &pkt, ost, 0);
1364 /* if two pass, output log */
1365 if (ost->logfile && enc->stats_out) {
1366 fprintf(ost->logfile, "%s", enc->stats_out);
1371 * For video, number of frames in == number of packets out.
1372 * But there may be reordering, so we can't throw away frames on encoder
1373 * flush, we need to limit them here, before they go into encoder.
1375 ost->frame_number++;
1377 if (vstats_filename && frame_size)
1378 do_video_stats(ost, frame_size);
1381 if (!ost->last_frame)
1382 ost->last_frame = av_frame_alloc();
1383 av_frame_unref(ost->last_frame);
1384 if (next_picture && ost->last_frame)
1385 av_frame_ref(ost->last_frame, next_picture);
1387 av_frame_free(&ost->last_frame);
1391 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1395 static double psnr(double d)
1397 return -10.0 * log10(d);
/*
 * Append one line of per-frame video statistics to the -vstats log file.
 * Writes frame number, quantizer, optional PSNR, frame size and bitrates.
 * NOTE(review): several lines are elided in this extract (enc assignment,
 * error handling on fopen, some closing braces).
 */
1400 static void do_video_stats(OutputStream *ost, int frame_size)
1402 AVCodecContext *enc;
1404 double ti1, bitrate, avg_bitrate;
1406 /* this is executed just the first time do_video_stats is called */
1408 vstats_file = fopen(vstats_filename, "w");
/* Only video streams produce vstats entries. */
1416 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1417 frame_number = ost->st->nb_frames;
/* vstats v1 omits the output/stream indices; v2 adds them. */
1418 if (vstats_version <= 1) {
1419 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1420 ost->quality / (float)FF_QP2LAMBDA);
1422 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1423 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only meaningful when the encoder collected error stats. */
1426 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1427 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1429 fprintf(vstats_file,"f_size= %6d ", frame_size);
1430 /* compute pts value */
1431 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate from this frame; average from total bytes so far. */
1435 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1436 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1437 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1438 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1439 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Forward declaration; the definition appears later in this file. */
1443 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Initialize an output stream at most once, logging on failure.
 * NOTE(review): the early-return for already-initialized streams and the
 * handling of the 'fatal' flag are in lines elided from this extract.
 */
1445 static int init_output_stream_wrapper(OutputStream *ost, unsigned int fatal)
1447 int ret = AVERROR_BUG;
1448 char error[1024] = {0};
/* Skip work when the stream was already set up. */
1450 if (ost->initialized)
1453 ret = init_output_stream(ost, error, sizeof(error));
1455 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1456 ost->file_index, ost->index, error);
/*
 * Mark this output stream -- and every stream belonging to the same output
 * file -- as finished for both the encoder and the muxer.
 */
1465 static void finish_output_stream(OutputStream *ost)
1467 OutputFile *of = output_files[ost->file_index];
1470 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* Propagate the finished state to all sibling streams of the output file. */
1473 for (i = 0; i < of->ctx->nb_streams; i++)
1474 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1479 * Get and encode new output from any of the filtergraphs, without causing
1482 * @return 0 for success, <0 for severe errors
1484 static int reap_filters(int flush)
1486 AVFrame *filtered_frame = NULL;
1489 /* Reap all buffers present in the buffer sinks */
1490 for (i = 0; i < nb_output_streams; i++) {
1491 OutputStream *ost = output_streams[i];
1492 OutputFile *of = output_files[ost->file_index];
1493 AVFilterContext *filter;
1494 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph is not configured yet. */
1497 if (!ost->filter || !ost->filter->graph->graph)
1499 filter = ost->filter->filter;
/* Encoder must exist before pulling frames out of the sink (fatal=1). */
1501 init_output_stream_wrapper(ost, 1);
/* Lazily allocate the reusable frame used to receive sink output. */
1503 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1504 return AVERROR(ENOMEM);
1506 filtered_frame = ost->filtered_frame;
1509 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* Non-blocking pull; EAGAIN/EOF are expected and handled below. */
1510 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1511 AV_BUFFERSINK_FLAG_NO_REQUEST);
1513 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1514 av_log(NULL, AV_LOG_WARNING,
1515 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* On EOF during flush, push a NULL frame so the video path drains. */
1516 } else if (flush && ret == AVERROR_EOF) {
1517 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1518 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
/* Discard frames for streams that already finished. */
1522 if (ost->finished) {
1523 av_frame_unref(filtered_frame);
1527 float_pts = adjust_frame_pts_to_encoder_tb(of, ost,
/* Dispatch the filtered frame to the matching encoder path. */
1530 switch (av_buffersink_get_type(filter)) {
1531 case AVMEDIA_TYPE_VIDEO:
1532 if (!ost->frame_aspect_ratio.num)
1533 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1535 do_video_out(of, ost, filtered_frame, float_pts);
1537 case AVMEDIA_TYPE_AUDIO:
1538 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1539 enc->channels != filtered_frame->channels) {
1540 av_log(NULL, AV_LOG_ERROR,
1541 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1544 do_audio_out(of, ost, filtered_frame);
1547 // TODO support subtitle filters
1551 av_frame_unref(filtered_frame);
/*
 * Print end-of-run statistics: per-type output byte totals with muxing
 * overhead, then verbose per-stream demux/decode and encode/mux counts.
 * NOTE(review): loop braces and some log arguments are elided in this view.
 */
1558 static void print_final_stats(int64_t total_size)
1560 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1561 uint64_t subtitle_size = 0;
1562 uint64_t data_size = 0;
/* -1 sentinel means the overhead percentage could not be computed. */
1563 float percent = -1.0;
/* Accumulate written bytes per media type across all output streams. */
1567 for (i = 0; i < nb_output_streams; i++) {
1568 OutputStream *ost = output_streams[i];
1569 switch (ost->enc_ctx->codec_type) {
1570 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1571 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1572 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1573 default: other_size += ost->data_size; break;
1575 extra_size += ost->enc_ctx->extradata_size;
1576 data_size += ost->data_size;
1577 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1578 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead = container bytes beyond the raw stream payload. */
1582 if (data_size && total_size>0 && total_size >= data_size)
1583 percent = 100.0 * (total_size - data_size) / data_size;
1585 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1586 video_size / 1024.0,
1587 audio_size / 1024.0,
1588 subtitle_size / 1024.0,
1589 other_size / 1024.0,
1590 extra_size / 1024.0);
1592 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1594 av_log(NULL, AV_LOG_INFO, "unknown");
1595 av_log(NULL, AV_LOG_INFO, "\n");
1597 /* print verbose per-stream stats */
1598 for (i = 0; i < nb_input_files; i++) {
1599 InputFile *f = input_files[i];
/* Shadows the parameter intentionally: per-file totals only. */
1600 uint64_t total_packets = 0, total_size = 0;
1602 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1605 for (j = 0; j < f->nb_streams; j++) {
1606 InputStream *ist = input_streams[f->ist_index + j];
1607 enum AVMediaType type = ist->dec_ctx->codec_type;
1609 total_size += ist->data_size;
1610 total_packets += ist->nb_packets;
1612 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1613 i, j, media_type_string(type));
1614 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1615 ist->nb_packets, ist->data_size);
1617 if (ist->decoding_needed) {
1618 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1619 ist->frames_decoded);
1620 if (type == AVMEDIA_TYPE_AUDIO)
1621 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1622 av_log(NULL, AV_LOG_VERBOSE, "; ");
1625 av_log(NULL, AV_LOG_VERBOSE, "\n");
1628 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1629 total_packets, total_size);
/* Mirror of the input loop, for the output side (mux/encode counts). */
1632 for (i = 0; i < nb_output_files; i++) {
1633 OutputFile *of = output_files[i];
1634 uint64_t total_packets = 0, total_size = 0;
1636 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1639 for (j = 0; j < of->ctx->nb_streams; j++) {
1640 OutputStream *ost = output_streams[of->ost_index + j];
1641 enum AVMediaType type = ost->enc_ctx->codec_type;
1643 total_size += ost->data_size;
1644 total_packets += ost->packets_written;
1646 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1647 i, j, media_type_string(type));
1648 if (ost->encoding_needed) {
1649 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1650 ost->frames_encoded);
1651 if (type == AVMEDIA_TYPE_AUDIO)
1652 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1653 av_log(NULL, AV_LOG_VERBOSE, "; ");
1656 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1657 ost->packets_written, ost->data_size);
1659 av_log(NULL, AV_LOG_VERBOSE, "\n");
1662 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1663 total_packets, total_size);
/* Warn the user when nothing was produced at all. */
1665 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1666 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1668 av_log(NULL, AV_LOG_WARNING, "\n");
1670 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic progress line (frame/fps/q/size/time/bitrate/speed) to
 * stderr/log and, when -progress is active, a machine-readable key=value
 * report to progress_avio.  Throttled to one update per 500 ms unless this
 * is the final report.  NOTE(review): many lines are elided in this extract.
 */
1675 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1677 AVBPrint buf, buf_script;
1679 AVFormatContext *oc;
1681 AVCodecContext *enc;
1682 int frame_number, vid, i;
1685 int64_t pts = INT64_MIN + 1;
/* Persisted across calls to implement the 500 ms rate limit. */
1686 static int64_t last_time = -1;
1687 static int qp_histogram[52];
1688 int hours, mins, secs, us;
1689 const char *hours_sign;
/* Nothing to report if stats printing and -progress are both off. */
1693 if (!print_stats && !is_last_report && !progress_avio)
1696 if (!is_last_report) {
1697 if (last_time == -1) {
1698 last_time = cur_time;
/* Throttle: at most one intermediate report every 500 ms. */
1701 if ((cur_time - last_time) < 500000)
1703 last_time = cur_time;
/* Wall-clock seconds since transcoding started. */
1706 t = (cur_time-timer_start) / 1000000.0;
1709 oc = output_files[0]->ctx;
1711 total_size = avio_size(oc->pb);
1712 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1713 total_size = avio_tell(oc->pb);
1716 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1717 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1718 for (i = 0; i < nb_output_streams; i++) {
1720 ost = output_streams[i];
1722 if (!ost->stream_copy)
1723 q = ost->quality / (float) FF_QP2LAMBDA;
/* Secondary video streams only contribute their quantizer. */
1725 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1726 av_bprintf(&buf, "q=%2.1f ", q);
1727 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1728 ost->file_index, ost->index, q);
/* First video stream drives frame/fps reporting. */
1730 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1733 frame_number = ost->frame_number;
1734 fps = t > 1 ? frame_number / t : 0;
1735 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1736 frame_number, fps < 9.95, fps, q);
1737 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1738 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1739 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1740 ost->file_index, ost->index, q);
1742 av_bprintf(&buf, "L");
/* -qphist: accumulate and display a per-QP histogram (log2 scaled). */
1746 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1748 for (j = 0; j < 32; j++)
1749 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
/* Per-plane PSNR when the encoder is collecting error stats. */
1752 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1754 double error, error_sum = 0;
1755 double scale, scale_sum = 0;
1757 char type[3] = { 'Y','U','V' };
1758 av_bprintf(&buf, "PSNR=");
1759 for (j = 0; j < 3; j++) {
/* Final report uses cumulative error over all frames. */
1760 if (is_last_report) {
1761 error = enc->error[j];
1762 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1764 error = ost->error[j];
1765 scale = enc->width * enc->height * 255.0 * 255.0;
1771 p = psnr(error / scale);
1772 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1773 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1774 ost->file_index, ost->index, type[j] | 32, p);
1776 p = psnr(error_sum / scale_sum);
1777 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1778 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1779 ost->file_index, ost->index, p);
1783 /* compute min output value */
1784 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1785 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1786 ost->st->time_base, AV_TIME_BASE_Q));
1788 nb_frames_drop += ost->last_dropped;
/* Split the (possibly negative) pts into H:M:S.us components. */
1791 secs = FFABS(pts) / AV_TIME_BASE;
1792 us = FFABS(pts) % AV_TIME_BASE;
1797 hours_sign = (pts < 0) ? "-" : "";
/* -1 sentinels mean "N/A" in the output below. */
1799 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1800 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1802 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1803 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1804 if (pts == AV_NOPTS_VALUE) {
1805 av_bprintf(&buf, "N/A ");
1807 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1808 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1812 av_bprintf(&buf, "bitrate=N/A");
1813 av_bprintf(&buf_script, "bitrate=N/A\n");
1815 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1816 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1819 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1820 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1821 if (pts == AV_NOPTS_VALUE) {
1822 av_bprintf(&buf_script, "out_time_us=N/A\n");
1823 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1824 av_bprintf(&buf_script, "out_time=N/A\n");
1826 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1827 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1828 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1829 hours_sign, hours, mins, secs, us);
1832 if (nb_frames_dup || nb_frames_drop)
1833 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1834 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1835 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1838 av_bprintf(&buf, " speed=N/A");
1839 av_bprintf(&buf_script, "speed=N/A\n");
1841 av_bprintf(&buf, " speed=%4.3gx", speed);
1842 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one terminal line; '\n' ends the run. */
1845 if (print_stats || is_last_report) {
1846 const char end = is_last_report ? '\n' : '\r';
1847 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1848 fprintf(stderr, "%s %c", buf.str, end);
1850 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1854 av_bprint_finalize(&buf, NULL);
/* Machine-readable -progress output, terminated by a progress= key. */
1856 if (progress_avio) {
1857 av_bprintf(&buf_script, "progress=%s\n",
1858 is_last_report ? "end" : "continue");
1859 avio_write(progress_avio, buf_script.str,
1860 FFMIN(buf_script.len, buf_script.size - 1));
1861 avio_flush(progress_avio);
1862 av_bprint_finalize(&buf_script, NULL);
1863 if (is_last_report) {
1864 if ((ret = avio_closep(&progress_avio)) < 0)
1865 av_log(NULL, AV_LOG_ERROR,
1866 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1871 print_final_stats(total_size);
/*
 * Seed an InputFilter's media parameters from demuxer-level codec
 * parameters, used when no decoded frame ever arrived to provide them.
 */
1874 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1876 // We never got any input. Set a fake format, which will
1877 // come from libavformat.
1878 ifilter->format = par->format;
/* Audio parameters. */
1879 ifilter->sample_rate = par->sample_rate;
1880 ifilter->channels = par->channels;
1881 ifilter->channel_layout = par->channel_layout;
/* Video parameters. */
1882 ifilter->width = par->width;
1883 ifilter->height = par->height;
1884 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/*
 * Drain every encoder at end of input: send a NULL frame to each encoding
 * output stream and mux all remaining packets until AVERROR_EOF.  Streams
 * that never received data get a best-effort late initialization first.
 * NOTE(review): loop braces, 'continue's and exits are elided in this view.
 */
1887 static void flush_encoders(void)
1891 for (i = 0; i < nb_output_streams; i++) {
1892 OutputStream *ost = output_streams[i];
1893 AVCodecContext *enc = ost->enc_ctx;
1894 OutputFile *of = output_files[ost->file_index];
/* Stream-copy outputs have no encoder to flush. */
1896 if (!ost->encoding_needed)
1899 // Try to enable encoding with no input frames.
1900 // Maybe we should just let encoding fail instead.
1901 if (!ost->initialized) {
1902 FilterGraph *fg = ost->filter->graph;
1904 av_log(NULL, AV_LOG_WARNING,
1905 "Finishing stream %d:%d without any data written to it.\n",
1906 ost->file_index, ost->st->index);
/* Fall back to codecpar-derived formats so the graph can configure. */
1908 if (ost->filter && !fg->graph) {
1910 for (x = 0; x < fg->nb_inputs; x++) {
1911 InputFilter *ifilter = fg->inputs[x];
1912 if (ifilter->format < 0)
1913 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1916 if (!ifilter_has_all_input_formats(fg))
1919 ret = configure_filtergraph(fg);
1921 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1925 finish_output_stream(ost);
1928 init_output_stream_wrapper(ost, 1);
/* Only audio and video encoders need draining here. */
1931 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1935 const char *desc = NULL;
1939 switch (enc->codec_type) {
1940 case AVMEDIA_TYPE_AUDIO:
1943 case AVMEDIA_TYPE_VIDEO:
1950 av_init_packet(&pkt);
1954 update_benchmark(NULL);
/* EAGAIN means the encoder wants input: feed the NULL (flush) frame. */
1956 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1957 ret = avcodec_send_frame(enc, NULL);
1959 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1966 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1967 if (ret < 0 && ret != AVERROR_EOF) {
1968 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass: append the encoder's pass-1 stats to the log file. */
1973 if (ost->logfile && enc->stats_out) {
1974 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: signal end-of-stream to the muxer. */
1976 if (ret == AVERROR_EOF) {
1977 output_packet(of, &pkt, ost, 1);
1980 if (ost->finished & MUXER_FINISHED) {
1981 av_packet_unref(&pkt);
1984 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1985 pkt_size = pkt.size;
1986 output_packet(of, &pkt, ost, 0);
1987 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1988 do_video_stats(ost, pkt_size);
1995 * Check whether a packet from ist should be written into ost at this time
1997 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1999 OutputFile *of = output_files[ost->file_index];
/* Global index of ist across all input files. */
2000 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* Reject packets from streams this output does not source from. */
2002 if (ost->source_index != ist_index)
/* Reject packets before the requested output start time. */
2008 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy a demuxed packet straight to the output muxer (-c copy path):
 * apply start-time / recording-time limits, then rescale timestamps from
 * the input stream timebase into the output mux timebase.
 * NOTE(review): 'return' statements and some braces are elided in this view.
 */
2014 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2016 OutputFile *of = output_files[ost->file_index];
2017 InputFile *f = input_files [ist->file_index];
2018 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2019 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2022 // EOF: flush output bitstream filters.
2024 av_init_packet(&opkt);
2027 output_packet(of, &opkt, ost, 1);
/* Drop leading non-keyframes unless -copyinkf was given. */
2031 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2032 !ost->copy_initial_nonkeyframes)
/* Drop packets before the effective start point unless -copypriorss. */
2035 if (!ost->frame_number && !ost->copy_prior_start) {
2036 int64_t comp_start = start_time;
2037 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2038 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2039 if (pkt->pts == AV_NOPTS_VALUE ?
2040 ist->pts < comp_start :
2041 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop the stream once the output recording time (-t) is reached. */
2045 if (of->recording_time != INT64_MAX &&
2046 ist->pts >= of->recording_time + start_time) {
2047 close_output_stream(ost);
/* Likewise for a per-input-file recording limit. */
2051 if (f->recording_time != INT64_MAX) {
2052 start_time = f->ctx->start_time;
2053 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2054 start_time += f->start_time;
2055 if (ist->pts >= f->recording_time + start_time) {
2056 close_output_stream(ost);
2061 /* force the input stream PTS */
2062 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* Reference (no copy) the input packet; rescale timestamps below. */
2065 if (av_packet_ref(&opkt, pkt) < 0)
2068 if (pkt->pts != AV_NOPTS_VALUE)
2069 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2071 if (pkt->dts == AV_NOPTS_VALUE) {
2072 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
/* Audio: rescale via sample counts to avoid timestamp rounding drift. */
2073 } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2074 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2076 duration = ist->dec_ctx->frame_size;
2077 opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2078 (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2079 &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2080 /* dts will be set immediately afterwards to what pts is now */
2081 opkt.pts = opkt.dts - ost_tb_start_time;
2083 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2084 opkt.dts -= ost_tb_start_time;
2086 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2088 output_packet(of, &opkt, ost, 0);
/*
 * If the decoder reports no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn about the
 * guess.  NOTE(review): return statements are elided in this extract.
 */
2091 int guess_input_channel_layout(InputStream *ist)
2093 AVCodecContext *dec = ist->dec_ctx;
2095 if (!dec->channel_layout) {
2096 char layout_name[256];
/* Respect the user-imposed cap on how many channels may be guessed. */
2098 if (dec->channels > ist->guess_layout_max)
2100 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2101 if (!dec->channel_layout)
/* Log the guessed layout so the user can verify or override it. */
2103 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2104 dec->channels, dec->channel_layout);
2105 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2106 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Track decode success/failure statistics and report corrupt frames.
 * With -xerror, a decode failure (handling elided here) is fatal.
 */
2111 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
2113 if (*got_output || ret<0)
2114 decode_error_stat[ret<0] ++;
2116 if (ret < 0 && exit_on_error)
2119 if (*got_output && ist) {
/* A decoded-but-corrupt frame is fatal only under -xerror. */
2120 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2121 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2122 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2129 // Filters can be configured only if the formats of all inputs are known.
/* Returns whether every audio/video input of the graph has a known format
 * (format >= 0).  Return statements are elided in this extract. */
2130 static int ifilter_has_all_input_formats(FilterGraph *fg)
2133 for (i = 0; i < fg->nb_inputs; i++) {
2134 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2135 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Push a decoded frame into one filtergraph input.  If the frame's
 * parameters differ from what the graph was configured with (or the graph
 * is not configured yet), either buffer the frame until all inputs are
 * known or drain and reconfigure the graph first.
 * NOTE(review): break/return statements and braces are elided in this view.
 */
2141 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2143 FilterGraph *fg = ifilter->graph;
2144 int need_reinit, ret, i;
2146 /* determine if the parameters for this input changed */
2147 need_reinit = ifilter->format != frame->format;
2149 switch (ifilter->ist->st->codecpar->codec_type) {
2150 case AVMEDIA_TYPE_AUDIO:
2151 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2152 ifilter->channels != frame->channels ||
2153 ifilter->channel_layout != frame->channel_layout;
2155 case AVMEDIA_TYPE_VIDEO:
2156 need_reinit |= ifilter->width != frame->width ||
2157 ifilter->height != frame->height;
/* User disabled reinit and the graph exists: keep the old parameters. */
2161 if (!ifilter->ist->reinit_filters && fg->graph)
/* A change of hw frames context also forces a reconfiguration. */
2164 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2165 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2169 ret = ifilter_parameters_from_frame(ifilter, frame);
2174 /* (re)init the graph if possible, otherwise buffer the frame and return */
2175 if (need_reinit || !fg->graph) {
2176 for (i = 0; i < fg->nb_inputs; i++) {
/* Not all inputs known yet: queue a clone of the frame for later. */
2177 if (!ifilter_has_all_input_formats(fg)) {
2178 AVFrame *tmp = av_frame_clone(frame);
2180 return AVERROR(ENOMEM);
2181 av_frame_unref(frame);
/* Grow the FIFO when full (doubling). */
2183 if (!av_fifo_space(ifilter->frame_queue)) {
2184 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2186 av_frame_free(&tmp);
2190 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain pending output from the old graph before reconfiguring. */
2195 ret = reap_filters(1);
2196 if (ret < 0 && ret != AVERROR_EOF) {
2197 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2201 ret = configure_filtergraph(fg);
2203 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2208 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2210 if (ret != AVERROR_EOF)
2211 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal end-of-stream on one filtergraph input at the given pts.  If the
 * graph was never configured, fall back to codecpar-derived parameters;
 * fail if the input format still cannot be determined.
 */
2218 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2224 if (ifilter->filter) {
/* Configured graph: close the buffer source at the EOF timestamp. */
2225 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2229 // the filtergraph was never configured
2230 if (ifilter->format < 0)
2231 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2232 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2233 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2234 return AVERROR_INVALIDDATA;
2241 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2242 // There is the following difference: if you got a frame, you must call
2243 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2244 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin wrapper mapping the send/receive decode API onto a got_frame-style
 * interface.  *got_frame handling is in lines elided from this extract. */
2245 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2252 ret = avcodec_send_packet(avctx, pkt);
2253 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2254 // decoded frames with avcodec_receive_frame() until done.
2255 if (ret < 0 && ret != AVERROR_EOF)
2259 ret = avcodec_receive_frame(avctx, frame);
2260 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Distribute one decoded frame to every filtergraph input fed by this
 * input stream.  All but the last filter receive a reference; the last
 * gets the frame itself (that branch is elided from this extract).
 */
2268 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2273 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2274 for (i = 0; i < ist->nb_filters; i++) {
/* Intermediate consumers get a new reference to the same data. */
2275 if (i < ist->nb_filters - 1) {
2276 f = ist->filter_frame;
2277 ret = av_frame_ref(f, decoded_frame);
2282 ret = ifilter_send_frame(ist->filters[i], f);
/* EOF from one branch is not an error for the others. */
2283 if (ret == AVERROR_EOF)
2284 ret = 0; /* ignore */
2286 av_log(NULL, AV_LOG_ERROR,
2287 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, fix up the decoded frame's pts (falling back to
 * packet pts or the stream dts), rescale it against the sample rate, and
 * forward the frame to the filtergraphs.
 * NOTE(review): braces, goto targets and the divisor of the next_pts /
 * next_dts increments are elided in this extract.
 */
2294 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2297 AVFrame *decoded_frame;
2298 AVCodecContext *avctx = ist->dec_ctx;
2300 AVRational decoded_frame_tb;
/* Lazily allocate the reusable decode / filter-feed frames. */
2302 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2303 return AVERROR(ENOMEM);
2304 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2305 return AVERROR(ENOMEM);
2306 decoded_frame = ist->decoded_frame;
2308 update_benchmark(NULL);
2309 ret = decode(avctx, decoded_frame, got_output, pkt);
2310 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* A successful decode with a nonsensical sample rate is rejected. */
2314 if (ret >= 0 && avctx->sample_rate <= 0) {
2315 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2316 ret = AVERROR_INVALIDDATA;
2319 if (ret != AVERROR_EOF)
2320 check_decode_result(ist, got_output, ret);
2322 if (!*got_output || ret < 0)
2325 ist->samples_decoded += decoded_frame->nb_samples;
2326 ist->frames_decoded++;
2328 /* increment next_dts to use for the case where the input stream does not
2329 have timestamps or there are multiple frames in the packet */
2330 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2332 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Choose the timestamp source: frame pts, else packet pts, else dts. */
2335 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2336 decoded_frame_tb = ist->st->time_base;
2337 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2338 decoded_frame->pts = pkt->pts;
2339 decoded_frame_tb = ist->st->time_base;
2341 decoded_frame->pts = ist->dts;
2342 decoded_frame_tb = AV_TIME_BASE_Q;
/* Rescale via sample counts to keep audio timestamps drift-free. */
2344 if (decoded_frame->pts != AV_NOPTS_VALUE)
2345 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2346 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2347 (AVRational){1, avctx->sample_rate});
2348 ist->nb_samples = decoded_frame->nb_samples;
2349 err = send_frame_to_filters(ist, decoded_frame);
2351 av_frame_unref(ist->filter_frame);
2352 av_frame_unref(decoded_frame);
/* Filter-injection errors take precedence over the decode result. */
2353 return err < 0 ? err : ret;
/* Decode one video packet (pkt == NULL drains the decoder) and forward any
 * produced frame to the stream's filtergraphs.  On output, *got_output is
 * set when a frame was decoded and *duration_pts receives the frame's
 * duration in stream time base.  Returns a negative AVERROR on failure.
 * NOTE(review): this view of the file is line-sampled; some statements
 * below are fragments of larger constructs. */
2356 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2359 AVFrame *decoded_frame;
2360 int i, ret = 0, err = 0;
2361 int64_t best_effort_timestamp;
2362 int64_t dts = AV_NOPTS_VALUE;
2365 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2366 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2368 if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the per-stream reusable frames. */
2371 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2372 return AVERROR(ENOMEM);
2373 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2374 return AVERROR(ENOMEM);
2375 decoded_frame = ist->decoded_frame;
/* Convert the stream-global dts (AV_TIME_BASE units) back into the
 * stream's own time base for the packet we are about to decode. */
2376 if (ist->dts != AV_NOPTS_VALUE)
2377 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2380 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2383 // The old code used to set dts on the drain packet, which does not work
2384 // with the new API anymore.
/* Queue the dts instead so it can be recovered at EOF for frames whose
 * best-effort timestamp is unknown (see the drain path below). */
2386 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2388 return AVERROR(ENOMEM);
2389 ist->dts_buffer = new;
2390 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2393 update_benchmark(NULL);
2394 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2395 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2399 // The following line may be required in some cases where there is no parser
2400 // or the parser does not has_b_frames correctly
2401 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2402 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2403 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2405 av_log(ist->dec_ctx, AV_LOG_WARNING,
2406 "video_delay is larger in decoder than demuxer %d > %d.\n"
2407 "If you want to help, upload a sample "
2408 "of this file to https://streams.videolan.org/upload/ "
2409 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2410 ist->dec_ctx->has_b_frames,
2411 ist->st->codecpar->video_delay);
2414 if (ret != AVERROR_EOF)
2415 check_decode_result(ist, got_output, ret);
/* Debug aid: the decoder context and the produced frame should agree on
 * geometry/format; a mismatch is only logged, not treated as an error. */
2417 if (*got_output && ret >= 0) {
2418 if (ist->dec_ctx->width != decoded_frame->width ||
2419 ist->dec_ctx->height != decoded_frame->height ||
2420 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2421 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2422 decoded_frame->width,
2423 decoded_frame->height,
2424 decoded_frame->format,
2425 ist->dec_ctx->width,
2426 ist->dec_ctx->height,
2427 ist->dec_ctx->pix_fmt);
2431 if (!*got_output || ret < 0)
/* A user-forced field order (-top) overrides what the decoder reported. */
2434 if(ist->top_field_first>=0)
2435 decoded_frame->top_field_first = ist->top_field_first;
2437 ist->frames_decoded++;
/* Download the frame from hardware memory when a hwaccel produced it. */
2439 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2440 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2444 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2446 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2447 *duration_pts = decoded_frame->pkt_duration;
/* With a forced input framerate (-r before -i) timestamps are simply
 * sequential frame indices (CFR). */
2449 if (ist->framerate.num)
2450 best_effort_timestamp = ist->cfr_next_pts++;
/* While draining, fall back to the queued packet dts values saved above. */
2452 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2453 best_effort_timestamp = ist->dts_buffer[0];
2455 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2456 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2457 ist->nb_dts_buffer--;
2460 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2461 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2463 if (ts != AV_NOPTS_VALUE)
2464 ist->next_pts = ist->pts = ts;
2468 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2469 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2470 ist->st->index, av_ts2str(decoded_frame->pts),
2471 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2472 best_effort_timestamp,
2473 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2474 decoded_frame->key_frame, decoded_frame->pict_type,
2475 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level SAR takes precedence over the codec-level one. */
2478 if (ist->st->sample_aspect_ratio.num)
2479 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2481 err = send_frame_to_filters(ist, decoded_frame);
2484 av_frame_unref(ist->filter_frame);
2485 av_frame_unref(decoded_frame);
2486 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally fix its duration against the
 * previous subtitle (-fix_sub_duration), feed it to sub2video filtering
 * when attached, and encode it to all matching subtitle output streams.
 * NOTE(review): line-sampled view; some constructs below are fragments. */
2489 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2492 AVSubtitle subtitle;
2494 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2495 &subtitle, got_output, pkt);
2497 check_decode_result(NULL, got_output, ret);
2499 if (ret < 0 || !*got_output) {
2502 sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle's display time so it
 * ends when the current one starts; works one subtitle behind, hence
 * the FFSWAPs exchanging current and previous state. */
2506 if (ist->fix_sub_duration) {
2508 if (ist->prev_sub.got_output) {
2509 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2510 1000, AV_TIME_BASE);
2511 if (end < ist->prev_sub.subtitle.end_display_time) {
2512 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2513 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2514 ist->prev_sub.subtitle.end_display_time, end,
2515 end <= 0 ? ", dropping it" : "");
2516 ist->prev_sub.subtitle.end_display_time = end;
2519 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2520 FFSWAP(int, ret, ist->prev_sub.ret);
2521 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video: render immediately if the frame exists, otherwise queue the
 * subtitle until the filtergraph is configured. */
2529 if (ist->sub2video.frame) {
2530 sub2video_update(ist, INT64_MIN, &subtitle);
2531 } else if (ist->nb_filters) {
2532 if (!ist->sub2video.sub_queue)
2533 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2534 if (!ist->sub2video.sub_queue)
2536 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2537 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2541 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2545 if (!subtitle.num_rects)
2548 ist->frames_decoded++;
/* Encode the decoded subtitle on every eligible subtitle output stream. */
2550 for (i = 0; i < nb_output_streams; i++) {
2551 OutputStream *ost = output_streams[i];
2553 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2554 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2557 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2562 avsubtitle_free(&subtitle);
/* Signal EOF (with the last known pts, converted back to stream time base)
 * to every filtergraph input fed by this input stream. */
2566 static int send_filter_eof(InputStream *ist)
2569 /* TODO keep pts also in stream time base to avoid converting back */
2570 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2571 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2573 for (i = 0; i < ist->nb_filters; i++) {
2574 ret = ifilter_send_eof(ist->filters[i], pts);
2581 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central demuxed-packet dispatcher: maintains the stream's dts/pts
 * bookkeeping, runs the decode loop when decoding is needed, and handles
 * the stream-copy path otherwise.  Returns 0 once EOF has been fully
 * processed, non-zero while more input is expected.
 * NOTE(review): line-sampled view; some constructs below are fragments. */
2582 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2586 int eof_reached = 0;
/* First packet: seed ist->dts/pts; B-frame delay is compensated using the
 * average frame rate when available. */
2589 if (!ist->saw_first_ts) {
2590 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2592 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2593 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2594 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2596 ist->saw_first_ts = 1;
2599 if (ist->next_dts == AV_NOPTS_VALUE)
2600 ist->next_dts = ist->dts;
2601 if (ist->next_pts == AV_NOPTS_VALUE)
2602 ist->next_pts = ist->pts;
2606 av_init_packet(&avpkt);
/* Resynchronize our AV_TIME_BASE-unit clocks from the packet dts. */
2613 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2614 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2615 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2616 ist->next_pts = ist->pts = ist->dts;
2619 // while we have more to decode or while the decoder did output something on EOF
2620 while (ist->decoding_needed) {
2621 int64_t duration_dts = 0;
2622 int64_t duration_pts = 0;
2624 int decode_failed = 0;
2626 ist->pts = ist->next_pts;
2627 ist->dts = ist->next_dts;
2629 switch (ist->dec_ctx->codec_type) {
2630 case AVMEDIA_TYPE_AUDIO:
2631 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2634 case AVMEDIA_TYPE_VIDEO:
2635 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
/* Advance next_dts: packet duration wins; otherwise derive it from the
 * codec framerate and the parser's repeat_pict/ticks_per_frame. */
2637 if (!repeating || !pkt || got_output) {
2638 if (pkt && pkt->duration) {
2639 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2640 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2641 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2642 duration_dts = ((int64_t)AV_TIME_BASE *
2643 ist->dec_ctx->framerate.den * ticks) /
2644 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2647 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2648 ist->next_dts += duration_dts;
2650 ist->next_dts = AV_NOPTS_VALUE;
/* next_pts advances by the frame duration when known, else by the
 * dts-derived duration. */
2654 if (duration_pts > 0) {
2655 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2657 ist->next_pts += duration_dts;
2661 case AVMEDIA_TYPE_SUBTITLE:
2664 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2665 if (!pkt && ret >= 0)
2672 if (ret == AVERROR_EOF) {
2678 if (decode_failed) {
2679 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2680 ist->file_index, ist->st->index, av_err2str(ret));
2682 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2683 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2685 if (!decode_failed || exit_on_error)
2691 ist->got_output = 1;
2696 // During draining, we might get multiple output frames in this loop.
2697 // ffmpeg.c does not drain the filter chain on configuration changes,
2698 // which means if we send multiple frames at once to the filters, and
2699 // one of those frames changes configuration, the buffered frames will
2700 // be lost. This can upset certain FATE tests.
2701 // Decode only 1 frame per call on EOF to appease these FATE tests.
2702 // The ideal solution would be to rewrite decoding to use the new
2703 // decoding API in a better way.
2710 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2711 /* except when looping we need to flush but not to send an EOF */
2712 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2713 int ret = send_filter_eof(ist);
2715 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2720 /* handle stream copy */
2721 if (!ist->decoding_needed && pkt) {
2722 ist->dts = ist->next_dts;
2723 switch (ist->dec_ctx->codec_type) {
2724 case AVMEDIA_TYPE_AUDIO:
2725 av_assert1(pkt->duration >= 0);
2726 if (ist->dec_ctx->sample_rate) {
2727 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2728 ist->dec_ctx->sample_rate;
2730 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2733 case AVMEDIA_TYPE_VIDEO:
2734 if (ist->framerate.num) {
2735 // TODO: Remove work-around for c99-to-c89 issue 7
2736 AVRational time_base_q = AV_TIME_BASE_Q;
2737 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2738 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2739 } else if (pkt->duration) {
2740 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2741 } else if(ist->dec_ctx->framerate.num != 0) {
2742 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2743 ist->next_dts += ((int64_t)AV_TIME_BASE *
2744 ist->dec_ctx->framerate.den * ticks) /
2745 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2749 ist->pts = ist->dts;
2750 ist->next_pts = ist->next_dts;
/* Copy the packet to every output stream doing streamcopy from here. */
2752 for (i = 0; i < nb_output_streams; i++) {
2753 OutputStream *ost = output_streams[i];
2755 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2758 do_streamcopy(ist, ost, pkt);
2761 return !eof_reached;
/* Generate an SDP description covering all RTP output files and either
 * print it to stdout or write it to -sdp_file.  Waits until every output
 * file has written its header. */
2764 static void print_sdp(void)
2769 AVIOContext *sdp_pb;
2770 AVFormatContext **avc;
2772 for (i = 0; i < nb_output_files; i++) {
2773 if (!output_files[i]->header_written)
2777 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the muxers that are actually RTP. */
2780 for (i = 0, j = 0; i < nb_output_files; i++) {
2781 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2782 avc[j] = output_files[i]->ctx;
2790 av_sdp_create(avc, j, sdp, sizeof(sdp));
2792 if (!sdp_filename) {
2793 printf("SDP:\n%s\n", sdp);
2796 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2797 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2799 avio_print(sdp_pb, sdp);
2800 avio_closep(&sdp_pb);
2801 av_freep(&sdp_filename);
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick one for which a requested/auto hardware acceleration
 * can be initialized; falls back to the first non-hwaccel format the
 * decoder offers (loop exit not visible in this sampled view).
 * NOTE(review): line-sampled view; some constructs below are fragments. */
2809 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2811 InputStream *ist = s->opaque;
2812 const enum AVPixelFormat *p;
2815 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2816 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2817 const AVCodecHWConfig *config = NULL;
/* Software formats are only acceptable as the final fallback. */
2820 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Generic/auto hwaccel: probe the decoder's hw configs for one that
 * matches this pixel format and supports a device context. */
2823 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2824 ist->hwaccel_id == HWACCEL_AUTO) {
2826 config = avcodec_get_hw_config(s->codec, i);
2829 if (!(config->methods &
2830 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2832 if (config->pix_fmt == *p)
2837 if (config->device_type != ist->hwaccel_device_type) {
2838 // Different hwaccel offered, ignore.
2842 ret = hwaccel_decode_init(s);
2844 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2845 av_log(NULL, AV_LOG_FATAL,
2846 "%s hwaccel requested for input stream #%d:%d, "
2847 "but cannot be initialized.\n",
2848 av_hwdevice_get_type_name(config->device_type),
2849 ist->file_index, ist->st->index);
2850 return AV_PIX_FMT_NONE;
/* Legacy path: look the format up in the static hwaccels[] table. */
2855 const HWAccel *hwaccel = NULL;
2857 for (i = 0; hwaccels[i].name; i++) {
2858 if (hwaccels[i].pix_fmt == *p) {
2859 hwaccel = &hwaccels[i];
2864 // No hwaccel supporting this pixfmt.
2867 if (hwaccel->id != ist->hwaccel_id) {
2868 // Does not match requested hwaccel.
2872 ret = hwaccel->init(s);
2874 av_log(NULL, AV_LOG_FATAL,
2875 "%s hwaccel requested for input stream #%d:%d, "
2876 "but cannot be initialized.\n", hwaccel->name,
2877 ist->file_index, ist->st->index);
2878 return AV_PIX_FMT_NONE;
/* Propagate a pre-existing hw frames context to the decoder. */
2882 if (ist->hw_frames_ctx) {
2883 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2884 if (!s->hw_frames_ctx)
2885 return AV_PIX_FMT_NONE;
2888 ist->hwaccel_pix_fmt = *p;
2895 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2897 InputStream *ist = s->opaque;
2899 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2900 return ist->hwaccel_get_buffer(s, frame, flags);
2902 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream when decoding is needed: installs
 * the hwaccel-aware callbacks, applies stream-type-specific decoder
 * options, sets up the hw device, and calls avcodec_open2().  On failure
 * a message is written into 'error' and a negative AVERROR is returned.
 * NOTE(review): line-sampled view; some constructs below are fragments. */
2905 static int init_input_stream(int ist_index, char *error, int error_len)
2908 InputStream *ist = input_streams[ist_index];
2910 if (ist->decoding_needed) {
2911 AVCodec *codec = ist->dec;
2913 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2914 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2915 return AVERROR(EINVAL);
/* Hook up the hwaccel negotiation/allocation callbacks (see get_format
 * and get_buffer above). */
2918 ist->dec_ctx->opaque = ist;
2919 ist->dec_ctx->get_format = get_format;
2920 ist->dec_ctx->get_buffer2 = get_buffer;
2921 ist->dec_ctx->thread_safe_callbacks = 1;
2923 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2924 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2925 (ist->decoding_needed & DECODING_FOR_OST)) {
2926 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2927 if (ist->decoding_needed & DECODING_FOR_FILTER)
2928 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2931 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2933 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2934 * audio, and video decoders such as cuvid or mediacodec */
2935 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2937 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2938 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2939 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2940 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2941 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2943 ret = hw_device_setup_for_decode(ist);
2945 snprintf(error, error_len, "Device setup failed for "
2946 "decoder on input stream #%d:%d : %s",
2947 ist->file_index, ist->st->index, av_err2str(ret));
2951 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2952 if (ret == AVERROR_EXPERIMENTAL)
2953 abort_codec_experimental(codec, 0);
2955 snprintf(error, error_len,
2956 "Error while opening decoder for input stream "
2958 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left unconsumed by avcodec_open2 is a user error. */
2961 assert_avoptions(ist->decoder_opts);
2964 ist->next_pts = AV_NOPTS_VALUE;
2965 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream feeding this output stream, if any.
 * NOTE(review): the no-source path (source_index < 0) is not visible in
 * this sampled view — presumably returns NULL; confirm in full source. */
2970 static InputStream *get_input_stream(OutputStream *ost)
2972 if (ost->source_index >= 0)
2973 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values: returns -1, 0 or 1 for
 * less-than, equal and greater-than respectively (the three-way
 * (x > y) - (x < y) idiom, overflow-safe unlike plain subtraction). */
static int compare_int64(const void *a, const void *b)
{
    const int64_t x = *(const int64_t *)a;
    const int64_t y = *(const int64_t *)b;

    return (x > y) - (x < y);
}
2982 /* open the muxer when all the streams are initialized */
/* Once every stream of the file is initialized: write the header, dump the
 * format, emit the SDP if requested, and flush packets queued while the
 * muxer was not yet open.  Returns a negative AVERROR on header failure.
 * NOTE(review): line-sampled view; some constructs below are fragments. */
2983 static int check_init_output_file(OutputFile *of, int file_index)
2987 for (i = 0; i < of->ctx->nb_streams; i++) {
2988 OutputStream *ost = output_streams[of->ost_index + i];
2989 if (!ost->initialized)
2993 of->ctx->interrupt_callback = int_cb;
2995 ret = avformat_write_header(of->ctx, &of->opts);
2997 av_log(NULL, AV_LOG_ERROR,
2998 "Could not write header for output file #%d "
2999 "(incorrect codec parameters ?): %s\n",
3000 file_index, av_err2str(ret));
3003 //assert_avoptions(of->opts);
3004 of->header_written = 1;
3006 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3008 if (sdp_filename || want_sdp)
3011 /* flush the muxing queues */
3012 for (i = 0; i < of->ctx->nb_streams; i++) {
3013 OutputStream *ost = output_streams[of->ost_index + i];
3015 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3016 if (!av_fifo_size(ost->muxing_queue))
3017 ost->mux_timebase = ost->st->time_base;
3019 while (av_fifo_size(ost->muxing_queue)) {
3021 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3022 ost->muxing_queue_data_size -= pkt.size;
3023 write_packet(of, &pkt, ost, 1);
/* Initialize the output stream's bitstream filter (if any): feed it the
 * stream's codec parameters and time base, then copy the filter's output
 * parameters/time base back onto the stream. */
3030 static int init_output_bsfs(OutputStream *ost)
3032 AVBSFContext *ctx = ost->bsf_ctx;
3038 ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3042 ctx->time_base_in = ost->st->time_base;
3044 ret = av_bsf_init(ctx);
3046 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3051 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3054 ost->st->time_base = ctx->time_base_out;
/* Initialize an output stream in stream-copy mode: transfer the input
 * stream's codec parameters, codec tag, timing, disposition and side data
 * to the output stream, with per-media-type fixups.
 * NOTE(review): line-sampled view; some constructs below are fragments. */
3059 static int init_output_stream_streamcopy(OutputStream *ost)
3061 OutputFile *of = output_files[ost->file_index];
3062 InputStream *ist = get_input_stream(ost);
3063 AVCodecParameters *par_dst = ost->st->codecpar;
3064 AVCodecParameters *par_src = ost->ref_par;
3067 uint32_t codec_tag = par_dst->codec_tag;
3069 av_assert0(ist && !ost->filter);
/* Round-trip the input parameters through enc_ctx so user -c:copy options
 * in encoder_opts can still take effect. */
3071 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3073 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3075 av_log(NULL, AV_LOG_FATAL,
3076 "Error setting up codec context options.\n");
3080 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3082 av_log(NULL, AV_LOG_FATAL,
3083 "Error getting reference codec parameters.\n");
/* Keep the source codec tag only when the target container can represent
 * it (or has no tag table at all). */
3088 unsigned int codec_tag_tmp;
3089 if (!of->ctx->oformat->codec_tag ||
3090 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3091 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3092 codec_tag = par_src->codec_tag;
3095 ret = avcodec_parameters_copy(par_dst, par_src);
3099 par_dst->codec_tag = codec_tag;
3101 if (!ost->frame_rate.num)
3102 ost->frame_rate = ist->framerate;
3103 ost->st->avg_frame_rate = ost->frame_rate;
3105 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3109 // copy timebase while removing common factors
3110 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3111 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3113 // copy estimated duration as a hint to the muxer
3114 if (ost->st->duration <= 0 && ist->st->duration > 0)
3115 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3118 ost->st->disposition = ist->st->disposition;
3120 if (ist->st->nb_side_data) {
3121 for (i = 0; i < ist->st->nb_side_data; i++) {
3122 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3125 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3127 return AVERROR(ENOMEM);
3128 memcpy(dst_data, sd_src->data, sd_src->size);
/* A user-forced rotation overrides any copied display matrix. */
3132 if (ost->rotate_overridden) {
3133 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3134 sizeof(int32_t) * 9);
3136 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3139 switch (par_dst->codec_type) {
3140 case AVMEDIA_TYPE_AUDIO:
3141 if (audio_volume != 256) {
3142 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Some muxer-written block_align values are bogus for MP3/AC3; clear them. */
3145 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3146 par_dst->block_align= 0;
3147 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3148 par_dst->block_align= 0;
3150 case AVMEDIA_TYPE_VIDEO:
3151 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3153 av_mul_q(ost->frame_aspect_ratio,
3154 (AVRational){ par_dst->height, par_dst->width });
3155 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3156 "with stream copy may produce invalid files\n");
3158 else if (ist->st->sample_aspect_ratio.num)
3159 sar = ist->st->sample_aspect_ratio;
3161 sar = par_src->sample_aspect_ratio;
3162 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3163 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3164 ost->st->r_frame_rate = ist->st->r_frame_rate;
3168 ost->mux_timebase = ist->st->time_base;
/* Set the "encoder" metadata tag on the output stream to the libavcodec
 * identification string plus encoder name, unless the user already set
 * one or bitexact mode (format or codec level) is in effect. */
3173 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3175 AVDictionaryEntry *e;
3177 uint8_t *encoder_string;
3178 int encoder_string_len;
3179 int format_flags = 0;
3180 int codec_flags = ost->enc_ctx->flags;
3182 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate user-supplied fflags/flags option strings to detect +bitexact. */
3185 e = av_dict_get(of->opts, "fflags", NULL, 0);
3187 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3190 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3192 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3194 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3197 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3200 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3201 encoder_string = av_mallocz(encoder_string_len);
3202 if (!encoder_string)
/* In bitexact mode use the stable "Lavc" prefix instead of the full
 * version string, keeping output reproducible. */
3205 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3206 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3208 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3209 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3210 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3211 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames specification (comma-separated timestamps,
 * optionally "chapters[+offset]") into a sorted array of pts values in the
 * encoder time base, stored in ost->forced_kf_pts/forced_kf_count.
 * NOTE(review): line-sampled view; some constructs below are fragments. */
3214 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3215 AVCodecContext *avctx)
3218 int n = 1, i, size, index = 0;
/* First pass: count the comma-separated entries. */
3221 for (p = kf; *p; p++)
3225 pts = av_malloc_array(size, sizeof(*pts));
3227 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3232 for (i = 0; i < n; i++) {
3233 char *next = strchr(p, ',');
/* "chapters[+offset]": expand to one keyframe per chapter start. */
3238 if (!memcmp(p, "chapters", 8)) {
3240 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3243 if (avf->nb_chapters > INT_MAX - size ||
3244 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3246 av_log(NULL, AV_LOG_FATAL,
3247 "Could not allocate forced key frames array.\n");
3250 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3251 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3253 for (j = 0; j < avf->nb_chapters; j++) {
3254 AVChapter *c = avf->chapters[j];
3255 av_assert1(index < size);
3256 pts[index++] = av_rescale_q(c->start, c->time_base,
3257 avctx->time_base) + t;
/* Plain timestamp entry. */
3262 t = parse_time_or_die("force_key_frames", p, 1);
3263 av_assert1(index < size);
3264 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the list sorted so the encoder can consume it sequentially. */
3271 av_assert0(index == size);
3272 qsort(pts, size, sizeof(*pts), compare_int64);
3273 ost->forced_kf_count = size;
3274 ost->forced_kf_pts = pts;
/* Choose the encoder time base: an explicit -enc_time_base value wins; a
 * negative value means "copy the input stream's time base"; otherwise the
 * caller-provided per-media-type default is used. */
3277 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3279 InputStream *ist = get_input_stream(ost);
3280 AVCodecContext *enc_ctx = ost->enc_ctx;
3281 AVFormatContext *oc;
3283 if (ost->enc_timebase.num > 0) {
3284 enc_ctx->time_base = ost->enc_timebase;
3288 if (ost->enc_timebase.num < 0) {
3290 enc_ctx->time_base = ist->st->time_base;
/* No input stream to copy from: warn and fall through to the default. */
3294 oc = output_files[ost->file_index]->ctx;
3295 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3298 enc_ctx->time_base = default_time_base;
/* Configure the encoder context of an output stream that will be encoded:
 * disposition, frame rate selection, per-media-type format/size/time-base
 * parameters pulled from the buffersink, and forced-keyframe parsing.
 * NOTE(review): line-sampled view; some constructs below are fragments. */
3301 static int init_output_stream_encode(OutputStream *ost)
3303 InputStream *ist = get_input_stream(ost);
3304 AVCodecContext *enc_ctx = ost->enc_ctx;
3305 AVCodecContext *dec_ctx = NULL;
3306 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3309 set_encoder_id(output_files[ost->file_index], ost);
3311 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3312 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3313 // which have to be filtered out to prevent leaking them to output files.
3314 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3317 ost->st->disposition = ist->st->disposition;
3319 dec_ctx = ist->dec_ctx;
3321 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its type in the file, mark it as the
 * default audio/video stream. */
3323 for (j = 0; j < oc->nb_streams; j++) {
3324 AVStream *st = oc->streams[j];
3325 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3328 if (j == oc->nb_streams)
3329 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3330 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3331 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate selection, in priority order: -r, filtergraph, input -r,
 * input stream r_frame_rate, then a 25 fps last resort. */
3334 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3335 if (!ost->frame_rate.num)
3336 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3337 if (ist && !ost->frame_rate.num)
3338 ost->frame_rate = ist->framerate;
3339 if (ist && !ost->frame_rate.num)
3340 ost->frame_rate = ist->st->r_frame_rate;
3341 if (ist && !ost->frame_rate.num) {
3342 ost->frame_rate = (AVRational){25, 1};
3343 av_log(NULL, AV_LOG_WARNING,
3345 "about the input framerate is available. Falling "
3346 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3347 "if you want a different framerate.\n",
3348 ost->file_index, ost->index);
/* Clamp to the encoder's supported rates unless -force_fps was given. */
3351 if (ost->enc->supported_framerates && !ost->force_fps) {
3352 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3353 ost->frame_rate = ost->enc->supported_framerates[idx];
3355 // reduce frame rate for mpeg4 to be within the spec limits
3356 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3357 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3358 ost->frame_rate.num, ost->frame_rate.den, 65535);
3362 switch (enc_ctx->codec_type) {
3363 case AVMEDIA_TYPE_AUDIO:
/* Audio format/rate/layout come from the buffersink output. */
3364 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3366 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3367 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3368 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3369 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3370 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3372 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3375 case AVMEDIA_TYPE_VIDEO:
3376 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3378 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3379 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3380 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3381 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3382 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3383 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3386 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3387 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3388 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3389 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3390 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3391 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3393 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3395 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3396 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3398 enc_ctx->framerate = ost->frame_rate;
3400 ost->st->avg_frame_rate = ost->frame_rate;
/* When geometry/format changed relative to the input, the decoder's raw
 * sample depth no longer applies. */
3403 enc_ctx->width != dec_ctx->width ||
3404 enc_ctx->height != dec_ctx->height ||
3405 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3406 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3409 if (ost->top_field_first == 0) {
3410 enc_ctx->field_order = AV_FIELD_BB;
3411 } else if (ost->top_field_first == 1) {
3412 enc_ctx->field_order = AV_FIELD_TT;
/* -force_key_frames: "expr:" prefix compiles an expression evaluated per
 * frame; "source" keeps input keyframes; otherwise a static list. */
3415 if (ost->forced_keyframes) {
3416 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3417 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3418 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3420 av_log(NULL, AV_LOG_ERROR,
3421 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3424 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3425 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3426 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3427 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3429 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3430 // parse it only for static kf timings
3431 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3432 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3436 case AVMEDIA_TYPE_SUBTITLE:
3437 enc_ctx->time_base = AV_TIME_BASE_Q;
3438 if (!enc_ctx->width) {
3439 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3440 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3443 case AVMEDIA_TYPE_DATA:
3450 ost->mux_timebase = enc_ctx->time_base;
3455 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3459 if (ost->encoding_needed) {
3460 AVCodec *codec = ost->enc;
3461 AVCodecContext *dec = NULL;
3464 ret = init_output_stream_encode(ost);
3468 if ((ist = get_input_stream(ost)))
3470 if (dec && dec->subtitle_header) {
3471 /* ASS code assumes this buffer is null terminated so add extra byte. */
3472 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3473 if (!ost->enc_ctx->subtitle_header)
3474 return AVERROR(ENOMEM);
3475 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3476 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3478 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3479 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3480 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3482 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3483 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3484 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3486 ret = hw_device_setup_for_encode(ost);
3488 snprintf(error, error_len, "Device setup failed for "
3489 "encoder on output stream #%d:%d : %s",
3490 ost->file_index, ost->index, av_err2str(ret));
3494 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3495 int input_props = 0, output_props = 0;
3496 AVCodecDescriptor const *input_descriptor =
3497 avcodec_descriptor_get(dec->codec_id);
3498 AVCodecDescriptor const *output_descriptor =
3499 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3500 if (input_descriptor)
3501 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3502 if (output_descriptor)
3503 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3504 if (input_props && output_props && input_props != output_props) {
3505 snprintf(error, error_len,
3506 "Subtitle encoding currently only possible from text to text "
3507 "or bitmap to bitmap");
3508 return AVERROR_INVALIDDATA;
3512 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3513 if (ret == AVERROR_EXPERIMENTAL)
3514 abort_codec_experimental(codec, 1);
3515 snprintf(error, error_len,
3516 "Error while opening encoder for output stream #%d:%d - "
3517 "maybe incorrect parameters such as bit_rate, rate, width or height",
3518 ost->file_index, ost->index);
3521 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3522 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3523 av_buffersink_set_frame_size(ost->filter->filter,
3524 ost->enc_ctx->frame_size);
3525 assert_avoptions(ost->encoder_opts);
3526 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3527 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3528 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3529 " It takes bits/s as argument, not kbits/s\n");
3531 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3533 av_log(NULL, AV_LOG_FATAL,
3534 "Error initializing the output stream codec context.\n");
3538 * FIXME: ost->st->codec should't be needed here anymore.
3540 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3544 if (ost->enc_ctx->nb_coded_side_data) {
3547 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3548 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3551 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3553 return AVERROR(ENOMEM);
3554 memcpy(dst_data, sd_src->data, sd_src->size);
3559 * Add global input side data. For now this is naive, and copies it
3560 * from the input stream's global side data. All side data should
3561 * really be funneled over AVFrame and libavfilter, then added back to
3562 * packet side data, and then potentially using the first packet for
3567 for (i = 0; i < ist->st->nb_side_data; i++) {
3568 AVPacketSideData *sd = &ist->st->side_data[i];
3569 if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3570 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3572 return AVERROR(ENOMEM);
3573 memcpy(dst, sd->data, sd->size);
3574 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3575 av_display_rotation_set((uint32_t *)dst, 0);
3580 // copy timebase while removing common factors
3581 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3582 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3584 // copy estimated duration as a hint to the muxer
3585 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3586 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3588 ost->st->codec->codec= ost->enc_ctx->codec;
3589 } else if (ost->stream_copy) {
3590 ret = init_output_stream_streamcopy(ost);
3595 // parse user provided disposition, and update stream values
3596 if (ost->disposition) {
3597 static const AVOption opts[] = {
3598 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3599 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3600 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3601 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3602 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3603 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3604 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3605 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3606 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3607 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3608 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3609 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3610 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3611 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3612 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3613 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3616 static const AVClass class = {
3618 .item_name = av_default_item_name,
3620 .version = LIBAVUTIL_VERSION_INT,
3622 const AVClass *pclass = &class;
3624 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3629 /* initialize bitstream filters for the output stream
3630 * needs to be done here, because the codec id for streamcopy is not
3631 * known until now */
3632 ret = init_output_bsfs(ost);
3636 ost->initialized = 1;
3638 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn, once per stream index, when a packet arrives on a stream that was not
 * present during initial stream discovery; such packets are ignored by the
 * caller. (NOTE(review): this listing is a sampled excerpt — brace/return
 * lines between the visible statements are elided.) */
3645 static void report_new_stream(int input_index, AVPacket *pkt)
3647     InputFile *file = input_files[input_index];
3648     AVStream *st = file->ctx->streams[pkt->stream_index];
     /* already warned about this index (and every lower one) — skip */
3650 if (pkt->stream_index < file->nb_streams_warn)
3652     av_log(file->ctx, AV_LOG_WARNING,
3653            "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3654            av_get_media_type_string(st->codecpar->codec_type),
3655            input_index, pkt->stream_index,
3656            pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
     /* remember the highest index warned about so each new stream logs once */
3657     file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to a source stream, initialize input streams/decoders, open
 * encoders not fed by filtergraphs, discard unused programs, write headers
 * for stream-less outputs, and print the stream mapping. Returns 0 on
 * success, a negative AVERROR on failure (error text logged from 'error').
 * NOTE(review): sampled excerpt — some lines (braces, 'continue'/'break',
 * gotos to the error path) are elided from this listing. */
3660 static int transcode_init(void)
3662     int ret = 0, i, j, k;
3663     AVFormatContext *oc;
3666     char error[1024] = {0};
     /* give ost->source_index a best-effort value for outputs driven by a
      * single-input complex filtergraph, so later mapping/report code works */
3668     for (i = 0; i < nb_filtergraphs; i++) {
3669         FilterGraph *fg = filtergraphs[i];
3670         for (j = 0; j < fg->nb_outputs; j++) {
3671             OutputFilter *ofilter = fg->outputs[j];
3672             if (!ofilter->ost || ofilter->ost->source_index >= 0)
3674             if (fg->nb_inputs != 1)
3676             for (k = nb_input_streams-1; k >= 0 ; k--)
3677                 if (fg->inputs[0]->ist == input_streams[k])
3679             ofilter->ost->source_index = k;
3683     /* init framerate emulation */
3684     for (i = 0; i < nb_input_files; i++) {
3685         InputFile *ifile = input_files[i];
3686         if (ifile->rate_emu)
3687             for (j = 0; j < ifile->nb_streams; j++)
                 /* record a per-stream wallclock start for -re pacing */
3688                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3691     /* init input streams */
3692     for (i = 0; i < nb_input_streams; i++)
3693         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
             /* on failure, close any encoder contexts before bailing out */
3694             for (i = 0; i < nb_output_streams; i++) {
3695                 ost = output_streams[i];
3696                 avcodec_close(ost->enc_ctx);
3701     /* open each encoder */
3702     for (i = 0; i < nb_output_streams; i++) {
3703         // skip streams fed from filtergraphs until we have a frame for them
3704         if (output_streams[i]->filter)
3707         ret = init_output_stream_wrapper(output_streams[i], 0);
3712     /* discard unused programs */
3713     for (i = 0; i < nb_input_files; i++) {
3714         InputFile *ifile = input_files[i];
3715         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3716             AVProgram *p = ifile->ctx->programs[j];
3717             int discard = AVDISCARD_ALL;
             /* keep the program if any of its member streams is used */
3719             for (k = 0; k < p->nb_stream_indexes; k++)
3720                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3721                     discard = AVDISCARD_DEFAULT;
3724             p->discard = discard;
3728     /* write headers for files with no streams */
3729     for (i = 0; i < nb_output_files; i++) {
3730         oc = output_files[i]->ctx;
3731         if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3732             ret = check_init_output_file(output_files[i], i);
3739     /* dump the stream mapping */
3740     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3741     for (i = 0; i < nb_input_streams; i++) {
3742         ist = input_streams[i];
         /* report inputs feeding complex (non-simple) filtergraphs */
3744         for (j = 0; j < ist->nb_filters; j++) {
3745             if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3746                 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3747                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3748                        ist->filters[j]->name);
3749                 if (nb_filtergraphs > 1)
3750                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3751                 av_log(NULL, AV_LOG_INFO, "\n");
3756     for (i = 0; i < nb_output_streams; i++) {
3757         ost = output_streams[i];
3759         if (ost->attachment_filename) {
3760             /* an attached file */
3761             av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3762                    ost->attachment_filename, ost->file_index, ost->index);
3766         if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3767             /* output from a complex graph */
3768             av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3769             if (nb_filtergraphs > 1)
3770                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3772             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3773                    ost->index, ost->enc ? ost->enc->name : "?");
         /* plain input->output mapping (stream copy or simple transcode) */
3777         av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3778                input_streams[ost->source_index]->file_index,
3779                input_streams[ost->source_index]->st->index,
3782         if (ost->sync_ist != input_streams[ost->source_index])
3783             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3784                    ost->sync_ist->file_index,
3785                    ost->sync_ist->st->index);
3786         if (ost->stream_copy)
3787             av_log(NULL, AV_LOG_INFO, " (copy)");
3789             const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3790             const AVCodec *out_codec = ost->enc;
3791             const char *decoder_name = "?";
3792             const char *in_codec_name = "?";
3793             const char *encoder_name = "?";
3794             const char *out_codec_name = "?";
3795             const AVCodecDescriptor *desc;
             /* show "(native)" when the chosen codec name equals the
              * descriptor name, i.e. the built-in implementation is used */
3798                 decoder_name = in_codec->name;
3799                 desc = avcodec_descriptor_get(in_codec->id);
3801                     in_codec_name = desc->name;
3802                 if (!strcmp(decoder_name, in_codec_name))
3803                     decoder_name = "native";
3807                 encoder_name = out_codec->name;
3808                 desc = avcodec_descriptor_get(out_codec->id);
3810                     out_codec_name = desc->name;
3811                 if (!strcmp(encoder_name, out_codec_name))
3812                     encoder_name = "native";
3815             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3816                    in_codec_name, decoder_name,
3817                    out_codec_name, encoder_name);
3819         av_log(NULL, AV_LOG_INFO, "\n");
     /* error path: print the collected message (elided label above) */
3823         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
     /* signal other threads (e.g. signal handlers/progress) that init is done */
3827     atomic_store(&transcode_init_done, 1);
3832 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3833 static int need_output(void)
3837     for (i = 0; i < nb_output_streams; i++) {
3838         OutputStream *ost = output_streams[i];
3839         OutputFile *of = output_files[ost->file_index];
3840         AVFormatContext *os = output_files[ost->file_index]->ctx;
         /* skip streams that are finished or whose file hit -fs limit */
3842         if (ost->finished ||
3843             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
         /* -frames reached: close every stream of this output file */
3845         if (ost->frame_number >= ost->max_frames) {
3847             for (j = 0; j < of->ctx->nb_streams; j++)
3848                 close_output_stream(output_streams[of->ost_index + j]);
3859 * Select the output stream to process.
3861 * @return selected output stream, or NULL if none available
3863 static OutputStream *choose_output(void)
3866     int64_t opts_min = INT64_MAX;
3867     OutputStream *ost_min = NULL;
3869     for (i = 0; i < nb_output_streams; i++) {
3870         OutputStream *ost = output_streams[i];
         /* rescale the stream's current dts to a common time base; a stream
          * with no dts yet sorts first (INT64_MIN) so it gets fed data */
3871         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3872                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3874         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3875             av_log(NULL, AV_LOG_DEBUG,
3876                 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3877                 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
         /* an uninitialized stream whose inputs are not done takes priority */
3879         if (!ost->initialized && !ost->inputs_done)
         /* otherwise pick the least-advanced (smallest dts) unfinished stream;
          * a temporarily unavailable one yields NULL so the caller waits */
3882         if (!ost->finished && opts < opts_min) {
3884             ost_min = ost->unavailable ? NULL : ost;
/* Enable/disable terminal echo on stdin via termios; errors are ignored
 * (best effort — e.g. stdin may not be a tty). NOTE(review): the #if
 * HAVE_TERMIOS_H guard and local declaration are elided in this listing. */
3890 static void set_tty_echo(int on)
3894     if (tcgetattr(0, &tty) == 0) {
3895         if (on) tty.c_lflag |= ECHO;
3896         else tty.c_lflag &= ~ECHO;
3897         tcsetattr(0, TCSANOW, &tty);
/* Poll stdin for interactive single-key commands (at most every 100ms).
 * Returns AVERROR_EXIT when the user asked to quit or a signal was received,
 * 0 otherwise. Keys: +/- verbosity, s QP histogram, h hex/packet dump,
 * c/C filter commands, d/D debug modes, ? help. NOTE(review): sampled
 * excerpt — several key-handling branches and braces are elided. */
3902 static int check_keyboard_interaction(int64_t cur_time)
3905     static int64_t last_time;
3906     if (received_nb_signals)
3907         return AVERROR_EXIT;
3908     /* read_key() returns 0 on EOF */
3909     if(cur_time - last_time >= 100000 && !run_as_daemon){
3911         last_time = cur_time;
3915         return AVERROR_EXIT;
3916     if (key == '+') av_log_set_level(av_log_get_level()+10);
3917     if (key == '-') av_log_set_level(av_log_get_level()-10);
3918     if (key == 's') qp_hist     ^= 1;
     /* 'h' cycles: off -> pkt dump -> pkt+hex dump (elided branches) */
3921             do_hex_dump = do_pkt_dump = 0;
3922         } else if(do_pkt_dump){
3926         av_log_set_level(AV_LOG_DEBUG);
3928     if (key == 'c' || key == 'C'){
3929         char buf[4096], target[64], command[256], arg[256] = {0};
3932         fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
         /* read one command line from the tty, char by char */
3935         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3940         fprintf(stderr, "\n");
3942             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3943             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3944                    target, time, command, arg);
3945             for (i = 0; i < nb_filtergraphs; i++) {
3946                 FilterGraph *fg = filtergraphs[i];
                 /* immediate send ('c' limits to first matching filter) */
3949                     ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3950                                                       key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3951                     fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3952                 } else if (key == 'c') {
3953                     fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3954                     ret = AVERROR_PATCHWELCOME;
                 /* 'C' with a target time queues the command for later */
3956                     ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3958                         fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3963             av_log(NULL, AV_LOG_ERROR,
3964                    "Parse error, at least 3 arguments were expected, "
3965                    "only %d given in string '%s'\n", n, buf);
3968     if (key == 'd' || key == 'D'){
         /* 'D' cycles the decoder debug flags by shifting left */
3971             debug = input_streams[0]->st->codec->debug<<1;
3972             if(!debug) debug = 1;
3973             while(debug & (FF_DEBUG_DCT_COEFF
3975                                              |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3977                     )) //unsupported, would just crash
         /* 'd': prompt the user for an explicit numeric debug value */
3984             while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3989             fprintf(stderr, "\n");
3990             if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3991                 fprintf(stderr,"error parsing debug value\n");
         /* apply the chosen debug flags to all decoders and encoders */
3993         for(i=0;i<nb_input_streams;i++) {
3994             input_streams[i]->st->codec->debug = debug;
3996         for(i=0;i<nb_output_streams;i++) {
3997             OutputStream *ost = output_streams[i];
3998             ost->enc_ctx->debug = debug;
4000         if(debug) av_log_set_level(AV_LOG_DEBUG);
4001         fprintf(stderr,"debug=%d\n", debug);
4004         fprintf(stderr, "key    function\n"
4005                         "?      show this help\n"
4006                         "+      increase verbosity\n"
4007                         "-      decrease verbosity\n"
4008                         "c      Send command to first matching filter supporting it\n"
4009                         "C      Send/Queue command to all matching filters\n"
4010                         "D      cycle through available debug modes\n"
4011                         "h      dump packets/hex press to cycle through the 3 states\n"
4013                         "s      Show QP histogram\n"
/* Demuxer thread body: read packets from one InputFile and push them to the
 * main thread through a thread message queue. On read error/EOF the error is
 * propagated to the receiver via the queue and the thread exits.
 * NOTE(review): sampled excerpt — loop braces/continue lines are elided. */
4020 static void *input_thread(void *arg)
     /* non-blocking sends when the file was opened non-blocking */
4023     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4028         ret = av_read_frame(f->ctx, &pkt);
4030         if (ret == AVERROR(EAGAIN)) {
         /* terminal read error/EOF: tell the receiver and stop */
4035             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4038         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
         /* queue full in non-blocking mode: retry blocking and warn once */
4039         if (flags && ret == AVERROR(EAGAIN)) {
4041             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4042             av_log(f->ctx, AV_LOG_WARNING,
4043                    "Thread message queue blocking; consider raising the "
4044                    "thread_queue_size option (current value: %d)\n",
4045                    f->thread_queue_size);
4048             if (ret != AVERROR_EOF)
4049                 av_log(f->ctx, AV_LOG_ERROR,
4050                        "Unable to send packet to main thread: %s\n",
         /* send failed: drop the packet and propagate the error */
4052             av_packet_unref(&pkt);
4053             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join the demuxer thread of input file i, draining and freeing any
 * packets still queued; no-op if no thread queue was created. */
4061 static void free_input_thread(int i)
4063     InputFile *f = input_files[i];
4066     if (!f || !f->in_thread_queue)
     /* make the sender side fail so input_thread exits its loop */
4068     av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4069     while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4070         av_packet_unref(&pkt);
4072     pthread_join(f->thread, NULL);
4074     av_thread_message_queue_free(&f->in_thread_queue);
/* Tear down the demuxer threads of all input files. */
4077 static void free_input_threads(void)
4081     for (i = 0; i < nb_input_files; i++)
4082         free_input_thread(i);
/* Create the demuxer thread for input file i (only when more than one input
 * file is in use, or an explicit thread_queue_size was set). Returns 0 on
 * success or a negative AVERROR. */
4085 static int init_input_thread(int i)
4088     InputFile *f = input_files[i];
     /* default: 8-deep queue with multiple inputs, none for a single input */
4090     if (f->thread_queue_size < 0)
4091         f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4092     if (!f->thread_queue_size)
     /* non-seekable (pipe/network) inputs — except lavfi — must not block */
4095     if (f->ctx->pb ? !f->ctx->pb->seekable :
4096         strcmp(f->ctx->iformat->name, "lavfi"))
4097         f->non_blocking = 1;
4098     ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4099                                         f->thread_queue_size, sizeof(AVPacket));
4103     if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4104         av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4105         av_thread_message_queue_free(&f->in_thread_queue);
         /* pthread_create returns a positive errno, not -1/errno */
4106         return AVERROR(ret);
/* Create demuxer threads for every input file; stops at the first failure
 * (error propagation lines elided in this listing). */
4112 static int init_input_threads(void)
4116     for (i = 0; i < nb_input_files; i++) {
4117         ret = init_input_thread(i);
/* Receive one packet from the input file's demuxer-thread queue;
 * non-blocking when the file was flagged non_blocking. */
4124 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4126     return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4128                                         AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file, honoring -re rate emulation:
 * returns EAGAIN while any stream of the file is ahead of wallclock.
 * Reads from the thread queue when one exists, else directly from the
 * demuxer. NOTE(review): the rate_emu guard/brace lines are elided. */
4132 static int get_input_packet(InputFile *f, AVPacket *pkt)
4136         for (i = 0; i < f->nb_streams; i++) {
4137             InputStream *ist = input_streams[f->ist_index + i];
             /* compare stream dts (scaled to microseconds) with elapsed time */
4138             int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4139             int64_t now = av_gettime_relative() - ist->start;
4141                 return AVERROR(EAGAIN);
4146     if (f->thread_queue_size)
4147         return get_input_packet_mt(f, pkt);
4149     return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. some input reported EAGAIN this iteration). */
4152 static int got_eagain(void)
4155     for (i = 0; i < nb_output_streams; i++)
4156         if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-stream unavailable flags before the
 * next scheduling round. */
4161 static void reset_eagain(void)
4164     for (i = 0; i < nb_input_files; i++)
4165         input_files[i]->eagain = 0;
4166     for (i = 0; i < nb_output_streams; i++)
4167         output_streams[i]->unavailable = 0;
4170 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4171 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4172                                AVRational time_base)
     /* first value seen: adopt tmp and its time base (guard line elided) */
4178         return tmp_time_base;
     /* keep whichever of *duration / tmp represents the longer time */
4181     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4184         return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to its start time and
 * accumulate the file's total duration into ifile->duration so timestamps of
 * the next iteration can be offset. Audio stream durations take precedence
 * over video (video last-frame length is ill-defined). Returns 0 or a
 * negative AVERROR from the seek. NOTE(review): sampled excerpt — some
 * guard/continue/brace lines are elided. */
4190 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4193     AVCodecContext *avctx;
4194     int i, ret, has_audio = 0;
4195     int64_t duration = 0;
4197     ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
     /* first pass: detect whether any decoded audio samples were produced */
4201     for (i = 0; i < ifile->nb_streams; i++) {
4202         ist   = input_streams[ifile->ist_index + i];
4203         avctx = ist->dec_ctx;
4205         /* duration is the length of the last frame in a stream
4206          * when audio stream is present we don't care about
4207          * last video frame length because it's not defined exactly */
4208         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
     /* second pass: compute each stream's last-frame duration and fold the
      * stream's span into the file duration */
4212     for (i = 0; i < ifile->nb_streams; i++) {
4213         ist   = input_streams[ifile->ist_index + i];
4214         avctx = ist->dec_ctx;
4217             if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4218                 AVRational sample_rate = {1, avctx->sample_rate};
4220                 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
             /* video: use configured framerate, else the stream average */
4225                 if (ist->framerate.num) {
4226                     duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4227                 } else if (ist->st->avg_frame_rate.num) {
4228                     duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4233         if (!ifile->duration)
4234             ifile->time_base = ist->st->time_base;
4235         /* the total duration of the stream, max_pts - min_pts is
4236          * the duration of the stream without the last frame */
4237         if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4238             duration += ist->max_pts - ist->min_pts;
4239         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
     /* finite loop count: decrement remaining iterations (body elided) */
4243     if (ifile->loop > 0)
4251 * - 0 -- one packet was read and processed
4252 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4253 * this function should be called again
4254 * - AVERROR_EOF -- this function should not be called again
/* Read and process one packet from input file 'file_index': handle EOF and
 * -stream_loop rewind, timestamp wrap correction, start-time/offset and
 * ts_scale adjustment, discontinuity detection/correction, then hand the
 * packet to process_input_packet(). Returns 0 after one packet, EAGAIN when
 * none was available (or at EOF after flushing), other negatives on error.
 * NOTE(review): sampled excerpt — numerous brace/goto/guard lines are elided;
 * comments below describe only what the visible statements establish. */
4256 static int process_input(int file_index)
4258     InputFile *ifile = input_files[file_index];
4259     AVFormatContext *is;
4262     int ret, thread_ret, i, j;
     /* with -copyts the discontinuity correction defaults to disabled */
4265     int disable_discontinuity_correction = copy_ts;
4268     ret = get_input_packet(ifile, &pkt);
4270     if (ret == AVERROR(EAGAIN)) {
     /* EOF/error with -stream_loop: flush decoders, seek back, retry read */
4274     if (ret < 0 && ifile->loop) {
4275         AVCodecContext *avctx;
4276         for (i = 0; i < ifile->nb_streams; i++) {
4277             ist = input_streams[ifile->ist_index + i];
4278             avctx = ist->dec_ctx;
4279             if (ist->decoding_needed) {
                 /* EOF-flush the decoder before rewinding */
4280                 ret = process_input_packet(ist, NULL, 1);
4283                 avcodec_flush_buffers(avctx);
         /* demuxer thread must be stopped around the seek */
4287         free_input_thread(file_index);
4289         ret = seek_to_start(ifile, is);
4291         thread_ret = init_input_thread(file_index);
4296             av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4298             ret = get_input_packet(ifile, &pkt);
4299             if (ret == AVERROR(EAGAIN)) {
     /* genuine error (not EOF): report it */
4305         if (ret != AVERROR_EOF) {
4306             print_error(is->url, ret);
     /* EOF path: send a final flush to each decoder, finish outputs that do
      * not pass through libavfilter, mark the file as drained */
4311         for (i = 0; i < ifile->nb_streams; i++) {
4312             ist = input_streams[ifile->ist_index + i];
4313             if (ist->decoding_needed) {
4314                 ret = process_input_packet(ist, NULL, 0);
4319             /* mark all outputs that don't go through lavfi as finished */
4320             for (j = 0; j < nb_output_streams; j++) {
4321                 OutputStream *ost = output_streams[j];
4323                 if (ost->source_index == ifile->ist_index + i &&
4324                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4325                     finish_output_stream(ost);
4329         ifile->eof_reached = 1;
4330         return AVERROR(EAGAIN);
4336         av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4337                          is->streams[pkt.stream_index]);
4339     /* the following test is needed in case new streams appear
4340        dynamically in stream : we ignore them */
4341     if (pkt.stream_index >= ifile->nb_streams) {
4342         report_new_stream(file_index, &pkt);
4343         goto discard_packet;
4346     ist = input_streams[ifile->ist_index + pkt.stream_index];
4348     ist->data_size += pkt.size;
     /* discarded stream (guard elided) */
4352         goto discard_packet;
4354     if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4355         av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4356                "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
     /* debug_ts trace of the raw demuxed timestamps */
4362         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4363                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4364                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4365                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4366                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4367                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4368                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4369                av_ts2str(input_files[ist->file_index]->ts_offset),
4370                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
     /* timestamp wrap correction for streams with < 64 pts_wrap_bits */
4373     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4374         int64_t stime, stime2;
4375         // Correcting starttime based on the enabled streams
4376         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4377         //       so we instead do it here as part of discontinuity handling
4378         if (   ist->next_dts == AV_NOPTS_VALUE
4379             && ifile->ts_offset == -is->start_time
4380             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4381             int64_t new_start_time = INT64_MAX;
             /* minimum start time over all non-discarded streams */
4382             for (i=0; i<is->nb_streams; i++) {
4383                 AVStream *st = is->streams[i];
4384                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4386                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4388             if (new_start_time > is->start_time) {
4389                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4390                 ifile->ts_offset = -new_start_time;
4394         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4395         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4396         ist->wrap_correction_done = 1;
         /* undo a wrap if the timestamp is more than half a wrap ahead */
4398         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4399             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4400             ist->wrap_correction_done = 0;
4402         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4403             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4404             ist->wrap_correction_done = 0;
4408     /* add the stream-global side data to the first packet */
4409     if (ist->nb_packets == 1) {
4410         for (i = 0; i < ist->st->nb_side_data; i++) {
4411             AVPacketSideData *src_sd = &ist->st->side_data[i];
             /* display matrix is handled via autorotate, not copied here */
4414             if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
             /* don't overwrite side data already on the packet */
4417             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4420             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4424             memcpy(dst_data, src_sd->data, src_sd->size);
     /* apply the file ts_offset, then the per-stream -itsscale factor */
4428     if (pkt.dts != AV_NOPTS_VALUE)
4429         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4430     if (pkt.pts != AV_NOPTS_VALUE)
4431         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4433     if (pkt.pts != AV_NOPTS_VALUE)
4434         pkt.pts *= ist->ts_scale;
4435     if (pkt.dts != AV_NOPTS_VALUE)
4436         pkt.dts *= ist->ts_scale;
4438     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
     /* inter-stream discontinuity: first dts of this stream vs file last_ts */
4439     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4440          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4441         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4442         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4443         int64_t delta   = pkt_dts - ifile->last_ts;
4444         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4445             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
4446             ifile->ts_offset -= delta;
4447             av_log(NULL, AV_LOG_DEBUG,
4448                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4449                    delta, ifile->ts_offset);
4450             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4451             if (pkt.pts != AV_NOPTS_VALUE)
4452                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
     /* -stream_loop: shift timestamps by the accumulated file duration */
4456     duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4457     if (pkt.pts != AV_NOPTS_VALUE) {
4458         pkt.pts += duration;
4459         ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4460         ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4463     if (pkt.dts != AV_NOPTS_VALUE)
4464         pkt.dts += duration;
4466     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
     /* with -copyts, re-enable correction if the dts looks wrapped (the
      * wrapped value is much closer to the expected next_dts) */
4468     if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4469         (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4470         int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4471                                             ist->st->time_base, AV_TIME_BASE_Q,
4472                                             AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4473         if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4474             disable_discontinuity_correction = 0;
     /* intra-stream discontinuity: dts vs the decoder's predicted next_dts */
4477     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4478          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4479         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4480         !disable_discontinuity_correction) {
4481         int64_t delta   = pkt_dts - ist->next_dts;
4482         if (is->iformat->flags & AVFMT_TS_DISCONT) {
4483             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4484                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
4485                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4486                 ifile->ts_offset -= delta;
4487                 av_log(NULL, AV_LOG_DEBUG,
4488                        "timestamp discontinuity for stream #%d:%d "
4489                        "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4490                        ist->file_index, ist->st->index, ist->st->id,
4491                        av_get_media_type_string(ist->dec_ctx->codec_type),
4492                        delta, ifile->ts_offset);
4493                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4494                 if (pkt.pts != AV_NOPTS_VALUE)
4495                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
         /* non-discont formats: drop timestamps that exceed the error threshold */
4498             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4499                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4500                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4501                 pkt.dts = AV_NOPTS_VALUE;
4503             if (pkt.pts != AV_NOPTS_VALUE){
4504                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4505                 delta   = pkt_pts - ist->next_dts;
4506                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4507                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4508                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4509                     pkt.pts = AV_NOPTS_VALUE;
     /* remember the last dts for future inter-stream discontinuity checks */
4515     if (pkt.dts != AV_NOPTS_VALUE)
4516         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
     /* debug_ts trace of the fully adjusted timestamps */
4519         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4520                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4521                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4522                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4523                av_ts2str(input_files[ist->file_index]->ts_offset),
4524                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4527     sub2video_heartbeat(ist, pkt.pts);
4529     process_input_packet(ist, &pkt, 0);
4532     av_packet_unref(&pkt);
4538 * Perform a step of transcoding for the specified filter graph.
4540 * @param[in] graph filter graph to consider
4541 * @param[out] best_ist input stream where a frame would allow to continue
4542 * @return 0 for success, <0 for error
4544 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4547     int nb_requests, nb_requests_max = 0;
4548     InputFilter *ifilter;
     /* ask the graph to produce output on its oldest-waiting sink */
4552     ret = avfilter_graph_request_oldest(graph->graph);
     /* success: drain whatever frames are now available */
4554         return reap_filters(0);
4556     if (ret == AVERROR_EOF) {
         /* graph fully flushed: final drain, then close its outputs */
4557         ret = reap_filters(1);
4558         for (i = 0; i < graph->nb_outputs; i++)
4559             close_output_stream(graph->outputs[i]->ost);
4562     if (ret != AVERROR(EAGAIN))
     /* EAGAIN: pick the input whose buffersrc starved the most requests,
      * skipping files that are stalled or already at EOF */
4565     for (i = 0; i < graph->nb_inputs; i++) {
4566         ifilter = graph->inputs[i];
4568         if (input_files[ist->file_index]->eagain ||
4569             input_files[ist->file_index]->eof_reached)
4571         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4572         if (nb_requests > nb_requests_max) {
4573             nb_requests_max = nb_requests;
     /* no feedable input found: mark the graph's outputs unavailable */
4579         for (i = 0; i < graph->nb_outputs; i++)
4580             graph->outputs[i]->ost->unavailable = 1;
4586 * Run a single step of transcoding.
4588 * @return 0 for success, <0 for error
4590 static int transcode_step(void)
4593     InputStream *ist = NULL;
     /* pick the least-advanced output stream to work on */
4596     ost = choose_output();
     /* nothing schedulable and no EAGAIN pending: we are done */
4603         av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
     /* (re)configure the filtergraph once all its inputs know their format */
4607     if (ost->filter && !ost->filter->graph->graph) {
4608         if (ifilter_has_all_input_formats(ost->filter->graph)) {
4609             ret = configure_filtergraph(ost->filter->graph);
4611                 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4617     if (ost->filter && ost->filter->graph->graph) {
         /* best-effort init; real init happens when the first frame arrives */
4618         init_output_stream_wrapper(ost, 1);
4620         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4624     } else if (ost->filter) {
         /* unconfigured graph: find an input that still owes us data */
4626         for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4627             InputFilter *ifilter = ost->filter->graph->inputs[i];
4628             if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
         /* every input produced output or hit EOF: nothing more to feed */
4634             ost->inputs_done = 1;
     /* no filter: read straight from the mapped source stream */
4638         av_assert0(ost->source_index >= 0);
4639         ist = input_streams[ost->source_index];
4642     ret = process_input(ist->file_index);
4643     if (ret == AVERROR(EAGAIN)) {
4644         if (input_files[ist->file_index]->eagain)
4645             ost->unavailable = 1;
     /* EOF from process_input is not an error for the step */
4650         return ret == AVERROR_EOF ? 0 : ret;
4652     return reap_filters(0);
4656 * The following code is the main loop of the file converter
/*
 * Main conversion loop: initialize transcoding, repeatedly call
 * transcode_step() until no output is needed or a termination signal is
 * received, then flush decoders, write trailers, close encoders/decoders,
 * and free per-stream resources.
 *
 * @return 0 on success, a negative AVERROR on failure.
 *
 * NOTE(review): interior lines of this function were elided by extraction;
 * comments below describe only the code that is visible here.
 */
4658 static int transcode(void)
4661     AVFormatContext *os;
4664     int64_t timer_start;
4665     int64_t total_packets_written = 0;
/* Set up all streams, filters and encoders before entering the loop. */
4667     ret = transcode_init();
4671     if (stdin_interaction) {
4672         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
/* Wall-clock start time, used by print_report() for progress/speed. */
4675     timer_start = av_gettime_relative();
/* Spawn per-input-file demuxer threads (when enabled/available). */
4678     if ((ret = init_input_threads()) < 0)
/* Main loop: runs until a SIGTERM/SIGINT-style signal is observed. */
4682     while (!received_sigterm) {
4683         int64_t cur_time= av_gettime_relative();
4685         /* if 'q' pressed, exits */
4686         if (stdin_interaction)
4687             if (check_keyboard_interaction(cur_time) < 0)
4690         /* check if there's any stream where output is still needed */
4691         if (!need_output()) {
4692             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4696         ret = transcode_step();
4697         if (ret < 0 && ret != AVERROR_EOF) {
4698             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4702         /* dump report by using the output first video and audio streams */
4703         print_report(0, timer_start, cur_time);
4706     free_input_threads();
4709     /* at the end of stream, we must flush the decoder buffers */
4710     for (i = 0; i < nb_input_streams; i++) {
4711         ist = input_streams[i];
4712         if (!input_files[ist->file_index]->eof_reached) {
/* NULL packet drains buffered frames out of the decoder. */
4713             process_input_packet(ist, NULL, 0);
4720     /* write the trailer if needed and close file */
4721     for (i = 0; i < nb_output_files; i++) {
4722         os = output_files[i]->ctx;
/* A file whose header was never written produced no output at all. */
4723         if (!output_files[i]->header_written) {
4724             av_log(NULL, AV_LOG_ERROR,
4725                    "Nothing was written into output file %d (%s), because "
4726                    "at least one of its streams received no packets.\n",
4730         if ((ret = av_write_trailer(os)) < 0) {
4731             av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4737     /* dump report by using the first video and audio streams */
4738     print_report(1, timer_start, av_gettime_relative());
4740     /* close each encoder */
4741     for (i = 0; i < nb_output_streams; i++) {
4742         ost = output_streams[i];
4743         if (ost->encoding_needed) {
4744             av_freep(&ost->enc_ctx->stats_in);
4746         total_packets_written += ost->packets_written;
/* Optionally abort if a stream emitted no packets (-abort_on flag). */
4747         if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4748             av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4753     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4754         av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4758     /* close each decoder */
4759     for (i = 0; i < nb_input_streams; i++) {
4760         ist = input_streams[i];
4761         if (ist->decoding_needed) {
4762             avcodec_close(ist->dec_ctx);
4763             if (ist->hwaccel_uninit)
4764                 ist->hwaccel_uninit(ist->dec_ctx);
/* Release all hardware device contexts opened for hwaccel. */
4768     hw_device_free_all();
/* Error/cleanup path: also reached on failure before/inside the loop. */
4775     free_input_threads();
4778     if (output_streams) {
4779         for (i = 0; i < nb_output_streams; i++) {
4780             ost = output_streams[i];
/* Close the 2-pass stats logfile if one was opened for this stream. */
4783                 if (fclose(ost->logfile))
4784                     av_log(NULL, AV_LOG_ERROR,
4785                            "Error closing logfile, loss of information possible: %s\n",
4786                            av_err2str(AVERROR(errno)));
4787                 ost->logfile = NULL;
/* Free per-output-stream option storage. */
4789             av_freep(&ost->forced_kf_pts);
4790             av_freep(&ost->apad);
4791             av_freep(&ost->disposition);
4792             av_dict_free(&ost->encoder_opts);
4793             av_dict_free(&ost->sws_dict);
4794             av_dict_free(&ost->swr_opts);
4795             av_dict_free(&ost->resample_opts);
/*
 * Snapshot real, user and system time in microseconds for -benchmark
 * reporting. Real time always comes from av_gettime_relative(); CPU times
 * come from getrusage() on POSIX, GetProcessTimes() on Windows, and are
 * zero when neither API is available.
 */
4802 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
/* First member (real_usec) initialized from the monotonic clock. */
4804     BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4806     struct rusage rusage;
4808     getrusage(RUSAGE_SELF, &rusage);
/* Convert seconds+microseconds timevals to a single microsecond count. */
4809     time_stamps.user_usec =
4810         (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4811     time_stamps.sys_usec =
4812         (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4813 #elif HAVE_GETPROCESSTIMES
4815     FILETIME c, e, k, u;
4816     proc = GetCurrentProcess();
4817     GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100-nanosecond units; divide by 10 for microseconds. */
4818     time_stamps.user_usec =
4819         ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4820     time_stamps.sys_usec =
4821         ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* Fallback: no CPU-time API available on this platform. */
4823     time_stamps.user_usec = time_stamps.sys_usec = 0;
/*
 * Return the peak memory usage of this process in bytes, for the
 * -benchmark summary: ru_maxrss (kilobytes, hence *1024) on POSIX,
 * PeakPagefileUsage on Windows.
 */
4828 static int64_t getmaxrss(void)
4830 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4831     struct rusage rusage;
4832     getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; scale to bytes.
 * NOTE(review): on macOS ru_maxrss is already in bytes — this *1024 is
 * the historical upstream behavior, kept as-is. */
4833     return (int64_t)rusage.ru_maxrss * 1024;
4834 #elif HAVE_GETPROCESSMEMORYINFO
4836     PROCESS_MEMORY_COUNTERS memcounters;
4837     proc = GetCurrentProcess();
4838     memcounters.cb = sizeof(memcounters);
4839     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4840     return memcounters.PeakPagefileUsage;
/* av_log callback that discards all messages; installed below when the
 * hidden "-d" debug/daemon mode is selected (body elided in this view). */
4846 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: set up logging and cleanup handlers, parse the
 * command line, run transcode(), then print benchmark/decode-error
 * statistics and exit via exit_program().
 *
 * NOTE(review): interior lines of this function were elided by extraction;
 * comments below describe only the code that is visible here.
 */
4850 int main(int argc, char **argv)
4853     BenchmarkTimeStamps ti;
/* Ensure ffmpeg_cleanup() runs whenever exit_program() is called. */
4857     register_exit(ffmpeg_cleanup);
4859     setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4861     av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Apply -loglevel/-v early so option parsing itself is logged correctly. */
4862     parse_loglevel(argc, argv, options);
/* Hidden "-d" mode: silence av_log output entirely. */
4864     if(argc>1 && !strcmp(argv[1], "-d")){
4866         av_log_set_callback(log_callback_null);
4872     avdevice_register_all();
4874     avformat_network_init();
4876     show_banner(argc, argv, options);
4878     /* parse options and open all input/output files */
4879     ret = ffmpeg_parse_options(argc, argv);
/* Invoked with no files at all: point the user at the help. */
4883     if (nb_output_files <= 0 && nb_input_files == 0) {
4885         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4889     /* file converter / grab */
4890     if (nb_output_files <= 0) {
4891         av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Check whether any output muxer is RTP (affects SDP/stats handling —
 * the consuming code is elided in this view). */
4895     for (i = 0; i < nb_output_files; i++) {
4896         if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Record start timestamps, then run the whole conversion. */
4900     current_time = ti = get_benchmark_time_stamps();
4901     if (transcode() < 0)
/* -benchmark: report user/system/real time consumed by transcode(). */
4904         int64_t utime, stime, rtime;
4905         current_time = get_benchmark_time_stamps();
4906         utime = current_time.user_usec - ti.user_usec;
4907         stime = current_time.sys_usec - ti.sys_usec;
4908         rtime = current_time.real_usec - ti.real_usec;
4909         av_log(NULL, AV_LOG_INFO,
4910                "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4911                utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4913     av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4914            decode_error_stat[0], decode_error_stat[1]);
/* Fail if the decode-error ratio exceeds -max_error_rate. */
4915     if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals termination by signal; otherwise the accumulated code. */
4918     exit_program(received_nb_signals ? 255 : main_return_code);
4919     return main_return_code;