2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banner/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log file handle for -vstats output; NULL when the option is unused. */
112 static FILE *vstats_file;
/* Names of constants usable in -force_key_frames expressions.
 * NOTE(review): initializer elided in this listing. */
114 const char *const forced_keyframes_const_names[] = {
/* user/sys/real time snapshot used by -benchmark reporting.
 * NOTE(review): member list elided in this listing. */
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
/* Forward declarations for helpers defined later in the file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Runtime counters/flags shared across the transcode loop. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
140 static int want_sdp = 1;
142 static BenchmarkTimeStamps current_time;
143 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets (lazily allocated). */
145 static uint8_t *subtitle_out;
/* Global stream/file tables built by ffmpeg_opt.c and consumed here. */
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
168 static void free_input_threads(void);
/* sub2video: render decoded subtitles onto RGB32 video frames so they can
 * be consumed by filter graphs (e.g. overlay). */
172 Convert subtitles to video with alpha to insert them in filter graphs.
173 This is a temporary solution until libavfilter gets real subtitles support.
176 static int sub2video_get_blank_frame(InputStream *ist)
179 AVFrame *frame = ist->sub2video.frame;
181 av_frame_unref(frame);
182 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
185 if ((ret = av_frame_get_buffer(frame, 0)) < 0)
187 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194 uint32_t *pal, *dst2;
198 if (r->type != SUBTITLE_BITMAP) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
202 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204 r->x, r->y, r->w, r->h, w, h
209 dst += r->y * dst_linesize + r->x * 4;
211 pal = (uint32_t *)r->data[1];
212 for (y = 0; y < r->h; y++) {
213 dst2 = (uint32_t *)dst;
215 for (x = 0; x < r->w; x++)
216 *(dst2++) = pal[*(src2++)];
218 src += r->linesize[0];
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 AVFrame *frame = ist->sub2video.frame;
228 av_assert1(frame->data[0]);
229 ist->sub2video.last_pts = frame->pts = pts;
230 for (i = 0; i < ist->nb_filters; i++) {
231 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232 AV_BUFFERSRC_FLAG_KEEP_REF |
233 AV_BUFFERSRC_FLAG_PUSH);
234 if (ret != AVERROR_EOF && ret < 0)
235 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render a decoded AVSubtitle onto the sub2video canvas and push it into
 * the stream's filters.  When sub is NULL (heartbeat/flush path — handling
 * elided in this listing) a blank/held canvas is pushed instead.
 * heartbeat_pts is used as the start time on the very first update. */
240 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 AVFrame *frame = ist->sub2video.frame;
246 int64_t pts, end_pts;
/* Convert display window (ms offsets from sub->pts, in AV_TIME_BASE) into
 * the input stream's time base. */
251 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252 AV_TIME_BASE_Q, ist->st->time_base);
253 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254 AV_TIME_BASE_Q, ist->st->time_base);
255 num_rects = sub->num_rects;
257 /* If we are initializing the system, utilize current heartbeat
258 PTS as the start time, and show until the following subpicture
259 is received. Otherwise, utilize the previous subpicture's end time
260 as the fall-back value. */
261 pts = ist->sub2video.initialize ?
262 heartbeat_pts : ist->sub2video.end_pts;
266 if (sub2video_get_blank_frame(ist) < 0) {
267 av_log(ist->dec_ctx, AV_LOG_ERROR,
268 "Impossible to get a blank canvas.\n");
/* Blit every rectangle onto the blank canvas, then hand it to the filters. */
271 dst = frame->data [0];
272 dst_linesize = frame->linesize[0];
273 for (i = 0; i < num_rects; i++)
274 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275 sub2video_push_ref(ist, pts);
/* Remember when this subpicture expires; clear the first-update flag. */
276 ist->sub2video.end_pts = end_pts;
277 ist->sub2video.initialize = 0;
/* Called whenever a frame is read from ist's file: re-send (or refresh) the
 * sub2video canvas of every subtitle stream in the same file, so video
 * filters waiting on a subtitle input do not stall.
 * NOTE(review): several guard/continue lines are elided in this listing. */
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282 InputFile *infile = input_files[ist->file_index];
286 /* When a frame is read from a file, examine all sub2video streams in
287 the same file and send the sub2video frame again. Otherwise, decoded
288 video frames could be accumulating in the filter graph while a filter
289 (possibly overlay) is desperately waiting for a subtitle frame. */
290 for (i = 0; i < infile->nb_streams; i++) {
291 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Only streams that actually run through sub2video have a canvas frame. */
292 if (!ist2->sub2video.frame)
294 /* subtitles seem to be usually muxed ahead of other streams;
295 if not, subtracting a larger time here is necessary */
296 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297 /* do not send the heartbeat frame if the subtitle is already ahead */
298 if (pts2 <= ist2->sub2video.last_pts)
300 if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301 /* if we have hit the end of the current displayed subpicture,
302 or if we need to initialize the system, update the
303 overlayed subpicture and its start/end times */
304 sub2video_update(ist2, pts2 + 1, NULL);
/* Only re-push the held canvas if some filter actually asked for input. */
305 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308 sub2video_push_ref(ist2, pts2);
312 static void sub2video_flush(InputStream *ist)
317 if (ist->sub2video.end_pts < INT64_MAX)
318 sub2video_update(ist, INT64_MAX, NULL);
319 for (i = 0; i < ist->nb_filters; i++) {
320 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321 if (ret != AVERROR_EOF && ret < 0)
322 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
326 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put the tty back into its saved mode.
 * NOTE(review): the restore_tty guard and #if HAVE_TERMIOS_H wrapper are
 * elided in this listing. */
328 static void term_exit_sigsafe(void)
332 tcsetattr (0, TCSANOW, &oldtty);
/* (term_exit) flush any pending log output before restoring the terminal. */
338 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared between the handlers and the main loop.
 * volatile: written from signal context, read from normal context. */
342 static volatile int received_sigterm = 0;
343 static volatile int received_nb_signals = 0;
344 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
345 static volatile int ffmpeg_exited = 0;
346 static int main_return_code = 0;
/* Signal handler for SIGINT/SIGTERM/...: record the signal and request a
 * graceful shutdown; after more than 3 signals, write a message with the
 * async-signal-safe write(2) and hard-exit (exit call elided in listing). */
349 sigterm_handler(int sig)
352 received_sigterm = sig;
353 received_nb_signals++;
355 if(received_nb_signals > 3) {
/* write(2) is async-signal-safe, unlike fprintf/av_log. */
356 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
357 strlen("Received > 3 system signals, hard exiting\n"));
358 if (ret < 0) { /* Do nothing */ };
363 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the code understands. */
364 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
366 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
371 case CTRL_BREAK_EVENT:
372 sigterm_handler(SIGINT);
375 case CTRL_CLOSE_EVENT:
376 case CTRL_LOGOFF_EVENT:
377 case CTRL_SHUTDOWN_EVENT:
378 sigterm_handler(SIGTERM);
379 /* Basically, with these 3 events, when we return from this method the
380 process is hard terminated, so stall as long as we need to
381 to try and let the main thread(s) clean up and gracefully terminate
382 (we have at most 5 seconds, but should be done far before that). */
/* Busy-wait until the main thread flags completion (sleep call elided). */
383 while (!ffmpeg_exited) {
389 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* (term_init) Put the controlling tty into raw-ish mode so single
 * keypresses can be read without echo, then install the signal handlers.
 * NOTE(review): the enclosing function header is not visible in this listing. */
398 if (!run_as_daemon && stdin_interaction) {
400 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation, enable output post-processing, turn off
 * echo/canonical mode, clear size/parity bits — classic raw-mode setup. */
404 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
405 |INLCR|IGNCR|ICRNL|IXON);
406 tty.c_oflag |= OPOST;
407 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
408 tty.c_cflag &= ~(CSIZE|PARENB);
413 tcsetattr (0, TCSANOW, &tty);
415 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
419 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
420 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
422 signal(SIGXCPU, sigterm_handler);
425 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
427 #if HAVE_SETCONSOLECTRLHANDLER
428 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
432 /* read a key without blocking */
/* Returns the next pressed key, or a negative value when none is pending.
 * Uses select(2) on POSIX and PeekNamedPipe/console APIs on Windows.
 * NOTE(review): large parts of the body are elided in this listing. */
433 static int read_key(void)
/* Zero timeout => pure poll, never blocks. */
445 n = select(1, &rfds, NULL, NULL, &tv);
454 # if HAVE_PEEKNAMEDPIPE
456 static HANDLE input_handle;
459 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for redirected stdin => treat it as a pipe. */
460 is_pipe = !GetConsoleMode(input_handle, &dw);
464 /* When running under a GUI, you will end here. */
465 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
466 // input pipe may have been closed by the program that ran ffmpeg
484 static int decode_interrupt_cb(void *ctx)
486 return received_nb_signals > atomic_load(&transcode_init_done);
489 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown registered for program exit: free every filtergraph,
 * output file/stream, input file/stream and ancillary buffer, then report
 * how the run ended.  ret is the prospective process exit code.
 * NOTE(review): numerous guard/brace lines are elided in this listing. */
491 static void ffmpeg_cleanup(int ret)
496 int maxrss = getmaxrss() / 1024;
497 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graphs, drain queued frames/subtitles, names --- */
500 for (i = 0; i < nb_filtergraphs; i++) {
501 FilterGraph *fg = filtergraphs[i];
502 avfilter_graph_free(&fg->graph);
503 for (j = 0; j < fg->nb_inputs; j++) {
504 InputFilter *ifilter = fg->inputs[j];
505 struct InputStream *ist = ifilter->ist;
/* Drain and free any frames still queued for this filter input. */
507 while (av_fifo_size(ifilter->frame_queue)) {
509 av_fifo_generic_read(ifilter->frame_queue, &frame,
510 sizeof(frame), NULL);
511 av_frame_free(&frame);
513 av_fifo_freep(&ifilter->frame_queue);
514 if (ist->sub2video.sub_queue) {
515 while (av_fifo_size(ist->sub2video.sub_queue)) {
517 av_fifo_generic_read(ist->sub2video.sub_queue,
518 &sub, sizeof(sub), NULL);
519 avsubtitle_free(&sub);
521 av_fifo_freep(&ist->sub2video.sub_queue);
523 av_buffer_unref(&ifilter->hw_frames_ctx);
524 av_freep(&ifilter->name);
525 av_freep(&fg->inputs[j]);
527 av_freep(&fg->inputs);
528 for (j = 0; j < fg->nb_outputs; j++) {
529 OutputFilter *ofilter = fg->outputs[j];
531 avfilter_inout_free(&ofilter->out_tmp);
532 av_freep(&ofilter->name);
533 av_freep(&ofilter->formats);
534 av_freep(&ofilter->channel_layouts);
535 av_freep(&ofilter->sample_rates);
536 av_freep(&fg->outputs[j]);
538 av_freep(&fg->outputs);
539 av_freep(&fg->graph_desc);
541 av_freep(&filtergraphs[i]);
543 av_freep(&filtergraphs);
545 av_freep(&subtitle_out);
/* --- output files: close muxer I/O (unless AVFMT_NOFILE) and contexts --- */
548 for (i = 0; i < nb_output_files; i++) {
549 OutputFile *of = output_files[i];
554 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
556 avformat_free_context(s);
557 av_dict_free(&of->opts);
559 av_freep(&output_files[i]);
/* --- output streams: encoders, filters, queues, per-stream options --- */
561 for (i = 0; i < nb_output_streams; i++) {
562 OutputStream *ost = output_streams[i];
567 av_bsf_free(&ost->bsf_ctx);
569 av_frame_free(&ost->filtered_frame);
570 av_frame_free(&ost->last_frame);
571 av_dict_free(&ost->encoder_opts);
573 av_freep(&ost->forced_keyframes);
574 av_expr_free(ost->forced_keyframes_pexpr);
575 av_freep(&ost->avfilter);
576 av_freep(&ost->logfile_prefix);
578 av_freep(&ost->audio_channels_map);
579 ost->audio_channels_mapped = 0;
581 av_dict_free(&ost->sws_dict);
582 av_dict_free(&ost->swr_opts);
584 avcodec_free_context(&ost->enc_ctx);
585 avcodec_parameters_free(&ost->ref_par);
/* Drop any packets still buffered for muxing. */
587 if (ost->muxing_queue) {
588 while (av_fifo_size(ost->muxing_queue)) {
590 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
591 av_packet_unref(&pkt);
593 av_fifo_freep(&ost->muxing_queue);
596 av_freep(&output_streams[i]);
599 free_input_threads();
/* --- input files/streams: demuxers, decoders, per-stream state --- */
601 for (i = 0; i < nb_input_files; i++) {
602 avformat_close_input(&input_files[i]->ctx);
603 av_freep(&input_files[i]);
605 for (i = 0; i < nb_input_streams; i++) {
606 InputStream *ist = input_streams[i];
608 av_frame_free(&ist->decoded_frame);
609 av_frame_free(&ist->filter_frame);
610 av_dict_free(&ist->decoder_opts);
611 avsubtitle_free(&ist->prev_sub.subtitle);
612 av_frame_free(&ist->sub2video.frame);
613 av_freep(&ist->filters);
614 av_freep(&ist->hwaccel_device);
615 av_freep(&ist->dts_buffer);
617 avcodec_free_context(&ist->dec_ctx);
619 av_freep(&input_streams[i]);
/* fclose returns non-zero on error; warn because buffered stats may be lost. */
623 if (fclose(vstats_file))
624 av_log(NULL, AV_LOG_ERROR,
625 "Error closing vstats file, loss of information possible: %s\n",
626 av_err2str(AVERROR(errno)));
628 av_freep(&vstats_filename);
630 av_freep(&input_streams);
631 av_freep(&input_files);
632 av_freep(&output_streams);
633 av_freep(&output_files);
637 avformat_network_deinit();
/* Final status report: interrupted by signal vs. failed conversion. */
639 if (received_sigterm) {
640 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
641 (int) received_sigterm);
642 } else if (ret && atomic_load(&transcode_init_done)) {
643 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
649 void remove_avoptions(AVDictionary **a, AVDictionary *b)
651 AVDictionaryEntry *t = NULL;
653 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
654 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
658 void assert_avoptions(AVDictionary *m)
660 AVDictionaryEntry *t;
661 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
662 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
667 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, log the user/sys/real time consumed since the last
 * call, labelled by the printf-style fmt; always refreshes current_time.
 * NOTE(review): va_start/va_end and the NULL-fmt early path are elided. */
672 static void update_benchmark(const char *fmt, ...)
674 if (do_benchmark_all) {
675 BenchmarkTimeStamps t = get_benchmark_time_stamps();
681 vsnprintf(buf, sizeof(buf), fmt, va);
683 av_log(NULL, AV_LOG_INFO,
684 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
/* Deltas against the snapshot taken at the previous call. */
685 t.user_usec - current_time.user_usec,
686 t.sys_usec - current_time.sys_usec,
687 t.real_usec - current_time.real_usec, buf);
693 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
696 for (i = 0; i < nb_output_streams; i++) {
697 OutputStream *ost2 = output_streams[i];
698 ost2->finished |= ost == ost2 ? this_stream : others;
/* Send one packet to the muxer for ost: enforces -frames limits, buffers
 * packets until the muxer header is written, rescales timestamps to the
 * stream time base and repairs non-monotonic DTS before interleaved write.
 * unqueue is non-zero when the packet was previously queued (already
 * counted).  NOTE(review): several brace/exit lines are elided here. */
702 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
704 AVFormatContext *s = of->ctx;
705 AVStream *st = ost->st;
709 * Audio encoders may split the packets -- #frames in != #packets out.
710 * But there is no reordering, so we can limit the number of output packets
711 * by simply dropping them here.
712 * Counting encoded video frames needs to be done separately because of
713 * reordering, see do_video_out().
714 * Do not count the packet when unqueued because it has been counted when queued.
716 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
717 if (ost->frame_number >= ost->max_frames) {
718 av_packet_unref(pkt);
724 if (!of->header_written) {
725 AVPacket tmp_pkt = {0};
726 /* the muxer is not initialized yet, buffer the packet */
727 if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the queue geometrically, capped by -max_muxing_queue_size. */
728 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
729 ost->max_muxing_queue_size);
730 if (new_size <= av_fifo_size(ost->muxing_queue)) {
731 av_log(NULL, AV_LOG_ERROR,
732 "Too many packets buffered for output stream %d:%d.\n",
733 ost->file_index, ost->st->index);
736 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Ownership of the packet data moves into the queue. */
740 ret = av_packet_make_refcounted(pkt);
743 av_packet_move_ref(&tmp_pkt, pkt);
744 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* With -vsync drop / -async < 0 the muxer regenerates timestamps. */
748 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
749 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
750 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
752 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* Pull encoder quality/PSNR stats out of packet side data for reporting. */
754 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
756 ost->quality = sd ? AV_RL32(sd) : -1;
757 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
759 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
761 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* For CFR output, force the nominal frame duration. */
766 if (ost->frame_rate.num && ost->is_cfr) {
767 if (pkt->duration > 0)
768 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
769 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
774 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
776 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
777 if (pkt->dts != AV_NOPTS_VALUE &&
778 pkt->pts != AV_NOPTS_VALUE &&
779 pkt->dts > pkt->pts) {
780 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
782 ost->file_index, ost->st->index);
/* Median-of-three guess: picks the middle of pts, dts, last_mux_dts+1. */
784 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
785 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
786 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
788 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
789 pkt->dts != AV_NOPTS_VALUE &&
790 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
791 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict formats require strictly increasing DTS; others allow equal. */
792 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
793 if (pkt->dts < max) {
794 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
796 loglevel = AV_LOG_ERROR;
797 av_log(s, loglevel, "Non-monotonous DTS in output stream "
798 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
799 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
801 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
804 av_log(s, loglevel, "changing to %"PRId64". This may result "
805 "in incorrect timestamps in the output file.\n",
807 if (pkt->pts >= pkt->dts)
808 pkt->pts = FFMAX(pkt->pts, max);
813 ost->last_mux_dts = pkt->dts;
/* Byte/packet accounting for the progress report. */
815 ost->data_size += pkt->size;
816 ost->packets_written++;
818 pkt->stream_index = ost->index;
821 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
822 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
823 av_get_media_type_string(ost->enc_ctx->codec_type),
824 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
825 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
830 ret = av_interleaved_write_frame(s, pkt);
/* On write failure, finish all streams so the transcode loop unwinds. */
832 print_error("av_interleaved_write_frame()", ret);
833 main_return_code = 1;
834 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
836 av_packet_unref(pkt);
839 static void close_output_stream(OutputStream *ost)
841 OutputFile *of = output_files[ost->file_index];
843 ost->finished |= ENCODER_FINISHED;
845 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
846 of->recording_time = FFMIN(of->recording_time, end);
851 * Send a single packet to the output, applying any bitstream filters
852 * associated with the output stream. This may result in any number
853 * of packets actually being written, depending on what bitstream
854 * filters are applied. The supplied packet is consumed and will be
855 * blank (as if newly-allocated) when this function returns.
857 * If eof is set, instead indicate EOF to all bitstream filters and
858 * therefore flush any delayed packets to the output. A blank packet
859 * must be supplied in this case.
861 static void output_packet(OutputFile *of, AVPacket *pkt,
862 OutputStream *ost, int eof)
866 /* apply the output bitstream filters */
/* NULL packet signals EOF/flush to the bitstream filter. */
868 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
/* Drain every packet the BSF produces; each goes to the muxer. */
871 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
872 write_packet(of, pkt, ost, 0);
873 if (ret == AVERROR(EAGAIN))
/* No bitstream filter configured: write the packet straight through. */
876 write_packet(of, pkt, ost, 0);
879 if (ret < 0 && ret != AVERROR_EOF) {
880 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
881 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
887 static int check_recording_time(OutputStream *ost)
889 OutputFile *of = output_files[ost->file_index];
891 if (of->recording_time != INT64_MAX &&
892 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
893 AV_TIME_BASE_Q) >= 0) {
894 close_output_stream(ost);
/* Encode one audio frame and send every resulting packet to the muxer.
 * Maintains ost->sync_opts as a running sample count used for pts.
 * NOTE(review): the receive loop's braces/exit paths are elided here. */
900 static void do_audio_out(OutputFile *of, OutputStream *ost,
903 AVCodecContext *enc = ost->enc_ctx;
907 av_init_packet(&pkt);
/* Stop encoding once the -t recording limit is reached. */
911 if (!check_recording_time(ost))
/* With no pts (or -async < 0) timestamps are synthesized from the running
 * sample counter. */
914 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
915 frame->pts = ost->sync_opts;
916 ost->sync_opts = frame->pts + frame->nb_samples;
917 ost->samples_encoded += frame->nb_samples;
918 ost->frames_encoded++;
920 av_assert0(pkt.size || !pkt.data);
921 update_benchmark(NULL);
923 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
924 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
925 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
926 enc->time_base.num, enc->time_base.den);
/* Standard send/receive encode loop. */
929 ret = avcodec_send_frame(enc, frame);
934 ret = avcodec_receive_packet(enc, &pkt);
935 if (ret == AVERROR(EAGAIN))
940 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Encoder emits pts in enc->time_base; convert to the mux time base. */
942 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
945 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
946 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
947 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
948 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
951 output_packet(of, &pkt, ost, 0);
956 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the resulting packet(s).  DVB subtitles
 * are encoded twice: once to draw and once to clear the subpicture.
 * NOTE(review): several declaration/exit lines are elided in this listing. */
960 static void do_subtitle_out(OutputFile *of,
964 int subtitle_out_max_size = 1024 * 1024;
965 int subtitle_out_size, nb, i;
970 if (sub->pts == AV_NOPTS_VALUE) {
971 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer. */
980 subtitle_out = av_malloc(subtitle_out_max_size);
982 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
987 /* Note: DVB subtitle need one packet to draw them and one other
988 packet to clear them */
989 /* XXX: signal it in the codec context ? */
990 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
995 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
997 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
998 pts -= output_files[ost->file_index]->start_time;
/* nb is 2 for DVB (draw + clear), 1 otherwise. */
999 for (i = 0; i < nb; i++) {
1000 unsigned save_num_rects = sub->num_rects;
1002 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1003 if (!check_recording_time(ost))
1007 // start_display_time is required to be 0
1008 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1009 sub->end_display_time -= sub->start_display_time;
1010 sub->start_display_time = 0;
1014 ost->frames_encoded++;
1016 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1017 subtitle_out_max_size, sub);
/* The encoder may clobber num_rects (clear pass); restore it. */
1019 sub->num_rects = save_num_rects;
1020 if (subtitle_out_size < 0) {
1021 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1025 av_init_packet(&pkt);
1026 pkt.data = subtitle_out;
1027 pkt.size = subtitle_out_size;
1028 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1029 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1030 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1031 /* XXX: the pts correction is handled here. Maybe handling
1032 it in the codec would be better */
/* First packet is stamped at display start, second at display end. */
1034 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1036 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1039 output_packet(of, &pkt, ost, 0);
1043 static void do_video_out(OutputFile *of,
1045 AVFrame *next_picture,
1048 int ret, format_video_sync;
1050 AVCodecContext *enc = ost->enc_ctx;
1051 AVCodecParameters *mux_par = ost->st->codecpar;
1052 AVRational frame_rate;
1053 int nb_frames, nb0_frames, i;
1054 double delta, delta0;
1055 double duration = 0;
1057 InputStream *ist = NULL;
1058 AVFilterContext *filter = ost->filter->filter;
1060 if (ost->source_index >= 0)
1061 ist = input_streams[ost->source_index];
1063 frame_rate = av_buffersink_get_frame_rate(filter);
1064 if (frame_rate.num > 0 && frame_rate.den > 0)
1065 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1067 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1068 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1070 if (!ost->filters_script &&
1072 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1075 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1076 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1079 if (!next_picture) {
1081 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1082 ost->last_nb0_frames[1],
1083 ost->last_nb0_frames[2]);
1085 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1086 delta = delta0 + duration;
1088 /* by default, we output a single frame */
1089 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1092 format_video_sync = video_sync_method;
1093 if (format_video_sync == VSYNC_AUTO) {
1094 if(!strcmp(of->ctx->oformat->name, "avi")) {
1095 format_video_sync = VSYNC_VFR;
1097 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1099 && format_video_sync == VSYNC_CFR
1100 && input_files[ist->file_index]->ctx->nb_streams == 1
1101 && input_files[ist->file_index]->input_ts_offset == 0) {
1102 format_video_sync = VSYNC_VSCFR;
1104 if (format_video_sync == VSYNC_CFR && copy_ts) {
1105 format_video_sync = VSYNC_VSCFR;
1108 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1112 format_video_sync != VSYNC_PASSTHROUGH &&
1113 format_video_sync != VSYNC_DROP) {
1114 if (delta0 < -0.6) {
1115 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1117 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1118 sync_ipts = ost->sync_opts;
1123 switch (format_video_sync) {
1125 if (ost->frame_number == 0 && delta0 >= 0.5) {
1126 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1129 ost->sync_opts = llrint(sync_ipts);
1132 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1133 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1135 } else if (delta < -1.1)
1137 else if (delta > 1.1) {
1138 nb_frames = lrintf(delta);
1140 nb0_frames = llrintf(delta0 - 0.6);
1146 else if (delta > 0.6)
1147 ost->sync_opts = llrint(sync_ipts);
1150 case VSYNC_PASSTHROUGH:
1151 ost->sync_opts = llrint(sync_ipts);
1158 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1159 nb0_frames = FFMIN(nb0_frames, nb_frames);
1161 memmove(ost->last_nb0_frames + 1,
1162 ost->last_nb0_frames,
1163 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1164 ost->last_nb0_frames[0] = nb0_frames;
1166 if (nb0_frames == 0 && ost->last_dropped) {
1168 av_log(NULL, AV_LOG_VERBOSE,
1169 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1170 ost->frame_number, ost->st->index, ost->last_frame->pts);
1172 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1173 if (nb_frames > dts_error_threshold * 30) {
1174 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1178 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1179 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1180 if (nb_frames_dup > dup_warning) {
1181 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1185 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1187 /* duplicates frame if needed */
1188 for (i = 0; i < nb_frames; i++) {
1189 AVFrame *in_picture;
1190 int forced_keyframe = 0;
1192 av_init_packet(&pkt);
1196 if (i < nb0_frames && ost->last_frame) {
1197 in_picture = ost->last_frame;
1199 in_picture = next_picture;
1204 in_picture->pts = ost->sync_opts;
1206 if (!check_recording_time(ost))
1209 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1210 ost->top_field_first >= 0)
1211 in_picture->top_field_first = !!ost->top_field_first;
1213 if (in_picture->interlaced_frame) {
1214 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1215 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1217 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1219 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1221 in_picture->quality = enc->global_quality;
1222 in_picture->pict_type = 0;
1224 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1225 in_picture->pts != AV_NOPTS_VALUE)
1226 ost->forced_kf_ref_pts = in_picture->pts;
1228 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1229 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1230 if (ost->forced_kf_index < ost->forced_kf_count &&
1231 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1232 ost->forced_kf_index++;
1233 forced_keyframe = 1;
1234 } else if (ost->forced_keyframes_pexpr) {
1236 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1237 res = av_expr_eval(ost->forced_keyframes_pexpr,
1238 ost->forced_keyframes_expr_const_values, NULL);
1239 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1240 ost->forced_keyframes_expr_const_values[FKF_N],
1241 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1242 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1243 ost->forced_keyframes_expr_const_values[FKF_T],
1244 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1247 forced_keyframe = 1;
1248 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1249 ost->forced_keyframes_expr_const_values[FKF_N];
1250 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1251 ost->forced_keyframes_expr_const_values[FKF_T];
1252 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1255 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1256 } else if ( ost->forced_keyframes
1257 && !strncmp(ost->forced_keyframes, "source", 6)
1258 && in_picture->key_frame==1
1260 forced_keyframe = 1;
1263 if (forced_keyframe) {
1264 in_picture->pict_type = AV_PICTURE_TYPE_I;
1265 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1268 update_benchmark(NULL);
1270 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1271 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1272 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1273 enc->time_base.num, enc->time_base.den);
1276 ost->frames_encoded++;
1278 ret = avcodec_send_frame(enc, in_picture);
1281 // Make sure Closed Captions will not be duplicated
1282 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1285 ret = avcodec_receive_packet(enc, &pkt);
1286 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1287 if (ret == AVERROR(EAGAIN))
1293 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1294 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1295 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1296 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1299 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1300 pkt.pts = ost->sync_opts;
1302 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1305 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1306 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1308 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1311 frame_size = pkt.size;
1312 output_packet(of, &pkt, ost, 0);
1314 /* if two pass, output log */
1315 if (ost->logfile && enc->stats_out) {
1316 fprintf(ost->logfile, "%s", enc->stats_out);
1321 * For video, number of frames in == number of packets out.
1322 * But there may be reordering, so we can't throw away frames on encoder
1323 * flush, we need to limit them here, before they go into encoder.
1325 ost->frame_number++;
1327 if (vstats_filename && frame_size)
1328 do_video_stats(ost, frame_size);
1331 if (!ost->last_frame)
1332 ost->last_frame = av_frame_alloc();
1333 av_frame_unref(ost->last_frame);
1334 if (next_picture && ost->last_frame)
1335 av_frame_ref(ost->last_frame, next_picture);
1337 av_frame_free(&ost->last_frame);
1341 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1345 static double psnr(double d)
1347 return -10.0 * log10(d);
/*
 * Append one line of per-frame statistics for a video stream to the global
 * vstats file (-vstats / -vstats_file options).
 *
 * NOTE(review): this chunk is an excerpt with elided lines — the fopen()
 * error handling, the assignment of 'enc', and several closing braces are
 * not visible here; comments below describe only what is shown.
 */
1350 static void do_video_stats(OutputStream *ost, int frame_size)
1352 AVCodecContext *enc;
1354 double ti1, bitrate, avg_bitrate;
1356 /* this is executed just the first time do_video_stats is called */
1358 vstats_file = fopen(vstats_filename, "w");
/* only video streams emit vstats lines */
1366 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1367 frame_number = ost->st->nb_frames;
/* format v1 omits the output-file/stream indices; v2 includes them */
1368 if (vstats_version <= 1) {
1369 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1370 ost->quality / (float)FF_QP2LAMBDA);
1372 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1373 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR: squared error normalized by picture area and 8-bit peak (255^2) */
1376 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1377 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1379 fprintf(vstats_file,"f_size= %6d ", frame_size);
1380 /* compute pts value */
1381 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate assumes one frame per encoder time_base tick;
 * average bitrate is total muxed bytes over elapsed stream time */
1385 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1386 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1387 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1388 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1389 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1393 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Idempotent wrapper around init_output_stream(): returns immediately when
 * the stream is already initialized, otherwise initializes it and logs a
 * descriptive error on failure.
 *
 * NOTE(review): excerpt — the early-return body, the path that honors the
 * 'fatal' flag, and the final return are elided from this view.
 */
1395 static int init_output_stream_wrapper(OutputStream *ost, unsigned int fatal)
/* start from AVERROR_BUG so a code path that forgets to assign is conspicuous */
1397 int ret = AVERROR_BUG;
1398 char error[1024] = {0};
1400 if (ost->initialized)
1403 ret = init_output_stream(ost, error, sizeof(error));
1405 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1406 ost->file_index, ost->index, error);
/*
 * Mark an output stream as fully finished for both the encoder and the muxer.
 *
 * NOTE(review): excerpt — a condition guarding the loop appears to be elided
 * (presumably it only marks every sibling stream of the owning output file
 * when some file-level flag is set; confirm against the full source).
 */
1415 static void finish_output_stream(OutputStream *ost)
1417 OutputFile *of = output_files[ost->file_index];
1420 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1423 for (i = 0; i < of->ctx->nb_streams; i++)
1424 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
 * Drain every configured filtergraph buffer sink and feed the resulting
 * frames to the audio/video encoders (non-blocking: only frames already
 * available are taken).
 *
 * NOTE(review): excerpt — several lines (continue/break statements, some
 * closing braces and the final return) are elided from this view.
 */
1429 * Get and encode new output from any of the filtergraphs, without causing
1432 * @return 0 for success, <0 for severe errors
1434 static int reap_filters(int flush)
1436 AVFrame *filtered_frame = NULL;
1439 /* Reap all buffers present in the buffer sinks */
1440 for (i = 0; i < nb_output_streams; i++) {
1441 OutputStream *ost = output_streams[i];
1442 OutputFile *of = output_files[ost->file_index];
1443 AVFilterContext *filter;
1444 AVCodecContext *enc = ost->enc_ctx;
/* skip streams that have no configured filtergraph yet */
1447 if (!ost->filter || !ost->filter->graph->graph)
1449 filter = ost->filter->filter;
/*
 * The encoder must be initialized before pulling frames, so that the
 * time base used for rescaling below is final.
 */
1451 init_output_stream_wrapper(ost, 1);
/* lazily allocate the reusable per-stream output frame */
1453 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1454 return AVERROR(ENOMEM);
1456 filtered_frame = ost->filtered_frame;
1459 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1460 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1461 AV_BUFFERSINK_FLAG_NO_REQUEST);
1463 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1464 av_log(NULL, AV_LOG_WARNING,
1465 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* on flush, EOF on a video sink still triggers one NULL (drain) call */
1466 } else if (flush && ret == AVERROR_EOF) {
1467 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1468 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1472 if (ost->finished) {
1473 av_frame_unref(filtered_frame);
/*
 * Rescale the filter pts into the encoder time base.  float_pts keeps
 * extra fractional bits (up to 16) so the fps code downstream sees a
 * higher-precision timestamp than the integer pts.
 */
1476 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1477 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1478 AVRational filter_tb = av_buffersink_get_time_base(filter);
1479 AVRational tb = enc->time_base;
1480 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1482 tb.den <<= extra_bits;
1484 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1485 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1486 float_pts /= 1 << extra_bits;
1487 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1488 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1490 filtered_frame->pts =
1491 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1492 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
/* dispatch by sink media type */
1495 switch (av_buffersink_get_type(filter)) {
1496 case AVMEDIA_TYPE_VIDEO:
1497 if (!ost->frame_aspect_ratio.num)
1498 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1501 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1502 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1504 enc->time_base.num, enc->time_base.den);
1507 do_video_out(of, ost, filtered_frame, float_pts);
1509 case AVMEDIA_TYPE_AUDIO:
/* refuse un-normalized audio unless the encoder handles parameter changes */
1510 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1511 enc->channels != filtered_frame->channels) {
1512 av_log(NULL, AV_LOG_ERROR,
1513 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1516 do_audio_out(of, ost, filtered_frame);
1519 // TODO support subtitle filters
1523 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type byte totals and muxing overhead at
 * INFO level, then per-stream demux/decode and mux/encode statistics at
 * VERBOSE level, and a warning when nothing was encoded at all.
 *
 * @param total_size total bytes written to the first output (or <=0 if unknown)
 *
 * NOTE(review): excerpt — loop counters' declarations, some braces and
 * else-branches are elided from this view.
 */
1530 static void print_final_stats(int64_t total_size)
1532 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1533 uint64_t subtitle_size = 0;
1534 uint64_t data_size = 0;
/* -1 means "overhead unknown" and prints as "unknown" below */
1535 float percent = -1.0;
/* accumulate per-media-type payload sizes over all output streams */
1539 for (i = 0; i < nb_output_streams; i++) {
1540 OutputStream *ost = output_streams[i];
1541 switch (ost->enc_ctx->codec_type) {
1542 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1543 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1544 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1545 default: other_size += ost->data_size; break;
1547 extra_size += ost->enc_ctx->extradata_size;
1548 data_size += ost->data_size;
/* first-pass-only streams produce no muxed output (log file only) */
1549 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1550 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond raw stream payload */
1554 if (data_size && total_size>0 && total_size >= data_size)
1555 percent = 100.0 * (total_size - data_size) / data_size;
1557 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1558 video_size / 1024.0,
1559 audio_size / 1024.0,
1560 subtitle_size / 1024.0,
1561 other_size / 1024.0,
1562 extra_size / 1024.0);
1564 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1566 av_log(NULL, AV_LOG_INFO, "unknown");
1567 av_log(NULL, AV_LOG_INFO, "\n");
1569 /* print verbose per-stream stats */
1570 for (i = 0; i < nb_input_files; i++) {
1571 InputFile *f = input_files[i];
/* shadows the parameter on purpose: per-file totals */
1572 uint64_t total_packets = 0, total_size = 0;
1574 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1577 for (j = 0; j < f->nb_streams; j++) {
1578 InputStream *ist = input_streams[f->ist_index + j];
1579 enum AVMediaType type = ist->dec_ctx->codec_type;
1581 total_size += ist->data_size;
1582 total_packets += ist->nb_packets;
1584 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1585 i, j, media_type_string(type));
1586 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1587 ist->nb_packets, ist->data_size);
1589 if (ist->decoding_needed) {
1590 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1591 ist->frames_decoded);
1592 if (type == AVMEDIA_TYPE_AUDIO)
1593 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1594 av_log(NULL, AV_LOG_VERBOSE, "; ");
1597 av_log(NULL, AV_LOG_VERBOSE, "\n");
1600 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1601 total_packets, total_size);
/* symmetric report for the output side */
1604 for (i = 0; i < nb_output_files; i++) {
1605 OutputFile *of = output_files[i];
1606 uint64_t total_packets = 0, total_size = 0;
1608 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1611 for (j = 0; j < of->ctx->nb_streams; j++) {
1612 OutputStream *ost = output_streams[of->ost_index + j];
1613 enum AVMediaType type = ost->enc_ctx->codec_type;
1615 total_size += ost->data_size;
1616 total_packets += ost->packets_written;
1618 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1619 i, j, media_type_string(type));
1620 if (ost->encoding_needed) {
1621 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1622 ost->frames_encoded);
1623 if (type == AVMEDIA_TYPE_AUDIO)
1624 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1625 av_log(NULL, AV_LOG_VERBOSE, "; ");
1628 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1629 ost->packets_written, ost->data_size);
1631 av_log(NULL, AV_LOG_VERBOSE, "\n");
1634 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1635 total_packets, total_size);
/* nothing at all was encoded — point the user at likely culprit options */
1637 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1638 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1640 av_log(NULL, AV_LOG_WARNING, "\n");
1642 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic status line ("frame= ... fps= ... bitrate= ...") and,
 * when -progress is active, the machine-readable key=value progress report.
 * Rate-limited to one update per 500ms unless this is the final report.
 *
 * @param is_last_report non-zero for the final report after transcoding ends
 * @param timer_start    wall-clock start time in microseconds
 * @param cur_time       current wall-clock time in microseconds
 *
 * NOTE(review): excerpt — many lines (declarations of t/q/fps/ost, several
 * returns, else-branches and closing braces) are elided from this view.
 */
1647 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1649 AVBPrint buf, buf_script;
1651 AVFormatContext *oc;
1653 AVCodecContext *enc;
1654 int frame_number, vid, i;
/* min valid pts sentinel; grows to the max end-pts over all output streams */
1657 int64_t pts = INT64_MIN + 1;
/* static: persists across calls for the 500ms rate limiting */
1658 static int64_t last_time = -1;
1659 static int qp_histogram[52];
1660 int hours, mins, secs, us;
1661 const char *hours_sign;
/* nothing to do if neither console stats nor -progress output is wanted */
1665 if (!print_stats && !is_last_report && !progress_avio)
1668 if (!is_last_report) {
1669 if (last_time == -1) {
1670 last_time = cur_time;
/* throttle: at most one report per 500ms */
1673 if ((cur_time - last_time) < 500000)
1675 last_time = cur_time;
/* elapsed wall-clock time in seconds */
1678 t = (cur_time-timer_start) / 1000000.0;
1681 oc = output_files[0]->ctx;
1683 total_size = avio_size(oc->pb);
1684 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1685 total_size = avio_tell(oc->pb);
1688 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1689 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1690 for (i = 0; i < nb_output_streams; i++) {
1692 ost = output_streams[i];
1694 if (!ost->stream_copy)
1695 q = ost->quality / (float) FF_QP2LAMBDA;
/* additional video streams after the first only report q */
1697 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1698 av_bprintf(&buf, "q=%2.1f ", q);
1699 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1700 ost->file_index, ost->index, q);
/* first video stream drives the frame/fps columns */
1702 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1705 frame_number = ost->frame_number;
1706 fps = t > 1 ? frame_number / t : 0;
1707 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1708 frame_number, fps < 9.95, fps, q);
1709 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1710 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1711 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1712 ost->file_index, ost->index, q);
1714 av_bprintf(&buf, "L");
/* qp histogram: one hex digit per bucket, log2-compressed counts */
1718 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1720 for (j = 0; j < 32; j++)
1721 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
/* per-plane PSNR when the encoder was asked to track squared error */
1724 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1726 double error, error_sum = 0;
1727 double scale, scale_sum = 0;
1729 char type[3] = { 'Y','U','V' };
1730 av_bprintf(&buf, "PSNR=");
1731 for (j = 0; j < 3; j++) {
/* final report uses the encoder's cumulative error over all frames */
1732 if (is_last_report) {
1733 error = enc->error[j];
1734 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1736 error = ost->error[j];
1737 scale = enc->width * enc->height * 255.0 * 255.0;
1743 p = psnr(error / scale);
1744 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1745 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1746 ost->file_index, ost->index, type[j] | 32, p);
1748 p = psnr(error_sum / scale_sum);
1749 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1750 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1751 ost->file_index, ost->index, p);
1755 /* compute min output value */
1756 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1757 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1758 ost->st->time_base, AV_TIME_BASE_Q));
1760 nb_frames_drop += ost->last_dropped;
/* split |pts| (microseconds) into h:m:s.us for display */
1763 secs = FFABS(pts) / AV_TIME_BASE;
1764 us = FFABS(pts) % AV_TIME_BASE;
1769 hours_sign = (pts < 0) ? "-" : "";
1771 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1772 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1774 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1775 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1776 if (pts == AV_NOPTS_VALUE) {
1777 av_bprintf(&buf, "N/A ");
1779 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1780 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1784 av_bprintf(&buf, "bitrate=N/A");
1785 av_bprintf(&buf_script, "bitrate=N/A\n");
1787 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1788 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
/* buf_script mirrors everything in the -progress key=value format */
1791 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1792 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1793 if (pts == AV_NOPTS_VALUE) {
1794 av_bprintf(&buf_script, "out_time_us=N/A\n");
1795 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1796 av_bprintf(&buf_script, "out_time=N/A\n");
1798 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1799 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1800 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1801 hours_sign, hours, mins, secs, us);
1804 if (nb_frames_dup || nb_frames_drop)
1805 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1806 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1807 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1810 av_bprintf(&buf, " speed=N/A");
1811 av_bprintf(&buf_script, "speed=N/A\n");
1813 av_bprintf(&buf, " speed=%4.3gx", speed);
1814 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1817 if (print_stats || is_last_report) {
/* carriage return keeps the live status on one terminal line */
1818 const char end = is_last_report ? '\n' : '\r';
1819 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1820 fprintf(stderr, "%s %c", buf.str, end);
1822 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1826 av_bprint_finalize(&buf, NULL);
1828 if (progress_avio) {
1829 av_bprintf(&buf_script, "progress=%s\n",
1830 is_last_report ? "end" : "continue");
/* size - 1: never write the bprint's trailing NUL to the progress sink */
1831 avio_write(progress_avio, buf_script.str,
1832 FFMIN(buf_script.len, buf_script.size - 1));
1833 avio_flush(progress_avio);
1834 av_bprint_finalize(&buf_script, NULL);
1835 if (is_last_report) {
1836 if ((ret = avio_closep(&progress_avio)) < 0)
1837 av_log(NULL, AV_LOG_ERROR,
1838 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1843 print_final_stats(total_size);
1846 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1848 // We never got any input. Set a fake format, which will
1849 // come from libavformat.
1850 ifilter->format = par->format;
1851 ifilter->sample_rate = par->sample_rate;
1852 ifilter->channels = par->channels;
1853 ifilter->channel_layout = par->channel_layout;
1854 ifilter->width = par->width;
1855 ifilter->height = par->height;
1856 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/*
 * Drain all audio/video encoders at end of stream: send a NULL (flush)
 * frame and mux every delayed packet the encoder still holds.
 *
 * NOTE(review): excerpt — continue/exit_program statements, the desc
 * assignments, and several braces are elided from this view.
 */
1859 static void flush_encoders(void)
1863 for (i = 0; i < nb_output_streams; i++) {
1864 OutputStream *ost = output_streams[i];
1865 AVCodecContext *enc = ost->enc_ctx;
1866 OutputFile *of = output_files[ost->file_index];
/* streamcopy outputs have no encoder to flush */
1868 if (!ost->encoding_needed)
1871 // Try to enable encoding with no input frames.
1872 // Maybe we should just let encoding fail instead.
1873 if (!ost->initialized) {
1874 FilterGraph *fg = ost->filter->graph;
1876 av_log(NULL, AV_LOG_WARNING,
1877 "Finishing stream %d:%d without any data written to it.\n",
1878 ost->file_index, ost->st->index);
/* the filtergraph never got configured: fake the input formats from
 * the demuxer's codec parameters so it can still be built */
1880 if (ost->filter && !fg->graph) {
1882 for (x = 0; x < fg->nb_inputs; x++) {
1883 InputFilter *ifilter = fg->inputs[x];
1884 if (ifilter->format < 0)
1885 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1888 if (!ifilter_has_all_input_formats(fg))
1891 ret = configure_filtergraph(fg);
1893 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1897 finish_output_stream(ost);
1900 init_output_stream_wrapper(ost, 1);
/* only audio and video encoders support/need draining here */
1903 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* desc is used in the log/benchmark labels below */
1907 const char *desc = NULL;
1911 switch (enc->codec_type) {
1912 case AVMEDIA_TYPE_AUDIO:
1915 case AVMEDIA_TYPE_VIDEO:
1922 av_init_packet(&pkt);
1926 update_benchmark(NULL);
/* EAGAIN from receive_packet means the encoder wants (flush) input */
1928 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1929 ret = avcodec_send_frame(enc, NULL);
1931 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1938 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1939 if (ret < 0 && ret != AVERROR_EOF) {
1940 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass log output, same as the normal encode path */
1945 if (ost->logfile && enc->stats_out) {
1946 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: emit the final (EOF-marking) packet and stop for this stream */
1948 if (ret == AVERROR_EOF) {
1949 output_packet(of, &pkt, ost, 1);
1952 if (ost->finished & MUXER_FINISHED) {
1953 av_packet_unref(&pkt);
1956 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1957 pkt_size = pkt.size;
1958 output_packet(of, &pkt, ost, 0);
1959 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1960 do_video_stats(ost, pkt_size);
/*
 * Decide whether a packet from this input stream should currently be
 * streamcopied into this output stream.  Visible checks: the output must be
 * mapped to exactly this input, and packets before the output file's start
 * time are rejected.
 * NOTE(review): excerpt — the return statements (and, presumably, a check
 * that the output stream is not already finished) are elided from this view.
 */
1967 * Check whether a packet from ist should be written into ost at this time
1969 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1971 OutputFile *of = output_files[ost->file_index];
1972 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1974 if (ost->source_index != ist_index)
1980 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding: apply
 * start-time / recording-time limits, then rescale pts/dts/duration from the
 * input stream time base into the output mux time base and hand the packet
 * to the muxer.
 *
 * NOTE(review): excerpt — the EOF test guarding the flush path, several
 * returns, and the increment of ost->frame_number are elided from this view.
 */
1986 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1988 OutputFile *of = output_files[ost->file_index];
1989 InputFile *f = input_files [ist->file_index];
1990 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1991 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1994 // EOF: flush output bitstream filters.
1996 av_init_packet(&opkt);
1999 output_packet(of, &opkt, ost, 1);
/* drop initial non-keyframes unless the user asked to keep them */
2003 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2004 !ost->copy_initial_nonkeyframes)
/* drop packets that predate the effective start point */
2007 if (!ost->frame_number && !ost->copy_prior_start) {
2008 int64_t comp_start = start_time;
2009 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2010 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2011 if (pkt->pts == AV_NOPTS_VALUE ?
2012 ist->pts < comp_start :
2013 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* output-file -t limit reached: close this output stream */
2017 if (of->recording_time != INT64_MAX &&
2018 ist->pts >= of->recording_time + start_time) {
2019 close_output_stream(ost);
/* input-file -t limit, measured from the input's own start time */
2023 if (f->recording_time != INT64_MAX) {
2024 start_time = f->ctx->start_time;
2025 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2026 start_time += f->start_time;
2027 if (ist->pts >= f->recording_time + start_time) {
2028 close_output_stream(ost);
2033 /* force the input stream PTS */
2034 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2037 if (av_packet_ref(&opkt, pkt) < 0)
2040 if (pkt->pts != AV_NOPTS_VALUE)
2041 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
/* dts: fall back to the input's running dts when the packet has none;
 * audio uses av_rescale_delta to keep sample-accurate timestamps */
2043 if (pkt->dts == AV_NOPTS_VALUE) {
2044 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2045 } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2046 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2048 duration = ist->dec_ctx->frame_size;
2049 opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2050 (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2051 &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2052 /* dts will be set immediately afterwards to what pts is now */
2053 opkt.pts = opkt.dts - ost_tb_start_time;
2055 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2056 opkt.dts -= ost_tb_start_time;
2058 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2060 output_packet(of, &opkt, ost, 0);
/*
 * If the decoder reported no channel layout, pick the default layout for its
 * channel count (bounded by -guess_layout_max) and warn the user about the
 * guess.
 * NOTE(review): excerpt — the return statements (0 on failure to guess,
 * presumably 1 otherwise) are elided from this view.
 */
2063 int guess_input_channel_layout(InputStream *ist)
2065 AVCodecContext *dec = ist->dec_ctx;
2067 if (!dec->channel_layout) {
2068 char layout_name[256];
/* user cap on how many channels we are willing to guess a layout for */
2070 if (dec->channels > ist->guess_layout_max)
2072 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2073 if (!dec->channel_layout)
2075 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2076 dec->channels, dec->channel_layout);
2077 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2078 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Book-keeping after every decode attempt: count successes/failures in
 * decode_error_stat, and report (fatally when -xerror is set) corrupt
 * decoded frames.
 * NOTE(review): excerpt — the exit_program() calls taken when exit_on_error
 * is set are elided from this view.
 */
2083 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* index 0 counts successful decodes, index 1 counts failures */
2085 if (*got_output || ret<0)
2086 decode_error_stat[ret<0] ++;
2088 if (ret < 0 && exit_on_error)
2091 if (*got_output && ist) {
2092 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2093 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2094 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2101 // Filters can be configured only if the formats of all inputs are known.
2102 static int ifilter_has_all_input_formats(FilterGraph *fg)
2105 for (i = 0; i < fg->nb_inputs; i++) {
2106 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2107 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Push a decoded frame into one filtergraph input, reconfiguring the graph
 * first when the frame's parameters (format, size, audio layout, hw frames
 * context) differ from what the graph was built for.  While some sibling
 * input still has an unknown format, frames are buffered in the input's
 * FIFO instead.
 *
 * NOTE(review): excerpt — several returns, goto/continue statements and
 * closing braces are elided from this view.
 */
2113 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2115 FilterGraph *fg = ifilter->graph;
2116 int need_reinit, ret, i;
2118 /* determine if the parameters for this input changed */
2119 need_reinit = ifilter->format != frame->format;
2121 switch (ifilter->ist->st->codecpar->codec_type) {
2122 case AVMEDIA_TYPE_AUDIO:
2123 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2124 ifilter->channels != frame->channels ||
2125 ifilter->channel_layout != frame->channel_layout;
2127 case AVMEDIA_TYPE_VIDEO:
2128 need_reinit |= ifilter->width != frame->width ||
2129 ifilter->height != frame->height;
/* honor -reinit_filter 0: keep the existing graph despite changes */
2133 if (!ifilter->ist->reinit_filters && fg->graph)
/* a change of hw frames context also forces reinit */
2136 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2137 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2141 ret = ifilter_parameters_from_frame(ifilter, frame);
2146 /* (re)init the graph if possible, otherwise buffer the frame and return */
2147 if (need_reinit || !fg->graph) {
2148 for (i = 0; i < fg->nb_inputs; i++) {
/* not all inputs known yet: clone and queue this frame for later */
2149 if (!ifilter_has_all_input_formats(fg)) {
2150 AVFrame *tmp = av_frame_clone(frame);
2152 return AVERROR(ENOMEM);
2153 av_frame_unref(frame);
/* grow the FIFO when full (doubling) before queuing the clone */
2155 if (!av_fifo_space(ifilter->frame_queue)) {
2156 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2158 av_frame_free(&tmp);
2162 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the old graph before tearing it down and reconfiguring */
2167 ret = reap_filters(1);
2168 if (ret < 0 && ret != AVERROR_EOF) {
2169 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2173 ret = configure_filtergraph(fg);
2175 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2180 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2182 if (ret != AVERROR_EOF)
2183 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal end-of-stream on a filtergraph input.  If the graph is configured,
 * close the buffer source at the given pts; otherwise fall back to the
 * stream's codec parameters so the graph can still be configured, failing
 * if the format remains unknown.
 * NOTE(review): excerpt — the eof-flag assignment, the error-return after
 * av_buffersrc_close and the final return are elided from this view.
 */
2190 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2196 if (ifilter->filter) {
2197 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2201 // the filtergraph was never configured
2202 if (ifilter->format < 0)
2203 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2204 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2205 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2206 return AVERROR_INVALIDDATA;
/*
 * Thin wrapper over the send/receive decode API that emulates the old
 * got_frame convention for the callers below.
 * NOTE(review): excerpt — the declaration of ret, the reset/set of
 * *got_frame, the pkt NULL-guard and the final return are elided from
 * this view.
 */
2213 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2214 // There is the following difference: if you got a frame, you must call
2215 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2216 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2217 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2224 ret = avcodec_send_packet(avctx, pkt);
2225 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2226 // decoded frames with avcodec_receive_frame() until done.
2227 if (ret < 0 && ret != AVERROR_EOF)
/* EAGAIN here simply means "no frame available yet" — not an error */
2231 ret = avcodec_receive_frame(avctx, frame);
2232 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Fan a decoded frame out to every filtergraph input fed by this stream.
 * All but the last consumer receive a reference copy (via ist->filter_frame);
 * the last gets the decoded frame itself, avoiding one extra ref.
 * NOTE(review): excerpt — the declarations of i/ret/f, the else-branch
 * assigning the frame directly, break statements and the final return are
 * elided from this view.
 */
2240 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2245 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2246 for (i = 0; i < ist->nb_filters; i++) {
2247 if (i < ist->nb_filters - 1) {
2248 f = ist->filter_frame;
2249 ret = av_frame_ref(f, decoded_frame);
2254 ret = ifilter_send_frame(ist->filters[i], f);
/* a filter input that already saw EOF is not an error for the others */
2255 if (ret == AVERROR_EOF)
2256 ret = 0; /* ignore */
2258 av_log(NULL, AV_LOG_ERROR,
2259 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, fix up the decoded frame's timestamp (choosing a
 * source among frame pts, packet pts, and the stream's running dts), advance
 * the stream's predicted next pts/dts by the frame duration, and feed the
 * frame to the filtergraphs.
 *
 * NOTE(review): excerpt — the trailing parameters of the signature, several
 * declarations (ret, err), gotos and some else/brace lines are elided from
 * this view.
 */
2266 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2269 AVFrame *decoded_frame;
2270 AVCodecContext *avctx = ist->dec_ctx;
2272 AVRational decoded_frame_tb;
/* lazily allocate the per-stream reusable frames */
2274 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2275 return AVERROR(ENOMEM);
2276 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2277 return AVERROR(ENOMEM);
2278 decoded_frame = ist->decoded_frame;
2280 update_benchmark(NULL);
2281 ret = decode(avctx, decoded_frame, got_output, pkt);
2282 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* guard against decoders reporting a nonsensical sample rate */
2286 if (ret >= 0 && avctx->sample_rate <= 0) {
2287 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2288 ret = AVERROR_INVALIDDATA;
2291 if (ret != AVERROR_EOF)
2292 check_decode_result(ist, got_output, ret);
2294 if (!*got_output || ret < 0)
2297 ist->samples_decoded += decoded_frame->nb_samples;
2298 ist->frames_decoded++;
2300 /* increment next_dts to use for the case where the input stream does not
2301 have timestamps or there are multiple frames in the packet */
2302 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2304 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* pick the best available timestamp and remember its time base */
2307 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2308 decoded_frame_tb = ist->st->time_base;
2309 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2310 decoded_frame->pts = pkt->pts;
2311 decoded_frame_tb = ist->st->time_base;
2313 decoded_frame->pts = ist->dts;
2314 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale into the sample-rate time base, sample-accurately */
2316 if (decoded_frame->pts != AV_NOPTS_VALUE)
2317 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2318 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2319 (AVRational){1, avctx->sample_rate});
2320 ist->nb_samples = decoded_frame->nb_samples;
2321 err = send_frame_to_filters(ist, decoded_frame);
2323 av_frame_unref(ist->filter_frame);
2324 av_frame_unref(decoded_frame);
2325 return err < 0 ? err : ret;
/*
 * Decode one video packet: maintain a dts queue used to synthesize
 * timestamps for frames drained at EOF, derive the frame's best-effort
 * timestamp, update the stream clock, and feed the frame to the
 * filtergraphs.
 *
 * NOTE(review): excerpt — the trailing signature parameters, the avpkt
 * declaration/initialization, several returns/gotos and closing braces are
 * elided from this view.
 */
2328 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2331 AVFrame *decoded_frame;
2332 int i, ret = 0, err = 0;
2333 int64_t best_effort_timestamp;
2334 int64_t dts = AV_NOPTS_VALUE;
2337 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2338 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2340 if (!eof && pkt && pkt->size == 0)
/* lazily allocate the per-stream reusable frames */
2343 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2344 return AVERROR(ENOMEM);
2345 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2346 return AVERROR(ENOMEM);
2347 decoded_frame = ist->decoded_frame;
2348 if (ist->dts != AV_NOPTS_VALUE)
2349 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2352 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2355 // The old code used to set dts on the drain packet, which does not work
2356 // with the new API anymore.
/* remember each packet dts so drained frames at EOF can reuse them */
2358 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2360 return AVERROR(ENOMEM);
2361 ist->dts_buffer = new;
2362 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2365 update_benchmark(NULL);
2366 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2367 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2371 // The following line may be required in some cases where there is no parser
2372 // or the parser does not has_b_frames correctly
2373 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2374 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2375 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2377 av_log(ist->dec_ctx, AV_LOG_WARNING,
2378 "video_delay is larger in decoder than demuxer %d > %d.\n"
2379 "If you want to help, upload a sample "
2380 "of this file to https://streams.videolan.org/upload/ "
2381 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2382 ist->dec_ctx->has_b_frames,
2383 ist->st->codecpar->video_delay);
2386 if (ret != AVERROR_EOF)
2387 check_decode_result(ist, got_output, ret);
/* debug aid: decoder context vs. decoded frame parameter mismatch */
2389 if (*got_output && ret >= 0) {
2390 if (ist->dec_ctx->width != decoded_frame->width ||
2391 ist->dec_ctx->height != decoded_frame->height ||
2392 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2393 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2394 decoded_frame->width,
2395 decoded_frame->height,
2396 decoded_frame->format,
2397 ist->dec_ctx->width,
2398 ist->dec_ctx->height,
2399 ist->dec_ctx->pix_fmt);
2403 if (!*got_output || ret < 0)
/* apply the user's -top override */
2406 if(ist->top_field_first>=0)
2407 decoded_frame->top_field_first = ist->top_field_first;
2409 ist->frames_decoded++;
/* download hwaccel surfaces to system memory when requested */
2411 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2412 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2416 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2418 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2419 *duration_pts = decoded_frame->pkt_duration;
/* forced CFR input (-r before -i): synthesize sequential timestamps */
2421 if (ist->framerate.num)
2422 best_effort_timestamp = ist->cfr_next_pts++;
/* at EOF, drained frames reuse the buffered packet dts values (FIFO) */
2424 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2425 best_effort_timestamp = ist->dts_buffer[0];
2427 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2428 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2429 ist->nb_dts_buffer--;
2432 if(best_effort_timestamp != AV_NOPTS_VALUE) {
/* note the embedded assignment: the frame pts is set here too */
2433 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2435 if (ts != AV_NOPTS_VALUE)
2436 ist->next_pts = ist->pts = ts;
2440 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2441 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2442 ist->st->index, av_ts2str(decoded_frame->pts),
2443 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2444 best_effort_timestamp,
2445 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2446 decoded_frame->key_frame, decoded_frame->pict_type,
2447 ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides the codec-level one */
2450 if (ist->st->sample_aspect_ratio.num)
2451 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2453 err = send_frame_to_filters(ist, decoded_frame);
2456 av_frame_unref(ist->filter_frame);
2457 av_frame_unref(decoded_frame);
2458 return err < 0 ? err : ret;
/* Decode one subtitle packet and fan the result out to sub2video and/or
 * subtitle encoders.
 * NOTE(review): this listing elides some original source lines (gaps in the
 * embedded numbering); comments describe only the visible statements. */
2461 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2464     AVSubtitle subtitle;
/* Old-style subtitle decode API: got_output set when a subtitle was produced. */
2466     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2467                                           &subtitle, got_output, pkt);
2469     check_decode_result(NULL, got_output, ret);
2471     if (ret < 0 || !*got_output) {
/* On failure/EOF with no output, flush any pending sub2video state.
 * (The guard condition for this call is on an elided line — confirm in full source.) */
2474             sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle's display time so it ends
 * when the current one starts, then emit the *previous* subtitle (one-event
 * delay implemented via the FFSWAPs below). */
2478     if (ist->fix_sub_duration) {
2480         if (ist->prev_sub.got_output) {
/* Gap between consecutive subtitle pts, converted to milliseconds. */
2481             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2482                              1000, AV_TIME_BASE);
2483             if (end < ist->prev_sub.subtitle.end_display_time) {
2484                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2485                        "Subtitle duration reduced from %"PRId32" to %d%s\n",
2486                        ist->prev_sub.subtitle.end_display_time, end,
2487                        end <= 0 ? ", dropping it" : "");
2488                 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current decode result with the buffered previous one: the rest of the
 * function then processes the *previous* subtitle with its fixed duration. */
2491         FFSWAP(int, *got_output, ist->prev_sub.got_output);
2492         FFSWAP(int, ret, ist->prev_sub.ret);
2493         FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* Route to sub2video if a frame buffer exists; otherwise queue for filters
 * that are not ready yet. */
2501     if (ist->sub2video.frame) {
2502         sub2video_update(ist, INT64_MIN, &subtitle);
2503     } else if (ist->nb_filters) {
2504         if (!ist->sub2video.sub_queue)
2505             ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2506         if (!ist->sub2video.sub_queue)
/* Grow the FIFO (doubling) when full before writing. */
2508         if (!av_fifo_space(ist->sub2video.sub_queue)) {
2509             ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2513         av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
/* Empty subtitles are not sent to encoders. */
2517     if (!subtitle.num_rects)
2520     ist->frames_decoded++;
/* Encode the subtitle into every matching output stream. */
2522     for (i = 0; i < nb_output_streams; i++) {
2523         OutputStream *ost = output_streams[i];
2525         if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2526             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2529         do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2534     avsubtitle_free(&subtitle);
/* Signal EOF (with the stream's last pts, rescaled to stream time base) to
 * every filter input fed by this input stream.
 * NOTE(review): the listing elides the error-check/return lines of this
 * function (numbering gap after 2546). */
2538 static int send_filter_eof(InputStream *ist)
2541     /* TODO keep pts also in stream time base to avoid converting back */
2542     int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2543                                    AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2545     for (i = 0; i < ist->nb_filters; i++) {
2546         ret = ifilter_send_eof(ist->filters[i], pts);
2553 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: decodes (audio/video/subtitle) or stream-copies
 * one input packet, maintaining the stream's pts/dts bookkeeping in
 * AV_TIME_BASE units. Returns !eof_reached.
 * NOTE(review): the listing elides many original lines (braces, the `repeating`
 * loop-exit logic, some error paths); comments cover only visible statements. */
2554 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2558     int eof_reached = 0;
/* First packet for this stream: seed dts from the frame rate (accounting for
 * decoder delay via has_b_frames) or 0. */
2561     if (!ist->saw_first_ts) {
2562         ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2564         if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2565             ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2566             ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2568         ist->saw_first_ts = 1;
2571     if (ist->next_dts == AV_NOPTS_VALUE)
2572         ist->next_dts = ist->dts;
2573     if (ist->next_pts == AV_NOPTS_VALUE)
2574         ist->next_pts = ist->pts;
2578     av_init_packet(&avpkt);
/* Trust the packet's dts when present; for non-video (or stream copy) it also
 * drives pts. */
2585     if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2586         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2587         if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2588             ist->next_pts = ist->pts = ist->dts;
2591     // while we have more to decode or while the decoder did output something on EOF
2592     while (ist->decoding_needed) {
2593         int64_t duration_dts = 0;
2594         int64_t duration_pts = 0;
2596         int decode_failed = 0;
2598         ist->pts = ist->next_pts;
2599         ist->dts = ist->next_dts;
2601         switch (ist->dec_ctx->codec_type) {
2602         case AVMEDIA_TYPE_AUDIO:
/* `repeating` (set on later loop iterations, per elided code) means the packet
 * was already submitted — pass NULL to drain remaining frames. */
2603             ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
2606         case AVMEDIA_TYPE_VIDEO:
2607             ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
/* Estimate frame duration: prefer packet duration, fall back to the decoder's
 * framerate scaled by repeat_pict/ticks_per_frame. */
2609             if (!repeating || !pkt || got_output) {
2610                 if (pkt && pkt->duration) {
2611                     duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2612                 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2613                     int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2614                     duration_dts = ((int64_t)AV_TIME_BASE *
2615                                     ist->dec_ctx->framerate.den * ticks) /
2616                                     ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2619                 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2620                     ist->next_dts += duration_dts;
/* Unknown dts/duration: invalidate next_dts rather than propagate garbage. */
2622                     ist->next_dts = AV_NOPTS_VALUE;
/* Advance next_pts by the decoded frame's pkt_duration when available,
 * otherwise reuse the dts-based estimate. */
2626             if (duration_pts > 0) {
2627                 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2629                 ist->next_pts += duration_dts;
2633         case AVMEDIA_TYPE_SUBTITLE:
2636             ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2637             if (!pkt && ret >= 0)
2644         if (ret == AVERROR_EOF) {
2650             if (decode_failed) {
2651                 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2652                        ist->file_index, ist->st->index, av_err2str(ret));
/* Post-decode processing (e.g. filtering) failed — this is fatal. */
2654                 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2655                        "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2657             if (!decode_failed || exit_on_error)
2663             ist->got_output = 1;
2668         // During draining, we might get multiple output frames in this loop.
2669         // ffmpeg.c does not drain the filter chain on configuration changes,
2670         // which means if we send multiple frames at once to the filters, and
2671         // one of those frames changes configuration, the buffered frames will
2672         // be lost. This can upset certain FATE tests.
2673         // Decode only 1 frame per call on EOF to appease these FATE tests.
2674         // The ideal solution would be to rewrite decoding to use the new
2675         // decoding API in a better way.
2682     /* after flushing, send an EOF on all the filter inputs attached to the stream */
2683     /* except when looping we need to flush but not to send an EOF */
2684     if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2685         int ret = send_filter_eof(ist);
2687             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2692     /* handle stream copy */
2693     if (!ist->decoding_needed && pkt) {
2694         ist->dts = ist->next_dts;
2695         switch (ist->dec_ctx->codec_type) {
2696         case AVMEDIA_TYPE_AUDIO:
2697             av_assert1(pkt->duration >= 0);
/* Audio copy: advance dts by one frame (frame_size/sample_rate) when known,
 * else by the packet duration. */
2698             if (ist->dec_ctx->sample_rate) {
2699                 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2700                                   ist->dec_ctx->sample_rate;
2702                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2705         case AVMEDIA_TYPE_VIDEO:
2706             if (ist->framerate.num) {
2707                 // TODO: Remove work-around for c99-to-c89 issue 7
2708                 AVRational time_base_q = AV_TIME_BASE_Q;
/* Forced -r: snap next_dts to the next tick of the user framerate grid. */
2709                 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2710                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2711             } else if (pkt->duration) {
2712                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2713             } else if(ist->dec_ctx->framerate.num != 0) {
2714                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2715                 ist->next_dts += ((int64_t)AV_TIME_BASE *
2716                                   ist->dec_ctx->framerate.den * ticks) /
2717                                   ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2721         ist->pts = ist->dts;
2722         ist->next_pts = ist->next_dts;
/* Forward the packet to every stream-copy output fed by this input. */
2724         for (i = 0; i < nb_output_streams; i++) {
2725             OutputStream *ost = output_streams[i];
2727             if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2730             do_streamcopy(ist, ost, pkt);
2733     return !eof_reached;
/* Build an SDP description covering all RTP output files and either print it
 * to stdout or write it to -sdp_file. Bails out (elided early returns) until
 * every output file's header has been written.
 * NOTE(review): allocation-failure checks and cleanup lines are elided from
 * this listing (numbering gaps). */
2736 static void print_sdp(void)
2741     AVIOContext *sdp_pb;
2742     AVFormatContext **avc;
2744     for (i = 0; i < nb_output_files; i++) {
2745         if (!output_files[i]->header_written)
2749     avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts them. */
2752     for (i = 0, j = 0; i < nb_output_files; i++) {
2753         if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2754             avc[j] = output_files[i]->ctx;
2762     av_sdp_create(avc, j, sdp, sizeof(sdp));
2764     if (!sdp_filename) {
2765         printf("SDP:\n%s\n", sdp);
2768         if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2769             av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2771             avio_print(sdp_pb, sdp);
2772             avio_closep(&sdp_pb);
2773             av_freep(&sdp_filename);
/* AVCodecContext.get_format callback: walk the decoder's offered pixel formats
 * and pick/initialize a hwaccel matching the user's -hwaccel selection, or
 * fall through to a software format (selection of *p on non-hwaccel formats is
 * on elided lines).
 * NOTE(review): loop-exit/continue lines are elided from this listing. */
2781 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2783     InputStream *ist = s->opaque;
2784     const enum AVPixelFormat *p;
2787     for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2788         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2789         const AVCodecHWConfig  *config = NULL;
/* Software formats are not handled by this branch. */
2792         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Generic/auto hwaccel: search the codec's hw configs for one that supports
 * a device context and offers this pixel format. */
2795         if (ist->hwaccel_id == HWACCEL_GENERIC ||
2796             ist->hwaccel_id == HWACCEL_AUTO) {
2798                 config = avcodec_get_hw_config(s->codec, i);
2801                 if (!(config->methods &
2802                       AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2804                 if (config->pix_fmt == *p)
2809                 if (config->device_type != ist->hwaccel_device_type) {
2810                     // Different hwaccel offered, ignore.
2814                 ret = hwaccel_decode_init(s);
2816                     if (ist->hwaccel_id == HWACCEL_GENERIC) {
/* Explicitly requested hwaccel that cannot start is fatal for this stream. */
2817                         av_log(NULL, AV_LOG_FATAL,
2818                                "%s hwaccel requested for input stream #%d:%d, "
2819                                "but cannot be initialized.\n",
2820                                av_hwdevice_get_type_name(config->device_type),
2821                                ist->file_index, ist->st->index);
2822                         return AV_PIX_FMT_NONE;
/* Legacy path: match against the static hwaccels[] table by pixel format. */
2827             const HWAccel *hwaccel = NULL;
2829             for (i = 0; hwaccels[i].name; i++) {
2830                 if (hwaccels[i].pix_fmt == *p) {
2831                     hwaccel = &hwaccels[i];
2836                 // No hwaccel supporting this pixfmt.
2839             if (hwaccel->id != ist->hwaccel_id) {
2840                 // Does not match requested hwaccel.
2844             ret = hwaccel->init(s);
2846                 av_log(NULL, AV_LOG_FATAL,
2847                        "%s hwaccel requested for input stream #%d:%d, "
2848                        "but cannot be initialized.\n", hwaccel->name,
2849                        ist->file_index, ist->st->index);
2850                 return AV_PIX_FMT_NONE;
/* Propagate pre-existing hw frames context to the decoder. */
2854         if (ist->hw_frames_ctx) {
2855             s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2856             if (!s->hw_frames_ctx)
2857                 return AV_PIX_FMT_NONE;
/* Remember which hw pixel format was chosen, for later frame retrieval. */
2860         ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate frame-buffer allocation to the
 * active hwaccel for hw-format frames, otherwise use the default allocator. */
2867 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2869     InputStream *ist = s->opaque;
2871     if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2872         return ist->hwaccel_get_buffer(s, frame, flags);
2874     return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (if decoding is needed), wiring up the
 * hwaccel callbacks, decoder options and device setup; always resets the
 * stream's next_pts/next_dts. On failure writes a message into `error` and
 * returns a negative AVERROR.
 * NOTE(review): some braces/returns are elided from this listing. */
2877 static int init_input_stream(int ist_index, char *error, int error_len)
2880     InputStream *ist = input_streams[ist_index];
2882     if (ist->decoding_needed) {
2883         AVCodec *codec = ist->dec;
/* codec == NULL check is on an elided line; this is the "decoder not found" path. */
2885             snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2886                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2887             return AVERROR(EINVAL);
/* Hook our callbacks: opaque back-pointer, hwaccel format negotiation and
 * buffer allocation. */
2890         ist->dec_ctx->opaque                = ist;
2891         ist->dec_ctx->get_format            = get_format;
2892         ist->dec_ctx->get_buffer2           = get_buffer;
2893         ist->dec_ctx->thread_safe_callbacks = 1;
2895         av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2896         if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2897            (ist->decoding_needed & DECODING_FOR_OST)) {
2898             av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2899             if (ist->decoding_needed & DECODING_FOR_FILTER)
2900                 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2903         av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2905         /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2906          * audio, and video decoders such as cuvid or mediacodec */
2907         ist->dec_ctx->pkt_timebase = ist->st->time_base;
2909         if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2910             av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2911         /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2912         if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2913             av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2915         ret = hw_device_setup_for_decode(ist);
2917             snprintf(error, error_len, "Device setup failed for "
2918                      "decoder on input stream #%d:%d : %s",
2919                      ist->file_index, ist->st->index, av_err2str(ret));
2923         if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2924             if (ret == AVERROR_EXPERIMENTAL)
2925                 abort_codec_experimental(codec, 0);
2927             snprintf(error, error_len,
2928                      "Error while opening decoder for input stream "
2930                     ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left in the dict was not consumed — abort loudly. */
2933         assert_avoptions(ist->decoder_opts);
2936     ist->next_pts = AV_NOPTS_VALUE;
2937     ist->next_dts = AV_NOPTS_VALUE;
/* Map an output stream back to its source InputStream, when it has one
 * (source_index >= 0). The no-source return path is on an elided line. */
2942 static InputStream *get_input_stream(OutputStream *ost)
2944     if (ost->source_index >= 0)
2945         return input_streams[ost->source_index];
/* qsort comparator for int64_t values (overflow-safe via FFDIFFSIGN rather
 * than subtraction). */
2949 static int compare_int64(const void *a, const void *b)
2951     return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2954 /* open the muxer when all the streams are initialized */
/* Once every stream of `of` is initialized: write the muxer header, dump the
 * format, optionally trigger SDP output, and flush packets that were queued
 * while the muxer was not yet open.
 * NOTE(review): early-return and error-propagation lines are elided from this
 * listing. */
2955 static int check_init_output_file(OutputFile *of, int file_index)
/* Bail (elided return) while any stream is still uninitialized. */
2959     for (i = 0; i < of->ctx->nb_streams; i++) {
2960         OutputStream *ost = output_streams[of->ost_index + i];
2961         if (!ost->initialized)
2965     of->ctx->interrupt_callback = int_cb;
2967     ret = avformat_write_header(of->ctx, &of->opts);
2969         av_log(NULL, AV_LOG_ERROR,
2970                "Could not write header for output file #%d "
2971                "(incorrect codec parameters ?): %s\n",
2972                file_index, av_err2str(ret));
2975     //assert_avoptions(of->opts);
2976     of->header_written = 1;
2978     av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2980     if (sdp_filename || want_sdp)
2983     /* flush the muxing queues */
2984     for (i = 0; i < of->ctx->nb_streams; i++) {
2985         OutputStream *ost = output_streams[of->ost_index + i];
2987         /* try to improve muxing time_base (only possible if nothing has been written yet) */
2988         if (!av_fifo_size(ost->muxing_queue))
2989             ost->mux_timebase = ost->st->time_base;
/* Drain every packet buffered before the header was written. */
2991         while (av_fifo_size(ost->muxing_queue)) {
2993             av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2994             write_packet(of, &pkt, ost, 1);
/* Initialize the output stream's bitstream filter: feed it the stream's codec
 * parameters/time base, then copy the (possibly modified) parameters and time
 * base back to the stream.
 * NOTE(review): the no-bsf early return and error returns are elided from this
 * listing. */
3001 static int init_output_bsfs(OutputStream *ost)
3003     AVBSFContext *ctx = ost->bsf_ctx;
3009     ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3013     ctx->time_base_in = ost->st->time_base;
3015     ret = av_bsf_init(ctx);
3017         av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
/* The bsf may rewrite codec parameters (e.g. extradata) and time base. */
3022     ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3025     ost->st->time_base = ctx->time_base_out;
/* Set up an output stream in stream-copy (-c copy) mode: derive the output
 * codec parameters from the input stream, pick a codec tag valid for the
 * output container, transfer timing info, side data and disposition, and
 * apply per-codec fixups.
 * NOTE(review): error-return lines after most calls are elided from this
 * listing. */
3030 static int init_output_stream_streamcopy(OutputStream *ost)
3032     OutputFile *of = output_files[ost->file_index];
3033     InputStream *ist = get_input_stream(ost);
3034     AVCodecParameters *par_dst = ost->st->codecpar;
3035     AVCodecParameters *par_src = ost->ref_par;
3038     uint32_t codec_tag = par_dst->codec_tag;
3040     av_assert0(ist && !ost->filter);
/* Round-trip input params through enc_ctx so user -c:copy options are applied. */
3042     ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3044     ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3046         av_log(NULL, AV_LOG_FATAL,
3047                "Error setting up codec context options.\n");
3051     ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3053         av_log(NULL, AV_LOG_FATAL,
3054                "Error getting reference codec parameters.\n");
/* Keep the source codec_tag only if the output container accepts it (or has
 * no tag table / no alternative tag of its own). */
3059         unsigned int codec_tag_tmp;
3060         if (!of->ctx->oformat->codec_tag ||
3061             av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3062             !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3063             codec_tag = par_src->codec_tag;
3066     ret = avcodec_parameters_copy(par_dst, par_src);
3070     par_dst->codec_tag = codec_tag;
3072     if (!ost->frame_rate.num)
3073         ost->frame_rate = ist->framerate;
3074     ost->st->avg_frame_rate = ost->frame_rate;
3076     ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3080     // copy timebase while removing common factors
3081     if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3082         ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3084     // copy estimated duration as a hint to the muxer
3085     if (ost->st->duration <= 0 && ist->st->duration > 0)
3086         ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3089     ost->st->disposition = ist->st->disposition;
/* Copy all stream-level side data packets verbatim. */
3091     if (ist->st->nb_side_data) {
3092         for (i = 0; i < ist->st->nb_side_data; i++) {
3093             const AVPacketSideData *sd_src = &ist->st->side_data[i];
3096             dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3098                 return AVERROR(ENOMEM);
3099             memcpy(dst_data, sd_src->data, sd_src->size);
/* User overrode rotation: emit a fresh display matrix side data entry. */
3103     if (ost->rotate_overridden) {
3104         uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3105                                               sizeof(int32_t) * 9);
3107             av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3110     switch (par_dst->codec_type) {
3111     case AVMEDIA_TYPE_AUDIO:
3112         if (audio_volume != 256) {
3113             av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Bogus block_align values for MP3/AC3 break some muxers — clear them. */
3116         if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3117             par_dst->block_align= 0;
3118         if(par_dst->codec_id == AV_CODEC_ID_AC3)
3119             par_dst->block_align= 0;
3121     case AVMEDIA_TYPE_VIDEO:
3122         if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3124                 av_mul_q(ost->frame_aspect_ratio,
3125                          (AVRational){ par_dst->height, par_dst->width });
3126             av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3127                    "with stream copy may produce invalid files\n");
3129         else if (ist->st->sample_aspect_ratio.num)
3130             sar = ist->st->sample_aspect_ratio;
3132             sar = par_src->sample_aspect_ratio;
3133         ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3134         ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3135         ost->st->r_frame_rate = ist->st->r_frame_rate;
3139     ost->mux_timebase = ist->st->time_base;
/* Store an "encoder" metadata tag (e.g. "Lavc... libx264") on the output
 * stream, unless the user already set one. In bitexact mode (format or codec
 * flag) the version string is omitted so output is reproducible. */
3144 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3146     AVDictionaryEntry *e;
3148     uint8_t *encoder_string;
3149     int encoder_string_len;
3150     int format_flags = 0;
3151     int codec_flags = ost->enc_ctx->flags;
/* Respect a user-supplied encoder tag (elided early return). */
3153     if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
/* Re-evaluate "fflags"/"flags" option strings because the dicts may not have
 * been applied to the contexts yet. */
3156     e = av_dict_get(of->opts, "fflags", NULL, 0);
3158         const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3161         av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3163     e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3165         const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3168         av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3171     encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3172     encoder_string     = av_mallocz(encoder_string_len);
3173     if (!encoder_string)
3176     if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3177         av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3179         av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3180     av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dictionary. */
3181     av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
3182                 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames time list ("t1,t2,..." and/or "chapters[+off]")
 * into a sorted array of pts values in avctx->time_base, stored on
 * ost->forced_kf_pts / ost->forced_kf_count.
 * NOTE(review): comma-counting and pointer-advance lines are elided from this
 * listing. */
3185 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3186                                     AVCodecContext *avctx)
3189     int n = 1, i, size, index = 0;
/* n counts the comma-separated entries (increment on elided line). */
3192     for (p = kf; *p; p++)
3196     pts = av_malloc_array(size, sizeof(*pts));
3198         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3203     for (i = 0; i < n; i++) {
3204         char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at each chapter start (+offset). */
3209         if (!memcmp(p, "chapters", 8)) {
3211             AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by one slot per chapter (the "chapters" token itself used one). */
3214             if (avf->nb_chapters > INT_MAX - size ||
3215                 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3217                 av_log(NULL, AV_LOG_FATAL,
3218                        "Could not allocate forced key frames array.\n");
3221             t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3222             t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3224             for (j = 0; j < avf->nb_chapters; j++) {
3225                 AVChapter *c = avf->chapters[j];
3226                 av_assert1(index < size);
3227                 pts[index++] = av_rescale_q(c->start, c->time_base,
3228                                             avctx->time_base) + t;
/* Plain timestamp entry. */
3233             t = parse_time_or_die("force_key_frames", p, 1);
3234             av_assert1(index < size);
3235             pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3242     av_assert0(index == size);
/* Keep the list sorted so the encoder loop can consume it in order. */
3243     qsort(pts, size, sizeof(*pts), compare_int64);
3244     ost->forced_kf_count = size;
3245     ost->forced_kf_pts   = pts;
/* Choose the encoder time base: an explicit -enc_time_base value wins; a
 * negative value means "copy the input stream's time base" (when there is an
 * input); otherwise fall back to the caller-supplied default.
 * NOTE(review): the early returns after each assignment are elided from this
 * listing. */
3248 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3250     InputStream *ist = get_input_stream(ost);
3251     AVCodecContext *enc_ctx = ost->enc_ctx;
3252     AVFormatContext *oc;
3254     if (ost->enc_timebase.num > 0) {
3255         enc_ctx->time_base = ost->enc_timebase;
3259     if (ost->enc_timebase.num < 0) {
3261             enc_ctx->time_base = ist->st->time_base;
3265         oc = output_files[ost->file_index]->ctx;
3266         av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3269     enc_ctx->time_base = default_time_base;
/* Configure an output stream's encoder context from the filter graph sink and
 * the (optional) source input stream: frame rate selection, per-media-type
 * parameters (sample/pixel format, dimensions, SAR, time base), forced
 * keyframes, and disposition/metadata fixups.
 * NOTE(review): several guard/brace lines are elided from this listing;
 * comments describe only visible statements. */
3272 static int init_output_stream_encode(OutputStream *ost)
3274     InputStream *ist = get_input_stream(ost);
3275     AVCodecContext *enc_ctx = ost->enc_ctx;
3276     AVCodecContext *dec_ctx = NULL;
3277     AVFormatContext *oc = output_files[ost->file_index]->ctx;
3280     set_encoder_id(output_files[ost->file_index], ost);
3282     // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3283     // hand, the legacy API makes demuxers set "rotate" metadata entries,
3284     // which have to be filtered out to prevent leaking them to output files.
3285     av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3288         ost->st->disposition          = ist->st->disposition;
3290         dec_ctx = ist->dec_ctx;
3292         enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the file, mark it default
 * (audio/video only). */
3294         for (j = 0; j < oc->nb_streams; j++) {
3295             AVStream *st = oc->streams[j];
3296             if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3299         if (j == oc->nb_streams)
3300             if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3301                 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3302                 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate fallback chain: filter sink -> -r -> input r_frame_rate -> 25fps. */
3305     if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3306         if (!ost->frame_rate.num)
3307             ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3308         if (ist && !ost->frame_rate.num)
3309             ost->frame_rate = ist->framerate;
3310         if (ist && !ost->frame_rate.num)
3311             ost->frame_rate = ist->st->r_frame_rate;
3312         if (ist && !ost->frame_rate.num) {
3313             ost->frame_rate = (AVRational){25, 1};
3314             av_log(NULL, AV_LOG_WARNING,
3316                    "about the input framerate is available. Falling "
3317                    "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3318                    "if you want a different framerate.\n",
3319                    ost->file_index, ost->index);
/* Snap to the nearest framerate the encoder supports (unless -force_fps). */
3322         if (ost->enc->supported_framerates && !ost->force_fps) {
3323             int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3324             ost->frame_rate = ost->enc->supported_framerates[idx];
3326         // reduce frame rate for mpeg4 to be within the spec limits
3327         if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3328             av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3329                       ost->frame_rate.num, ost->frame_rate.den, 65535);
3333     switch (enc_ctx->codec_type) {
3334     case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the filter graph's sink. */
3335         enc_ctx->sample_fmt     = av_buffersink_get_format(ost->filter->filter);
3337             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3338                                                  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3339         enc_ctx->sample_rate    = av_buffersink_get_sample_rate(ost->filter->filter);
3340         enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3341         enc_ctx->channels       = av_buffersink_get_channels(ost->filter->filter);
3343         init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3346     case AVMEDIA_TYPE_VIDEO:
3347         init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3349         if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3350             enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3351         if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3352            && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3353             av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3354                                        "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3357         enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
3358         enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3359         enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3360             ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3361             av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3362             av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3364         enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3366             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3367                                                  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3369         enc_ctx->framerate = ost->frame_rate;
3371         ost->st->avg_frame_rate = ost->frame_rate;
/* When the video is resized/converted (elided condition continues below), the
 * user-requested raw bit depth takes over. */
3374             enc_ctx->width   != dec_ctx->width  ||
3375             enc_ctx->height  != dec_ctx->height ||
3376             enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3377             enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3380         if (ost->top_field_first == 0) {
3381             enc_ctx->field_order = AV_FIELD_BB;
3382         } else if (ost->top_field_first == 1) {
3383             enc_ctx->field_order = AV_FIELD_TT;
3386         if (ost->forced_keyframes) {
/* "expr:" prefix: compile a keyframe-forcing expression for per-frame eval. */
3387             if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3388                 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3389                                     forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3391                     av_log(NULL, AV_LOG_ERROR,
3392                            "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3395                 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3396                 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3397                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3398                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3400             // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3401             // parse it only for static kf timings
3402             } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3403                 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3407     case AVMEDIA_TYPE_SUBTITLE:
3408         enc_ctx->time_base = AV_TIME_BASE_Q;
/* Text subtitles have no intrinsic size; inherit it from the input stream. */
3409         if (!enc_ctx->width) {
3410             enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
3411             enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
3414     case AVMEDIA_TYPE_DATA:
3421     ost->mux_timebase = enc_ctx->time_base;
3426 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3430 if (ost->encoding_needed) {
3431 AVCodec *codec = ost->enc;
3432 AVCodecContext *dec = NULL;
3435 ret = init_output_stream_encode(ost);
3439 if ((ist = get_input_stream(ost)))
3441 if (dec && dec->subtitle_header) {
3442 /* ASS code assumes this buffer is null terminated so add extra byte. */
3443 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3444 if (!ost->enc_ctx->subtitle_header)
3445 return AVERROR(ENOMEM);
3446 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3447 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3449 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3450 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3451 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3453 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3454 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3455 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3457 ret = hw_device_setup_for_encode(ost);
3459 snprintf(error, error_len, "Device setup failed for "
3460 "encoder on output stream #%d:%d : %s",
3461 ost->file_index, ost->index, av_err2str(ret));
3465 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3466 int input_props = 0, output_props = 0;
3467 AVCodecDescriptor const *input_descriptor =
3468 avcodec_descriptor_get(dec->codec_id);
3469 AVCodecDescriptor const *output_descriptor =
3470 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3471 if (input_descriptor)
3472 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3473 if (output_descriptor)
3474 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3475 if (input_props && output_props && input_props != output_props) {
3476 snprintf(error, error_len,
3477 "Subtitle encoding currently only possible from text to text "
3478 "or bitmap to bitmap");
3479 return AVERROR_INVALIDDATA;
3483 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3484 if (ret == AVERROR_EXPERIMENTAL)
3485 abort_codec_experimental(codec, 1);
3486 snprintf(error, error_len,
3487 "Error while opening encoder for output stream #%d:%d - "
3488 "maybe incorrect parameters such as bit_rate, rate, width or height",
3489 ost->file_index, ost->index);
3492 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3493 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3494 av_buffersink_set_frame_size(ost->filter->filter,
3495 ost->enc_ctx->frame_size);
3496 assert_avoptions(ost->encoder_opts);
3497 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3498 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3499 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3500 " It takes bits/s as argument, not kbits/s\n");
3502 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3504 av_log(NULL, AV_LOG_FATAL,
3505 "Error initializing the output stream codec context.\n");
3509 * FIXME: ost->st->codec should't be needed here anymore.
3511 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3515 if (ost->enc_ctx->nb_coded_side_data) {
3518 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3519 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3522 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3524 return AVERROR(ENOMEM);
3525 memcpy(dst_data, sd_src->data, sd_src->size);
3530 * Add global input side data. For now this is naive, and copies it
3531 * from the input stream's global side data. All side data should
3532 * really be funneled over AVFrame and libavfilter, then added back to
3533 * packet side data, and then potentially using the first packet for
3538 for (i = 0; i < ist->st->nb_side_data; i++) {
3539 AVPacketSideData *sd = &ist->st->side_data[i];
3540 if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3541 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3543 return AVERROR(ENOMEM);
3544 memcpy(dst, sd->data, sd->size);
3545 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3546 av_display_rotation_set((uint32_t *)dst, 0);
3551 // copy timebase while removing common factors
3552 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3553 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3555 // copy estimated duration as a hint to the muxer
3556 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3557 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3559 ost->st->codec->codec= ost->enc_ctx->codec;
3560 } else if (ost->stream_copy) {
3561 ret = init_output_stream_streamcopy(ost);
3566 // parse user provided disposition, and update stream values
3567 if (ost->disposition) {
3568 static const AVOption opts[] = {
3569 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3570 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3571 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3572 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3573 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3574 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3575 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3576 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3577 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3578 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3579 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3580 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3581 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3582 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3583 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3584 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3587 static const AVClass class = {
3589 .item_name = av_default_item_name,
3591 .version = LIBAVUTIL_VERSION_INT,
3593 const AVClass *pclass = &class;
3595 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3600 /* initialize bitstream filters for the output stream
3601 * needs to be done here, because the codec id for streamcopy is not
3602 * known until now */
3603 ret = init_output_bsfs(ost);
3607 ost->initialized = 1;
3609 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn, at most once per stream index, that a stream appeared in the input
 * mid-demux. Such dynamically added streams are not mapped and are ignored
 * by the transcoder; nb_streams_warn records the highest index warned about. */
3616 static void report_new_stream(int input_index, AVPacket *pkt)
3618     InputFile *file = input_files[input_index];
3619     AVStream *st = file->ctx->streams[pkt->stream_index];
     /* already warned about this (or a higher) stream index — stay silent */
3621     if (pkt->stream_index < file->nb_streams_warn)
3623     av_log(file->ctx, AV_LOG_WARNING,
3624            "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3625            av_get_media_type_string(st->codecpar->codec_type),
3626            input_index, pkt->stream_index,
3627            pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
     /* remember so the next packet of this stream does not warn again */
3628     file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop: resolve filtergraph output
 * sources, start framerate emulation clocks, open all decoders and the
 * encoders that do not wait on a filtergraph frame, discard unused programs,
 * write headers for stream-less outputs, and dump the stream mapping.
 * Returns 0 on success or a negative AVERROR; on failure the accumulated
 * human-readable message in 'error' is logged. */
3631 static int transcode_init(void)
3633     int ret = 0, i, j, k;
3634     AVFormatContext *oc;
3637     char error[1024] = {0};
     /* Patch complex single-input filtergraph outputs: point each output
      * stream at the input stream feeding the graph, so stream mapping and
      * EOF handling can refer back to a concrete source. */
3639     for (i = 0; i < nb_filtergraphs; i++) {
3640         FilterGraph *fg = filtergraphs[i];
3641         for (j = 0; j < fg->nb_outputs; j++) {
3642             OutputFilter *ofilter = fg->outputs[j];
3643             if (!ofilter->ost || ofilter->ost->source_index >= 0)
3645             if (fg->nb_inputs != 1)
3647             for (k = nb_input_streams-1; k >= 0 ; k--)
3648                 if (fg->inputs[0]->ist == input_streams[k])
3650             ofilter->ost->source_index = k;
3654     /* init framerate emulation: record a per-stream wallclock start time
      * used by get_input_packet() to throttle reading in -re mode */
3655     for (i = 0; i < nb_input_files; i++) {
3656         InputFile *ifile = input_files[i];
3657         if (ifile->rate_emu)
3658             for (j = 0; j < ifile->nb_streams; j++)
3659                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3662     /* init input streams (open decoders); on failure close any encoder
      * contexts already prepared before bailing out */
3663     for (i = 0; i < nb_input_streams; i++)
3664         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3665             for (i = 0; i < nb_output_streams; i++) {
3666                 ost = output_streams[i];
3667                 avcodec_close(ost->enc_ctx);
3672     /* open each encoder */
3673     for (i = 0; i < nb_output_streams; i++) {
3674         // skip streams fed from filtergraphs until we have a frame for them
3675         if (output_streams[i]->filter)
3678         ret = init_output_stream_wrapper(output_streams[i], 0);
3683     /* discard unused programs: a program stays enabled only if at least
      * one of its member streams is actually being consumed */
3684     for (i = 0; i < nb_input_files; i++) {
3685         InputFile *ifile = input_files[i];
3686         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3687             AVProgram *p = ifile->ctx->programs[j];
3688             int discard = AVDISCARD_ALL;
3690             for (k = 0; k < p->nb_stream_indexes; k++)
3691                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3692                     discard = AVDISCARD_DEFAULT;
3695             p->discard = discard;
3699     /* write headers for files with no streams (muxers flagged AVFMT_NOSTREAMS) */
3700     for (i = 0; i < nb_output_files; i++) {
3701         oc = output_files[i]->ctx;
3702         if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3703             ret = check_init_output_file(output_files[i], i);
3710     /* dump the stream mapping — purely informational logging from here on */
3711     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3712     for (i = 0; i < nb_input_streams; i++) {
3713         ist = input_streams[i];
3715         for (j = 0; j < ist->nb_filters; j++) {
3716             if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3717                 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3718                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3719                        ist->filters[j]->name);
3720                 if (nb_filtergraphs > 1)
3721                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3722                 av_log(NULL, AV_LOG_INFO, "\n");
3727     for (i = 0; i < nb_output_streams; i++) {
3728         ost = output_streams[i];
3730         if (ost->attachment_filename) {
3731             /* an attached file */
3732             av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3733                    ost->attachment_filename, ost->file_index, ost->index);
3737         if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3738             /* output from a complex graph */
3739             av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3740             if (nb_filtergraphs > 1)
3741                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3743             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3744                    ost->index, ost->enc ? ost->enc->name : "?");
     /* simple (non-graph) mapping: input stream -> output stream */
3748         av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3749                input_streams[ost->source_index]->file_index,
3750                input_streams[ost->source_index]->st->index,
3753         if (ost->sync_ist != input_streams[ost->source_index])
3754             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3755                    ost->sync_ist->file_index,
3756                    ost->sync_ist->st->index);
3757         if (ost->stream_copy)
3758             av_log(NULL, AV_LOG_INFO, " (copy)");
3760             const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3761             const AVCodec *out_codec = ost->enc;
3762             const char *decoder_name = "?";
3763             const char *in_codec_name = "?";
3764             const char *encoder_name = "?";
3765             const char *out_codec_name = "?";
3766             const AVCodecDescriptor *desc;
     /* "native" is printed when the decoder name equals the codec name,
      * i.e. the default built-in implementation is in use */
3769                 decoder_name = in_codec->name;
3770                 desc = avcodec_descriptor_get(in_codec->id);
3772                     in_codec_name = desc->name;
3773                 if (!strcmp(decoder_name, in_codec_name))
3774                     decoder_name = "native";
3778                 encoder_name = out_codec->name;
3779                 desc = avcodec_descriptor_get(out_codec->id);
3781                     out_codec_name = desc->name;
3782                 if (!strcmp(encoder_name, out_codec_name))
3783                     encoder_name = "native";
3786             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3787                    in_codec_name, decoder_name,
3788                    out_codec_name, encoder_name);
3790         av_log(NULL, AV_LOG_INFO, "\n");
     /* error path: report the message accumulated by the failing helper */
3794         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
     /* publish completion to other threads (e.g. the signal/progress code) */
3798     atomic_store(&transcode_init_done, 1);
3803 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3804 static int need_output(void)
3808     for (i = 0; i < nb_output_streams; i++) {
3809         OutputStream *ost = output_streams[i];
3810         OutputFile *of = output_files[ost->file_index];
3811         AVFormatContext *os = output_files[ost->file_index]->ctx;
     /* a stream needs no more output once finished or once its file hit -fs */
3813         if (ost->finished ||
3814             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
     /* -frames limit reached: close every stream of this output file */
3816         if (ost->frame_number >= ost->max_frames) {
3818             for (j = 0; j < of->ctx->nb_streams; j++)
3819                 close_output_stream(output_streams[of->ost_index + j]);
3830  * Select the output stream to process.
 *
 * Picks the unfinished output stream with the smallest current DTS
 * (rescaled to a common time base), i.e. the one lagging furthest behind,
 * so muxing stays roughly interleaved across streams.
 *
3832  * @return selected output stream, or NULL if none available
3834 static OutputStream *choose_output(void)
3837     int64_t opts_min = INT64_MAX;
3838     OutputStream *ost_min = NULL;
3840     for (i = 0; i < nb_output_streams; i++) {
3841         OutputStream *ost = output_streams[i];
     /* streams with no DTS yet sort first (INT64_MIN) so they get fed */
3842         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3843                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3845         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3846             av_log(NULL, AV_LOG_DEBUG,
3847                    "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3848                    ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
     /* not yet initialized and still expecting input: keep it as a candidate */
3850         if (!ost->initialized && !ost->inputs_done)
3853         if (!ost->finished && opts < opts_min) {
     /* an unavailable stream wins the min but yields NULL, stalling selection */
3855             ost_min = ost->unavailable ? NULL : ost;
/* Enable/disable terminal echo on stdin (fd 0); used around interactive
 * command entry. Failures of tcgetattr/tcsetattr are silently ignored. */
3861 static void set_tty_echo(int on)
3865     if (tcgetattr(0, &tty) == 0) {
3866         if (on) tty.c_lflag |= ECHO;
3867         else    tty.c_lflag &= ~ECHO;
3868         tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms, never in daemon mode) and act on
 * single-key commands: q quits, +/- change log verbosity, c/C send or queue
 * filter commands, d/D cycle debug flags, h toggles packet dumping, ? prints
 * help. Returns AVERROR_EXIT to request shutdown, 0 otherwise. */
3873 static int check_keyboard_interaction(int64_t cur_time)
3876     static int64_t last_time;
3877     if (received_nb_signals)
3878         return AVERROR_EXIT;
3879     /* read_key() returns 0 on EOF */
3880     if(cur_time - last_time >= 100000 && !run_as_daemon){
3882         last_time = cur_time;
3886         return AVERROR_EXIT;
3887     if (key == '+') av_log_set_level(av_log_get_level()+10);
3888     if (key == '-') av_log_set_level(av_log_get_level()-10);
3889     if (key == 's') qp_hist     ^= 1;
     /* 'h' cycles: off -> pkt dump -> pkt+hex dump -> off (elided here) */
3892             do_hex_dump = do_pkt_dump = 0;
3893         } else if(do_pkt_dump){
3897             av_log_set_level(AV_LOG_DEBUG);
3899     if (key == 'c' || key == 'C'){
3900         char buf[4096], target[64], command[256], arg[256] = {0};
3903         fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
     /* read one line of input, bounded by buf; echo is toggled elsewhere */
3906         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3911         fprintf(stderr, "\n");
3913             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3914             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3915                    target, time, command, arg);
3916             for (i = 0; i < nb_filtergraphs; i++) {
3917                 FilterGraph *fg = filtergraphs[i];
     /* 'c' (lowercase) sends immediately to the first matching filter;
      * 'C' queues the command for time 'time' on all matching filters */
3920                     ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3921                                                       key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3922                     fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3923                 } else if (key == 'c') {
3924                     fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3925                     ret = AVERROR_PATCHWELCOME;
3927                     ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3929                         fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3934             av_log(NULL, AV_LOG_ERROR,
3935                    "Parse error, at least 3 arguments were expected, "
3936                    "only %d given in string '%s'\n", n, buf);
3939     if (key == 'd' || key == 'D'){
     /* 'D' cycles through debug modes derived from the first input stream */
3942             debug = input_streams[0]->st->codec->debug<<1;
3943             if(!debug) debug = 1;
3944             while(debug & (FF_DEBUG_DCT_COEFF
3946                                              |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3948                                              )) //unsupported, would just crash
     /* 'd' prompts for an explicit numeric debug value instead */
3955             while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3960             fprintf(stderr, "\n");
3961             if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3962                 fprintf(stderr,"error parsing debug value\n");
     /* apply the chosen debug flags to every decoder and encoder context */
3964         for(i=0;i<nb_input_streams;i++) {
3965             input_streams[i]->st->codec->debug = debug;
3967         for(i=0;i<nb_output_streams;i++) {
3968             OutputStream *ost = output_streams[i];
3969             ost->enc_ctx->debug = debug;
3971         if(debug) av_log_set_level(AV_LOG_DEBUG);
3972         fprintf(stderr,"debug=%d\n", debug);
3975         fprintf(stderr, "key    function\n"
3976                         "?      show this help\n"
3977                         "+      increase verbosity\n"
3978                         "-      decrease verbosity\n"
3979                         "c      Send command to first matching filter supporting it\n"
3980                         "C      Send/Queue command to all matching filters\n"
3981                         "D      cycle through available debug modes\n"
3982                         "h      dump packets/hex press to cycle through the 3 states\n"
3984                         "s      Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread over f->in_thread_queue. In non-blocking
 * mode a full queue triggers one warning, then falls back to a blocking send.
 * On any terminal error (including EOF) the error is published to the
 * receiving side via av_thread_message_queue_set_err_recv(). */
3991 static void *input_thread(void *arg)
3994     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3999         ret = av_read_frame(f->ctx, &pkt);
     /* transient demuxer EAGAIN: retry reading (back-off elided here) */
4001         if (ret == AVERROR(EAGAIN)) {
4006             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4009         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4010         if (flags && ret == AVERROR(EAGAIN)) {
     /* queue full in non-blocking mode: warn once, then block on the send */
4012             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4013             av_log(f->ctx, AV_LOG_WARNING,
4014                    "Thread message queue blocking; consider raising the "
4015                    "thread_queue_size option (current value: %d)\n",
4016                    f->thread_queue_size);
4019             if (ret != AVERROR_EOF)
4020                 av_log(f->ctx, AV_LOG_ERROR,
4021                        "Unable to send packet to main thread: %s\n",
     /* the packet was not handed off — release our reference */
4023             av_packet_unref(&pkt);
4024             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and tear down the demuxer thread of input file i: signal EOF to the
 * sender, drain and unref all queued packets, join the thread, free the
 * queue. No-op if the file has no thread queue. */
4032 static void free_input_thread(int i)
4034     InputFile *f = input_files[i];
4037     if (!f || !f->in_thread_queue)
4039     av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
     /* drain the queue so the thread's blocking send (if any) can complete */
4040     while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4041         av_packet_unref(&pkt);
4043     pthread_join(f->thread, NULL);
4045     av_thread_message_queue_free(&f->in_thread_queue);
/* Tear down the demuxer threads of all input files. */
4048 static void free_input_threads(void)
4052     for (i = 0; i < nb_input_files; i++)
4053         free_input_thread(i);
/* Create the demuxer thread for input file i, if threading is wanted.
 * A default queue size of 8 applies with multiple inputs, 0 (no thread)
 * otherwise. Returns 0 on success or a negative AVERROR. */
4056 static int init_input_thread(int i)
4059     InputFile *f = input_files[i];
4061     if (f->thread_queue_size < 0)
4062         f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4063     if (!f->thread_queue_size)
     /* non-seekable pb, or a non-lavfi input, is read in non-blocking mode
      * so a stalled source cannot block the whole transcode */
4066     if (f->ctx->pb ? !f->ctx->pb->seekable :
4067         strcmp(f->ctx->iformat->name, "lavfi"))
4068         f->non_blocking = 1;
4069     ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4070                                         f->thread_queue_size, sizeof(AVPacket));
4074     if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4075         av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4076         av_thread_message_queue_free(&f->in_thread_queue);
     /* pthread_create returns a positive errno, not a negative AVERROR */
4077         return AVERROR(ret);
/* Start a demuxer thread for every input file; returns the first error. */
4083 static int init_input_threads(void)
4087     for (i = 0; i < nb_input_files; i++) {
4088         ret = init_input_thread(i);
/* Receive one packet from the input file's demuxer-thread queue;
 * non-blocking when f->non_blocking is set (may return AVERROR(EAGAIN)). */
4095 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4097     return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4099                                         AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet of input file f, honouring -re rate emulation:
 * when any stream of the file is ahead of wallclock, return EAGAIN to
 * throttle reading. Uses the thread queue if one exists, otherwise reads
 * directly from the demuxer. */
4103 static int get_input_packet(InputFile *f, AVPacket *pkt)
4107         for (i = 0; i < f->nb_streams; i++) {
4108             InputStream *ist = input_streams[f->ist_index + i];
     /* compare stream time (scaled to microseconds) against elapsed wallclock */
4109             int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4110             int64_t now = av_gettime_relative() - ist->start;
4112                 return AVERROR(EAGAIN);
4117     if (f->thread_queue_size)
4118         return get_input_packet_mt(f, pkt);
4120     return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. the last transcode step could not make progress on it. */
4123 static int got_eagain(void)
4126     for (i = 0; i < nb_output_streams; i++)
4127         if (output_streams[i]->unavailable)
/* Clear the transient EAGAIN markers on all input files and output streams
 * so the next transcode iteration retries everything. */
4132 static void reset_eagain(void)
4135     for (i = 0; i < nb_input_files; i++)
4136         input_files[i]->eagain = 0;
4137     for (i = 0; i < nb_output_streams; i++)
4138         output_streams[i]->unavailable = 0;
4141 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4142 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4143                                AVRational time_base)
     /* no previous duration: adopt tmp and its time base outright */
4149         return tmp_time_base;
     /* av_compare_ts() compares across the two time bases without overflow */
4152     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4155         return tmp_time_base;
/* Rewind input file 'ifile' to its start (for -stream_loop) and accumulate
 * the file's total duration into ifile->duration/time_base so looped
 * timestamps can be offset. Audio streams take priority for the
 * last-frame-duration estimate since video frame length is ill-defined.
 * Returns the avformat_seek_file() result. */
4161 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4164     AVCodecContext *avctx;
4165     int i, ret, has_audio = 0;
4166     int64_t duration = 0;
4168     ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
     /* first pass: detect whether any decoded audio stream produced samples */
4172     for (i = 0; i < ifile->nb_streams; i++) {
4173         ist = input_streams[ifile->ist_index + i];
4174         avctx = ist->dec_ctx;
4176         /* duration is the length of the last frame in a stream
4177          * when audio stream is present we don't care about
4178          * last video frame length because it's not defined exactly */
4179         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
     /* second pass: compute each stream's duration and fold into the file's */
4183     for (i = 0; i < ifile->nb_streams; i++) {
4184         ist = input_streams[ifile->ist_index + i];
4185         avctx = ist->dec_ctx;
4188             if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4189                 AVRational sample_rate = {1, avctx->sample_rate};
     /* exact last-frame length from the decoded sample count */
4191                 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4196             if (ist->framerate.num) {
4197                 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4198             } else if (ist->st->avg_frame_rate.num) {
4199                 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4204         if (!ifile->duration)
4205             ifile->time_base = ist->st->time_base;
4206         /* the total duration of the stream, max_pts - min_pts is
4207          * the duration of the stream without the last frame */
4208         if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4209             duration += ist->max_pts - ist->min_pts;
4210         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
     /* positive loop count: one loop consumed (decrement elided here) */
4214     if (ifile->loop > 0)
 * Read and dispatch one packet from the given input file.
 *
 * Return values:
4222  * - 0 -- one packet was read and processed
4223  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4224  *   this function should be called again
4225  * - AVERROR_EOF -- this function should not be called again
4227 static int process_input(int file_index)
4229     InputFile *ifile = input_files[file_index];
4230     AVFormatContext *is;
4233     int ret, thread_ret, i, j;
4236     int disable_discontinuity_correction = copy_ts;
4239     ret = get_input_packet(ifile, &pkt);
4241     if (ret == AVERROR(EAGAIN)) {
     /* read error / EOF with -stream_loop: flush decoders, seek back to the
      * start and retry the read; the input thread must be stopped around the
      * seek and restarted afterwards */
4245     if (ret < 0 && ifile->loop) {
4246         AVCodecContext *avctx;
4247         for (i = 0; i < ifile->nb_streams; i++) {
4248             ist = input_streams[ifile->ist_index + i];
4249             avctx = ist->dec_ctx;
4250             if (ist->decoding_needed) {
4251                 ret = process_input_packet(ist, NULL, 1);
4254                 avcodec_flush_buffers(avctx);
4258         free_input_thread(file_index);
4260         ret = seek_to_start(ifile, is);
4262         thread_ret = init_input_thread(file_index);
4267             av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4269             ret = get_input_packet(ifile, &pkt);
4270             if (ret == AVERROR(EAGAIN)) {
     /* genuine error (not EOF): report and fall through to EOF handling */
4276         if (ret != AVERROR_EOF) {
4277             print_error(is->url, ret);
     /* EOF: flush all decoders of this file and finish outputs that are fed
      * directly (streamcopy / subtitles) rather than through lavfi */
4282         for (i = 0; i < ifile->nb_streams; i++) {
4283             ist = input_streams[ifile->ist_index + i];
4284             if (ist->decoding_needed) {
4285                 ret = process_input_packet(ist, NULL, 0);
4290             /* mark all outputs that don't go through lavfi as finished */
4291             for (j = 0; j < nb_output_streams; j++) {
4292                 OutputStream *ost = output_streams[j];
4294                 if (ost->source_index == ifile->ist_index + i &&
4295                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4296                     finish_output_stream(ost);
     /* report EOF as EAGAIN so the caller re-evaluates remaining inputs */
4300         ifile->eof_reached = 1;
4301         return AVERROR(EAGAIN);
4307         av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4308                          is->streams[pkt.stream_index]);
4310     /* the following test is needed in case new streams appear
4311        dynamically in stream : we ignore them */
4312     if (pkt.stream_index >= ifile->nb_streams) {
4313         report_new_stream(file_index, &pkt);
4314         goto discard_packet;
4317     ist = input_streams[ifile->ist_index + pkt.stream_index];
4319     ist->data_size += pkt.size;
4323         goto discard_packet;
4325     if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4326         av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4327                "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4333         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4334                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4335                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4336                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4337                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4338                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4339                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4340                av_ts2str(input_files[ist->file_index]->ts_offset),
4341                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
     /* timestamp wrap correction for streams with < 64 pts_wrap_bits */
4344     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4345         int64_t stime, stime2;
4346         // Correcting starttime based on the enabled streams
4347         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4348         //       so we instead do it here as part of discontinuity handling
4349         if (   ist->next_dts == AV_NOPTS_VALUE
4350             && ifile->ts_offset == -is->start_time
4351             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4352             int64_t new_start_time = INT64_MAX;
4353             for (i=0; i<is->nb_streams; i++) {
4354                 AVStream *st = is->streams[i];
4355                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4357                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4359             if (new_start_time > is->start_time) {
4360                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4361                 ifile->ts_offset = -new_start_time;
4365         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4366         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4367         ist->wrap_correction_done = 1;
     /* unwrap dts/pts that already wrapped past the start time; clearing
      * wrap_correction_done keeps correcting subsequent wrapped packets */
4369         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4370             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4371             ist->wrap_correction_done = 0;
4373         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4374             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4375             ist->wrap_correction_done = 0;
4379     /* add the stream-global side data to the first packet */
4380     if (ist->nb_packets == 1) {
4381         for (i = 0; i < ist->st->nb_side_data; i++) {
4382             AVPacketSideData *src_sd = &ist->st->side_data[i];
     /* display matrix is handled via autorotate, not copied here */
4385             if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
     /* don't overwrite side data the packet already carries */
4388             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4391             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4395             memcpy(dst_data, src_sd->data, src_sd->size);
     /* apply the accumulated file-level timestamp offset, then -itsscale */
4399     if (pkt.dts != AV_NOPTS_VALUE)
4400         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4401     if (pkt.pts != AV_NOPTS_VALUE)
4402         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4404     if (pkt.pts != AV_NOPTS_VALUE)
4405         pkt.pts *= ist->ts_scale;
4406     if (pkt.dts != AV_NOPTS_VALUE)
4407         pkt.dts *= ist->ts_scale;
4409     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
     /* inter-stream discontinuity: first dts of this stream vs. the file's
      * last seen timestamp, only for formats flagged AVFMT_TS_DISCONT */
4410     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4411          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4412         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4413         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4414         int64_t delta = pkt_dts - ifile->last_ts;
4415         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4416             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
4417             ifile->ts_offset -= delta;
4418             av_log(NULL, AV_LOG_DEBUG,
4419                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4420                    delta, ifile->ts_offset);
4421             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4422             if (pkt.pts != AV_NOPTS_VALUE)
4423                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
     /* -stream_loop: shift timestamps by the accumulated file duration and
      * track min/max pts for the next loop's duration estimate */
4427     duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4428     if (pkt.pts != AV_NOPTS_VALUE) {
4429         pkt.pts += duration;
4430         ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4431         ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4434     if (pkt.dts != AV_NOPTS_VALUE)
4435         pkt.dts += duration;
4437     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
     /* with copy_ts, re-enable discontinuity correction only when the dts
      * looks like a genuine wrap (wrapped value lands near next_dts) */
4439     if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4440         (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4441         int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4442                                             ist->st->time_base, AV_TIME_BASE_Q,
4443                                             AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4444         if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4445             disable_discontinuity_correction = 0;
     /* intra-stream discontinuity: packet dts vs. the predicted next_dts */
4448     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4449          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4450         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4451         !disable_discontinuity_correction) {
4452         int64_t delta = pkt_dts - ist->next_dts;
4453         if (is->iformat->flags & AVFMT_TS_DISCONT) {
4454             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4455                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
4456                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4457                 ifile->ts_offset -= delta;
4458                 av_log(NULL, AV_LOG_DEBUG,
4459                        "timestamp discontinuity for stream #%d:%d "
4460                        "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4461                        ist->file_index, ist->st->index, ist->st->id,
4462                        av_get_media_type_string(ist->dec_ctx->codec_type),
4463                        delta, ifile->ts_offset);
4464                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4465                 if (pkt.pts != AV_NOPTS_VALUE)
4466                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
     /* non-DISCONT format: beyond the error threshold, drop the bad stamps
      * instead of shifting the whole file offset */
4469             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4470                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4471                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4472                 pkt.dts = AV_NOPTS_VALUE;
4474             if (pkt.pts != AV_NOPTS_VALUE){
4475                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4476                 delta = pkt_pts - ist->next_dts;
4477                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4478                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4479                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4480                     pkt.pts = AV_NOPTS_VALUE;
     /* remember the last dts seen in this file for inter-stream checks */
4486     if (pkt.dts != AV_NOPTS_VALUE)
4487         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4490         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4491                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4492                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4493                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4494                av_ts2str(input_files[ist->file_index]->ts_offset),
4495                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4498     sub2video_heartbeat(ist, pkt.pts);
4500     process_input_packet(ist, &pkt, 0);
4503     av_packet_unref(&pkt);
4509  * Perform a step of transcoding for the specified filter graph.
 *
4511  * @param[in]  graph     filter graph to consider
4512  * @param[out] best_ist  input stream where a frame would allow to continue
4513  * @return  0 for success, <0 for error
4515 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4518     int nb_requests, nb_requests_max = 0;
4519     InputFilter *ifilter;
4523     ret = avfilter_graph_request_oldest(graph->graph);
     /* a frame was produced: drain the graph's outputs */
4525         return reap_filters(0);
4527     if (ret == AVERROR_EOF) {
     /* graph fully flushed: reap remaining frames and close its outputs */
4528         ret = reap_filters(1);
4529         for (i = 0; i < graph->nb_outputs; i++)
4530             close_output_stream(graph->outputs[i]->ost);
4533     if (ret != AVERROR(EAGAIN))
     /* EAGAIN: pick the input whose buffersrc starved the most requests,
      * skipping inputs whose files are stalled or already at EOF */
4536     for (i = 0; i < graph->nb_inputs; i++) {
4537         ifilter = graph->inputs[i];
4539         if (input_files[ist->file_index]->eagain ||
4540             input_files[ist->file_index]->eof_reached)
4542         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4543         if (nb_requests > nb_requests_max) {
4544             nb_requests_max = nb_requests;
     /* no feedable input found: mark the graph's outputs unavailable */
4550     for (i = 0; i < graph->nb_outputs; i++)
4551         graph->outputs[i]->ost->unavailable = 1;
4557  * Run a single step of transcoding.
 *
 * Chooses the most-lagging output stream, determines which input stream
 * must be fed (directly or via its filtergraph), reads one packet from that
 * input, and reaps any filter output produced.
 *
4559  * @return  0 for success, <0 for error
4561 static int transcode_step(void)
4564     InputStream *ist = NULL;
4567     ost = choose_output();
     /* no selectable output and nothing left to read: we are done */
4574             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
     /* filtergraph not configured yet: configure once all input formats known */
4578     if (ost->filter && !ost->filter->graph->graph) {
4579         if (ifilter_has_all_input_formats(ost->filter->graph)) {
4580             ret = configure_filtergraph(ost->filter->graph);
4582                 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4588     if (ost->filter && ost->filter->graph->graph) {
     /* flush-on-init path; full rationale elided in this excerpt */
4589         init_output_stream_wrapper(ost, 1);
4591         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4595     } else if (ost->filter) {
     /* unconfigured graph: feed any input that hasn't produced output yet */
4597         for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4598             InputFilter *ifilter = ost->filter->graph->inputs[i];
4599             if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
     /* every input exhausted: nothing more can reach this output */
4605         ost->inputs_done = 1;
     /* no filtergraph: the output is fed directly from its source stream */
4609         av_assert0(ost->source_index >= 0);
4610         ist = input_streams[ost->source_index];
4613     ret = process_input(ist->file_index);
4614     if (ret == AVERROR(EAGAIN)) {
4615         if (input_files[ist->file_index]->eagain)
4616             ost->unavailable = 1;
     /* EOF from process_input is normal termination for this step */
4621         return ret == AVERROR_EOF ? 0 : ret;
4623     return reap_filters(0);
4627  * The following code is the main loop of the file converter
 *
 * Initializes the transcode, runs transcode_step() until no output is
 * needed or a fatal error/signal occurs, then flushes decoders, writes
 * trailers, closes encoders/decoders and frees per-stream resources.
 * Returns 0 on success or a negative AVERROR.
4629 static int transcode(void)
4632     AVFormatContext *os;
4635     int64_t timer_start;
4636     int64_t total_packets_written = 0;
4638     ret = transcode_init();
4642     if (stdin_interaction) {
4643         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4646     timer_start = av_gettime_relative();
4649     if ((ret = init_input_threads()) < 0)
4653     while (!received_sigterm) {
4654         int64_t cur_time= av_gettime_relative();
4656         /* if 'q' pressed, exits */
4657         if (stdin_interaction)
4658             if (check_keyboard_interaction(cur_time) < 0)
4661         /* check if there's any stream where output is still needed */
4662         if (!need_output()) {
4663             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4667         ret = transcode_step();
4668         if (ret < 0 && ret != AVERROR_EOF) {
4669             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4673         /* dump report by using the output first video and audio streams */
4674         print_report(0, timer_start, cur_time);
4677     free_input_threads();
4680     /* at the end of stream, we must flush the decoder buffers */
4681     for (i = 0; i < nb_input_streams; i++) {
4682         ist = input_streams[i];
4683         if (!input_files[ist->file_index]->eof_reached) {
4684             process_input_packet(ist, NULL, 0);
4691     /* write the trailer if needed and close file */
4692     for (i = 0; i < nb_output_files; i++) {
4693         os = output_files[i]->ctx;
4694         if (!output_files[i]->header_written) {
4695             av_log(NULL, AV_LOG_ERROR,
4696                    "Nothing was written into output file %d (%s), because "
4697                    "at least one of its streams received no packets.\n",
4701         if ((ret = av_write_trailer(os)) < 0) {
4702             av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4708     /* dump report by using the first video and audio streams */
4709     print_report(1, timer_start, av_gettime_relative());
4711     /* close each encoder */
4712     for (i = 0; i < nb_output_streams; i++) {
4713         ost = output_streams[i];
4714         if (ost->encoding_needed) {
4715             av_freep(&ost->enc_ctx->stats_in);
4717         total_packets_written += ost->packets_written;
     /* -abort_on empty_output_stream: a stream producing nothing is fatal */
4718         if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4719             av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4724     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4725         av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4729     /* close each decoder */
4730     for (i = 0; i < nb_input_streams; i++) {
4731         ist = input_streams[i];
4732         if (ist->decoding_needed) {
4733             avcodec_close(ist->dec_ctx);
4734             if (ist->hwaccel_uninit)
4735                 ist->hwaccel_uninit(ist->dec_ctx);
4739     hw_device_free_all();
     /* cleanup path (also reached on error): stop threads, free per-stream
      * options/buffers; logfile close failures are reported, not fatal */
4746     free_input_threads();
4749     if (output_streams) {
4750         for (i = 0; i < nb_output_streams; i++) {
4751             ost = output_streams[i];
4754                 if (fclose(ost->logfile))
4755                     av_log(NULL, AV_LOG_ERROR,
4756                            "Error closing logfile, loss of information possible: %s\n",
4757                            av_err2str(AVERROR(errno)));
4758                 ost->logfile = NULL;
4760             av_freep(&ost->forced_kf_pts);
4761             av_freep(&ost->apad);
4762             av_freep(&ost->disposition);
4763             av_dict_free(&ost->encoder_opts);
4764             av_dict_free(&ost->sws_dict);
4765             av_dict_free(&ost->swr_opts);
4766             av_dict_free(&ost->resample_opts);
/*
 * Snapshot real/user/sys time for -benchmark reporting.
 * real_usec is seeded from av_gettime_relative() via the initializer;
 * user/sys come from getrusage() where available, GetProcessTimes() on
 * Windows, and are zero on platforms with neither API.
 *
 * NOTE(review): excerpted listing — the '#if HAVE_GETRUSAGE' line, the
 * '#else'/'#endif' markers, the closing 'return time_stamps;' and braces
 * fall between the visible lines.
 */
4773 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
4775 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4777 struct rusage rusage;
4779 getrusage(RUSAGE_SELF, &rusage);
/* Convert timeval (sec + usec) to a single microsecond count; 1000000LL
 * forces 64-bit arithmetic before the addition. */
4780 time_stamps.user_usec =
4781 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4782 time_stamps.sys_usec =
4783 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4784 #elif HAVE_GETPROCESSTIMES
4786 FILETIME c, e, k, u;
4787 proc = GetCurrentProcess();
4788 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100 ns ticks split across two 32-bit halves; recombine,
 * then divide by 10 to get microseconds. */
4789 time_stamps.user_usec =
4790 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4791 time_stamps.sys_usec =
4792 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* No CPU-time API on this platform: report zero user/sys time. */
4794 time_stamps.user_usec = time_stamps.sys_usec = 0;
/*
 * Peak resident memory of this process in bytes, for the -benchmark report.
 *
 * NOTE(review): excerpted listing — the fallback branch after the Windows
 * path (and the closing brace) are not among the visible lines.
 */
4799 static int64_t getmaxrss(void)
4801 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4802 struct rusage rusage;
4803 getrusage(RUSAGE_SELF, &rusage);
/* Scaled by 1024, i.e. ru_maxrss is assumed to be in kilobytes here
 * (true on Linux; some BSD-derived systems differ — see getrusage docs). */
4804 return (int64_t)rusage.ru_maxrss * 1024;
4805 #elif HAVE_GETPROCESSMEMORYINFO
4807 PROCESS_MEMORY_COUNTERS memcounters;
4808 proc = GetCurrentProcess();
/* cb must be set to the structure size before the call, per the Win32 API. */
4809 memcounters.cb = sizeof(memcounters);
4810 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4811 return memcounters.PeakPagefileUsage;
/* av_log callback that discards all messages; installed by main() when the
 * first argument is "-d". Body not visible in this excerpt — presumably
 * empty, TODO confirm against the full source. */
4817 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Entry point: set up logging, parse options and open all inputs/outputs,
 * run transcode(), then print -benchmark and decode-error statistics and
 * exit via exit_program().
 *
 * NOTE(review): excerpted listing — the embedded line numbers jump, so
 * braces, exit_program() error exits and several statements between the
 * visible lines are not shown here.
 */
4821 int main(int argc, char **argv)
4824 BenchmarkTimeStamps ti;
/* ffmpeg_cleanup() will run on every exit_program() path from here on. */
4828 register_exit(ffmpeg_cleanup);
4830 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4832 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Apply -loglevel/-v first so everything after logs at the right level. */
4833 parse_loglevel(argc, argv, options);
/* "-d" as the very first argument: silence av_log entirely; the rest of
 * this branch is not visible here — presumably a daemon/debug mode, TODO
 * confirm against the full source. */
4835 if(argc>1 && !strcmp(argv[1], "-d")){
4837 av_log_set_callback(log_callback_null);
4843 avdevice_register_all();
4845 avformat_network_init();
4847 show_banner(argc, argv, options);
4849 /* parse options and open all input/output files */
4850 ret = ffmpeg_parse_options(argc, argv);
/* Bare invocation with no files at all: point the user at the help. */
4854 if (nb_output_files <= 0 && nb_input_files == 0) {
4856 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4860 /* file converter / grab */
4861 if (nb_output_files <= 0) {
4862 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Scans outputs for non-"rtp" muxers; the loop body is not visible —
 * TODO confirm what it does with the result. */
4866 for (i = 0; i < nb_output_files; i++) {
4867 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Benchmark baseline, taken immediately before transcoding starts. */
4871 current_time = ti = get_benchmark_time_stamps();
4872 if (transcode() < 0)
4875 int64_t utime, stime, rtime;
4876 current_time = get_benchmark_time_stamps();
/* Deltas against the pre-transcode baseline, in microseconds. */
4877 utime = current_time.user_usec - ti.user_usec;
4878 stime = current_time.sys_usec - ti.sys_usec;
4879 rtime = current_time.real_usec - ti.real_usec;
4880 av_log(NULL, AV_LOG_INFO,
4881 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4882 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4884 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4885 decode_error_stat[0], decode_error_stat[1]);
/* Fail the run if errors / (decoded + errors) exceeds -max_error_rate
 * (written multiplicatively to avoid division). */
4886 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 tells the caller we were interrupted by a signal. */
4889 exit_program(received_nb_signals ? 255 : main_return_code);
4890 return main_return_code;