2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity reported by the cmdutils banner/version machinery. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;

/* File receiving per-frame video statistics when -vstats is active. */
112 static FILE *vstats_file;

/* Names of the constants usable in -force_key_frames expressions. */
114 const char *const forced_keyframes_const_names[] = {

/* user/sys/real microsecond timestamps captured for -benchmark output. */
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;

/* Forward declarations for helpers defined later in this file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);

/* Global transcode state: dup/drop counters, decode-error stats, SDP flag. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
140 static int want_sdp = 1;
142 static BenchmarkTimeStamps current_time;
143 AVIOContext *progress_avio = NULL;

/* Scratch buffer for encoded subtitles; allocated on first use in
 * do_subtitle_out() and released in ffmpeg_cleanup(). */
145 static uint8_t *subtitle_out;

/* All input/output files and streams. Non-static: these have external
 * linkage and are referenced from the other ffmpeg tool sources. */
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;

162 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored by term_exit_sigsafe() when
 * restore_tty is set. */
163 static struct termios oldtty;
164 static int restore_tty;

168 static void free_input_threads(void);
172 Convert subtitles to video with alpha to insert them in filter graphs.
173 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank RGB32 canvas in ist->sub2video.frame for rendering
 * bitmap subtitles into.  Dimensions come from the decoder when known,
 * falling back to the configured sub2video size.  Zeroed RGB32 bytes give
 * an all-transparent canvas (alpha == 0).  On av_frame_get_buffer()
 * failure the negative error is propagated (return elided in this view). */
176 static int sub2video_get_blank_frame(InputStream *ist)
179 AVFrame *frame = ist->sub2video.frame;
181 av_frame_unref(frame);
182 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
185 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Clear the whole canvas (all planes are in data[0] for packed RGB32). */
187 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one subtitle rectangle into the w x h RGB32 canvas at dst.
 * Only SUBTITLE_BITMAP rects are supported; the PAL8 bitmap in r->data[0]
 * is expanded through the 32-bit palette in r->data[1].  Rects that are
 * not bitmaps or that overflow the canvas are warned about and skipped. */
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194 uint32_t *pal, *dst2;
198 if (r->type != SUBTITLE_BITMAP) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
202 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204 r->x, r->y, r->w, r->h, w, h
/* Advance to the rect's top-left corner; 4 bytes per RGB32 pixel. */
209 dst += r->y * dst_linesize + r->x * 4;
211 pal = (uint32_t *)r->data[1];
212 for (y = 0; y < r->h; y++) {
213 dst2 = (uint32_t *)dst;
215 for (x = 0; x < r->w; x++)
/* Palette lookup: each source byte indexes a 32-bit RGBA entry. */
216 *(dst2++) = pal[*(src2++)];
218 src += r->linesize[0];
/* Push the current sub2video canvas, stamped with pts, into every buffer
 * source filter fed by this input stream.  KEEP_REF lets the same frame be
 * sent to multiple filtergraph inputs; EOF from a closed buffersrc is not
 * treated as an error. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated (sub2video_get_blank_frame). */
228 av_assert1(frame->data[0]);
229 ist->sub2video.last_pts = frame->pts = pts;
230 for (i = 0; i < ist->nb_filters; i++) {
231 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232 AV_BUFFERSRC_FLAG_KEEP_REF |
233 AV_BUFFERSRC_FLAG_PUSH);
234 if (ret != AVERROR_EOF && ret < 0)
235 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render a decoded AVSubtitle onto a fresh blank canvas and push it into
 * the filtergraphs.  With sub == NULL (heartbeat/flush path) an empty
 * canvas is pushed at the previously recorded end pts, clearing any
 * on-screen subtitle.  Display times (ms) are converted to the input
 * stream time base. */
240 void sub2video_update(InputStream *ist, AVSubtitle *sub)
242 AVFrame *frame = ist->sub2video.frame;
246 int64_t pts, end_pts;
251 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252 AV_TIME_BASE_Q, ist->st->time_base);
253 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254 AV_TIME_BASE_Q, ist->st->time_base);
255 num_rects = sub->num_rects;
/* NULL subtitle: reuse the stored end pts and draw zero rects. */
257 pts = ist->sub2video.end_pts;
261 if (sub2video_get_blank_frame(ist) < 0) {
262 av_log(ist->dec_ctx, AV_LOG_ERROR,
263 "Impossible to get a blank canvas.\n");
266 dst = frame->data [0];
267 dst_linesize = frame->linesize[0];
268 for (i = 0; i < num_rects; i++)
269 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270 sub2video_push_ref(ist, pts);
271 ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams flowing: called when a frame is read from the
 * file containing ist, so that filtergraphs waiting on a subtitle input
 * (e.g. overlay) do not stall while video frames accumulate.  Re-sends
 * the last subtitle canvas at a pts derived from the triggering stream. */
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
276 InputFile *infile = input_files[ist->file_index];
280 /* When a frame is read from a file, examine all sub2video streams in
281 the same file and send the sub2video frame again. Otherwise, decoded
282 video frames could be accumulating in the filter graph while a filter
283 (possibly overlay) is desperately waiting for a subtitle frame. */
284 for (i = 0; i < infile->nb_streams; i++) {
285 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video sources. */
286 if (!ist2->sub2video.frame)
288 /* subtitles seem to be usually muxed ahead of other streams;
289 if not, subtracting a larger time here is necessary */
290 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291 /* do not send the heartbeat frame if the subtitle is already ahead */
292 if (pts2 <= ist2->sub2video.last_pts)
/* Past the current subtitle's end (or no canvas yet): clear/redraw. */
294 if (pts2 >= ist2->sub2video.end_pts ||
295 (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296 sub2video_update(ist2, NULL);
/* Only push a heartbeat frame if some buffersrc actually starved. */
297 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
300 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: clear any subtitle still
 * on screen, then send NULL (EOF) to every buffer source feeding from it. */
304 static void sub2video_flush(InputStream *ist)
309 if (ist->sub2video.end_pts < INT64_MAX)
310 sub2video_update(ist, NULL);
311 for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame signals EOF to the buffersrc; EOF twice is not an error. */
312 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313 if (ret != AVERROR_EOF && ret < 0)
314 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
318 /* end of sub2video hack */
/* Restore the terminal to its saved state.  Must stay async-signal-safe:
 * it is intended to run from signal-handling paths (only tcsetattr and a
 * quiet av_log call here). */
320 static void term_exit_sigsafe(void)
324 tcsetattr (0, TCSANOW, &oldtty);
330 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* State shared between the signal handlers and the main loop.  volatile
 * because the handlers write these from asynchronous signal context;
 * transcode_init_done is a C11 atomic as it is also read via atomic_load()
 * in decode_interrupt_cb(). */
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
336 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
/* Signal handler: record the signal and count deliveries.  Kept
 * async-signal-safe — after the third signal it reports via raw write(2)
 * (not av_log) before the hard-exit path. */
341 sigterm_handler(int sig)
344 received_sigterm = sig;
345 received_nb_signals++;
347 if(received_nb_signals > 3) {
348 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349 strlen("Received > 3 system signals, hard exiting\n"));
/* write() result deliberately ignored; nothing useful to do here. */
350 if (ret < 0) { /* Do nothing */ };
355 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the code understands. */
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
358 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl-C / Ctrl-Break behave like an interactive interrupt. */
363 case CTRL_BREAK_EVENT:
364 sigterm_handler(SIGINT);
367 case CTRL_CLOSE_EVENT:
368 case CTRL_LOGOFF_EVENT:
369 case CTRL_SHUTDOWN_EVENT:
370 sigterm_handler(SIGTERM);
371 /* Basically, with these 3 events, when we return from this method the
372 process is hard terminated, so stall as long as we need to
373 to try and let the main thread(s) clean up and gracefully terminate
374 (we have at most 5 seconds, but should be done far before that). */
375 while (!ffmpeg_exited) {
381 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): the enclosing function header is elided from this excerpt;
 * by its content this is the terminal/signal initialisation (term_init):
 * put stdin into raw-ish mode for interactive key handling, then install
 * the signal handlers. */
390 if (!run_as_daemon && stdin_interaction) {
392 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so single keypresses arrive. */
396 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397 |INLCR|IGNCR|ICRNL|IXON);
398 tty.c_oflag |= OPOST;
/* No echo, no canonical line buffering. */
399 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400 tty.c_cflag &= ~(CSIZE|PARENB);
405 tcsetattr (0, TCSANOW, &tty);
407 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
411 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
414 signal(SIGXCPU, sigterm_handler);
417 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
419 #if HAVE_SETCONSOLECTRLHANDLER
420 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
424 /* read a key without blocking */
/* POSIX path polls stdin with a zero-timeout select(); the Windows path
 * (HAVE_PEEKNAMEDPIPE) distinguishes console from pipe input and peeks the
 * pipe without consuming.  Returns the key, or a sentinel when no input is
 * available (exact values elided from this view). */
425 static int read_key(void)
437 n = select(1, &rfds, NULL, NULL, &tv);
446 # if HAVE_PEEKNAMEDPIPE
448 static HANDLE input_handle;
451 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for non-console handles -> stdin is a pipe. */
452 is_pipe = !GetConsoleMode(input_handle, &dw);
456 /* When running under a GUI, you will end here. */
457 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
458 // input pipe may have been closed by the program that ran ffmpeg
/* Interrupt callback handed to libavformat's blocking I/O (via int_cb):
 * returns non-zero to abort once more signals have been received than the
 * transcode_init_done flag (0 before init, 1 after) tolerates — i.e. one
 * extra signal is allowed during transcoding for graceful shutdown. */
476 static int decode_interrupt_cb(void *ctx)
478 return received_nb_signals > atomic_load(&transcode_init_done);
481 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down all global transcode state before process exit: filtergraphs
 * (including queued frames/subtitles), output files and streams, input
 * threads/files/streams, the vstats file, and the network layer.  `ret`
 * is the exit status being reported; it only affects the final logging. */
483 static void ffmpeg_cleanup(int ret)
488 int maxrss = getmaxrss() / 1024;
489 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: drain and free every input/output pad --- */
492 for (i = 0; i < nb_filtergraphs; i++) {
493 FilterGraph *fg = filtergraphs[i];
494 avfilter_graph_free(&fg->graph);
495 for (j = 0; j < fg->nb_inputs; j++) {
/* Free any frames still queued toward this filtergraph input. */
496 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
498 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499 sizeof(frame), NULL);
500 av_frame_free(&frame);
502 av_fifo_freep(&fg->inputs[j]->frame_queue);
503 if (fg->inputs[j]->ist->sub2video.sub_queue) {
/* Likewise for queued, not-yet-rendered subtitles. */
504 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
506 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
507 &sub, sizeof(sub), NULL);
508 avsubtitle_free(&sub);
510 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
512 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
513 av_freep(&fg->inputs[j]->name);
514 av_freep(&fg->inputs[j]);
516 av_freep(&fg->inputs);
517 for (j = 0; j < fg->nb_outputs; j++) {
518 av_freep(&fg->outputs[j]->name);
519 av_freep(&fg->outputs[j]->formats);
520 av_freep(&fg->outputs[j]->channel_layouts);
521 av_freep(&fg->outputs[j]->sample_rates);
522 av_freep(&fg->outputs[j]);
524 av_freep(&fg->outputs);
525 av_freep(&fg->graph_desc);
527 av_freep(&filtergraphs[i]);
529 av_freep(&filtergraphs);
531 av_freep(&subtitle_out);
/* --- output files: close muxer contexts (and their AVIO, elided) --- */
534 for (i = 0; i < nb_output_files; i++) {
535 OutputFile *of = output_files[i];
540 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
542 avformat_free_context(s);
543 av_dict_free(&of->opts);
545 av_freep(&output_files[i]);
/* --- output streams: encoders, BSFs, queues, per-stream options --- */
547 for (i = 0; i < nb_output_streams; i++) {
548 OutputStream *ost = output_streams[i];
553 for (j = 0; j < ost->nb_bitstream_filters; j++)
554 av_bsf_free(&ost->bsf_ctx[j]);
555 av_freep(&ost->bsf_ctx);
557 av_frame_free(&ost->filtered_frame);
558 av_frame_free(&ost->last_frame);
559 av_dict_free(&ost->encoder_opts);
561 av_freep(&ost->forced_keyframes);
562 av_expr_free(ost->forced_keyframes_pexpr);
563 av_freep(&ost->avfilter);
564 av_freep(&ost->logfile_prefix);
566 av_freep(&ost->audio_channels_map);
567 ost->audio_channels_mapped = 0;
569 av_dict_free(&ost->sws_dict);
571 avcodec_free_context(&ost->enc_ctx);
572 avcodec_parameters_free(&ost->ref_par);
574 if (ost->muxing_queue) {
/* Unref packets buffered before the muxer header was written. */
575 while (av_fifo_size(ost->muxing_queue)) {
577 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
578 av_packet_unref(&pkt);
580 av_fifo_freep(&ost->muxing_queue);
583 av_freep(&output_streams[i]);
/* --- inputs: stop reader threads before closing demuxer contexts --- */
586 free_input_threads();
588 for (i = 0; i < nb_input_files; i++) {
589 avformat_close_input(&input_files[i]->ctx);
590 av_freep(&input_files[i]);
592 for (i = 0; i < nb_input_streams; i++) {
593 InputStream *ist = input_streams[i];
595 av_frame_free(&ist->decoded_frame);
596 av_frame_free(&ist->filter_frame);
597 av_dict_free(&ist->decoder_opts);
598 avsubtitle_free(&ist->prev_sub.subtitle);
599 av_frame_free(&ist->sub2video.frame);
600 av_freep(&ist->filters);
601 av_freep(&ist->hwaccel_device);
602 av_freep(&ist->dts_buffer);
604 avcodec_free_context(&ist->dec_ctx);
606 av_freep(&input_streams[i]);
/* fclose() may flush buffered stats; a failure here can lose data. */
610 if (fclose(vstats_file))
611 av_log(NULL, AV_LOG_ERROR,
612 "Error closing vstats file, loss of information possible: %s\n",
613 av_err2str(AVERROR(errno)));
615 av_freep(&vstats_filename);
617 av_freep(&input_streams);
618 av_freep(&input_files);
619 av_freep(&output_streams);
620 av_freep(&output_files);
624 avformat_network_deinit();
626 if (received_sigterm) {
627 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
628 (int) received_sigterm);
629 } else if (ret && atomic_load(&transcode_init_done)) {
630 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to strip options that were consumed by one component from the set
 * still pending validation. */
636 void remove_avoptions(AVDictionary **a, AVDictionary *b)
638 AVDictionaryEntry *t = NULL;
/* The "" + IGNORE_SUFFIX idiom iterates over every entry of b. */
640 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
641 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fatal-error if any option remains unconsumed in m, i.e. the user passed
 * an option that no opened component recognized. */
645 void assert_avoptions(AVDictionary *m)
647 AVDictionaryEntry *t;
/* Any surviving entry means an unknown/unapplied option. */
648 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
649 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort with a diagnostic when codec c is experimental and strict-mode
 * does not allow it; `encoder` selects the encoder/decoder wording.
 * NOTE(review): body elided from this excerpt. */
654 static void abort_codec_experimental(AVCodec *c, int encoder)
/* -benchmark_all support: when fmt is non-NULL, log the user/sys/real time
 * consumed since the last call under the printf-formatted label; in all
 * cases current_time is refreshed (refresh elided in this view).  Called
 * with fmt == NULL to reset the baseline before a timed operation. */
659 static void update_benchmark(const char *fmt, ...)
661 if (do_benchmark_all) {
662 BenchmarkTimeStamps t = get_benchmark_time_stamps();
668 vsnprintf(buf, sizeof(buf), fmt, va);
670 av_log(NULL, AV_LOG_INFO,
671 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
672 t.user_usec - current_time.user_usec,
673 t.sys_usec - current_time.sys_usec,
674 t.real_usec - current_time.real_usec, buf);
/* Mark every output stream finished: the triggering stream gets
 * `this_stream` OR-ed into its finished flags, all other streams get
 * `others`.  Used on fatal mux errors to stop the whole pipeline. */
680 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
683 for (i = 0; i < nb_output_streams; i++) {
684 OutputStream *ost2 = output_streams[i];
685 ost2->finished |= ost == ost2 ? this_stream : others;
/* Deliver one packet to the muxer for stream ost, after frame-count
 * limiting, optional pre-header queueing, timestamp rescaling and DTS
 * monotonicity fixups.  The packet is consumed (unref'd/moved) on every
 * path.  `unqueue` is set when the packet is being replayed from the
 * muxing queue, so it is not counted a second time. */
689 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
691 AVFormatContext *s = of->ctx;
692 AVStream *st = ost->st;
696 * Audio encoders may split the packets -- #frames in != #packets out.
697 * But there is no reordering, so we can limit the number of output packets
698 * by simply dropping them here.
699 * Counting encoded video frames needs to be done separately because of
700 * reordering, see do_video_out().
701 * Do not count the packet when unqueued because it has been counted when queued.
703 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
704 if (ost->frame_number >= ost->max_frames) {
705 av_packet_unref(pkt);
711 if (!of->header_written) {
712 AVPacket tmp_pkt = {0};
713 /* the muxer is not initialized yet, buffer the packet */
714 if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the queue geometrically, capped by -max_muxing_queue_size. */
715 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
716 ost->max_muxing_queue_size);
717 if (new_size <= av_fifo_size(ost->muxing_queue)) {
718 av_log(NULL, AV_LOG_ERROR,
719 "Too many packets buffered for output stream %d:%d.\n",
720 ost->file_index, ost->st->index);
723 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Must be refcounted so the queued copy survives the caller's unref. */
727 ret = av_packet_make_refcounted(pkt);
730 av_packet_move_ref(&tmp_pkt, pkt);
731 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* -vsync drop / negative async: discard timestamps entirely. */
735 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
736 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
737 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
739 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* Pull encoder quality/picture-type/error stats from packet side data. */
741 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
743 ost->quality = sd ? AV_RL32(sd) : -1;
744 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
746 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
748 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force per-frame duration from the configured frame rate. */
753 if (ost->frame_rate.num && ost->is_cfr) {
754 if (pkt->duration > 0)
755 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
756 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
761 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
763 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
764 if (pkt->dts != AV_NOPTS_VALUE &&
765 pkt->pts != AV_NOPTS_VALUE &&
766 pkt->dts > pkt->pts) {
767 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
769 ost->file_index, ost->st->index);
/* sum - min - max == the middle value of the three candidates. */
771 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
772 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
773 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing DTS per stream;
 * VP9 stream copy is exempted. */
775 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
776 pkt->dts != AV_NOPTS_VALUE &&
777 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
778 ost->last_mux_dts != AV_NOPTS_VALUE) {
779 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
780 if (pkt->dts < max) {
781 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
782 av_log(s, loglevel, "Non-monotonous DTS in output stream "
783 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
784 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
786 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
789 av_log(s, loglevel, "changing to %"PRId64". This may result "
790 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
792 if (pkt->pts >= pkt->dts)
793 pkt->pts = FFMAX(pkt->pts, max);
798 ost->last_mux_dts = pkt->dts;
800 ost->data_size += pkt->size;
801 ost->packets_written++;
803 pkt->stream_index = ost->index;
806 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
807 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
808 av_get_media_type_string(ost->enc_ctx->codec_type),
809 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
810 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
815 ret = av_interleaved_write_frame(s, pkt);
/* A mux failure is non-recoverable for this file: stop all streams. */
817 print_error("av_interleaved_write_frame()", ret);
818 main_return_code = 1;
819 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
821 av_packet_unref(pkt);
/* Mark one output stream's encoder as finished and shrink the owning
 * file's recording time so other streams stop at the same point. */
824 static void close_output_stream(OutputStream *ost)
826 OutputFile *of = output_files[ost->file_index];
828 ost->finished |= ENCODER_FINISHED;
/* Convert the stream's current position to AV_TIME_BASE for comparison. */
830 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
831 of->recording_time = FFMIN(of->recording_time, end);
836 * Send a single packet to the output, applying any bitstream filters
837 * associated with the output stream. This may result in any number
838 * of packets actually being written, depending on what bitstream
839 * filters are applied. The supplied packet is consumed and will be
840 * blank (as if newly-allocated) when this function returns.
842 * If eof is set, instead indicate EOF to all bitstream filters and
843 * therefore flush any delayed packets to the output. A blank packet
844 * must be supplied in this case.
/* Run pkt through the stream's bitstream-filter chain (if any) and hand
 * the results to write_packet().  With eof set, NULL is fed to the BSFs
 * to flush their delayed packets.  See the contract in the comment block
 * above this function. */
846 static void output_packet(OutputFile *of, AVPacket *pkt,
847 OutputStream *ost, int eof)
851 /* apply the output bitstream filters, if any */
852 if (ost->nb_bitstream_filters) {
855 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
862 /* get a packet from the previous filter up the chain */
863 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
864 if (ret == AVERROR(EAGAIN)) {
868 } else if (ret == AVERROR_EOF) {
873 /* send it to the next filter down the chain or to the muxer */
874 if (idx < ost->nb_bitstream_filters) {
875 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
883 write_packet(of, pkt, ost, 0);
/* No bitstream filters: pass the packet straight to the muxer. */
886 write_packet(of, pkt, ost, 0);
889 if (ret < 0 && ret != AVERROR_EOF) {
890 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
891 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether ost may still emit frames under the owning file's -t
 * limit; once the limit is reached the stream is closed.  (The return
 * statements are elided from this view; callers treat 0 as "stop".) */
897 static int check_recording_time(OutputStream *ost)
899 OutputFile *of = output_files[ost->file_index];
901 if (of->recording_time != INT64_MAX &&
902 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
903 AV_TIME_BASE_Q) >= 0) {
904 close_output_stream(ost);
/* Encode one audio frame and mux every packet the encoder produces.
 * Maintains ost->sync_opts as the pts of the next expected sample and
 * updates the encoded-sample/frame counters. */
910 static void do_audio_out(OutputFile *of, OutputStream *ost,
913 AVCodecContext *enc = ost->enc_ctx;
917 av_init_packet(&pkt);
921 if (!check_recording_time(ost))
/* Without a usable pts (or with negative async), continue the running
 * sample count so output stays gapless. */
924 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
925 frame->pts = ost->sync_opts;
926 ost->sync_opts = frame->pts + frame->nb_samples;
927 ost->samples_encoded += frame->nb_samples;
928 ost->frames_encoded++;
930 av_assert0(pkt.size || !pkt.data);
931 update_benchmark(NULL);
933 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
934 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
935 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
936 enc->time_base.num, enc->time_base.den);
/* send/receive encode loop: one frame in, zero or more packets out. */
939 ret = avcodec_send_frame(enc, frame);
944 ret = avcodec_receive_packet(enc, &pkt);
945 if (ret == AVERROR(EAGAIN))
950 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Encoder time base -> mux time base before handing to the muxer. */
952 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
955 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
956 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
957 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
958 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
961 output_packet(of, &pkt, ost, 0);
966 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the result.  DVB subtitles are encoded
 * twice (draw packet + clear packet, `nb` = 2); timestamps are shifted by
 * the output file's start time so -ss/-t behave as expected. */
970 static void do_subtitle_out(OutputFile *of,
974 int subtitle_out_max_size = 1024 * 1024;
975 int subtitle_out_size, nb, i;
980 if (sub->pts == AV_NOPTS_VALUE) {
981 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer. */
990 subtitle_out = av_malloc(subtitle_out_max_size);
992 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
997 /* Note: DVB subtitle need one packet to draw them and one other
998 packet to clear them */
999 /* XXX: signal it in the codec context ? */
1000 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1005 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1007 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1008 pts -= output_files[ost->file_index]->start_time;
1009 for (i = 0; i < nb; i++) {
/* The encoder may mutate num_rects (e.g. clear pass); restore after. */
1010 unsigned save_num_rects = sub->num_rects;
1012 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1013 if (!check_recording_time(ost))
1017 // start_display_time is required to be 0
1018 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1019 sub->end_display_time -= sub->start_display_time;
1020 sub->start_display_time = 0;
1024 ost->frames_encoded++;
1026 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1027 subtitle_out_max_size, sub);
1029 sub->num_rects = save_num_rects;
1030 if (subtitle_out_size < 0) {
1031 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
/* Wrap the encoded payload; display durations are in milliseconds. */
1035 av_init_packet(&pkt);
1036 pkt.data = subtitle_out;
1037 pkt.size = subtitle_out_size;
1038 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1039 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1040 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1041 /* XXX: the pts correction is handled here. Maybe handling
1042 it in the codec would be better */
/* First pass (draw) vs second pass (clear) get different pts offsets. */
1044 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1046 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1049 output_packet(of, &pkt, ost, 0);
/* Encode one video frame, applying the -vsync policy: depending on the
 * drift between the filtered frame's timestamp (sync_ipts) and the output
 * clock (ost->sync_opts), a frame may be dropped, emitted once, or
 * duplicated (nb_frames copies; nb0_frames of them repeat the PREVIOUS
 * frame).  Also handles forced keyframes and field-order signalling.
 * NOTE(review): this excerpt ends mid-function; the tail continues beyond
 * the visible lines. */
1053 static void do_video_out(OutputFile *of,
1055 AVFrame *next_picture,
1058 int ret, format_video_sync;
1060 AVCodecContext *enc = ost->enc_ctx;
1061 AVCodecParameters *mux_par = ost->st->codecpar;
1062 AVRational frame_rate;
1063 int nb_frames, nb0_frames, i;
1064 double delta, delta0;
1065 double duration = 0;
1067 InputStream *ist = NULL;
1068 AVFilterContext *filter = ost->filter->filter;
1070 if (ost->source_index >= 0)
1071 ist = input_streams[ost->source_index];
/* Nominal frame duration, in encoder time-base units. */
1073 frame_rate = av_buffersink_get_frame_rate(filter);
1074 if (frame_rate.num > 0 && frame_rate.den > 0)
1075 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1077 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1078 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* With no filtering in effect, trust the frame's own packet duration. */
1080 if (!ost->filters_script &&
1082 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1085 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1086 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush call (no frame): estimate dup count from recent history. */
1089 if (!next_picture) {
1091 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1092 ost->last_nb0_frames[1],
1093 ost->last_nb0_frames[2]);
1095 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1096 delta = delta0 + duration;
1098 /* by default, we output a single frame */
1099 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve -vsync auto into a concrete policy for this muxer. */
1102 format_video_sync = video_sync_method;
1103 if (format_video_sync == VSYNC_AUTO) {
1104 if(!strcmp(of->ctx->oformat->name, "avi")) {
1105 format_video_sync = VSYNC_VFR;
1107 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1109 && format_video_sync == VSYNC_CFR
1110 && input_files[ist->file_index]->ctx->nb_streams == 1
1111 && input_files[ist->file_index]->input_ts_offset == 0) {
1112 format_video_sync = VSYNC_VSCFR;
1114 if (format_video_sync == VSYNC_CFR && copy_ts) {
1115 format_video_sync = VSYNC_VSCFR;
1118 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Clip small negative drift so the frame lands on the output clock. */
1122 format_video_sync != VSYNC_PASSTHROUGH &&
1123 format_video_sync != VSYNC_DROP) {
1124 if (delta0 < -0.6) {
1125 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1127 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1128 sync_ipts = ost->sync_opts;
/* Per-policy decision on how many frames to emit (dup/drop). */
1133 switch (format_video_sync) {
1135 if (ost->frame_number == 0 && delta0 >= 0.5) {
1136 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1139 ost->sync_opts = lrint(sync_ipts);
1142 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1143 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1145 } else if (delta < -1.1)
1147 else if (delta > 1.1) {
1148 nb_frames = lrintf(delta);
1150 nb0_frames = lrintf(delta0 - 0.6);
1156 else if (delta > 0.6)
1157 ost->sync_opts = lrint(sync_ipts);
1160 case VSYNC_PASSTHROUGH:
1161 ost->sync_opts = lrint(sync_ipts);
/* Respect -frames and keep dup count within the emitted total. */
1168 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1169 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the dup-history window and record this decision. */
1171 memmove(ost->last_nb0_frames + 1,
1172 ost->last_nb0_frames,
1173 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1174 ost->last_nb0_frames[0] = nb0_frames;
1176 if (nb0_frames == 0 && ost->last_dropped) {
1178 av_log(NULL, AV_LOG_VERBOSE,
1179 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1180 ost->frame_number, ost->st->index, ost->last_frame->pts);
1182 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1183 if (nb_frames > dts_error_threshold * 30) {
1184 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1188 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1189 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
/* Warn at 1000, then (elided) the threshold is raised tenfold. */
1190 if (nb_frames_dup > dup_warning) {
1191 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1195 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1197 /* duplicates frame if needed */
1198 for (i = 0; i < nb_frames; i++) {
1199 AVFrame *in_picture;
1200 int forced_keyframe = 0;
1202 av_init_packet(&pkt);
/* First nb0_frames iterations re-encode the previous frame. */
1206 if (i < nb0_frames && ost->last_frame) {
1207 in_picture = ost->last_frame;
1209 in_picture = next_picture;
1214 in_picture->pts = ost->sync_opts;
1216 if (!check_recording_time(ost))
1219 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1220 ost->top_field_first >= 0)
1221 in_picture->top_field_first = !!ost->top_field_first;
/* Propagate interlacing info to the muxer's codec parameters. */
1223 if (in_picture->interlaced_frame) {
1224 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1225 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1227 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1229 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1231 in_picture->quality = enc->global_quality;
1232 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, expression, or "source". */
1234 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1235 in_picture->pts != AV_NOPTS_VALUE)
1236 ost->forced_kf_ref_pts = in_picture->pts;
1238 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1239 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1240 if (ost->forced_kf_index < ost->forced_kf_count &&
1241 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1242 ost->forced_kf_index++;
1243 forced_keyframe = 1;
1244 } else if (ost->forced_keyframes_pexpr) {
1246 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1247 res = av_expr_eval(ost->forced_keyframes_pexpr,
1248 ost->forced_keyframes_expr_const_values, NULL);
1249 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1250 ost->forced_keyframes_expr_const_values[FKF_N],
1251 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1252 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1253 ost->forced_keyframes_expr_const_values[FKF_T],
1254 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1257 forced_keyframe = 1;
1258 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1259 ost->forced_keyframes_expr_const_values[FKF_N];
1260 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1261 ost->forced_keyframes_expr_const_values[FKF_T];
1262 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1265 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1266 } else if ( ost->forced_keyframes
1267 && !strncmp(ost->forced_keyframes, "source", 6)
1268 && in_picture->key_frame==1) {
1269 forced_keyframe = 1;
1272 if (forced_keyframe) {
1273 in_picture->pict_type = AV_PICTURE_TYPE_I;
1274 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1277 update_benchmark(NULL);
1279 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1280 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1281 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1282 enc->time_base.num, enc->time_base.den);
1285 ost->frames_encoded++;
/* send/receive encode loop: one frame in, zero or more packets out. */
1287 ret = avcodec_send_frame(enc, in_picture);
1290 // Make sure Closed Captions will not be duplicated
1291 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1294 ret = avcodec_receive_packet(enc, &pkt);
1295 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1296 if (ret == AVERROR(EAGAIN))
1302 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1303 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1304 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1305 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Encoders without delay may leave pts unset; use the output clock. */
1308 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1309 pkt.pts = ost->sync_opts;
1311 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1314 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1315 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1316 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1317 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1320 frame_size = pkt.size;
1321 output_packet(of, &pkt, ost, 0);
1323 /* if two pass, output log */
1324 if (ost->logfile && enc->stats_out) {
1325 fprintf(ost->logfile, "%s", enc->stats_out);
1330 * For video, number of frames in == number of packets out.
1331 * But there may be reordering, so we can't throw away frames on encoder
1332 * flush, we need to limit them here, before they go into encoder.
1334 ost->frame_number++;
1336 if (vstats_filename && frame_size)
1337 do_video_stats(ost, frame_size);
/* Remember this frame so nb0_frames can re-encode it next call. */
1340 if (!ost->last_frame)
1341 ost->last_frame = av_frame_alloc();
1342 av_frame_unref(ost->last_frame);
1343 if (next_picture && ost->last_frame)
1344 av_frame_ref(ost->last_frame, next_picture);
1346 av_frame_free(&ost->last_frame);
1350 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1354 static double psnr(double d)
1356 return -10.0 * log10(d);
/*
 * Append one line of per-frame video statistics (quality, PSNR, sizes,
 * bitrates, picture type) to the file named by -vstats_file.
 * NOTE(review): this excerpt is missing interior lines (e.g. the fopen
 * error path and enc assignment) — verify against the full file.
 */
1359 static void do_video_stats(OutputStream *ost, int frame_size)
1361 AVCodecContext *enc;
1363 double ti1, bitrate, avg_bitrate;
1365 /* this is executed just the first time do_video_stats is called */
1367 vstats_file = fopen(vstats_filename, "w");
1375 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1376 frame_number = ost->st->nb_frames;
// vstats_version 2 adds the output file/stream indices to each line
1377 if (vstats_version <= 1) {
1378 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1379 ost->quality / (float)FF_QP2LAMBDA);
1381 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1382 ost->quality / (float)FF_QP2LAMBDA);
// PSNR is only meaningful when the encoder was asked to collect error stats
1385 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1386 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1388 fprintf(vstats_file,"f_size= %6d ", frame_size);
1389 /* compute pts value */
1390 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
// instantaneous bitrate from this frame; average from total bytes over elapsed time
1394 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1395 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1396 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1397 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1398 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1402 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark a stream — and, per the loop below, every stream of the same output
 * file — as finished for both the encoder and the muxer so no further
 * packets are produced for them.
 */
1404 static void finish_output_stream(OutputStream *ost)
1406 OutputFile *of = output_files[ost->file_index];
1409 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
// presumably guarded by a shortest/condition check in the elided lines — TODO confirm
1412 for (i = 0; i < of->ctx->nb_streams; i++)
1413 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1418 * Get and encode new output from any of the filtergraphs, without causing
1421 * @return 0 for success, <0 for severe errors
/*
 * Drain every configured buffersink and hand the frames to the audio/video
 * encoders. With flush set, an EOF from a video sink triggers a final
 * do_video_out(NULL) to flush the fps/duplication logic.
 */
1423 static int reap_filters(int flush)
1425 AVFrame *filtered_frame = NULL;
1428 /* Reap all buffers present in the buffer sinks */
1429 for (i = 0; i < nb_output_streams; i++) {
1430 OutputStream *ost = output_streams[i];
1431 OutputFile *of = output_files[ost->file_index];
1432 AVFilterContext *filter;
1433 AVCodecContext *enc = ost->enc_ctx;
// skip streams whose filtergraph is not (yet) configured
1436 if (!ost->filter || !ost->filter->graph->graph)
1438 filter = ost->filter->filter;
// lazily initialize the output stream the first time its filter output is seen
1440 if (!ost->initialized) {
1441 char error[1024] = "";
1442 ret = init_output_stream(ost, error, sizeof(error));
1444 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1445 ost->file_index, ost->index, error);
1450 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1451 return AVERROR(ENOMEM);
1453 filtered_frame = ost->filtered_frame;
// inner loop: pull frames without requesting new ones from upstream
1456 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1457 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1458 AV_BUFFERSINK_FLAG_NO_REQUEST);
// EAGAIN/EOF are expected terminations; anything else is only warned about
1460 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1461 av_log(NULL, AV_LOG_WARNING,
1462 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1463 } else if (flush && ret == AVERROR_EOF) {
1464 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1465 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
// a finished stream discards any frame the sink still produced
1469 if (ost->finished) {
1470 av_frame_unref(filtered_frame);
// rescale filter pts into encoder time base; float_pts keeps extra_bits of
// sub-unit precision for the fps code
1473 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1474 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1475 AVRational filter_tb = av_buffersink_get_time_base(filter);
1476 AVRational tb = enc->time_base;
1477 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1479 tb.den <<= extra_bits;
1481 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1482 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1483 float_pts /= 1 << extra_bits;
1484 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1485 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1487 filtered_frame->pts =
1488 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1489 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
// dispatch the frame to the matching encoder path
1492 switch (av_buffersink_get_type(filter)) {
1493 case AVMEDIA_TYPE_VIDEO:
1494 if (!ost->frame_aspect_ratio.num)
1495 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1498 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1499 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1501 enc->time_base.num, enc->time_base.den);
1504 do_video_out(of, ost, filtered_frame, float_pts);
1506 case AVMEDIA_TYPE_AUDIO:
// refuse un-normalized audio if the encoder can't change parameters mid-stream
1507 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1508 enc->channels != filtered_frame->channels) {
1509 av_log(NULL, AV_LOG_ERROR,
1510 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1513 do_audio_out(of, ost, filtered_frame);
1516 // TODO support subtitle filters
1520 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type output sizes with muxing overhead,
 * then (at verbose level) per-stream demux/decode and encode/mux counters,
 * and a warning if nothing at all was written.
 */
1527 static void print_final_stats(int64_t total_size)
1529 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1530 uint64_t subtitle_size = 0;
1531 uint64_t data_size = 0;
1532 float percent = -1.0;
// accumulate written bytes per media type across all output streams
1536 for (i = 0; i < nb_output_streams; i++) {
1537 OutputStream *ost = output_streams[i];
1538 switch (ost->enc_ctx->codec_type) {
1539 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1540 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1541 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1542 default: other_size += ost->data_size; break;
1544 extra_size += ost->enc_ctx->extradata_size;
1545 data_size += ost->data_size;
// pass-1 only streams write no real output (elided branch presumably skips them)
1546 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1547 != AV_CODEC_FLAG_PASS1)
// muxing overhead = container bytes beyond the raw stream payload
1551 if (data_size && total_size>0 && total_size >= data_size)
1552 percent = 100.0 * (total_size - data_size) / data_size;
1554 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1555 video_size / 1024.0,
1556 audio_size / 1024.0,
1557 subtitle_size / 1024.0,
1558 other_size / 1024.0,
1559 extra_size / 1024.0);
1561 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1563 av_log(NULL, AV_LOG_INFO, "unknown");
1564 av_log(NULL, AV_LOG_INFO, "\n");
1566 /* print verbose per-stream stats */
1567 for (i = 0; i < nb_input_files; i++) {
1568 InputFile *f = input_files[i];
// note: shadows the total_size parameter with a per-file accumulator
1569 uint64_t total_packets = 0, total_size = 0;
1571 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1574 for (j = 0; j < f->nb_streams; j++) {
1575 InputStream *ist = input_streams[f->ist_index + j];
1576 enum AVMediaType type = ist->dec_ctx->codec_type;
1578 total_size += ist->data_size;
1579 total_packets += ist->nb_packets;
1581 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1582 i, j, media_type_string(type));
1583 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1584 ist->nb_packets, ist->data_size);
1586 if (ist->decoding_needed) {
1587 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1588 ist->frames_decoded);
1589 if (type == AVMEDIA_TYPE_AUDIO)
1590 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1591 av_log(NULL, AV_LOG_VERBOSE, "; ");
1594 av_log(NULL, AV_LOG_VERBOSE, "\n");
1597 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1598 total_packets, total_size);
// symmetric loop for the output side: frames encoded, packets muxed
1601 for (i = 0; i < nb_output_files; i++) {
1602 OutputFile *of = output_files[i];
1603 uint64_t total_packets = 0, total_size = 0;
1605 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1608 for (j = 0; j < of->ctx->nb_streams; j++) {
1609 OutputStream *ost = output_streams[of->ost_index + j];
1610 enum AVMediaType type = ost->enc_ctx->codec_type;
1612 total_size += ost->data_size;
1613 total_packets += ost->packets_written;
1615 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1616 i, j, media_type_string(type));
1617 if (ost->encoding_needed) {
1618 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1619 ost->frames_encoded);
1620 if (type == AVMEDIA_TYPE_AUDIO)
1621 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1622 av_log(NULL, AV_LOG_VERBOSE, "; ");
1625 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1626 ost->packets_written, ost->data_size);
1628 av_log(NULL, AV_LOG_VERBOSE, "\n");
1631 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1632 total_packets, total_size);
// warn when absolutely nothing was encoded or copied
1634 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1635 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1637 av_log(NULL, AV_LOG_WARNING, "\n");
1639 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Build and emit the periodic status line ("frame= ... fps= ... time= ...")
 * and, in parallel, the key=value report written to -progress. Rate-limited
 * to one update per 500ms except for the final report, which also triggers
 * print_final_stats().
 */
1644 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1646 AVBPrint buf, buf_script;
1648 AVFormatContext *oc;
1650 AVCodecContext *enc;
1651 int frame_number, vid, i;
// pts tracks the maximum end timestamp over all output streams
1654 int64_t pts = INT64_MIN + 1;
// static state: persists across calls for rate limiting and the QP histogram
1655 static int64_t last_time = -1;
1656 static int qp_histogram[52];
1657 int hours, mins, secs, us;
1658 const char *hours_sign;
1662 if (!print_stats && !is_last_report && !progress_avio)
// throttle intermediate reports to one every 500000us
1665 if (!is_last_report) {
1666 if (last_time == -1) {
1667 last_time = cur_time;
1670 if ((cur_time - last_time) < 500000)
1672 last_time = cur_time;
1675 t = (cur_time-timer_start) / 1000000.0;
1678 oc = output_files[0]->ctx;
1680 total_size = avio_size(oc->pb);
1681 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1682 total_size = avio_tell(oc->pb);
1685 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1686 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1687 for (i = 0; i < nb_output_streams; i++) {
1689 ost = output_streams[i];
1691 if (!ost->stream_copy)
1692 q = ost->quality / (float) FF_QP2LAMBDA;
// additional video streams only contribute their quantizer value
1694 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1695 av_bprintf(&buf, "q=%2.1f ", q);
1696 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1697 ost->file_index, ost->index, q);
// first video stream drives frame/fps reporting
1699 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1702 frame_number = ost->frame_number;
1703 fps = t > 1 ? frame_number / t : 0;
// "%3.*f": one decimal only while fps is still in single digits
1704 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1705 frame_number, fps < 9.95, fps, q);
1706 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1707 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1708 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1709 ost->file_index, ost->index, q);
1711 av_bprintf(&buf, "L");
// QP histogram display (enabled by the elided qp_hist condition)
1715 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1717 for (j = 0; j < 32; j++)
1718 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
// running / final PSNR per plane (Y, U, V) plus the combined value
1721 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1723 double error, error_sum = 0;
1724 double scale, scale_sum = 0;
1726 char type[3] = { 'Y','U','V' };
1727 av_bprintf(&buf, "PSNR=");
1728 for (j = 0; j < 3; j++) {
1729 if (is_last_report) {
1730 error = enc->error[j];
1731 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1733 error = ost->error[j];
1734 scale = enc->width * enc->height * 255.0 * 255.0;
1740 p = psnr(error / scale);
1741 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
// "type[j] | 32" lowercases the plane letter for the script keys
1742 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1743 ost->file_index, ost->index, type[j] | 32, p);
1745 p = psnr(error_sum / scale_sum);
1746 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1747 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1748 ost->file_index, ost->index, p);
1752 /* compute min output value */
1753 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1754 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1755 ost->st->time_base, AV_TIME_BASE_Q));
1757 nb_frames_drop += ost->last_dropped;
// split |pts| (in AV_TIME_BASE units) into h:m:s.us for display
1760 secs = FFABS(pts) / AV_TIME_BASE;
1761 us = FFABS(pts) % AV_TIME_BASE;
1766 hours_sign = (pts < 0) ? "-" : "";
1768 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1769 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1771 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1772 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1773 if (pts == AV_NOPTS_VALUE) {
1774 av_bprintf(&buf, "N/A ");
1776 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1777 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1781 av_bprintf(&buf, "bitrate=N/A");
1782 av_bprintf(&buf_script, "bitrate=N/A\n");
1784 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1785 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1788 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1789 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1790 if (pts == AV_NOPTS_VALUE) {
1791 av_bprintf(&buf_script, "out_time_us=N/A\n");
1792 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1793 av_bprintf(&buf_script, "out_time=N/A\n");
1795 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1796 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1797 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1798 hours_sign, hours, mins, secs, us);
1801 if (nb_frames_dup || nb_frames_drop)
1802 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1803 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1804 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1807 av_bprintf(&buf, " speed=N/A");
1808 av_bprintf(&buf_script, "speed=N/A\n");
1810 av_bprintf(&buf, " speed=%4.3gx", speed);
1811 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
// '\r' keeps intermediate reports on one console line; '\n' for the last one
1814 if (print_stats || is_last_report) {
1815 const char end = is_last_report ? '\n' : '\r';
1816 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1817 fprintf(stderr, "%s %c", buf.str, end);
1819 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1823 av_bprint_finalize(&buf, NULL);
// flush the machine-readable report to the -progress destination
1825 if (progress_avio) {
1826 av_bprintf(&buf_script, "progress=%s\n",
1827 is_last_report ? "end" : "continue");
1828 avio_write(progress_avio, buf_script.str,
1829 FFMIN(buf_script.len, buf_script.size - 1));
1830 avio_flush(progress_avio);
1831 av_bprint_finalize(&buf_script, NULL);
1832 if (is_last_report) {
1833 if ((ret = avio_closep(&progress_avio)) < 0)
1834 av_log(NULL, AV_LOG_ERROR,
1835 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1840 print_final_stats(total_size);
1843 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1845 // We never got any input. Set a fake format, which will
1846 // come from libavformat.
1847 ifilter->format = par->format;
1848 ifilter->sample_rate = par->sample_rate;
1849 ifilter->channels = par->channels;
1850 ifilter->channel_layout = par->channel_layout;
1851 ifilter->width = par->width;
1852 ifilter->height = par->height;
1853 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/*
 * Drain every audio/video encoder at end of input: send a NULL frame to
 * enter draining mode, pull the remaining packets and mux them. Streams
 * whose filtergraph was never configured are initialized (or finished)
 * first so nothing is silently lost.
 */
1856 static void flush_encoders(void)
1860 for (i = 0; i < nb_output_streams; i++) {
1861 OutputStream *ost = output_streams[i];
1862 AVCodecContext *enc = ost->enc_ctx;
1863 OutputFile *of = output_files[ost->file_index];
1865 if (!ost->encoding_needed)
1868 // Try to enable encoding with no input frames.
1869 // Maybe we should just let encoding fail instead.
1870 if (!ost->initialized) {
1871 FilterGraph *fg = ost->filter->graph;
1872 char error[1024] = "";
1874 av_log(NULL, AV_LOG_WARNING,
1875 "Finishing stream %d:%d without any data written to it.\n",
1876 ost->file_index, ost->st->index);
// configure the graph from codecpar fallbacks when no frame ever arrived
1878 if (ost->filter && !fg->graph) {
1880 for (x = 0; x < fg->nb_inputs; x++) {
1881 InputFilter *ifilter = fg->inputs[x];
1882 if (ifilter->format < 0)
1883 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1886 if (!ifilter_has_all_input_formats(fg))
1889 ret = configure_filtergraph(fg);
1891 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1895 finish_output_stream(ost);
1898 ret = init_output_stream(ost, error, sizeof(error));
1900 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1901 ost->file_index, ost->index, error);
// frame_size <= 1 audio codecs (and non-A/V streams) need no draining
1906 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1909 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1913 const char *desc = NULL;
1917 switch (enc->codec_type) {
1918 case AVMEDIA_TYPE_AUDIO:
1921 case AVMEDIA_TYPE_VIDEO:
1928 av_init_packet(&pkt);
1932 update_benchmark(NULL);
// drain loop: EAGAIN means the encoder wants (NULL) input before more output
1934 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1935 ret = avcodec_send_frame(enc, NULL);
1937 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1944 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1945 if (ret < 0 && ret != AVERROR_EOF) {
1946 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
// two-pass: append the encoder's stats to the pass log
1951 if (ost->logfile && enc->stats_out) {
1952 fprintf(ost->logfile, "%s", enc->stats_out);
// EOF: push a final (EOF-marked) packet through the bitstream filters/muxer
1954 if (ret == AVERROR_EOF) {
1955 output_packet(of, &pkt, ost, 1);
1958 if (ost->finished & MUXER_FINISHED) {
1959 av_packet_unref(&pkt);
1962 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1963 pkt_size = pkt.size;
1964 output_packet(of, &pkt, ost, 0);
1965 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1966 do_video_stats(ost, pkt_size);
1973 * Check whether a packet from ist should be written into ost at this time
/*
 * Decide whether a packet from ist may be written to ost right now
 * (stream-copy path). Returns nonzero to allow, 0 to reject; the visible
 * checks reject packets from the wrong source stream and packets before
 * the output file's start time.
 */
1975 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1977 OutputFile *of = output_files[ost->file_index];
// global index of ist across all input files, compared to ost's mapping
1978 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1980 if (ost->source_index != ist_index)
1986 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding: rescale
 * its timestamps into the muxer time base, apply -ss offsets, honor
 * recording-time limits, then hand it to output_packet().
 */
1992 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1994 OutputFile *of = output_files[ost->file_index];
1995 InputFile *f = input_files [ist->file_index];
1996 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1997 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1998 AVPacket opkt = { 0 };
2000 av_init_packet(&opkt);
2002 // EOF: flush output bitstream filters.
2004 output_packet(of, &opkt, ost, 1);
// by default do not start a copied stream on a non-keyframe
2008 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2009 !ost->copy_initial_nonkeyframes)
// drop packets that precede the requested output start
2012 if (!ost->frame_number && !ost->copy_prior_start) {
2013 int64_t comp_start = start_time;
2014 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2015 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2016 if (pkt->pts == AV_NOPTS_VALUE ?
2017 ist->pts < comp_start :
2018 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
// stop the stream once the output recording time is reached
2022 if (of->recording_time != INT64_MAX &&
2023 ist->pts >= of->recording_time + start_time) {
2024 close_output_stream(ost);
// same for the per-input-file -t limit
2028 if (f->recording_time != INT64_MAX) {
2029 start_time = f->ctx->start_time;
2030 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2031 start_time += f->start_time;
2032 if (ist->pts >= f->recording_time + start_time) {
2033 close_output_stream(ost);
2038 /* force the input stream PTS */
2039 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
// rescale pts/dts from the input stream time base to the muxer time base
2042 if (pkt->pts != AV_NOPTS_VALUE)
2043 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2045 opkt.pts = AV_NOPTS_VALUE;
// missing dts: fall back to the interpolated ist->dts (AV_TIME_BASE units)
2047 if (pkt->dts == AV_NOPTS_VALUE)
2048 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2050 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2051 opkt.dts -= ost_tb_start_time;
// audio: derive sample-accurate timestamps from the frame duration
2053 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2054 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2056 duration = ist->dec_ctx->frame_size;
2057 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2058 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2059 ost->mux_timebase) - ost_tb_start_time;
2062 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2064 opkt.flags = pkt->flags;
// share the payload by reference when possible (elided fallback copies)
2067 opkt.buf = av_buffer_ref(pkt->buf);
2071 opkt.data = pkt->data;
2072 opkt.size = pkt->size;
2074 av_copy_packet_side_data(&opkt, pkt);
2076 output_packet(of, &opkt, ost, 0);
/*
 * If the decoder reports no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn the user.
 * Returns nonzero on success; the failure returns are in elided lines.
 */
2079 int guess_input_channel_layout(InputStream *ist)
2081 AVCodecContext *dec = ist->dec_ctx;
2083 if (!dec->channel_layout) {
2084 char layout_name[256];
// refuse to guess above the user-configured channel-count limit
2086 if (dec->channels > ist->guess_layout_max)
2088 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2089 if (!dec->channel_layout)
2091 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2092 dec->channels, dec->channel_layout);
2093 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2094 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Record decode success/failure statistics and react to errors: abort on
 * decode error when -xerror is set, and flag corrupt decoded frames
 * (fatal or warning, again depending on exit_on_error).
 */
2099 static void check_decode_result(InputStream *ist, int *got_output, int ret)
// decode_error_stat[0] counts successes, [1] counts failures
2101 if (*got_output || ret<0)
2102 decode_error_stat[ret<0] ++;
2104 if (ret < 0 && exit_on_error)
2107 if (*got_output && ist) {
2108 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2109 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2110 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2117 // Filters can be configured only if the formats of all inputs are known.
/*
 * Return whether every audio/video input of the filtergraph has a known
 * format (format >= 0); a graph can only be configured once this holds.
 */
2118 static int ifilter_has_all_input_formats(FilterGraph *fg)
2121 for (i = 0; i < fg->nb_inputs; i++) {
// only audio/video inputs need a negotiated format
2122 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2123 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Feed one decoded frame into a filtergraph input. If the frame's
 * parameters differ from what the graph was configured for, the graph is
 * (re)configured; frames arriving before all inputs have known formats
 * are buffered in the input's FIFO instead.
 */
2129 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2131 FilterGraph *fg = ifilter->graph;
2132 int need_reinit, ret, i;
2134 /* determine if the parameters for this input changed */
2135 need_reinit = ifilter->format != frame->format;
2137 switch (ifilter->ist->st->codecpar->codec_type) {
2138 case AVMEDIA_TYPE_AUDIO:
2139 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2140 ifilter->channels != frame->channels ||
2141 ifilter->channel_layout != frame->channel_layout;
2143 case AVMEDIA_TYPE_VIDEO:
2144 need_reinit |= ifilter->width != frame->width ||
2145 ifilter->height != frame->height;
// -reinit_filter 0 keeps an already-configured graph despite changes
2149 if (!ifilter->ist->reinit_filters && fg->graph)
// hw frames context changes always force a reinit
2152 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2153 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2157 ret = ifilter_parameters_from_frame(ifilter, frame);
2162 /* (re)init the graph if possible, otherwise buffer the frame and return */
2163 if (need_reinit || !fg->graph) {
2164 for (i = 0; i < fg->nb_inputs; i++) {
2165 if (!ifilter_has_all_input_formats(fg)) {
// clone because the caller unrefs/reuses the original frame
2166 AVFrame *tmp = av_frame_clone(frame);
2168 return AVERROR(ENOMEM);
2169 av_frame_unref(frame);
// grow the FIFO geometrically when full
2171 if (!av_fifo_space(ifilter->frame_queue)) {
2172 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2174 av_frame_free(&tmp);
2178 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
// flush the old graph's pending output before tearing it down
2183 ret = reap_filters(1);
2184 if (ret < 0 && ret != AVERROR_EOF) {
2185 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2189 ret = configure_filtergraph(fg);
2191 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2196 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2198 if (ret != AVERROR_EOF)
2199 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal end-of-stream (with timestamp pts) on one filtergraph input.
 * If the graph was never configured, fall back to codecpar parameters so
 * it can still be built; fail if the input format remains unknown.
 */
2206 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2212 if (ifilter->filter) {
2213 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2217 // the filtergraph was never configured
2218 if (ifilter->format < 0)
2219 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2220 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2221 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2222 return AVERROR_INVALIDDATA;
2229 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2230 // There is the following difference: if you got a frame, you must call
2231 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2232 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/*
 * Thin wrapper over the send/receive decode API: submit pkt (if any) and
 * try to receive one frame, reporting it via *got_frame (set in elided
 * lines). AVERROR_EOF from send and EAGAIN from receive are tolerated.
 */
2233 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2240 ret = avcodec_send_packet(avctx, pkt);
2241 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2242 // decoded frames with avcodec_receive_frame() until done.
2243 if (ret < 0 && ret != AVERROR_EOF)
2247 ret = avcodec_receive_frame(avctx, frame);
2248 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Deliver one decoded frame to every filtergraph input fed by this stream.
 * All but the last filter receive a new reference (ist->filter_frame); the
 * last consumes the decoded frame itself. EOF from a filter is ignored.
 */
2256 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2261 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2262 for (i = 0; i < ist->nb_filters; i++) {
2263 if (i < ist->nb_filters - 1) {
2264 f = ist->filter_frame;
2265 ret = av_frame_ref(f, decoded_frame);
2270 ret = ifilter_send_frame(ist->filters[i], f);
2271 if (ret == AVERROR_EOF)
2272 ret = 0; /* ignore */
2274 av_log(NULL, AV_LOG_ERROR,
2275 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, fix up the frame's pts (frame pts, packet pts,
 * or interpolated dts, in that order of preference), rescale it to a
 * 1/sample_rate time base, and forward the frame to the filtergraphs.
 */
2282 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2285 AVFrame *decoded_frame;
2286 AVCodecContext *avctx = ist->dec_ctx;
2288 AVRational decoded_frame_tb;
// lazily allocate the reusable decode/filter frames
2290 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2291 return AVERROR(ENOMEM);
2292 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2293 return AVERROR(ENOMEM);
2294 decoded_frame = ist->decoded_frame;
2296 update_benchmark(NULL);
2297 ret = decode(avctx, decoded_frame, got_output, pkt);
2298 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
// a non-positive sample rate would divide by zero below
2302 if (ret >= 0 && avctx->sample_rate <= 0) {
2303 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2304 ret = AVERROR_INVALIDDATA;
2307 if (ret != AVERROR_EOF)
2308 check_decode_result(ist, got_output, ret);
2310 if (!*got_output || ret < 0)
2313 ist->samples_decoded += decoded_frame->nb_samples;
2314 ist->frames_decoded++;
2316 /* increment next_dts to use for the case where the input stream does not
2317 have timestamps or there are multiple frames in the packet */
2318 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2320 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
// choose the best available pts source and its time base
2323 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2324 decoded_frame_tb = ist->st->time_base;
2325 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2326 decoded_frame->pts = pkt->pts;
2327 decoded_frame_tb = ist->st->time_base;
2329 decoded_frame->pts = ist->dts;
2330 decoded_frame_tb = AV_TIME_BASE_Q;
// rescale to 1/sample_rate, keeping sub-unit remainders across frames
2332 if (decoded_frame->pts != AV_NOPTS_VALUE)
2333 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2334 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2335 (AVRational){1, avctx->sample_rate});
2336 ist->nb_samples = decoded_frame->nb_samples;
2337 err = send_frame_to_filters(ist, decoded_frame);
2339 av_frame_unref(ist->filter_frame);
2340 av_frame_unref(decoded_frame);
2341 return err < 0 ? err : ret;
/*
 * Decode one video packet (or drain on eof), derive a best-effort
 * timestamp for the frame (buffered dts are used as a fallback at EOF),
 * and forward the frame to the filtergraphs.
 */
2344 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2347 AVFrame *decoded_frame;
2348 int i, ret = 0, err = 0;
2349 int64_t best_effort_timestamp;
2350 int64_t dts = AV_NOPTS_VALUE;
2353 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2354 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2356 if (!eof && pkt && pkt->size == 0)
2359 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2360 return AVERROR(ENOMEM);
2361 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2362 return AVERROR(ENOMEM);
2363 decoded_frame = ist->decoded_frame;
2364 if (ist->dts != AV_NOPTS_VALUE)
2365 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base)
2368 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2371 // The old code used to set dts on the drain packet, which does not work
2372 // with the new API anymore.
// remember each packet's dts so EOF frames can still get a timestamp
2374 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2376 return AVERROR(ENOMEM);
2377 ist->dts_buffer = new;
2378 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2381 update_benchmark(NULL);
2382 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2383 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2387 // The following line may be required in some cases where there is no parser
2388 // or the parser does not has_b_frames correctly
2389 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2390 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2391 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2393 av_log(ist->dec_ctx, AV_LOG_WARNING,
2394 "video_delay is larger in decoder than demuxer %d > %d.\n"
2395 "If you want to help, upload a sample "
2396 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2397 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2398 ist->dec_ctx->has_b_frames,
2399 ist->st->codecpar->video_delay);
2402 if (ret != AVERROR_EOF)
2403 check_decode_result(ist, got_output, ret);
// debug aid: report decoder/frame geometry mismatches
2405 if (*got_output && ret >= 0) {
2406 if (ist->dec_ctx->width != decoded_frame->width ||
2407 ist->dec_ctx->height != decoded_frame->height ||
2408 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2409 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2410 decoded_frame->width,
2411 decoded_frame->height,
2412 decoded_frame->format,
2413 ist->dec_ctx->width,
2414 ist->dec_ctx->height,
2415 ist->dec_ctx->pix_fmt);
2419 if (!*got_output || ret < 0)
2422 if(ist->top_field_first>=0)
2423 decoded_frame->top_field_first = ist->top_field_first;
2425 ist->frames_decoded++;
// download the frame from hardware before it enters the filtergraph
2427 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2428 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2432 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2434 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2435 *duration_pts = decoded_frame->pkt_duration;
// forced CFR input: synthesize monotonically increasing timestamps
2437 if (ist->framerate.num)
2438 best_effort_timestamp = ist->cfr_next_pts++;
// at EOF, fall back to the oldest buffered packet dts (FIFO shift)
2440 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2441 best_effort_timestamp = ist->dts_buffer[0];
2443 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2444 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2445 ist->nb_dts_buffer--;
2448 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2449 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2451 if (ts != AV_NOPTS_VALUE)
2452 ist->next_pts = ist->pts = ts;
2456 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2457 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2458 ist->st->index, av_ts2str(decoded_frame->pts),
2459 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2460 best_effort_timestamp,
2461 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2462 decoded_frame->key_frame, decoded_frame->pict_type,
2463 ist->st->time_base.num, ist->st->time_base.den);
2466 if (ist->st->sample_aspect_ratio.num)
2467 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2469 err = send_frame_to_filters(ist, decoded_frame);
2472 av_frame_unref(ist->filter_frame);
2473 av_frame_unref(decoded_frame);
2474 return err < 0 ? err : ret;
2477 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
/* Decode one subtitle packet for this input stream, optionally fix up the
 * previous subtitle's display duration, feed the result to the sub2video
 * machinery (for filtering), and send it to every subtitle-encoding output
 * stream mapped to this input.
 * NOTE(review): this extract is missing intermediate source lines (blank
 * lines, braces and some statements); comments describe only the visible
 * code. */
2480 AVSubtitle subtitle;
2482 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2483 &subtitle, got_output, pkt);
/* Account for the decode result (error/success statistics, EOF handling). */
2485 check_decode_result(NULL, got_output, ret);
/* On failure or when no subtitle was produced, flush any pending sub2video
 * state before bailing out. */
2487 if (ret < 0 || !*got_output) {
2490 sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle so it ends when this one
 * starts. `end` is computed in milliseconds (av_rescale by 1000/AV_TIME_BASE
 * of the pts delta between consecutive subtitles). */
2494 if (ist->fix_sub_duration) {
2496 if (ist->prev_sub.got_output) {
2497 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2498 1000, AV_TIME_BASE);
2499 if (end < ist->prev_sub.subtitle.end_display_time) {
2500 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2501 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2502 ist->prev_sub.subtitle.end_display_time, end,
2503 end <= 0 ? ", dropping it" : "");
2504 ist->prev_sub.subtitle.end_display_time = end;
/* Delay output by one subtitle: swap the just-decoded subtitle with the
 * previously stored one, so each subtitle is emitted only after its real
 * duration is known. */
2507 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2508 FFSWAP(int, ret, ist->prev_sub.ret);
2509 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video path: if the rendering frame exists, rasterize the subtitle now;
 * otherwise, if filters are attached, queue it until sub2video is set up. */
2517 if (ist->sub2video.frame) {
2518 sub2video_update(ist, &subtitle);
2519 } else if (ist->nb_filters) {
2520 if (!ist->sub2video.sub_queue)
2521 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2522 if (!ist->sub2video.sub_queue)
/* Grow the queue by doubling when full. */
2524 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2525 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2529 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
/* Empty subtitles (no rects) are not sent to encoders. */
2533 if (!subtitle.num_rects)
2536 ist->frames_decoded++;
/* Fan out to every output stream that copies from this input and encodes
 * subtitles. */
2538 for (i = 0; i < nb_output_streams; i++) {
2539 OutputStream *ost = output_streams[i];
2541 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2542 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2545 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2550 avsubtitle_free(&subtitle);
2554 static int send_filter_eof(InputStream *ist)
2557 /* TODO keep pts also in stream time base to avoid converting back */
2558 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2559 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2561 for (i = 0; i < ist->nb_filters; i++) {
2562 ret = ifilter_send_eof(ist->filters[i], pts);
2569 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2570 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
/* Central per-packet dispatcher: routes one input packet (or a NULL EOF
 * packet) either through the decoders (transcoding path) or straight to the
 * muxers (stream-copy path), maintaining the stream's running pts/dts
 * estimates along the way. Returns nonzero while the stream still has data
 * (i.e. !eof_reached).
 * NOTE(review): this extract is missing intermediate source lines; comments
 * describe only the visible code. */
2574 int eof_reached = 0;
/* First packet for this stream: seed ist->dts. When an average frame rate is
 * known, start negative by the decoder reorder delay (has_b_frames frames)
 * so dts of the first output lines up at ~0. */
2577 if (!ist->saw_first_ts) {
2578 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2580 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2581 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2582 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2584 ist->saw_first_ts = 1;
2587 if (ist->next_dts == AV_NOPTS_VALUE)
2588 ist->next_dts = ist->dts;
2589 if (ist->next_pts == AV_NOPTS_VALUE)
2590 ist->next_pts = ist->pts;
2594 av_init_packet(&avpkt);
/* Resynchronize the running timestamps from the packet dts (converted to
 * AV_TIME_BASE_Q). For non-video or non-decoded streams pts follows dts. */
2601 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2602 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2603 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2604 ist->next_pts = ist->pts = ist->dts;
2607 // while we have more to decode or while the decoder did output something on EOF
2608 while (ist->decoding_needed) {
2609 int64_t duration_dts = 0;
2610 int64_t duration_pts = 0;
2612 int decode_failed = 0;
2614 ist->pts = ist->next_pts;
2615 ist->dts = ist->next_dts;
/* Dispatch by media type. `repeating` iterations pass NULL instead of the
 * packet so the decoder is only fed once per packet. */
2617 switch (ist->dec_ctx->codec_type) {
2618 case AVMEDIA_TYPE_AUDIO:
2619 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2622 case AVMEDIA_TYPE_VIDEO:
2623 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
/* Estimate the dts step for the next frame: prefer the packet duration,
 * otherwise derive it from the codec frame rate and repeat_pict ticks. */
2625 if (!repeating || !pkt || got_output) {
2626 if (pkt && pkt->duration) {
2627 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2628 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2629 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2630 duration_dts = ((int64_t)AV_TIME_BASE *
2631 ist->dec_ctx->framerate.den * ticks) /
2632 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2635 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2636 ist->next_dts += duration_dts;
2638 ist->next_dts = AV_NOPTS_VALUE;
/* Advance next_pts by the decoded frame duration when available, falling
 * back to the dts-based estimate. */
2642 if (duration_pts > 0) {
2643 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2645 ist->next_pts += duration_dts;
2649 case AVMEDIA_TYPE_SUBTITLE:
2652 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2653 if (!pkt && ret >= 0)
2660 if (ret == AVERROR_EOF) {
/* Distinguish decoder failures from post-decode processing failures in the
 * error report; the latter is always fatal. */
2666 if (decode_failed) {
2667 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2668 ist->file_index, ist->st->index, av_err2str(ret));
2670 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2671 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2673 if (!decode_failed || exit_on_error)
2679 ist->got_output = 1;
2684 // During draining, we might get multiple output frames in this loop.
2685 // ffmpeg.c does not drain the filter chain on configuration changes,
2686 // which means if we send multiple frames at once to the filters, and
2687 // one of those frames changes configuration, the buffered frames will
2688 // be lost. This can upset certain FATE tests.
2689 // Decode only 1 frame per call on EOF to appease these FATE tests.
2690 // The ideal solution would be to rewrite decoding to use the new
2691 // decoding API in a better way.
2698 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2699 /* except when looping we need to flush but not to send an EOF */
2700 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2701 int ret = send_filter_eof(ist);
2703 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2708 /* handle stream copy */
2709 if (!ist->decoding_needed && pkt) {
2710 ist->dts = ist->next_dts;
/* Advance next_dts by one packet's worth of time, per media type. */
2711 switch (ist->dec_ctx->codec_type) {
2712 case AVMEDIA_TYPE_AUDIO:
2713 av_assert1(pkt->duration >= 0);
2714 if (ist->dec_ctx->sample_rate) {
2715 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2716 ist->dec_ctx->sample_rate;
2718 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2721 case AVMEDIA_TYPE_VIDEO:
2722 if (ist->framerate.num) {
/* CFR forced by -r on input: quantize next_dts onto the frame grid. */
2723 // TODO: Remove work-around for c99-to-c89 issue 7
2724 AVRational time_base_q = AV_TIME_BASE_Q;
2725 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2726 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2727 } else if (pkt->duration) {
2728 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2729 } else if(ist->dec_ctx->framerate.num != 0) {
2730 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2731 ist->next_dts += ((int64_t)AV_TIME_BASE *
2732 ist->dec_ctx->framerate.den * ticks) /
2733 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2737 ist->pts = ist->dts;
2738 ist->next_pts = ist->next_dts;
/* Send the packet unmodified to every stream-copy output mapped to it. */
2740 for (i = 0; i < nb_output_streams; i++) {
2741 OutputStream *ost = output_streams[i];
2743 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2746 do_streamcopy(ist, ost, pkt);
2749 return !eof_reached;
2752 static void print_sdp(void)
/* Generate an SDP description covering all RTP output files and either print
 * it to stdout or write it to -sdp_file. Bails out early (no output) until
 * every output file has had its header written.
 * NOTE(review): intermediate source lines are missing from this extract. */
2757 AVIOContext *sdp_pb;
2758 AVFormatContext **avc;
/* Wait until all muxers are initialized before emitting the SDP. */
2760 for (i = 0; i < nb_output_files; i++) {
2761 if (!output_files[i]->header_written)
2765 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts them. */
2768 for (i = 0, j = 0; i < nb_output_files; i++) {
2769 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2770 avc[j] = output_files[i]->ctx;
2778 av_sdp_create(avc, j, sdp, sizeof(sdp));
2780 if (!sdp_filename) {
2781 printf("SDP:\n%s\n", sdp);
/* Otherwise write the SDP to the requested file via avio. */
2784 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2785 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2787 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2788 avio_closep(&sdp_pb);
2789 av_freep(&sdp_filename);
2797 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick a hardware one matching the user's -hwaccel selection,
 * initializing the hwaccel as a side effect. Software formats fall through
 * untouched.
 * NOTE(review): intermediate source lines are missing from this extract;
 * the final fallback return is not visible here. */
2799 InputStream *ist = s->opaque;
2800 const enum AVPixelFormat *p;
2803 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2804 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2805 const AVCodecHWConfig *config = NULL;
/* Only hardware pixel formats are candidates for hwaccel selection. */
2808 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Modern path (generic/auto hwaccel): query the codec's advertised hw
 * configs for one using a device context and matching this pixel format. */
2811 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2812 ist->hwaccel_id == HWACCEL_AUTO) {
2814 config = avcodec_get_hw_config(s->codec, i);
2817 if (!(config->methods &
2818 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2820 if (config->pix_fmt == *p)
2825 if (config->device_type != ist->hwaccel_device_type) {
2826 // Different hwaccel offered, ignore.
2830 ret = hwaccel_decode_init(s);
/* A failed init is fatal only when the user explicitly requested this
 * hwaccel (GENERIC); with AUTO the next candidate is tried. */
2832 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2833 av_log(NULL, AV_LOG_FATAL,
2834 "%s hwaccel requested for input stream #%d:%d, "
2835 "but cannot be initialized.\n",
2836 av_hwdevice_get_type_name(config->device_type),
2837 ist->file_index, ist->st->index);
2838 return AV_PIX_FMT_NONE;
/* Legacy path: look this pixel format up in the static hwaccels[] table. */
2843 const HWAccel *hwaccel = NULL;
2845 for (i = 0; hwaccels[i].name; i++) {
2846 if (hwaccels[i].pix_fmt == *p) {
2847 hwaccel = &hwaccels[i];
2852 // No hwaccel supporting this pixfmt.
2855 if (hwaccel->id != ist->hwaccel_id) {
2856 // Does not match requested hwaccel.
2860 ret = hwaccel->init(s);
2862 av_log(NULL, AV_LOG_FATAL,
2863 "%s hwaccel requested for input stream #%d:%d, "
2864 "but cannot be initialized.\n", hwaccel->name,
2865 ist->file_index, ist->st->index);
2866 return AV_PIX_FMT_NONE;
/* Propagate a pre-existing hw frames context into the decoder. */
2870 if (ist->hw_frames_ctx) {
2871 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2872 if (!s->hw_frames_ctx)
2873 return AV_PIX_FMT_NONE;
/* Remember the chosen hw pixel format for get_buffer()/retrieve_data. */
2876 ist->hwaccel_pix_fmt = *p;
2883 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2885 InputStream *ist = s->opaque;
2887 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2888 return ist->hwaccel_get_buffer(s, frame, flags);
2890 return avcodec_default_get_buffer2(s, frame, flags);
2893 static int init_input_stream(int ist_index, char *error, int error_len)
/* Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer callbacks, apply per-codec option tweaks, set up
 * any hardware device, then avcodec_open2(). On failure a human-readable
 * message is written into `error` and a negative code returned.
 * NOTE(review): intermediate source lines are missing from this extract. */
2896 InputStream *ist = input_streams[ist_index];
2898 if (ist->decoding_needed) {
2899 AVCodec *codec = ist->dec;
2901 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2902 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2903 return AVERROR(EINVAL);
/* Wire this InputStream into the codec context so the callbacks can find it. */
2906 ist->dec_ctx->opaque = ist;
2907 ist->dec_ctx->get_format = get_format;
2908 ist->dec_ctx->get_buffer2 = get_buffer;
2909 ist->dec_ctx->thread_safe_callbacks = 1;
2911 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles going to an output need end-display-time computation. */
2912 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2913 (ist->decoding_needed & DECODING_FOR_OST)) {
2914 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2915 if (ist->decoding_needed & DECODING_FOR_FILTER)
2916 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2919 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2921 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2922 * audio, and video decoders such as cuvid or mediacodec */
2923 ist->dec_ctx->pkt_timebase = ist->st->time_base;
/* Default to automatic threading unless the user chose a thread count. */
2925 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2926 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2927 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2928 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2929 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2931 ret = hw_device_setup_for_decode(ist);
2933 snprintf(error, error_len, "Device setup failed for "
2934 "decoder on input stream #%d:%d : %s",
2935 ist->file_index, ist->st->index, av_err2str(ret));
2939 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2940 if (ret == AVERROR_EXPERIMENTAL)
2941 abort_codec_experimental(codec, 0);
2943 snprintf(error, error_len,
2944 "Error while opening decoder for input stream "
2946 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left unconsumed by avcodec_open2() is an error. */
2949 assert_avoptions(ist->decoder_opts);
2952 ist->next_pts = AV_NOPTS_VALUE;
2953 ist->next_dts = AV_NOPTS_VALUE;
2958 static InputStream *get_input_stream(OutputStream *ost)
2960 if (ost->source_index >= 0)
2961 return input_streams[ost->source_index];
/* qsort()-compatible three-way comparison of two int64_t values. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return FFDIFFSIGN(lhs, rhs);
}
2970 /* open the muxer when all the streams are initialized */
2971 static int check_init_output_file(OutputFile *of, int file_index)
/* Once every stream of this output file is initialized: write the container
 * header, emit the SDP if requested, and flush packets that were queued
 * while the muxer was not yet open.
 * NOTE(review): intermediate source lines are missing from this extract. */
2975 for (i = 0; i < of->ctx->nb_streams; i++) {
2976 OutputStream *ost = output_streams[of->ost_index + i];
/* Any uninitialized stream means the header cannot be written yet. */
2977 if (!ost->initialized)
2981 of->ctx->interrupt_callback = int_cb;
2983 ret = avformat_write_header(of->ctx, &of->opts);
2985 av_log(NULL, AV_LOG_ERROR,
2986 "Could not write header for output file #%d "
2987 "(incorrect codec parameters ?): %s\n",
2988 file_index, av_err2str(ret));
2991 //assert_avoptions(of->opts);
2992 of->header_written = 1;
2994 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2996 if (sdp_filename || want_sdp)
2999 /* flush the muxing queues */
3000 for (i = 0; i < of->ctx->nb_streams; i++) {
3001 OutputStream *ost = output_streams[of->ost_index + i];
3003 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3004 if (!av_fifo_size(ost->muxing_queue))
3005 ost->mux_timebase = ost->st->time_base;
/* Drain packets buffered before the header existed into the muxer. */
3007 while (av_fifo_size(ost->muxing_queue)) {
3009 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3010 write_packet(of, &pkt, ost, 1);
3017 static int init_output_bsfs(OutputStream *ost)
/* Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base from one filter to the next, then
 * copy the final output parameters/time base back onto the stream.
 * NOTE(review): intermediate source lines are missing from this extract. */
3022 if (!ost->nb_bitstream_filters)
3025 for (i = 0; i < ost->nb_bitstream_filters; i++) {
3026 ctx = ost->bsf_ctx[i];
/* Input of filter i comes from filter i-1's output, or the stream itself. */
3028 ret = avcodec_parameters_copy(ctx->par_in,
3029 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3033 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3035 ret = av_bsf_init(ctx);
3037 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3038 ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines the stream's actual parameters. */
3043 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3044 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3048 ost->st->time_base = ctx->time_base_out;
3053 static int init_output_stream_streamcopy(OutputStream *ost)
/* Set up an output stream in stream-copy mode: derive reference codec
 * parameters from the input, resolve the codec tag for the target container,
 * and copy timing, disposition, side data and per-media-type fields.
 * NOTE(review): intermediate source lines are missing from this extract. */
3055 OutputFile *of = output_files[ost->file_index];
3056 InputStream *ist = get_input_stream(ost);
3057 AVCodecParameters *par_dst = ost->st->codecpar;
3058 AVCodecParameters *par_src = ost->ref_par;
3061 uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy requires a direct input mapping and no filtergraph. */
3063 av_assert0(ist && !ost->filter);
3065 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3067 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3069 av_log(NULL, AV_LOG_FATAL,
3070 "Error setting up codec context options.\n");
3074 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3076 av_log(NULL, AV_LOG_FATAL,
3077 "Error getting reference codec parameters.\n");
/* Keep the source codec tag only when the target container accepts it (or
 * has no tag table, or offers no alternative tag for this codec id). */
3082 unsigned int codec_tag_tmp;
3083 if (!of->ctx->oformat->codec_tag ||
3084 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3085 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3086 codec_tag = par_src->codec_tag;
3089 ret = avcodec_parameters_copy(par_dst, par_src);
3093 par_dst->codec_tag = codec_tag;
3095 if (!ost->frame_rate.num)
3096 ost->frame_rate = ist->framerate;
3097 ost->st->avg_frame_rate = ost->frame_rate;
3099 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3103 // copy timebase while removing common factors
3104 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3105 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3107 // copy estimated duration as a hint to the muxer
3108 if (ost->st->duration <= 0 && ist->st->duration > 0)
3109 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3112 ost->st->disposition = ist->st->disposition;
/* Copy all stream-level packet side data verbatim. */
3114 if (ist->st->nb_side_data) {
3115 for (i = 0; i < ist->st->nb_side_data; i++) {
3116 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3119 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3121 return AVERROR(ENOMEM);
3122 memcpy(dst_data, sd_src->data, sd_src->size);
/* A user-forced rotation overrides any display matrix from the input. */
3126 if (ost->rotate_overridden) {
3127 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3128 sizeof(int32_t) * 9);
3130 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3133 switch (par_dst->codec_type) {
3134 case AVMEDIA_TYPE_AUDIO:
3135 if (audio_volume != 256) {
3136 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Certain MP3/AC3 block_align values are bogus for copy; reset them. */
3139 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3140 par_dst->block_align= 0;
3141 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3142 par_dst->block_align= 0;
3144 case AVMEDIA_TYPE_VIDEO:
3145 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3147 av_mul_q(ost->frame_aspect_ratio,
3148 (AVRational){ par_dst->height, par_dst->width });
3149 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3150 "with stream copy may produce invalid files\n");
3152 else if (ist->st->sample_aspect_ratio.num)
3153 sar = ist->st->sample_aspect_ratio;
3155 sar = par_src->sample_aspect_ratio;
3156 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3157 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3158 ost->st->r_frame_rate = ist->st->r_frame_rate;
3162 ost->mux_timebase = ist->st->time_base;
3167 static void set_encoder_id(OutputFile *of, OutputStream *ost)
/* Stamp an "encoder" metadata tag (e.g. "Lavc... libx264") on the output
 * stream, unless one is already present. Under -bitexact (format or codec
 * flag) the library version is omitted so output stays reproducible.
 * NOTE(review): intermediate source lines are missing from this extract. */
3169 AVDictionaryEntry *e;
3171 uint8_t *encoder_string;
3172 int encoder_string_len;
3173 int format_flags = 0;
3174 int codec_flags = ost->enc_ctx->flags;
/* Respect a user-provided encoder tag. */
3176 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / -flags option strings to detect +bitexact. */
3179 e = av_dict_get(of->opts, "fflags", NULL, 0);
3181 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3184 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3186 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3188 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3191 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3194 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3195 encoder_string = av_mallocz(encoder_string_len);
3196 if (!encoder_string)
3199 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3200 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3202 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3203 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Ownership of encoder_string transfers to the dictionary. */
3204 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3205 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3208 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3209 AVCodecContext *avctx)
/* Parse the -force_key_frames time list (comma-separated times, with the
 * special "chapters[+offset]" form expanding to all chapter starts) into a
 * sorted array of pts in the encoder time base, stored on the OutputStream.
 * NOTE(review): intermediate source lines are missing from this extract. */
3212 int n = 1, i, size, index = 0;
/* First pass: count commas to size the pts array. */
3215 for (p = kf; *p; p++)
3219 pts = av_malloc_array(size, sizeof(*pts));
3221 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3226 for (i = 0; i < n; i++) {
3227 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at each chapter start. */
3232 if (!memcmp(p, "chapters", 8)) {
3234 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array to hold one entry per chapter (overflow-checked). */
3237 if (avf->nb_chapters > INT_MAX - size ||
3238 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3240 av_log(NULL, AV_LOG_FATAL,
3241 "Could not allocate forced key frames array.\n");
3244 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3245 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3247 for (j = 0; j < avf->nb_chapters; j++) {
3248 AVChapter *c = avf->chapters[j];
3249 av_assert1(index < size);
3250 pts[index++] = av_rescale_q(c->start, c->time_base,
3251 avctx->time_base) + t;
/* Plain time entry: parse and rescale into the encoder time base. */
3256 t = parse_time_or_die("force_key_frames", p, 1);
3257 av_assert1(index < size);
3258 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the list sorted so the encoder can scan it monotonically. */
3265 av_assert0(index == size);
3266 qsort(pts, size, sizeof(*pts), compare_int64);
3267 ost->forced_kf_count = size;
3268 ost->forced_kf_pts = pts;
3271 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
/* Choose the encoder time base: an explicit -enc_time_base value wins; a
 * negative value means "copy from the input stream"; otherwise fall back to
 * the caller-supplied default (with a warning when no input is available).
 * NOTE(review): intermediate source lines are missing from this extract. */
3273 InputStream *ist = get_input_stream(ost);
3274 AVCodecContext *enc_ctx = ost->enc_ctx;
3275 AVFormatContext *oc;
/* Positive user-specified time base: use it verbatim. */
3277 if (ost->enc_timebase.num > 0) {
3278 enc_ctx->time_base = ost->enc_timebase;
/* Negative sentinel: mirror the input stream's time base. */
3282 if (ost->enc_timebase.num < 0) {
3284 enc_ctx->time_base = ist->st->time_base;
3288 oc = output_files[ost->file_index]->ctx;
3289 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3292 enc_ctx->time_base = default_time_base;
3295 static int init_output_stream_encode(OutputStream *ost)
/* Configure the encoder context for an output stream that is being encoded:
 * metadata/disposition handling, frame-rate selection for video, then
 * per-media-type parameters (sample/pixel format, dimensions, time base,
 * forced keyframes, field order, ...), all sourced from the buffersink of
 * the stream's filtergraph and the corresponding input stream.
 * NOTE(review): intermediate source lines are missing from this extract. */
3297 InputStream *ist = get_input_stream(ost);
3298 AVCodecContext *enc_ctx = ost->enc_ctx;
3299 AVCodecContext *dec_ctx = NULL;
3300 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3303 set_encoder_id(output_files[ost->file_index], ost);
3305 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3306 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3307 // which have to be filtered out to prevent leaking them to output files.
3308 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3311 ost->st->disposition = ist->st->disposition;
3313 dec_ctx = ist->dec_ctx;
3315 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the file, mark it as the
 * default audio/video stream. */
3317 for (j = 0; j < oc->nb_streams; j++) {
3318 AVStream *st = oc->streams[j];
3319 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3322 if (j == oc->nb_streams)
3323 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3324 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3325 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Video frame rate: filtergraph output, then -r on input, then the input's
 * r_frame_rate, finally a hard-coded 25 fps fallback. */
3328 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3329 if (!ost->frame_rate.num)
3330 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3331 if (ist && !ost->frame_rate.num)
3332 ost->frame_rate = ist->framerate;
3333 if (ist && !ost->frame_rate.num)
3334 ost->frame_rate = ist->st->r_frame_rate;
3335 if (ist && !ost->frame_rate.num) {
3336 ost->frame_rate = (AVRational){25, 1};
3337 av_log(NULL, AV_LOG_WARNING,
3339 "about the input framerate is available. Falling "
3340 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3341 "if you want a different framerate.\n",
3342 ost->file_index, ost->index);
/* Snap to the encoder's supported frame rates unless -force_fps is set. */
3345 if (ost->enc->supported_framerates && !ost->force_fps) {
3346 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3347 ost->frame_rate = ost->enc->supported_framerates[idx];
3349 // reduce frame rate for mpeg4 to be within the spec limits
3350 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3351 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3352 ost->frame_rate.num, ost->frame_rate.den, 65535);
3356 switch (enc_ctx->codec_type) {
3357 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the buffersink at the end of the graph. */
3358 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3360 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3361 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3362 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3363 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3364 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3366 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3369 case AVMEDIA_TYPE_VIDEO:
3370 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3372 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3373 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3374 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3375 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3376 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3377 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed in AV_TIME_BASE_Q; convert them. */
3379 for (j = 0; j < ost->forced_kf_count; j++)
3380 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3382 enc_ctx->time_base);
3384 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3385 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
/* SAR: -aspect option (converted from DAR) overrides the filter output. */
3386 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3387 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3388 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3389 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3391 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3393 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3394 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3396 enc_ctx->framerate = ost->frame_rate;
3398 ost->st->avg_frame_rate = ost->frame_rate;
/* When geometry or pixel format changed relative to the decoder, the
 * original bits_per_raw_sample no longer applies. */
3401 enc_ctx->width != dec_ctx->width ||
3402 enc_ctx->height != dec_ctx->height ||
3403 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3404 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -top option: 0 = bottom field first, 1 = top field first. */
3407 if (ost->top_field_first == 0) {
3408 enc_ctx->field_order = AV_FIELD_BB;
3409 } else if (ost->top_field_first == 1) {
3410 enc_ctx->field_order = AV_FIELD_TT;
3413 if (ost->forced_keyframes) {
3414 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3415 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3416 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3418 av_log(NULL, AV_LOG_ERROR,
3419 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3422 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3423 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3424 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3425 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3427 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3428 // parse it only for static kf timings
3429 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3430 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3434 case AVMEDIA_TYPE_SUBTITLE:
3435 enc_ctx->time_base = AV_TIME_BASE_Q;
3436 if (!enc_ctx->width) {
3437 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3438 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3441 case AVMEDIA_TYPE_DATA:
3448 ost->mux_timebase = enc_ctx->time_base;
3453 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3457 if (ost->encoding_needed) {
3458 AVCodec *codec = ost->enc;
3459 AVCodecContext *dec = NULL;
3462 ret = init_output_stream_encode(ost);
3466 if ((ist = get_input_stream(ost)))
3468 if (dec && dec->subtitle_header) {
3469 /* ASS code assumes this buffer is null terminated so add extra byte. */
3470 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3471 if (!ost->enc_ctx->subtitle_header)
3472 return AVERROR(ENOMEM);
3473 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3474 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3476 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3477 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3478 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3480 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3481 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3482 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3484 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3485 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3486 av_buffersink_get_format(ost->filter->filter)) {
3487 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3488 if (!ost->enc_ctx->hw_frames_ctx)
3489 return AVERROR(ENOMEM);
3491 ret = hw_device_setup_for_encode(ost);
3493 snprintf(error, error_len, "Device setup failed for "
3494 "encoder on output stream #%d:%d : %s",
3495 ost->file_index, ost->index, av_err2str(ret));
3499 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3500 int input_props = 0, output_props = 0;
3501 AVCodecDescriptor const *input_descriptor =
3502 avcodec_descriptor_get(dec->codec_id);
3503 AVCodecDescriptor const *output_descriptor =
3504 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3505 if (input_descriptor)
3506 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3507 if (output_descriptor)
3508 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3509 if (input_props && output_props && input_props != output_props) {
3510 snprintf(error, error_len,
3511 "Subtitle encoding currently only possible from text to text "
3512 "or bitmap to bitmap");
3513 return AVERROR_INVALIDDATA;
3517 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3518 if (ret == AVERROR_EXPERIMENTAL)
3519 abort_codec_experimental(codec, 1);
3520 snprintf(error, error_len,
3521 "Error while opening encoder for output stream #%d:%d - "
3522 "maybe incorrect parameters such as bit_rate, rate, width or height",
3523 ost->file_index, ost->index);
3526 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3527 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3528 av_buffersink_set_frame_size(ost->filter->filter,
3529 ost->enc_ctx->frame_size);
3530 assert_avoptions(ost->encoder_opts);
3531 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3532 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3533 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3534 " It takes bits/s as argument, not kbits/s\n");
3536 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3538 av_log(NULL, AV_LOG_FATAL,
3539 "Error initializing the output stream codec context.\n");
3543 * FIXME: ost->st->codec should't be needed here anymore.
3545 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3549 if (ost->enc_ctx->nb_coded_side_data) {
3552 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3553 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3556 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3558 return AVERROR(ENOMEM);
3559 memcpy(dst_data, sd_src->data, sd_src->size);
3564 * Add global input side data. For now this is naive, and copies it
3565 * from the input stream's global side data. All side data should
3566 * really be funneled over AVFrame and libavfilter, then added back to
3567 * packet side data, and then potentially using the first packet for
3572 for (i = 0; i < ist->st->nb_side_data; i++) {
3573 AVPacketSideData *sd = &ist->st->side_data[i];
3574 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3576 return AVERROR(ENOMEM);
3577 memcpy(dst, sd->data, sd->size);
3578 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3579 av_display_rotation_set((uint32_t *)dst, 0);
3583 // copy timebase while removing common factors
3584 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3585 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3587 // copy estimated duration as a hint to the muxer
3588 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3589 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3591 ost->st->codec->codec= ost->enc_ctx->codec;
3592 } else if (ost->stream_copy) {
3593 ret = init_output_stream_streamcopy(ost);
3598 // parse user provided disposition, and update stream values
3599 if (ost->disposition) {
3600 static const AVOption opts[] = {
3601 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3602 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3603 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3604 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3605 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3606 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3607 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3608 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3609 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3610 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3611 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3612 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3613 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3614 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3615 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3616 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3619 static const AVClass class = {
3621 .item_name = av_default_item_name,
3623 .version = LIBAVUTIL_VERSION_INT,
3625 const AVClass *pclass = &class;
3627 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3632 /* initialize bitstream filters for the output stream
3633 * needs to be done here, because the codec id for streamcopy is not
3634 * known until now */
3635 ret = init_output_bsfs(ost);
3639 ost->initialized = 1;
3641 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3648 static void report_new_stream(int input_index, AVPacket *pkt)
3650 InputFile *file = input_files[input_index];
3651 AVStream *st = file->ctx->streams[pkt->stream_index];
3653 if (pkt->stream_index < file->nb_streams_warn)
3655 av_log(file->ctx, AV_LOG_WARNING,
3656 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3657 av_get_media_type_string(st->codecpar->codec_type),
3658 input_index, pkt->stream_index,
3659 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3660 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * One-time setup before the main transcode loop: wire filtergraph outputs to
 * a source stream, arm framerate emulation, open decoders and (non-filtered)
 * encoders, discard unused programs, write headers for stream-less outputs,
 * and print the stream-mapping report.
 *
 * NOTE(review): this numbered listing elides some original lines (braces,
 * returns, gotos); comments below describe only the visible flow.
 */
3663 static int transcode_init(void)
3665 int ret = 0, i, j, k;
3666 AVFormatContext *oc;
3669 char error[1024] = {0};
/* For filtergraph outputs without a mapped source, derive source_index from
 * the (single) input of the graph so downstream code has a stream to refer to. */
3671 for (i = 0; i < nb_filtergraphs; i++) {
3672 FilterGraph *fg = filtergraphs[i];
3673 for (j = 0; j < fg->nb_outputs; j++) {
3674 OutputFilter *ofilter = fg->outputs[j];
3675 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3677 if (fg->nb_inputs != 1)
3679 for (k = nb_input_streams-1; k >= 0 ; k--)
3680 if (fg->inputs[0]->ist == input_streams[k])
3682 ofilter->ost->source_index = k;
3686 /* init framerate emulation */
3687 for (i = 0; i < nb_input_files; i++) {
3688 InputFile *ifile = input_files[i];
3689 if (ifile->rate_emu)
3690 for (j = 0; j < ifile->nb_streams; j++)
3691 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3694 /* init input streams */
3695 for (i = 0; i < nb_input_streams; i++)
3696 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on decoder-open failure, close any encoder contexts opened so far */
3697 for (i = 0; i < nb_output_streams; i++) {
3698 ost = output_streams[i];
3699 avcodec_close(ost->enc_ctx);
3704 /* open each encoder */
3705 for (i = 0; i < nb_output_streams; i++) {
3706 // skip streams fed from filtergraphs until we have a frame for them
3707 if (output_streams[i]->filter)
3710 ret = init_output_stream(output_streams[i], error, sizeof(error));
3715 /* discard unused programs */
3716 for (i = 0; i < nb_input_files; i++) {
3717 InputFile *ifile = input_files[i];
3718 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3719 AVProgram *p = ifile->ctx->programs[j];
3720 int discard = AVDISCARD_ALL;
/* keep the program if any of its streams is actually used */
3722 for (k = 0; k < p->nb_stream_indexes; k++)
3723 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3724 discard = AVDISCARD_DEFAULT;
3727 p->discard = discard;
3731 /* write headers for files with no streams */
3732 for (i = 0; i < nb_output_files; i++) {
3733 oc = output_files[i]->ctx;
3734 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3735 ret = check_init_output_file(output_files[i], i);
3742 /* dump the stream mapping */
3743 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
/* first: input streams feeding complex (non-simple) filtergraphs */
3744 for (i = 0; i < nb_input_streams; i++) {
3745 ist = input_streams[i];
3747 for (j = 0; j < ist->nb_filters; j++) {
3748 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3749 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3750 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3751 ist->filters[j]->name);
3752 if (nb_filtergraphs > 1)
3753 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3754 av_log(NULL, AV_LOG_INFO, "\n");
/* then: one mapping line per output stream */
3759 for (i = 0; i < nb_output_streams; i++) {
3760 ost = output_streams[i];
3762 if (ost->attachment_filename) {
3763 /* an attached file */
3764 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3765 ost->attachment_filename, ost->file_index, ost->index);
3769 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3770 /* output from a complex graph */
3771 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3772 if (nb_filtergraphs > 1)
3773 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3775 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3776 ost->index, ost->enc ? ost->enc->name : "?");
/* plain input->output mapping (stream copy or simple transcode) */
3780 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3781 input_streams[ost->source_index]->file_index,
3782 input_streams[ost->source_index]->st->index,
3785 if (ost->sync_ist != input_streams[ost->source_index])
3786 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3787 ost->sync_ist->file_index,
3788 ost->sync_ist->st->index);
3789 if (ost->stream_copy)
3790 av_log(NULL, AV_LOG_INFO, " (copy)");
3792 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3793 const AVCodec *out_codec = ost->enc;
3794 const char *decoder_name = "?";
3795 const char *in_codec_name = "?";
3796 const char *encoder_name = "?";
3797 const char *out_codec_name = "?";
3798 const AVCodecDescriptor *desc;
/* "native" is printed when the selected codec is the default one for its id */
3801 decoder_name = in_codec->name;
3802 desc = avcodec_descriptor_get(in_codec->id);
3804 in_codec_name = desc->name;
3805 if (!strcmp(decoder_name, in_codec_name))
3806 decoder_name = "native";
3810 encoder_name = out_codec->name;
3811 desc = avcodec_descriptor_get(out_codec->id);
3813 out_codec_name = desc->name;
3814 if (!strcmp(encoder_name, out_codec_name))
3815 encoder_name = "native";
3818 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3819 in_codec_name, decoder_name,
3820 out_codec_name, encoder_name);
3822 av_log(NULL, AV_LOG_INFO, "\n");
/* error path: print the accumulated message */
3826 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* success: let other threads (e.g. signal handlers) know init completed */
3830 atomic_store(&transcode_init_done, 1);
3835 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3836 static int need_output(void)
3840 for (i = 0; i < nb_output_streams; i++) {
3841 OutputStream *ost = output_streams[i];
3842 OutputFile *of = output_files[ost->file_index];
3843 AVFormatContext *os = output_files[ost->file_index]->ctx;
3845 if (ost->finished ||
3846 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3848 if (ost->frame_number >= ost->max_frames) {
3850 for (j = 0; j < of->ctx->nb_streams; j++)
3851 close_output_stream(output_streams[of->ost_index + j]);
3862 * Select the output stream to process.
3864 * @return selected output stream, or NULL if none available
3866 static OutputStream *choose_output(void)
3869 int64_t opts_min = INT64_MAX;
3870 OutputStream *ost_min = NULL;
3872 for (i = 0; i < nb_output_streams; i++) {
3873 OutputStream *ost = output_streams[i];
3874 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3875 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3877 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3878 av_log(NULL, AV_LOG_DEBUG,
3879 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3880 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3882 if (!ost->initialized && !ost->inputs_done)
3885 if (!ost->finished && opts < opts_min) {
3887 ost_min = ost->unavailable ? NULL : ost;
/* Toggle terminal echo on stdin; used while reading interactive commands.
 * No-op on platforms without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) == 0) {
        if (on)
            tty.c_lflag |= ECHO;
        else
            tty.c_lflag &= ~ECHO;
        tcsetattr(0, TCSANOW, &tty);
    }
#endif
}
/*
 * Poll stdin (at most every 100ms) for interactive single-key commands:
 * verbosity (+/-), QP histogram (s), packet dumping (h/D), filter commands
 * (c/C), debug flags (d/D) and help (?). Returns AVERROR_EXIT when the user
 * or a signal requested termination.
 *
 * NOTE(review): this numbered listing elides some original lines (braces,
 * returns, read_key call); comments describe only the visible flow.
 */
3905 static int check_keyboard_interaction(int64_t cur_time)
3908 static int64_t last_time;
3909 if (received_nb_signals)
3910 return AVERROR_EXIT;
3911 /* read_key() returns 0 on EOF */
/* rate-limit the polling to once per 100ms */
3912 if(cur_time - last_time >= 100000 && !run_as_daemon){
3914 last_time = cur_time;
3918 return AVERROR_EXIT;
3919 if (key == '+') av_log_set_level(av_log_get_level()+10);
3920 if (key == '-') av_log_set_level(av_log_get_level()-10);
3921 if (key == 's') qp_hist ^= 1;
3924 do_hex_dump = do_pkt_dump = 0;
3925 } else if(do_pkt_dump){
3929 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a "<target> <time> <command> [<arg>]" line and forward it
 * to the filtergraphs; 'c' sends to the first matching filter only */
3931 if (key == 'c' || key == 'C'){
3932 char buf[4096], target[64], command[256], arg[256] = {0};
3935 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3938 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3943 fprintf(stderr, "\n");
3945 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3946 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3947 target, time, command, arg);
3948 for (i = 0; i < nb_filtergraphs; i++) {
3949 FilterGraph *fg = filtergraphs[i];
3952 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3953 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3954 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3955 } else if (key == 'c') {
3956 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3957 ret = AVERROR_PATCHWELCOME;
3959 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3961 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3966 av_log(NULL, AV_LOG_ERROR,
3967 "Parse error, at least 3 arguments were expected, "
3968 "only %d given in string '%s'\n", n, buf);
/* 'D': cycle decoder debug flags; 'd': prompt for an explicit value */
3971 if (key == 'd' || key == 'D'){
3974 debug = input_streams[0]->st->codec->debug<<1;
3975 if(!debug) debug = 1;
3976 while(debug & (FF_DEBUG_DCT_COEFF
3978 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3980 )) //unsupported, would just crash
3987 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3992 fprintf(stderr, "\n");
3993 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3994 fprintf(stderr,"error parsing debug value\n");
/* propagate the chosen debug flags to every decoder and encoder context */
3996 for(i=0;i<nb_input_streams;i++) {
3997 input_streams[i]->st->codec->debug = debug;
3999 for(i=0;i<nb_output_streams;i++) {
4000 OutputStream *ost = output_streams[i];
4001 ost->enc_ctx->debug = debug;
4003 if(debug) av_log_set_level(AV_LOG_DEBUG);
4004 fprintf(stderr,"debug=%d\n", debug);
/* '?': print the key help table */
4007 fprintf(stderr, "key function\n"
4008 "? show this help\n"
4009 "+ increase verbosity\n"
4010 "- decrease verbosity\n"
4011 "c Send command to first matching filter supporting it\n"
4012 "C Send/Queue command to all matching filters\n"
4013 "D cycle through available debug modes\n"
4014 "h dump packets/hex press to cycle through the 3 states\n"
4016 "s Show QP histogram\n"
/*
 * Per-input-file demuxer thread body: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. On read error
 * or EOF the error is published to the receiving side via
 * av_thread_message_queue_set_err_recv().
 *
 * NOTE(review): this numbered listing elides the surrounding loop and some
 * braces/declarations; comments describe only the visible flow.
 */
4023 static void *input_thread(void *arg)
/* non-blocking inputs start with NONBLOCK sends; downgraded below on EAGAIN */
4026 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4031 ret = av_read_frame(f->ctx, &pkt);
4033 if (ret == AVERROR(EAGAIN)) {
/* read failed: tell the receiver which error ended this input */
4038 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4041 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: retry blocking and warn the user once */
4042 if (flags && ret == AVERROR(EAGAIN)) {
4044 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4045 av_log(f->ctx, AV_LOG_WARNING,
4046 "Thread message queue blocking; consider raising the "
4047 "thread_queue_size option (current value: %d)\n",
4048 f->thread_queue_size);
/* send failed: drop the packet and propagate the error to the receiver */
4051 if (ret != AVERROR_EOF)
4052 av_log(f->ctx, AV_LOG_ERROR,
4053 "Unable to send packet to main thread: %s\n",
4055 av_packet_unref(&pkt);
4056 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4064 static void free_input_thread(int i)
4066 InputFile *f = input_files[i];
4069 if (!f || !f->in_thread_queue)
4071 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4072 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4073 av_packet_unref(&pkt);
4075 pthread_join(f->thread, NULL);
4077 av_thread_message_queue_free(&f->in_thread_queue);
4080 static void free_input_threads(void)
4084 for (i = 0; i < nb_input_files; i++)
4085 free_input_thread(i);
4088 static int init_input_thread(int i)
4091 InputFile *f = input_files[i];
4093 if (nb_input_files == 1)
4096 if (f->ctx->pb ? !f->ctx->pb->seekable :
4097 strcmp(f->ctx->iformat->name, "lavfi"))
4098 f->non_blocking = 1;
4099 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4100 f->thread_queue_size, sizeof(AVPacket));
4104 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4105 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4106 av_thread_message_queue_free(&f->in_thread_queue);
4107 return AVERROR(ret);
4113 static int init_input_threads(void)
4117 for (i = 0; i < nb_input_files; i++) {
4118 ret = init_input_thread(i);
4125 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4127 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4129 AV_THREAD_MESSAGE_NONBLOCK : 0);
4133 static int get_input_packet(InputFile *f, AVPacket *pkt)
4137 for (i = 0; i < f->nb_streams; i++) {
4138 InputStream *ist = input_streams[f->ist_index + i];
4139 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4140 int64_t now = av_gettime_relative() - ist->start;
4142 return AVERROR(EAGAIN);
4147 if (nb_input_files > 1)
4148 return get_input_packet_mt(f, pkt);
4150 return av_read_frame(f->ctx, pkt);
4153 static int got_eagain(void)
4156 for (i = 0; i < nb_output_streams; i++)
4157 if (output_streams[i]->unavailable)
4162 static void reset_eagain(void)
4165 for (i = 0; i < nb_input_files; i++)
4166 input_files[i]->eagain = 0;
4167 for (i = 0; i < nb_output_streams; i++)
4168 output_streams[i]->unavailable = 0;
4171 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4172 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4173 AVRational time_base)
4179 return tmp_time_base;
4182 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4185 return tmp_time_base;
/*
 * Seek input file back to its start (used by -stream_loop) and update
 * ifile->duration/time_base with the length of the iteration just played,
 * so timestamps of the next loop can be offset correctly.
 *
 * NOTE(review): this numbered listing elides some original lines (braces,
 * continues, the function tail); comments describe only the visible flow.
 */
4191 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4194 AVCodecContext *avctx;
4195 int i, ret, has_audio = 0;
4196 int64_t duration = 0;
4198 ret = av_seek_frame(is, -1, is->start_time, 0);
/* first pass: detect whether any audio stream produced samples */
4202 for (i = 0; i < ifile->nb_streams; i++) {
4203 ist = input_streams[ifile->ist_index + i];
4204 avctx = ist->dec_ctx;
4206 /* duration is the length of the last frame in a stream
4207 * when audio stream is present we don't care about
4208 * last video frame length because it's not defined exactly */
4209 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* second pass: compute the last-frame duration per stream */
4213 for (i = 0; i < ifile->nb_streams; i++) {
4214 ist = input_streams[ifile->ist_index + i];
4215 avctx = ist->dec_ctx;
/* audio: last-frame length derived from decoded sample count */
4218 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4219 AVRational sample_rate = {1, avctx->sample_rate};
4221 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: one frame at the forced or average frame rate */
4226 if (ist->framerate.num) {
4227 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4228 } else if (ist->st->avg_frame_rate.num) {
4229 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4234 if (!ifile->duration)
4235 ifile->time_base = ist->st->time_base;
4236 /* the total duration of the stream, max_pts - min_pts is
4237 * the duration of the stream without the last frame */
4238 duration += ist->max_pts - ist->min_pts;
4239 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* finite loop count: one iteration consumed */
4243 if (ifile->loop > 0)
/*
 * Read and process one packet from the given input file. Return codes:
 *
 * NOTE(review): this numbered listing elides some original lines (braces,
 * returns, the AVPacket declaration); comments describe only the visible flow.
 */
4251 * - 0 -- one packet was read and processed
4252 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4253 * this function should be called again
4254 * - AVERROR_EOF -- this function should not be called again
4256 static int process_input(int file_index)
4258 InputFile *ifile = input_files[file_index];
4259 AVFormatContext *is;
4262 int ret, thread_ret, i, j;
4267 ret = get_input_packet(ifile, &pkt);
4269 if (ret == AVERROR(EAGAIN)) {
/* read failed with -stream_loop active: flush decoders, seek back to the
 * start (restarting the demuxer thread around the seek) and retry */
4273 if (ret < 0 && ifile->loop) {
4274 AVCodecContext *avctx;
4275 for (i = 0; i < ifile->nb_streams; i++) {
4276 ist = input_streams[ifile->ist_index + i];
4277 avctx = ist->dec_ctx;
4278 if (ist->decoding_needed) {
4279 ret = process_input_packet(ist, NULL, 1);
4282 avcodec_flush_buffers(avctx);
4286 free_input_thread(file_index);
4288 ret = seek_to_start(ifile, is);
4290 thread_ret = init_input_thread(file_index);
4295 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4297 ret = get_input_packet(ifile, &pkt);
4298 if (ret == AVERROR(EAGAIN)) {
/* genuine EOF (or error): report, flush all decoders of this file and
 * finish output streams that do not go through lavfi */
4304 if (ret != AVERROR_EOF) {
4305 print_error(is->url, ret);
4310 for (i = 0; i < ifile->nb_streams; i++) {
4311 ist = input_streams[ifile->ist_index + i];
4312 if (ist->decoding_needed) {
4313 ret = process_input_packet(ist, NULL, 0);
4318 /* mark all outputs that don't go through lavfi as finished */
4319 for (j = 0; j < nb_output_streams; j++) {
4320 OutputStream *ost = output_streams[j];
4322 if (ost->source_index == ifile->ist_index + i &&
4323 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4324 finish_output_stream(ost);
4328 ifile->eof_reached = 1;
4329 return AVERROR(EAGAIN);
4335 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4336 is->streams[pkt.stream_index]);
4338 /* the following test is needed in case new streams appear
4339 dynamically in stream : we ignore them */
4340 if (pkt.stream_index >= ifile->nb_streams) {
4341 report_new_stream(file_index, &pkt);
4342 goto discard_packet;
4345 ist = input_streams[ifile->ist_index + pkt.stream_index];
4347 ist->data_size += pkt.size;
4351 goto discard_packet;
4353 if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4354 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4355 "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
/* debug_ts: dump raw demuxer timestamps before any correction */
4361 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4362 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4363 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4364 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4365 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4366 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4367 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4368 av_ts2str(input_files[ist->file_index]->ts_offset),
4369 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for streams with < 64 pts_wrap_bits */
4372 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4373 int64_t stime, stime2;
4374 // Correcting starttime based on the enabled streams
4375 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4376 // so we instead do it here as part of discontinuity handling
4377 if ( ist->next_dts == AV_NOPTS_VALUE
4378 && ifile->ts_offset == -is->start_time
4379 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4380 int64_t new_start_time = INT64_MAX;
4381 for (i=0; i<is->nb_streams; i++) {
4382 AVStream *st = is->streams[i];
4383 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4385 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4387 if (new_start_time > is->start_time) {
4388 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4389 ifile->ts_offset = -new_start_time;
4393 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4394 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4395 ist->wrap_correction_done = 1;
/* unwrap dts/pts that already jumped past the wrap point */
4397 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4398 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4399 ist->wrap_correction_done = 0;
4401 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4402 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4403 ist->wrap_correction_done = 0;
4407 /* add the stream-global side data to the first packet */
4408 if (ist->nb_packets == 1) {
4409 for (i = 0; i < ist->st->nb_side_data; i++) {
4410 AVPacketSideData *src_sd = &ist->st->side_data[i];
4413 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4416 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4419 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4423 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file timestamp offset and per-stream ts scale */
4427 if (pkt.dts != AV_NOPTS_VALUE)
4428 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4429 if (pkt.pts != AV_NOPTS_VALUE)
4430 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4432 if (pkt.pts != AV_NOPTS_VALUE)
4433 pkt.pts *= ist->ts_scale;
4434 if (pkt.dts != AV_NOPTS_VALUE)
4435 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity handling: compare against the file's last ts */
4437 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4438 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4439 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4440 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4441 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4442 int64_t delta = pkt_dts - ifile->last_ts;
4443 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4444 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4445 ifile->ts_offset -= delta;
4446 av_log(NULL, AV_LOG_DEBUG,
4447 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4448 delta, ifile->ts_offset);
4449 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4450 if (pkt.pts != AV_NOPTS_VALUE)
4451 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* offset timestamps by the accumulated loop duration (-stream_loop) */
4455 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4456 if (pkt.pts != AV_NOPTS_VALUE) {
4457 pkt.pts += duration;
4458 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4459 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4462 if (pkt.dts != AV_NOPTS_VALUE)
4463 pkt.dts += duration;
/* intra-stream discontinuity handling: compare against the predicted dts */
4465 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4466 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4467 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4468 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4470 int64_t delta = pkt_dts - ist->next_dts;
4471 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4472 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4473 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4474 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4475 ifile->ts_offset -= delta;
4476 av_log(NULL, AV_LOG_DEBUG,
4477 "timestamp discontinuity for stream #%d:%d "
4478 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4479 ist->file_index, ist->st->index, ist->st->id,
4480 av_get_media_type_string(ist->dec_ctx->codec_type),
4481 delta, ifile->ts_offset);
4482 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4483 if (pkt.pts != AV_NOPTS_VALUE)
4484 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: drop timestamps that exceed the error threshold */
4487 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4488 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4489 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4490 pkt.dts = AV_NOPTS_VALUE;
4492 if (pkt.pts != AV_NOPTS_VALUE){
4493 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4494 delta = pkt_pts - ist->next_dts;
4495 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4496 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4497 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4498 pkt.pts = AV_NOPTS_VALUE;
/* remember the last dts of this file for inter-stream discontinuity checks */
4504 if (pkt.dts != AV_NOPTS_VALUE)
4505 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* debug_ts: dump timestamps after all corrections */
4508 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4509 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4510 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4511 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4512 av_ts2str(input_files[ist->file_index]->ts_offset),
4513 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4516 sub2video_heartbeat(ist, pkt.pts);
4518 process_input_packet(ist, &pkt, 0);
4521 av_packet_unref(&pkt);
/*
 * NOTE(review): this numbered listing elides some original lines
 * (declarations, returns, braces); comments describe only the visible flow.
 */
4527 * Perform a step of transcoding for the specified filter graph.
4529 * @param[in] graph filter graph to consider
4530 * @param[out] best_ist input stream where a frame would allow to continue
4531 * @return 0 for success, <0 for error
4533 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4536 int nb_requests, nb_requests_max = 0;
4537 InputFilter *ifilter;
/* ask the graph to produce output on its oldest-needing link */
4541 ret = avfilter_graph_request_oldest(graph->graph);
/* success: drain whatever the sinks now have */
4543 return reap_filters(0);
/* EOF from the graph: flush the sinks and close all its output streams */
4545 if (ret == AVERROR_EOF) {
4546 ret = reap_filters(1);
4547 for (i = 0; i < graph->nb_outputs; i++)
4548 close_output_stream(graph->outputs[i]->ost);
4551 if (ret != AVERROR(EAGAIN))
/* EAGAIN: find the input with the most failed buffer requests — that is
 * the stream whose next frame would best unblock the graph */
4554 for (i = 0; i < graph->nb_inputs; i++) {
4555 ifilter = graph->inputs[i];
/* skip inputs whose file is stalled or already at EOF */
4557 if (input_files[ist->file_index]->eagain ||
4558 input_files[ist->file_index]->eof_reached)
4560 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4561 if (nb_requests > nb_requests_max) {
4562 nb_requests_max = nb_requests;
/* no candidate input found: mark all of the graph's outputs unavailable */
4568 for (i = 0; i < graph->nb_outputs; i++)
4569 graph->outputs[i]->ost->unavailable = 1;
/*
 * NOTE(review): this numbered listing elides some original lines (braces,
 * returns, continues); comments describe only the visible flow.
 */
4575 * Run a single step of transcoding.
4577 * @return 0 for success, <0 for error
4579 static int transcode_step(void)
4582 InputStream *ist = NULL;
/* pick the output stream that is furthest behind */
4585 ost = choose_output();
4592 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* the stream's filtergraph is not configured yet: try to configure it once
 * all of its inputs know their format */
4596 if (ost->filter && !ost->filter->graph->graph) {
4597 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4598 ret = configure_filtergraph(ost->filter->graph);
4600 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* configured graph: initialize the stream if needed, then ask the graph
 * which input stream should be fed next */
4606 if (ost->filter && ost->filter->graph->graph) {
4607 if (!ost->initialized) {
4608 char error[1024] = {0};
4609 ret = init_output_stream(ost, error, sizeof(error));
4611 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4612 ost->file_index, ost->index, error);
4616 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* unconfigured graph: check whether any of its inputs can still deliver */
4620 } else if (ost->filter) {
4622 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4623 InputFilter *ifilter = ost->filter->graph->inputs[i];
4624 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* all inputs exhausted without output: mark the stream done */
4630 ost->inputs_done = 1;
/* no filter: read directly from the mapped source stream */
4634 av_assert0(ost->source_index >= 0);
4635 ist = input_streams[ost->source_index];
4638 ret = process_input(ist->file_index);
4639 if (ret == AVERROR(EAGAIN)) {
4640 if (input_files[ist->file_index]->eagain)
4641 ost->unavailable = 1;
/* EOF from process_input is not an error at this level */
4646 return ret == AVERROR_EOF ? 0 : ret;
4648 return reap_filters(0);
/*
 * NOTE(review): this numbered listing elides some original lines (labels,
 * returns, braces); comments describe only the visible flow.
 */
4652 * The following code is the main loop of the file converter
4654 static int transcode(void)
4657 AVFormatContext *os;
4660 int64_t timer_start;
4661 int64_t total_packets_written = 0;
4663 ret = transcode_init();
4667 if (stdin_interaction) {
4668 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4671 timer_start = av_gettime_relative();
4674 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode_step() per iteration until done or interrupted */
4678 while (!received_sigterm) {
4679 int64_t cur_time= av_gettime_relative();
4681 /* if 'q' pressed, exits */
4682 if (stdin_interaction)
4683 if (check_keyboard_interaction(cur_time) < 0)
4686 /* check if there's any stream where output is still needed */
4687 if (!need_output()) {
4688 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4692 ret = transcode_step();
4693 if (ret < 0 && ret != AVERROR_EOF) {
4694 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4698 /* dump report by using the output first video and audio streams */
4699 print_report(0, timer_start, cur_time);
4702 free_input_threads();
4705 /* at the end of stream, we must flush the decoder buffers */
4706 for (i = 0; i < nb_input_streams; i++) {
4707 ist = input_streams[i];
4708 if (!input_files[ist->file_index]->eof_reached) {
4709 process_input_packet(ist, NULL, 0);
4716 /* write the trailer if needed and close file */
4717 for (i = 0; i < nb_output_files; i++) {
4718 os = output_files[i]->ctx;
4719 if (!output_files[i]->header_written) {
4720 av_log(NULL, AV_LOG_ERROR,
4721 "Nothing was written into output file %d (%s), because "
4722 "at least one of its streams received no packets.\n",
4726 if ((ret = av_write_trailer(os)) < 0) {
4727 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4733 /* dump report by using the first video and audio streams */
4734 print_report(1, timer_start, av_gettime_relative());
4736 /* close each encoder */
4737 for (i = 0; i < nb_output_streams; i++) {
4738 ost = output_streams[i];
4739 if (ost->encoding_needed) {
4740 av_freep(&ost->enc_ctx->stats_in);
4742 total_packets_written += ost->packets_written;
/* -abort_on empty_output: treat a packet-less run as fatal */
4745 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4746 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4750 /* close each decoder */
4751 for (i = 0; i < nb_input_streams; i++) {
4752 ist = input_streams[i];
4753 if (ist->decoding_needed) {
4754 avcodec_close(ist->dec_ctx);
4755 if (ist->hwaccel_uninit)
4756 ist->hwaccel_uninit(ist->dec_ctx);
4760 av_buffer_unref(&hw_device_ctx);
4761 hw_device_free_all();
/* cleanup path (also reached on error): release per-stream resources */
4768 free_input_threads();
4771 if (output_streams) {
4772 for (i = 0; i < nb_output_streams; i++) {
4773 ost = output_streams[i];
4776 if (fclose(ost->logfile))
4777 av_log(NULL, AV_LOG_ERROR,
4778 "Error closing logfile, loss of information possible: %s\n",
4779 av_err2str(AVERROR(errno)));
4780 ost->logfile = NULL;
4782 av_freep(&ost->forced_kf_pts);
4783 av_freep(&ost->apad);
4784 av_freep(&ost->disposition);
4785 av_dict_free(&ost->encoder_opts);
4786 av_dict_free(&ost->sws_dict);
4787 av_dict_free(&ost->swr_opts);
4788 av_dict_free(&ost->resample_opts);
/*
 * Take a snapshot of wall-clock, user-CPU and system-CPU time (all in
 * microseconds) for the -benchmark option.  Platform specific:
 * getrusage() on POSIX, GetProcessTimes() on Windows, zeros otherwise.
 */
4795 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
/* real_usec (first member) is always available via av_gettime_relative() */
4797 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4799 struct rusage rusage;
4801 getrusage(RUSAGE_SELF, &rusage);
/* convert timeval (sec + usec) to a single microsecond count */
4802 time_stamps.user_usec =
4803 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4804 time_stamps.sys_usec =
4805 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4806 #elif HAVE_GETPROCESSTIMES
4808 FILETIME c, e, k, u;
4809 proc = GetCurrentProcess();
4810 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100 ns units; reassemble the 64-bit value and
 * divide by 10 to get microseconds */
4811 time_stamps.user_usec =
4812 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4813 time_stamps.sys_usec =
4814 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* no CPU-time API available on this platform */
4816 time_stamps.user_usec = time_stamps.sys_usec = 0;
/*
 * Return the peak memory usage of this process in bytes (shown by
 * -benchmark), using whichever platform API is available.
 * NOTE(review): the fallback return for other platforms is elided in
 * this excerpt.
 */
4821 static int64_t getmaxrss(void)
4823 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4824 struct rusage rusage;
4825 getrusage(RUSAGE_SELF, &rusage);
/* *1024 assumes ru_maxrss is in kilobytes (true on Linux; macOS
 * reports bytes — see getrusage(2) for the platform difference) */
4826 return (int64_t)rusage.ru_maxrss * 1024;
4827 #elif HAVE_GETPROCESSMEMORYINFO
4829 PROCESS_MEMORY_COUNTERS memcounters;
4830 proc = GetCurrentProcess();
/* cb must be set to the struct size before the call */
4831 memcounters.cb = sizeof(memcounters);
4832 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* PeakPagefileUsage is already in bytes */
4833 return memcounters.PeakPagefileUsage;
/*
 * av_log callback installed by the "-d" flag in main(); presumably an
 * empty body that discards all log output (body elided in this excerpt
 * — confirm against the full file).
 */
4839 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: set up logging and cleanup handlers, parse the
 * command line and open all inputs/outputs, run transcode(), print
 * optional -benchmark statistics and exit with the appropriate status.
 * NOTE(review): several branch bodies are elided in this excerpt.
 */
4843 int main(int argc, char **argv)
/* benchmark baseline taken just before transcode() starts */
4846 BenchmarkTimeStamps ti;
/* ffmpeg_cleanup runs on every exit_program() path */
4850 register_exit(ffmpeg_cleanup);
4852 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4854 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* pre-scan for -loglevel so it applies to option-parsing messages too */
4855 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: silence all libav* logging */
4857 if(argc>1 && !strcmp(argv[1], "-d")){
4859 av_log_set_callback(log_callback_null);
4865 avdevice_register_all();
4867 avformat_network_init();
4869 show_banner(argc, argv, options);
4871 /* parse options and open all input/output files */
4872 ret = ffmpeg_parse_options(argc, argv);
/* invoked with no files at all: point the user at the help */
4876 if (nb_output_files <= 0 && nb_input_files == 0) {
4878 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4882 /* file converter / grab */
4883 if (nb_output_files <= 0) {
4884 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* scan outputs for any non-RTP muxer (branch body elided here) */
4888 for (i = 0; i < nb_output_files; i++) {
4889 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4893 current_time = ti = get_benchmark_time_stamps();
4894 if (transcode() < 0)
/* -benchmark: report CPU and wall time consumed by the transcode */
4897 int64_t utime, stime, rtime;
4898 current_time = get_benchmark_time_stamps();
4899 utime = current_time.user_usec - ti.user_usec;
4900 stime = current_time.sys_usec - ti.sys_usec;
4901 rtime = current_time.real_usec - ti.real_usec;
4902 av_log(NULL, AV_LOG_INFO,
4903 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4904 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4906 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4907 decode_error_stat[0], decode_error_stat[1]);
/* fail when the decode-error fraction exceeds -max_error_rate
 * (error path elided in this excerpt) */
4908 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals termination by SIGINT/SIGTERM to the caller */
4911 exit_program(received_nb_signals ? 255 : main_return_code);
4912 return main_return_code;