2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
140 static int want_sdp = 1;
142 static BenchmarkTimeStamps current_time;
143 AVIOContext *progress_avio = NULL;
145 static uint8_t *subtitle_out;
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
168 static void free_input_threads(void);
172 Convert subtitles to video with alpha to insert them in filter graphs.
173 This is a temporary solution until libavfilter gets real subtitles support.
176 static int sub2video_get_blank_frame(InputStream *ist)
179 AVFrame *frame = ist->sub2video.frame;
181 av_frame_unref(frame);
182 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
185 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
187 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194 uint32_t *pal, *dst2;
198 if (r->type != SUBTITLE_BITMAP) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
202 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204 r->x, r->y, r->w, r->h, w, h
209 dst += r->y * dst_linesize + r->x * 4;
211 pal = (uint32_t *)r->data[1];
212 for (y = 0; y < r->h; y++) {
213 dst2 = (uint32_t *)dst;
215 for (x = 0; x < r->w; x++)
216 *(dst2++) = pal[*(src2++)];
218 src += r->linesize[0];
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 AVFrame *frame = ist->sub2video.frame;
228 av_assert1(frame->data[0]);
229 ist->sub2video.last_pts = frame->pts = pts;
230 for (i = 0; i < ist->nb_filters; i++) {
231 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232 AV_BUFFERSRC_FLAG_KEEP_REF |
233 AV_BUFFERSRC_FLAG_PUSH);
234 if (ret != AVERROR_EOF && ret < 0)
235 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
240 void sub2video_update(InputStream *ist, AVSubtitle *sub)
242 AVFrame *frame = ist->sub2video.frame;
246 int64_t pts, end_pts;
251 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252 AV_TIME_BASE_Q, ist->st->time_base);
253 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254 AV_TIME_BASE_Q, ist->st->time_base);
255 num_rects = sub->num_rects;
257 pts = ist->sub2video.end_pts;
261 if (sub2video_get_blank_frame(ist) < 0) {
262 av_log(ist->dec_ctx, AV_LOG_ERROR,
263 "Impossible to get a blank canvas.\n");
266 dst = frame->data [0];
267 dst_linesize = frame->linesize[0];
268 for (i = 0; i < num_rects; i++)
269 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270 sub2video_push_ref(ist, pts);
271 ist->sub2video.end_pts = end_pts;
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
276 InputFile *infile = input_files[ist->file_index];
280 /* When a frame is read from a file, examine all sub2video streams in
281 the same file and send the sub2video frame again. Otherwise, decoded
282 video frames could be accumulating in the filter graph while a filter
283 (possibly overlay) is desperately waiting for a subtitle frame. */
284 for (i = 0; i < infile->nb_streams; i++) {
285 InputStream *ist2 = input_streams[infile->ist_index + i];
286 if (!ist2->sub2video.frame)
288 /* subtitles seem to be usually muxed ahead of other streams;
289 if not, subtracting a larger time here is necessary */
290 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291 /* do not send the heartbeat frame if the subtitle is already ahead */
292 if (pts2 <= ist2->sub2video.last_pts)
294 if (pts2 >= ist2->sub2video.end_pts ||
295 (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296 sub2video_update(ist2, NULL);
297 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
300 sub2video_push_ref(ist2, pts2);
304 static void sub2video_flush(InputStream *ist)
309 if (ist->sub2video.end_pts < INT64_MAX)
310 sub2video_update(ist, NULL);
311 for (i = 0; i < ist->nb_filters; i++) {
312 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313 if (ret != AVERROR_EOF && ret < 0)
314 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
318 /* end of sub2video hack */
320 static void term_exit_sigsafe(void)
324 tcsetattr (0, TCSANOW, &oldtty);
330 av_log(NULL, AV_LOG_QUIET, "%s", "");
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
336 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
341 sigterm_handler(int sig)
344 received_sigterm = sig;
345 received_nb_signals++;
347 if(received_nb_signals > 3) {
348 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349 strlen("Received > 3 system signals, hard exiting\n"));
350 if (ret < 0) { /* Do nothing */ };
355 #if HAVE_SETCONSOLECTRLHANDLER
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
358 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
363 case CTRL_BREAK_EVENT:
364 sigterm_handler(SIGINT);
367 case CTRL_CLOSE_EVENT:
368 case CTRL_LOGOFF_EVENT:
369 case CTRL_SHUTDOWN_EVENT:
370 sigterm_handler(SIGTERM);
371 /* Basically, with these 3 events, when we return from this method the
372 process is hard terminated, so stall as long as we need to
373 to try and let the main thread(s) clean up and gracefully terminate
374 (we have at most 5 seconds, but should be done far before that). */
375 while (!ffmpeg_exited) {
381 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
390 if (!run_as_daemon && stdin_interaction) {
392 if (tcgetattr (0, &tty) == 0) {
396 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397 |INLCR|IGNCR|ICRNL|IXON);
398 tty.c_oflag |= OPOST;
399 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400 tty.c_cflag &= ~(CSIZE|PARENB);
405 tcsetattr (0, TCSANOW, &tty);
407 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
411 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
414 signal(SIGXCPU, sigterm_handler);
417 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
419 #if HAVE_SETCONSOLECTRLHANDLER
420 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
424 /* read a key without blocking */
425 static int read_key(void)
437 n = select(1, &rfds, NULL, NULL, &tv);
446 # if HAVE_PEEKNAMEDPIPE
448 static HANDLE input_handle;
451 input_handle = GetStdHandle(STD_INPUT_HANDLE);
452 is_pipe = !GetConsoleMode(input_handle, &dw);
456 /* When running under a GUI, you will end here. */
457 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
458 // input pipe may have been closed by the program that ran ffmpeg
476 static int decode_interrupt_cb(void *ctx)
478 return received_nb_signals > atomic_load(&transcode_init_done);
481 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
483 static void ffmpeg_cleanup(int ret)
488 int maxrss = getmaxrss() / 1024;
489 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
492 for (i = 0; i < nb_filtergraphs; i++) {
493 FilterGraph *fg = filtergraphs[i];
494 avfilter_graph_free(&fg->graph);
495 for (j = 0; j < fg->nb_inputs; j++) {
496 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
498 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499 sizeof(frame), NULL);
500 av_frame_free(&frame);
502 av_fifo_freep(&fg->inputs[j]->frame_queue);
503 if (fg->inputs[j]->ist->sub2video.sub_queue) {
504 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
506 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
507 &sub, sizeof(sub), NULL);
508 avsubtitle_free(&sub);
510 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
512 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
513 av_freep(&fg->inputs[j]->name);
514 av_freep(&fg->inputs[j]);
516 av_freep(&fg->inputs);
517 for (j = 0; j < fg->nb_outputs; j++) {
518 av_freep(&fg->outputs[j]->name);
519 av_freep(&fg->outputs[j]->formats);
520 av_freep(&fg->outputs[j]->channel_layouts);
521 av_freep(&fg->outputs[j]->sample_rates);
522 av_freep(&fg->outputs[j]);
524 av_freep(&fg->outputs);
525 av_freep(&fg->graph_desc);
527 av_freep(&filtergraphs[i]);
529 av_freep(&filtergraphs);
531 av_freep(&subtitle_out);
534 for (i = 0; i < nb_output_files; i++) {
535 OutputFile *of = output_files[i];
540 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
542 avformat_free_context(s);
543 av_dict_free(&of->opts);
545 av_freep(&output_files[i]);
547 for (i = 0; i < nb_output_streams; i++) {
548 OutputStream *ost = output_streams[i];
553 for (j = 0; j < ost->nb_bitstream_filters; j++)
554 av_bsf_free(&ost->bsf_ctx[j]);
555 av_freep(&ost->bsf_ctx);
557 av_frame_free(&ost->filtered_frame);
558 av_frame_free(&ost->last_frame);
559 av_dict_free(&ost->encoder_opts);
561 av_freep(&ost->forced_keyframes);
562 av_expr_free(ost->forced_keyframes_pexpr);
563 av_freep(&ost->avfilter);
564 av_freep(&ost->logfile_prefix);
566 av_freep(&ost->audio_channels_map);
567 ost->audio_channels_mapped = 0;
569 av_dict_free(&ost->sws_dict);
570 av_dict_free(&ost->swr_opts);
572 avcodec_free_context(&ost->enc_ctx);
573 avcodec_parameters_free(&ost->ref_par);
575 if (ost->muxing_queue) {
576 while (av_fifo_size(ost->muxing_queue)) {
578 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
579 av_packet_unref(&pkt);
581 av_fifo_freep(&ost->muxing_queue);
584 av_freep(&output_streams[i]);
587 free_input_threads();
589 for (i = 0; i < nb_input_files; i++) {
590 avformat_close_input(&input_files[i]->ctx);
591 av_freep(&input_files[i]);
593 for (i = 0; i < nb_input_streams; i++) {
594 InputStream *ist = input_streams[i];
596 av_frame_free(&ist->decoded_frame);
597 av_frame_free(&ist->filter_frame);
598 av_dict_free(&ist->decoder_opts);
599 avsubtitle_free(&ist->prev_sub.subtitle);
600 av_frame_free(&ist->sub2video.frame);
601 av_freep(&ist->filters);
602 av_freep(&ist->hwaccel_device);
603 av_freep(&ist->dts_buffer);
605 avcodec_free_context(&ist->dec_ctx);
607 av_freep(&input_streams[i]);
611 if (fclose(vstats_file))
612 av_log(NULL, AV_LOG_ERROR,
613 "Error closing vstats file, loss of information possible: %s\n",
614 av_err2str(AVERROR(errno)));
616 av_freep(&vstats_filename);
618 av_freep(&input_streams);
619 av_freep(&input_files);
620 av_freep(&output_streams);
621 av_freep(&output_files);
625 avformat_network_deinit();
627 if (received_sigterm) {
628 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
629 (int) received_sigterm);
630 } else if (ret && atomic_load(&transcode_init_done)) {
631 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
637 void remove_avoptions(AVDictionary **a, AVDictionary *b)
639 AVDictionaryEntry *t = NULL;
641 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
642 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
646 void assert_avoptions(AVDictionary *m)
648 AVDictionaryEntry *t;
649 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
650 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
655 static void abort_codec_experimental(AVCodec *c, int encoder)
660 static void update_benchmark(const char *fmt, ...)
662 if (do_benchmark_all) {
663 BenchmarkTimeStamps t = get_benchmark_time_stamps();
669 vsnprintf(buf, sizeof(buf), fmt, va);
671 av_log(NULL, AV_LOG_INFO,
672 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
673 t.user_usec - current_time.user_usec,
674 t.sys_usec - current_time.sys_usec,
675 t.real_usec - current_time.real_usec, buf);
681 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
684 for (i = 0; i < nb_output_streams; i++) {
685 OutputStream *ost2 = output_streams[i];
686 ost2->finished |= ost == ost2 ? this_stream : others;
690 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
692 AVFormatContext *s = of->ctx;
693 AVStream *st = ost->st;
697 * Audio encoders may split the packets -- #frames in != #packets out.
698 * But there is no reordering, so we can limit the number of output packets
699 * by simply dropping them here.
700 * Counting encoded video frames needs to be done separately because of
701 * reordering, see do_video_out().
702 * Do not count the packet when unqueued because it has been counted when queued.
704 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
705 if (ost->frame_number >= ost->max_frames) {
706 av_packet_unref(pkt);
712 if (!of->header_written) {
713 AVPacket tmp_pkt = {0};
714 /* the muxer is not initialized yet, buffer the packet */
715 if (!av_fifo_space(ost->muxing_queue)) {
716 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
717 ost->max_muxing_queue_size);
718 if (new_size <= av_fifo_size(ost->muxing_queue)) {
719 av_log(NULL, AV_LOG_ERROR,
720 "Too many packets buffered for output stream %d:%d.\n",
721 ost->file_index, ost->st->index);
724 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
728 ret = av_packet_make_refcounted(pkt);
731 av_packet_move_ref(&tmp_pkt, pkt);
732 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
736 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
737 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
738 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
740 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
742 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
744 ost->quality = sd ? AV_RL32(sd) : -1;
745 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
747 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
749 ost->error[i] = AV_RL64(sd + 8 + 8*i);
754 if (ost->frame_rate.num && ost->is_cfr) {
755 if (pkt->duration > 0)
756 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
757 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
762 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
764 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
765 if (pkt->dts != AV_NOPTS_VALUE &&
766 pkt->pts != AV_NOPTS_VALUE &&
767 pkt->dts > pkt->pts) {
768 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
770 ost->file_index, ost->st->index);
772 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
773 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
774 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
776 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
777 pkt->dts != AV_NOPTS_VALUE &&
778 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
779 ost->last_mux_dts != AV_NOPTS_VALUE) {
780 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
781 if (pkt->dts < max) {
782 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
783 av_log(s, loglevel, "Non-monotonous DTS in output stream "
784 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
785 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
787 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
790 av_log(s, loglevel, "changing to %"PRId64". This may result "
791 "in incorrect timestamps in the output file.\n",
793 if (pkt->pts >= pkt->dts)
794 pkt->pts = FFMAX(pkt->pts, max);
799 ost->last_mux_dts = pkt->dts;
801 ost->data_size += pkt->size;
802 ost->packets_written++;
804 pkt->stream_index = ost->index;
807 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
808 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
809 av_get_media_type_string(ost->enc_ctx->codec_type),
810 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
811 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
816 ret = av_interleaved_write_frame(s, pkt);
818 print_error("av_interleaved_write_frame()", ret);
819 main_return_code = 1;
820 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
822 av_packet_unref(pkt);
825 static void close_output_stream(OutputStream *ost)
827 OutputFile *of = output_files[ost->file_index];
829 ost->finished |= ENCODER_FINISHED;
831 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
832 of->recording_time = FFMIN(of->recording_time, end);
837 * Send a single packet to the output, applying any bitstream filters
838 * associated with the output stream. This may result in any number
839 * of packets actually being written, depending on what bitstream
840 * filters are applied. The supplied packet is consumed and will be
841 * blank (as if newly-allocated) when this function returns.
843 * If eof is set, instead indicate EOF to all bitstream filters and
844 * therefore flush any delayed packets to the output. A blank packet
845 * must be supplied in this case.
847 static void output_packet(OutputFile *of, AVPacket *pkt,
848 OutputStream *ost, int eof)
852 /* apply the output bitstream filters, if any */
853 if (ost->nb_bitstream_filters) {
856 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
863 /* get a packet from the previous filter up the chain */
864 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
865 if (ret == AVERROR(EAGAIN)) {
869 } else if (ret == AVERROR_EOF) {
874 /* send it to the next filter down the chain or to the muxer */
875 if (idx < ost->nb_bitstream_filters) {
876 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
884 write_packet(of, pkt, ost, 0);
887 write_packet(of, pkt, ost, 0);
890 if (ret < 0 && ret != AVERROR_EOF) {
891 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
892 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
898 static int check_recording_time(OutputStream *ost)
900 OutputFile *of = output_files[ost->file_index];
902 if (of->recording_time != INT64_MAX &&
903 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
904 AV_TIME_BASE_Q) >= 0) {
905 close_output_stream(ost);
911 static void do_audio_out(OutputFile *of, OutputStream *ost,
914 AVCodecContext *enc = ost->enc_ctx;
918 av_init_packet(&pkt);
922 if (!check_recording_time(ost))
925 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
926 frame->pts = ost->sync_opts;
927 ost->sync_opts = frame->pts + frame->nb_samples;
928 ost->samples_encoded += frame->nb_samples;
929 ost->frames_encoded++;
931 av_assert0(pkt.size || !pkt.data);
932 update_benchmark(NULL);
934 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
935 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
936 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
937 enc->time_base.num, enc->time_base.den);
940 ret = avcodec_send_frame(enc, frame);
945 ret = avcodec_receive_packet(enc, &pkt);
946 if (ret == AVERROR(EAGAIN))
951 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
953 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
956 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
957 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
958 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
959 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
962 output_packet(of, &pkt, ost, 0);
967 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
971 static void do_subtitle_out(OutputFile *of,
975 int subtitle_out_max_size = 1024 * 1024;
976 int subtitle_out_size, nb, i;
981 if (sub->pts == AV_NOPTS_VALUE) {
982 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
991 subtitle_out = av_malloc(subtitle_out_max_size);
993 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
998 /* Note: DVB subtitle need one packet to draw them and one other
999 packet to clear them */
1000 /* XXX: signal it in the codec context ? */
1001 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1006 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1008 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1009 pts -= output_files[ost->file_index]->start_time;
1010 for (i = 0; i < nb; i++) {
1011 unsigned save_num_rects = sub->num_rects;
1013 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1014 if (!check_recording_time(ost))
1018 // start_display_time is required to be 0
1019 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1020 sub->end_display_time -= sub->start_display_time;
1021 sub->start_display_time = 0;
1025 ost->frames_encoded++;
1027 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1028 subtitle_out_max_size, sub);
1030 sub->num_rects = save_num_rects;
1031 if (subtitle_out_size < 0) {
1032 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1036 av_init_packet(&pkt);
1037 pkt.data = subtitle_out;
1038 pkt.size = subtitle_out_size;
1039 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1040 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1041 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1042 /* XXX: the pts correction is handled here. Maybe handling
1043 it in the codec would be better */
1045 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1047 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1050 output_packet(of, &pkt, ost, 0);
1054 static void do_video_out(OutputFile *of,
1056 AVFrame *next_picture,
1059 int ret, format_video_sync;
1061 AVCodecContext *enc = ost->enc_ctx;
1062 AVCodecParameters *mux_par = ost->st->codecpar;
1063 AVRational frame_rate;
1064 int nb_frames, nb0_frames, i;
1065 double delta, delta0;
1066 double duration = 0;
1068 InputStream *ist = NULL;
1069 AVFilterContext *filter = ost->filter->filter;
1071 if (ost->source_index >= 0)
1072 ist = input_streams[ost->source_index];
1074 frame_rate = av_buffersink_get_frame_rate(filter);
1075 if (frame_rate.num > 0 && frame_rate.den > 0)
1076 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1078 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1079 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1081 if (!ost->filters_script &&
1083 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1086 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1087 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1090 if (!next_picture) {
1092 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1093 ost->last_nb0_frames[1],
1094 ost->last_nb0_frames[2]);
1096 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1097 delta = delta0 + duration;
1099 /* by default, we output a single frame */
1100 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1103 format_video_sync = video_sync_method;
1104 if (format_video_sync == VSYNC_AUTO) {
1105 if(!strcmp(of->ctx->oformat->name, "avi")) {
1106 format_video_sync = VSYNC_VFR;
1108 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1110 && format_video_sync == VSYNC_CFR
1111 && input_files[ist->file_index]->ctx->nb_streams == 1
1112 && input_files[ist->file_index]->input_ts_offset == 0) {
1113 format_video_sync = VSYNC_VSCFR;
1115 if (format_video_sync == VSYNC_CFR && copy_ts) {
1116 format_video_sync = VSYNC_VSCFR;
1119 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1123 format_video_sync != VSYNC_PASSTHROUGH &&
1124 format_video_sync != VSYNC_DROP) {
1125 if (delta0 < -0.6) {
1126 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1128 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1129 sync_ipts = ost->sync_opts;
1134 switch (format_video_sync) {
1136 if (ost->frame_number == 0 && delta0 >= 0.5) {
1137 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1140 ost->sync_opts = lrint(sync_ipts);
1143 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1144 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1146 } else if (delta < -1.1)
1148 else if (delta > 1.1) {
1149 nb_frames = lrintf(delta);
1151 nb0_frames = lrintf(delta0 - 0.6);
1157 else if (delta > 0.6)
1158 ost->sync_opts = lrint(sync_ipts);
1161 case VSYNC_PASSTHROUGH:
1162 ost->sync_opts = lrint(sync_ipts);
1169 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1170 nb0_frames = FFMIN(nb0_frames, nb_frames);
1172 memmove(ost->last_nb0_frames + 1,
1173 ost->last_nb0_frames,
1174 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1175 ost->last_nb0_frames[0] = nb0_frames;
1177 if (nb0_frames == 0 && ost->last_dropped) {
1179 av_log(NULL, AV_LOG_VERBOSE,
1180 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1181 ost->frame_number, ost->st->index, ost->last_frame->pts);
1183 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1184 if (nb_frames > dts_error_threshold * 30) {
1185 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1189 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1190 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1191 if (nb_frames_dup > dup_warning) {
1192 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1196 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1198 /* duplicates frame if needed */
1199 for (i = 0; i < nb_frames; i++) {
1200 AVFrame *in_picture;
1201 int forced_keyframe = 0;
1203 av_init_packet(&pkt);
1207 if (i < nb0_frames && ost->last_frame) {
1208 in_picture = ost->last_frame;
1210 in_picture = next_picture;
1215 in_picture->pts = ost->sync_opts;
1217 if (!check_recording_time(ost))
1220 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1221 ost->top_field_first >= 0)
1222 in_picture->top_field_first = !!ost->top_field_first;
1224 if (in_picture->interlaced_frame) {
1225 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1226 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1228 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1230 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1232 in_picture->quality = enc->global_quality;
1233 in_picture->pict_type = 0;
1235 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1236 in_picture->pts != AV_NOPTS_VALUE)
1237 ost->forced_kf_ref_pts = in_picture->pts;
1239 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1241 if (ost->forced_kf_index < ost->forced_kf_count &&
1242 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243 ost->forced_kf_index++;
1244 forced_keyframe = 1;
1245 } else if (ost->forced_keyframes_pexpr) {
1247 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1248 res = av_expr_eval(ost->forced_keyframes_pexpr,
1249 ost->forced_keyframes_expr_const_values, NULL);
1250 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1251 ost->forced_keyframes_expr_const_values[FKF_N],
1252 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1253 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1254 ost->forced_keyframes_expr_const_values[FKF_T],
1255 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1258 forced_keyframe = 1;
1259 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1260 ost->forced_keyframes_expr_const_values[FKF_N];
1261 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1262 ost->forced_keyframes_expr_const_values[FKF_T];
1263 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1266 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1267 } else if ( ost->forced_keyframes
1268 && !strncmp(ost->forced_keyframes, "source", 6)
1269 && in_picture->key_frame==1) {
1270 forced_keyframe = 1;
1273 if (forced_keyframe) {
1274 in_picture->pict_type = AV_PICTURE_TYPE_I;
1275 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1278 update_benchmark(NULL);
1280 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1281 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1282 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1283 enc->time_base.num, enc->time_base.den);
1286 ost->frames_encoded++;
1288 ret = avcodec_send_frame(enc, in_picture);
1291 // Make sure Closed Captions will not be duplicated
1292 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1295 ret = avcodec_receive_packet(enc, &pkt);
1296 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1297 if (ret == AVERROR(EAGAIN))
1303 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1304 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1305 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1306 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1309 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1310 pkt.pts = ost->sync_opts;
1312 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1315 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1316 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1317 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1318 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1321 frame_size = pkt.size;
1322 output_packet(of, &pkt, ost, 0);
1324 /* if two pass, output log */
1325 if (ost->logfile && enc->stats_out) {
1326 fprintf(ost->logfile, "%s", enc->stats_out);
1331 * For video, number of frames in == number of packets out.
1332 * But there may be reordering, so we can't throw away frames on encoder
1333 * flush, we need to limit them here, before they go into encoder.
1335 ost->frame_number++;
1337 if (vstats_filename && frame_size)
1338 do_video_stats(ost, frame_size);
1341 if (!ost->last_frame)
1342 ost->last_frame = av_frame_alloc();
1343 av_frame_unref(ost->last_frame);
1344 if (next_picture && ost->last_frame)
1345 av_frame_ref(ost->last_frame, next_picture);
1347 av_frame_free(&ost->last_frame);
1351 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1355 static double psnr(double d)
1357 return -10.0 * log10(d);
/* Append one line of per-frame statistics for this video output stream to
 * the global vstats file (lazily opened on first call): frame number,
 * quantizer, optional PSNR, frame size, instantaneous and average bitrate,
 * and picture type.  No-op fields for non-video streams. */
1360 static void do_video_stats(OutputStream *ost, int frame_size)
1362 AVCodecContext *enc;
1364 double ti1, bitrate, avg_bitrate;
1366 /* this is executed just the first time do_video_stats is called */
1368 vstats_file = fopen(vstats_filename, "w");
1376 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1377 frame_number = ost->st->nb_frames;
1378 if (vstats_version <= 1) {
1379 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1380 ost->quality / (float)FF_QP2LAMBDA);
/* version >= 2 additionally records output-file and stream indices */
1382 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1383 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR from the accumulated luma squared error, normalized per pixel
 * against the 8-bit peak (255^2) */
1386 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1387 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1389 fprintf(vstats_file,"f_size= %6d ", frame_size);
1390 /* compute pts value */
1391 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate assumes one frame per encoder time-base tick */
1395 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1396 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1397 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1398 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1399 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1403 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/* Mark an output stream as completely finished for both encoder and muxer
 * so no further packets are produced or accepted for it.
 * NOTE(review): the loop below marks *every* stream of the owning output
 * file as finished, not just `ost` -- presumably guarded by a condition
 * not visible here (e.g. -shortest); confirm against the full source. */
1405 static void finish_output_stream(OutputStream *ost)
1407 OutputFile *of = output_files[ost->file_index];
1410 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1413 for (i = 0; i < of->ctx->nb_streams; i++)
1414 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1419  * Get and encode new output from any of the filtergraphs, without causing
1422  * @return 0 for success, <0 for severe errors
/* Drains every configured buffersink; each reaped frame is timestamp-
 * rescaled into the encoder time base and dispatched to do_video_out()
 * or do_audio_out().  With flush!=0, EOF from a video sink triggers a
 * final NULL-frame call to flush the encoder. */
1424 static int reap_filters(int flush)
1426 AVFrame *filtered_frame = NULL;
1429 /* Reap all buffers present in the buffer sinks */
1430 for (i = 0; i < nb_output_streams; i++) {
1431 OutputStream *ost = output_streams[i];
1432 OutputFile *of = output_files[ost->file_index];
1433 AVFilterContext *filter;
1434 AVCodecContext *enc = ost->enc_ctx;
/* skip streams without a fully configured filtergraph */
1437 if (!ost->filter || !ost->filter->graph->graph)
1439 filter = ost->filter->filter;
/* lazy one-time encoder/muxer initialization for this stream */
1441 if (!ost->initialized) {
1442 char error[1024] = "";
1443 ret = init_output_stream(ost, error, sizeof(error));
1445 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1446 ost->file_index, ost->index, error);
/* reusable per-stream frame buffer, allocated on first use */
1451 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1452 return AVERROR(ENOMEM);
1454 filtered_frame = ost->filtered_frame;
1457 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: NO_REQUEST means "only return what is queued" */
1458 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1459 AV_BUFFERSINK_FLAG_NO_REQUEST);
1461 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1462 av_log(NULL, AV_LOG_WARNING,
1463 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1464 } else if (flush && ret == AVERROR_EOF) {
1465 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1466 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1470 if (ost->finished) {
1471 av_frame_unref(filtered_frame);
/* rescale pts from the filtergraph time base into the encoder time
 * base; extra_bits temporarily widens the denominator so float_pts
 * keeps sub-tick precision for the fps conversion code */
1474 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1475 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1476 AVRational filter_tb = av_buffersink_get_time_base(filter);
1477 AVRational tb = enc->time_base;
1478 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1480 tb.den <<= extra_bits;
1482 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1483 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1484 float_pts /= 1 << extra_bits;
1485 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1486 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1488 filtered_frame->pts =
1489 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1490 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
/* dispatch by media type of the sink */
1493 switch (av_buffersink_get_type(filter)) {
1494 case AVMEDIA_TYPE_VIDEO:
1495 if (!ost->frame_aspect_ratio.num)
1496 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1499 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1500 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1502 enc->time_base.num, enc->time_base.den);
1505 do_video_out(of, ost, filtered_frame, float_pts);
1507 case AVMEDIA_TYPE_AUDIO:
/* refuse un-normalized audio if the encoder cannot adapt mid-stream */
1508 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1509 enc->channels != filtered_frame->channels) {
1510 av_log(NULL, AV_LOG_ERROR,
1511 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1514 do_audio_out(of, ost, filtered_frame);
1517 // TODO support subtitle filters
1521 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-media-type byte totals and muxing
 * overhead at INFO level, then detailed per-input/per-output stream packet
 * and frame counts at VERBOSE level.  Warns when nothing was encoded. */
1528 static void print_final_stats(int64_t total_size)
1530 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1531 uint64_t subtitle_size = 0;
1532 uint64_t data_size = 0;
/* percent stays negative when overhead cannot be computed -> "unknown" */
1533 float percent = -1.0;
/* accumulate per-type output byte counts */
1537 for (i = 0; i < nb_output_streams; i++) {
1538 OutputStream *ost = output_streams[i];
1539 switch (ost->enc_ctx->codec_type) {
1540 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1541 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1542 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1543 default: other_size += ost->data_size; break;
1545 extra_size += ost->enc_ctx->extradata_size;
1546 data_size += ost->data_size;
/* first-pass-only streams produce no muxed output worth counting */
1547 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1548 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw stream payload */
1552 if (data_size && total_size>0 && total_size >= data_size)
1553 percent = 100.0 * (total_size - data_size) / data_size;
1555 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1556 video_size / 1024.0,
1557 audio_size / 1024.0,
1558 subtitle_size / 1024.0,
1559 other_size / 1024.0,
1560 extra_size / 1024.0);
1562 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1564 av_log(NULL, AV_LOG_INFO, "unknown");
1565 av_log(NULL, AV_LOG_INFO, "\n");
1567 /* print verbose per-stream stats */
1568 for (i = 0; i < nb_input_files; i++) {
1569 InputFile *f = input_files[i];
/* note: this local total_size shadows the function parameter */
1570 uint64_t total_packets = 0, total_size = 0;
1572 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1575 for (j = 0; j < f->nb_streams; j++) {
1576 InputStream *ist = input_streams[f->ist_index + j];
1577 enum AVMediaType type = ist->dec_ctx->codec_type;
1579 total_size += ist->data_size;
1580 total_packets += ist->nb_packets;
1582 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1583 i, j, media_type_string(type));
1584 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1585 ist->nb_packets, ist->data_size);
1587 if (ist->decoding_needed) {
1588 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1589 ist->frames_decoded);
1590 if (type == AVMEDIA_TYPE_AUDIO)
1591 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1592 av_log(NULL, AV_LOG_VERBOSE, "; ");
1595 av_log(NULL, AV_LOG_VERBOSE, "\n");
1598 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1599 total_packets, total_size);
/* same report, mirrored for the output side */
1602 for (i = 0; i < nb_output_files; i++) {
1603 OutputFile *of = output_files[i];
1604 uint64_t total_packets = 0, total_size = 0;
1606 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1609 for (j = 0; j < of->ctx->nb_streams; j++) {
1610 OutputStream *ost = output_streams[of->ost_index + j];
1611 enum AVMediaType type = ost->enc_ctx->codec_type;
1613 total_size += ost->data_size;
1614 total_packets += ost->packets_written;
1616 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1617 i, j, media_type_string(type));
1618 if (ost->encoding_needed) {
1619 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1620 ost->frames_encoded);
1621 if (type == AVMEDIA_TYPE_AUDIO)
1622 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1623 av_log(NULL, AV_LOG_VERBOSE, "; ");
1626 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1627 ost->packets_written, ost->data_size);
1629 av_log(NULL, AV_LOG_VERBOSE, "\n");
1632 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1633 total_packets, total_size);
/* nothing at all was written: hint at common option mistakes */
1635 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1636 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1638 av_log(NULL, AV_LOG_WARNING, "\n");
1640 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic (and final) progress line -- frame count, fps, q,
 * size, time, bitrate, dup/drop, speed -- to stderr/log, and a
 * machine-readable key=value version to the -progress AVIO if set.
 * Rate-limited to roughly twice per second unless is_last_report. */
1645 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1647 AVBPrint buf, buf_script;
1649 AVFormatContext *oc;
1651 AVCodecContext *enc;
1652 int frame_number, vid, i;
/* minimum valid pts; updated to the max end-pts across streams below */
1655 int64_t pts = INT64_MIN + 1;
/* static state: throttles report frequency across calls */
1656 static int64_t last_time = -1;
1657 static int qp_histogram[52];
1658 int hours, mins, secs, us;
1659 const char *hours_sign;
1663 if (!print_stats && !is_last_report && !progress_avio)
/* throttle: print at most every 500ms of wall-clock time */
1666 if (!is_last_report) {
1667 if (last_time == -1) {
1668 last_time = cur_time;
1671 if ((cur_time - last_time) < 500000)
1673 last_time = cur_time;
1676 t = (cur_time-timer_start) / 1000000.0;
1679 oc = output_files[0]->ctx;
1681 total_size = avio_size(oc->pb);
1682 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1683 total_size = avio_tell(oc->pb);
1686 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1687 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1688 for (i = 0; i < nb_output_streams; i++) {
1690 ost = output_streams[i];
1692 if (!ost->stream_copy)
1693 q = ost->quality / (float) FF_QP2LAMBDA;
/* additional video streams after the first only contribute a q= field */
1695 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1696 av_bprintf(&buf, "q=%2.1f ", q);
1697 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1698 ost->file_index, ost->index, q);
/* first video stream drives frame/fps reporting */
1700 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1703 frame_number = ost->frame_number;
1704 fps = t > 1 ? frame_number / t : 0;
1705 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1706 frame_number, fps < 9.95, fps, q);
1707 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1708 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1709 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1710 ost->file_index, ost->index, q);
1712 av_bprintf(&buf, "L");
/* -qphist: log2-scaled per-qp frame counts printed as hex digits */
1716 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1718 for (j = 0; j < 32; j++)
1719 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
/* per-plane and combined PSNR when the encoder tracks error */
1722 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1724 double error, error_sum = 0;
1725 double scale, scale_sum = 0;
1727 char type[3] = { 'Y','U','V' };
1728 av_bprintf(&buf, "PSNR=");
1729 for (j = 0; j < 3; j++) {
1730 if (is_last_report) {
1731 error = enc->error[j];
1732 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1734 error = ost->error[j];
1735 scale = enc->width * enc->height * 255.0 * 255.0;
1741 p = psnr(error / scale);
1742 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1743 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1744 ost->file_index, ost->index, type[j] | 32, p);
1746 p = psnr(error_sum / scale_sum);
1747 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1748 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1749 ost->file_index, ost->index, p);
1753 /* compute min output value */
1754 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1755 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1756 ost->st->time_base, AV_TIME_BASE_Q));
1758 nb_frames_drop += ost->last_dropped;
/* decompose pts (microseconds) into h:m:s.us for display */
1761 secs = FFABS(pts) / AV_TIME_BASE;
1762 us = FFABS(pts) % AV_TIME_BASE;
1767 hours_sign = (pts < 0) ? "-" : "";
1769 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1770 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1772 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1773 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1774 if (pts == AV_NOPTS_VALUE) {
1775 av_bprintf(&buf, "N/A ");
1777 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1778 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1782 av_bprintf(&buf, "bitrate=N/A");
1783 av_bprintf(&buf_script, "bitrate=N/A\n");
1785 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1786 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1789 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1790 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1791 if (pts == AV_NOPTS_VALUE) {
1792 av_bprintf(&buf_script, "out_time_us=N/A\n");
1793 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1794 av_bprintf(&buf_script, "out_time=N/A\n");
1796 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1797 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1798 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1799 hours_sign, hours, mins, secs, us);
1802 if (nb_frames_dup || nb_frames_drop)
1803 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1804 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1805 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1808 av_bprintf(&buf, " speed=N/A");
1809 av_bprintf(&buf_script, "speed=N/A\n");
1811 av_bprintf(&buf, " speed=%4.3gx", speed);
1812 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* human-readable line: \r overwrites in place, \n only on the last one */
1815 if (print_stats || is_last_report) {
1816 const char end = is_last_report ? '\n' : '\r';
1817 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1818 fprintf(stderr, "%s %c", buf.str, end);
1820 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1824 av_bprint_finalize(&buf, NULL);
/* machine-readable progress: key=value lines ending in progress=... */
1826 if (progress_avio) {
1827 av_bprintf(&buf_script, "progress=%s\n",
1828 is_last_report ? "end" : "continue");
1829 avio_write(progress_avio, buf_script.str,
1830 FFMIN(buf_script.len, buf_script.size - 1));
1831 avio_flush(progress_avio);
1832 av_bprint_finalize(&buf_script, NULL);
1833 if (is_last_report) {
1834 if ((ret = avio_closep(&progress_avio)) < 0)
1835 av_log(NULL, AV_LOG_ERROR,
1836 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1841 print_final_stats(total_size);
/* Seed an InputFilter's media parameters (format, audio layout, video
 * geometry) from the stream's codec parameters when no decoded frame was
 * ever available to derive them from. */
1844 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1846     // We never got any input. Set a fake format, which will
1847     // come from libavformat.
1848 ifilter->format = par->format;
1849 ifilter->sample_rate = par->sample_rate;
1850 ifilter->channels = par->channels;
1851 ifilter->channel_layout = par->channel_layout;
1852 ifilter->width = par->width;
1853 ifilter->height = par->height;
1854 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/* Drain every encoder at end of input: initialize any stream that never
 * received data (configuring its filtergraph from codecpar if needed),
 * then send a NULL frame and mux all remaining delayed packets until
 * AVERROR_EOF.  Only audio/video encoders with real framing are flushed. */
1857 static void flush_encoders(void)
1861 for (i = 0; i < nb_output_streams; i++) {
1862 OutputStream *ost = output_streams[i];
1863 AVCodecContext *enc = ost->enc_ctx;
1864 OutputFile *of = output_files[ost->file_index];
1866 if (!ost->encoding_needed)
1869         // Try to enable encoding with no input frames.
1870         // Maybe we should just let encoding fail instead.
1871 if (!ost->initialized) {
1872 FilterGraph *fg = ost->filter->graph;
1873 char error[1024] = "";
1875 av_log(NULL, AV_LOG_WARNING,
1876 "Finishing stream %d:%d without any data written to it.\n",
1877 ost->file_index, ost->st->index);
/* never-configured graph: fall back to codecpar-derived parameters */
1879 if (ost->filter && !fg->graph) {
1881 for (x = 0; x < fg->nb_inputs; x++) {
1882 InputFilter *ifilter = fg->inputs[x];
1883 if (ifilter->format < 0)
1884 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1887 if (!ifilter_has_all_input_formats(fg))
1890 ret = configure_filtergraph(fg);
1892 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1896 finish_output_stream(ost);
1899 ret = init_output_stream(ost, error, sizeof(error));
1901 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1902 ost->file_index, ost->index, error);
/* frame_size <= 1 means the audio codec has no delayed frames to flush */
1907 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1910 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1914 const char *desc = NULL;
1918 switch (enc->codec_type) {
1919 case AVMEDIA_TYPE_AUDIO:
1922 case AVMEDIA_TYPE_VIDEO:
1929 av_init_packet(&pkt);
1933 update_benchmark(NULL);
/* EAGAIN from receive means the encoder wants the NULL (drain) frame */
1935 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1936 ret = avcodec_send_frame(enc, NULL);
1938 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1945 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1946 if (ret < 0 && ret != AVERROR_EOF) {
1947 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass: append the encoder's pass-1 stats to the log file */
1952 if (ost->logfile && enc->stats_out) {
1953 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: emit the final (EOF-marking) packet to the muxer */
1955 if (ret == AVERROR_EOF) {
1956 output_packet(of, &pkt, ost, 1);
1959 if (ost->finished & MUXER_FINISHED) {
1960 av_packet_unref(&pkt);
1963 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1964 pkt_size = pkt.size;
1965 output_packet(of, &pkt, ost, 0);
1966 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1967 do_video_stats(ost, pkt_size);
1974  * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero when `ost` is the stream-copy target for `ist` and the
 * packet falls inside the output file's time window (visible checks:
 * matching source index and not before -ss start time). */
1976 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1978 OutputFile *of = output_files[ost->file_index];
1979 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1981 if (ost->source_index != ist_index)
/* drop packets dated before the requested output start time */
1987 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to the output without re-encoding: clone it,
 * rescale pts/dts/duration from the input stream time base into the
 * muxer time base (shifted by the output start time), and hand it to
 * output_packet().  pkt == NULL (EOF) flushes the bitstream filters. */
1993 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1995 OutputFile *of = output_files[ost->file_index];
1996 InputFile *f = input_files [ist->file_index];
1997 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1998 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2001     // EOF: flush output bitstream filters.
2003 av_init_packet(&opkt);
2006 output_packet(of, &opkt, ost, 1);
/* by default, start the copied stream on a keyframe */
2010 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2011 !ost->copy_initial_nonkeyframes)
/* drop packets that predate the effective start point */
2014 if (!ost->frame_number && !ost->copy_prior_start) {
2015 int64_t comp_start = start_time;
2016 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2017 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2018 if (pkt->pts == AV_NOPTS_VALUE ?
2019 ist->pts < comp_start :
2020 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* -t on the output file: stop once the recording window is exceeded */
2024 if (of->recording_time != INT64_MAX &&
2025 ist->pts >= of->recording_time + start_time) {
2026 close_output_stream(ost);
/* -t on the input file: same check against the input's own window */
2030 if (f->recording_time != INT64_MAX) {
2031 start_time = f->ctx->start_time;
2032 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2033 start_time += f->start_time;
2034 if (ist->pts >= f->recording_time + start_time) {
2035 close_output_stream(ost);
2040     /* force the input stream PTS */
2041 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2044 if (av_packet_ref(&opkt, pkt) < 0)
2047 if (pkt->pts != AV_NOPTS_VALUE)
2048 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
/* missing dts: substitute the demuxer-tracked estimate (AV_TIME_BASE_Q) */
2050 if (pkt->dts == AV_NOPTS_VALUE)
2051 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2053 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2054 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps from the frame duration */
2056 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2057 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2059 duration = ist->dec_ctx->frame_size;
2060 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2061 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2062 ost->mux_timebase) - ost_tb_start_time;
2065 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2067 output_packet(of, &opkt, ost, 0);
/* If the decoder reported no channel layout, pick the default layout for
 * its channel count (capped by -guess_layout_max) and warn the user.
 * NOTE(review): the success/failure return values fall outside this view;
 * callers appear to treat nonzero as "layout known". */
2070 int guess_input_channel_layout(InputStream *ist)
2072 AVCodecContext *dec = ist->dec_ctx;
2074 if (!dec->channel_layout) {
2075 char layout_name[256];
/* too many channels to guess reliably -> leave layout unset */
2077 if (dec->channels > ist->guess_layout_max)
2079 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2080 if (!dec->channel_layout)
2082 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2083 dec->channels, dec->channel_layout);
2084 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2085 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Book-keeping after a decode attempt: count successes/failures in
 * decode_error_stat, abort on error when -xerror is set, and report
 * (or abort on) corrupt decoded frames. */
2090 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2092 if (*got_output || ret<0)
2093 decode_error_stat[ret<0] ++;
2095 if (ret < 0 && exit_on_error)
/* a produced frame can still be flagged corrupt by the decoder */
2098 if (*got_output && ist) {
2099 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2100 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2101 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2108 // Filters can be configured only if the formats of all inputs are known.
/* Scan every filtergraph input; an audio/video input with format < 0 is
 * still unknown and blocks configuration. */
2109 static int ifilter_has_all_input_formats(FilterGraph *fg)
2112 for (i = 0; i < fg->nb_inputs; i++) {
2113 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2114 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Push one decoded frame into a filtergraph input.  Detects parameter
 * changes (format, sample rate, channels/layout, dimensions, hw frames
 * context) that require a graph reinit; while any sibling input still has
 * an unknown format the frame is buffered in the input's FIFO instead.
 * Returns 0 on success or a negative AVERROR. */
2120 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2122 FilterGraph *fg = ifilter->graph;
2123 int need_reinit, ret, i;
2125     /* determine if the parameters for this input changed */
2126 need_reinit = ifilter->format != frame->format;
2128 switch (ifilter->ist->st->codecpar->codec_type) {
2129 case AVMEDIA_TYPE_AUDIO:
2130 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2131 ifilter->channels != frame->channels ||
2132 ifilter->channel_layout != frame->channel_layout;
2134 case AVMEDIA_TYPE_VIDEO:
2135 need_reinit |= ifilter->width != frame->width ||
2136 ifilter->height != frame->height;
/* user disabled reinit and graph exists: keep feeding as-is */
2140 if (!ifilter->ist->reinit_filters && fg->graph)
/* hw frames context appearing/vanishing or changing also forces reinit */
2143 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2144 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2148 ret = ifilter_parameters_from_frame(ifilter, frame);
2153     /* (re)init the graph if possible, otherwise buffer the frame and return */
2154 if (need_reinit || !fg->graph) {
2155 for (i = 0; i < fg->nb_inputs; i++) {
2156 if (!ifilter_has_all_input_formats(fg)) {
/* clone so the caller's frame stays valid; ownership moves to FIFO */
2157 AVFrame *tmp = av_frame_clone(frame);
2159 return AVERROR(ENOMEM);
2160 av_frame_unref(frame);
2162 if (!av_fifo_space(ifilter->frame_queue)) {
2163 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2165 av_frame_free(&tmp);
2169 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the old graph before tearing it down for reconfiguration */
2174 ret = reap_filters(1);
2175 if (ret < 0 && ret != AVERROR_EOF) {
2176 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2180 ret = configure_filtergraph(fg);
2182 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2187 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2189 if (ret != AVERROR_EOF)
2190 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/* Signal EOF (with a final pts) on a filtergraph input.  If the graph was
 * never configured, fall back to codecpar-derived parameters; failing
 * that, an audio/video input with no known format is a hard error. */
2197 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2203 if (ifilter->filter) {
2204 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2208         // the filtergraph was never configured
2209 if (ifilter->format < 0)
2210 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2211 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2212 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2213 return AVERROR_INVALIDDATA;
2220 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2221 // There is the following difference: if you got a frame, you must call
2222 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2223 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin wrapper pairing avcodec_send_packet with avcodec_receive_frame;
 * *got_frame reflects whether `frame` was filled. */
2224 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2231 ret = avcodec_send_packet(avctx, pkt);
2232     // In particular, we don't expect AVERROR(EAGAIN), because we read all
2233     // decoded frames with avcodec_receive_frame() until done.
/* EOF from send (already draining) is tolerated; other errors propagate */
2234 if (ret < 0 && ret != AVERROR_EOF)
2238 ret = avcodec_receive_frame(avctx, frame);
/* EAGAIN just means "feed more input"; it is not an error here */
2239 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Fan one decoded frame out to every filtergraph fed by this input stream.
 * All but the last filter receive a reference (ist->filter_frame); the
 * last gets the decoded frame itself, avoiding one extra ref.  EOF from a
 * filter is ignored; other send errors are logged and returned. */
2247 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2252     av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2253 for (i = 0; i < ist->nb_filters; i++) {
2254 if (i < ist->nb_filters - 1) {
2255 f = ist->filter_frame;
2256 ret = av_frame_ref(f, decoded_frame);
2261 ret = ifilter_send_frame(ist->filters[i], f);
2262 if (ret == AVERROR_EOF)
2263             ret = 0; /* ignore */
2265 av_log(NULL, AV_LOG_ERROR,
2266 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet for this input stream: run the decoder, update
 * sample/frame counters and the predicted next_pts/next_dts, choose a
 * time base for the decoded frame's pts (stream tb, packet pts, or the
 * demuxer dts estimate), rescale it sample-accurately, and forward the
 * frame to the stream's filtergraphs. */
2273 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2276 AVFrame *decoded_frame;
2277 AVCodecContext *avctx = ist->dec_ctx;
2279 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
2281 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2282 return AVERROR(ENOMEM);
2283 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2284 return AVERROR(ENOMEM);
2285 decoded_frame = ist->decoded_frame;
2287 update_benchmark(NULL);
2288 ret = decode(avctx, decoded_frame, got_output, pkt);
2289 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* a non-positive sample rate would poison every rescale below */
2293 if (ret >= 0 && avctx->sample_rate <= 0) {
2294 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2295 ret = AVERROR_INVALIDDATA;
2298 if (ret != AVERROR_EOF)
2299 check_decode_result(ist, got_output, ret);
2301 if (!*got_output || ret < 0)
2304 ist->samples_decoded += decoded_frame->nb_samples;
2305 ist->frames_decoded++;
2307     /* increment next_dts to use for the case where the input stream does not
2308        have timestamps or there are multiple frames in the packet */
2309 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2311 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* pick the best available timestamp source for this frame */
2314 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2315 decoded_frame_tb = ist->st->time_base;
2316 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2317 decoded_frame->pts = pkt->pts;
2318 decoded_frame_tb = ist->st->time_base;
2320 decoded_frame->pts = ist->dts;
2321 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale into 1/sample_rate with drift compensation across calls */
2323 if (decoded_frame->pts != AV_NOPTS_VALUE)
2324 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2325 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2326 (AVRational){1, avctx->sample_rate});
2327 ist->nb_samples = decoded_frame->nb_samples;
2328 err = send_frame_to_filters(ist, decoded_frame);
2330 av_frame_unref(ist->filter_frame);
2331 av_frame_unref(decoded_frame);
2332 return err < 0 ? err : ret;
/* Decode one video packet for this input stream: track dts so that frames
 * drained at EOF can still be stamped, run the decoder, sanity-check
 * dimension/format changes, derive a best-effort timestamp (or synthesize
 * one under -r/cfr), retrieve hwaccel data if needed, and forward the
 * frame to the stream's filtergraphs.  duration_pts returns the decoded
 * frame's packet duration to the caller. */
2335 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2338 AVFrame *decoded_frame;
2339 int i, ret = 0, err = 0;
2340 int64_t best_effort_timestamp;
2341 int64_t dts = AV_NOPTS_VALUE;
2344     // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2345     // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2347 if (!eof && pkt && pkt->size == 0)
2350 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2351 return AVERROR(ENOMEM);
2352 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2353 return AVERROR(ENOMEM);
2354 decoded_frame = ist->decoded_frame;
2355 if (ist->dts != AV_NOPTS_VALUE)
2356 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2359         avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2362     // The old code used to set dts on the drain packet, which does not work
2363     // with the new API anymore.
/* queue dts values so EOF-drained frames can recover a timestamp */
2365 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2367 return AVERROR(ENOMEM);
2368 ist->dts_buffer = new;
2369 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2372 update_benchmark(NULL);
2373 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2374 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2378     // The following line may be required in some cases where there is no parser
2379     // or the parser does not has_b_frames correctly
2380 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2381 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2382 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2384 av_log(ist->dec_ctx, AV_LOG_WARNING,
2385 "video_delay is larger in decoder than demuxer %d > %d.\n"
2386 "If you want to help, upload a sample "
2387 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2388 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2389 ist->dec_ctx->has_b_frames,
2390 ist->st->codecpar->video_delay);
2393 if (ret != AVERROR_EOF)
2394 check_decode_result(ist, got_output, ret);
/* debug aid: the decoder changed geometry/format mid-stream */
2396 if (*got_output && ret >= 0) {
2397 if (ist->dec_ctx->width != decoded_frame->width ||
2398 ist->dec_ctx->height != decoded_frame->height ||
2399 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2400 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2401 decoded_frame->width,
2402 decoded_frame->height,
2403 decoded_frame->format,
2404 ist->dec_ctx->width,
2405 ist->dec_ctx->height,
2406 ist->dec_ctx->pix_fmt);
2410 if (!*got_output || ret < 0)
/* honor an explicit -top override */
2413 if(ist->top_field_first>=0)
2414 decoded_frame->top_field_first = ist->top_field_first;
2416 ist->frames_decoded++;
/* download the frame from hw memory when a retrieve callback is set */
2418 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2419 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2423 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2425 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2426 *duration_pts = decoded_frame->pkt_duration;
/* forced CFR (-r on input): synthesize monotonically increasing pts */
2428 if (ist->framerate.num)
2429 best_effort_timestamp = ist->cfr_next_pts++;
/* EOF drain with no timestamp: pop the oldest queued dts instead */
2431 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2432 best_effort_timestamp = ist->dts_buffer[0];
2434 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2435 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2436 ist->nb_dts_buffer--;
2439 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2440 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2442 if (ts != AV_NOPTS_VALUE)
2443 ist->next_pts = ist->pts = ts;
2447 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2448 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2449 ist->st->index, av_ts2str(decoded_frame->pts),
2450 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2451 best_effort_timestamp,
2452 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2453 decoded_frame->key_frame, decoded_frame->pict_type,
2454 ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides the codec-level value */
2457 if (ist->st->sample_aspect_ratio.num)
2458 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2460 err = send_frame_to_filters(ist, decoded_frame);
2463 av_frame_unref(ist->filter_frame);
2464 av_frame_unref(decoded_frame);
2465 return err < 0 ? err : ret;
/* Decode one subtitle packet and dispatch the result to sub2video filters
 * and/or subtitle encoders.
 * NOTE(review): this extract is sampled — the embedded original line numbers
 * show gaps, so some statements/braces/returns are elided here. */
2468 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2471 AVSubtitle subtitle;
2473 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2474 &subtitle, got_output, pkt);
2476 check_decode_result(NULL, got_output, ret);
/* Decode failure or drain with no output: flush queued sub2video state. */
2478 if (ret < 0 || !*got_output) {
2481 sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle's display time so it ends
 * when the current one starts (durations in ms, pts in AV_TIME_BASE). */
2485 if (ist->fix_sub_duration) {
2487 if (ist->prev_sub.got_output) {
2488 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2489 1000, AV_TIME_BASE);
2490 if (end < ist->prev_sub.subtitle.end_display_time) {
2491 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2492 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2493 ist->prev_sub.subtitle.end_display_time, end,
2494 end <= 0 ? ", dropping it" : "");
2495 ist->prev_sub.subtitle.end_display_time = end;
/* Emit the previous subtitle now; keep the current one for the next call. */
2498 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2499 FFSWAP(int, ret, ist->prev_sub.ret);
2500 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* Route to sub2video: update the frame directly if one exists, otherwise
 * queue the subtitle until the filter graph is configured. */
2508 if (ist->sub2video.frame) {
2509 sub2video_update(ist, &subtitle);
2510 } else if (ist->nb_filters) {
2511 if (!ist->sub2video.sub_queue)
2512 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2513 if (!ist->sub2video.sub_queue)
/* Grow the FIFO when full before writing (realloc doubles its size). */
2515 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2516 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2520 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2524 if (!subtitle.num_rects)
2527 ist->frames_decoded++;
/* Send the subtitle to every matching subtitle encoder. */
2529 for (i = 0; i < nb_output_streams; i++) {
2530 OutputStream *ost = output_streams[i];
2532 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2533 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2536 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2541 avsubtitle_free(&subtitle);
/* Signal EOF (with the stream's last pts, rescaled to stream time base) to
 * every filtergraph input fed by this input stream. */
2545 static int send_filter_eof(InputStream *ist)
2548 /* TODO keep pts also in stream time base to avoid converting back */
2549 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2550 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2552 for (i = 0; i < ist->nb_filters; i++) {
2553 ret = ifilter_send_eof(ist->filters[i], pts);
2560 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet driver: decodes (audio/video/subtitle) or stream-copies,
 * and maintains the ist->dts/pts "next" timestamp estimates in AV_TIME_BASE.
 * NOTE(review): sampled extract — embedded line numbers show gaps, so some
 * statements and closing braces are elided in this view. */
2561 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2565 int eof_reached = 0;
/* First packet: seed ist->dts. For streams with B-frames the initial dts is
 * pushed negative by has_b_frames frame durations. */
2568 if (!ist->saw_first_ts) {
2569 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2571 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2572 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2573 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2575 ist->saw_first_ts = 1;
2578 if (ist->next_dts == AV_NOPTS_VALUE)
2579 ist->next_dts = ist->dts;
2580 if (ist->next_pts == AV_NOPTS_VALUE)
2581 ist->next_pts = ist->pts;
2585 av_init_packet(&avpkt);
/* Packet dts overrides our running estimate (rescaled to AV_TIME_BASE). */
2592 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2593 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2594 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2595 ist->next_pts = ist->pts = ist->dts;
2598 // while we have more to decode or while the decoder did output something on EOF
2599 while (ist->decoding_needed) {
2600 int64_t duration_dts = 0;
2601 int64_t duration_pts = 0;
2603 int decode_failed = 0;
2605 ist->pts = ist->next_pts;
2606 ist->dts = ist->next_dts;
2608 switch (ist->dec_ctx->codec_type) {
2609 case AVMEDIA_TYPE_AUDIO:
2610 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2613 case AVMEDIA_TYPE_VIDEO:
2614 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
/* Advance next_dts by the frame duration: packet duration if present,
 * otherwise derived from the decoder frame rate and repeat_pict. */
2616 if (!repeating || !pkt || got_output) {
2617 if (pkt && pkt->duration) {
2618 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2619 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2620 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2621 duration_dts = ((int64_t)AV_TIME_BASE *
2622 ist->dec_ctx->framerate.den * ticks) /
2623 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2626 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2627 ist->next_dts += duration_dts;
2629 ist->next_dts = AV_NOPTS_VALUE;
/* pts advances by the decoded frame's duration when known. */
2633 if (duration_pts > 0) {
2634 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2636 ist->next_pts += duration_dts;
2640 case AVMEDIA_TYPE_SUBTITLE:
2643 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2644 if (!pkt && ret >= 0)
2651 if (ret == AVERROR_EOF) {
2657 if (decode_failed) {
2658 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2659 ist->file_index, ist->st->index, av_err2str(ret));
2661 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2662 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2664 if (!decode_failed || exit_on_error)
2670 ist->got_output = 1;
2675 // During draining, we might get multiple output frames in this loop.
2676 // ffmpeg.c does not drain the filter chain on configuration changes,
2677 // which means if we send multiple frames at once to the filters, and
2678 // one of those frames changes configuration, the buffered frames will
2679 // be lost. This can upset certain FATE tests.
2680 // Decode only 1 frame per call on EOF to appease these FATE tests.
2681 // The ideal solution would be to rewrite decoding to use the new
2682 // decoding API in a better way.
2689 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2690 /* except when looping we need to flush but not to send an EOF */
2691 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2692 int ret = send_filter_eof(ist);
2694 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2699 /* handle stream copy */
/* No decoding: estimate next dts/pts purely from packet metadata. */
2700 if (!ist->decoding_needed && pkt) {
2701 ist->dts = ist->next_dts;
2702 switch (ist->dec_ctx->codec_type) {
2703 case AVMEDIA_TYPE_AUDIO:
2704 av_assert1(pkt->duration >= 0);
2705 if (ist->dec_ctx->sample_rate) {
2706 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2707 ist->dec_ctx->sample_rate;
2709 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2712 case AVMEDIA_TYPE_VIDEO:
2713 if (ist->framerate.num) {
2714 // TODO: Remove work-around for c99-to-c89 issue 7
2715 AVRational time_base_q = AV_TIME_BASE_Q;
2716 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2717 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2718 } else if (pkt->duration) {
2719 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2720 } else if(ist->dec_ctx->framerate.num != 0) {
2721 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2722 ist->next_dts += ((int64_t)AV_TIME_BASE *
2723 ist->dec_ctx->framerate.den * ticks) /
2724 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2728 ist->pts = ist->dts;
2729 ist->next_pts = ist->next_dts;
/* Forward the packet to every stream-copy output it matches. */
2731 for (i = 0; i < nb_output_streams; i++) {
2732 OutputStream *ost = output_streams[i];
2734 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2737 do_streamcopy(ist, ost, pkt);
2740 return !eof_reached;
/* Build an SDP description covering all RTP output files and print it to
 * stdout, or write it to -sdp_file if given. Waits until every output file
 * header has been written. */
2743 static void print_sdp(void)
2748 AVIOContext *sdp_pb;
2749 AVFormatContext **avc;
2751 for (i = 0; i < nb_output_files; i++) {
2752 if (!output_files[i]->header_written)
2756 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts them. */
2759 for (i = 0, j = 0; i < nb_output_files; i++) {
2760 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2761 avc[j] = output_files[i]->ctx;
2769 av_sdp_create(avc, j, sdp, sizeof(sdp));
2771 if (!sdp_filename) {
2772 printf("SDP:\n%s\n", sdp);
2775 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2776 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2778 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2779 avio_closep(&sdp_pb);
2780 av_freep(&sdp_filename);
/* AVCodecContext.get_format callback: pick a pixel format from the decoder's
 * candidate list, initializing the requested/auto-detected hwaccel when a
 * hardware format is offered.
 * NOTE(review): sampled extract — loop/brace structure is partially elided. */
2788 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2790 InputStream *ist = s->opaque;
2791 const enum AVPixelFormat *p;
/* The candidate list is terminated by AV_PIX_FMT_NONE; software formats
 * (no HWACCEL flag) are skipped past the hwaccel matching below. */
2794 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2795 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2796 const AVCodecHWConfig *config = NULL;
2799 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Generic/auto hwaccel: search the codec's hw configs for one matching
 * this pixel format that works via a device context. */
2802 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2803 ist->hwaccel_id == HWACCEL_AUTO) {
2805 config = avcodec_get_hw_config(s->codec, i);
2808 if (!(config->methods &
2809 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2811 if (config->pix_fmt == *p)
2816 if (config->device_type != ist->hwaccel_device_type) {
2817 // Different hwaccel offered, ignore.
2821 ret = hwaccel_decode_init(s);
/* Explicitly requested hwaccels are fatal if they cannot initialize;
 * AUTO mode silently falls through to the next candidate. */
2823 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2824 av_log(NULL, AV_LOG_FATAL,
2825 "%s hwaccel requested for input stream #%d:%d, "
2826 "but cannot be initialized.\n",
2827 av_hwdevice_get_type_name(config->device_type),
2828 ist->file_index, ist->st->index);
2829 return AV_PIX_FMT_NONE;
/* Legacy path: look the format up in the static hwaccels[] table. */
2834 const HWAccel *hwaccel = NULL;
2836 for (i = 0; hwaccels[i].name; i++) {
2837 if (hwaccels[i].pix_fmt == *p) {
2838 hwaccel = &hwaccels[i];
2843 // No hwaccel supporting this pixfmt.
2846 if (hwaccel->id != ist->hwaccel_id) {
2847 // Does not match requested hwaccel.
2851 ret = hwaccel->init(s);
2853 av_log(NULL, AV_LOG_FATAL,
2854 "%s hwaccel requested for input stream #%d:%d, "
2855 "but cannot be initialized.\n", hwaccel->name,
2856 ist->file_index, ist->st->index);
2857 return AV_PIX_FMT_NONE;
/* Propagate a pre-existing hw frames context to the decoder. */
2861 if (ist->hw_frames_ctx) {
2862 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2863 if (!s->hw_frames_ctx)
2864 return AV_PIX_FMT_NONE;
2867 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator for hardware frames, otherwise use the default allocator. */
2874 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2876 InputStream *ist = s->opaque;
2878 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2879 return ist->hwaccel_get_buffer(s, frame, flags);
2881 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (if decoding is needed), wiring up
 * the get_format/get_buffer callbacks and decoder options. On failure a
 * human-readable message is written into 'error'.
 * NOTE(review): sampled extract — some statements/braces are elided. */
2884 static int init_input_stream(int ist_index, char *error, int error_len)
2887 InputStream *ist = input_streams[ist_index];
2889 if (ist->decoding_needed) {
2890 AVCodec *codec = ist->dec;
2892 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2893 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2894 return AVERROR(EINVAL);
/* Hook the hwaccel-aware callbacks defined above. */
2897 ist->dec_ctx->opaque = ist;
2898 ist->dec_ctx->get_format = get_format;
2899 ist->dec_ctx->get_buffer2 = get_buffer;
2900 ist->dec_ctx->thread_safe_callbacks = 1;
2902 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2903 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2904 (ist->decoding_needed & DECODING_FOR_OST)) {
2905 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2906 if (ist->decoding_needed & DECODING_FOR_FILTER)
2907 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2910 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2912 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2913 * audio, and video decoders such as cuvid or mediacodec */
2914 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2916 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2917 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2918 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2919 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2920 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2922 ret = hw_device_setup_for_decode(ist);
2924 snprintf(error, error_len, "Device setup failed for "
2925 "decoder on input stream #%d:%d : %s",
2926 ist->file_index, ist->st->index, av_err2str(ret));
2930 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2931 if (ret == AVERROR_EXPERIMENTAL)
2932 abort_codec_experimental(codec, 0);
2934 snprintf(error, error_len,
2935 "Error while opening decoder for input stream "
2937 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left in the dict was not consumed — report it. */
2940 assert_avoptions(ist->decoder_opts);
2943 ist->next_pts = AV_NOPTS_VALUE;
2944 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream feeding this output stream, if it has a direct
 * source (source_index >= 0). */
2949 static InputStream *get_input_stream(OutputStream *ost)
2951 if (ost->source_index >= 0)
2952 return input_streams[ost->source_index];
/* qsort comparator for int64_t values (ascending). */
2956 static int compare_int64(const void *a, const void *b)
2958 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2961 /* open the muxer when all the streams are initialized */
/* Writes the container header once every stream of the file is initialized,
 * then flushes packets buffered in the per-stream muxing queues. */
2962 static int check_init_output_file(OutputFile *of, int file_index)
/* Bail out while any stream is still uninitialized. */
2966 for (i = 0; i < of->ctx->nb_streams; i++) {
2967 OutputStream *ost = output_streams[of->ost_index + i];
2968 if (!ost->initialized)
2972 of->ctx->interrupt_callback = int_cb;
2974 ret = avformat_write_header(of->ctx, &of->opts);
2976 av_log(NULL, AV_LOG_ERROR,
2977 "Could not write header for output file #%d "
2978 "(incorrect codec parameters ?): %s\n",
2979 file_index, av_err2str(ret));
2982 //assert_avoptions(of->opts);
2983 of->header_written = 1;
2985 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2987 if (sdp_filename || want_sdp)
2990 /* flush the muxing queues */
2991 for (i = 0; i < of->ctx->nb_streams; i++) {
2992 OutputStream *ost = output_streams[of->ost_index + i];
2994 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2995 if (!av_fifo_size(ost->muxing_queue))
2996 ost->mux_timebase = ost->st->time_base;
2998 while (av_fifo_size(ost->muxing_queue)) {
3000 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3001 write_packet(of, &pkt, ost, 1);
/* Initialize the output stream's bitstream-filter chain: parameters and
 * time base flow from the stream through each filter in order, and the
 * final filter's output parameters are copied back to the stream. */
3008 static int init_output_bsfs(OutputStream *ost)
3013 if (!ost->nb_bitstream_filters)
3016 for (i = 0; i < ost->nb_bitstream_filters; i++) {
3017 ctx = ost->bsf_ctx[i];
/* Filter i's input comes from filter i-1's output (or the stream itself). */
3019 ret = avcodec_parameters_copy(ctx->par_in,
3020 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3024 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3026 ret = av_bsf_init(ctx);
3028 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3029 ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines what the muxer will see. */
3034 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3035 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3039 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream in stream-copy (-c copy) mode: copy codec
 * parameters, side data, timing and disposition from the source stream,
 * with per-codec-type fixups.
 * NOTE(review): sampled extract — some statements/braces are elided. */
3044 static int init_output_stream_streamcopy(OutputStream *ost)
3046 OutputFile *of = output_files[ost->file_index];
3047 InputStream *ist = get_input_stream(ost);
3048 AVCodecParameters *par_dst = ost->st->codecpar;
3049 AVCodecParameters *par_src = ost->ref_par;
3052 uint32_t codec_tag = par_dst->codec_tag;
3054 av_assert0(ist && !ost->filter);
3056 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3058 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3060 av_log(NULL, AV_LOG_FATAL,
3061 "Error setting up codec context options.\n");
3065 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3067 av_log(NULL, AV_LOG_FATAL,
3068 "Error getting reference codec parameters.\n");
/* Keep the source codec_tag only when the output container either has no
 * tag table, maps this tag to the same codec id, or has no tag of its own. */
3073 unsigned int codec_tag_tmp;
3074 if (!of->ctx->oformat->codec_tag ||
3075 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3076 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3077 codec_tag = par_src->codec_tag;
3080 ret = avcodec_parameters_copy(par_dst, par_src);
3084 par_dst->codec_tag = codec_tag;
3086 if (!ost->frame_rate.num)
3087 ost->frame_rate = ist->framerate;
3088 ost->st->avg_frame_rate = ost->frame_rate;
3090 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3094 // copy timebase while removing common factors
3095 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3096 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3098 // copy estimated duration as a hint to the muxer
3099 if (ost->st->duration <= 0 && ist->st->duration > 0)
3100 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3103 ost->st->disposition = ist->st->disposition;
/* Copy all stream-level packet side data across. */
3105 if (ist->st->nb_side_data) {
3106 for (i = 0; i < ist->st->nb_side_data; i++) {
3107 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3110 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3112 return AVERROR(ENOMEM);
3113 memcpy(dst_data, sd_src->data, sd_src->size);
/* A user-supplied rotation overrides any copied display matrix. */
3117 if (ost->rotate_overridden) {
3118 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3119 sizeof(int32_t) * 9);
3121 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3124 switch (par_dst->codec_type) {
3125 case AVMEDIA_TYPE_AUDIO:
3126 if (audio_volume != 256) {
3127 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Some demuxers report bogus block_align for MP3/AC3; clear it. */
3130 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3131 par_dst->block_align= 0;
3132 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3133 par_dst->block_align= 0;
3135 case AVMEDIA_TYPE_VIDEO:
3136 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3138 av_mul_q(ost->frame_aspect_ratio,
3139 (AVRational){ par_dst->height, par_dst->width });
3140 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3141 "with stream copy may produce invalid files\n");
3143 else if (ist->st->sample_aspect_ratio.num)
3144 sar = ist->st->sample_aspect_ratio;
3146 sar = par_src->sample_aspect_ratio;
3147 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3148 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3149 ost->st->r_frame_rate = ist->st->r_frame_rate;
3153 ost->mux_timebase = ist->st->time_base;
/* Set the stream's "encoder" metadata tag to identify libavcodec and the
 * encoder name, unless the user already set one or bitexact mode is on. */
3158 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3160 AVDictionaryEntry *e;
3162 uint8_t *encoder_string;
3163 int encoder_string_len;
3164 int format_flags = 0;
3165 int codec_flags = ost->enc_ctx->flags;
/* Respect a user-provided encoder tag. */
3167 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Parse -fflags / -flags option strings to detect bitexact mode. */
3170 e = av_dict_get(of->opts, "fflags", NULL, 0);
3172 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3175 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3177 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3179 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3182 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3185 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3186 encoder_string = av_mallocz(encoder_string_len);
3187 if (!encoder_string)
/* Bitexact output uses the stable "Lavc" prefix instead of the full ident. */
3190 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3191 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3193 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3194 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3195 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3196 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames time list (comma-separated times, optionally
 * "chapters[+offset]") into a sorted array of pts values in the encoder
 * time base, stored in ost->forced_kf_pts/forced_kf_count. */
3199 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3200 AVCodecContext *avctx)
3203 int n = 1, i, size, index = 0;
/* n = number of comma-separated entries (counting loop body elided here). */
3206 for (p = kf; *p; p++)
3210 pts = av_malloc_array(size, sizeof(*pts));
3212 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3217 for (i = 0; i < n; i++) {
3218 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at every chapter start. */
3223 if (!memcmp(p, "chapters", 8)) {
3225 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array to hold one entry per chapter (overflow-checked). */
3228 if (avf->nb_chapters > INT_MAX - size ||
3229 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3231 av_log(NULL, AV_LOG_FATAL,
3232 "Could not allocate forced key frames array.\n");
3235 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3236 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3238 for (j = 0; j < avf->nb_chapters; j++) {
3239 AVChapter *c = avf->chapters[j];
3240 av_assert1(index < size);
3241 pts[index++] = av_rescale_q(c->start, c->time_base,
3242 avctx->time_base) + t;
/* Plain timestamp entry. */
3247 t = parse_time_or_die("force_key_frames", p, 1);
3248 av_assert1(index < size);
3249 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3256 av_assert0(index == size);
3257 qsort(pts, size, sizeof(*pts), compare_int64);
3258 ost->forced_kf_count = size;
3259 ost->forced_kf_pts = pts;
/* Choose the encoder time base: an explicit -enc_time_base wins; a negative
 * value means "copy from the input stream"; otherwise use the caller's
 * default (sample rate for audio, inverse frame rate for video). */
3262 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3264 InputStream *ist = get_input_stream(ost);
3265 AVCodecContext *enc_ctx = ost->enc_ctx;
3266 AVFormatContext *oc;
3268 if (ost->enc_timebase.num > 0) {
3269 enc_ctx->time_base = ost->enc_timebase;
3273 if (ost->enc_timebase.num < 0) {
3275 enc_ctx->time_base = ist->st->time_base;
/* "-1" requested but no input stream available: warn and fall back. */
3279 oc = output_files[ost->file_index]->ctx;
3280 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3283 enc_ctx->time_base = default_time_base;
/* Configure the encoder context for an output stream that will be encoded:
 * disposition, frame rate selection, per-media-type format/timing setup,
 * and forced-keyframe parsing.
 * NOTE(review): sampled extract — some statements/braces are elided. */
3286 static int init_output_stream_encode(OutputStream *ost)
3288 InputStream *ist = get_input_stream(ost);
3289 AVCodecContext *enc_ctx = ost->enc_ctx;
3290 AVCodecContext *dec_ctx = NULL;
3291 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3294 set_encoder_id(output_files[ost->file_index], ost);
3296 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3297 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3298 // which have to be filtered out to prevent leaking them to output files.
3299 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3302 ost->st->disposition = ist->st->disposition;
3304 dec_ctx = ist->dec_ctx;
3306 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its type in the file, mark it default. */
3308 for (j = 0; j < oc->nb_streams; j++) {
3309 AVStream *st = oc->streams[j];
3310 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3313 if (j == oc->nb_streams)
3314 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3315 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3316 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate fallback chain: -r option, filter sink, input -r, input
 * r_frame_rate, and finally a hardcoded 25 fps default. */
3319 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3320 if (!ost->frame_rate.num)
3321 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3322 if (ist && !ost->frame_rate.num)
3323 ost->frame_rate = ist->framerate;
3324 if (ist && !ost->frame_rate.num)
3325 ost->frame_rate = ist->st->r_frame_rate;
3326 if (ist && !ost->frame_rate.num) {
3327 ost->frame_rate = (AVRational){25, 1};
3328 av_log(NULL, AV_LOG_WARNING,
3330 "about the input framerate is available. Falling "
3331 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3332 "if you want a different framerate.\n",
3333 ost->file_index, ost->index);
/* Snap to the nearest rate the encoder supports (unless -force_fps). */
3336 if (ost->enc->supported_framerates && !ost->force_fps) {
3337 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3338 ost->frame_rate = ost->enc->supported_framerates[idx];
3340 // reduce frame rate for mpeg4 to be within the spec limits
3341 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3342 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3343 ost->frame_rate.num, ost->frame_rate.den, 65535);
3347 switch (enc_ctx->codec_type) {
3348 case AVMEDIA_TYPE_AUDIO:
/* Audio format/rate/layout come from the filtergraph sink. */
3349 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3351 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3352 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3353 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3354 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3355 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3357 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3360 case AVMEDIA_TYPE_VIDEO:
3361 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3363 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3364 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3365 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3366 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3367 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3368 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale pre-parsed forced keyframe times into the encoder time base. */
3370 for (j = 0; j < ost->forced_kf_count; j++)
3371 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3373 enc_ctx->time_base);
3375 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3376 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3377 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3378 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3379 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3380 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3382 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3384 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3385 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3387 enc_ctx->framerate = ost->frame_rate;
3389 ost->st->avg_frame_rate = ost->frame_rate;
/* Geometry/format changed relative to the decoder: the decoder's raw
 * bit depth no longer applies, use the user-specified value. */
3392 enc_ctx->width != dec_ctx->width ||
3393 enc_ctx->height != dec_ctx->height ||
3394 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3395 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3398 if (ost->top_field_first == 0) {
3399 enc_ctx->field_order = AV_FIELD_BB;
3400 } else if (ost->top_field_first == 1) {
3401 enc_ctx->field_order = AV_FIELD_TT;
/* -force_key_frames: either an expression ("expr:..."), the literal
 * "source" (keep input keyframes), or a static time list. */
3404 if (ost->forced_keyframes) {
3405 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3406 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3407 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3409 av_log(NULL, AV_LOG_ERROR,
3410 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3413 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3414 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3415 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3416 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3418 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3419 // parse it only for static kf timings
3420 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3421 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3425 case AVMEDIA_TYPE_SUBTITLE:
3426 enc_ctx->time_base = AV_TIME_BASE_Q;
3427 if (!enc_ctx->width) {
3428 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3429 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3432 case AVMEDIA_TYPE_DATA:
3439 ost->mux_timebase = enc_ctx->time_base;
3444 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3448 if (ost->encoding_needed) {
3449 AVCodec *codec = ost->enc;
3450 AVCodecContext *dec = NULL;
3453 ret = init_output_stream_encode(ost);
3457 if ((ist = get_input_stream(ost)))
3459 if (dec && dec->subtitle_header) {
3460 /* ASS code assumes this buffer is null terminated so add extra byte. */
3461 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3462 if (!ost->enc_ctx->subtitle_header)
3463 return AVERROR(ENOMEM);
3464 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3465 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3467 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3468 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3469 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3471 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3472 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3473 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3475 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3476 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3477 av_buffersink_get_format(ost->filter->filter)) {
3478 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3479 if (!ost->enc_ctx->hw_frames_ctx)
3480 return AVERROR(ENOMEM);
3482 ret = hw_device_setup_for_encode(ost);
3484 snprintf(error, error_len, "Device setup failed for "
3485 "encoder on output stream #%d:%d : %s",
3486 ost->file_index, ost->index, av_err2str(ret));
3490 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3491 int input_props = 0, output_props = 0;
3492 AVCodecDescriptor const *input_descriptor =
3493 avcodec_descriptor_get(dec->codec_id);
3494 AVCodecDescriptor const *output_descriptor =
3495 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3496 if (input_descriptor)
3497 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3498 if (output_descriptor)
3499 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3500 if (input_props && output_props && input_props != output_props) {
3501 snprintf(error, error_len,
3502 "Subtitle encoding currently only possible from text to text "
3503 "or bitmap to bitmap");
3504 return AVERROR_INVALIDDATA;
3508 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3509 if (ret == AVERROR_EXPERIMENTAL)
3510 abort_codec_experimental(codec, 1);
3511 snprintf(error, error_len,
3512 "Error while opening encoder for output stream #%d:%d - "
3513 "maybe incorrect parameters such as bit_rate, rate, width or height",
3514 ost->file_index, ost->index);
3517 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3518 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3519 av_buffersink_set_frame_size(ost->filter->filter,
3520 ost->enc_ctx->frame_size);
3521 assert_avoptions(ost->encoder_opts);
3522 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3523 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3524 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3525 " It takes bits/s as argument, not kbits/s\n");
3527 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3529 av_log(NULL, AV_LOG_FATAL,
3530 "Error initializing the output stream codec context.\n");
3534 * FIXME: ost->st->codec shouldn't be needed here anymore.
3536 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3540 if (ost->enc_ctx->nb_coded_side_data) {
3543 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3544 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3547 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3549 return AVERROR(ENOMEM);
3550 memcpy(dst_data, sd_src->data, sd_src->size);
3555 * Add global input side data. For now this is naive, and copies it
3556 * from the input stream's global side data. All side data should
3557 * really be funneled over AVFrame and libavfilter, then added back to
3558 * packet side data, and then potentially using the first packet for
3563 for (i = 0; i < ist->st->nb_side_data; i++) {
3564 AVPacketSideData *sd = &ist->st->side_data[i];
3565 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3567 return AVERROR(ENOMEM);
3568 memcpy(dst, sd->data, sd->size);
3569 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3570 av_display_rotation_set((uint32_t *)dst, 0);
3574 // copy timebase while removing common factors
3575 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3576 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3578 // copy estimated duration as a hint to the muxer
3579 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3580 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3582 ost->st->codec->codec= ost->enc_ctx->codec;
3583 } else if (ost->stream_copy) {
3584 ret = init_output_stream_streamcopy(ost);
3589 // parse user provided disposition, and update stream values
3590 if (ost->disposition) {
3591 static const AVOption opts[] = {
3592 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3593 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3594 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3595 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3596 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3597 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3598 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3599 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3600 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3601 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3602 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3603 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3604 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3605 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3606 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3607 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3610 static const AVClass class = {
3612 .item_name = av_default_item_name,
3614 .version = LIBAVUTIL_VERSION_INT,
3616 const AVClass *pclass = &class;
3618 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3623 /* initialize bitstream filters for the output stream
3624 * needs to be done here, because the codec id for streamcopy is not
3625 * known until now */
3626 ret = init_output_bsfs(ost);
3630 ost->initialized = 1;
3632 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3639 static void report_new_stream(int input_index, AVPacket *pkt)
3641 InputFile *file = input_files[input_index];
3642 AVStream *st = file->ctx->streams[pkt->stream_index];
3644 if (pkt->stream_index < file->nb_streams_warn)
3646 av_log(file->ctx, AV_LOG_WARNING,
3647 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3648 av_get_media_type_string(st->codecpar->codec_type),
3649 input_index, pkt->stream_index,
3650 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3651 file->nb_streams_warn = pkt->stream_index + 1;
3654 static int transcode_init(void)
3656 int ret = 0, i, j, k;
3657 AVFormatContext *oc;
3660 char error[1024] = {0};
3662 for (i = 0; i < nb_filtergraphs; i++) {
3663 FilterGraph *fg = filtergraphs[i];
3664 for (j = 0; j < fg->nb_outputs; j++) {
3665 OutputFilter *ofilter = fg->outputs[j];
3666 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3668 if (fg->nb_inputs != 1)
3670 for (k = nb_input_streams-1; k >= 0 ; k--)
3671 if (fg->inputs[0]->ist == input_streams[k])
3673 ofilter->ost->source_index = k;
3677 /* init framerate emulation */
3678 for (i = 0; i < nb_input_files; i++) {
3679 InputFile *ifile = input_files[i];
3680 if (ifile->rate_emu)
3681 for (j = 0; j < ifile->nb_streams; j++)
3682 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3685 /* init input streams */
3686 for (i = 0; i < nb_input_streams; i++)
3687 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3688 for (i = 0; i < nb_output_streams; i++) {
3689 ost = output_streams[i];
3690 avcodec_close(ost->enc_ctx);
3695 /* open each encoder */
3696 for (i = 0; i < nb_output_streams; i++) {
3697 // skip streams fed from filtergraphs until we have a frame for them
3698 if (output_streams[i]->filter)
3701 ret = init_output_stream(output_streams[i], error, sizeof(error));
3706 /* discard unused programs */
3707 for (i = 0; i < nb_input_files; i++) {
3708 InputFile *ifile = input_files[i];
3709 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3710 AVProgram *p = ifile->ctx->programs[j];
3711 int discard = AVDISCARD_ALL;
3713 for (k = 0; k < p->nb_stream_indexes; k++)
3714 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3715 discard = AVDISCARD_DEFAULT;
3718 p->discard = discard;
3722 /* write headers for files with no streams */
3723 for (i = 0; i < nb_output_files; i++) {
3724 oc = output_files[i]->ctx;
3725 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3726 ret = check_init_output_file(output_files[i], i);
3733 /* dump the stream mapping */
3734 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3735 for (i = 0; i < nb_input_streams; i++) {
3736 ist = input_streams[i];
3738 for (j = 0; j < ist->nb_filters; j++) {
3739 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3740 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3741 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3742 ist->filters[j]->name);
3743 if (nb_filtergraphs > 1)
3744 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3745 av_log(NULL, AV_LOG_INFO, "\n");
3750 for (i = 0; i < nb_output_streams; i++) {
3751 ost = output_streams[i];
3753 if (ost->attachment_filename) {
3754 /* an attached file */
3755 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3756 ost->attachment_filename, ost->file_index, ost->index);
3760 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3761 /* output from a complex graph */
3762 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3763 if (nb_filtergraphs > 1)
3764 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3766 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3767 ost->index, ost->enc ? ost->enc->name : "?");
3771 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3772 input_streams[ost->source_index]->file_index,
3773 input_streams[ost->source_index]->st->index,
3776 if (ost->sync_ist != input_streams[ost->source_index])
3777 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3778 ost->sync_ist->file_index,
3779 ost->sync_ist->st->index);
3780 if (ost->stream_copy)
3781 av_log(NULL, AV_LOG_INFO, " (copy)");
3783 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3784 const AVCodec *out_codec = ost->enc;
3785 const char *decoder_name = "?";
3786 const char *in_codec_name = "?";
3787 const char *encoder_name = "?";
3788 const char *out_codec_name = "?";
3789 const AVCodecDescriptor *desc;
3792 decoder_name = in_codec->name;
3793 desc = avcodec_descriptor_get(in_codec->id);
3795 in_codec_name = desc->name;
3796 if (!strcmp(decoder_name, in_codec_name))
3797 decoder_name = "native";
3801 encoder_name = out_codec->name;
3802 desc = avcodec_descriptor_get(out_codec->id);
3804 out_codec_name = desc->name;
3805 if (!strcmp(encoder_name, out_codec_name))
3806 encoder_name = "native";
3809 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3810 in_codec_name, decoder_name,
3811 out_codec_name, encoder_name);
3813 av_log(NULL, AV_LOG_INFO, "\n");
3817 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3821 atomic_store(&transcode_init_done, 1);
3826 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3827 static int need_output(void)
3831 for (i = 0; i < nb_output_streams; i++) {
3832 OutputStream *ost = output_streams[i];
3833 OutputFile *of = output_files[ost->file_index];
3834 AVFormatContext *os = output_files[ost->file_index]->ctx;
3836 if (ost->finished ||
3837 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3839 if (ost->frame_number >= ost->max_frames) {
3841 for (j = 0; j < of->ctx->nb_streams; j++)
3842 close_output_stream(output_streams[of->ost_index + j]);
3853 * Select the output stream to process.
3855 * @return selected output stream, or NULL if none available
3857 static OutputStream *choose_output(void)
3860 int64_t opts_min = INT64_MAX;
3861 OutputStream *ost_min = NULL;
3863 for (i = 0; i < nb_output_streams; i++) {
3864 OutputStream *ost = output_streams[i];
3865 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3866 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3868 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3869 av_log(NULL, AV_LOG_DEBUG,
3870 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3871 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3873 if (!ost->initialized && !ost->inputs_done)
3876 if (!ost->finished && opts < opts_min) {
3878 ost_min = ost->unavailable ? NULL : ost;
3884 static void set_tty_echo(int on)
3888 if (tcgetattr(0, &tty) == 0) {
3889 if (on) tty.c_lflag |= ECHO;
3890 else tty.c_lflag &= ~ECHO;
3891 tcsetattr(0, TCSANOW, &tty);
3896 static int check_keyboard_interaction(int64_t cur_time)
3899 static int64_t last_time;
3900 if (received_nb_signals)
3901 return AVERROR_EXIT;
3902 /* read_key() returns 0 on EOF */
3903 if(cur_time - last_time >= 100000 && !run_as_daemon){
3905 last_time = cur_time;
3909 return AVERROR_EXIT;
3910 if (key == '+') av_log_set_level(av_log_get_level()+10);
3911 if (key == '-') av_log_set_level(av_log_get_level()-10);
3912 if (key == 's') qp_hist ^= 1;
3915 do_hex_dump = do_pkt_dump = 0;
3916 } else if(do_pkt_dump){
3920 av_log_set_level(AV_LOG_DEBUG);
3922 if (key == 'c' || key == 'C'){
3923 char buf[4096], target[64], command[256], arg[256] = {0};
3926 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3929 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3934 fprintf(stderr, "\n");
3936 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3937 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3938 target, time, command, arg);
3939 for (i = 0; i < nb_filtergraphs; i++) {
3940 FilterGraph *fg = filtergraphs[i];
3943 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3944 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3945 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3946 } else if (key == 'c') {
3947 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3948 ret = AVERROR_PATCHWELCOME;
3950 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3952 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3957 av_log(NULL, AV_LOG_ERROR,
3958 "Parse error, at least 3 arguments were expected, "
3959 "only %d given in string '%s'\n", n, buf);
3962 if (key == 'd' || key == 'D'){
3965 debug = input_streams[0]->st->codec->debug<<1;
3966 if(!debug) debug = 1;
3967 while(debug & (FF_DEBUG_DCT_COEFF
3969 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3971 )) //unsupported, would just crash
3978 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3983 fprintf(stderr, "\n");
3984 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3985 fprintf(stderr,"error parsing debug value\n");
3987 for(i=0;i<nb_input_streams;i++) {
3988 input_streams[i]->st->codec->debug = debug;
3990 for(i=0;i<nb_output_streams;i++) {
3991 OutputStream *ost = output_streams[i];
3992 ost->enc_ctx->debug = debug;
3994 if(debug) av_log_set_level(AV_LOG_DEBUG);
3995 fprintf(stderr,"debug=%d\n", debug);
3998 fprintf(stderr, "key function\n"
3999 "? show this help\n"
4000 "+ increase verbosity\n"
4001 "- decrease verbosity\n"
4002 "c Send command to first matching filter supporting it\n"
4003 "C Send/Queue command to all matching filters\n"
4004 "D cycle through available debug modes\n"
4005 "h dump packets/hex press to cycle through the 3 states\n"
4007 "s Show QP histogram\n"
4014 static void *input_thread(void *arg)
4017 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4022 ret = av_read_frame(f->ctx, &pkt);
4024 if (ret == AVERROR(EAGAIN)) {
4029 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4032 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4033 if (flags && ret == AVERROR(EAGAIN)) {
4035 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4036 av_log(f->ctx, AV_LOG_WARNING,
4037 "Thread message queue blocking; consider raising the "
4038 "thread_queue_size option (current value: %d)\n",
4039 f->thread_queue_size);
4042 if (ret != AVERROR_EOF)
4043 av_log(f->ctx, AV_LOG_ERROR,
4044 "Unable to send packet to main thread: %s\n",
4046 av_packet_unref(&pkt);
4047 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4055 static void free_input_thread(int i)
4057 InputFile *f = input_files[i];
4060 if (!f || !f->in_thread_queue)
4062 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4063 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4064 av_packet_unref(&pkt);
4066 pthread_join(f->thread, NULL);
4068 av_thread_message_queue_free(&f->in_thread_queue);
4071 static void free_input_threads(void)
4075 for (i = 0; i < nb_input_files; i++)
4076 free_input_thread(i);
4079 static int init_input_thread(int i)
4082 InputFile *f = input_files[i];
4084 if (nb_input_files == 1)
4087 if (f->ctx->pb ? !f->ctx->pb->seekable :
4088 strcmp(f->ctx->iformat->name, "lavfi"))
4089 f->non_blocking = 1;
4090 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4091 f->thread_queue_size, sizeof(AVPacket));
4095 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4096 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4097 av_thread_message_queue_free(&f->in_thread_queue);
4098 return AVERROR(ret);
4104 static int init_input_threads(void)
4108 for (i = 0; i < nb_input_files; i++) {
4109 ret = init_input_thread(i);
4116 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4118 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4120 AV_THREAD_MESSAGE_NONBLOCK : 0);
4124 static int get_input_packet(InputFile *f, AVPacket *pkt)
4128 for (i = 0; i < f->nb_streams; i++) {
4129 InputStream *ist = input_streams[f->ist_index + i];
4130 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4131 int64_t now = av_gettime_relative() - ist->start;
4133 return AVERROR(EAGAIN);
4138 if (nb_input_files > 1)
4139 return get_input_packet_mt(f, pkt);
4141 return av_read_frame(f->ctx, pkt);
4144 static int got_eagain(void)
4147 for (i = 0; i < nb_output_streams; i++)
4148 if (output_streams[i]->unavailable)
4153 static void reset_eagain(void)
4156 for (i = 0; i < nb_input_files; i++)
4157 input_files[i]->eagain = 0;
4158 for (i = 0; i < nb_output_streams; i++)
4159 output_streams[i]->unavailable = 0;
4162 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4163 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4164 AVRational time_base)
4170 return tmp_time_base;
4173 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4176 return tmp_time_base;
4182 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4185 AVCodecContext *avctx;
4186 int i, ret, has_audio = 0;
4187 int64_t duration = 0;
4189 ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4193 for (i = 0; i < ifile->nb_streams; i++) {
4194 ist = input_streams[ifile->ist_index + i];
4195 avctx = ist->dec_ctx;
4197 /* duration is the length of the last frame in a stream
4198 * when audio stream is present we don't care about
4199 * last video frame length because it's not defined exactly */
4200 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4204 for (i = 0; i < ifile->nb_streams; i++) {
4205 ist = input_streams[ifile->ist_index + i];
4206 avctx = ist->dec_ctx;
4209 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4210 AVRational sample_rate = {1, avctx->sample_rate};
4212 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4217 if (ist->framerate.num) {
4218 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4219 } else if (ist->st->avg_frame_rate.num) {
4220 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4225 if (!ifile->duration)
4226 ifile->time_base = ist->st->time_base;
4227 /* the total duration of the stream, max_pts - min_pts is
4228 * the duration of the stream without the last frame */
4229 duration += ist->max_pts - ist->min_pts;
4230 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4234 if (ifile->loop > 0)
4242 * - 0 -- one packet was read and processed
4243 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4244 * this function should be called again
4245 * - AVERROR_EOF -- this function should not be called again
4247 static int process_input(int file_index)
4249 InputFile *ifile = input_files[file_index];
4250 AVFormatContext *is;
4253 int ret, thread_ret, i, j;
4258 ret = get_input_packet(ifile, &pkt);
4260 if (ret == AVERROR(EAGAIN)) {
4264 if (ret < 0 && ifile->loop) {
4265 AVCodecContext *avctx;
4266 for (i = 0; i < ifile->nb_streams; i++) {
4267 ist = input_streams[ifile->ist_index + i];
4268 avctx = ist->dec_ctx;
4269 if (ist->decoding_needed) {
4270 ret = process_input_packet(ist, NULL, 1);
4273 avcodec_flush_buffers(avctx);
4277 free_input_thread(file_index);
4279 ret = seek_to_start(ifile, is);
4281 thread_ret = init_input_thread(file_index);
4286 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4288 ret = get_input_packet(ifile, &pkt);
4289 if (ret == AVERROR(EAGAIN)) {
4295 if (ret != AVERROR_EOF) {
4296 print_error(is->url, ret);
4301 for (i = 0; i < ifile->nb_streams; i++) {
4302 ist = input_streams[ifile->ist_index + i];
4303 if (ist->decoding_needed) {
4304 ret = process_input_packet(ist, NULL, 0);
4309 /* mark all outputs that don't go through lavfi as finished */
4310 for (j = 0; j < nb_output_streams; j++) {
4311 OutputStream *ost = output_streams[j];
4313 if (ost->source_index == ifile->ist_index + i &&
4314 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4315 finish_output_stream(ost);
4319 ifile->eof_reached = 1;
4320 return AVERROR(EAGAIN);
4326 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4327 is->streams[pkt.stream_index]);
4329 /* the following test is needed in case new streams appear
4330 dynamically in stream : we ignore them */
4331 if (pkt.stream_index >= ifile->nb_streams) {
4332 report_new_stream(file_index, &pkt);
4333 goto discard_packet;
4336 ist = input_streams[ifile->ist_index + pkt.stream_index];
4338 ist->data_size += pkt.size;
4342 goto discard_packet;
4344 if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4345 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4346 "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4352 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4353 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4354 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4355 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4356 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4357 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4358 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4359 av_ts2str(input_files[ist->file_index]->ts_offset),
4360 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4363 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4364 int64_t stime, stime2;
4365 // Correcting starttime based on the enabled streams
4366 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4367 // so we instead do it here as part of discontinuity handling
4368 if ( ist->next_dts == AV_NOPTS_VALUE
4369 && ifile->ts_offset == -is->start_time
4370 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4371 int64_t new_start_time = INT64_MAX;
4372 for (i=0; i<is->nb_streams; i++) {
4373 AVStream *st = is->streams[i];
4374 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4376 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4378 if (new_start_time > is->start_time) {
4379 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4380 ifile->ts_offset = -new_start_time;
4384 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4385 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4386 ist->wrap_correction_done = 1;
4388 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4389 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4390 ist->wrap_correction_done = 0;
4392 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4393 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4394 ist->wrap_correction_done = 0;
4398 /* add the stream-global side data to the first packet */
4399 if (ist->nb_packets == 1) {
4400 for (i = 0; i < ist->st->nb_side_data; i++) {
4401 AVPacketSideData *src_sd = &ist->st->side_data[i];
4404 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4407 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4410 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4414 memcpy(dst_data, src_sd->data, src_sd->size);
4418 if (pkt.dts != AV_NOPTS_VALUE)
4419 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4420 if (pkt.pts != AV_NOPTS_VALUE)
4421 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4423 if (pkt.pts != AV_NOPTS_VALUE)
4424 pkt.pts *= ist->ts_scale;
4425 if (pkt.dts != AV_NOPTS_VALUE)
4426 pkt.dts *= ist->ts_scale;
4428 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4429 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4430 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4431 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4432 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4433 int64_t delta = pkt_dts - ifile->last_ts;
4434 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4435 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4436 ifile->ts_offset -= delta;
4437 av_log(NULL, AV_LOG_DEBUG,
4438 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4439 delta, ifile->ts_offset);
4440 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4441 if (pkt.pts != AV_NOPTS_VALUE)
4442 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4446 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4447 if (pkt.pts != AV_NOPTS_VALUE) {
4448 pkt.pts += duration;
4449 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4450 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4453 if (pkt.dts != AV_NOPTS_VALUE)
4454 pkt.dts += duration;
4456 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4457 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4458 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4459 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4461 int64_t delta = pkt_dts - ist->next_dts;
4462 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4463 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4464 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4465 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4466 ifile->ts_offset -= delta;
4467 av_log(NULL, AV_LOG_DEBUG,
4468 "timestamp discontinuity for stream #%d:%d "
4469 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4470 ist->file_index, ist->st->index, ist->st->id,
4471 av_get_media_type_string(ist->dec_ctx->codec_type),
4472 delta, ifile->ts_offset);
4473 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4474 if (pkt.pts != AV_NOPTS_VALUE)
4475 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4478 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4479 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4480 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4481 pkt.dts = AV_NOPTS_VALUE;
4483 if (pkt.pts != AV_NOPTS_VALUE){
4484 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4485 delta = pkt_pts - ist->next_dts;
4486 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4487 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4488 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4489 pkt.pts = AV_NOPTS_VALUE;
4495 if (pkt.dts != AV_NOPTS_VALUE)
4496 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4499 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4500 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4501 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4502 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4503 av_ts2str(input_files[ist->file_index]->ts_offset),
4504 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4507 sub2video_heartbeat(ist, pkt.pts);
4509 process_input_packet(ist, &pkt, 0);
4512 av_packet_unref(&pkt);
4518 * Perform a step of transcoding for the specified filter graph.
4520 * @param[in] graph filter graph to consider
4521 * @param[out] best_ist input stream where a frame would allow to continue
4522 * @return 0 for success, <0 for error
4524 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4527 int nb_requests, nb_requests_max = 0;
4528 InputFilter *ifilter;
4532 ret = avfilter_graph_request_oldest(graph->graph);
4534 return reap_filters(0);
4536 if (ret == AVERROR_EOF) {
4537 ret = reap_filters(1);
4538 for (i = 0; i < graph->nb_outputs; i++)
4539 close_output_stream(graph->outputs[i]->ost);
4542 if (ret != AVERROR(EAGAIN))
4545 for (i = 0; i < graph->nb_inputs; i++) {
4546 ifilter = graph->inputs[i];
4548 if (input_files[ist->file_index]->eagain ||
4549 input_files[ist->file_index]->eof_reached)
4551 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4552 if (nb_requests > nb_requests_max) {
4553 nb_requests_max = nb_requests;
4559 for (i = 0; i < graph->nb_outputs; i++)
4560 graph->outputs[i]->ost->unavailable = 1;
4566 * Run a single step of transcoding.
4568 * @return 0 for success, <0 for error
4570 static int transcode_step(void)
4573 InputStream *ist = NULL;
4576 ost = choose_output();
4583 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4587 if (ost->filter && !ost->filter->graph->graph) {
4588 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4589 ret = configure_filtergraph(ost->filter->graph);
4591 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4597 if (ost->filter && ost->filter->graph->graph) {
4598 if (!ost->initialized) {
4599 char error[1024] = {0};
4600 ret = init_output_stream(ost, error, sizeof(error));
4602 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4603 ost->file_index, ost->index, error);
4607 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4611 } else if (ost->filter) {
4613 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4614 InputFilter *ifilter = ost->filter->graph->inputs[i];
4615 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4621 ost->inputs_done = 1;
4625 av_assert0(ost->source_index >= 0);
4626 ist = input_streams[ost->source_index];
4629 ret = process_input(ist->file_index);
4630 if (ret == AVERROR(EAGAIN)) {
4631 if (input_files[ist->file_index]->eagain)
4632 ost->unavailable = 1;
4637 return ret == AVERROR_EOF ? 0 : ret;
4639 return reap_filters(0);
4643 * The following code is the main loop of the file converter
/*
 * Main driver of the converter: runs the demux/decode/filter/encode/mux
 * loop until no output stream needs more data (or SIGTERM arrives), then
 * flushes decoders, writes output trailers, prints the final report and
 * releases per-stream resources.
 *
 * NOTE(review): this listing is elided — lines are missing between the
 * numbered statements (declarations of ret/i/ist/ost, closing braces,
 * error/cleanup labels), so control-flow notes below are hedged where
 * the missing lines would decide the answer.
 */
4645 static int transcode(void)
4648 AVFormatContext *os;
4651 int64_t timer_start;
4652 int64_t total_packets_written = 0;
/* Open encoders / write headers; presumably bails to cleanup on failure
 * — the error check after this call is elided, TODO confirm. */
4654 ret = transcode_init();
4658 if (stdin_interaction) {
4659 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4662 timer_start = av_gettime_relative();
/* Spawn per-input-file demuxer threads (threaded input reading). */
4665 if ((ret = init_input_threads()) < 0)
/* Main loop: one transcode_step() per iteration until interrupted or done. */
4669 while (!received_sigterm) {
4670 int64_t cur_time= av_gettime_relative();
4672 /* if 'q' pressed, exits */
4673 if (stdin_interaction)
4674 if (check_keyboard_interaction(cur_time) < 0)
4677 /* check if there's any stream where output is still needed */
4678 if (!need_output()) {
4679 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
/* Advance the pipeline by one unit of work (read/decode/filter/encode). */
4683 ret = transcode_step();
4684 if (ret < 0 && ret != AVERROR_EOF) {
4685 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4689 /* dump report by using the output first video and audio streams */
4690 print_report(0, timer_start, cur_time);
4693 free_input_threads();
4696 /* at the end of stream, we must flush the decoder buffers */
4697 for (i = 0; i < nb_input_streams; i++) {
4698 ist = input_streams[i];
4699 if (!input_files[ist->file_index]->eof_reached) {
/* NULL packet signals EOF to the decoder, draining buffered frames. */
4700 process_input_packet(ist, NULL, 0);
4707 /* write the trailer if needed and close file */
4708 for (i = 0; i < nb_output_files; i++) {
4709 os = output_files[i]->ctx;
/* A file whose header was never written received no packets at all;
 * report it instead of calling av_write_trailer() on it. */
4710 if (!output_files[i]->header_written) {
4711 av_log(NULL, AV_LOG_ERROR,
4712 "Nothing was written into output file %d (%s), because "
4713 "at least one of its streams received no packets.\n",
4717 if ((ret = av_write_trailer(os)) < 0) {
4718 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4724 /* dump report by using the first video and audio streams */
4725 print_report(1, timer_start, av_gettime_relative());
4727 /* close each encoder */
4728 for (i = 0; i < nb_output_streams; i++) {
4729 ost = output_streams[i];
4730 if (ost->encoding_needed) {
4731 av_freep(&ost->enc_ctx->stats_in);
/* Tally packets across all outputs so an entirely empty run can abort. */
4733 total_packets_written += ost->packets_written;
4736 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4737 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4741 /* close each decoder */
4742 for (i = 0; i < nb_input_streams; i++) {
4743 ist = input_streams[i];
4744 if (ist->decoding_needed) {
4745 avcodec_close(ist->dec_ctx);
/* Let the hwaccel tear down its per-decoder state before the ctx goes. */
4746 if (ist->hwaccel_uninit)
4747 ist->hwaccel_uninit(ist->dec_ctx);
/* Release hardware device contexts created for -hwaccel/-init_hw_device. */
4751 av_buffer_unref(&hw_device_ctx);
4752 hw_device_free_all();
/* Cleanup path — presumably reached via a fail/cleanup label on errors
 * too (label elided in this listing, TODO confirm); calling
 * free_input_threads() again here is safe on the normal path as well. */
4759 free_input_threads();
4762 if (output_streams) {
4763 for (i = 0; i < nb_output_streams; i++) {
4764 ost = output_streams[i];
/* fclose() flushes; a failure here can lose 2-pass log data, so warn. */
4767 if (fclose(ost->logfile))
4768 av_log(NULL, AV_LOG_ERROR,
4769 "Error closing logfile, loss of information possible: %s\n",
4770 av_err2str(AVERROR(errno)));
4771 ost->logfile = NULL;
4773 av_freep(&ost->forced_kf_pts);
4774 av_freep(&ost->apad);
4775 av_freep(&ost->disposition);
4776 av_dict_free(&ost->encoder_opts);
4777 av_dict_free(&ost->sws_dict);
4778 av_dict_free(&ost->swr_opts);
4779 av_dict_free(&ost->resample_opts);
/*
 * Snapshot wall-clock plus per-process user/system CPU time, all in
 * microseconds, for the -benchmark report.  real_usec is always taken
 * from av_gettime_relative(); the CPU times come from getrusage() or
 * GetProcessTimes() depending on platform, and fall back to zero when
 * neither is available.
 *
 * NOTE(review): listing elided — the opening #if HAVE_GETRUSAGE, the
 * closing #endif and the final `return time_stamps;` are not visible
 * here; TODO confirm against the full file.
 */
4786 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
/* Designated first member init: real_usec = current monotonic time. */
4788 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4790 struct rusage rusage;
4792 getrusage(RUSAGE_SELF, &rusage);
/* struct timeval -> microseconds (seconds scaled by 1e6, 64-bit safe). */
4793 time_stamps.user_usec =
4794 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4795 time_stamps.sys_usec =
4796 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4797 #elif HAVE_GETPROCESSTIMES
4799 FILETIME c, e, k, u;
4800 proc = GetCurrentProcess();
/* c/e = creation/exit (unused); k/u = kernel/user CPU time. */
4801 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100 ns ticks, so dividing by 10 yields microseconds. */
4802 time_stamps.user_usec =
4803 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4804 time_stamps.sys_usec =
4805 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* Fallback branch: no CPU-time API available on this platform. */
4807 time_stamps.user_usec = time_stamps.sys_usec = 0;
/*
 * Return the peak resident/committed memory of this process in bytes,
 * used by the final -benchmark report ("maxrss" line).
 *
 * NOTE(review): listing elided — the trailing #else fallback (presumably
 * `return 0;`) and #endif are not visible here; TODO confirm.
 */
4812 static int64_t getmaxrss(void)
4814 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4815 struct rusage rusage;
4816 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is in kilobytes on Linux, hence *1024; NOTE(review): macOS
 * reports it in bytes, so the value is overstated there — verify intent. */
4817 return (int64_t)rusage.ru_maxrss * 1024;
4818 #elif HAVE_GETPROCESSMEMORYINFO
4820 PROCESS_MEMORY_COUNTERS memcounters;
4821 proc = GetCurrentProcess();
/* cb must be set to the struct size before the Win32 call. */
4822 memcounters.cb = sizeof(memcounters);
4823 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* Peak committed (pagefile-backed) usage, already in bytes. */
4824 return memcounters.PeakPagefileUsage;
/* No-op av_log sink installed when the '-d' (daemon/debug) flag is given,
 * silencing all library log output; the body is elided in this listing
 * (presumably empty — TODO confirm against the full file). */
4830 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4834 int main(int argc, char **argv)
4837 BenchmarkTimeStamps ti;
4841 register_exit(ffmpeg_cleanup);
4843 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4845 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4846 parse_loglevel(argc, argv, options);
4848 if(argc>1 && !strcmp(argv[1], "-d")){
4850 av_log_set_callback(log_callback_null);
4856 avdevice_register_all();
4858 avformat_network_init();
4860 show_banner(argc, argv, options);
4862 /* parse options and open all input/output files */
4863 ret = ffmpeg_parse_options(argc, argv);
4867 if (nb_output_files <= 0 && nb_input_files == 0) {
4869 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4873 /* file converter / grab */
4874 if (nb_output_files <= 0) {
4875 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4879 for (i = 0; i < nb_output_files; i++) {
4880 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4884 current_time = ti = get_benchmark_time_stamps();
4885 if (transcode() < 0)
4888 int64_t utime, stime, rtime;
4889 current_time = get_benchmark_time_stamps();
4890 utime = current_time.user_usec - ti.user_usec;
4891 stime = current_time.sys_usec - ti.sys_usec;
4892 rtime = current_time.real_usec - ti.real_usec;
4893 av_log(NULL, AV_LOG_INFO,
4894 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4895 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4897 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4898 decode_error_stat[0], decode_error_stat[1]);
4899 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4902 exit_program(received_nb_signals ? 255 : main_return_code);
4903 return main_return_code;