2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity consumed by cmdutils for banner/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Handle for -vstats output; closed (with error check) in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
/* Constant names usable in -force_key_frames expressions.
 * NOTE(review): initializer list and closing brace are elided in this extract. */
114 const char *const forced_keyframes_const_names[] = {
/* CPU/wall-clock sample used by -benchmark.
 * NOTE(review): struct members are elided in this extract. */
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
/* Forward declarations for helpers defined later in this file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Process-wide transcode flags and statistics counters. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
/* Threshold at which do_video_out() escalates a duplicated-frames warning. */
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
140 static int want_sdp = 1;
/* Benchmark baseline updated by update_benchmark(). */
142 static BenchmarkTimeStamps current_time;
143 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles; allocated in do_subtitle_out(). */
145 static uint8_t *subtitle_out;
/* Global tables of all inputs/outputs; indexed via the *_index fields
 * stored inside the stream/file structures. */
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;
162 /* init terminal so that we can grab keys */
/* Saved tty state restored by term_exit_sigsafe() when restore_tty is set. */
163 static struct termios oldtty;
164 static int restore_tty;
168 static void free_input_threads(void);
172 Convert subtitles to video with alpha to insert them in filter graphs.
173 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a blank RGB32 canvas sized from the decoder
 * dimensions (falling back to the configured sub2video w/h).
 * Returns 0 on success, negative AVERROR from av_frame_get_buffer() on failure.
 * NOTE(review): the opening brace, `int ret;` and return statements are
 * elided in this extract. */
176 static int sub2video_get_blank_frame(InputStream *ist)
179 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before resizing/reallocating. */
181 av_frame_unref(frame);
182 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32 is the buffer alignment passed to av_frame_get_buffer(). */
185 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero fill: fully transparent canvas (RGB32 with zero alpha). */
187 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted SUBTITLE_BITMAP rectangle into the w x h RGB32 canvas,
 * expanding each palette index through pal. Non-bitmap rectangles and
 * rectangles that do not fit inside the canvas are rejected with a warning.
 * NOTE(review): several declarations, early returns and closing braces are
 * elided in this extract (e.g. `src`/`src2` setup). */
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194 uint32_t *pal, *dst2;
198 if (r->type != SUBTITLE_BITMAP) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
202 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel. */
209 dst += r->y * dst_linesize + r->x * 4;
/* r->data[1] holds the palette (AVSubtitleRect layout). */
211 pal = (uint32_t *)r->data[1];
212 for (y = 0; y < r->h; y++) {
213 dst2 = (uint32_t *)dst;
/* Palette lookup per source byte. */
215 for (x = 0; x < r->w; x++)
216 *(dst2++) = pal[*(src2++)];
218 src += r->linesize[0];
/* Stamp the current sub2video frame with pts and feed it to every buffer
 * source attached to this input stream. KEEP_REF lets the same frame be
 * re-sent later (heartbeats); PUSH forces immediate filtering.
 * NOTE(review): loop/function closing braces and the av_err2str() argument
 * of the warning are elided in this extract. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 AVFrame *frame = ist->sub2video.frame;
228 av_assert1(frame->data[0]);
229 ist->sub2video.last_pts = frame->pts = pts;
230 for (i = 0; i < ist->nb_filters; i++) {
231 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232 AV_BUFFERSRC_FLAG_KEEP_REF |
233 AV_BUFFERSRC_FLAG_PUSH);
/* EOF from a buffer source is expected at stream end; only warn on real errors. */
234 if (ret != AVERROR_EOF && ret < 0)
235 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render an AVSubtitle onto the sub2video canvas and push it downstream.
 * sub == NULL clears the canvas (heartbeat/flush path); in that case the
 * stored end_pts is reused as the timestamp (see the elided else-branch).
 * NOTE(review): declarations (dst, dst_linesize, num_rects, i), the
 * NULL-sub else-branch and error-path returns are elided in this extract. */
240 void sub2video_update(InputStream *ist, AVSubtitle *sub)
242 AVFrame *frame = ist->sub2video.frame;
246 int64_t pts, end_pts;
/* Display window: ms offsets relative to sub->pts (AV_TIME_BASE units),
 * rescaled into the input stream's time base. */
251 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252 AV_TIME_BASE_Q, ist->st->time_base);
253 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254 AV_TIME_BASE_Q, ist->st->time_base);
255 num_rects = sub->num_rects;
/* NULL-sub path: reuse the previous end as this frame's pts. */
257 pts = ist->sub2video.end_pts;
261 if (sub2video_get_blank_frame(ist) < 0) {
262 av_log(ist->dec_ctx, AV_LOG_ERROR,
263 "Impossible to get a blank canvas.\n");
266 dst = frame->data [0];
267 dst_linesize = frame->linesize[0];
268 for (i = 0; i < num_rects; i++)
269 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270 sub2video_push_ref(ist, pts);
271 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame of every subtitle stream in the same
 * input file so video filters waiting on a subtitle input do not stall.
 * NOTE(review): declarations (i, j, nb_reqs, pts2), `continue` statements
 * and the `if (nb_reqs)` guard before the push are elided in this extract. */
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
276 InputFile *infile = input_files[ist->file_index];
280 /* When a frame is read from a file, examine all sub2video streams in
281 the same file and send the sub2video frame again. Otherwise, decoded
282 video frames could be accumulating in the filter graph while a filter
283 (possibly overlay) is desperately waiting for a subtitle frame. */
284 for (i = 0; i < infile->nb_streams; i++) {
285 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video sources. */
286 if (!ist2->sub2video.frame)
288 /* subtitles seem to be usually muxed ahead of other streams;
289 if not, subtracting a larger time here is necessary */
290 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291 /* do not send the heartbeat frame if the subtitle is already ahead */
292 if (pts2 <= ist2->sub2video.last_pts)
/* Canvas expired or never drawn: redraw (NULL clears) before re-pushing. */
294 if (pts2 >= ist2->sub2video.end_pts ||
295 (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296 sub2video_update(ist2, NULL);
/* Only push when some filter actually failed a frame request. */
297 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
300 sub2video_push_ref(ist2, pts2);
/* End-of-stream: clear the canvas one last time and send NULL (EOF) to every
 * buffer source fed by this stream.
 * NOTE(review): declarations (i, ret) and closing braces are elided. */
304 static void sub2video_flush(InputStream *ist)
309 if (ist->sub2video.end_pts < INT64_MAX)
310 sub2video_update(ist, NULL);
311 for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame signals EOF to the buffer source. */
312 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313 if (ret != AVERROR_EOF && ret < 0)
314 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
318 /* end of sub2video hack */
/* Async-signal-safe part of terminal teardown: restore the saved tty mode.
 * NOTE(review): the guard around tcsetattr (presumably `if (restore_tty)`)
 * and the separate term_exit() wrapper around the av_log call are elided. */
320 static void term_exit_sigsafe(void)
324 tcsetattr (0, TCSANOW, &oldtty);
330 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared between the handler and the main loop.
 * NOTE(review): these are plain `volatile`, not atomics — pre-existing style
 * in this file; only transcode_init_done is an atomic. */
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
336 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
/* Signal handler: record the signal and count deliveries; after more than
 * three signals, write a message with write(2) (async-signal-safe, unlike
 * printf) and hard-exit (the exit call itself is elided in this extract). */
341 sigterm_handler(int sig)
344 received_sigterm = sig;
345 received_nb_signals++;
347 if(received_nb_signals > 3) {
348 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349 strlen("Received > 3 system signals, hard exiting\n"));
/* write() result deliberately ignored — nothing sane to do in a handler. */
350 if (ret < 0) { /* Do nothing */ };
355 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signals handled by sigterm_handler().
 * NOTE(review): switch header, return statements, Sleep() inside the wait
 * loop, default case and #endif are elided in this extract. */
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
358 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl-C / Ctrl-Break behave like SIGINT. */
363 case CTRL_BREAK_EVENT:
364 sigterm_handler(SIGINT);
/* Close/logoff/shutdown behave like SIGTERM, then stall below. */
367 case CTRL_CLOSE_EVENT:
368 case CTRL_LOGOFF_EVENT:
369 case CTRL_SHUTDOWN_EVENT:
370 sigterm_handler(SIGTERM);
371 /* Basically, with these 3 events, when we return from this method the
372 process is hard terminated, so stall as long as we need to
373 to try and let the main thread(s) clean up and gracefully terminate
374 (we have at most 5 seconds, but should be done far before that). */
375 while (!ffmpeg_exited) {
381 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Fragment of term_init(): put the tty into raw-ish mode for interactive key
 * reading and install the signal handlers.
 * NOTE(review): the function header, `struct termios tty;` declaration,
 * restore_tty/atexit bookkeeping and surrounding #if guards are elided. */
390 if (!run_as_daemon && stdin_interaction) {
392 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so single keys arrive untouched. */
396 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397 |INLCR|IGNCR|ICRNL|IXON);
398 tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
399 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400 tty.c_cflag &= ~(CSIZE|PARENB);
405 tcsetattr (0, TCSANOW, &tty);
407 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
411 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* SIGXCPU: CPU-time limit exceeded — treat like a termination request. */
414 signal(SIGXCPU, sigterm_handler);
417 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
419 #if HAVE_SETCONSOLECTRLHANDLER
420 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
424 /* read a key without blocking */
/* Poll stdin for one keypress; returns -1 when nothing is available.
 * POSIX path uses select() with a zero timeout; Windows path peeks the
 * console/pipe. NOTE(review): fd_set/timeval setup, the read() of the key,
 * the console path and all returns are elided in this extract. */
425 static int read_key(void)
/* Zero-timeout select on fd 0: non-blocking availability check. */
437 n = select(1, &rfds, NULL, NULL, &tv);
446 # if HAVE_PEEKNAMEDPIPE
448 static HANDLE input_handle;
451 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails on pipes — used here to distinguish pipe vs console. */
452 is_pipe = !GetConsoleMode(input_handle, &dw);
456 /* When running under a GUI, you will end here. */
457 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
458 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once more signals have arrived
 * than the current phase tolerates (0 during init, 1 once transcoding runs),
 * i.e. a single Ctrl-C interrupts I/O immediately during initialization. */
476 static int decode_interrupt_cb(void *ctx)
478 return received_nb_signals > atomic_load(&transcode_init_done);
/* Installed into every AVFormatContext opened by this program. */
481 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown registered to run at program exit: free filtergraphs,
 * output files/streams, input files/streams, the vstats file and the global
 * tables, then report how the run ended. Order matters: filtergraphs first
 * (they reference streams), outputs before inputs, arrays freed last.
 * NOTE(review): loop counters, several closing braces, the io_closep of
 * output files, the #if guards and the final exit bookkeeping are elided
 * in this extract. */
483 static void ffmpeg_cleanup(int ret)
/* -benchmark: report peak resident set size in kB. */
488 int maxrss = getmaxrss() / 1024;
489 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
492 for (i = 0; i < nb_filtergraphs; i++) {
493 FilterGraph *fg = filtergraphs[i];
494 avfilter_graph_free(&fg->graph);
495 for (j = 0; j < fg->nb_inputs; j++) {
/* Drain and free any frames still queued for this filter input. */
496 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
498 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499 sizeof(frame), NULL);
500 av_frame_free(&frame);
502 av_fifo_freep(&fg->inputs[j]->frame_queue);
/* Same for queued, not-yet-rendered subtitles (sub2video). */
503 if (fg->inputs[j]->ist->sub2video.sub_queue) {
504 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
506 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
507 &sub, sizeof(sub), NULL);
508 avsubtitle_free(&sub);
510 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
512 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
513 av_freep(&fg->inputs[j]->name);
514 av_freep(&fg->inputs[j]);
516 av_freep(&fg->inputs);
517 for (j = 0; j < fg->nb_outputs; j++) {
518 av_freep(&fg->outputs[j]->name);
519 av_freep(&fg->outputs[j]->formats);
520 av_freep(&fg->outputs[j]->channel_layouts);
521 av_freep(&fg->outputs[j]->sample_rates);
522 av_freep(&fg->outputs[j]);
524 av_freep(&fg->outputs);
525 av_freep(&fg->graph_desc);
527 av_freep(&filtergraphs[i]);
529 av_freep(&filtergraphs);
531 av_freep(&subtitle_out);
/* Close and free all output files.
 * NOTE(review): `AVFormatContext *s = of->ctx;` and the avio_closep guarded
 * by the AVFMT_NOFILE check are elided here. */
534 for (i = 0; i < nb_output_files; i++) {
535 OutputFile *of = output_files[i];
540 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
542 avformat_free_context(s);
543 av_dict_free(&of->opts);
545 av_freep(&output_files[i]);
547 for (i = 0; i < nb_output_streams; i++) {
548 OutputStream *ost = output_streams[i];
553 for (j = 0; j < ost->nb_bitstream_filters; j++)
554 av_bsf_free(&ost->bsf_ctx[j]);
555 av_freep(&ost->bsf_ctx);
557 av_frame_free(&ost->filtered_frame);
558 av_frame_free(&ost->last_frame);
559 av_dict_free(&ost->encoder_opts);
561 av_freep(&ost->forced_keyframes);
562 av_expr_free(ost->forced_keyframes_pexpr);
563 av_freep(&ost->avfilter);
564 av_freep(&ost->logfile_prefix);
566 av_freep(&ost->audio_channels_map);
567 ost->audio_channels_mapped = 0;
569 av_dict_free(&ost->sws_dict);
570 av_dict_free(&ost->swr_opts);
572 avcodec_free_context(&ost->enc_ctx);
573 avcodec_parameters_free(&ost->ref_par);
/* Unreference any packets still waiting for the muxer header. */
575 if (ost->muxing_queue) {
576 while (av_fifo_size(ost->muxing_queue)) {
578 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
579 av_packet_unref(&pkt);
581 av_fifo_freep(&ost->muxing_queue);
584 av_freep(&output_streams[i]);
/* Stop input reader threads before closing the demuxers they feed. */
587 free_input_threads();
589 for (i = 0; i < nb_input_files; i++) {
590 avformat_close_input(&input_files[i]->ctx);
591 av_freep(&input_files[i]);
593 for (i = 0; i < nb_input_streams; i++) {
594 InputStream *ist = input_streams[i];
596 av_frame_free(&ist->decoded_frame);
597 av_frame_free(&ist->filter_frame);
598 av_dict_free(&ist->decoder_opts);
599 avsubtitle_free(&ist->prev_sub.subtitle);
600 av_frame_free(&ist->sub2video.frame);
601 av_freep(&ist->filters);
602 av_freep(&ist->hwaccel_device);
603 av_freep(&ist->dts_buffer);
605 avcodec_free_context(&ist->dec_ctx);
607 av_freep(&input_streams[i]);
/* fclose flushes buffered vstats data — a failure may mean data loss. */
611 if (fclose(vstats_file))
612 av_log(NULL, AV_LOG_ERROR,
613 "Error closing vstats file, loss of information possible: %s\n",
614 av_err2str(AVERROR(errno)));
616 av_freep(&vstats_filename);
618 av_freep(&input_streams);
619 av_freep(&input_files);
620 av_freep(&output_streams);
621 av_freep(&output_files);
625 avformat_network_deinit();
/* Final status: a signal-driven exit is "normal"; a nonzero ret after init
 * completed means the conversion itself failed. */
627 if (received_sigterm) {
628 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
629 (int) received_sigterm);
630 } else if (ret && atomic_load(&transcode_init_done)) {
631 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to strip options already consumed by one layer before leftovers in
 * *a are reported by assert_avoptions(). */
637 void remove_avoptions(AVDictionary **a, AVDictionary *b)
639 AVDictionaryEntry *t = NULL;
/* Empty key + IGNORE_SUFFIX iterates every entry of b. */
641 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
642 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fatal error if any option in m was not consumed: the first remaining
 * entry is reported as an unknown option.
 * NOTE(review): the exit call after the log is elided in this extract. */
646 void assert_avoptions(AVDictionary *m)
648 AVDictionaryEntry *t;
649 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
650 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort the program when an experimental codec is selected without the
 * required -strict setting; `encoder` distinguishes encode vs decode wording.
 * NOTE(review): the function body is elided in this extract. */
655 static void abort_codec_experimental(AVCodec *c, int encoder)
/* -benchmark_all support: when fmt is non-NULL, log user/sys/real time
 * deltas since the last call under the formatted label; in all cases the
 * `current_time` baseline is refreshed (refresh line elided here).
 * NOTE(review): buf declaration, va_start/va_end and the fmt==NULL early
 * branch are elided in this extract. */
660 static void update_benchmark(const char *fmt, ...)
662 if (do_benchmark_all) {
663 BenchmarkTimeStamps t = get_benchmark_time_stamps();
669 vsnprintf(buf, sizeof(buf), fmt, va);
671 av_log(NULL, AV_LOG_INFO,
672 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
673 t.user_usec - current_time.user_usec,
674 t.sys_usec - current_time.sys_usec,
675 t.real_usec - current_time.real_usec, buf);
/* Mark every output stream finished: `this_stream` flags go to ost itself,
 * `others` to all remaining streams. Used on fatal mux errors to stop
 * encoding/muxing everywhere. */
681 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
684 for (i = 0; i < nb_output_streams; i++) {
685 OutputStream *ost2 = output_streams[i];
686 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer for ost: enforce -frames limits, queue the
 * packet if the muxer header is not written yet, rescale timestamps into the
 * stream time base, repair invalid/non-monotonic DTS, then interleave-write.
 * The packet is consumed (unreferenced or moved) in all paths.
 * NOTE(review): `int ret; int i;`, several closing braces, exit calls on the
 * fatal paths and a few argument lines are elided in this extract. */
690 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
692 AVFormatContext *s = of->ctx;
693 AVStream *st = ost->st;
697 * Audio encoders may split the packets -- #frames in != #packets out.
698 * But there is no reordering, so we can limit the number of output packets
699 * by simply dropping them here.
700 * Counting encoded video frames needs to be done separately because of
701 * reordering, see do_video_out().
702 * Do not count the packet when unqueued because it has been counted when queued.
704 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
/* -frames reached: drop the packet (frame_number increment is elided). */
705 if (ost->frame_number >= ost->max_frames) {
706 av_packet_unref(pkt);
712 if (!of->header_written) {
713 AVPacket tmp_pkt = {0};
714 /* the muxer is not initialized yet, buffer the packet */
/* Grow the queue geometrically up to -max_muxing_queue_size; beyond that
 * it is a hard error (exit path elided). */
715 if (!av_fifo_space(ost->muxing_queue)) {
716 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
717 ost->max_muxing_queue_size);
718 if (new_size <= av_fifo_size(ost->muxing_queue)) {
719 av_log(NULL, AV_LOG_ERROR,
720 "Too many packets buffered for output stream %d:%d.\n",
721 ost->file_index, ost->st->index);
724 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Must be refcounted so the queued copy survives the caller's buffer. */
728 ret = av_packet_make_refcounted(pkt);
731 av_packet_move_ref(&tmp_pkt, pkt);
732 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* Under -vsync drop / -async<0, let the muxer regenerate timestamps. */
736 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
737 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
738 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
740 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* Pull quality/picture-type/error stats the encoder attached as side data. */
742 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
744 ost->quality = sd ? AV_RL32(sd) : -1;
745 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
747 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
749 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force packet duration from the frame rate. */
754 if (ost->frame_rate.num && ost->is_cfr) {
755 if (pkt->duration > 0)
756 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
757 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
762 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
764 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts > pts is invalid; rebuild both from min/max of the three candidates. */
765 if (pkt->dts != AV_NOPTS_VALUE &&
766 pkt->pts != AV_NOPTS_VALUE &&
767 pkt->dts > pkt->pts) {
768 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
770 ost->file_index, ost->st->index);
/* sum minus min minus max == the middle value of the three. */
772 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
773 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
774 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce monotonically increasing DTS (strictly, unless the muxer sets
 * AVFMT_TS_NONSTRICT). Copied VP9 is exempt (superframes). */
776 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
777 pkt->dts != AV_NOPTS_VALUE &&
778 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
779 ost->last_mux_dts != AV_NOPTS_VALUE) {
780 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
781 if (pkt->dts < max) {
782 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
783 av_log(s, loglevel, "Non-monotonous DTS in output stream "
784 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
785 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
/* -xerror path (guard elided): abort instead of fixing up. */
787 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
790 av_log(s, loglevel, "changing to %"PRId64". This may result "
791 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
793 if (pkt->pts >= pkt->dts)
794 pkt->pts = FFMAX(pkt->pts, max);
799 ost->last_mux_dts = pkt->dts;
801 ost->data_size += pkt->size;
802 ost->packets_written++;
804 pkt->stream_index = ost->index;
/* -debug_ts trace (guard elided). */
807 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
808 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
809 av_get_media_type_string(ost->enc_ctx->codec_type),
810 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
811 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
816 ret = av_interleaved_write_frame(s, pkt);
/* On mux failure, record the error and shut down every output stream. */
818 print_error("av_interleaved_write_frame()", ret);
819 main_return_code = 1;
820 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
822 av_packet_unref(pkt);
/* Mark ost finished for encoding and, when applicable (shortest-stream
 * logic — the guard is elided here), clamp the file's recording_time to the
 * stream's current position so other streams stop at the same point. */
825 static void close_output_stream(OutputStream *ost)
827 OutputFile *of = output_files[ost->file_index];
829 ost->finished |= ENCODER_FINISHED;
831 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
832 of->recording_time = FFMIN(of->recording_time, end);
837 * Send a single packet to the output, applying any bitstream filters
838 * associated with the output stream. This may result in any number
839 * of packets actually being written, depending on what bitstream
840 * filters are applied. The supplied packet is consumed and will be
841 * blank (as if newly-allocated) when this function returns.
843 * If eof is set, instead indicate EOF to all bitstream filters and
844 * therefore flush any delayed packets to the output. A blank packet
845 * must be supplied in this case.
/* NOTE(review): `int ret = 0; int idx;` declarations, the while-loop header
 * driving the receive/send chain, idx bookkeeping, goto finish labels and
 * closing braces are elided in this extract. */
847 static void output_packet(OutputFile *of, AVPacket *pkt,
848 OutputStream *ost, int eof)
852 /* apply the output bitstream filters, if any */
853 if (ost->nb_bitstream_filters) {
/* Feed the first filter in the chain; NULL packet signals EOF/flush. */
856 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
863 /* get a packet from the previous filter up the chain */
864 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
/* EAGAIN: this filter needs more input — step back up the chain. */
865 if (ret == AVERROR(EAGAIN)) {
869 } else if (ret == AVERROR_EOF) {
874 /* send it to the next filter down the chain or to the muxer */
875 if (idx < ost->nb_bitstream_filters) {
876 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
/* End of the chain: the packet is ready for the muxer. */
884 write_packet(of, pkt, ost, 0);
/* No bitstream filters configured: write directly (non-EOF path). */
887 write_packet(of, pkt, ost, 0);
890 if (ret < 0 && ret != AVERROR_EOF) {
891 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
892 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Returns 0 and closes the stream once its current position (sync_opts
 * relative to first_pts, in encoder time base) reaches the file's -t
 * recording_time; otherwise returns 1 (return statements elided here). */
898 static int check_recording_time(OutputStream *ost)
900 OutputFile *of = output_files[ost->file_index];
902 if (of->recording_time != INT64_MAX &&
903 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
904 AV_TIME_BASE_Q) >= 0) {
905 close_output_stream(ost);
/* Encode one audio frame and mux the resulting packet(s) via output_packet().
 * Tracks sync_opts in samples so timestamp gaps are detectable.
 * NOTE(review): the `AVFrame *frame` parameter, `AVPacket pkt; int ret;`
 * declarations, the receive loop header, error `goto error` jumps, -debug_ts
 * guards and closing braces are elided in this extract. */
911 static void do_audio_out(OutputFile *of, OutputStream *ost,
914 AVCodecContext *enc = ost->enc_ctx;
918 av_init_packet(&pkt);
/* Stop encoding once -t recording time is reached. */
922 if (!check_recording_time(ost))
/* Without a pts (or with -async<0) continue from our own sample counter. */
925 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
926 frame->pts = ost->sync_opts;
927 ost->sync_opts = frame->pts + frame->nb_samples;
928 ost->samples_encoded += frame->nb_samples;
929 ost->frames_encoded++;
931 av_assert0(pkt.size || !pkt.data);
932 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder (guard elided). */
934 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
935 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
936 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
937 enc->time_base.num, enc->time_base.den);
940 ret = avcodec_send_frame(enc, frame);
/* Drain all packets the encoder has ready. */
945 ret = avcodec_receive_packet(enc, &pkt);
946 if (ret == AVERROR(EAGAIN))
951 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Encoder time base -> mux time base before handing to the muxer. */
953 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
956 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
957 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
958 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
959 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
962 output_packet(of, &pkt, ost, 0);
/* Fatal error path (exit call elided). */
967 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode an AVSubtitle and mux the result. DVB subtitles are encoded twice
 * (draw packet + clear packet, nb == 2); others once.
 * NOTE(review): the `OutputStream *ost, AVSubtitle *sub` parameters,
 * `AVCodecContext *enc`, `AVPacket pkt`, `int64_t pts`, exit calls, several
 * guards (e.g. the lazy-allocation NULL check) and closing braces are
 * elided in this extract. */
971 static void do_subtitle_out(OutputFile *of,
975 int subtitle_out_max_size = 1024 * 1024;
976 int subtitle_out_size, nb, i;
/* Subtitles cannot be scheduled without a pts — hard error (exit elided). */
981 if (sub->pts == AV_NOPTS_VALUE) {
982 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the 1 MiB global scratch buffer. */
991 subtitle_out = av_malloc(subtitle_out_max_size);
993 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
998 /* Note: DVB subtitle need one packet to draw them and one other
999 packet to clear them */
1000 /* XXX: signal it in the codec context ? */
1001 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1006 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1008 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1009 pts -= output_files[ost->file_index]->start_time;
1010 for (i = 0; i < nb; i++) {
/* num_rects is zeroed for the DVB "clear" pass (elided) and restored below. */
1011 unsigned save_num_rects = sub->num_rects;
1013 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1014 if (!check_recording_time(ost))
1018 // start_display_time is required to be 0
/* Fold start_display_time (ms) into pts and rebase the display window. */
1019 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1020 sub->end_display_time -= sub->start_display_time;
1021 sub->start_display_time = 0;
1025 ost->frames_encoded++;
1027 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1028 subtitle_out_max_size, sub);
1030 sub->num_rects = save_num_rects;
/* Fatal error path (exit call elided). */
1031 if (subtitle_out_size < 0) {
1032 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1036 av_init_packet(&pkt);
1037 pkt.data = subtitle_out;
1038 pkt.size = subtitle_out_size;
1039 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1040 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1041 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1042 /* XXX: the pts correction is handled here. Maybe handling
1043 it in the codec would be better */
/* Draw packet at start_display_time, clear packet at end_display_time
 * (the i==0/else selector is elided). */
1045 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1047 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1050 output_packet(of, &pkt, ost, 0);
1054 static void do_video_out(OutputFile *of,
1056 AVFrame *next_picture,
1059 int ret, format_video_sync;
1061 AVCodecContext *enc = ost->enc_ctx;
1062 AVCodecParameters *mux_par = ost->st->codecpar;
1063 AVRational frame_rate;
1064 int nb_frames, nb0_frames, i;
1065 double delta, delta0;
1066 double duration = 0;
1068 InputStream *ist = NULL;
1069 AVFilterContext *filter = ost->filter->filter;
1071 if (ost->source_index >= 0)
1072 ist = input_streams[ost->source_index];
1074 frame_rate = av_buffersink_get_frame_rate(filter);
1075 if (frame_rate.num > 0 && frame_rate.den > 0)
1076 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1078 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1079 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1081 if (!ost->filters_script &&
1083 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1086 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1087 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1090 if (!next_picture) {
1092 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1093 ost->last_nb0_frames[1],
1094 ost->last_nb0_frames[2]);
1096 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1097 delta = delta0 + duration;
1099 /* by default, we output a single frame */
1100 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1103 format_video_sync = video_sync_method;
1104 if (format_video_sync == VSYNC_AUTO) {
1105 if(!strcmp(of->ctx->oformat->name, "avi")) {
1106 format_video_sync = VSYNC_VFR;
1108 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1110 && format_video_sync == VSYNC_CFR
1111 && input_files[ist->file_index]->ctx->nb_streams == 1
1112 && input_files[ist->file_index]->input_ts_offset == 0) {
1113 format_video_sync = VSYNC_VSCFR;
1115 if (format_video_sync == VSYNC_CFR && copy_ts) {
1116 format_video_sync = VSYNC_VSCFR;
1119 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1123 format_video_sync != VSYNC_PASSTHROUGH &&
1124 format_video_sync != VSYNC_DROP) {
1125 if (delta0 < -0.6) {
1126 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1128 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1129 sync_ipts = ost->sync_opts;
1134 switch (format_video_sync) {
1136 if (ost->frame_number == 0 && delta0 >= 0.5) {
1137 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1140 ost->sync_opts = lrint(sync_ipts);
1143 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1144 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1146 } else if (delta < -1.1)
1148 else if (delta > 1.1) {
1149 nb_frames = lrintf(delta);
1151 nb0_frames = lrintf(delta0 - 0.6);
1157 else if (delta > 0.6)
1158 ost->sync_opts = lrint(sync_ipts);
1161 case VSYNC_PASSTHROUGH:
1162 ost->sync_opts = lrint(sync_ipts);
1169 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1170 nb0_frames = FFMIN(nb0_frames, nb_frames);
1172 memmove(ost->last_nb0_frames + 1,
1173 ost->last_nb0_frames,
1174 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1175 ost->last_nb0_frames[0] = nb0_frames;
1177 if (nb0_frames == 0 && ost->last_dropped) {
1179 av_log(NULL, AV_LOG_VERBOSE,
1180 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1181 ost->frame_number, ost->st->index, ost->last_frame->pts);
1183 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1184 if (nb_frames > dts_error_threshold * 30) {
1185 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1189 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1190 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1191 if (nb_frames_dup > dup_warning) {
1192 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1196 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1198 /* duplicates frame if needed */
1199 for (i = 0; i < nb_frames; i++) {
1200 AVFrame *in_picture;
1201 int forced_keyframe = 0;
1203 av_init_packet(&pkt);
1207 if (i < nb0_frames && ost->last_frame) {
1208 in_picture = ost->last_frame;
1210 in_picture = next_picture;
1215 in_picture->pts = ost->sync_opts;
1217 if (!check_recording_time(ost))
1220 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1221 ost->top_field_first >= 0)
1222 in_picture->top_field_first = !!ost->top_field_first;
1224 if (in_picture->interlaced_frame) {
1225 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1226 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1228 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1230 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1232 in_picture->quality = enc->global_quality;
1233 in_picture->pict_type = 0;
1235 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1236 in_picture->pts != AV_NOPTS_VALUE)
1237 ost->forced_kf_ref_pts = in_picture->pts;
1239 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1241 if (ost->forced_kf_index < ost->forced_kf_count &&
1242 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243 ost->forced_kf_index++;
1244 forced_keyframe = 1;
1245 } else if (ost->forced_keyframes_pexpr) {
1247 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1248 res = av_expr_eval(ost->forced_keyframes_pexpr,
1249 ost->forced_keyframes_expr_const_values, NULL);
1250 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1251 ost->forced_keyframes_expr_const_values[FKF_N],
1252 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1253 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1254 ost->forced_keyframes_expr_const_values[FKF_T],
1255 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1258 forced_keyframe = 1;
1259 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1260 ost->forced_keyframes_expr_const_values[FKF_N];
1261 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1262 ost->forced_keyframes_expr_const_values[FKF_T];
1263 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1266 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1267 } else if ( ost->forced_keyframes
1268 && !strncmp(ost->forced_keyframes, "source", 6)
1269 && in_picture->key_frame==1
1271 forced_keyframe = 1;
1274 if (forced_keyframe) {
1275 in_picture->pict_type = AV_PICTURE_TYPE_I;
1276 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1279 update_benchmark(NULL);
1281 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1282 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1283 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1284 enc->time_base.num, enc->time_base.den);
1287 ost->frames_encoded++;
1289 ret = avcodec_send_frame(enc, in_picture);
1292 // Make sure Closed Captions will not be duplicated
1293 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1296 ret = avcodec_receive_packet(enc, &pkt);
1297 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1298 if (ret == AVERROR(EAGAIN))
1304 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1305 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1306 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1307 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1310 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1311 pkt.pts = ost->sync_opts;
1313 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1316 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1317 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1318 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1319 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1322 frame_size = pkt.size;
1323 output_packet(of, &pkt, ost, 0);
1325 /* if two pass, output log */
1326 if (ost->logfile && enc->stats_out) {
1327 fprintf(ost->logfile, "%s", enc->stats_out);
1332 * For video, number of frames in == number of packets out.
1333 * But there may be reordering, so we can't throw away frames on encoder
1334 * flush, we need to limit them here, before they go into encoder.
1336 ost->frame_number++;
1338 if (vstats_filename && frame_size)
1339 do_video_stats(ost, frame_size);
1342 if (!ost->last_frame)
1343 ost->last_frame = av_frame_alloc();
1344 av_frame_unref(ost->last_frame);
1345 if (next_picture && ost->last_frame)
1346 av_frame_ref(ost->last_frame, next_picture);
1348 av_frame_free(&ost->last_frame);
1352 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/**
 * Convert a normalized squared error into PSNR in decibels.
 * Callers pass error already divided by (width*height*255^2), so the
 * peak term is folded into the argument and only -10*log10 remains.
 */
1356 static double psnr(double d)
1358 return -10.0 * log10(d);
/**
 * Append one line of per-frame video statistics (frame number, quantizer,
 * PSNR, sizes, bitrates, picture type) to the global vstats file.
 * Opened lazily on first call; format depends on vstats_version.
 */
1361 static void do_video_stats(OutputStream *ost, int frame_size)
1363 AVCodecContext *enc;
1365 double ti1, bitrate, avg_bitrate;
1367 /* this is executed just the first time do_video_stats is called */
1369 vstats_file = fopen(vstats_filename, "w");
1377 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1378 frame_number = ost->st->nb_frames;
1379 if (vstats_version <= 1) {
/* legacy (v1) format: frame number and quantizer only in the prefix */
1380 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1381 ost->quality / (float)FF_QP2LAMBDA);
/* v2 format additionally identifies the output file and stream */
1383 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1384 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only meaningful when the encoder accumulated error stats */
1387 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1388 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1390 fprintf(vstats_file,"f_size= %6d ", frame_size);
1391 /* compute pts value */
1392 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate from this frame; average from total muxed bytes */
1396 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1397 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1398 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1399 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1400 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1404 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/**
 * Mark an output stream as fully finished (both encoder and muxer side).
 * NOTE(review): the loop below marks every stream of the owning output
 * file finished — presumably gated by a shutdown condition on an elided
 * line; confirm against the full source.
 */
1406 static void finish_output_stream(OutputStream *ost)
1408 OutputFile *of = output_files[ost->file_index];
1411 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1414 for (i = 0; i < of->ctx->nb_streams; i++)
1415 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1420 * Get and encode new output from any of the filtergraphs, without causing
1423 * @return 0 for success, <0 for severe errors
 *
 * @param flush if non-zero, an EOF from a video buffersink triggers a final
 *              do_video_out(NULL) call to drain the encoder.
1425 static int reap_filters(int flush)
1427 AVFrame *filtered_frame = NULL;
1430 /* Reap all buffers present in the buffer sinks */
1431 for (i = 0; i < nb_output_streams; i++) {
1432 OutputStream *ost = output_streams[i];
1433 OutputFile *of = output_files[ost->file_index];
1434 AVFilterContext *filter;
1435 AVCodecContext *enc = ost->enc_ctx;
/* skip streams whose filtergraph is not (yet) configured */
1438 if (!ost->filter || !ost->filter->graph->graph)
1440 filter = ost->filter->filter;
/* lazily initialize the output stream the first time a frame arrives */
1442 if (!ost->initialized) {
1443 char error[1024] = "";
1444 ret = init_output_stream(ost, error, sizeof(error));
1446 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1447 ost->file_index, ost->index, error);
/* reuse a per-stream frame to avoid per-iteration allocation */
1452 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1453 return AVERROR(ENOMEM);
1455 filtered_frame = ost->filtered_frame;
1458 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: NO_REQUEST means do not ask upstream for more data */
1459 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1460 AV_BUFFERSINK_FLAG_NO_REQUEST);
1462 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1463 av_log(NULL, AV_LOG_WARNING,
1464 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1465 } else if (flush && ret == AVERROR_EOF) {
/* on flush, push a NULL frame so the video path can finalize */
1466 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1467 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1471 if (ost->finished) {
1472 av_frame_unref(filtered_frame);
/* rescale filter-timebase pts into encoder timebase; float_pts keeps
 * extra fractional bits (extra_bits) for more precise fps decisions */
1475 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1476 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1477 AVRational filter_tb = av_buffersink_get_time_base(filter);
1478 AVRational tb = enc->time_base;
1479 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1481 tb.den <<= extra_bits;
1483 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1484 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1485 float_pts /= 1 << extra_bits;
1486 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1487 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1489 filtered_frame->pts =
1490 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1491 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
/* dispatch the frame to the matching media-type encoder path */
1494 switch (av_buffersink_get_type(filter)) {
1495 case AVMEDIA_TYPE_VIDEO:
1496 if (!ost->frame_aspect_ratio.num)
1497 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1500 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1501 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1503 enc->time_base.num, enc->time_base.den);
1506 do_video_out(of, ost, filtered_frame, float_pts);
1508 case AVMEDIA_TYPE_AUDIO:
/* channel-count changes require encoder support for param changes */
1509 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1510 enc->channels != filtered_frame->channels) {
1511 av_log(NULL, AV_LOG_ERROR,
1512 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1515 do_audio_out(of, ost, filtered_frame);
1518 // TODO support subtitle filters
1522 av_frame_unref(filtered_frame);
/**
 * Print the end-of-run summary: per-media-type byte totals and muxing
 * overhead at INFO level, then per-stream demux/decode and encode/mux
 * counters at VERBOSE level. Warns when nothing was encoded at all.
 *
 * @param total_size total bytes written to the output (from avio), or <=0
 *                   when unknown (overhead then reported as "unknown").
 */
1529 static void print_final_stats(int64_t total_size)
1531 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1532 uint64_t subtitle_size = 0;
1533 uint64_t data_size = 0;
1534 float percent = -1.0;
/* accumulate payload bytes per media type across all output streams */
1538 for (i = 0; i < nb_output_streams; i++) {
1539 OutputStream *ost = output_streams[i];
1540 switch (ost->enc_ctx->codec_type) {
1541 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1542 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1543 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1544 default: other_size += ost->data_size; break;
1546 extra_size += ost->enc_ctx->extradata_size;
1547 data_size += ost->data_size;
1548 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1549 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw stream payloads */
1553 if (data_size && total_size>0 && total_size >= data_size)
1554 percent = 100.0 * (total_size - data_size) / data_size;
1556 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1557 video_size / 1024.0,
1558 audio_size / 1024.0,
1559 subtitle_size / 1024.0,
1560 other_size / 1024.0,
1561 extra_size / 1024.0);
1563 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1565 av_log(NULL, AV_LOG_INFO, "unknown");
1566 av_log(NULL, AV_LOG_INFO, "\n");
1568 /* print verbose per-stream stats */
1569 for (i = 0; i < nb_input_files; i++) {
1570 InputFile *f = input_files[i];
1571 uint64_t total_packets = 0, total_size = 0;
1573 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1576 for (j = 0; j < f->nb_streams; j++) {
1577 InputStream *ist = input_streams[f->ist_index + j];
1578 enum AVMediaType type = ist->dec_ctx->codec_type;
1580 total_size += ist->data_size;
1581 total_packets += ist->nb_packets;
1583 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1584 i, j, media_type_string(type));
1585 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1586 ist->nb_packets, ist->data_size);
1588 if (ist->decoding_needed) {
1589 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1590 ist->frames_decoded);
1591 if (type == AVMEDIA_TYPE_AUDIO)
1592 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1593 av_log(NULL, AV_LOG_VERBOSE, "; ");
1596 av_log(NULL, AV_LOG_VERBOSE, "\n");
1599 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1600 total_packets, total_size);
/* symmetric summary for every output file/stream */
1603 for (i = 0; i < nb_output_files; i++) {
1604 OutputFile *of = output_files[i];
1605 uint64_t total_packets = 0, total_size = 0;
1607 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1610 for (j = 0; j < of->ctx->nb_streams; j++) {
1611 OutputStream *ost = output_streams[of->ost_index + j];
1612 enum AVMediaType type = ost->enc_ctx->codec_type;
1614 total_size += ost->data_size;
1615 total_packets += ost->packets_written;
1617 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1618 i, j, media_type_string(type));
1619 if (ost->encoding_needed) {
1620 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1621 ost->frames_encoded);
1622 if (type == AVMEDIA_TYPE_AUDIO)
1623 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1624 av_log(NULL, AV_LOG_VERBOSE, "; ");
1627 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1628 ost->packets_written, ost->data_size);
1630 av_log(NULL, AV_LOG_VERBOSE, "\n");
1633 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1634 total_packets, total_size);
/* nothing at all was produced — point the user at common trim options */
1636 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1637 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1639 av_log(NULL, AV_LOG_WARNING, "\n");
1641 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/**
 * Build and emit the periodic (or final) progress report: the familiar
 * "frame= fps= q= size= time= bitrate= speed=" status line, plus a
 * machine-readable key=value variant streamed to -progress (progress_avio).
 *
 * Rate-limited to one report per 500ms via function-static last_time.
 * On the last report, also calls print_final_stats() and closes the
 * progress output. Not reentrant (static state).
 *
 * @param is_last_report non-zero for the final report at end of run
 * @param timer_start    wallclock start of transcoding, in microseconds
 * @param cur_time       current wallclock time, in microseconds
 */
1646 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1648 AVBPrint buf, buf_script;
1650 AVFormatContext *oc;
1652 AVCodecContext *enc;
1653 int frame_number, vid, i;
1656 int64_t pts = INT64_MIN + 1;
1657 static int64_t last_time = -1;
1658 static int qp_histogram[52];
1659 int hours, mins, secs, us;
1660 const char *hours_sign;
1664 if (!print_stats && !is_last_report && !progress_avio)
/* throttle intermediate reports to at most one per 500ms */
1667 if (!is_last_report) {
1668 if (last_time == -1) {
1669 last_time = cur_time;
1672 if ((cur_time - last_time) < 500000)
1674 last_time = cur_time;
1677 t = (cur_time-timer_start) / 1000000.0;
1680 oc = output_files[0]->ctx;
1682 total_size = avio_size(oc->pb);
1683 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1684 total_size = avio_tell(oc->pb);
1687 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1688 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1689 for (i = 0; i < nb_output_streams; i++) {
1691 ost = output_streams[i];
1693 if (!ost->stream_copy)
1694 q = ost->quality / (float) FF_QP2LAMBDA;
/* non-first video streams only contribute their quantizer */
1696 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1697 av_bprintf(&buf, "q=%2.1f ", q);
1698 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1699 ost->file_index, ost->index, q);
/* first video stream drives frame/fps reporting */
1701 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1704 frame_number = ost->frame_number;
1705 fps = t > 1 ? frame_number / t : 0;
1706 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1707 frame_number, fps < 9.95, fps, q);
1708 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1709 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1710 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1711 ost->file_index, ost->index, q);
1713 av_bprintf(&buf, "L");
/* qp histogram (when enabled): one hex digit per bucket, log-scaled */
1717 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1719 for (j = 0; j < 32; j++)
1720 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
/* per-plane and combined PSNR, from per-frame or accumulated error */
1723 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1725 double error, error_sum = 0;
1726 double scale, scale_sum = 0;
1728 char type[3] = { 'Y','U','V' };
1729 av_bprintf(&buf, "PSNR=");
1730 for (j = 0; j < 3; j++) {
1731 if (is_last_report) {
1732 error = enc->error[j];
1733 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1735 error = ost->error[j];
1736 scale = enc->width * enc->height * 255.0 * 255.0;
1742 p = psnr(error / scale);
1743 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1744 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1745 ost->file_index, ost->index, type[j] | 32, p);
1747 p = psnr(error_sum / scale_sum);
1748 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1749 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1750 ost->file_index, ost->index, p);
1754 /* compute min output value */
1755 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1756 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1757 ost->st->time_base, AV_TIME_BASE_Q));
1759 nb_frames_drop += ost->last_dropped;
/* split |pts| into h:m:s.us for display */
1762 secs = FFABS(pts) / AV_TIME_BASE;
1763 us = FFABS(pts) % AV_TIME_BASE;
1768 hours_sign = (pts < 0) ? "-" : "";
1770 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1771 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1773 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1774 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1775 if (pts == AV_NOPTS_VALUE) {
1776 av_bprintf(&buf, "N/A ");
1778 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1779 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1783 av_bprintf(&buf, "bitrate=N/A");
1784 av_bprintf(&buf_script, "bitrate=N/A\n");
1786 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1787 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1790 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1791 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1792 if (pts == AV_NOPTS_VALUE) {
1793 av_bprintf(&buf_script, "out_time_us=N/A\n");
1794 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1795 av_bprintf(&buf_script, "out_time=N/A\n");
1797 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1798 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1799 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1800 hours_sign, hours, mins, secs, us);
1803 if (nb_frames_dup || nb_frames_drop)
1804 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1805 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1806 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1809 av_bprintf(&buf, " speed=N/A");
1810 av_bprintf(&buf_script, "speed=N/A\n");
1812 av_bprintf(&buf, " speed=%4.3gx", speed);
1813 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* human-readable line: \r keeps updating in place, \n only at the end */
1816 if (print_stats || is_last_report) {
1817 const char end = is_last_report ? '\n' : '\r';
1818 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1819 fprintf(stderr, "%s %c", buf.str, end);
1821 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1825 av_bprint_finalize(&buf, NULL);
/* machine-readable key=value report for -progress consumers */
1827 if (progress_avio) {
1828 av_bprintf(&buf_script, "progress=%s\n",
1829 is_last_report ? "end" : "continue");
1830 avio_write(progress_avio, buf_script.str,
1831 FFMIN(buf_script.len, buf_script.size - 1));
1832 avio_flush(progress_avio);
1833 av_bprint_finalize(&buf_script, NULL);
1834 if (is_last_report) {
1835 if ((ret = avio_closep(&progress_avio)) < 0)
1836 av_log(NULL, AV_LOG_ERROR,
1837 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1842 print_final_stats(total_size);
/**
 * Seed an input filter's media parameters from stream codec parameters.
 * Used when no decoded frame ever arrived, so the only format information
 * available is what libavformat probed into AVCodecParameters.
 */
1845 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1847 // We never got any input. Set a fake format, which will
1848 // come from libavformat.
1849 ifilter->format = par->format;
1850 ifilter->sample_rate = par->sample_rate;
1851 ifilter->channels = par->channels;
1852 ifilter->channel_layout = par->channel_layout;
1853 ifilter->width = par->width;
1854 ifilter->height = par->height;
1855 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/**
 * Drain all encoders at end of stream: for every output stream that needs
 * encoding, send a NULL frame and pump avcodec_receive_packet() until EOF,
 * muxing every packet produced. Streams that never received data get a
 * last-chance filtergraph configuration and stream init so headers can
 * still be written.
 */
1858 static void flush_encoders(void)
1862 for (i = 0; i < nb_output_streams; i++) {
1863 OutputStream *ost = output_streams[i];
1864 AVCodecContext *enc = ost->enc_ctx;
1865 OutputFile *of = output_files[ost->file_index];
1867 if (!ost->encoding_needed)
1870 // Try to enable encoding with no input frames.
1871 // Maybe we should just let encoding fail instead.
1872 if (!ost->initialized) {
1873 FilterGraph *fg = ost->filter->graph;
1874 char error[1024] = "";
1876 av_log(NULL, AV_LOG_WARNING,
1877 "Finishing stream %d:%d without any data written to it.\n",
1878 ost->file_index, ost->st->index);
/* graph never configured: fall back to codecpar-derived parameters */
1880 if (ost->filter && !fg->graph) {
1882 for (x = 0; x < fg->nb_inputs; x++) {
1883 InputFilter *ifilter = fg->inputs[x];
1884 if (ifilter->format < 0)
1885 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1888 if (!ifilter_has_all_input_formats(fg))
1891 ret = configure_filtergraph(fg);
1893 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1897 finish_output_stream(ost);
1900 ret = init_output_stream(ost, error, sizeof(error));
1902 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1903 ost->file_index, ost->index, error);
/* raw PCM-style audio encoders (frame_size <= 1) have nothing buffered */
1908 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1911 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1915 const char *desc = NULL;
1919 switch (enc->codec_type) {
1920 case AVMEDIA_TYPE_AUDIO:
1923 case AVMEDIA_TYPE_VIDEO:
1930 av_init_packet(&pkt);
1934 update_benchmark(NULL);
/* EAGAIN means the encoder wants (NULL=flush) input before more output */
1936 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1937 ret = avcodec_send_frame(enc, NULL);
1939 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1946 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1947 if (ret < 0 && ret != AVERROR_EOF) {
1948 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass stats from the encoder go to the pass logfile */
1953 if (ost->logfile && enc->stats_out) {
1954 fprintf(ost->logfile, "%s", enc->stats_out);
1956 if (ret == AVERROR_EOF) {
1957 output_packet(of, &pkt, ost, 1);
1960 if (ost->finished & MUXER_FINISHED) {
1961 av_packet_unref(&pkt);
1964 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1965 pkt_size = pkt.size;
1966 output_packet(of, &pkt, ost, 0);
1967 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1968 do_video_stats(ost, pkt_size);
1975 * Check whether a packet from ist should be written into ost at this time
 *
 * Rejects packets whose input stream does not feed this output stream,
 * and packets arriving before the output file's requested start time.
1977 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1979 OutputFile *of = output_files[ost->file_index];
1980 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1982 if (ost->source_index != ist_index)
1988 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/**
 * Copy one input packet to an output stream without re-encoding (-c copy):
 * rescale timestamps from the input stream timebase into the muxer
 * timebase (offset by the output start time), enforce start/recording-time
 * windows and the initial-keyframe rule, then hand the packet to the muxer
 * via output_packet(). A NULL/EOF path flushes the bitstream filters.
 */
1994 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1996 OutputFile *of = output_files[ost->file_index];
1997 InputFile *f = input_files [ist->file_index];
1998 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1999 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2002 // EOF: flush output bitstream filters.
2004 av_init_packet(&opkt);
2007 output_packet(of, &opkt, ost, 1);
/* drop leading non-keyframes unless the user asked to keep them */
2011 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2012 !ost->copy_initial_nonkeyframes)
2015 if (!ost->frame_number && !ost->copy_prior_start) {
2016 int64_t comp_start = start_time;
2017 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2018 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2019 if (pkt->pts == AV_NOPTS_VALUE ?
2020 ist->pts < comp_start :
2021 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* past the requested -t / recording_time window: close the stream */
2025 if (of->recording_time != INT64_MAX &&
2026 ist->pts >= of->recording_time + start_time) {
2027 close_output_stream(ost);
2031 if (f->recording_time != INT64_MAX) {
2032 start_time = f->ctx->start_time;
2033 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2034 start_time += f->start_time;
2035 if (ist->pts >= f->recording_time + start_time) {
2036 close_output_stream(ost);
2041 /* force the input stream PTS */
2042 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2045 if (av_packet_ref(&opkt, pkt) < 0)
2048 if (pkt->pts != AV_NOPTS_VALUE)
2049 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
/* dts: synthesize from ist->dts when missing; for audio, derive from the
 * sample count so gaps in container dts do not accumulate drift */
2051 if (pkt->dts == AV_NOPTS_VALUE) {
2052 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2053 } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2054 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2056 duration = ist->dec_ctx->frame_size;
2057 opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2058 (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2059 &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2060 /* dts will be set immediately afterwards to what pts is now */
2061 opkt.pts = opkt.dts - ost_tb_start_time;
2063 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2064 opkt.dts -= ost_tb_start_time;
2066 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2068 output_packet(of, &opkt, ost, 0);
/**
 * If the decoder reported no channel layout, guess the default layout for
 * its channel count (bounded by -guess_layout_max) and warn about it.
 * NOTE(review): return statements fall on elided lines; by convention this
 * appears to return non-zero on success — confirm against the full source.
 */
2071 int guess_input_channel_layout(InputStream *ist)
2073 AVCodecContext *dec = ist->dec_ctx;
2075 if (!dec->channel_layout) {
2076 char layout_name[256];
/* too many channels to guess a meaningful default layout */
2078 if (dec->channels > ist->guess_layout_max)
2080 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2081 if (!dec->channel_layout)
2083 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2084 dec->channels, dec->channel_layout);
2085 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2086 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/**
 * Account a decode attempt in the global success/failure counters and
 * react to errors: abort on decode error if -xerror is set, and warn (or
 * abort) when the decoder flagged the produced frame as corrupt.
 */
2091 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2093 if (*got_output || ret<0)
2094 decode_error_stat[ret<0] ++;
2096 if (ret < 0 && exit_on_error)
2099 if (*got_output && ist) {
2100 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2101 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2102 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2109 // Filters can be configured only if the formats of all inputs are known.
// Returns whether every audio/video input of the graph has a known format
// (format >= 0); other media types are not required to be known.
2110 static int ifilter_has_all_input_formats(FilterGraph *fg)
2113 for (i = 0; i < fg->nb_inputs; i++) {
2114 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2115 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/**
 * Push one decoded frame into a filtergraph input. If the frame's format,
 * dimensions, sample rate or channel layout differ from what the graph was
 * configured with, the graph is (re)configured first; frames arriving
 * before all inputs know their format are cloned and queued instead.
 *
 * @return 0 on success, negative AVERROR otherwise.
 */
2121 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2123 FilterGraph *fg = ifilter->graph;
2124 int need_reinit, ret, i;
2126 /* determine if the parameters for this input changed */
2127 need_reinit = ifilter->format != frame->format;
2129 switch (ifilter->ist->st->codecpar->codec_type) {
2130 case AVMEDIA_TYPE_AUDIO:
2131 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2132 ifilter->channels != frame->channels ||
2133 ifilter->channel_layout != frame->channel_layout;
2135 case AVMEDIA_TYPE_VIDEO:
2136 need_reinit |= ifilter->width != frame->width ||
2137 ifilter->height != frame->height;
/* user disabled reinit: keep the existing graph despite param change */
2141 if (!ifilter->ist->reinit_filters && fg->graph)
/* hw frames context change also forces a reinit */
2144 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2145 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2149 ret = ifilter_parameters_from_frame(ifilter, frame);
2154 /* (re)init the graph if possible, otherwise buffer the frame and return */
2155 if (need_reinit || !fg->graph) {
2156 for (i = 0; i < fg->nb_inputs; i++) {
2157 if (!ifilter_has_all_input_formats(fg)) {
/* not all inputs known yet: stash a clone in this input's FIFO */
2158 AVFrame *tmp = av_frame_clone(frame);
2160 return AVERROR(ENOMEM);
2161 av_frame_unref(frame);
2163 if (!av_fifo_space(ifilter->frame_queue)) {
2164 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2166 av_frame_free(&tmp);
2170 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the old graph before tearing it down for reconfiguration */
2175 ret = reap_filters(1);
2176 if (ret < 0 && ret != AVERROR_EOF) {
2177 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2181 ret = configure_filtergraph(fg);
2183 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* hand the frame to the buffersrc; PUSH makes the graph run immediately */
2188 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2190 if (ret != AVERROR_EOF)
2191 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/**
 * Signal EOF on a filtergraph input at timestamp pts. If the graph was
 * never configured, fall back to codecpar parameters so it still can be;
 * fail if an audio/video input's format remains unknown even then.
 */
2198 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2204 if (ifilter->filter) {
2205 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2209 // the filtergraph was never configured
2210 if (ifilter->format < 0)
2211 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2212 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2213 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2214 return AVERROR_INVALIDDATA;
2221 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2222 // There is the following difference: if you got a frame, you must call
2223 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2224 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
//
// Thin wrapper mapping the send/receive decode API onto the old
// got_frame-style calling convention used by the callers in this file.
2225 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2232 ret = avcodec_send_packet(avctx, pkt);
2233 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2234 // decoded frames with avcodec_receive_frame() until done.
2235 if (ret < 0 && ret != AVERROR_EOF)
2239 ret = avcodec_receive_frame(avctx, frame);
2240 if (ret < 0 && ret != AVERROR(EAGAIN))
/**
 * Fan a decoded frame out to every filtergraph input fed by this stream.
 * All but the last filter receive a reference copy (ist->filter_frame);
 * the last one consumes decoded_frame itself, avoiding one extra ref.
 * EOF from a filter is ignored; other errors abort the loop.
 */
2248 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2253 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2254 for (i = 0; i < ist->nb_filters; i++) {
2255 if (i < ist->nb_filters - 1) {
2256 f = ist->filter_frame;
2257 ret = av_frame_ref(f, decoded_frame);
2262 ret = ifilter_send_frame(ist->filters[i], f);
2263 if (ret == AVERROR_EOF)
2264 ret = 0; /* ignore */
2266 av_log(NULL, AV_LOG_ERROR,
2267 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/**
 * Decode one audio packet, fix up the frame's timestamp (preferring the
 * frame pts, then the packet pts, then the stream dts estimate), advance
 * the stream's next_pts/next_dts by the decoded sample count, and forward
 * the frame to the filtergraphs.
 *
 * @return decode() result, or a negative error from the filter path.
 */
2274 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2277 AVFrame *decoded_frame;
2278 AVCodecContext *avctx = ist->dec_ctx;
2280 AVRational decoded_frame_tb;
/* lazily allocate the per-stream reusable frames */
2282 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2283 return AVERROR(ENOMEM);
2284 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2285 return AVERROR(ENOMEM);
2286 decoded_frame = ist->decoded_frame;
2288 update_benchmark(NULL);
2289 ret = decode(avctx, decoded_frame, got_output, pkt);
2290 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2294 if (ret >= 0 && avctx->sample_rate <= 0) {
2295 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2296 ret = AVERROR_INVALIDDATA;
2299 if (ret != AVERROR_EOF)
2300 check_decode_result(ist, got_output, ret);
2302 if (!*got_output || ret < 0)
2305 ist->samples_decoded += decoded_frame->nb_samples;
2306 ist->frames_decoded++;
2308 /* increment next_dts to use for the case where the input stream does not
2309 have timestamps or there are multiple frames in the packet */
2310 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2312 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* choose the best available timestamp source and remember its timebase */
2315 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2316 decoded_frame_tb = ist->st->time_base;
2317 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2318 decoded_frame->pts = pkt->pts;
2319 decoded_frame_tb = ist->st->time_base;
2321 decoded_frame->pts = ist->dts;
2322 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale into 1/sample_rate units, tracking rounding error across calls */
2324 if (decoded_frame->pts != AV_NOPTS_VALUE)
2325 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2326 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2327 (AVRational){1, avctx->sample_rate});
2328 ist->nb_samples = decoded_frame->nb_samples;
2329 err = send_frame_to_filters(ist, decoded_frame);
2331 av_frame_unref(ist->filter_frame);
2332 av_frame_unref(decoded_frame);
2333 return err < 0 ? err : ret;
/**
 * Decode one video packet: attach the current stream dts to the packet
 * (buffered for EOF fallback), decode, pick the best-effort timestamp for
 * the frame, update ist->pts/next_pts, and forward the frame to the
 * filtergraphs. duration_pts receives the frame's packet duration so the
 * caller can advance its clock.
 *
 * @return decode() result, or a negative error from the filter path.
 */
2336 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2339 AVFrame *decoded_frame;
2340 int i, ret = 0, err = 0;
2341 int64_t best_effort_timestamp;
2342 int64_t dts = AV_NOPTS_VALUE;
2345 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2346 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2348 if (!eof && pkt && pkt->size == 0)
2351 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2352 return AVERROR(ENOMEM);
2353 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2354 return AVERROR(ENOMEM);
2355 decoded_frame = ist->decoded_frame;
2356 if (ist->dts != AV_NOPTS_VALUE)
2357 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2360 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2363 // The old code used to set dts on the drain packet, which does not work
2364 // with the new API anymore.
/* keep a FIFO of dts values as an EOF-time timestamp fallback */
2366 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2368 return AVERROR(ENOMEM);
2369 ist->dts_buffer = new;
2370 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2373 update_benchmark(NULL);
2374 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2375 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2379 // The following line may be required in some cases where there is no parser
2380 // or the parser does not has_b_frames correctly
2381 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2382 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2383 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2385 av_log(ist->dec_ctx, AV_LOG_WARNING,
2386 "video_delay is larger in decoder than demuxer %d > %d.\n"
2387 "If you want to help, upload a sample "
2388 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2389 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2390 ist->dec_ctx->has_b_frames,
2391 ist->st->codecpar->video_delay);
2394 if (ret != AVERROR_EOF)
2395 check_decode_result(ist, got_output, ret);
/* debug aid: the decoder changed geometry/format mid-stream */
2397 if (*got_output && ret >= 0) {
2398 if (ist->dec_ctx->width != decoded_frame->width ||
2399 ist->dec_ctx->height != decoded_frame->height ||
2400 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2401 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2402 decoded_frame->width,
2403 decoded_frame->height,
2404 decoded_frame->format,
2405 ist->dec_ctx->width,
2406 ist->dec_ctx->height,
2407 ist->dec_ctx->pix_fmt);
2411 if (!*got_output || ret < 0)
/* honor the user's -top override for field order */
2414 if(ist->top_field_first>=0)
2415 decoded_frame->top_field_first = ist->top_field_first;
2417 ist->frames_decoded++;
/* download the frame from hw memory when a hwaccel was used */
2419 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2420 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2424 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2426 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2427 *duration_pts = decoded_frame->pkt_duration;
/* CFR input: ignore container timestamps, count frames instead */
2429 if (ist->framerate.num)
2430 best_effort_timestamp = ist->cfr_next_pts++;
/* at EOF, fall back to the oldest buffered dts (shift the FIFO down) */
2432 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2433 best_effort_timestamp = ist->dts_buffer[0];
2435 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2436 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2437 ist->nb_dts_buffer--;
2440 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2441 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2443 if (ts != AV_NOPTS_VALUE)
2444 ist->next_pts = ist->pts = ts;
2448 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2449 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2450 ist->st->index, av_ts2str(decoded_frame->pts),
2451 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2452 best_effort_timestamp,
2453 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2454 decoded_frame->key_frame, decoded_frame->pict_type,
2455 ist->st->time_base.num, ist->st->time_base.den);
2458 if (ist->st->sample_aspect_ratio.num)
2459 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2461 err = send_frame_to_filters(ist, decoded_frame);
2464 av_frame_unref(ist->filter_frame);
2465 av_frame_unref(decoded_frame);
2466 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet from input stream 'ist' and dispatch the result.
 *
 * - Decodes via avcodec_decode_subtitle2() and validates with
 *   check_decode_result().
 * - With -fix_sub_duration, output is delayed by one subtitle: the previous
 *   subtitle's end_display_time is clipped so it does not overlap the start
 *   of the current one (the FFSWAPs exchange current/previous state).
 * - Feeds sub2video either directly (filter frame already configured) or by
 *   queueing into a FIFO until the filtergraph is ready.
 * - Finally sends the subtitle to every output stream that encodes subtitles
 *   from this input.
 *
 * NOTE(review): this listing is elided (original line numbers skip), so some
 * intermediate control flow — early returns, error handling — is not visible
 * here; comments only describe what the visible lines establish.
 */
2469 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2472 AVSubtitle subtitle;
2474 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2475 &subtitle, got_output, pkt);
2477 check_decode_result(NULL, got_output, ret);
/* Decode failed or produced nothing: flush sub2video state on this path. */
2479 if (ret < 0 || !*got_output) {
2482 sub2video_flush(ist);
/* -fix_sub_duration: clip the PREVIOUS subtitle's display duration to end
 * where the current subtitle starts (duration computed in milliseconds). */
2486 if (ist->fix_sub_duration) {
2488 if (ist->prev_sub.got_output) {
2489 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2490 1000, AV_TIME_BASE);
2491 if (end < ist->prev_sub.subtitle.end_display_time) {
2492 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2493 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2494 ist->prev_sub.subtitle.end_display_time, end,
2495 end <= 0 ? ", dropping it" : "");
2496 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current decode result with the buffered previous one, so output
 * lags one subtitle behind input while fixing durations. */
2499 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2500 FFSWAP(int, ret, ist->prev_sub.ret);
2501 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video path: render immediately if the frame exists, otherwise queue
 * the AVSubtitle until filters are configured (FIFO grows by doubling). */
2509 if (ist->sub2video.frame) {
2510 sub2video_update(ist, &subtitle);
2511 } else if (ist->nb_filters) {
2512 if (!ist->sub2video.sub_queue)
2513 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2514 if (!ist->sub2video.sub_queue)
2516 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2517 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2521 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
/* Empty subtitles carry no rectangles; nothing to encode. */
2525 if (!subtitle.num_rects)
2528 ist->frames_decoded++;
/* Send the decoded subtitle to every eligible subtitle encoder. */
2530 for (i = 0; i < nb_output_streams; i++) {
2531 OutputStream *ost = output_streams[i];
2533 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2534 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2537 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2542 avsubtitle_free(&subtitle);
/*
 * Signal EOF on every filtergraph input fed by this input stream, using the
 * stream's current pts (converted from AV_TIME_BASE back to stream time base)
 * as the EOF timestamp.  NOTE(review): the loop tail / return are elided in
 * this listing; error propagation from ifilter_send_eof() is not visible here.
 */
2546 static int send_filter_eof(InputStream *ist)
2549 /* TODO keep pts also in stream time base to avoid converting back */
2550 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2551 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2553 for (i = 0; i < ist->nb_filters; i++) {
2554 ret = ifilter_send_eof(ist->filters[i], pts);
2561 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Core per-packet dispatcher: route one input packet (or EOF when pkt==NULL)
 * to the decoders, or advance timestamps for stream copy.
 *
 * Responsibilities visible in this listing:
 *  - seed ist->dts/pts on the first packet (saw_first_ts);
 *  - track next_dts/next_pts in AV_TIME_BASE units across packets;
 *  - loop the decoder while decoding is needed, per media type;
 *  - on full EOF (and unless no_eof, used for -stream_loop), mark the
 *    attached filtergraph inputs finished;
 *  - for stream copy, advance next_dts from packet duration / frame size /
 *    codec framerate and forward the packet with do_streamcopy().
 *
 * Returns !eof_reached (non-zero while more input can be processed).
 * NOTE(review): this listing is elided; error paths, 'repeating' handling
 * and several break/continue statements are not visible here.
 */
2562 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2566 int eof_reached = 0;
/* First packet for this stream: derive an initial dts.  For streams with a
 * known avg_frame_rate, pre-roll by has_b_frames frame durations. */
2569 if (!ist->saw_first_ts) {
2570 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2572 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2573 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2574 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2576 ist->saw_first_ts = 1;
2579 if (ist->next_dts == AV_NOPTS_VALUE)
2580 ist->next_dts = ist->dts;
2581 if (ist->next_pts == AV_NOPTS_VALUE)
2582 ist->next_pts = ist->pts;
2586 av_init_packet(&avpkt);
/* Packet dts (rescaled to AV_TIME_BASE) drives the stream clock; pts follows
 * it except for video being decoded (decoder output sets pts there). */
2593 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2594 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2595 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2596 ist->next_pts = ist->pts = ist->dts;
2599 // while we have more to decode or while the decoder did output something on EOF
2600 while (ist->decoding_needed) {
2601 int64_t duration_dts = 0;
2602 int64_t duration_pts = 0;
2604 int decode_failed = 0;
2606 ist->pts = ist->next_pts;
2607 ist->dts = ist->next_dts;
2609 switch (ist->dec_ctx->codec_type) {
2610 case AVMEDIA_TYPE_AUDIO:
2611 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2614 case AVMEDIA_TYPE_VIDEO:
2615 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
/* Estimate the dts increment of this frame: prefer the packet duration,
 * otherwise derive it from the codec framerate and repeat_pict ticks. */
2617 if (!repeating || !pkt || got_output) {
2618 if (pkt && pkt->duration) {
2619 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2620 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2621 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2622 duration_dts = ((int64_t)AV_TIME_BASE *
2623 ist->dec_ctx->framerate.den * ticks) /
2624 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2627 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2628 ist->next_dts += duration_dts;
2630 ist->next_dts = AV_NOPTS_VALUE;
/* pts advances by the decoder-reported frame duration when available,
 * falling back to the dts-based estimate. */
2634 if (duration_pts > 0) {
2635 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2637 ist->next_pts += duration_dts;
2641 case AVMEDIA_TYPE_SUBTITLE:
2644 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2645 if (!pkt && ret >= 0)
2652 if (ret == AVERROR_EOF) {
2658 if (decode_failed) {
2659 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2660 ist->file_index, ist->st->index, av_err2str(ret));
2662 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2663 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2665 if (!decode_failed || exit_on_error)
2671 ist->got_output = 1;
2676 // During draining, we might get multiple output frames in this loop.
2677 // ffmpeg.c does not drain the filter chain on configuration changes,
2678 // which means if we send multiple frames at once to the filters, and
2679 // one of those frames changes configuration, the buffered frames will
2680 // be lost. This can upset certain FATE tests.
2681 // Decode only 1 frame per call on EOF to appease these FATE tests.
2682 // The ideal solution would be to rewrite decoding to use the new
2683 // decoding API in a better way.
2690 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2691 /* except when looping we need to flush but not to send an EOF */
2692 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2693 int ret = send_filter_eof(ist);
2695 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2700 /* handle stream copy */
2701 if (!ist->decoding_needed && pkt) {
2702 ist->dts = ist->next_dts;
2703 switch (ist->dec_ctx->codec_type) {
2704 case AVMEDIA_TYPE_AUDIO:
2705 av_assert1(pkt->duration >= 0);
/* Audio copy: advance by one codec frame when frame_size/sample_rate are
 * known, otherwise by the packet duration. */
2706 if (ist->dec_ctx->sample_rate) {
2707 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2708 ist->dec_ctx->sample_rate;
2710 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2713 case AVMEDIA_TYPE_VIDEO:
/* Video copy: forced -r framerate snaps next_dts to the frame grid;
 * otherwise use packet duration or the codec framerate/ticks estimate. */
2714 if (ist->framerate.num) {
2715 // TODO: Remove work-around for c99-to-c89 issue 7
2716 AVRational time_base_q = AV_TIME_BASE_Q;
2717 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2718 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2719 } else if (pkt->duration) {
2720 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2721 } else if(ist->dec_ctx->framerate.num != 0) {
2722 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2723 ist->next_dts += ((int64_t)AV_TIME_BASE *
2724 ist->dec_ctx->framerate.den * ticks) /
2725 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2729 ist->pts = ist->dts;
2730 ist->next_pts = ist->next_dts;
/* Forward the packet to every stream-copy output fed by this input. */
2732 for (i = 0; i < nb_output_streams; i++) {
2733 OutputStream *ost = output_streams[i];
2735 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2738 do_streamcopy(ist, ost, pkt);
2741 return !eof_reached;
/*
 * Generate an SDP description covering all RTP-muxed output files and either
 * print it to stdout or write it to the file given by -sdp_file.
 * Waits (returns early) until every output file's header has been written.
 * NOTE(review): allocation-failure and early-return paths are elided in this
 * listing.
 */
2744 static void print_sdp(void)
2749 AVIOContext *sdp_pb;
2750 AVFormatContext **avc;
/* All muxers must have written their headers before an SDP can be built. */
2752 for (i = 0; i < nb_output_files; i++) {
2753 if (!output_files[i]->header_written)
2757 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the outputs using the "rtp" muxer; j counts them. */
2760 for (i = 0, j = 0; i < nb_output_files; i++) {
2761 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2762 avc[j] = output_files[i]->ctx;
2770 av_sdp_create(avc, j, sdp, sizeof(sdp));
2772 if (!sdp_filename) {
2773 printf("SDP:\n%s\n", sdp);
2776 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2777 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2779 avio_print(sdp_pb, sdp);
2780 avio_closep(&sdp_pb);
2781 av_freep(&sdp_filename);
/*
 * AVCodecContext.get_format callback: pick a pixel format from the decoder's
 * candidate list, preferring a hardware format that matches the hwaccel the
 * user requested (-hwaccel).  Software formats and non-matching hwaccels are
 * skipped; on an unrecoverable mismatch with an explicitly requested hwaccel
 * the function returns AV_PIX_FMT_NONE (fatal).
 * NOTE(review): several continue/break lines are elided in this listing, so
 * the exact skip conditions inside the loops are partially inferred.
 */
2789 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2791 InputStream *ist = s->opaque;
2792 const enum AVPixelFormat *p;
/* Candidates are ordered best-first; iterate until AV_PIX_FMT_NONE. */
2795 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2796 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2797 const AVCodecHWConfig *config = NULL;
/* Software formats need no setup here. */
2800 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Generic/auto hwaccel: search the codec's hw configs for one supporting
 * the HW_DEVICE_CTX method and offering this pixel format. */
2803 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2804 ist->hwaccel_id == HWACCEL_AUTO) {
2806 config = avcodec_get_hw_config(s->codec, i);
2809 if (!(config->methods &
2810 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2812 if (config->pix_fmt == *p)
2817 if (config->device_type != ist->hwaccel_device_type) {
2818 // Different hwaccel offered, ignore.
2822 ret = hwaccel_decode_init(s);
2824 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2825 av_log(NULL, AV_LOG_FATAL,
2826 "%s hwaccel requested for input stream #%d:%d, "
2827 "but cannot be initialized.\n",
2828 av_hwdevice_get_type_name(config->device_type),
2829 ist->file_index, ist->st->index);
2830 return AV_PIX_FMT_NONE;
/* Legacy path: look this pixel format up in the static hwaccels[] table. */
2835 const HWAccel *hwaccel = NULL;
2837 for (i = 0; hwaccels[i].name; i++) {
2838 if (hwaccels[i].pix_fmt == *p) {
2839 hwaccel = &hwaccels[i];
2844 // No hwaccel supporting this pixfmt.
2847 if (hwaccel->id != ist->hwaccel_id) {
2848 // Does not match requested hwaccel.
2852 ret = hwaccel->init(s);
2854 av_log(NULL, AV_LOG_FATAL,
2855 "%s hwaccel requested for input stream #%d:%d, "
2856 "but cannot be initialized.\n", hwaccel->name,
2857 ist->file_index, ist->st->index);
2858 return AV_PIX_FMT_NONE;
/* Propagate a pre-existing hw frames context to the decoder, if any. */
2862 if (ist->hw_frames_ctx) {
2863 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2864 if (!s->hw_frames_ctx)
2865 return AV_PIX_FMT_NONE;
/* Remember the chosen hw format so get_buffer/retrieve_data can match it. */
2868 ist->hwaccel_pix_fmt = *p;
2875 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2877 InputStream *ist = s->opaque;
2879 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2880 return ist->hwaccel_get_buffer(s, frame, flags);
2882 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream 'ist_index' (when decoding is needed)
 * and install the ffmpeg.c callbacks (get_format, get_buffer2).  On failure
 * a human-readable message is written into 'error' and a negative AVERROR is
 * returned.  Also resets next_pts/next_dts for the stream.
 * NOTE(review): some early-return/goto lines are elided in this listing.
 */
2885 static int init_input_stream(int ist_index, char *error, int error_len)
2888 InputStream *ist = input_streams[ist_index];
2890 if (ist->decoding_needed) {
2891 AVCodec *codec = ist->dec;
/* No decoder was found for this codec id. */
2893 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2894 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2895 return AVERROR(EINVAL);
/* Hook our callbacks; opaque lets the callbacks recover the InputStream. */
2898 ist->dec_ctx->opaque = ist;
2899 ist->dec_ctx->get_format = get_format;
2900 ist->dec_ctx->get_buffer2 = get_buffer;
2901 ist->dec_ctx->thread_safe_callbacks = 1;
2903 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles used for output: enable end-display-time computation. */
2904 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2905 (ist->decoding_needed & DECODING_FOR_OST)) {
2906 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2907 if (ist->decoding_needed & DECODING_FOR_FILTER)
2908 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2911 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2913 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2914 * audio, and video decoders such as cuvid or mediacodec */
2915 ist->dec_ctx->pkt_timebase = ist->st->time_base;
/* Default to automatic thread count unless the user set one. */
2917 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2918 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2919 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2920 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2921 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2923 ret = hw_device_setup_for_decode(ist);
2925 snprintf(error, error_len, "Device setup failed for "
2926 "decoder on input stream #%d:%d : %s",
2927 ist->file_index, ist->st->index, av_err2str(ret));
2931 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2932 if (ret == AVERROR_EXPERIMENTAL)
2933 abort_codec_experimental(codec, 0);
2935 snprintf(error, error_len,
2936 "Error while opening decoder for input stream "
2938 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left unconsumed by avcodec_open2() are invalid. */
2941 assert_avoptions(ist->decoder_opts);
2944 ist->next_pts = AV_NOPTS_VALUE;
2945 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream feeding this output stream, identified by
 * ost->source_index.  NOTE(review): the branch for source_index < 0 is
 * elided in this listing — presumably returns NULL (callers such as
 * init_encoder_time_base null-check the result); confirm against the
 * full source.
 */
2950 static InputStream *get_input_stream(OutputStream *ost)
2952 if (ost->source_index >= 0)
2953 return input_streams[ost->source_index];
/*
 * qsort() comparator for arrays of int64_t.
 * Returns -1, 0 or +1 — the explicit three-way comparison below is the
 * expansion of FFDIFFSIGN(a, b) = ((a) > (b)) - ((a) < (b)), and avoids
 * the overflow a naive subtraction of two int64_t values could cause.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    if (lhs > rhs)
        return 1;
    if (lhs < rhs)
        return -1;
    return 0;
}
2962 /* open the muxer when all the streams are initialized */
2963 static int check_init_output_file(OutputFile *of, int file_index)
2967 for (i = 0; i < of->ctx->nb_streams; i++) {
2968 OutputStream *ost = output_streams[of->ost_index + i];
2969 if (!ost->initialized)
2973 of->ctx->interrupt_callback = int_cb;
2975 ret = avformat_write_header(of->ctx, &of->opts);
2977 av_log(NULL, AV_LOG_ERROR,
2978 "Could not write header for output file #%d "
2979 "(incorrect codec parameters ?): %s\n",
2980 file_index, av_err2str(ret));
2983 //assert_avoptions(of->opts);
2984 of->header_written = 1;
2986 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2988 if (sdp_filename || want_sdp)
2991 /* flush the muxing queues */
2992 for (i = 0; i < of->ctx->nb_streams; i++) {
2993 OutputStream *ost = output_streams[of->ost_index + i];
2995 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2996 if (!av_fifo_size(ost->muxing_queue))
2997 ost->mux_timebase = ost->st->time_base;
2999 while (av_fifo_size(ost->muxing_queue)) {
3001 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3002 write_packet(of, &pkt, ost, 1);
/*
 * Initialize the chain of bitstream filters attached to 'ost': each filter's
 * input parameters/time base come from the previous filter's output (or from
 * the stream itself for the first one).  After init, the stream's codecpar
 * and time_base are taken from the LAST filter's output so the muxer sees
 * the post-filtering parameters.  No-op when the stream has no bsfs.
 * NOTE(review): error-return lines between the calls are elided here.
 */
3009 static int init_output_bsfs(OutputStream *ost)
3014 if (!ost->nb_bitstream_filters)
3017 for (i = 0; i < ost->nb_bitstream_filters; i++) {
3018 ctx = ost->bsf_ctx[i];
/* Chain: filter i consumes filter i-1's output parameters. */
3020 ret = avcodec_parameters_copy(ctx->par_in,
3021 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3025 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3027 ret = av_bsf_init(ctx);
3029 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3030 ost->bsf_ctx[i]->filter->name);
/* Publish the final filter's output parameters on the stream. */
3035 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3036 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3040 ost->st->time_base = ctx->time_base_out;
/*
 * Initialize an output stream in stream-copy mode (-c copy): derive the
 * output codec parameters from the input stream's parameters (optionally
 * overridden by user codec options), pick a valid codec_tag for the output
 * container, copy timing info, side data, disposition, and apply per-codec
 * fixups (MP3/AC3 block_align, aspect-ratio override for video).
 * NOTE(review): error-return lines after most calls are elided here.
 */
3045 static int init_output_stream_streamcopy(OutputStream *ost)
3047 OutputFile *of = output_files[ost->file_index];
3048 InputStream *ist = get_input_stream(ost);
3049 AVCodecParameters *par_dst = ost->st->codecpar;
3050 AVCodecParameters *par_src = ost->ref_par;
3053 uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy always has a source stream and never a filtergraph. */
3055 av_assert0(ist && !ost->filter);
/* Round-trip input params through enc_ctx so user -c:v copy options
 * (set via av_opt_set_dict) can override individual fields. */
3057 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3059 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3061 av_log(NULL, AV_LOG_FATAL,
3062 "Error setting up codec context options.\n");
3066 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3068 av_log(NULL, AV_LOG_FATAL,
3069 "Error getting reference codec parameters.\n");
/* Keep the source codec_tag only when the output container either has no
 * tag table, maps the tag back to the same codec id, or cannot supply a
 * tag of its own. */
3074 unsigned int codec_tag_tmp;
3075 if (!of->ctx->oformat->codec_tag ||
3076 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3077 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3078 codec_tag = par_src->codec_tag;
3081 ret = avcodec_parameters_copy(par_dst, par_src);
3085 par_dst->codec_tag = codec_tag;
3087 if (!ost->frame_rate.num)
3088 ost->frame_rate = ist->framerate;
3089 ost->st->avg_frame_rate = ost->frame_rate;
3091 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3095 // copy timebase while removing common factors
3096 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3097 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3099 // copy estimated duration as a hint to the muxer
3100 if (ost->st->duration <= 0 && ist->st->duration > 0)
3101 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3104 ost->st->disposition = ist->st->disposition;
/* Copy every packet side-data entry from the input stream verbatim. */
3106 if (ist->st->nb_side_data) {
3107 for (i = 0; i < ist->st->nb_side_data; i++) {
3108 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3111 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3113 return AVERROR(ENOMEM);
3114 memcpy(dst_data, sd_src->data, sd_src->size);
/* User requested rotation override: emit a fresh display matrix. */
3118 if (ost->rotate_overridden) {
3119 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3120 sizeof(int32_t) * 9);
3122 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3125 switch (par_dst->codec_type) {
3126 case AVMEDIA_TYPE_AUDIO:
3127 if (audio_volume != 256) {
3128 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* These block_align values are bogus/meaningless for MP3 and AC3; clear
 * them so the muxer does not propagate them. */
3131 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3132 par_dst->block_align= 0;
3133 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3134 par_dst->block_align= 0;
3136 case AVMEDIA_TYPE_VIDEO:
/* SAR priority: -aspect override > input stream SAR > input codec SAR. */
3137 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3139 av_mul_q(ost->frame_aspect_ratio,
3140 (AVRational){ par_dst->height, par_dst->width });
3141 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3142 "with stream copy may produce invalid files\n");
3144 else if (ist->st->sample_aspect_ratio.num)
3145 sar = ist->st->sample_aspect_ratio;
3147 sar = par_src->sample_aspect_ratio;
3148 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3149 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3150 ost->st->r_frame_rate = ist->st->r_frame_rate;
3154 ost->mux_timebase = ist->st->time_base;
/*
 * Set the "encoder" metadata tag on the output stream to identify libavcodec
 * and the encoder used (e.g. "Lavc... libx264"), unless the user already set
 * one.  When either the muxer or the encoder runs in bitexact mode, only the
 * short "Lavc" prefix is used so output files remain reproducible.
 */
3159 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3161 AVDictionaryEntry *e;
3163 uint8_t *encoder_string;
3164 int encoder_string_len;
3165 int format_flags = 0;
3166 int codec_flags = ost->enc_ctx->flags;
/* Respect a user-provided encoder tag. */
3168 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags from the muxer options to detect +bitexact. */
3171 e = av_dict_get(of->opts, "fflags", NULL, 0);
3173 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3176 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Likewise evaluate the encoder's -flags option string. */
3178 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3180 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3183 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2: one space separator and the terminating NUL. */
3186 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3187 encoder_string = av_mallocz(encoder_string_len);
3188 if (!encoder_string)
3191 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3192 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3194 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3195 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict. */
3196 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3197 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse the -force_key_frames specification 'kf' (a comma-separated list of
 * timestamps, where an entry "chapters[+offset]" expands to every chapter
 * start of the output file) into a sorted array of forced-keyframe pts in
 * the encoder time base, stored in ost->forced_kf_pts/forced_kf_count.
 * Exits the program on allocation failure.
 * NOTE(review): the comma-counting loop body and some advance/cleanup lines
 * are elided in this listing.
 */
3200 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3201 AVCodecContext *avctx)
3204 int n = 1, i, size, index = 0;
/* Count entries: n starts at 1 and (per the elided loop body) grows with
 * each ',' found in kf. */
3207 for (p = kf; *p; p++)
3211 pts = av_malloc_array(size, sizeof(*pts));
3213 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3218 for (i = 0; i < n; i++) {
3219 char *next = strchr(p, ',');
/* "chapters[+offset]": add one keyframe per chapter, shifted by offset. */
3224 if (!memcmp(p, "chapters", 8)) {
3226 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* One list slot was already reserved for this entry, hence nb_chapters-1
 * extra slots; guard the size addition against int overflow. */
3229 if (avf->nb_chapters > INT_MAX - size ||
3230 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3232 av_log(NULL, AV_LOG_FATAL,
3233 "Could not allocate forced key frames array.\n");
3236 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3237 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3239 for (j = 0; j < avf->nb_chapters; j++) {
3240 AVChapter *c = avf->chapters[j];
3241 av_assert1(index < size);
3242 pts[index++] = av_rescale_q(c->start, c->time_base,
3243 avctx->time_base) + t;
/* Plain timestamp entry. */
3248 t = parse_time_or_die("force_key_frames", p, 1);
3249 av_assert1(index < size);
3250 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3257 av_assert0(index == size);
/* Sorted order is required by the encoder-side lookup of forced kf pts. */
3258 qsort(pts, size, sizeof(*pts), compare_int64);
3259 ost->forced_kf_count = size;
3260 ost->forced_kf_pts = pts;
/*
 * Choose the encoder time base for 'ost':
 *  - a positive -enc_time_base value wins outright;
 *  - a negative value means "copy the input stream's time base" (warns and
 *    falls through to the default when no input stream is available);
 *  - otherwise use the media-type-specific default passed by the caller.
 * NOTE(review): the early 'return's after each assignment are elided in this
 * listing.
 */
3263 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3265 InputStream *ist = get_input_stream(ost);
3266 AVCodecContext *enc_ctx = ost->enc_ctx;
3267 AVFormatContext *oc;
3269 if (ost->enc_timebase.num > 0) {
3270 enc_ctx->time_base = ost->enc_timebase;
3274 if (ost->enc_timebase.num < 0) {
3276 enc_ctx->time_base = ist->st->time_base;
3280 oc = output_files[ost->file_index]->ctx;
3281 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3284 enc_ctx->time_base = default_time_base;
/*
 * Configure the encoder context for an output stream that will be encoded:
 * metadata/disposition handling, frame-rate selection for video, and
 * per-media-type setup (sample/pixel format, dimensions, time base, SAR,
 * forced keyframes) taken from the buffersink feeding the encoder.
 * NOTE(review): this listing is elided — several break/return lines and the
 * frame_bits_per_raw_sample condition head (around original line 3388) are
 * missing, so some control flow is only partially visible here.
 */
3287 static int init_output_stream_encode(OutputStream *ost)
3289 InputStream *ist = get_input_stream(ost);
3290 AVCodecContext *enc_ctx = ost->enc_ctx;
3291 AVCodecContext *dec_ctx = NULL;
3292 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3295 set_encoder_id(output_files[ost->file_index], ost);
3297 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3298 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3299 // which have to be filtered out to prevent leaking them to output files.
3300 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3303 ost->st->disposition = ist->st->disposition;
3305 dec_ctx = ist->dec_ctx;
3307 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the file, mark it as the
 * default audio/video stream. */
3309 for (j = 0; j < oc->nb_streams; j++) {
3310 AVStream *st = oc->streams[j];
3311 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3314 if (j == oc->nb_streams)
3315 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3316 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3317 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Video frame rate priority: -r > filtergraph > input -r > input
 * r_frame_rate > hard 25fps fallback (with warning). */
3320 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3321 if (!ost->frame_rate.num)
3322 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3323 if (ist && !ost->frame_rate.num)
3324 ost->frame_rate = ist->framerate;
3325 if (ist && !ost->frame_rate.num)
3326 ost->frame_rate = ist->st->r_frame_rate;
3327 if (ist && !ost->frame_rate.num) {
3328 ost->frame_rate = (AVRational){25, 1};
3329 av_log(NULL, AV_LOG_WARNING,
3331 "about the input framerate is available. Falling "
3332 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3333 "if you want a different framerate.\n",
3334 ost->file_index, ost->index);
/* Snap to the nearest rate the encoder supports, unless -force_fps. */
3337 if (ost->enc->supported_framerates && !ost->force_fps) {
3338 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3339 ost->frame_rate = ost->enc->supported_framerates[idx];
3341 // reduce frame rate for mpeg4 to be within the spec limits
3342 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3343 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3344 ost->frame_rate.num, ost->frame_rate.den, 65535);
3348 switch (enc_ctx->codec_type) {
3349 case AVMEDIA_TYPE_AUDIO:
/* Audio: pull format/rate/layout from the buffersink feeding the encoder. */
3350 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3352 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3353 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3354 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3355 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3356 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3358 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3361 case AVMEDIA_TYPE_VIDEO:
/* Video: default encoder time base is 1/frame_rate. */
3362 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3364 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3365 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3366 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3367 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3368 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3369 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3372 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3373 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
/* SAR: -aspect override is converted to a sample aspect ratio, otherwise
 * take it from the filtergraph output. */
3374 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3375 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3376 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3377 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3379 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3381 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3382 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3384 enc_ctx->framerate = ost->frame_rate;
3386 ost->st->avg_frame_rate = ost->frame_rate;
/* (condition head elided) when the frame geometry/format changed from the
 * decoder's, fall back to the global frame_bits_per_raw_sample setting. */
3389 enc_ctx->width != dec_ctx->width ||
3390 enc_ctx->height != dec_ctx->height ||
3391 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3392 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -top 0/1 forces bottom/top field first. */
3395 if (ost->top_field_first == 0) {
3396 enc_ctx->field_order = AV_FIELD_BB;
3397 } else if (ost->top_field_first == 1) {
3398 enc_ctx->field_order = AV_FIELD_TT;
/* -force_key_frames: "expr:" parses an expression evaluated per frame;
 * "source" keeps input keyframes; anything else is a static timestamp list. */
3401 if (ost->forced_keyframes) {
3402 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3403 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3404 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3406 av_log(NULL, AV_LOG_ERROR,
3407 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3410 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3411 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3412 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3413 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3415 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3416 // parse it only for static kf timings
3417 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3418 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3422 case AVMEDIA_TYPE_SUBTITLE:
3423 enc_ctx->time_base = AV_TIME_BASE_Q;
/* Text subtitles have no dimensions; inherit them from the input. */
3424 if (!enc_ctx->width) {
3425 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3426 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3429 case AVMEDIA_TYPE_DATA:
3436 ost->mux_timebase = enc_ctx->time_base;
3441 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3445 if (ost->encoding_needed) {
3446 AVCodec *codec = ost->enc;
3447 AVCodecContext *dec = NULL;
3450 ret = init_output_stream_encode(ost);
3454 if ((ist = get_input_stream(ost)))
3456 if (dec && dec->subtitle_header) {
3457 /* ASS code assumes this buffer is null terminated so add extra byte. */
3458 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3459 if (!ost->enc_ctx->subtitle_header)
3460 return AVERROR(ENOMEM);
3461 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3462 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3464 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3465 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3466 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3468 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3469 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3470 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3472 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3473 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3474 av_buffersink_get_format(ost->filter->filter)) {
3475 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3476 if (!ost->enc_ctx->hw_frames_ctx)
3477 return AVERROR(ENOMEM);
3479 ret = hw_device_setup_for_encode(ost);
3481 snprintf(error, error_len, "Device setup failed for "
3482 "encoder on output stream #%d:%d : %s",
3483 ost->file_index, ost->index, av_err2str(ret));
3487 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3488 int input_props = 0, output_props = 0;
3489 AVCodecDescriptor const *input_descriptor =
3490 avcodec_descriptor_get(dec->codec_id);
3491 AVCodecDescriptor const *output_descriptor =
3492 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3493 if (input_descriptor)
3494 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3495 if (output_descriptor)
3496 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3497 if (input_props && output_props && input_props != output_props) {
3498 snprintf(error, error_len,
3499 "Subtitle encoding currently only possible from text to text "
3500 "or bitmap to bitmap");
3501 return AVERROR_INVALIDDATA;
3505 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3506 if (ret == AVERROR_EXPERIMENTAL)
3507 abort_codec_experimental(codec, 1);
3508 snprintf(error, error_len,
3509 "Error while opening encoder for output stream #%d:%d - "
3510 "maybe incorrect parameters such as bit_rate, rate, width or height",
3511 ost->file_index, ost->index);
3514 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3515 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3516 av_buffersink_set_frame_size(ost->filter->filter,
3517 ost->enc_ctx->frame_size);
3518 assert_avoptions(ost->encoder_opts);
3519 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3520 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3521 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3522 " It takes bits/s as argument, not kbits/s\n");
3524 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3526 av_log(NULL, AV_LOG_FATAL,
3527 "Error initializing the output stream codec context.\n");
3531 * FIXME: ost->st->codec should't be needed here anymore.
3533 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3537 if (ost->enc_ctx->nb_coded_side_data) {
3540 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3541 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3544 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3546 return AVERROR(ENOMEM);
3547 memcpy(dst_data, sd_src->data, sd_src->size);
3552 * Add global input side data. For now this is naive, and copies it
3553 * from the input stream's global side data. All side data should
3554 * really be funneled over AVFrame and libavfilter, then added back to
3555 * packet side data, and then potentially using the first packet for
3560 for (i = 0; i < ist->st->nb_side_data; i++) {
3561 AVPacketSideData *sd = &ist->st->side_data[i];
3562 if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3563 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3565 return AVERROR(ENOMEM);
3566 memcpy(dst, sd->data, sd->size);
3567 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3568 av_display_rotation_set((uint32_t *)dst, 0);
3573 // copy timebase while removing common factors
3574 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3575 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3577 // copy estimated duration as a hint to the muxer
3578 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3579 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3581 ost->st->codec->codec= ost->enc_ctx->codec;
3582 } else if (ost->stream_copy) {
3583 ret = init_output_stream_streamcopy(ost);
3588 // parse user provided disposition, and update stream values
3589 if (ost->disposition) {
3590 static const AVOption opts[] = {
3591 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3592 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3593 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3594 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3595 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3596 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3597 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3598 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3599 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3600 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3601 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3602 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3603 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3604 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3605 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3606 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3609 static const AVClass class = {
3611 .item_name = av_default_item_name,
3613 .version = LIBAVUTIL_VERSION_INT,
3615 const AVClass *pclass = &class;
3617 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3622 /* initialize bitstream filters for the output stream
3623 * needs to be done here, because the codec id for streamcopy is not
3624 * known until now */
3625 ret = init_output_bsfs(ost);
3629 ost->initialized = 1;
3631 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
// Warn (at most once per stream index) when a packet references a stream
// that appeared after initial stream discovery; such streams are ignored
// by the caller.  nb_streams_warn acts as a high-water mark.
3638 static void report_new_stream(int input_index, AVPacket *pkt)
3640 InputFile *file = input_files[input_index];
3641 AVStream *st = file->ctx->streams[pkt->stream_index];
// Already warned about this (or a later) stream index.
3643 if (pkt->stream_index < file->nb_streams_warn)
3645 av_log(file->ctx, AV_LOG_WARNING,
3646 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3647 av_get_media_type_string(st->codecpar->codec_type),
3648 input_index, pkt->stream_index,
3649 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
// Raise the high-water mark so the next new stream warns exactly once.
3650 file->nb_streams_warn = pkt->stream_index + 1;
// One-time setup before the main transcode loop: bind complex-filtergraph
// outputs to a source input stream (for the mapping printout), initialize
// framerate emulation and input streams, open encoders not fed by
// filtergraphs, discard unused programs, write headers for stream-less
// output files, and dump the stream mapping.  Returns 0 or a negative
// AVERROR; `error` carries a human-readable failure message.
3653 static int transcode_init(void)
3655 int ret = 0, i, j, k;
3656 AVFormatContext *oc;
3659 char error[1024] = {0};
// For single-input complex filtergraphs, record which input stream feeds
// each output that has no explicit source yet (search from the last
// input stream backwards).
3661 for (i = 0; i < nb_filtergraphs; i++) {
3662 FilterGraph *fg = filtergraphs[i];
3663 for (j = 0; j < fg->nb_outputs; j++) {
3664 OutputFilter *ofilter = fg->outputs[j];
3665 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3667 if (fg->nb_inputs != 1)
3669 for (k = nb_input_streams-1; k >= 0 ; k--)
3670 if (fg->inputs[0]->ist == input_streams[k])
3672 ofilter->ost->source_index = k;
3676 /* init framerate emulation */
3677 for (i = 0; i < nb_input_files; i++) {
3678 InputFile *ifile = input_files[i];
3679 if (ifile->rate_emu)
3680 for (j = 0; j < ifile->nb_streams; j++)
// Record a per-stream start timestamp; -re pacing measures against it.
3681 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3684 /* init input streams */
3685 for (i = 0; i < nb_input_streams; i++)
3686 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
// On failure, close any encoder contexts before bailing out.
3687 for (i = 0; i < nb_output_streams; i++) {
3688 ost = output_streams[i];
3689 avcodec_close(ost->enc_ctx);
3694 /* open each encoder */
3695 for (i = 0; i < nb_output_streams; i++) {
3696 // skip streams fed from filtergraphs until we have a frame for them
3697 if (output_streams[i]->filter)
3700 ret = init_output_stream(output_streams[i], error, sizeof(error));
// Discard whole programs when none of their streams is used, so the
// demuxer can skip their packets.
3705 /* discard unused programs */
3706 for (i = 0; i < nb_input_files; i++) {
3707 InputFile *ifile = input_files[i];
3708 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3709 AVProgram *p = ifile->ctx->programs[j];
3710 int discard = AVDISCARD_ALL;
3712 for (k = 0; k < p->nb_stream_indexes; k++)
3713 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3714 discard = AVDISCARD_DEFAULT;
3717 p->discard = discard;
3721 /* write headers for files with no streams */
3722 for (i = 0; i < nb_output_files; i++) {
3723 oc = output_files[i]->ctx;
3724 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3725 ret = check_init_output_file(output_files[i], i);
3732 /* dump the stream mapping */
3733 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
// First: inputs feeding complex (non-simple) filtergraphs.
3734 for (i = 0; i < nb_input_streams; i++) {
3735 ist = input_streams[i];
3737 for (j = 0; j < ist->nb_filters; j++) {
3738 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3739 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3740 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3741 ist->filters[j]->name);
3742 if (nb_filtergraphs > 1)
3743 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3744 av_log(NULL, AV_LOG_INFO, "\n");
// Then: every output stream — attachment, complex-graph output, or
// plain input->output mapping (with codec names when transcoding).
3749 for (i = 0; i < nb_output_streams; i++) {
3750 ost = output_streams[i];
3752 if (ost->attachment_filename) {
3753 /* an attached file */
3754 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3755 ost->attachment_filename, ost->file_index, ost->index);
3759 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3760 /* output from a complex graph */
3761 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3762 if (nb_filtergraphs > 1)
3763 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3765 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3766 ost->index, ost->enc ? ost->enc->name : "?");
3770 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3771 input_streams[ost->source_index]->file_index,
3772 input_streams[ost->source_index]->st->index,
3775 if (ost->sync_ist != input_streams[ost->source_index])
3776 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3777 ost->sync_ist->file_index,
3778 ost->sync_ist->st->index);
3779 if (ost->stream_copy)
3780 av_log(NULL, AV_LOG_INFO, " (copy)");
3782 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3783 const AVCodec *out_codec = ost->enc;
3784 const char *decoder_name = "?";
3785 const char *in_codec_name = "?";
3786 const char *encoder_name = "?";
3787 const char *out_codec_name = "?";
3788 const AVCodecDescriptor *desc;
3791 decoder_name = in_codec->name;
3792 desc = avcodec_descriptor_get(in_codec->id);
3794 in_codec_name = desc->name;
// When the coder's name equals the codec's own name it is the
// built-in implementation; print "native" instead.
3795 if (!strcmp(decoder_name, in_codec_name))
3796 decoder_name = "native";
3800 encoder_name = out_codec->name;
3801 desc = avcodec_descriptor_get(out_codec->id);
3803 out_codec_name = desc->name;
3804 if (!strcmp(encoder_name, out_codec_name))
3805 encoder_name = "native";
3808 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3809 in_codec_name, decoder_name,
3810 out_codec_name, encoder_name);
3812 av_log(NULL, AV_LOG_INFO, "\n");
3816 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
// Signal other threads / signal handlers that init completed.
3820 atomic_store(&transcode_init_done, 1);
3825 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
// Return 1 if at least one output stream still wants data, 0 otherwise.
// Streams past their size or frame-count limits are closed as a side
// effect (the whole file's streams, for max_frames).
3826 static int need_output(void)
3830 for (i = 0; i < nb_output_streams; i++) {
3831 OutputStream *ost = output_streams[i];
3832 OutputFile *of = output_files[ost->file_index];
3833 AVFormatContext *os = output_files[ost->file_index]->ctx;
// Skip finished streams and files that hit -fs (limit_filesize).
3835 if (ost->finished ||
3836 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
// -frames reached: close every stream of this output file.
3838 if (ost->frame_number >= ost->max_frames) {
3840 for (j = 0; j < of->ctx->nb_streams; j++)
3841 close_output_stream(output_streams[of->ost_index + j]);
3852 * Select the output stream to process.
3854 * @return selected output stream, or NULL if none available
// Pick the next output stream to process: the unfinished stream with the
// smallest current DTS (streams with no DTS yet sort first via INT64_MIN,
// so newly started streams are served before others).  Returns NULL when
// the best candidate is temporarily unavailable.
3856 static OutputStream *choose_output(void)
3859 int64_t opts_min = INT64_MAX;
3860 OutputStream *ost_min = NULL;
3862 for (i = 0; i < nb_output_streams; i++) {
3863 OutputStream *ost = output_streams[i];
// NOPTS maps to INT64_MIN so uninitialized streams win the comparison.
3864 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3865 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3867 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3868 av_log(NULL, AV_LOG_DEBUG,
3869 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3870 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
// Streams neither initialized nor done with their inputs are skipped.
3872 if (!ost->initialized && !ost->inputs_done)
3875 if (!ost->finished && opts < opts_min) {
3877 ost_min = ost->unavailable ? NULL : ost;
// Enable (on != 0) or disable terminal echo on stdin; silently does
// nothing when stdin is not a tty (tcgetattr fails).
3883 static void set_tty_echo(int on)
3887 if (tcgetattr(0, &tty) == 0) {
3888 if (on) tty.c_lflag |= ECHO;
3889 else tty.c_lflag &= ~ECHO;
3890 tcsetattr(0, TCSANOW, &tty);
// Poll stdin (at most every 100ms, never when daemonized) for interactive
// single-key commands: verbosity, QP histogram, packet dumping, filter
// commands ('c'/'C'), debug flags ('d'/'D') and help ('?').  Returns
// AVERROR_EXIT when the user asked to quit or a signal was received,
// 0 otherwise.
3895 static int check_keyboard_interaction(int64_t cur_time)
3898 static int64_t last_time;
3899 if (received_nb_signals)
3900 return AVERROR_EXIT;
3901 /* read_key() returns 0 on EOF */
// Rate-limit polling to once per 100ms of wall-clock time.
3902 if(cur_time - last_time >= 100000 && !run_as_daemon){
3904 last_time = cur_time;
3908 return AVERROR_EXIT;
3909 if (key == '+') av_log_set_level(av_log_get_level()+10);
3910 if (key == '-') av_log_set_level(av_log_get_level()-10);
3911 if (key == 's') qp_hist ^= 1;
3914 do_hex_dump = do_pkt_dump = 0;
3915 } else if(do_pkt_dump){
3919 av_log_set_level(AV_LOG_DEBUG);
// 'c'/'C': read a command line "<target>|all <time>|-1 <command>[ <arg>]"
// and send ('c', first match only) or queue ('C') it to filtergraphs.
3921 if (key == 'c' || key == 'C'){
3922 char buf[4096], target[64], command[256], arg[256] = {0};
3925 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3928 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3933 fprintf(stderr, "\n");
3935 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3936 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3937 target, time, command, arg);
3938 for (i = 0; i < nb_filtergraphs; i++) {
3939 FilterGraph *fg = filtergraphs[i];
3942 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3943 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3944 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3945 } else if (key == 'c') {
3946 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3947 ret = AVERROR_PATCHWELCOME;
3949 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3951 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3956 av_log(NULL, AV_LOG_ERROR,
3957 "Parse error, at least 3 arguments were expected, "
3958 "only %d given in string '%s'\n", n, buf);
// 'd': cycle through debug modes; 'D' presumably prompts for an explicit
// value (fragment below reads a number) — NOTE(review): confirm against
// full source.
3961 if (key == 'd' || key == 'D'){
3964 debug = input_streams[0]->st->codec->debug<<1;
3965 if(!debug) debug = 1;
3966 while(debug & (FF_DEBUG_DCT_COEFF
3968 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3970 )) //unsupported, would just crash
3977 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3982 fprintf(stderr, "\n");
3983 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3984 fprintf(stderr,"error parsing debug value\n");
// Apply the chosen debug flags to every decoder and encoder context.
3986 for(i=0;i<nb_input_streams;i++) {
3987 input_streams[i]->st->codec->debug = debug;
3989 for(i=0;i<nb_output_streams;i++) {
3990 OutputStream *ost = output_streams[i];
3991 ost->enc_ctx->debug = debug;
3993 if(debug) av_log_set_level(AV_LOG_DEBUG);
3994 fprintf(stderr,"debug=%d\n", debug);
3997 fprintf(stderr, "key function\n"
3998 "? show this help\n"
3999 "+ increase verbosity\n"
4000 "- decrease verbosity\n"
4001 "c Send command to first matching filter supporting it\n"
4002 "C Send/Queue command to all matching filters\n"
4003 "D cycle through available debug modes\n"
4004 "h dump packets/hex press to cycle through the 3 states\n"
4006 "s Show QP histogram\n"
// Demuxer thread body (one per input file when several inputs are used):
// reads packets with av_read_frame() and hands them to the main thread
// through f->in_thread_queue.  On error/EOF the error is propagated to
// the receiving side of the queue.
4013 static void *input_thread(void *arg)
// Non-blocking sends are used for non-seekable inputs so a full queue
// does not stall the demuxer silently.
4016 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4021 ret = av_read_frame(f->ctx, &pkt);
4023 if (ret == AVERROR(EAGAIN)) {
4028 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4031 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
// Queue full in non-blocking mode: retry (blocking, per the warning
// below the queue was the bottleneck) and tell the user how to tune it.
4032 if (flags && ret == AVERROR(EAGAIN)) {
4034 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4035 av_log(f->ctx, AV_LOG_WARNING,
4036 "Thread message queue blocking; consider raising the "
4037 "thread_queue_size option (current value: %d)\n",
4038 f->thread_queue_size);
4041 if (ret != AVERROR_EOF)
4042 av_log(f->ctx, AV_LOG_ERROR,
4043 "Unable to send packet to main thread: %s\n",
// Packet could not be delivered: free it and propagate the error.
4045 av_packet_unref(&pkt);
4046 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4054 static void free_input_thread(int i)
4056 InputFile *f = input_files[i];
4059 if (!f || !f->in_thread_queue)
4061 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4062 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4063 av_packet_unref(&pkt);
4065 pthread_join(f->thread, NULL);
4067 av_thread_message_queue_free(&f->in_thread_queue);
// Tear down the demuxer threads of all input files.
4070 static void free_input_threads(void)
4074 for (i = 0; i < nb_input_files; i++)
4075 free_input_thread(i);
// Create the demuxer thread for input file i (skipped when there is only
// one input file — single-input reads happen inline).  Allocates the
// packet message queue first; returns 0 or a negative AVERROR.
4078 static int init_input_thread(int i)
4081 InputFile *f = input_files[i];
4083 if (nb_input_files == 1)
// Use non-blocking queue sends for non-seekable (live) inputs, except
// lavfi which is generated on demand.
4086 if (f->ctx->pb ? !f->ctx->pb->seekable :
4087 strcmp(f->ctx->iformat->name, "lavfi"))
4088 f->non_blocking = 1;
4089 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4090 f->thread_queue_size, sizeof(AVPacket));
4094 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4095 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4096 av_thread_message_queue_free(&f->in_thread_queue);
// pthread_create returns a positive errno value, not -errno.
4097 return AVERROR(ret);
// Spawn a demuxer thread for every input file; stops at the first error.
4103 static int init_input_threads(void)
4107 for (i = 0; i < nb_input_files; i++) {
4108 ret = init_input_thread(i);
// Receive one packet from the input file's demuxer-thread queue;
// non-blocking when the file is flagged non_blocking.
4115 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4117 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4119 AV_THREAD_MESSAGE_NONBLOCK : 0);
// Fetch the next packet for input file f.  With -re (rate emulation) the
// read is delayed (EAGAIN) until wall-clock time catches up with each
// stream's DTS.  Multi-input setups read through the thread queue,
// single-input reads directly from the demuxer.
4123 static int get_input_packet(InputFile *f, AVPacket *pkt)
4127 for (i = 0; i < f->nb_streams; i++) {
4128 InputStream *ist = input_streams[f->ist_index + i];
// Convert stream DTS (AV_TIME_BASE units) to microseconds for comparison
// against elapsed real time since the stream started.
4129 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4130 int64_t now = av_gettime_relative() - ist->start;
4132 return AVERROR(EAGAIN);
4137 if (nb_input_files > 1)
4138 return get_input_packet_mt(f, pkt);
4140 return av_read_frame(f->ctx, pkt);
// Return nonzero if any output stream is currently marked unavailable
// (i.e. its input returned EAGAIN this iteration).
4143 static int got_eagain(void)
4146 for (i = 0; i < nb_output_streams; i++)
4147 if (output_streams[i]->unavailable)
// Clear the per-iteration EAGAIN markers on all input files and the
// unavailable flags on all output streams.
4152 static void reset_eagain(void)
4155 for (i = 0; i < nb_input_files; i++)
4156 input_files[i]->eagain = 0;
4157 for (i = 0; i < nb_output_streams; i++)
4158 output_streams[i]->unavailable = 0;
4161 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
// Set *duration to max(tmp, *duration), each expressed in its own time
// base (compared with av_compare_ts), and return the time base of the
// winning value.
4162 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4163 AVRational time_base)
4169 return tmp_time_base;
4172 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4175 return tmp_time_base;
// Rewind input file `ifile` to its start (used by -stream_loop) and
// update ifile->duration/time_base with the length of the iteration just
// played, so looped timestamps can be offset correctly.
4181 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4184 AVCodecContext *avctx;
4185 int i, ret, has_audio = 0;
4186 int64_t duration = 0;
4188 ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
// First pass: detect whether any decoded audio stream produced samples.
4192 for (i = 0; i < ifile->nb_streams; i++) {
4193 ist = input_streams[ifile->ist_index + i];
4194 avctx = ist->dec_ctx;
4196 /* duration is the length of the last frame in a stream
4197 * when audio stream is present we don't care about
4198 * last video frame length because it's not defined exactly */
4199 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
// Second pass: compute the last-frame duration per stream (exact from
// sample count for audio; from the frame rate for video).
4203 for (i = 0; i < ifile->nb_streams; i++) {
4204 ist = input_streams[ifile->ist_index + i];
4205 avctx = ist->dec_ctx;
4208 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4209 AVRational sample_rate = {1, avctx->sample_rate};
4211 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4216 if (ist->framerate.num) {
4217 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4218 } else if (ist->st->avg_frame_rate.num) {
4219 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4224 if (!ifile->duration)
4225 ifile->time_base = ist->st->time_base;
4226 /* the total duration of the stream, max_pts - min_pts is
4227 * the duration of the stream without the last frame */
4228 duration += ist->max_pts - ist->min_pts;
// Keep the longest stream duration as the file's loop offset.
4229 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
// Positive loop counts are decremented each time we wrap around.
4233 if (ifile->loop > 0)
4241 * - 0 -- one packet was read and processed
4242 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4243 * this function should be called again
4244 * - AVERROR_EOF -- this function should not be called again
// Read and process one packet from input file `file_index`.
// Handles -stream_loop rewinds, EOF flushing, dynamic-stream discard,
// wrap correction, global side-data injection, ts offsetting/scaling and
// discontinuity detection before forwarding the packet to
// process_input_packet().  Returns 0, AVERROR(EAGAIN) to be called
// again, or AVERROR_EOF when this file is exhausted.
4246 static int process_input(int file_index)
4248 InputFile *ifile = input_files[file_index];
4249 AVFormatContext *is;
4252 int ret, thread_ret, i, j;
4257 ret = get_input_packet(ifile, &pkt);
4259 if (ret == AVERROR(EAGAIN)) {
// EOF/error with -stream_loop active: flush decoders, stop the demuxer
// thread, seek back to the start, restart the thread and retry the read.
4263 if (ret < 0 && ifile->loop) {
4264 AVCodecContext *avctx;
4265 for (i = 0; i < ifile->nb_streams; i++) {
4266 ist = input_streams[ifile->ist_index + i];
4267 avctx = ist->dec_ctx;
4268 if (ist->decoding_needed) {
4269 ret = process_input_packet(ist, NULL, 1);
4272 avcodec_flush_buffers(avctx);
4276 free_input_thread(file_index);
4278 ret = seek_to_start(ifile, is);
4280 thread_ret = init_input_thread(file_index);
4285 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4287 ret = get_input_packet(ifile, &pkt);
4288 if (ret == AVERROR(EAGAIN)) {
4294 if (ret != AVERROR_EOF) {
4295 print_error(is->url, ret);
// Genuine EOF: flush every decoder of this file and finish outputs that
// bypass libavfilter (streamcopy / subtitles).
4300 for (i = 0; i < ifile->nb_streams; i++) {
4301 ist = input_streams[ifile->ist_index + i];
4302 if (ist->decoding_needed) {
4303 ret = process_input_packet(ist, NULL, 0);
4308 /* mark all outputs that don't go through lavfi as finished */
4309 for (j = 0; j < nb_output_streams; j++) {
4310 OutputStream *ost = output_streams[j];
4312 if (ost->source_index == ifile->ist_index + i &&
4313 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4314 finish_output_stream(ost);
4318 ifile->eof_reached = 1;
// EAGAIN (not EOF) so the main loop re-enters and notices eof_reached.
4319 return AVERROR(EAGAIN);
4325 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4326 is->streams[pkt.stream_index]);
4328 /* the following test is needed in case new streams appear
4329 dynamically in stream : we ignore them */
4330 if (pkt.stream_index >= ifile->nb_streams) {
4331 report_new_stream(file_index, &pkt);
4332 goto discard_packet;
4335 ist = input_streams[ifile->ist_index + pkt.stream_index];
4337 ist->data_size += pkt.size;
4341 goto discard_packet;
4343 if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4344 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4345 "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4351 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4352 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4353 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4354 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4355 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4356 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4357 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4358 av_ts2str(input_files[ist->file_index]->ts_offset),
4359 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
// Timestamp wrap correction for streams with <64-bit pts counters
// (e.g. MPEG-TS 33-bit).
4362 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4363 int64_t stime, stime2;
4364 // Correcting starttime based on the enabled streams
4365 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4366 // so we instead do it here as part of discontinuity handling
4367 if ( ist->next_dts == AV_NOPTS_VALUE
4368 && ifile->ts_offset == -is->start_time
4369 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4370 int64_t new_start_time = INT64_MAX;
4371 for (i=0; i<is->nb_streams; i++) {
4372 AVStream *st = is->streams[i];
4373 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4375 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4377 if (new_start_time > is->start_time) {
4378 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4379 ifile->ts_offset = -new_start_time;
4383 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4384 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4385 ist->wrap_correction_done = 1;
// If a timestamp still lies in the pre-wrap range, undo the wrap and
// keep correction pending for subsequent packets.
4387 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4388 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4389 ist->wrap_correction_done = 0;
4391 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4392 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4393 ist->wrap_correction_done = 0;
4397 /* add the stream-global side data to the first packet */
4398 if (ist->nb_packets == 1) {
4399 for (i = 0; i < ist->st->nb_side_data; i++) {
4400 AVPacketSideData *src_sd = &ist->st->side_data[i];
// Display matrix is handled via autorotate elsewhere; skip it here.
4403 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
// Do not overwrite side data the packet already carries.
4406 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4409 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4413 memcpy(dst_data, src_sd->data, src_sd->size);
// Apply the input file's timestamp offset (-itsoffset / start-time
// compensation), then per-stream ts scaling (-itsscale).
4417 if (pkt.dts != AV_NOPTS_VALUE)
4418 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4419 if (pkt.pts != AV_NOPTS_VALUE)
4420 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4422 if (pkt.pts != AV_NOPTS_VALUE)
4423 pkt.pts *= ist->ts_scale;
4424 if (pkt.dts != AV_NOPTS_VALUE)
4425 pkt.dts *= ist->ts_scale;
// Inter-file discontinuity check against the file's last seen timestamp
// (only for formats flagged AVFMT_TS_DISCONT, and not with -copyts).
4427 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4428 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4429 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4430 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4431 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4432 int64_t delta = pkt_dts - ifile->last_ts;
4433 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4434 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4435 ifile->ts_offset -= delta;
4436 av_log(NULL, AV_LOG_DEBUG,
4437 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4438 delta, ifile->ts_offset);
4439 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4440 if (pkt.pts != AV_NOPTS_VALUE)
4441 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
// -stream_loop: shift timestamps by the accumulated duration of previous
// iterations and track the min/max pts of this one.
4445 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4446 if (pkt.pts != AV_NOPTS_VALUE) {
4447 pkt.pts += duration;
4448 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4449 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4452 if (pkt.dts != AV_NOPTS_VALUE)
4453 pkt.dts += duration;
// Intra-stream discontinuity handling against the predicted next DTS:
// DISCONT formats get the offset adjusted, others drop bogus timestamps.
4455 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4456 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4457 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4458 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4460 int64_t delta = pkt_dts - ist->next_dts;
4461 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4462 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4463 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4464 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4465 ifile->ts_offset -= delta;
4466 av_log(NULL, AV_LOG_DEBUG,
4467 "timestamp discontinuity for stream #%d:%d "
4468 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4469 ist->file_index, ist->st->index, ist->st->id,
4470 av_get_media_type_string(ist->dec_ctx->codec_type),
4471 delta, ifile->ts_offset);
4472 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4473 if (pkt.pts != AV_NOPTS_VALUE)
4474 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4477 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4478 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4479 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4480 pkt.dts = AV_NOPTS_VALUE;
4482 if (pkt.pts != AV_NOPTS_VALUE){
4483 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4484 delta = pkt_pts - ist->next_dts;
4485 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4486 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4487 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4488 pkt.pts = AV_NOPTS_VALUE;
4494 if (pkt.dts != AV_NOPTS_VALUE)
4495 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4498 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4499 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4500 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4501 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4502 av_ts2str(input_files[ist->file_index]->ts_offset),
4503 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
// Keep sub2video overlays alive even without new subtitle packets.
4506 sub2video_heartbeat(ist, pkt.pts);
4508 process_input_packet(ist, &pkt, 0);
4511 av_packet_unref(&pkt);
4517 * Perform a step of transcoding for the specified filter graph.
4519 * @param[in] graph filter graph to consider
4520 * @param[out] best_ist input stream where a frame would allow to continue
4521 * @return 0 for success, <0 for error
// Run one step of a filtergraph: request a frame from its oldest sink.
// On success, reap the produced frames; on EOF, flush and close the
// graph's outputs; on EAGAIN, pick the input stream whose buffersrc had
// the most failed requests as *best_ist so the caller feeds it next.
// If no feedable input exists, mark the graph's outputs unavailable.
4523 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4526 int nb_requests, nb_requests_max = 0;
4527 InputFilter *ifilter;
4531 ret = avfilter_graph_request_oldest(graph->graph);
4533 return reap_filters(0);
4535 if (ret == AVERROR_EOF) {
4536 ret = reap_filters(1);
4537 for (i = 0; i < graph->nb_outputs; i++)
4538 close_output_stream(graph->outputs[i]->ost);
4541 if (ret != AVERROR(EAGAIN))
// The graph is starved: find the hungriest input that can still deliver.
4544 for (i = 0; i < graph->nb_inputs; i++) {
4545 ifilter = graph->inputs[i];
4547 if (input_files[ist->file_index]->eagain ||
4548 input_files[ist->file_index]->eof_reached)
4550 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4551 if (nb_requests > nb_requests_max) {
4552 nb_requests_max = nb_requests;
// No input can be fed right now — stall this graph's outputs.
4558 for (i = 0; i < graph->nb_outputs; i++)
4559 graph->outputs[i]->ost->unavailable = 1;
4565 * Run a single step of transcoding.
4567 * @return 0 for success, <0 for error
// Run a single step of transcoding: pick the neediest output stream,
// (re)configure/initialize its filtergraph and encoder if pending,
// determine which input stream to read, process one input packet, and
// reap any frames the filters produced.  Returns 0 or <0 on error.
4569 static int transcode_step(void)
4572 InputStream *ist = NULL;
4575 ost = choose_output();
4582 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
// Filtergraph exists but is not configured yet: configure it once all
// its inputs know their format.
4586 if (ost->filter && !ost->filter->graph->graph) {
4587 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4588 ret = configure_filtergraph(ost->filter->graph);
4590 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4596 if (ost->filter && ost->filter->graph->graph) {
// Encoder init was deferred until the filter chain produced a format.
4597 if (!ost->initialized) {
4598 char error[1024] = {0};
4599 ret = init_output_stream(ost, error, sizeof(error));
4601 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4602 ost->file_index, ost->index, error);
4606 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
// Unconfigured graph: feed any input that has produced no output yet
// and whose file is not at EOF; otherwise the stream's inputs are done.
4610 } else if (ost->filter) {
4612 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4613 InputFilter *ifilter = ost->filter->graph->inputs[i];
4614 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4620 ost->inputs_done = 1;
// No filter involved (streamcopy/subtitles): read from the mapped input.
4624 av_assert0(ost->source_index >= 0);
4625 ist = input_streams[ost->source_index];
4628 ret = process_input(ist->file_index);
4629 if (ret == AVERROR(EAGAIN)) {
4630 if (input_files[ist->file_index]->eagain)
4631 ost->unavailable = 1;
// EOF from process_input is not an error at this level.
4636 return ret == AVERROR_EOF ? 0 : ret;
4638 return reap_filters(0);
4642 * The following code is the main loop of the file converter
// Main conversion driver: initialize everything, loop over
// transcode_step() until no output wants more data (or the user quits),
// flush decoders, write trailers, close codecs and free per-stream
// resources.  Returns 0 on success or a negative AVERROR.
4644 static int transcode(void)
4647 AVFormatContext *os;
4650 int64_t timer_start;
4651 int64_t total_packets_written = 0;
4653 ret = transcode_init();
4657 if (stdin_interaction) {
4658 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4661 timer_start = av_gettime_relative();
4664 if ((ret = init_input_threads()) < 0)
// Main loop: runs until SIGTERM/SIGINT or all outputs are satisfied.
4668 while (!received_sigterm) {
4669 int64_t cur_time= av_gettime_relative();
4671 /* if 'q' pressed, exits */
4672 if (stdin_interaction)
4673 if (check_keyboard_interaction(cur_time) < 0)
4676 /* check if there's any stream where output is still needed */
4677 if (!need_output()) {
4678 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4682 ret = transcode_step();
4683 if (ret < 0 && ret != AVERROR_EOF) {
4684 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4688 /* dump report by using the output first video and audio streams */
4689 print_report(0, timer_start, cur_time);
4692 free_input_threads();
4695 /* at the end of stream, we must flush the decoder buffers */
4696 for (i = 0; i < nb_input_streams; i++) {
4697 ist = input_streams[i];
4698 if (!input_files[ist->file_index]->eof_reached) {
4699 process_input_packet(ist, NULL, 0);
4706 /* write the trailer if needed and close file */
4707 for (i = 0; i < nb_output_files; i++) {
4708 os = output_files[i]->ctx;
// A header was never written (no stream got a packet): report and skip
// the trailer for this file.
4709 if (!output_files[i]->header_written) {
4710 av_log(NULL, AV_LOG_ERROR,
4711 "Nothing was written into output file %d (%s), because "
4712 "at least one of its streams received no packets.\n",
4716 if ((ret = av_write_trailer(os)) < 0) {
4717 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4723 /* dump report by using the first video and audio streams */
4724 print_report(1, timer_start, av_gettime_relative());
4726 /* close each encoder */
4727 for (i = 0; i < nb_output_streams; i++) {
4728 ost = output_streams[i];
4729 if (ost->encoding_needed) {
4730 av_freep(&ost->enc_ctx->stats_in);
4732 total_packets_written += ost->packets_written;
// -abort_on empty_output: treat a zero-packet run as fatal.
4735 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4736 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4740 /* close each decoder */
4741 for (i = 0; i < nb_input_streams; i++) {
4742 ist = input_streams[i];
4743 if (ist->decoding_needed) {
4744 avcodec_close(ist->dec_ctx);
4745 if (ist->hwaccel_uninit)
4746 ist->hwaccel_uninit(ist->dec_ctx);
4750 av_buffer_unref(&hw_device_ctx);
4751 hw_device_free_all();
// Cleanup path (also reached on error): stop threads and free
// per-output-stream allocations.
4758 free_input_threads();
4761 if (output_streams) {
4762 for (i = 0; i < nb_output_streams; i++) {
4763 ost = output_streams[i];
// fclose can fail on flush; warn since 2-pass stats may be truncated.
4766 if (fclose(ost->logfile))
4767 av_log(NULL, AV_LOG_ERROR,
4768 "Error closing logfile, loss of information possible: %s\n",
4769 av_err2str(AVERROR(errno)));
4770 ost->logfile = NULL;
4772 av_freep(&ost->forced_kf_pts);
4773 av_freep(&ost->apad);
4774 av_freep(&ost->disposition);
4775 av_dict_free(&ost->encoder_opts);
4776 av_dict_free(&ost->sws_dict);
4777 av_dict_free(&ost->swr_opts);
4778 av_dict_free(&ost->resample_opts);
/*
 * Snapshot wall-clock, user-CPU and system-CPU time (all in
 * microseconds) for the -benchmark option.  real_usec is always taken
 * from av_gettime_relative(); the CPU times use whichever platform API
 * is available, falling back to 0 when neither exists.
 * NOTE(review): elided excerpt — the #if/#else/#endif framing around
 * the platform branches is not fully visible here.
 */
4785 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
4787 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4789 struct rusage rusage;
4791 getrusage(RUSAGE_SELF, &rusage);
/* fold tv_sec/tv_usec into a single microsecond count */
4792 time_stamps.user_usec =
4793 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4794 time_stamps.sys_usec =
4795 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4796 #elif HAVE_GETPROCESSTIMES
4798 FILETIME c, e, k, u;
4799 proc = GetCurrentProcess();
4800 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100 ns units, hence /10 to get microseconds */
4801 time_stamps.user_usec =
4802 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4803 time_stamps.sys_usec =
4804 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* no supported CPU-time API on this platform: report zero */
4806 time_stamps.user_usec = time_stamps.sys_usec = 0;
/*
 * Return the process's peak resident/committed memory usage in bytes.
 * NOTE(review): the fallback branch for platforms with neither API is
 * not visible in this elided excerpt.
 */
4811 static int64_t getmaxrss(void)
4813 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4814 struct rusage rusage;
4815 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes (Linux), so scale to bytes */
4816 return (int64_t)rusage.ru_maxrss * 1024;
4817 #elif HAVE_GETPROCESSMEMORYINFO
4819 PROCESS_MEMORY_COUNTERS memcounters;
4820 proc = GetCurrentProcess();
/* cb must hold the struct size before calling GetProcessMemoryInfo() */
4821 memcounters.cb = sizeof(memcounters);
4822 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* already in bytes on Windows */
4823 return memcounters.PeakPagefileUsage;
/* Log callback installed when the hidden "-d" switch is given (see
 * main()); presumably discards all messages — body not visible in this
 * elided excerpt. */
4829 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: configure logging, register the cleanup handler,
 * parse the command line and open all files, run transcode(), then
 * print benchmark / decode-error statistics and leave via exit_program().
 * NOTE(review): elided excerpt — several error-handling lines and the
 * closing brace are not visible here.
 */
4833 int main(int argc, char **argv)
4836 BenchmarkTimeStamps ti;
/* ffmpeg_cleanup will run on every exit_program() path */
4840 register_exit(ffmpeg_cleanup);
4842 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4844 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4845 parse_loglevel(argc, argv, options);
/* hidden debug switch: "-d" replaces the log callback with a null one */
4847 if(argc>1 && !strcmp(argv[1], "-d")){
4849 av_log_set_callback(log_callback_null);
/* register devices and protocols before any file is opened */
4855 avdevice_register_all();
4857 avformat_network_init();
4859 show_banner(argc, argv, options);
4861 /* parse options and open all input/output files */
4862 ret = ffmpeg_parse_options(argc, argv);
/* no inputs and no outputs at all: point the user at the help */
4866 if (nb_output_files <= 0 && nb_input_files == 0) {
4868 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4872 /* file converter / grab */
4873 if (nb_output_files <= 0) {
4874 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* presumably checks for any non-RTP output — loop body not visible */
4878 for (i = 0; i < nb_output_files; i++) {
4879 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* take the benchmark baseline, then run the whole conversion */
4883 current_time = ti = get_benchmark_time_stamps();
4884 if (transcode() < 0)
4887 int64_t utime, stime, rtime;
4888 current_time = get_benchmark_time_stamps();
/* deltas since the pre-transcode() snapshot, in microseconds */
4889 utime = current_time.user_usec - ti.user_usec;
4890 stime = current_time.sys_usec - ti.sys_usec;
4891 rtime = current_time.real_usec - ti.real_usec;
4892 av_log(NULL, AV_LOG_INFO,
4893 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4894 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4896 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4897 decode_error_stat[0], decode_error_stat[1]);
/* fail when the decode-error ratio exceeds -max_error_rate */
4898 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals interruption by SIGINT/SIGTERM */
4901 exit_program(received_nb_signals ? 255 : main_return_code);
4902 return main_return_code;