2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* -vstats log file; opened lazily the first time do_video_stats() runs. */
static FILE *vstats_file;

/* Constant names usable inside -force_key_frames expressions
 * (initializer entries not visible in this chunk). */
const char *const forced_keyframes_const_names[] = {

/* Forward declarations for helpers defined later in the file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);
static int ifilter_has_all_input_formats(FilterGraph *fg);

static int run_as_daemon = 0;
/* Frame duplication/drop bookkeeping for video sync statistics. */
static int nb_frames_dup = 0;
static unsigned dup_warning = 1000;  /* next dup count that triggers a warning */
static int nb_frames_drop = 0;
static int64_t decode_error_stat[2]; /* decode success/error counters — TODO confirm indexing against decode loop (not visible here) */

static int want_sdp = 1;

static int current_time;
AVIOContext *progress_avio = NULL;

/* Scratch buffer reused for every encoded subtitle packet. */
static uint8_t *subtitle_out;

/* Global stream/file tables, shared with the option and filter code. */
InputStream **input_streams = NULL;
int nb_input_streams = 0;
InputFile **input_files = NULL;
int nb_input_files = 0;

OutputStream **output_streams = NULL;
int nb_output_streams = 0;
OutputFile **output_files = NULL;
int nb_output_files = 0;

FilterGraph **filtergraphs;

/* init terminal so that we can grab keys */
static struct termios oldtty;
static int restore_tty;  /* set when oldtty holds state that must be restored on exit */

static void free_input_threads(void);
166 Convert subtitles to video with alpha to insert them in filter graphs.
167 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare the per-stream sub2video canvas frame: reuse the cached AVFrame,
 * size it from the decoder (falling back to the precomputed sub2video size),
 * allocate a buffer and clear it to all-zero RGB32 (transparent black).
 * Returns < 0 on allocation failure (propagated from av_frame_get_buffer). */
static int sub2video_get_blank_frame(InputStream *ist)
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* Decoder dimensions win when known; otherwise use the pre-negotiated size. */
    ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
    /* Zero-fill: RGB32 with zero alpha, i.e. fully transparent. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted subtitle rectangle onto the RGB32 canvas.
 * dst/dst_linesize describe the canvas, w/h its dimensions; rectangles that
 * are not bitmaps or that fall outside the canvas are skipped with a warning. */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
    uint32_t *pal, *dst2;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
    /* Reject rectangles overflowing the canvas rather than writing out of bounds. */
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
               r->x, r->y, r->w, r->h, w, h
    /* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
    dst += r->y * dst_linesize + r->x * 4;
    pal = (uint32_t *)r->data[1];  /* palette: one RGB32 entry per index */
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        /* Expand each palette index to its RGB32 color. */
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        src += r->linesize[0];
/* Push the current sub2video canvas into every buffersrc feeding this
 * input stream, stamped with the given pts (also recorded as last_pts).
 * The frame is sent with KEEP_REF so the canvas can be resent later. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
    AVFrame *frame = ist->sub2video.frame;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                           AV_BUFFERSRC_FLAG_KEEP_REF |
                                           AV_BUFFERSRC_FLAG_PUSH);
        /* EOF from the filter is expected during shutdown; only warn on real errors. */
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render an AVSubtitle onto the sub2video canvas and push it downstream.
 * With sub == NULL (heartbeat/flush path, judging by the callers below) the
 * previous end_pts is reused as the new frame's pts. Display times are
 * rescaled from ms relative to sub->pts into the stream time base. */
void sub2video_update(InputStream *ist, AVSubtitle *sub)
    AVFrame *frame = ist->sub2video.frame;
    int64_t pts, end_pts;

    /* start/end_display_time are in ms; convert to stream time base. */
    pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                             AV_TIME_BASE_Q, ist->st->time_base);
    end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                             AV_TIME_BASE_Q, ist->st->time_base);
    num_rects = sub->num_rects;
    /* No subtitle: re-emit a blank canvas starting where the last one ended. */
    pts       = ist->sub2video.end_pts;
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams of the same input file alive: whenever a frame is
 * read at timestamp pts, resend (or refresh) the subtitle canvas of every
 * sibling sub2video stream so filters waiting on subtitles do not stall. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    InputFile *infile = input_files[ist->file_index];

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
        /* Canvas expired or never drawn: regenerate a blank one first. */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        /* Only push if some buffersrc actually failed a request (is starving). */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
            sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: emit a final blank update
 * if the canvas has a finite end time, then send NULL (EOF) to each
 * buffersrc feeding this stream. */
static void sub2video_flush(InputStream *ist)
    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++) {
        /* NULL frame == EOF marker for the buffer source. */
        ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
311 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the saved tty attributes.
 * Must stay signal-safe — only tcsetattr and the (quiet) log call below. */
static void term_exit_sigsafe(void)
        tcsetattr (0, TCSANOW, &oldtty);
    av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit state shared between signal handlers and the main loop.
 * NOTE(review): these are volatile int rather than sig_atomic_t/atomic —
 * matches upstream, but only transcode_init_done is a real atomic. */
static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;
static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
static volatile int ffmpeg_exited = 0;
static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many were received;
 * after more than three, write a message straight to stderr (signal-safe)
 * and hard-exit rather than waiting for a graceful shutdown. */
sigterm_handler(int sig)
    received_sigterm = sig;
    received_nb_signals++;
    if(received_nb_signals > 3) {
        /* write(2) is async-signal-safe; fprintf is not. */
        ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
                    strlen("Received > 3 system signals, hard exiting\n"));
        if (ret < 0) { /* Do nothing */ };
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * sigterm_handler() path used on POSIX. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {

        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    /* NOTE(review): this is the interior of term_init(); its opening lines are
     * not visible in this chunk. Puts the controlling tty into raw-ish mode
     * (for interactive key handling) and installs the signal handlers. */
    if (!run_as_daemon && stdin_interaction) {
        if (tcgetattr (0, &tty) == 0) {
            /* Disable canonical mode/echo and input translation so single
             * keypresses are delivered immediately. */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                            |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);

            tcsetattr (0, TCSANOW, &tty);
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
    signal(SIGXCPU, sigterm_handler);
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
/* POSIX path uses select() with (presumably) a zero timeout on stdin;
 * Windows path peeks the console/pipe instead. Returns the key or -1 —
 * TODO confirm return convention against the non-visible tail. */
static int read_key(void)
    n = select(1, &rfds, NULL, NULL, &tv);

# if HAVE_PEEKNAMEDPIPE
    static HANDLE input_handle;
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode fails for pipes: that failure is how we detect one. */
        is_pipe = !GetConsoleMode(input_handle, &dw);

        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
466 static int decode_interrupt_cb(void *ctx)
468 return received_nb_signals > atomic_load(&transcode_init_done);
471 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown run at program exit: drains and frees every filtergraph,
 * output file/stream, input file/stream and auxiliary buffer, closes the
 * vstats file and deinitializes the network layer. Also prints the final
 * status line (normal signal exit vs. conversion failure). */
static void ffmpeg_cleanup(int ret)
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);

    /* Filtergraphs: free the graph, then drain per-input frame and subtitle
     * queues (entries own refcounted frames/subtitles) before freeing them. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            while (av_fifo_size(fg->inputs[j]->frame_queue)) {
                av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            av_fifo_freep(&fg->inputs[j]->frame_queue);
            if (fg->inputs[j]->ist->sub2video.sub_queue) {
                while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
                    av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
                                         &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
            av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]->formats);
            av_freep(&fg->outputs[j]->channel_layouts);
            av_freep(&fg->outputs[j]->sample_rates);
            av_freep(&fg->outputs[j]);
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);
        av_freep(&filtergraphs[i]);
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* Output files: close the underlying I/O (unless AVFMT_NOFILE) and
     * free muxer contexts and options. */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
        avformat_free_context(s);
        av_dict_free(&of->opts);
        av_freep(&output_files[i]);
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_parser_close(ost->parser);
        avcodec_free_context(&ost->parser_avctx);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        /* Unwritten packets still hold refs; unref each before freeing. */
        if (ost->muxing_queue) {
            while (av_fifo_size(ost->muxing_queue)) {
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            av_fifo_freep(&ost->muxing_queue);

        av_freep(&output_streams[i]);
    /* Stop the input reader threads before tearing down input contexts. */
    free_input_threads();
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);

        /* fclose flushes; a failure here can mean lost vstats data. */
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
629 void remove_avoptions(AVDictionary **a, AVDictionary *b)
631 AVDictionaryEntry *t = NULL;
633 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
634 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort if any option in m was not consumed by the lib that received it —
 * an unrecognized option is a fatal user error. (The exit call is on a line
 * not visible in this chunk.) */
void assert_avoptions(AVDictionary *m)
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
647 static void abort_codec_experimental(AVCodec *c, int encoder)
/* -benchmark_all support: when enabled, log the user-time delta since the
 * previous call, labelled with the printf-style message in fmt (a NULL fmt
 * just resets the reference time — see callers passing NULL). */
static void update_benchmark(const char *fmt, ...)
    if (do_benchmark_all) {
        int64_t t = getutime();
            vsnprintf(buf, sizeof(buf), fmt, va);
            av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
669 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
672 for (i = 0; i < nb_output_streams; i++) {
673 OutputStream *ost2 = output_streams[i];
674 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one encoded/copied packet to the muxer for stream `ost`.
 * Responsibilities visible here: enforce -frames limits, queue packets
 * until the muxer header is written, sanitize timestamps (VSYNC_DROP /
 * async, DTS>PTS repair, non-monotonic DTS repair), rescale to the stream
 * time base, collect stats, and finally interleave-write. `unqueue` marks
 * packets replayed from the muxing queue so they are not double-counted. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;

    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* Grow geometrically, capped at max_muxing_queue_size. */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Too many packets buffered for output stream %d:%d.\n",
                       ost->file_index, ost->st->index);
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
        /* Queue owns a new ref; the caller's packet is consumed either way. */
        ret = av_packet_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        av_packet_unref(pkt);

    /* With -vsync drop / negative async, let the muxer regenerate timestamps. */
    if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Pull encoder quality/error stats out of the packet side data. */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
                ost->error[i] = AV_RL64(sd + 8 + 8*i);

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   ost->file_index, ost->st->index);
            /* Replace dts with the median of {pts, dts, last_mux_dts + 1}:
             * sum minus min minus max leaves the middle value. */
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* Strict formats require strictly increasing DTS; others allow equal. */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
                av_get_media_type_string(ost->enc_ctx->codec_type),
                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),

    ret = av_interleaved_write_frame(s, pkt);
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* A mux error poisons the whole file: finish every stream. */
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    av_packet_unref(pkt);
/* Mark one output stream's encoder as finished; under a condition on a line
 * not visible here, also clamp the file's recording time to this stream's
 * current end timestamp (converted to AV_TIME_BASE). */
static void close_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
825 * Send a single packet to the output, applying any bitstream filters
826 * associated with the output stream. This may result in any number
827 * of packets actually being written, depending on what bitstream
828 * filters are applied. The supplied packet is consumed and will be
829 * blank (as if newly-allocated) when this function returns.
831 * If eof is set, instead indicate EOF to all bitstream filters and
832 * therefore flush any delayed packets to the output. A blank packet
833 * must be supplied in this case.
/* Run the packet through the stream's bitstream-filter chain (if any) and
 * pass each resulting packet to write_packet(). With eof set, NULL is sent
 * into the chain to flush delayed packets. On a BSF error the stream's
 * file/index is logged. */
static void output_packet(OutputFile *of, AVPacket *pkt,
                          OutputStream *ost, int eof)
    /* apply the output bitstream filters, if any */
    if (ost->nb_bitstream_filters) {
        ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
            /* get a packet from the previous filter up the chain */
            ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
            if (ret == AVERROR(EAGAIN)) {
                /* Filter needs more input before producing output. */
            } else if (ret == AVERROR_EOF) {
            /* send it to the next filter down the chain or to the muxer */
            if (idx < ost->nb_bitstream_filters) {
                ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
                write_packet(of, pkt, ost, 0);
        /* No bitstream filters: write directly. */
        write_packet(of, pkt, ost, 0);

    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether encoding should continue for this stream: compares the
 * stream's current position (sync_opts relative to first_pts, in encoder
 * time base) against the output file's -t recording_time and closes the
 * stream once the limit is reached. (Return statements are on lines not
 * visible in this chunk.) */
static int check_recording_time(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
/* Encode one audio frame and mux the resulting packet(s) via the
 * send_frame/receive_packet API. Maintains sync_opts as a running sample
 * counter and honors -t via check_recording_time(). */
static void do_audio_out(OutputFile *of, OutputStream *ost,
    AVCodecContext *enc = ost->enc_ctx;

    av_init_packet(&pkt);

    if (!check_recording_time(ost))

    /* Without valid pts (or with negative async) stamp from the sample count. */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);

    ret = avcodec_send_frame(enc, frame);
        /* Drain all packets the encoder has ready. */
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));

        output_packet(of, &pkt, ost, 0);

    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and emit the packet(s). DVB subtitles need two
 * passes (one packet to draw, one to clear), hence the nb loop; display
 * times are normalized so start_display_time is 0 before encoding. */
static void do_subtitle_out(OutputFile *of,
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");

        /* Lazily allocate the shared 1 MiB encode buffer. */
        subtitle_out = av_malloc(subtitle_out_max_size);
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))

        // start_display_time is required to be 0
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time  -= sub->start_display_time;
        sub->start_display_time = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        /* num_rects may have been zeroed for the DVB "clear" pass; restore it. */
        sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);

        output_packet(of, &pkt, ost, 0);
/* Video output driver: given the next filtered picture, decide how many
 * frames to emit (duplicating or dropping per the -vsync mode), apply
 * interlacing/keyframe forcing, encode each frame and mux the packets.
 * next_picture == NULL means flush: repeat the recent duplication pattern. */
static void do_video_out(OutputFile *of,
                         AVFrame *next_picture,
    int ret, format_video_sync;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecParameters *mux_par = ost->st->codecpar;
    AVRational frame_rate;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* Estimate one output frame's duration (in encoder time-base units)
     * from the sink frame rate, the forced -r rate, or the frame's own
     * pkt_duration, in that order of fallbacks. */
    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    if (!ost->filters_script &&
        lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));

    if (!next_picture) {
        /* Flush: replay the median of the last three dup counts. */
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
        delta  = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)

        format_video_sync = video_sync_method;
        if (format_video_sync == VSYNC_AUTO) {
            /* AVI is special-cased to VFR; otherwise derive from format flags. */
            if(!strcmp(of->ctx->oformat->name, "avi")) {
                format_video_sync = VSYNC_VFR;
                format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
                && format_video_sync == VSYNC_CFR
                && input_files[ist->file_index]->ctx->nb_streams == 1
                && input_files[ist->file_index]->input_ts_offset == 0) {
                format_video_sync = VSYNC_VSCFR;
            if (format_video_sync == VSYNC_CFR && copy_ts) {
                format_video_sync = VSYNC_VSCFR;
        ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

            format_video_sync != VSYNC_PASSTHROUGH &&
            format_video_sync != VSYNC_DROP) {
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
            /* Snap a slightly-early frame onto the current output position. */
            sync_ipts = ost->sync_opts;

        switch (format_video_sync) {
            if (ost->frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                ost->sync_opts = lrint(sync_ipts);
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
            } else if (delta < -1.1)
            else if (delta > 1.1) {
                nb_frames = lrintf(delta);
                    nb0_frames = lrintf(delta0 - 0.6);
            else if (delta > 0.6)
                ost->sync_opts = lrint(sync_ipts);

        case VSYNC_PASSTHROUGH:
            ost->sync_opts = lrint(sync_ipts);

    /* Clamp to the -frames limit and record the dup pattern history. */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
    ost->last_dropped = nb_frames == nb0_frames && next_picture;

  /* duplicates frame if needed */
  for (i = 0; i < nb_frames; i++) {
    AVFrame *in_picture;
    av_init_packet(&pkt);

    /* The first nb0_frames slots repeat the previous frame (VFR dup). */
    if (i < nb0_frames && ost->last_frame) {
        in_picture = ost->last_frame;
        in_picture = next_picture;

    in_picture->pts = ost->sync_opts;

    if (!check_recording_time(ost))
    if (ost->frame_number >= ost->max_frames)

        int forced_keyframe = 0;

        if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
            ost->top_field_first >= 0)
            in_picture->top_field_first = !!ost->top_field_first;

        /* Propagate the field order into the muxer parameters. */
        if (in_picture->interlaced_frame) {
            if (enc->codec->id == AV_CODEC_ID_MJPEG)
                mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            mux_par->field_order = AV_FIELD_PROGRESSIVE;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = 0;

        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
            in_picture->pts * av_q2d(enc->time_base) : NAN;
        /* -force_key_frames: explicit timestamp list ... */
        if (ost->forced_kf_index < ost->forced_kf_count &&
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
            ost->forced_kf_index++;
            forced_keyframe = 1;
        /* ... or an expression evaluated per frame ... */
        } else if (ost->forced_keyframes_pexpr) {
            ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
            res = av_expr_eval(ost->forced_keyframes_pexpr,
                               ost->forced_keyframes_expr_const_values, NULL);
            ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                    ost->forced_keyframes_expr_const_values[FKF_N],
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                    ost->forced_keyframes_expr_const_values[FKF_T],
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                forced_keyframe = 1;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                    ost->forced_keyframes_expr_const_values[FKF_N];
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                    ost->forced_keyframes_expr_const_values[FKF_T];
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;

            ost->forced_keyframes_expr_const_values[FKF_N] += 1;
        /* ... or "source": mirror the input's keyframes. */
        } else if (   ost->forced_keyframes
                   && !strncmp(ost->forced_keyframes, "source", 6)
                   && in_picture->key_frame==1) {
            forced_keyframe = 1;

        if (forced_keyframe) {
            in_picture->pict_type = AV_PICTURE_TYPE_I;
            av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);

        update_benchmark(NULL);
            av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);

        ost->frames_encoded++;

        ret = avcodec_send_frame(enc, in_picture);
            /* Drain every packet the encoder produced for this frame. */
            ret = avcodec_receive_packet(enc, &pkt);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
            if (ret == AVERROR(EAGAIN))

                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));

            if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                pkt.pts = ost->sync_opts;

            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));

            frame_size = pkt.size;
            output_packet(of, &pkt, ost, 0);

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);

    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    ost->frame_number++;

    if (vstats_filename && frame_size)
        do_video_stats(ost, frame_size);

  /* Remember the last picture so it can be duplicated next call. */
  if (!ost->last_frame)
      ost->last_frame = av_frame_alloc();
  av_frame_unref(ost->last_frame);
  if (next_picture && ost->last_frame)
      av_frame_ref(ost->last_frame, next_picture);
      av_frame_free(&ost->last_frame);

    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1343 static double psnr(double d)
1345 return -10.0 * log10(d);
// do_video_stats(): append one line of per-frame encoding statistics
// (frame number, quantizer, PSNR, sizes, bitrates, picture type) for a
// video output stream to the -vstats log file.
// NOTE(review): the embedded numbering jumps (1348, 1350, 1352, ...) show
// that lines are missing from this extraction; the visible text is kept
// byte-identical below.
1348 static void do_video_stats(OutputStream *ost, int frame_size)
1350 AVCodecContext *enc;
1352 double ti1, bitrate, avg_bitrate;
1354 /* this is executed just the first time do_video_stats is called */
// Lazily opens the stats file on first use; error handling for fopen is
// on lines not visible here -- confirm against upstream.
1356 vstats_file = fopen(vstats_filename, "w");
1364 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1365 frame_number = ost->st->nb_frames;
// vstats_version selects between the legacy and the extended line format.
1366 if (vstats_version <= 1) {
1367 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1368 ost->quality / (float)FF_QP2LAMBDA);
1370 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1371 ost->quality / (float)FF_QP2LAMBDA);
// PSNR is derived from the accumulated squared error normalized by
// picture area and 255^2 (8-bit peak).
1374 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1375 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1377 fprintf(vstats_file,"f_size= %6d ", frame_size);
1378 /* compute pts value */
1379 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
// Instantaneous bitrate from this frame; average from total bytes / time.
1383 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1384 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1385 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1386 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1387 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1391 static int init_output_stream(OutputStream *ost, char *error, int error_len);
// finish_output_stream(): mark this output stream as completely done
// (both encoder and muxer side) so no further packets are produced for it.
1393 static void finish_output_stream(OutputStream *ost)
1395 OutputFile *of = output_files[ost->file_index];
1398 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
// NOTE(review): lines are missing here; in upstream ffmpeg.c this loop
// over all sibling streams of the output file is guarded by of->shortest
// -- the guard is not visible in this extraction, confirm before relying
// on it.
1401 for (i = 0; i < of->ctx->nb_streams; i++)
1402 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
// reap_filters(): drain every configured filtergraph buffersink and feed
// the resulting frames to the video/audio encoders; with flush!=0, EOF
// from a sink triggers a final do_video_out(NULL) flush.
// NOTE(review): embedded numbering jumps indicate missing lines (loop
// braces, continue/break statements); visible text kept byte-identical.
1407 * Get and encode new output from any of the filtergraphs, without causing
1410 * @return 0 for success, <0 for severe errors
1412 static int reap_filters(int flush)
1414 AVFrame *filtered_frame = NULL;
1417 /* Reap all buffers present in the buffer sinks */
1418 for (i = 0; i < nb_output_streams; i++) {
1419 OutputStream *ost = output_streams[i];
1420 OutputFile *of = output_files[ost->file_index];
1421 AVFilterContext *filter;
1422 AVCodecContext *enc = ost->enc_ctx;
// Skip streams whose filtergraph is not (yet) configured.
1425 if (!ost->filter || !ost->filter->graph->graph)
1427 filter = ost->filter->filter;
// Lazy one-shot encoder/muxer init the first time output appears.
1429 if (!ost->initialized) {
1430 char error[1024] = "";
1431 ret = init_output_stream(ost, error, sizeof(error));
1433 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1434 ost->file_index, ost->index, error);
1439 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1440 return AVERROR(ENOMEM);
1442 filtered_frame = ost->filtered_frame;
1445 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
// Non-blocking pull: NO_REQUEST means only already-buffered frames are taken.
1446 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1447 AV_BUFFERSINK_FLAG_NO_REQUEST);
1449 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1450 av_log(NULL, AV_LOG_WARNING,
1451 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1452 } else if (flush && ret == AVERROR_EOF) {
1453 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1454 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1458 if (ost->finished) {
1459 av_frame_unref(filtered_frame);
// Rescale the frame pts into the encoder time base, keeping a
// higher-precision float copy (extra_bits widens tb.den first).
1462 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1463 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1464 AVRational filter_tb = av_buffersink_get_time_base(filter);
1465 AVRational tb = enc->time_base;
1466 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1468 tb.den <<= extra_bits;
1470 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1471 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1472 float_pts /= 1 << extra_bits;
1473 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1474 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1476 filtered_frame->pts =
1477 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1478 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1480 //if (ost->source_index >= 0)
1481 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
// Dispatch on the sink's media type: video and audio are encoded;
// subtitles are not yet supported through filters.
1483 switch (av_buffersink_get_type(filter)) {
1484 case AVMEDIA_TYPE_VIDEO:
1485 if (!ost->frame_aspect_ratio.num)
1486 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1489 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1490 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1492 enc->time_base.num, enc->time_base.den);
1495 do_video_out(of, ost, filtered_frame, float_pts);
1497 case AVMEDIA_TYPE_AUDIO:
1498 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1499 enc->channels != filtered_frame->channels) {
1500 av_log(NULL, AV_LOG_ERROR,
1501 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1504 do_audio_out(of, ost, filtered_frame);
1507 // TODO support subtitle filters
1511 av_frame_unref(filtered_frame);
// print_final_stats(): at the end of a run, log aggregate byte counts per
// media type, muxing overhead, and verbose per-file / per-stream demux
// and mux statistics; warn when nothing at all was encoded.
// NOTE(review): numbering jumps show missing lines (braces, else arms);
// visible text kept byte-identical.
1518 static void print_final_stats(int64_t total_size)
1520 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1521 uint64_t subtitle_size = 0;
1522 uint64_t data_size = 0;
1523 float percent = -1.0;
// Accumulate output bytes per codec type plus global-header (extradata) size.
1527 for (i = 0; i < nb_output_streams; i++) {
1528 OutputStream *ost = output_streams[i];
1529 switch (ost->enc_ctx->codec_type) {
1530 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1531 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1532 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1533 default: other_size += ost->data_size; break;
1535 extra_size += ost->enc_ctx->extradata_size;
1536 data_size += ost->data_size;
// Two-pass first-pass-only streams are treated specially (body not visible).
1537 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1538 != AV_CODEC_FLAG_PASS1)
// Muxing overhead: container bytes beyond the raw stream payload.
1542 if (data_size && total_size>0 && total_size >= data_size)
1543 percent = 100.0 * (total_size - data_size) / data_size;
1545 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1546 video_size / 1024.0,
1547 audio_size / 1024.0,
1548 subtitle_size / 1024.0,
1549 other_size / 1024.0,
1550 extra_size / 1024.0);
1552 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1554 av_log(NULL, AV_LOG_INFO, "unknown");
1555 av_log(NULL, AV_LOG_INFO, "\n");
1557 /* print verbose per-stream stats */
1558 for (i = 0; i < nb_input_files; i++) {
1559 InputFile *f = input_files[i];
1560 uint64_t total_packets = 0, total_size = 0;
1562 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1563 i, f->ctx->filename);
1565 for (j = 0; j < f->nb_streams; j++) {
1566 InputStream *ist = input_streams[f->ist_index + j];
1567 enum AVMediaType type = ist->dec_ctx->codec_type;
1569 total_size += ist->data_size;
1570 total_packets += ist->nb_packets;
1572 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1573 i, j, media_type_string(type));
1574 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1575 ist->nb_packets, ist->data_size);
1577 if (ist->decoding_needed) {
1578 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1579 ist->frames_decoded);
1580 if (type == AVMEDIA_TYPE_AUDIO)
1581 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1582 av_log(NULL, AV_LOG_VERBOSE, "; ");
1585 av_log(NULL, AV_LOG_VERBOSE, "\n");
1588 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1589 total_packets, total_size);
// Mirror of the loop above, for the output (mux) side.
1592 for (i = 0; i < nb_output_files; i++) {
1593 OutputFile *of = output_files[i];
1594 uint64_t total_packets = 0, total_size = 0;
1596 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1597 i, of->ctx->filename);
1599 for (j = 0; j < of->ctx->nb_streams; j++) {
1600 OutputStream *ost = output_streams[of->ost_index + j];
1601 enum AVMediaType type = ost->enc_ctx->codec_type;
1603 total_size += ost->data_size;
1604 total_packets += ost->packets_written;
1606 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1607 i, j, media_type_string(type));
1608 if (ost->encoding_needed) {
1609 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1610 ost->frames_encoded);
1611 if (type == AVMEDIA_TYPE_AUDIO)
1612 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1613 av_log(NULL, AV_LOG_VERBOSE, "; ");
1616 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1617 ost->packets_written, ost->data_size);
1619 av_log(NULL, AV_LOG_VERBOSE, "\n");
1622 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1623 total_packets, total_size);
1625 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1626 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1628 av_log(NULL, AV_LOG_WARNING, "\n");
1630 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
// print_report(): build and emit the periodic one-line progress report
// (frame/fps/q/size/time/bitrate/speed) to stderr and, in key=value form,
// to the -progress AVIO stream; throttled to one update per 500ms unless
// this is the final report, which also triggers print_final_stats().
// NOTE(review): numbering jumps show missing lines (early returns, some
// braces and qp-histogram code); visible text kept byte-identical.
1635 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1638 AVBPrint buf_script;
1640 AVFormatContext *oc;
1642 AVCodecContext *enc;
1643 int frame_number, vid, i;
1646 int64_t pts = INT64_MIN + 1;
// Static state: throttling timestamp and a cumulative qp histogram.
1647 static int64_t last_time = -1;
1648 static int qp_histogram[52];
1649 int hours, mins, secs, us;
1653 if (!print_stats && !is_last_report && !progress_avio)
// Rate-limit intermediate reports to one per 500ms of wall clock.
1656 if (!is_last_report) {
1657 if (last_time == -1) {
1658 last_time = cur_time;
1661 if ((cur_time - last_time) < 500000)
1663 last_time = cur_time;
1666 t = (cur_time-timer_start) / 1000000.0;
1669 oc = output_files[0]->ctx;
1671 total_size = avio_size(oc->pb);
1672 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1673 total_size = avio_tell(oc->pb);
1677 av_bprint_init(&buf_script, 0, 1);
1678 for (i = 0; i < nb_output_streams; i++) {
1680 ost = output_streams[i];
1682 if (!ost->stream_copy)
1683 q = ost->quality / (float) FF_QP2LAMBDA;
// Secondary video streams only contribute their quantizer to the line.
1685 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1686 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1687 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1688 ost->file_index, ost->index, q);
// First video stream drives frame count and fps.
1690 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1693 frame_number = ost->frame_number;
1694 fps = t > 1 ? frame_number / t : 0;
1695 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1696 frame_number, fps < 9.95, fps, q);
1697 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1698 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1699 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1700 ost->file_index, ost->index, q);
1702 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
// qp_histogram is printed as one hex digit per bucket (log2 scale).
1706 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1708 for (j = 0; j < 32; j++)
1709 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1712 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1714 double error, error_sum = 0;
1715 double scale, scale_sum = 0;
1717 char type[3] = { 'Y','U','V' };
1718 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
// Per-plane PSNR; the last report uses the cumulative codec error.
1719 for (j = 0; j < 3; j++) {
1720 if (is_last_report) {
1721 error = enc->error[j];
1722 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1724 error = ost->error[j];
1725 scale = enc->width * enc->height * 255.0 * 255.0;
1731 p = psnr(error / scale);
1732 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1733 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1734 ost->file_index, ost->index, type[j] | 32, p);
1736 p = psnr(error_sum / scale_sum);
1737 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1738 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1739 ost->file_index, ost->index, p);
1743 /* compute min output value */
1744 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1745 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1746 ost->st->time_base, AV_TIME_BASE_Q));
1748 nb_frames_drop += ost->last_dropped;
// Split |pts| (in AV_TIME_BASE units) into h:m:s.us for display.
1751 secs = FFABS(pts) / AV_TIME_BASE;
1752 us = FFABS(pts) % AV_TIME_BASE;
1758 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1759 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1761 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1763 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1764 "size=%8.0fkB time=", total_size / 1024.0);
1766 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1767 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1768 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1769 (100 * us) / AV_TIME_BASE);
1772 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1773 av_bprintf(&buf_script, "bitrate=N/A\n");
1775 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1776 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1779 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1780 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1781 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1782 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1783 hours, mins, secs, us);
1785 if (nb_frames_dup || nb_frames_drop)
1786 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1787 nb_frames_dup, nb_frames_drop);
1788 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1789 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1792 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1793 av_bprintf(&buf_script, "speed=N/A\n");
1795 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1796 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
// '\r' keeps intermediate reports on one console line; '\n' on the last.
1799 if (print_stats || is_last_report) {
1800 const char end = is_last_report ? '\n' : '\r';
1801 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1802 fprintf(stderr, "%s %c", buf, end);
1804 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1809 if (progress_avio) {
1810 av_bprintf(&buf_script, "progress=%s\n",
1811 is_last_report ? "end" : "continue");
1812 avio_write(progress_avio, buf_script.str,
1813 FFMIN(buf_script.len, buf_script.size - 1));
1814 avio_flush(progress_avio);
1815 av_bprint_finalize(&buf_script, NULL);
1816 if (is_last_report) {
1817 if ((ret = avio_closep(&progress_avio)) < 0)
1818 av_log(NULL, AV_LOG_ERROR,
1819 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1824 print_final_stats(total_size);
// flush_encoders(): at end of input, drain every video/audio encoder by
// sending a NULL frame and writing the remaining delayed packets; streams
// that never received data are first force-initialized with fake input
// parameters taken from libavformat so the output file stays valid.
// NOTE(review): numbering jumps show missing lines (continues, braces,
// exit paths); visible text kept byte-identical.
1827 static void flush_encoders(void)
1831 for (i = 0; i < nb_output_streams; i++) {
1832 OutputStream *ost = output_streams[i];
1833 AVCodecContext *enc = ost->enc_ctx;
1834 OutputFile *of = output_files[ost->file_index];
1836 if (!ost->encoding_needed)
1839 // Try to enable encoding with no input frames.
1840 // Maybe we should just let encoding fail instead.
1841 if (!ost->initialized) {
1842 FilterGraph *fg = ost->filter->graph;
1843 char error[1024] = "";
1845 av_log(NULL, AV_LOG_WARNING,
1846 "Finishing stream %d:%d without any data written to it.\n",
1847 ost->file_index, ost->st->index);
1849 if (ost->filter && !fg->graph) {
// Backfill filter input parameters from the demuxer since the
// decoder never produced a frame to derive them from.
1851 for (x = 0; x < fg->nb_inputs; x++) {
1852 InputFilter *ifilter = fg->inputs[x];
1853 if (ifilter->format < 0) {
1854 AVCodecParameters *par = ifilter->ist->st->codecpar;
1855 // We never got any input. Set a fake format, which will
1856 // come from libavformat.
1857 ifilter->format = par->format;
1858 ifilter->sample_rate = par->sample_rate;
1859 ifilter->channels = par->channels;
1860 ifilter->channel_layout = par->channel_layout;
1861 ifilter->width = par->width;
1862 ifilter->height = par->height;
1863 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1867 if (!ifilter_has_all_input_formats(fg))
1870 ret = configure_filtergraph(fg);
1872 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1876 finish_output_stream(ost);
1879 ret = init_output_stream(ost, error, sizeof(error));
1881 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1882 ost->file_index, ost->index, error);
// Raw/PCM-style audio encoders (frame_size <= 1) buffer nothing.
1887 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1890 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1894 const char *desc = NULL;
1898 switch (enc->codec_type) {
1899 case AVMEDIA_TYPE_AUDIO:
1902 case AVMEDIA_TYPE_VIDEO:
1909 av_init_packet(&pkt);
1913 update_benchmark(NULL);
// Drain loop: send a NULL (flush) frame whenever the encoder asks
// for more input, then receive until EOF.
1915 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1916 ret = avcodec_send_frame(enc, NULL);
1918 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1925 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1926 if (ret < 0 && ret != AVERROR_EOF) {
1927 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1932 if (ost->logfile && enc->stats_out) {
1933 fprintf(ost->logfile, "%s", enc->stats_out);
// EOF: emit one final (empty) packet with eof=1 to flush bitstream filters.
1935 if (ret == AVERROR_EOF) {
1936 output_packet(of, &pkt, ost, 1);
1939 if (ost->finished & MUXER_FINISHED) {
1940 av_packet_unref(&pkt);
1943 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1944 pkt_size = pkt.size;
1945 output_packet(of, &pkt, ost, 0);
1946 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1947 do_video_stats(ost, pkt_size);
// check_output_constraints(): decide whether a packet from input stream
// ist may be written to output stream ost right now (matching source
// index, and not before the output file's start time).
// NOTE(review): the return statements are on lines missing from this
// extraction; visible text kept byte-identical.
1954 * Check whether a packet from ist should be written into ost at this time
1956 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1958 OutputFile *of = output_files[ost->file_index];
1959 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1961 if (ost->source_index != ist_index)
1967 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
// do_streamcopy(): copy one input packet to an output stream without
// re-encoding: drop pre-start / pre-keyframe data, honor recording-time
// limits, rescale timestamps into the mux timebase, optionally run
// av_parser_change() for a few legacy codecs, then mux the packet.
// NOTE(review): numbering jumps show missing lines (returns, braces,
// else arms); visible text kept byte-identical.
1973 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1975 OutputFile *of = output_files[ost->file_index];
1976 InputFile *f = input_files [ist->file_index];
1977 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1978 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1979 AVPacket opkt = { 0 };
1981 av_init_packet(&opkt);
1983 // EOF: flush output bitstream filters.
1985 output_packet(of, &opkt, ost, 1);
// Before the first keyframe nothing can be decoded downstream: skip
// unless -copyinkf was given.
1989 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1990 !ost->copy_initial_nonkeyframes)
1993 if (!ost->frame_number && !ost->copy_prior_start) {
1994 int64_t comp_start = start_time;
1995 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1996 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1997 if (pkt->pts == AV_NOPTS_VALUE ?
1998 ist->pts < comp_start :
1999 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
// Close the stream once the output file's -t / recording time is reached.
2003 if (of->recording_time != INT64_MAX &&
2004 ist->pts >= of->recording_time + start_time) {
2005 close_output_stream(ost);
2009 if (f->recording_time != INT64_MAX) {
2010 start_time = f->ctx->start_time;
2011 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2012 start_time += f->start_time;
2013 if (ist->pts >= f->recording_time + start_time) {
2014 close_output_stream(ost);
2019 /* force the input stream PTS */
2020 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
// Rescale pts/dts from the input stream timebase into the muxer
// timebase, shifted by the output start time.
2023 if (pkt->pts != AV_NOPTS_VALUE)
2024 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2026 opkt.pts = AV_NOPTS_VALUE;
2028 if (pkt->dts == AV_NOPTS_VALUE)
2029 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2031 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2032 opkt.dts -= ost_tb_start_time;
// Audio: derive an exact dts from the sample count to avoid timebase
// rounding drift (av_rescale_delta keeps the residual).
2034 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2035 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2037 duration = ist->dec_ctx->frame_size;
2038 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2039 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2040 ost->mux_timebase) - ost_tb_start_time;
2043 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2045 opkt.flags = pkt->flags;
2046 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2047 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2048 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2049 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2050 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2052 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2053 &opkt.data, &opkt.size,
2054 pkt->data, pkt->size,
2055 pkt->flags & AV_PKT_FLAG_KEY);
2057 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
// When av_parser_change allocated a new buffer, wrap it so it is freed
// with the packet.
2062 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2067 opkt.data = pkt->data;
2068 opkt.size = pkt->size;
2070 av_copy_packet_side_data(&opkt, pkt);
2072 output_packet(of, &opkt, ost, 0);
// guess_input_channel_layout(): if the decoder reported no channel
// layout, pick the default layout for its channel count (bounded by
// -guess_layout_max) and warn the user about the guess.
// NOTE(review): return statements and braces are on lines missing from
// this extraction; visible text kept byte-identical.
2075 int guess_input_channel_layout(InputStream *ist)
2077 AVCodecContext *dec = ist->dec_ctx;
2079 if (!dec->channel_layout) {
2080 char layout_name[256];
// Refuse to guess for very high channel counts (user-tunable cap).
2082 if (dec->channels > ist->guess_layout_max)
2084 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2085 if (!dec->channel_layout)
2087 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2088 dec->channels, dec->channel_layout);
2089 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2090 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
// check_decode_result(): account decode successes/failures in
// decode_error_stat[] and, with -xerror, abort on a decode error or on a
// corrupt decoded frame.
// NOTE(review): the exit/abort calls are on lines missing from this
// extraction; visible text kept byte-identical.
2095 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2097 if (*got_output || ret<0)
2098 decode_error_stat[ret<0] ++;
2100 if (ret < 0 && exit_on_error)
2103 if (exit_on_error && *got_output && ist) {
2104 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2105 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
// ifilter_has_all_input_formats(): true once every audio/video input of
// the filtergraph has a known format (format >= 0), meaning the graph
// can be configured.
// NOTE(review): the return statements and loop body braces are on lines
// missing from this extraction; visible text kept byte-identical.
2111 // Filters can be configured only if the formats of all inputs are known.
2112 static int ifilter_has_all_input_formats(FilterGraph *fg)
2115 for (i = 0; i < fg->nb_inputs; i++) {
2116 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2117 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
// ifilter_send_frame(): push a decoded frame into one filtergraph input.
// Detects parameter changes (format, hw frames ctx, sample rate/channels/
// layout for audio, width/height for video); on change or unconfigured
// graph, either queues the frame (formats still unknown) or drains and
// reconfigures the graph before pushing.
// NOTE(review): numbering jumps show missing lines (returns, error
// labels, braces); visible text kept byte-identical.
2123 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2125 FilterGraph *fg = ifilter->graph;
2126 int need_reinit, ret, i;
2128 /* determine if the parameters for this input changed */
2129 need_reinit = ifilter->format != frame->format;
2130 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2131 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2134 switch (ifilter->ist->st->codecpar->codec_type) {
2135 case AVMEDIA_TYPE_AUDIO:
2136 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2137 ifilter->channels != frame->channels ||
2138 ifilter->channel_layout != frame->channel_layout;
2140 case AVMEDIA_TYPE_VIDEO:
2141 need_reinit |= ifilter->width != frame->width ||
2142 ifilter->height != frame->height;
// Record the new input parameters on the InputFilter.
2147 ret = ifilter_parameters_from_frame(ifilter, frame);
2152 /* (re)init the graph if possible, otherwise buffer the frame and return */
2153 if (need_reinit || !fg->graph) {
2154 for (i = 0; i < fg->nb_inputs; i++) {
2155 if (!ifilter_has_all_input_formats(fg)) {
// Not all inputs known yet: clone the frame into this input's
// FIFO and try again once every format is available.
2156 AVFrame *tmp = av_frame_clone(frame);
2158 return AVERROR(ENOMEM);
2159 av_frame_unref(frame);
2161 if (!av_fifo_space(ifilter->frame_queue)) {
2162 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2164 av_frame_free(&tmp);
2168 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
// Drain pending output from the old graph before reconfiguring.
2173 ret = reap_filters(1);
2174 if (ret < 0 && ret != AVERROR_EOF) {
2176 av_strerror(ret, errbuf, sizeof(errbuf));
2178 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2182 ret = configure_filtergraph(fg);
2184 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2189 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2191 if (ret != AVERROR_EOF)
2192 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
// ifilter_send_eof(): signal end-of-stream on one filtergraph input. If
// the graph is configured, close the buffersrc at the given pts; if it
// was never configured and ALL inputs are now at EOF, mark every output
// stream of the graph finished.
// NOTE(review): numbering jumps show missing lines (eof flag set, return
// paths); visible text kept byte-identical.
2199 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2205 if (ifilter->filter) {
2206 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2210 // the filtergraph was never configured
2211 FilterGraph *fg = ifilter->graph;
// Scan for any input not yet at EOF; i == nb_inputs means all done.
2212 for (i = 0; i < fg->nb_inputs; i++)
2213 if (!fg->inputs[i]->eof)
2215 if (i == fg->nb_inputs) {
2216 // All the input streams have finished without the filtergraph
2217 // ever being configured.
2218 // Mark the output streams as finished.
2219 for (j = 0; j < fg->nb_outputs; j++)
2220 finish_output_stream(fg->outputs[j]->ost);
// decode(): thin wrapper around the send/receive decode API that mimics
// the old one-shot decode calls; see the contract in the comment below.
// NOTE(review): the *got_frame assignments and return statements are on
// lines missing from this extraction; visible text kept byte-identical.
2227 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2228 // There is the following difference: if you got a frame, you must call
2229 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2230 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2231 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2238 ret = avcodec_send_packet(avctx, pkt);
2239 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2240 // decoded frames with avcodec_receive_frame() until done.
2241 if (ret < 0 && ret != AVERROR_EOF)
2245 ret = avcodec_receive_frame(avctx, frame);
2246 if (ret < 0 && ret != AVERROR(EAGAIN))
// send_frame_to_filters(): distribute a decoded frame to every filter
// this input stream feeds. All but the last filter receive a reference
// (av_frame_ref into ist->filter_frame); the last consumes the original.
// EOF from a filter is treated as success for that filter.
// NOTE(review): numbering jumps show missing lines (break/return,
// braces); visible text kept byte-identical.
2254 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2259 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2260 for (i = 0; i < ist->nb_filters; i++) {
2261 if (i < ist->nb_filters - 1) {
2262 f = ist->filter_frame;
2263 ret = av_frame_ref(f, decoded_frame);
2268 ret = ifilter_send_frame(ist->filters[i], f);
2269 if (ret == AVERROR_EOF)
2270 ret = 0; /* ignore */
2272 av_log(NULL, AV_LOG_ERROR,
2273 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
// decode_audio(): decode one audio packet, validate the sample rate,
// advance the stream's synthetic next_pts/next_dts by the decoded sample
// count, pick the best timestamp source (frame pts > packet pts > stream
// dts) and rescale it to the sample-rate timebase, then forward the
// frame to the filtergraphs.
// NOTE(review): numbering jumps show missing lines (goto fail paths,
// braces, else arms); visible text kept byte-identical.
2280 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2283 AVFrame *decoded_frame;
2284 AVCodecContext *avctx = ist->dec_ctx;
2286 AVRational decoded_frame_tb;
// Lazily allocate the reusable decode/filter frames.
2288 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2289 return AVERROR(ENOMEM);
2290 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2291 return AVERROR(ENOMEM);
2292 decoded_frame = ist->decoded_frame;
2294 update_benchmark(NULL);
2295 ret = decode(avctx, decoded_frame, got_output, pkt);
2296 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2300 if (ret >= 0 && avctx->sample_rate <= 0) {
2301 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2302 ret = AVERROR_INVALIDDATA;
2305 if (ret != AVERROR_EOF)
2306 check_decode_result(ist, got_output, ret);
2308 if (!*got_output || ret < 0)
2311 ist->samples_decoded += decoded_frame->nb_samples;
2312 ist->frames_decoded++;
2315 /* increment next_dts to use for the case where the input stream does not
2316 have timestamps or there are multiple frames in the packet */
2317 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2319 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
// Timestamp source priority: frame pts, then packet pts, then ist->dts.
2323 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2324 decoded_frame_tb = ist->st->time_base;
2325 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2326 decoded_frame->pts = pkt->pts;
2327 decoded_frame_tb = ist->st->time_base;
2329 decoded_frame->pts = ist->dts;
2330 decoded_frame_tb = AV_TIME_BASE_Q;
// av_rescale_delta keeps a running remainder so sample-accurate timing
// survives the timebase conversion.
2332 if (decoded_frame->pts != AV_NOPTS_VALUE)
2333 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2334 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2335 (AVRational){1, avctx->sample_rate});
2336 ist->nb_samples = decoded_frame->nb_samples;
2337 err = send_frame_to_filters(ist, decoded_frame);
2339 av_frame_unref(ist->filter_frame);
2340 av_frame_unref(decoded_frame);
2341 return err < 0 ? err : ret;
// decode_video(): decode one video packet, work around parser/decoder
// delay mismatches, pick a best-effort timestamp (buffering input dts
// values to assign to delayed frames at EOF), and forward the decoded
// frame to the filtergraphs.
// NOTE(review): numbering jumps show missing lines (avpkt setup, fail
// paths, braces, and the closing brace past this excerpt); visible text
// kept byte-identical.
2344 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2347 AVFrame *decoded_frame;
2348 int i, ret = 0, err = 0;
2349 int64_t best_effort_timestamp;
2350 int64_t dts = AV_NOPTS_VALUE;
2353 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2354 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2356 if (!eof && pkt && pkt->size == 0)
2359 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2360 return AVERROR(ENOMEM);
2361 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2362 return AVERROR(ENOMEM);
2363 decoded_frame = ist->decoded_frame;
2364 if (ist->dts != AV_NOPTS_VALUE)
2365 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2368 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2371 // The old code used to set dts on the drain packet, which does not work
2372 // with the new API anymore.
// Queue this packet's dts so delayed (reordered) frames flushed at EOF
// can still get a timestamp.
2374 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2376 return AVERROR(ENOMEM);
2377 ist->dts_buffer = new;
2378 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2381 update_benchmark(NULL);
2382 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2383 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2387 // The following line may be required in some cases where there is no parser
2388 // or the parser does not set has_b_frames correctly
2389 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2390 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2391 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2393 av_log(ist->dec_ctx, AV_LOG_WARNING,
2394 "video_delay is larger in decoder than demuxer %d > %d.\n"
2395 "If you want to help, upload a sample "
2396 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2397 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2398 ist->dec_ctx->has_b_frames,
2399 ist->st->codecpar->video_delay);
2402 if (ret != AVERROR_EOF)
2403 check_decode_result(ist, got_output, ret);
// Debug-only sanity check: decoded frame geometry vs. decoder context.
2405 if (*got_output && ret >= 0) {
2406 if (ist->dec_ctx->width != decoded_frame->width ||
2407 ist->dec_ctx->height != decoded_frame->height ||
2408 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2409 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2410 decoded_frame->width,
2411 decoded_frame->height,
2412 decoded_frame->format,
2413 ist->dec_ctx->width,
2414 ist->dec_ctx->height,
2415 ist->dec_ctx->pix_fmt);
2419 if (!*got_output || ret < 0)
2422 if(ist->top_field_first>=0)
2423 decoded_frame->top_field_first = ist->top_field_first;
2425 ist->frames_decoded++;
// Download hwaccel surface data to a system-memory frame when needed.
2427 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2428 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2432 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2434 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2435 *duration_pts = decoded_frame->pkt_duration;
// With -r on input, ignore container timestamps and count frames (CFR).
2437 if (ist->framerate.num)
2438 best_effort_timestamp = ist->cfr_next_pts++;
// At EOF, give otherwise-timestampless delayed frames the oldest queued dts.
2440 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2441 best_effort_timestamp = ist->dts_buffer[0];
2443 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2444 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2445 ist->nb_dts_buffer--;
2448 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2449 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2451 if (ts != AV_NOPTS_VALUE)
2452 ist->next_pts = ist->pts = ts;
2456 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2457 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2458 ist->st->index, av_ts2str(decoded_frame->pts),
2459 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2460 best_effort_timestamp,
2461 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2462 decoded_frame->key_frame, decoded_frame->pict_type,
2463 ist->st->time_base.num, ist->st->time_base.den);
2466 if (ist->st->sample_aspect_ratio.num)
2467 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2469 err = send_frame_to_filters(ist, decoded_frame);
2472 av_frame_unref(ist->filter_frame);
2473 av_frame_unref(decoded_frame);
2474 return err < 0 ? err : ret;
/* Decode one subtitle packet for 'ist' and distribute the result:
 * optionally retime it against the previous subtitle (-fix_sub_duration),
 * feed the sub2video machinery for filtering, and encode it on every
 * subtitle output stream that takes this input. */
2477 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2480 AVSubtitle subtitle;
2482 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2483 &subtitle, got_output, pkt);
2485 check_decode_result(NULL, got_output, ret);
2487 if (ret < 0 || !*got_output) {
/* Decode failed or produced nothing: flush pending sub2video state. */
2490 sub2video_flush(ist);
2494 if (ist->fix_sub_duration) {
2496 if (ist->prev_sub.got_output) {
/* Clip the previous subtitle so it ends when the new one starts;
 * the rescale converts a pts delta (AV_TIME_BASE units) to ms. */
2497 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2498 1000, AV_TIME_BASE);
2499 if (end < ist->prev_sub.subtitle.end_display_time) {
2500 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2501 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2502 ist->prev_sub.subtitle.end_display_time, end,
2503 end <= 0 ? ", dropping it" : "");
2504 ist->prev_sub.subtitle.end_display_time = end;
/* Delay output by one subtitle: emit the stashed previous one now and
 * keep the current one so its duration can be fixed on the next call. */
2507 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2508 FFSWAP(int, ret, ist->prev_sub.ret);
2509 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2517 if (ist->sub2video.frame) {
2518 sub2video_update(ist, &subtitle);
2519 } else if (ist->nb_filters) {
/* Filters exist but sub2video is not set up yet: queue the subtitle,
 * doubling the FIFO when it is full. */
2520 if (!ist->sub2video.sub_queue)
2521 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2522 if (!ist->sub2video.sub_queue)
2524 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2525 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2529 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
/* Nothing to encode if the subtitle carries no rectangles. */
2533 if (!subtitle.num_rects)
2536 ist->frames_decoded++;
/* Send the decoded subtitle to every matching subtitle encoder. */
2538 for (i = 0; i < nb_output_streams; i++) {
2539 OutputStream *ost = output_streams[i];
2541 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2542 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2545 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
/* The AVSubtitle owns allocated rects; always release it. */
2550 avsubtitle_free(&subtitle);
/* Mark every filtergraph input fed by this stream as finished,
 * passing the last pts converted back to the stream time base. */
2554 static int send_filter_eof(InputStream *ist)
2557 /* TODO keep pts also in stream time base to avoid converting back */
2558 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2559 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2561 for (i = 0; i < ist->nb_filters; i++) {
2562 ret = ifilter_send_eof(ist->filters[i], pts);
2569 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: maintains the input stream's running
 * pts/dts estimates (in AV_TIME_BASE units), drives the decoders when
 * decoding is needed, and forwards packets directly for stream copy.
 * Returns 0 once EOF has been fully processed, non-zero otherwise. */
2570 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2574 int eof_reached = 0;
/* First packet ever: seed dts.  With B-frames the decoder delays
 * output, so start negative by has_b_frames frame durations. */
2577 if (!ist->saw_first_ts) {
2578 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2580 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2581 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2582 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2584 ist->saw_first_ts = 1;
2587 if (ist->next_dts == AV_NOPTS_VALUE)
2588 ist->next_dts = ist->dts;
2589 if (ist->next_pts == AV_NOPTS_VALUE)
2590 ist->next_pts = ist->pts;
2594 av_init_packet(&avpkt);
/* Resync the running dts to the packet's own dts when present. */
2601 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2602 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2603 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2604 ist->next_pts = ist->pts = ist->dts;
2607 // while we have more to decode or while the decoder did output something on EOF
2608 while (ist->decoding_needed) {
2609 int64_t duration_dts = 0;
2610 int64_t duration_pts = 0;
2612 int decode_failed = 0;
2614 ist->pts = ist->next_pts;
2615 ist->dts = ist->next_dts;
2617 switch (ist->dec_ctx->codec_type) {
2618 case AVMEDIA_TYPE_AUDIO:
/* On repeated iterations the packet is NULL: we are draining. */
2619 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2622 case AVMEDIA_TYPE_VIDEO:
2623 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2625 if (!repeating || !pkt || got_output) {
2626 if (pkt && pkt->duration) {
2627 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2628 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
/* No explicit packet duration: derive one from the decoder's
 * framerate, honouring the parser's repeat_pict when available. */
2629 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2630 duration_dts = ((int64_t)AV_TIME_BASE *
2631 ist->dec_ctx->framerate.den * ticks) /
2632 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2635 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2636 ist->next_dts += duration_dts;
2638 ist->next_dts = AV_NOPTS_VALUE;
/* Prefer the decoded frame's own duration for next_pts; otherwise
 * fall back to the dts-based estimate. */
2642 if (duration_pts > 0) {
2643 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2645 ist->next_pts += duration_dts;
2649 case AVMEDIA_TYPE_SUBTITLE:
2652 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2653 if (!pkt && ret >= 0)
2660 if (ret == AVERROR_EOF) {
2666 if (decode_failed) {
2667 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2668 ist->file_index, ist->st->index, av_err2str(ret));
2670 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2671 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
/* A decode error is tolerated unless -xerror was given; a post-decode
 * processing error is always fatal. */
2673 if (!decode_failed || exit_on_error)
2679 ist->got_output = 1;
2684 // During draining, we might get multiple output frames in this loop.
2685 // ffmpeg.c does not drain the filter chain on configuration changes,
2686 // which means if we send multiple frames at once to the filters, and
2687 // one of those frames changes configuration, the buffered frames will
2688 // be lost. This can upset certain FATE tests.
2689 // Decode only 1 frame per call on EOF to appease these FATE tests.
2690 // The ideal solution would be to rewrite decoding to use the new
2691 // decoding API in a better way.
2698 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2699 /* except when looping we need to flush but not to send an EOF */
2700 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2701 int ret = send_filter_eof(ist);
2703 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2708 /* handle stream copy */
2709 if (!ist->decoding_needed && pkt) {
2710 ist->dts = ist->next_dts;
2711 switch (ist->dec_ctx->codec_type) {
2712 case AVMEDIA_TYPE_AUDIO:
/* Advance dts by exactly one audio frame worth of samples. */
2713 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2714 ist->dec_ctx->sample_rate;
2716 case AVMEDIA_TYPE_VIDEO:
2717 if (ist->framerate.num) {
2718 // TODO: Remove work-around for c99-to-c89 issue 7
2719 AVRational time_base_q = AV_TIME_BASE_Q;
/* Round-trip through the forced framerate's time base to step
 * next_dts forward by exactly one frame interval. */
2720 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2721 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2722 } else if (pkt->duration) {
2723 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2724 } else if(ist->dec_ctx->framerate.num != 0) {
2725 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2726 ist->next_dts += ((int64_t)AV_TIME_BASE *
2727 ist->dec_ctx->framerate.den * ticks) /
2728 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2732 ist->pts = ist->dts;
2733 ist->next_pts = ist->next_dts;
/* Forward the packet unchanged to every copy output it maps to. */
2735 for (i = 0; i < nb_output_streams; i++) {
2736 OutputStream *ost = output_streams[i];
2738 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2741 do_streamcopy(ist, ost, pkt);
2744 return !eof_reached;
/* Emit the SDP description covering all RTP output files, either to
 * stdout or to -sdp_file.  Bails out (returns early) until every
 * output file has had its header written. */
2747 static void print_sdp(void)
2752 AVIOContext *sdp_pb;
2753 AVFormatContext **avc;
2755 for (i = 0; i < nb_output_files; i++) {
2756 if (!output_files[i]->header_written)
2760 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the outputs muxed as "rtp"; j counts them. */
2763 for (i = 0, j = 0; i < nb_output_files; i++) {
2764 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2765 avc[j] = output_files[i]->ctx;
2773 av_sdp_create(avc, j, sdp, sizeof(sdp));
2775 if (!sdp_filename) {
2776 printf("SDP:\n%s\n", sdp);
2779 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2780 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2782 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2783 avio_closep(&sdp_pb);
/* Freeing the filename ensures the SDP file is written only once. */
2784 av_freep(&sdp_filename);
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick/initialize a hardware acceleration method matching
 * the user's -hwaccel request.  Returns the chosen pixel format, or
 * AV_PIX_FMT_NONE when a requested hwaccel cannot be initialized. */
2792 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2794 InputStream *ist = s->opaque;
2795 const enum AVPixelFormat *p;
2798 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2799 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2800 const AVCodecHWConfig *config = NULL;
/* Plain software formats need no negotiation — skip them here. */
2803 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Generic/auto mode: consult the codec's published hw configs. */
2806 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2807 ist->hwaccel_id == HWACCEL_AUTO) {
2809 config = avcodec_get_hw_config(s->codec, i);
/* Only device-context-based methods are handled on this path. */
2812 if (!(config->methods &
2813 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2815 if (config->pix_fmt == *p)
2820 if (config->device_type != ist->hwaccel_device_type) {
2821 // Different hwaccel offered, ignore.
2825 ret = hwaccel_decode_init(s);
/* Explicit -hwaccel (non-auto) that cannot start is fatal. */
2827 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2828 av_log(NULL, AV_LOG_FATAL,
2829 "%s hwaccel requested for input stream #%d:%d, "
2830 "but cannot be initialized.\n",
2831 av_hwdevice_get_type_name(config->device_type),
2832 ist->file_index, ist->st->index);
2833 return AV_PIX_FMT_NONE;
/* Legacy path: look the format up in the static hwaccels[] table. */
2838 const HWAccel *hwaccel = NULL;
2840 for (i = 0; hwaccels[i].name; i++) {
2841 if (hwaccels[i].pix_fmt == *p) {
2842 hwaccel = &hwaccels[i];
2847 // No hwaccel supporting this pixfmt.
2850 if (hwaccel->id != ist->hwaccel_id) {
2851 // Does not match requested hwaccel.
2855 ret = hwaccel->init(s);
2857 av_log(NULL, AV_LOG_FATAL,
2858 "%s hwaccel requested for input stream #%d:%d, "
2859 "but cannot be initialized.\n", hwaccel->name,
2860 ist->file_index, ist->st->index);
2861 return AV_PIX_FMT_NONE;
/* Hand a pre-existing hw frames context to the decoder if we have one. */
2865 if (ist->hw_frames_ctx) {
2866 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2867 if (!s->hw_frames_ctx)
2868 return AV_PIX_FMT_NONE;
/* Remember which format the hwaccel produces for later retrieval. */
2871 ist->hwaccel_pix_fmt = *p;
2878 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2880 InputStream *ist = s->opaque;
2882 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2883 return ist->hwaccel_get_buffer(s, frame, flags);
2885 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (when decoding is needed) and
 * seed its timestamp bookkeeping.  On failure, writes a human-readable
 * message into 'error' and returns a negative AVERROR code. */
2888 static int init_input_stream(int ist_index, char *error, int error_len)
2891 InputStream *ist = input_streams[ist_index];
2893 if (ist->decoding_needed) {
2894 AVCodec *codec = ist->dec;
2896 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2897 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2898 return AVERROR(EINVAL);
/* Wire up our callbacks; 'opaque' lets them find the InputStream. */
2901 ist->dec_ctx->opaque = ist;
2902 ist->dec_ctx->get_format = get_format;
2903 ist->dec_ctx->get_buffer2 = get_buffer;
2904 ist->dec_ctx->thread_safe_callbacks = 1;
2906 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2907 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2908 (ist->decoding_needed & DECODING_FOR_OST)) {
2909 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2910 if (ist->decoding_needed & DECODING_FOR_FILTER)
2911 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2914 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2916 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2917 * audio, and video decoders such as cuvid or mediacodec */
2918 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2920 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2921 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2922 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2923 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2924 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2926 ret = hw_device_setup_for_decode(ist);
2928 snprintf(error, error_len, "Device setup failed for "
2929 "decoder on input stream #%d:%d : %s",
2930 ist->file_index, ist->st->index, av_err2str(ret));
2934 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2935 if (ret == AVERROR_EXPERIMENTAL)
2936 abort_codec_experimental(codec, 0);
2938 snprintf(error, error_len,
2939 "Error while opening decoder for input stream "
2941 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left unconsumed is a user error — reported here. */
2944 assert_avoptions(ist->decoder_opts);
2947 ist->next_pts = AV_NOPTS_VALUE;
2948 ist->next_dts = AV_NOPTS_VALUE;
2953 static InputStream *get_input_stream(OutputStream *ost)
2955 if (ost->source_index >= 0)
2956 return input_streams[ost->source_index];
/*
 * qsort()-style comparator for int64_t values.
 * Returns -1, 0 or 1 without risking overflow from subtraction.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2965 /* open the muxer when all the streams are initialized */
/* Write the container header for 'of' once every one of its output
 * streams reports initialized, then drain each stream's muxing queue
 * of packets buffered while waiting for the header. */
2966 static int check_init_output_file(OutputFile *of, int file_index)
/* Bail out until every stream belonging to this file is ready. */
2970 for (i = 0; i < of->ctx->nb_streams; i++) {
2971 OutputStream *ost = output_streams[of->ost_index + i];
2972 if (!ost->initialized)
2976 of->ctx->interrupt_callback = int_cb;
2978 ret = avformat_write_header(of->ctx, &of->opts);
2980 av_log(NULL, AV_LOG_ERROR,
2981 "Could not write header for output file #%d "
2982 "(incorrect codec parameters ?): %s\n",
2983 file_index, av_err2str(ret));
2986 //assert_avoptions(of->opts);
2987 of->header_written = 1;
2989 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2991 if (sdp_filename || want_sdp)
2994 /* flush the muxing queues */
2995 for (i = 0; i < of->ctx->nb_streams; i++) {
2996 OutputStream *ost = output_streams[of->ost_index + i];
2998 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2999 if (!av_fifo_size(ost->muxing_queue))
3000 ost->mux_timebase = ost->st->time_base;
3002 while (av_fifo_size(ost->muxing_queue)) {
3004 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3005 write_packet(of, &pkt, ost, 1);
/* Initialize the chain of bitstream filters attached to 'ost':
 * propagate codec parameters and time base link-by-link, then copy
 * the final filter's output parameters back onto the stream. */
3012 static int init_output_bsfs(OutputStream *ost)
3017 if (!ost->nb_bitstream_filters)
3020 for (i = 0; i < ost->nb_bitstream_filters; i++) {
3021 ctx = ost->bsf_ctx[i];
/* Each filter's input comes from the previous filter's output; the
 * first one takes the stream's codec parameters directly. */
3023 ret = avcodec_parameters_copy(ctx->par_in,
3024 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3028 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3030 ret = av_bsf_init(ctx);
3032 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3033 ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines what the muxer will see. */
3038 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3039 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3043 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream in stream-copy mode: replicate the input
 * stream's codec parameters, timing, side data and disposition onto
 * the output, with per-codec-type fixups. */
3048 static int init_output_stream_streamcopy(OutputStream *ost)
3050 OutputFile *of = output_files[ost->file_index];
3051 InputStream *ist = get_input_stream(ost);
3052 AVCodecParameters *par_dst = ost->st->codecpar;
3053 AVCodecParameters *par_src = ost->ref_par;
3056 uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy requires a direct input source and no filtergraph. */
3058 av_assert0(ist && !ost->filter);
3060 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3062 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3064 av_log(NULL, AV_LOG_FATAL,
3065 "Error setting up codec context options.\n");
3068 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec_tag only when the target container can
 * represent it; otherwise let the muxer pick its own tag. */
3071 unsigned int codec_tag_tmp;
3072 if (!of->ctx->oformat->codec_tag ||
3073 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3074 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3075 codec_tag = par_src->codec_tag;
3078 ret = avcodec_parameters_copy(par_dst, par_src);
3082 par_dst->codec_tag = codec_tag;
3084 if (!ost->frame_rate.num)
3085 ost->frame_rate = ist->framerate;
3086 ost->st->avg_frame_rate = ost->frame_rate;
3088 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3092 // copy timebase while removing common factors
3093 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3094 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3096 // copy estimated duration as a hint to the muxer
3097 if (ost->st->duration <= 0 && ist->st->duration > 0)
3098 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3101 ost->st->disposition = ist->st->disposition;
/* Clone all per-stream packet side data onto the output stream. */
3103 if (ist->st->nb_side_data) {
3104 for (i = 0; i < ist->st->nb_side_data; i++) {
3105 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3108 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3110 return AVERROR(ENOMEM);
3111 memcpy(dst_data, sd_src->data, sd_src->size);
/* A user-specified rotation replaces the display matrix outright. */
3115 if (ost->rotate_overridden) {
3116 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3117 sizeof(int32_t) * 9);
3119 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3122 ost->parser = av_parser_init(par_dst->codec_id);
3123 ost->parser_avctx = avcodec_alloc_context3(NULL);
3124 if (!ost->parser_avctx)
3125 return AVERROR(ENOMEM);
3127 switch (par_dst->codec_type) {
3128 case AVMEDIA_TYPE_AUDIO:
3129 if (audio_volume != 256) {
3130 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Zero block_align for MP3/AC3 so the muxer recomputes it. */
3133 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3134 par_dst->block_align= 0;
3135 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3136 par_dst->block_align= 0;
3138 case AVMEDIA_TYPE_VIDEO:
3139 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3141 av_mul_q(ost->frame_aspect_ratio,
3142 (AVRational){ par_dst->height, par_dst->width });
3143 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3144 "with stream copy may produce invalid files\n");
3146 else if (ist->st->sample_aspect_ratio.num)
3147 sar = ist->st->sample_aspect_ratio;
3149 sar = par_src->sample_aspect_ratio;
3150 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3151 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3152 ost->st->r_frame_rate = ist->st->r_frame_rate;
3156 ost->mux_timebase = ist->st->time_base;
/* Store an "encoder" metadata tag on the output stream identifying the
 * encoder used.  In bitexact mode (format or codec flag) the version
 * string is omitted so output stays reproducible. */
3161 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3163 AVDictionaryEntry *e;
3165 uint8_t *encoder_string;
3166 int encoder_string_len;
3167 int format_flags = 0;
3168 int codec_flags = ost->enc_ctx->flags;
/* Respect a tag the user already set explicitly. */
3170 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Re-evaluate the 'fflags'/'flags' option strings because the dicts
 * may hold values not yet applied to the contexts. */
3173 e = av_dict_get(of->opts, "fflags", NULL, 0);
3175 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3178 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3180 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3182 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3185 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3188 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3189 encoder_string = av_mallocz(encoder_string_len);
3190 if (!encoder_string)
3193 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3194 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3196 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3197 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict. */
3198 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3199 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames argument 'kf' (a comma-separated list of
 * timestamps and/or "chapters[+offset]" specs) into a sorted array of
 * forced keyframe pts, stored on 'ost' in the encoder time base. */
3202 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3203 AVCodecContext *avctx)
3206 int n = 1, i, size, index = 0;
/* First pass: count entries by counting commas. */
3209 for (p = kf; *p; p++)
3213 pts = av_malloc_array(size, sizeof(*pts));
3215 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3220 for (i = 0; i < n; i++) {
3221 char *next = strchr(p, ',');
/* "chapters[+offset]": add one keyframe per chapter start. */
3226 if (!memcmp(p, "chapters", 8)) {
3228 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array to hold one entry per chapter (overflow-checked). */
3231 if (avf->nb_chapters > INT_MAX - size ||
3232 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3234 av_log(NULL, AV_LOG_FATAL,
3235 "Could not allocate forced key frames array.\n");
3238 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3239 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3241 for (j = 0; j < avf->nb_chapters; j++) {
3242 AVChapter *c = avf->chapters[j];
3243 av_assert1(index < size);
3244 pts[index++] = av_rescale_q(c->start, c->time_base,
3245 avctx->time_base) + t;
/* Plain timestamp entry. */
3250 t = parse_time_or_die("force_key_frames", p, 1);
3251 av_assert1(index < size);
3252 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the list sorted so the encoder can consume it in order. */
3259 av_assert0(index == size);
3260 qsort(pts, size, sizeof(*pts), compare_int64);
3261 ost->forced_kf_count = size;
3262 ost->forced_kf_pts = pts;
/* Choose the encoder time base for 'ost': an explicit -enc_time_base
 * value wins; a negative value means "copy from the input stream";
 * otherwise fall back to the caller-provided default. */
3265 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3267 InputStream *ist = get_input_stream(ost);
3268 AVCodecContext *enc_ctx = ost->enc_ctx;
3269 AVFormatContext *oc;
3271 if (ost->enc_timebase.num > 0) {
3272 enc_ctx->time_base = ost->enc_timebase;
3276 if (ost->enc_timebase.num < 0) {
3278 enc_ctx->time_base = ist->st->time_base;
/* Negative request but no input stream available: warn and fall through. */
3282 oc = output_files[ost->file_index]->ctx;
3283 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3286 enc_ctx->time_base = default_time_base;
/* Configure the encoder context for an output stream that will be
 * encoded: disposition, frame rate selection, and per-media-type
 * parameters pulled from the buffersink feeding the encoder. */
3289 static int init_output_stream_encode(OutputStream *ost)
3291 InputStream *ist = get_input_stream(ost);
3292 AVCodecContext *enc_ctx = ost->enc_ctx;
3293 AVCodecContext *dec_ctx = NULL;
3294 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3297 set_encoder_id(output_files[ost->file_index], ost);
3299 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3300 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3301 // which have to be filtered out to prevent leaking them to output files.
3302 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3305 ost->st->disposition = ist->st->disposition;
3307 dec_ctx = ist->dec_ctx;
3309 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only audio/video stream in the file, mark it default. */
3311 for (j = 0; j < oc->nb_streams; j++) {
3312 AVStream *st = oc->streams[j];
3313 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3316 if (j == oc->nb_streams)
3317 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3318 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3319 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate selection order: -r option (already in ost->frame_rate),
 * buffersink, input -r, input r_frame_rate, and finally a 25fps default. */
3322 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3323 if (!ost->frame_rate.num)
3324 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3325 if (ist && !ost->frame_rate.num)
3326 ost->frame_rate = ist->framerate;
3327 if (ist && !ost->frame_rate.num)
3328 ost->frame_rate = ist->st->r_frame_rate;
3329 if (ist && !ost->frame_rate.num) {
3330 ost->frame_rate = (AVRational){25, 1};
3331 av_log(NULL, AV_LOG_WARNING,
3333 "about the input framerate is available. Falling "
3334 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3335 "if you want a different framerate.\n",
3336 ost->file_index, ost->index);
3338 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest frame rate the encoder supports, unless forced. */
3339 if (ost->enc->supported_framerates && !ost->force_fps) {
3340 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3341 ost->frame_rate = ost->enc->supported_framerates[idx];
3343 // reduce frame rate for mpeg4 to be within the spec limits
3344 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3345 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3346 ost->frame_rate.num, ost->frame_rate.den, 65535);
3350 switch (enc_ctx->codec_type) {
3351 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the filtergraph's buffersink. */
3352 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3354 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3355 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3356 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3357 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3358 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3360 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3363 case AVMEDIA_TYPE_VIDEO:
3364 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3366 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3367 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3368 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3369 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3370 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3371 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Convert the forced keyframe list into the final encoder time base. */
3373 for (j = 0; j < ost->forced_kf_count; j++)
3374 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3376 enc_ctx->time_base);
3378 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3379 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3380 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3381 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3382 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3383 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3385 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3387 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3388 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3390 enc_ctx->framerate = ost->frame_rate;
3392 ost->st->avg_frame_rate = ost->frame_rate;
3395 enc_ctx->width != dec_ctx->width ||
3396 enc_ctx->height != dec_ctx->height ||
3397 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3398 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* Forced keyframes: "expr:" parses an expression evaluated per frame;
 * "source" keeps input keyframes; otherwise a static timestamp list. */
3401 if (ost->forced_keyframes) {
3402 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3403 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3404 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3406 av_log(NULL, AV_LOG_ERROR,
3407 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3410 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3411 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3412 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3413 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3415 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3416 // parse it only for static kf timings
3417 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3418 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3422 case AVMEDIA_TYPE_SUBTITLE:
3423 enc_ctx->time_base = AV_TIME_BASE_Q;
/* Inherit render dimensions from the source when unset. */
3424 if (!enc_ctx->width) {
3425 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3426 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3429 case AVMEDIA_TYPE_DATA:
3436 ost->mux_timebase = enc_ctx->time_base;
3441 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3445 if (ost->encoding_needed) {
3446 AVCodec *codec = ost->enc;
3447 AVCodecContext *dec = NULL;
3450 ret = init_output_stream_encode(ost);
3454 if ((ist = get_input_stream(ost)))
3456 if (dec && dec->subtitle_header) {
3457 /* ASS code assumes this buffer is null terminated so add extra byte. */
3458 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3459 if (!ost->enc_ctx->subtitle_header)
3460 return AVERROR(ENOMEM);
3461 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3462 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3464 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3465 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3466 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3468 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3469 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3470 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3472 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3473 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3474 av_buffersink_get_format(ost->filter->filter)) {
3475 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3476 if (!ost->enc_ctx->hw_frames_ctx)
3477 return AVERROR(ENOMEM);
3479 ret = hw_device_setup_for_encode(ost);
3481 snprintf(error, error_len, "Device setup failed for "
3482 "encoder on output stream #%d:%d : %s",
3483 ost->file_index, ost->index, av_err2str(ret));
3488 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3489 if (ret == AVERROR_EXPERIMENTAL)
3490 abort_codec_experimental(codec, 1);
3491 snprintf(error, error_len,
3492 "Error while opening encoder for output stream #%d:%d - "
3493 "maybe incorrect parameters such as bit_rate, rate, width or height",
3494 ost->file_index, ost->index);
3497 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3498 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3499 av_buffersink_set_frame_size(ost->filter->filter,
3500 ost->enc_ctx->frame_size);
3501 assert_avoptions(ost->encoder_opts);
3502 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3503 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3504 " It takes bits/s as argument, not kbits/s\n");
3506 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3508 av_log(NULL, AV_LOG_FATAL,
3509 "Error initializing the output stream codec context.\n");
3513 * FIXME: ost->st->codec should't be needed here anymore.
3515 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3519 if (ost->enc_ctx->nb_coded_side_data) {
3522 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3523 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3526 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3528 return AVERROR(ENOMEM);
3529 memcpy(dst_data, sd_src->data, sd_src->size);
3534 * Add global input side data. For now this is naive, and copies it
3535 * from the input stream's global side data. All side data should
3536 * really be funneled over AVFrame and libavfilter, then added back to
3537 * packet side data, and then potentially using the first packet for
3542 for (i = 0; i < ist->st->nb_side_data; i++) {
3543 AVPacketSideData *sd = &ist->st->side_data[i];
3544 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3546 return AVERROR(ENOMEM);
3547 memcpy(dst, sd->data, sd->size);
3548 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3549 av_display_rotation_set((uint32_t *)dst, 0);
3553 // copy timebase while removing common factors
3554 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3555 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3557 // copy estimated duration as a hint to the muxer
3558 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3559 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3561 ost->st->codec->codec= ost->enc_ctx->codec;
3562 } else if (ost->stream_copy) {
3563 ret = init_output_stream_streamcopy(ost);
3568 * FIXME: will the codec context used by the parser during streamcopy
3569 * This should go away with the new parser API.
3571 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3576 // parse user provided disposition, and update stream values
3577 if (ost->disposition) {
3578 static const AVOption opts[] = {
3579 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3580 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3581 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3582 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3583 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3584 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3585 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3586 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3587 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3588 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3589 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3590 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3591 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3592 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3595 static const AVClass class = {
3597 .item_name = av_default_item_name,
3599 .version = LIBAVUTIL_VERSION_INT,
3601 const AVClass *pclass = &class;
3603 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3608 /* initialize bitstream filters for the output stream
3609 * needs to be done here, because the codec id for streamcopy is not
3610 * known until now */
3611 ret = init_output_bsfs(ost);
3615 ost->initialized = 1;
3617 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/*
 * Warn (at most once per stream index) about a stream that appeared in the
 * input after demuxing started; such late streams are ignored by ffmpeg.
 * NOTE(review): this listing has lines elided (braces / early return) —
 * compare against the full source before relying on control flow.
 */
3624 static void report_new_stream(int input_index, AVPacket *pkt)
3626 InputFile *file = input_files[input_index];
3627 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a later) stream index — presumably returns here. */
3629 if (pkt->stream_index < file->nb_streams_warn)
3631 av_log(file->ctx, AV_LOG_WARNING,
3632 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3633 av_get_media_type_string(st->codecpar->codec_type),
3634 input_index, pkt->stream_index,
3635 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the highest stream index warned about so far. */
3636 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * One-time setup before the main transcoding loop: bind complex-filtergraph
 * outputs to source streams, start framerate-emulation clocks, open all
 * input-stream decoders, open encoders for outputs not fed by filtergraphs,
 * discard unused programs, write headers for stream-less outputs, and dump
 * the stream mapping to the log. Returns 0 on success, negative AVERROR on
 * failure.
 * NOTE(review): this listing has interior lines elided; read alongside the
 * complete source.
 */
3639 static int transcode_init(void)
3641 int ret = 0, i, j, k;
3642 AVFormatContext *oc;
3645 char error[1024] = {0};
/* For each complex-graph output stream with no source yet, and a graph with
 * exactly one input, record that input's stream index as the source. */
3647 for (i = 0; i < nb_filtergraphs; i++) {
3648 FilterGraph *fg = filtergraphs[i];
3649 for (j = 0; j < fg->nb_outputs; j++) {
3650 OutputFilter *ofilter = fg->outputs[j];
3651 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3653 if (fg->nb_inputs != 1)
3655 for (k = nb_input_streams-1; k >= 0 ; k--)
3656 if (fg->inputs[0]->ist == input_streams[k])
3658 ofilter->ost->source_index = k;
3662 /* init framerate emulation */
3663 for (i = 0; i < nb_input_files; i++) {
3664 InputFile *ifile = input_files[i];
/* -re: record a wall-clock start time per stream for real-time pacing. */
3665 if (ifile->rate_emu)
3666 for (j = 0; j < ifile->nb_streams; j++)
3667 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3670 /* init input streams */
3671 for (i = 0; i < nb_input_streams; i++)
3672 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoder contexts opened so far before bailing out. */
3673 for (i = 0; i < nb_output_streams; i++) {
3674 ost = output_streams[i];
3675 avcodec_close(ost->enc_ctx);
3680 /* open each encoder */
3681 for (i = 0; i < nb_output_streams; i++) {
3682 // skip streams fed from filtergraphs until we have a frame for them
3683 if (output_streams[i]->filter)
3686 ret = init_output_stream(output_streams[i], error, sizeof(error));
3691 /* discard unused programs */
3692 for (i = 0; i < nb_input_files; i++) {
3693 InputFile *ifile = input_files[i];
3694 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3695 AVProgram *p = ifile->ctx->programs[j];
3696 int discard = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is actually used. */
3698 for (k = 0; k < p->nb_stream_indexes; k++)
3699 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3700 discard = AVDISCARD_DEFAULT;
3703 p->discard = discard;
3707 /* write headers for files with no streams */
3708 for (i = 0; i < nb_output_files; i++) {
3709 oc = output_files[i]->ctx;
3710 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3711 ret = check_init_output_file(output_files[i], i);
3718 /* dump the stream mapping */
3719 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3720 for (i = 0; i < nb_input_streams; i++) {
3721 ist = input_streams[i];
/* Report inputs feeding complex (non-simple) filtergraphs. */
3723 for (j = 0; j < ist->nb_filters; j++) {
3724 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3725 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3726 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3727 ist->filters[j]->name);
3728 if (nb_filtergraphs > 1)
3729 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3730 av_log(NULL, AV_LOG_INFO, "\n");
3735 for (i = 0; i < nb_output_streams; i++) {
3736 ost = output_streams[i];
3738 if (ost->attachment_filename) {
3739 /* an attached file */
3740 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3741 ost->attachment_filename, ost->file_index, ost->index);
3745 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3746 /* output from a complex graph */
3747 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3748 if (nb_filtergraphs > 1)
3749 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3751 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3752 ost->index, ost->enc ? ost->enc->name : "?");
/* Plain input->output mapping, with optional sync stream and codec names. */
3756 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3757 input_streams[ost->source_index]->file_index,
3758 input_streams[ost->source_index]->st->index,
3761 if (ost->sync_ist != input_streams[ost->source_index])
3762 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3763 ost->sync_ist->file_index,
3764 ost->sync_ist->st->index);
3765 if (ost->stream_copy)
3766 av_log(NULL, AV_LOG_INFO, " (copy)");
3768 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3769 const AVCodec *out_codec = ost->enc;
3770 const char *decoder_name = "?";
3771 const char *in_codec_name = "?";
3772 const char *encoder_name = "?";
3773 const char *out_codec_name = "?";
3774 const AVCodecDescriptor *desc;
/* Show "native" when the decoder/encoder name matches the generic codec
 * descriptor name (i.e. the built-in implementation is used). */
3777 decoder_name = in_codec->name;
3778 desc = avcodec_descriptor_get(in_codec->id);
3780 in_codec_name = desc->name;
3781 if (!strcmp(decoder_name, in_codec_name))
3782 decoder_name = "native";
3786 encoder_name = out_codec->name;
3787 desc = avcodec_descriptor_get(out_codec->id);
3789 out_codec_name = desc->name;
3790 if (!strcmp(encoder_name, out_codec_name))
3791 encoder_name = "native";
3794 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3795 in_codec_name, decoder_name,
3796 out_codec_name, encoder_name);
3798 av_log(NULL, AV_LOG_INFO, "\n");
/* Error path (reached via elided goto, presumably): dump the message. */
3802 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Mark init complete so e.g. signal handlers / status printing know. */
3806 atomic_store(&transcode_init_done, 1);
3811 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3812 static int need_output(void)
3816 for (i = 0; i < nb_output_streams; i++) {
3817 OutputStream *ost = output_streams[i];
3818 OutputFile *of = output_files[ost->file_index];
3819 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Stream already finished, or its file hit -fs (limit_filesize): skip it. */
3821 if (ost->finished ||
3822 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames reached: close every stream of this output file. */
3824 if (ost->frame_number >= ost->max_frames) {
3826 for (j = 0; j < of->ctx->nb_streams; j++)
3827 close_output_stream(output_streams[of->ost_index + j]);
3838 * Select the output stream to process.
3840 * @return selected output stream, or NULL if none available
/*
 * Pick the output stream to feed next: the unfinished stream with the
 * smallest current DTS (rescaled to a common time base; AV_NOPTS_VALUE
 * sorts first as INT64_MIN). Returns NULL when the best candidate is
 * currently unavailable, so the caller can try reading more input.
 */
3842 static OutputStream *choose_output(void)
3845 int64_t opts_min = INT64_MAX;
3846 OutputStream *ost_min = NULL;
3848 for (i = 0; i < nb_output_streams; i++) {
3849 OutputStream *ost = output_streams[i];
3850 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3851 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3853 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3854 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Not yet initialized and still expecting input: presumably skipped here. */
3856 if (!ost->initialized && !ost->inputs_done)
3859 if (!ost->finished && opts < opts_min) {
3861 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin; best-effort —
 * silently does nothing when stdin is not a tty (tcgetattr fails). */
3867 static void set_tty_echo(int on)
3871 if (tcgetattr(0, &tty) == 0) {
3872 if (on) tty.c_lflag |= ECHO;
3873 else tty.c_lflag &= ~ECHO;
3874 tcsetattr(0, TCSANOW, &tty);
/*
 * Poll for interactive keyboard commands (at most every 100ms when not a
 * daemon). Handles: 'q' quit (presumably, elided), '+'/'-' verbosity,
 * 's' QP histogram, packet/hex-dump cycling, 'c'/'C' filter commands,
 * 'd'/'D' debug modes, '?' help. Returns AVERROR_EXIT to stop transcoding,
 * 0 otherwise.
 * NOTE(review): several guards and the quit handling are elided from this
 * listing.
 */
3879 static int check_keyboard_interaction(int64_t cur_time)
3882 static int64_t last_time;
/* A termination signal was received: abort the main loop. */
3883 if (received_nb_signals)
3884 return AVERROR_EXIT;
3885 /* read_key() returns 0 on EOF */
3886 if(cur_time - last_time >= 100000 && !run_as_daemon){
3888 last_time = cur_time;
3892 return AVERROR_EXIT;
3893 if (key == '+') av_log_set_level(av_log_get_level()+10);
3894 if (key == '-') av_log_set_level(av_log_get_level()-10);
3895 if (key == 's') qp_hist ^= 1;
/* Cycle packet-dump state: off -> dump -> dump+hex -> off. */
3898 do_hex_dump = do_pkt_dump = 0;
3899 } else if(do_pkt_dump){
3903 av_log_set_level(AV_LOG_DEBUG);
/* 'c': send a command to the first matching filter; 'C': send/queue to all. */
3905 if (key == 'c' || key == 'C'){
3906 char buf[4096], target[64], command[256], arg[256] = {0};
3909 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read one echoed line from the terminal into buf. */
3912 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3917 fprintf(stderr, "\n");
3919 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3920 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3921 target, time, command, arg);
3922 for (i = 0; i < nb_filtergraphs; i++) {
3923 FilterGraph *fg = filtergraphs[i];
/* Immediate command (time < 0, presumably): send now; buf receives the reply. */
3926 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3927 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3928 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3929 } else if (key == 'c') {
3930 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3931 ret = AVERROR_PATCHWELCOME;
/* Deferred command: queue for execution at the given time. */
3933 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3935 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3940 av_log(NULL, AV_LOG_ERROR,
3941 "Parse error, at least 3 arguments were expected, "
3942 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle through supported debug flags; 'D': prompt for a numeric value. */
3945 if (key == 'd' || key == 'D'){
3948 debug = input_streams[0]->st->codec->debug<<1;
3949 if(!debug) debug = 1;
3950 while(debug & (FF_DEBUG_DCT_COEFF
3952 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3954 )) //unsupported, would just crash
3961 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3966 fprintf(stderr, "\n");
3967 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3968 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3970 for(i=0;i<nb_input_streams;i++) {
3971 input_streams[i]->st->codec->debug = debug;
3973 for(i=0;i<nb_output_streams;i++) {
3974 OutputStream *ost = output_streams[i];
3975 ost->enc_ctx->debug = debug;
3977 if(debug) av_log_set_level(AV_LOG_DEBUG);
3978 fprintf(stderr,"debug=%d\n", debug);
/* '?' (presumably): print the interactive-key help. */
3981 fprintf(stderr, "key function\n"
3982 "? show this help\n"
3983 "+ increase verbosity\n"
3984 "- decrease verbosity\n"
3985 "c Send command to first matching filter supporting it\n"
3986 "C Send/Queue command to all matching filters\n"
3987 "D cycle through available debug modes\n"
3988 "h dump packets/hex press to cycle through the 3 states\n"
3990 "s Show QP histogram\n"
/*
 * Per-input-file reader thread (arg is the InputFile*, presumably assigned
 * to f on an elided line). Loops reading packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. On a read
 * error or EOF the error is recorded on the queue so the receiving side
 * sees it; in non-blocking mode a full queue triggers one warning and a
 * blocking retry.
 */
3997 static void *input_thread(void *arg)
4000 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4005 ret = av_read_frame(f->ctx, &pkt);
/* Transient demuxer EAGAIN: presumably sleep/retry (elided). */
4007 if (ret == AVERROR(EAGAIN)) {
/* Read error or EOF: propagate to the receiver and stop the loop. */
4012 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4015 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: warn once, then retry blocking. */
4016 if (flags && ret == AVERROR(EAGAIN)) {
4018 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4019 av_log(f->ctx, AV_LOG_WARNING,
4020 "Thread message queue blocking; consider raising the "
4021 "thread_queue_size option (current value: %d)\n",
4022 f->thread_queue_size);
/* Send failed: drop the packet and record the error for the receiver. */
4025 if (ret != AVERROR_EOF)
4026 av_log(f->ctx, AV_LOG_ERROR,
4027 "Unable to send packet to main thread: %s\n",
4029 av_packet_unref(&pkt);
4030 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Shut down all input reader threads: signal EOF to each sender, drain and
 * unref any queued packets, join the thread, then free the message queue.
 */
4038 static void free_input_threads(void)
4042 for (i = 0; i < nb_input_files; i++) {
4043 InputFile *f = input_files[i];
/* No thread was started for this file (e.g. single-input run): skip. */
4046 if (!f || !f->in_thread_queue)
4048 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4049 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4050 av_packet_unref(&pkt);
4052 pthread_join(f->thread, NULL);
4054 av_thread_message_queue_free(&f->in_thread_queue);
/*
 * Start one reader thread per input file. Skipped entirely (presumably
 * returns 0, elided) when there is only one input file, since packets can
 * then be read directly on the main thread.
 */
4058 static int init_input_threads(void)
4062 if (nb_input_files == 1)
4065 for (i = 0; i < nb_input_files; i++) {
4066 InputFile *f = input_files[i];
/* Non-seekable input (or the lavfi pseudo-demuxer exception): use a
 * non-blocking queue so a slow consumer cannot stall a live source. */
4068 if (f->ctx->pb ? !f->ctx->pb->seekable :
4069 strcmp(f->ctx->iformat->name, "lavfi"))
4070 f->non_blocking = 1;
4071 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4072 f->thread_queue_size, sizeof(AVPacket));
4076 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4077 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4078 av_thread_message_queue_free(&f->in_thread_queue);
4079 return AVERROR(ret);
/* Receive one packet from the input file's reader-thread queue; uses a
 * non-blocking receive when f is in non-blocking mode (condition elided). */
4085 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4087 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4089 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Fetch the next packet from an input file. With framerate emulation (-re,
 * guard presumably elided) first compares each stream's decoded DTS
 * (converted to microseconds) against elapsed wall-clock time and returns
 * EAGAIN while the input is ahead of real time. With multiple input files
 * the packet comes from the reader thread's queue, otherwise directly from
 * av_read_frame().
 */
4093 static int get_input_packet(InputFile *f, AVPacket *pkt)
4097 for (i = 0; i < f->nb_streams; i++) {
4098 InputStream *ist = input_streams[f->ist_index + i];
4099 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4100 int64_t now = av_gettime_relative() - ist->start;
4102 return AVERROR(EAGAIN);
4107 if (nb_input_files > 1)
4108 return get_input_packet_mt(f, pkt);
4110 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. the last round could not make progress on it. */
4113 static int got_eagain(void)
4116 for (i = 0; i < nb_output_streams; i++)
4117 if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-stream unavailable flags so the next
 * scheduling round starts fresh. */
4122 static void reset_eagain(void)
4125 for (i = 0; i < nb_input_files; i++)
4126 input_files[i]->eagain = 0;
4127 for (i = 0; i < nb_output_streams; i++)
4128 output_streams[i]->unavailable = 0;
4131 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compare tmp (in tmp_time_base) against *duration (in time_base) with
 * av_compare_ts(); keep the larger value in *duration (assignment elided)
 * and return the time base that now applies to *duration. */
4132 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4133 AVRational time_base)
4139 return tmp_time_base;
4142 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4145 return tmp_time_base;
/*
 * Rewind an input file for -stream_loop: seek back to the start, flush all
 * active decoders, then fold this iteration's observed duration (span of
 * timestamps plus the estimated duration of the last frame) into
 * ifile->duration so looped timestamps can be offset correctly.
 * NOTE(review): lines are elided in this listing (e.g. the has_audio
 * assignment and parts of the branch structure).
 */
4151 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4154 AVCodecContext *avctx;
4155 int i, ret, has_audio = 0;
4156 int64_t duration = 0;
4158 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush decoders and detect whether any audio was decoded. */
4162 for (i = 0; i < ifile->nb_streams; i++) {
4163 ist = input_streams[ifile->ist_index + i];
4164 avctx = ist->dec_ctx;
4167 if (ist->decoding_needed) {
4168 process_input_packet(ist, NULL, 1);
4169 avcodec_flush_buffers(avctx);
4172 /* duration is the length of the last frame in a stream
4173 * when audio stream is present we don't care about
4174 * last video frame length because it's not defined exactly */
4175 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: estimate the last-frame duration per stream. */
4179 for (i = 0; i < ifile->nb_streams; i++) {
4180 ist = input_streams[ifile->ist_index + i];
4181 avctx = ist->dec_ctx;
/* Audio: last-frame length = nb_samples at the stream's sample rate. */
4184 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4185 AVRational sample_rate = {1, avctx->sample_rate};
4187 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: one frame at the forced (-r) or average frame rate. */
4192 if (ist->framerate.num) {
4193 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4194 } else if (ist->st->avg_frame_rate.num) {
4195 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4200 if (!ifile->duration)
4201 ifile->time_base = ist->st->time_base;
4202 /* the total duration of the stream, max_pts - min_pts is
4203 * the duration of the stream without the last frame */
4204 duration += ist->max_pts - ist->min_pts;
4205 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Finite loop count: decrement remaining iterations (elided). */
4209 if (ifile->loop > 0)
4217 * - 0 -- one packet was read and processed
4218 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4219 * this function should be called again
4220 * - AVERROR_EOF -- this function should not be called again
/*
 * Read and process one packet from input file file_index.
 * Returns 0 when a packet was consumed, AVERROR(EAGAIN) when no packet was
 * available (or at EOF, so the caller retries other files), AVERROR_EOF
 * when this file should not be read again. Handles -stream_loop rewinds,
 * EOF decoder flushing, timestamp wrap correction, -itsoffset/-itsscale,
 * discontinuity detection and side-data propagation before handing the
 * packet to process_input_packet().
 * NOTE(review): this listing elides many guard/brace lines; treat the
 * control flow as indicative only.
 */
4222 static int process_input(int file_index)
4224 InputFile *ifile = input_files[file_index];
4225 AVFormatContext *is;
4233 ret = get_input_packet(ifile, &pkt);
/* Nothing available right now: mark the file and let the caller retry. */
4235 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on EOF/error, seek back to the start and read again. */
4239 if (ret < 0 && ifile->loop) {
4240 ret = seek_to_start(ifile, is);
4242 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4244 ret = get_input_packet(ifile, &pkt);
4245 if (ret == AVERROR(EAGAIN)) {
4251 if (ret != AVERROR_EOF) {
4252 print_error(is->filename, ret);
/* EOF: flush every decoder of this file with a NULL packet. */
4257 for (i = 0; i < ifile->nb_streams; i++) {
4258 ist = input_streams[ifile->ist_index + i];
4259 if (ist->decoding_needed) {
4260 ret = process_input_packet(ist, NULL, 0);
4265 /* mark all outputs that don't go through lavfi as finished */
4266 for (j = 0; j < nb_output_streams; j++) {
4267 OutputStream *ost = output_streams[j];
4269 if (ost->source_index == ifile->ist_index + i &&
4270 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4271 finish_output_stream(ost);
4275 ifile->eof_reached = 1;
4276 return AVERROR(EAGAIN);
4282 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4283 is->streams[pkt.stream_index]);
4285 /* the following test is needed in case new streams appear
4286 dynamically in stream : we ignore them */
4287 if (pkt.stream_index >= ifile->nb_streams) {
4288 report_new_stream(file_index, &pkt);
4289 goto discard_packet;
4292 ist = input_streams[ifile->ist_index + pkt.stream_index];
4294 ist->data_size += pkt.size;
/* Stream is discarded (condition elided): drop the packet. */
4298 goto discard_packet;
4300 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4301 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* Debug dump of raw demuxed timestamps (guard elided). */
4306 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4307 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4308 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4309 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4310 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4311 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4312 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4313 av_ts2str(input_files[ist->file_index]->ts_offset),
4314 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap handling for streams with < 64 pts_wrap_bits. */
4317 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4318 int64_t stime, stime2;
4319 // Correcting starttime based on the enabled streams
4320 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4321 // so we instead do it here as part of discontinuity handling
4322 if ( ist->next_dts == AV_NOPTS_VALUE
4323 && ifile->ts_offset == -is->start_time
4324 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4325 int64_t new_start_time = INT64_MAX;
4326 for (i=0; i<is->nb_streams; i++) {
4327 AVStream *st = is->streams[i];
4328 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4330 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4332 if (new_start_time > is->start_time) {
4333 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4334 ifile->ts_offset = -new_start_time;
4338 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4339 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4340 ist->wrap_correction_done = 1;
/* Unwrap timestamps that jumped past the wrap point. */
4342 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4343 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4344 ist->wrap_correction_done = 0;
4346 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4347 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4348 ist->wrap_correction_done = 0;
4352 /* add the stream-global side data to the first packet */
4353 if (ist->nb_packets == 1) {
4354 for (i = 0; i < ist->st->nb_side_data; i++) {
4355 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Display matrix is handled via autorotate elsewhere: skip it here. */
4358 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
/* Don't overwrite side data the packet already carries. */
4361 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4364 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4368 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the accumulated input timestamp offset (-ss / discontinuities). */
4372 if (pkt.dts != AV_NOPTS_VALUE)
4373 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4374 if (pkt.pts != AV_NOPTS_VALUE)
4375 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
/* -itsscale: scale the timestamps. */
4377 if (pkt.pts != AV_NOPTS_VALUE)
4378 pkt.pts *= ist->ts_scale;
4379 if (pkt.dts != AV_NOPTS_VALUE)
4380 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check against the file's last seen DTS,
 * only before any packet of this stream was decoded (next_dts unset). */
4382 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4383 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4384 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4385 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4386 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4387 int64_t delta = pkt_dts - ifile->last_ts;
4388 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4389 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4390 ifile->ts_offset -= delta;
4391 av_log(NULL, AV_LOG_DEBUG,
4392 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4393 delta, ifile->ts_offset);
4394 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4395 if (pkt.pts != AV_NOPTS_VALUE)
4396 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated loop duration. */
4400 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4401 if (pkt.pts != AV_NOPTS_VALUE) {
4402 pkt.pts += duration;
4403 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4404 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4407 if (pkt.dts != AV_NOPTS_VALUE)
4408 pkt.dts += duration;
/* Intra-stream discontinuity check against the predicted next DTS. */
4410 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4411 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4412 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4413 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4415 int64_t delta = pkt_dts - ist->next_dts;
4416 if (is->iformat->flags & AVFMT_TS_DISCONT) {
/* Formats with known discontinuities (e.g. MPEG-TS): absorb the jump
 * into ts_offset instead of dropping timestamps. */
4417 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4418 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4419 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4420 ifile->ts_offset -= delta;
4421 av_log(NULL, AV_LOG_DEBUG,
4422 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4423 delta, ifile->ts_offset);
4424 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4425 if (pkt.pts != AV_NOPTS_VALUE)
4426 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Other formats: timestamps beyond the error threshold are invalidated. */
4429 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4430 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4431 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4432 pkt.dts = AV_NOPTS_VALUE;
4434 if (pkt.pts != AV_NOPTS_VALUE){
4435 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4436 delta = pkt_pts - ist->next_dts;
4437 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4438 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4439 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4440 pkt.pts = AV_NOPTS_VALUE;
/* Remember this file's latest DTS for the inter-stream check above. */
4446 if (pkt.dts != AV_NOPTS_VALUE)
4447 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Debug dump of the adjusted timestamps (guard elided). */
4450 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4451 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4452 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4453 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4454 av_ts2str(input_files[ist->file_index]->ts_offset),
4455 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4458 sub2video_heartbeat(ist, pkt.pts);
4460 process_input_packet(ist, &pkt, 0);
4463 av_packet_unref(&pkt);
4469 * Perform a step of transcoding for the specified filter graph.
4471 * @param[in] graph filter graph to consider
4472 * @param[out] best_ist input stream where a frame would allow to continue
4473 * @return 0 for success, <0 for error
/*
 * Perform a step of transcoding for the specified filter graph.
 * Asks the graph for its oldest pending frame; on success reaps filter
 * output, on EOF reaps and closes the graph's output streams. On EAGAIN,
 * picks the graph input whose buffersrc reported the most failed requests
 * (i.e. is hungriest) and returns it in *best_ist so the caller can feed
 * that input; if no input can supply data, marks the graph's outputs
 * unavailable. Returns 0 for success, <0 for error.
 */
4475 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4478 int nb_requests, nb_requests_max = 0;
4479 InputFilter *ifilter;
4483 ret = avfilter_graph_request_oldest(graph->graph);
4485 return reap_filters(0);
4487 if (ret == AVERROR_EOF) {
4488 ret = reap_filters(1);
4489 for (i = 0; i < graph->nb_outputs; i++)
4490 close_output_stream(graph->outputs[i]->ost);
4493 if (ret != AVERROR(EAGAIN))
/* EAGAIN: find the input stream that would best unblock the graph. */
4496 for (i = 0; i < graph->nb_inputs; i++) {
4497 ifilter = graph->inputs[i];
/* Inputs whose file is stalled or exhausted cannot help. */
4499 if (input_files[ist->file_index]->eagain ||
4500 input_files[ist->file_index]->eof_reached)
4502 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4503 if (nb_requests > nb_requests_max) {
4504 nb_requests_max = nb_requests;
/* No feedable input found (presumably !*best_ist, elided): park outputs. */
4510 for (i = 0; i < graph->nb_outputs; i++)
4511 graph->outputs[i]->ost->unavailable = 1;
4517 * Run a single step of transcoding.
4519 * @return 0 for success, <0 for error
/*
 * Run a single step of transcoding: choose the neediest output stream,
 * (re)configure and initialize its filtergraph/encoder if possible,
 * determine which input stream to read from, consume one input packet and
 * reap any filtered frames. Returns 0 for success, <0 for error.
 * NOTE(review): several guards/braces are elided from this listing.
 */
4521 static int transcode_step(void)
4524 InputStream *ist = NULL;
4527 ost = choose_output();
/* No selectable output and nothing pending: we are done. */
4534 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtergraph exists but is not configured yet: configure it once all its
 * inputs know their formats. */
4538 if (ost->filter && !ost->filter->graph->graph) {
4539 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4540 ret = configure_filtergraph(ost->filter->graph);
4542 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4548 if (ost->filter && ost->filter->graph->graph) {
4549 if (!ost->initialized) {
4550 char error[1024] = {0};
4551 ret = init_output_stream(ost, error, sizeof(error));
4553 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4554 ost->file_index, ost->index, error);
/* Let the graph pick which input stream should be fed next. */
4558 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4562 } else if (ost->filter) {
/* Unconfigured graph: feed any input that has produced nothing yet. */
4564 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4565 InputFilter *ifilter = ost->filter->graph->inputs[i];
4566 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* All inputs exhausted without output: mark this stream's inputs done. */
4572 ost->inputs_done = 1;
/* No filtergraph (streamcopy / direct encode): read from the source stream. */
4576 av_assert0(ost->source_index >= 0);
4577 ist = input_streams[ost->source_index];
4580 ret = process_input(ist->file_index);
4581 if (ret == AVERROR(EAGAIN)) {
4582 if (input_files[ist->file_index]->eagain)
4583 ost->unavailable = 1;
4588 return ret == AVERROR_EOF ? 0 : ret;
4590 return reap_filters(0);
4594 * The following code is the main loop of the file converter
/*
 * Main loop of the converter: initialize everything, iterate
 * transcode_step() until no output is needed or an error/interrupt occurs,
 * flush decoders, write trailers, close codecs and release resources.
 * Returns 0 on success, negative AVERROR otherwise.
 * NOTE(review): this listing elides declarations, guards and the cleanup
 * label; control flow is indicative.
 */
4596 static int transcode(void)
4599 AVFormatContext *os;
4602 int64_t timer_start;
4603 int64_t total_packets_written = 0;
4605 ret = transcode_init();
4609 if (stdin_interaction) {
4610 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4613 timer_start = av_gettime_relative();
/* With threaded input (multiple files), spawn the reader threads. */
4616 if ((ret = init_input_threads()) < 0)
4620 while (!received_sigterm) {
4621 int64_t cur_time= av_gettime_relative();
4623 /* if 'q' pressed, exits */
4624 if (stdin_interaction)
4625 if (check_keyboard_interaction(cur_time) < 0)
4628 /* check if there's any stream where output is still needed */
4629 if (!need_output()) {
4630 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4634 ret = transcode_step();
4635 if (ret < 0 && ret != AVERROR_EOF) {
4637 av_strerror(ret, errbuf, sizeof(errbuf));
4639 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4643 /* dump report by using the output first video and audio streams */
4644 print_report(0, timer_start, cur_time);
4647 free_input_threads();
4650 /* at the end of stream, we must flush the decoder buffers */
4651 for (i = 0; i < nb_input_streams; i++) {
4652 ist = input_streams[i];
4653 if (!input_files[ist->file_index]->eof_reached) {
4654 process_input_packet(ist, NULL, 0);
4661 /* write the trailer if needed and close file */
4662 for (i = 0; i < nb_output_files; i++) {
4663 os = output_files[i]->ctx;
4664 if (!output_files[i]->header_written) {
4665 av_log(NULL, AV_LOG_ERROR,
4666 "Nothing was written into output file %d (%s), because "
4667 "at least one of its streams received no packets.\n",
4671 if ((ret = av_write_trailer(os)) < 0) {
4672 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4678 /* dump report by using the first video and audio streams */
4679 print_report(1, timer_start, av_gettime_relative());
4681 /* close each encoder */
4682 for (i = 0; i < nb_output_streams; i++) {
4683 ost = output_streams[i];
4684 if (ost->encoding_needed) {
4685 av_freep(&ost->enc_ctx->stats_in);
4687 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was ever written. */
4690 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4691 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4695 /* close each decoder */
4696 for (i = 0; i < nb_input_streams; i++) {
4697 ist = input_streams[i];
4698 if (ist->decoding_needed) {
4699 avcodec_close(ist->dec_ctx);
4700 if (ist->hwaccel_uninit)
4701 ist->hwaccel_uninit(ist->dec_ctx);
4705 av_buffer_unref(&hw_device_ctx);
4706 hw_device_free_all();
/* Cleanup path (label elided): free threads and per-stream allocations. */
4713 free_input_threads();
4716 if (output_streams) {
4717 for (i = 0; i < nb_output_streams; i++) {
4718 ost = output_streams[i];
/* Two-pass log file: close it and report any loss on failure. */
4721 if (fclose(ost->logfile))
4722 av_log(NULL, AV_LOG_ERROR,
4723 "Error closing logfile, loss of information possible: %s\n",
4724 av_err2str(AVERROR(errno)));
4725 ost->logfile = NULL;
4727 av_freep(&ost->forced_kf_pts);
4728 av_freep(&ost->apad);
4729 av_freep(&ost->disposition);
4730 av_dict_free(&ost->encoder_opts);
4731 av_dict_free(&ost->sws_dict);
4732 av_dict_free(&ost->swr_opts);
4733 av_dict_free(&ost->resample_opts);
4741 static int64_t getutime(void)
4744 struct rusage rusage;
4746 getrusage(RUSAGE_SELF, &rusage);
4747 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4748 #elif HAVE_GETPROCESSTIMES
4750 FILETIME c, e, k, u;
4751 proc = GetCurrentProcess();
4752 GetProcessTimes(proc, &c, &e, &k, &u);
4753 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4755 return av_gettime_relative();
/*
 * getmaxrss(): return this process's peak memory usage, in bytes, for the
 * final benchmark report.  NOTE(review): the #else branch (no API available)
 * is not visible in this excerpt.
 */
4759 static int64_t getmaxrss(void)
4761 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4762 struct rusage rusage;
4763 getrusage(RUSAGE_SELF, &rusage);
/* The *1024 assumes ru_maxrss is reported in kibibytes (true on Linux;
 * macOS reports bytes) -- converted here to bytes. */
4764 return (int64_t)rusage.ru_maxrss * 1024;
4765 #elif HAVE_GETPROCESSMEMORYINFO
4767 PROCESS_MEMORY_COUNTERS memcounters;
4768 proc = GetCurrentProcess();
/* Win32 requires cb to hold the structure size before the call. */
4769 memcounters.cb = sizeof(memcounters);
4770 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* PeakPagefileUsage: peak committed memory of the process, in bytes. */
4771 return memcounters.PeakPagefileUsage;
/* No-op av_log callback installed when ffmpeg is started with "-d"
 * (run_as_daemon), silencing all libav* logging.  Body elided in this
 * excerpt; it is expected to be empty. */
4777 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: set up logging, register all components, parse the
 * command line, run transcode(), then emit benchmark and decode-error
 * statistics before exiting through exit_program().
 */
4781 int main(int argc, char **argv)
/* Install the cleanup handler that exit_program() invokes on any exit path. */
4788 register_exit(ffmpeg_cleanup);
4790 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4792 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Scan argv for the log-level option early so it already applies while the
 * remaining options are being parsed. */
4793 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: daemon mode -- suppress all logging. */
4795 if(argc>1 && !strcmp(argv[1], "-d")){
4797 av_log_set_callback(log_callback_null);
/* Register every codec, device, filter and (de)muxer before use.
 * NOTE(review): these registration calls were deprecated/no-ops as of
 * FFmpeg 4.0; this code targets the older API. */
4802 avcodec_register_all();
4804 avdevice_register_all();
4806 avfilter_register_all();
4808 avformat_network_init();
4810 show_banner(argc, argv, options);
4812 /* parse options and open all input/output files */
4813 ret = ffmpeg_parse_options(argc, argv);
/* Invoked with neither inputs nor outputs: point the user at the help. */
4817 if (nb_output_files <= 0 && nb_input_files == 0) {
4819 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4823 /* file converter / grab */
4824 if (nb_output_files <= 0) {
4825 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4829 // if (nb_input_files == 0) {
4830 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Presumably clears the SDP flag when any output muxer is not RTP; the loop
 * body is elided in this excerpt -- confirm against the full source. */
4834 for (i = 0; i < nb_output_files; i++) {
4835 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Time the whole transcode run in user CPU time (see getutime()). */
4839 current_time = ti = getutime();
4840 if (transcode() < 0)
4842 ti = getutime() - ti;
4844 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4846 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4847 decode_error_stat[0], decode_error_stat[1]);
/* Fail when the fraction of erroring frames exceeds the configured
 * max_error_rate (cross-multiplied to avoid division). */
4848 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit 255 if a termination signal was received; otherwise propagate the
 * accumulated main return code. */
4851 exit_program(received_nb_signals ? 255 : main_return_code);
4852 return main_return_code;