2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity consumed by cmdutils (banner/version output). */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Output file for per-frame video statistics (-vstats); opened lazily in
 * do_video_stats(). */
112 static FILE *vstats_file;
/* Names of the constants usable in -force_key_frames expressions.
 * NOTE(review): the initializer lines are elided in this listing. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Runtime state and counters shared across the transcoding loop. */
128 static int run_as_daemon = 0;
129 static int nb_frames_dup = 0;
/* Threshold for the "More than N frames duplicated" warning. */
130 static unsigned dup_warning = 1000;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
/* Whether an SDP should still be printed (RTP/SAP outputs). */
134 static int want_sdp = 1;
/* Reference timestamp used by update_benchmark(); presumably updated there
 * (update lines elided in this listing) -- TODO confirm type vs. PRIu64 use. */
136 static int current_time;
137 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, allocated on first use in
 * do_subtitle_out() and freed in ffmpeg_cleanup(). */
139 static uint8_t *subtitle_out;
/* Global tables of input/output files and streams; owned by this file and
 * released in ffmpeg_cleanup(). */
141 InputStream **input_streams = NULL;
142 int nb_input_streams = 0;
143 InputFile **input_files = NULL;
144 int nb_input_files = 0;
146 OutputStream **output_streams = NULL;
147 int nb_output_streams = 0;
148 OutputFile **output_files = NULL;
149 int nb_output_files = 0;
151 FilterGraph **filtergraphs;
156 /* init terminal so that we can grab keys */
157 static struct termios oldtty;
158 static int restore_tty;
162 static void free_input_threads(void);
/* sub2video hack: render decoded subtitles onto RGB32 video frames so they
 * can be fed into filter graphs (e.g. overlay). */
166 Convert subtitles to video with alpha to insert them in filter graphs.
167 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame and reallocate it as an all-zero (fully
 * transparent) RGB32 canvas.  Dimensions come from the decoder context when
 * known, else from the stored sub2video w/h.
 * NOTE(review): braces, `int ret;` and the return statements are elided in
 * this listing; visible code kept byte-identical. */
170 static int sub2video_get_blank_frame(InputStream *ist)
173 AVFrame *frame = ist->sub2video.frame;
175 av_frame_unref(frame);
176 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
177 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
178 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte buffer alignment requested for the frame data */
179 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* zeroed RGB32 == transparent black canvas */
181 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted (PAL8) subtitle rectangle onto the RGB32 canvas `dst`,
 * expanding each 8-bit index through the rect's palette (r->data[1]).
 * Non-bitmap rects and rects outside the w x h canvas are rejected with a
 * warning (early-return lines elided in this listing). */
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
188 uint32_t *pal, *dst2;
192 if (r->type != SUBTITLE_BITMAP) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
196 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
198 r->x, r->y, r->w, r->h, w, h
/* advance to the rect's top-left pixel: 4 bytes per RGB32 pixel */
203 dst += r->y * dst_linesize + r->x * 4;
205 pal = (uint32_t *)r->data[1];
206 for (y = 0; y < r->h; y++) {
207 dst2 = (uint32_t *)dst;
/* palette lookup: one 32-bit output pixel per 8-bit source index */
209 for (x = 0; x < r->w; x++)
210 *(dst2++) = pal[*(src2++)];
212 src += r->linesize[0];
/* Stamp the current sub2video frame with `pts` and push it into every
 * filtergraph input fed by this stream.  KEEP_REF keeps the frame owned by
 * ist so the same canvas can be re-sent by sub2video_heartbeat(). */
216 static void sub2video_push_ref(InputStream *ist, int64_t pts)
218 AVFrame *frame = ist->sub2video.frame;
222 av_assert1(frame->data[0]);
223 ist->sub2video.last_pts = frame->pts = pts;
224 for (i = 0; i < ist->nb_filters; i++) {
225 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
226 AV_BUFFERSRC_FLAG_KEEP_REF |
227 AV_BUFFERSRC_FLAG_PUSH);
/* EOF from a buffer source is expected at stream end; only warn on real errors */
228 if (ret != AVERROR_EOF && ret < 0)
229 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render `sub` onto the sub2video canvas and push it downstream; a NULL
 * `sub` clears the canvas (used to erase an expired subtitle).  Display
 * times are rescaled from the AV_TIME_BASE domain into the input stream's
 * time base.  NOTE(review): the NULL branch that sets end_pts and num_rects
 * is partly elided in this listing. */
234 void sub2video_update(InputStream *ist, AVSubtitle *sub)
236 AVFrame *frame = ist->sub2video.frame;
240 int64_t pts, end_pts;
/* start/end_display_time are in ms relative to sub->pts (AV_TIME_BASE units) */
245 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
248 AV_TIME_BASE_Q, ist->st->time_base);
249 num_rects = sub->num_rects;
/* sub == NULL: reuse the previous end timestamp for the clearing frame */
251 pts = ist->sub2video.end_pts;
255 if (sub2video_get_blank_frame(ist) < 0) {
256 av_log(ist->dec_ctx, AV_LOG_ERROR,
257 "Impossible to get a blank canvas.\n");
260 dst = frame->data [0];
261 dst_linesize = frame->linesize[0];
262 for (i = 0; i < num_rects; i++)
263 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
264 sub2video_push_ref(ist, pts);
265 ist->sub2video.end_pts = end_pts;
/* Keep filtergraphs fed: whenever any stream of this input file advances to
 * `pts`, re-send the current sub2video frame of every subtitle stream in
 * the same file so filters waiting on a subtitle input do not stall. */
268 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
270 InputFile *infile = input_files[ist->file_index];
274 /* When a frame is read from a file, examine all sub2video streams in
275 the same file and send the sub2video frame again. Otherwise, decoded
276 video frames could be accumulating in the filter graph while a filter
277 (possibly overlay) is desperately waiting for a subtitle frame. */
278 for (i = 0; i < infile->nb_streams; i++) {
279 InputStream *ist2 = input_streams[infile->ist_index + i];
/* skip streams that are not sub2video streams */
280 if (!ist2->sub2video.frame)
282 /* subtitles seem to be usually muxed ahead of other streams;
283 if not, subtracting a larger time here is necessary */
284 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
285 /* do not send the heartbeat frame if the subtitle is already ahead */
286 if (pts2 <= ist2->sub2video.last_pts)
/* current subtitle expired, or nothing drawn yet: push a blank frame */
288 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
289 sub2video_update(ist2, NULL);
290 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
291 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
/* re-push only if some buffersrc actually failed a frame request
 * (the nb_reqs check line itself is elided in this listing) */
293 sub2video_push_ref(ist2, pts2);
/* End of subtitle stream: push a final clearing frame if a subtitle is
 * still displayed, then signal EOF (NULL frame) to each buffer source. */
297 static void sub2video_flush(InputStream *ist)
302 if (ist->sub2video.end_pts < INT64_MAX)
303 sub2video_update(ist, NULL);
304 for (i = 0; i < ist->nb_filters; i++) {
305 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
306 if (ret != AVERROR_EOF && ret < 0)
307 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
311 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the tty attributes saved in
 * `oldtty` before raw mode was enabled.
 * NOTE(review): the guard around tcsetattr and the separate term_exit()
 * wrapper (the av_log line below belongs to it upstream) are elided here. */
313 static void term_exit_sigsafe(void)
317 tcsetattr (0, TCSANOW, &oldtty);
323 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping.  These are written from signal handlers, hence
 * volatile; transcode_init_done is atomic because decode_interrupt_cb()
 * reads it from arbitrary I/O threads. */
327 static volatile int received_sigterm = 0;
328 static volatile int received_nb_signals = 0;
329 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
330 static volatile int ffmpeg_exited = 0;
331 static int main_return_code = 0;
/* Signal handler: record which signal arrived and count them.  After more
 * than 3 signals, write a message directly to stderr with write(2) (the
 * only async-signal-safe option) -- the hard-exit call is elided here. */
334 sigterm_handler(int sig)
337 received_sigterm = sig;
338 received_nb_signals++;
340 if(received_nb_signals > 3) {
341 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
342 strlen("Received > 3 system signals, hard exiting\n"));
/* result deliberately ignored; there is nothing useful to do on failure */
343 if (ret < 0) { /* Do nothing */ };
348 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * path as POSIX signals via sigterm_handler().  For close/logoff/shutdown,
 * Windows hard-kills the process when this handler returns, so spin until
 * the main thread has finished cleanup (ffmpeg_exited). */
349 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
351 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl-C / Ctrl-Break behave like SIGINT (CTRL_C_EVENT case elided here) */
356 case CTRL_BREAK_EVENT:
357 sigterm_handler(SIGINT);
360 case CTRL_CLOSE_EVENT:
361 case CTRL_LOGOFF_EVENT:
362 case CTRL_SHUTDOWN_EVENT:
363 sigterm_handler(SIGTERM);
364 /* Basically, with these 3 events, when we return from this method the
365 process is hard terminated, so stall as long as we need to
366 to try and let the main thread(s) clean up and gracefully terminate
367 (we have at most 5 seconds, but should be done far before that). */
368 while (!ffmpeg_exited) {
374 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Fragment of term_init() (function header elided in this listing): when
 * interactive, put the controlling tty into a raw-ish mode so single
 * keypresses can be read, then install the signal handlers. */
383 if (!run_as_daemon && stdin_interaction) {
385 if (tcgetattr (0, &tty) == 0) {
/* disable input translation and software flow control ... */
389 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
390 |INLCR|IGNCR|ICRNL|IXON);
/* ... keep output post-processing, drop echo/canonical mode/extensions ... */
391 tty.c_oflag |= OPOST;
392 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
/* ... and clear size/parity bits (8-bit, no parity set below; elided) */
393 tty.c_cflag &= ~(CSIZE|PARENB);
398 tcsetattr (0, TCSANOW, &tty);
400 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
404 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
405 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
407 signal(SIGXCPU, sigterm_handler);
410 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
412 #if HAVE_SETCONSOLECTRLHANDLER
413 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
417 /* read a key without blocking */
/* Non-blocking single-key read from stdin: select() with a zero timeout on
 * POSIX; GetStdHandle/PeekNamedPipe or console APIs on Windows.  Large
 * parts of both code paths are elided in this listing. */
418 static int read_key(void)
430 n = select(1, &rfds, NULL, NULL, &tv);
439 # if HAVE_PEEKNAMEDPIPE
441 static HANDLE input_handle;
444 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails when stdin is a pipe rather than a console */
445 is_pipe = !GetConsoleMode(input_handle, &dw);
449 /* When running under a GUI, you will end here. */
450 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
451 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once the user has sent more
 * signals than tolerated (one extra signal is allowed after transcode init
 * has completed, because transcode_init_done is then 1). */
469 static int decode_interrupt_cb(void *ctx)
471 return received_nb_signals > atomic_load(&transcode_init_done);
/* Shared interrupt callback descriptor handed to libavformat contexts. */
474 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown called on exit: drain and free every filtergraph, output
 * file/stream, input file/stream, the vstats file and the global tables,
 * then log the final status.  `ret` is the pending exit code.
 * NOTE(review): many lines (braces, declarations, #if guards) are elided in
 * this listing; visible code kept byte-identical. */
476 static void ffmpeg_cleanup(int ret)
481 int maxrss = getmaxrss() / 1024;
482 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: drain queued frames/subtitles, then free structures --- */
485 for (i = 0; i < nb_filtergraphs; i++) {
486 FilterGraph *fg = filtergraphs[i];
487 avfilter_graph_free(&fg->graph);
488 for (j = 0; j < fg->nb_inputs; j++) {
/* pop and free every AVFrame still queued on this filter input */
489 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
491 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
492 sizeof(frame), NULL);
493 av_frame_free(&frame);
495 av_fifo_freep(&fg->inputs[j]->frame_queue);
/* likewise for queued sub2video subtitles */
496 if (fg->inputs[j]->ist->sub2video.sub_queue) {
497 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
499 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
500 &sub, sizeof(sub), NULL);
501 avsubtitle_free(&sub);
503 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
505 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
506 av_freep(&fg->inputs[j]->name);
507 av_freep(&fg->inputs[j]);
509 av_freep(&fg->inputs);
510 for (j = 0; j < fg->nb_outputs; j++) {
511 av_freep(&fg->outputs[j]->name);
512 av_freep(&fg->outputs[j]->formats);
513 av_freep(&fg->outputs[j]->channel_layouts);
514 av_freep(&fg->outputs[j]->sample_rates);
515 av_freep(&fg->outputs[j]);
517 av_freep(&fg->outputs);
518 av_freep(&fg->graph_desc);
520 av_freep(&filtergraphs[i]);
522 av_freep(&filtergraphs);
524 av_freep(&subtitle_out);
/* --- output files: close muxer contexts and per-file option dicts --- */
527 for (i = 0; i < nb_output_files; i++) {
528 OutputFile *of = output_files[i];
/* only close the AVIO layer when the muxer owns a real file */
533 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
535 avformat_free_context(s);
536 av_dict_free(&of->opts);
538 av_freep(&output_files[i]);
/* --- output streams: free bsf chain, frames, parsers, queues --- */
540 for (i = 0; i < nb_output_streams; i++) {
541 OutputStream *ost = output_streams[i];
546 for (j = 0; j < ost->nb_bitstream_filters; j++)
547 av_bsf_free(&ost->bsf_ctx[j]);
548 av_freep(&ost->bsf_ctx);
550 av_frame_free(&ost->filtered_frame);
551 av_frame_free(&ost->last_frame);
552 av_dict_free(&ost->encoder_opts);
554 av_parser_close(ost->parser);
555 avcodec_free_context(&ost->parser_avctx);
557 av_freep(&ost->forced_keyframes);
558 av_expr_free(ost->forced_keyframes_pexpr);
559 av_freep(&ost->avfilter);
560 av_freep(&ost->logfile_prefix);
562 av_freep(&ost->audio_channels_map);
563 ost->audio_channels_mapped = 0;
565 av_dict_free(&ost->sws_dict);
567 avcodec_free_context(&ost->enc_ctx);
568 avcodec_parameters_free(&ost->ref_par);
/* unref any packets still waiting in the pre-header muxing queue */
570 if (ost->muxing_queue) {
571 while (av_fifo_size(ost->muxing_queue)) {
573 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
574 av_packet_unref(&pkt);
576 av_fifo_freep(&ost->muxing_queue);
579 av_freep(&output_streams[i]);
582 free_input_threads();
/* --- input files and streams --- */
584 for (i = 0; i < nb_input_files; i++) {
585 avformat_close_input(&input_files[i]->ctx);
586 av_freep(&input_files[i]);
588 for (i = 0; i < nb_input_streams; i++) {
589 InputStream *ist = input_streams[i];
591 av_frame_free(&ist->decoded_frame);
592 av_frame_free(&ist->filter_frame);
593 av_dict_free(&ist->decoder_opts);
594 avsubtitle_free(&ist->prev_sub.subtitle);
595 av_frame_free(&ist->sub2video.frame);
596 av_freep(&ist->filters);
597 av_freep(&ist->hwaccel_device);
598 av_freep(&ist->dts_buffer);
600 avcodec_free_context(&ist->dec_ctx);
602 av_freep(&input_streams[i]);
/* flush/close the -vstats log; a failed fclose can lose buffered data */
606 if (fclose(vstats_file))
607 av_log(NULL, AV_LOG_ERROR,
608 "Error closing vstats file, loss of information possible: %s\n",
609 av_err2str(AVERROR(errno)));
611 av_freep(&vstats_filename);
613 av_freep(&input_streams);
614 av_freep(&input_files);
615 av_freep(&output_streams);
616 av_freep(&output_files);
620 avformat_network_deinit();
/* --- final status line --- */
622 if (received_sigterm) {
623 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
624 (int) received_sigterm);
625 } else if (ret && atomic_load(&transcode_init_done)) {
626 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (exact, case-sensitive
 * match).  Used to strip options that another context already consumed so
 * assert_avoptions() does not report them as unknown. */
632 void remove_avoptions(AVDictionary **a, AVDictionary *b)
634 AVDictionaryEntry *t = NULL;
/* "" + AV_DICT_IGNORE_SUFFIX iterates every entry of b */
636 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
637 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fail fatally if any option in m was left unconsumed, i.e. the user passed
 * an option no component recognized (the exit call is elided here). */
641 void assert_avoptions(AVDictionary *m)
643 AVDictionaryEntry *t;
644 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
645 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort because codec `c` is experimental and -strict experimental was not
 * given; `encoder` distinguishes encode vs decode. Body elided in this listing. */
650 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, log the time elapsed since the previous call, tagged
 * with the printf-style label `fmt`; a NULL fmt only resets the reference
 * clock.  NOTE(review): the buffer/va_list declarations and the
 * current_time update are elided in this listing. */
655 static void update_benchmark(const char *fmt, ...)
657 if (do_benchmark_all) {
658 int64_t t = getutime();
664 vsnprintf(buf, sizeof(buf), fmt, va);
/* NOTE(review): current_time is declared plain `int` above but the value is
 * printed with PRIu64 -- upstream uses int64_t; confirm the declaration. */
666 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `this_stream` flags are applied to
 * `ost` itself, `others` to all remaining streams (used after a fatal
 * muxing error to stop the whole pipeline). */
672 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
675 for (i = 0; i < nb_output_streams; i++) {
676 OutputStream *ost2 = output_streams[i];
677 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer: enforce -frames limits, queue packets until
 * the muxer header is written, rescale timestamps into the stream timebase,
 * repair non-monotonous DTS, then interleave-write.  `unqueue` is set when
 * the packet comes back out of the muxing queue (so it is not re-counted).
 * NOTE(review): many lines (braces, exit paths, #if FF_API guards) are
 * elided in this listing; visible code kept byte-identical. */
681 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
683 AVFormatContext *s = of->ctx;
684 AVStream *st = ost->st;
688 * Audio encoders may split the packets -- #frames in != #packets out.
689 * But there is no reordering, so we can limit the number of output packets
690 * by simply dropping them here.
691 * Counting encoded video frames needs to be done separately because of
692 * reordering, see do_video_out().
693 * Do not count the packet when unqueued because it has been counted when queued.
695 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
696 if (ost->frame_number >= ost->max_frames) {
697 av_packet_unref(pkt);
/* --- muxer not initialized yet: buffer the packet in a growing FIFO --- */
703 if (!of->header_written) {
704 AVPacket tmp_pkt = {0};
705 /* the muxer is not initialized yet, buffer the packet */
706 if (!av_fifo_space(ost->muxing_queue)) {
/* double the queue, capped at -max_muxing_queue_size */
707 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
708 ost->max_muxing_queue_size);
709 if (new_size <= av_fifo_size(ost->muxing_queue)) {
710 av_log(NULL, AV_LOG_ERROR,
711 "Too many packets buffered for output stream %d:%d.\n",
712 ost->file_index, ost->st->index);
715 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
719 ret = av_packet_ref(&tmp_pkt, pkt);
722 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
723 av_packet_unref(pkt);
/* -vsync drop / -async < 0: strip timestamps entirely */
727 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
728 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
729 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
/* --- video: pick up encoder quality/error stats from packet side data --- */
731 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
733 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
735 ost->quality = sd ? AV_RL32(sd) : -1;
736 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
738 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
740 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force each packet's duration to exactly one frame interval */
745 if (ost->frame_rate.num && ost->is_cfr) {
746 if (pkt->duration > 0)
747 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
748 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
753 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
/* --- timestamp sanity fixes (skipped for AVFMT_NOTIMESTAMPS muxers) --- */
755 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
756 if (pkt->dts != AV_NOPTS_VALUE &&
757 pkt->pts != AV_NOPTS_VALUE &&
758 pkt->dts > pkt->pts) {
759 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
761 ost->file_index, ost->st->index);
/* median of {pts, dts, last_mux_dts+1}: sum minus min minus max */
763 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
764 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
765 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
767 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
768 pkt->dts != AV_NOPTS_VALUE &&
769 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
770 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* strict muxers require strictly increasing DTS; others allow equal */
771 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
772 if (pkt->dts < max) {
773 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
774 av_log(s, loglevel, "Non-monotonous DTS in output stream "
775 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
776 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
/* with exit_on_error (condition elided) this is fatal */
778 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
781 av_log(s, loglevel, "changing to %"PRId64". This may result "
782 "in incorrect timestamps in the output file.\n",
784 if (pkt->pts >= pkt->dts)
785 pkt->pts = FFMAX(pkt->pts, max);
790 ost->last_mux_dts = pkt->dts;
792 ost->data_size += pkt->size;
793 ost->packets_written++;
795 pkt->stream_index = ost->index;
/* -debug_ts trace of what goes into the muxer */
798 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
799 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
800 av_get_media_type_string(ost->enc_ctx->codec_type),
801 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
802 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
807 ret = av_interleaved_write_frame(s, pkt);
/* a write error poisons all streams so the run terminates cleanly */
809 print_error("av_interleaved_write_frame()", ret);
810 main_return_code = 1;
811 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
813 av_packet_unref(pkt);
/* Mark `ost` finished for encoding and clamp the owning file's recording
 * time to this stream's current end, so sibling streams stop at the same
 * point (the guard around the clamp is elided in this listing). */
816 static void close_output_stream(OutputStream *ost)
818 OutputFile *of = output_files[ost->file_index];
820 ost->finished |= ENCODER_FINISHED;
822 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
823 of->recording_time = FFMIN(of->recording_time, end);
828 * Send a single packet to the output, applying any bitstream filters
829 * associated with the output stream. This may result in any number
830 * of packets actually being written, depending on what bitstream
831 * filters are applied. The supplied packet is consumed and will be
832 * blank (as if newly-allocated) when this function returns.
834 * If eof is set, instead indicate EOF to all bitstream filters and
835 * therefore flush any delayed packets to the output. A blank packet
836 * must be supplied in this case.
838 static void output_packet(OutputFile *of, AVPacket *pkt,
839 OutputStream *ost, int eof)
843 /* apply the output bitstream filters, if any */
844 if (ost->nb_bitstream_filters) {
847 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
854 /* get a packet from the previous filter up the chain */
855 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
/* EAGAIN: this bsf needs more input -- move back up the chain */
856 if (ret == AVERROR(EAGAIN)) {
/* EOF: this bsf is drained; advance past it */
860 } else if (ret == AVERROR_EOF) {
865 /* send it to the next filter down the chain or to the muxer */
866 if (idx < ost->nb_bitstream_filters) {
867 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
875 write_packet(of, pkt, ost, 0);
/* no bitstream filters configured: hand the packet straight to the muxer
 * (the surrounding else/!eof guard is elided in this listing) */
878 write_packet(of, pkt, ost, 0);
881 if (ret < 0 && ret != AVERROR_EOF) {
882 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
883 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Enforce -t: once the stream's next timestamp reaches the output file's
 * recording limit, close the stream and report "stop" (the return
 * statements are elided in this listing; upstream returns 0 here, 1 below). */
889 static int check_recording_time(OutputStream *ost)
891 OutputFile *of = output_files[ost->file_index];
893 if (of->recording_time != INT64_MAX &&
894 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
895 AV_TIME_BASE_Q) >= 0) {
896 close_output_stream(ost);
/* Encode one audio frame with the send/receive API and push the resulting
 * packet(s) to the muxer.  sync_opts tracks the audio position in samples.
 * NOTE(review): declarations, loop structure and error-goto lines are
 * elided in this listing; visible code kept byte-identical. */
902 static void do_audio_out(OutputFile *of, OutputStream *ost,
905 AVCodecContext *enc = ost->enc_ctx;
909 av_init_packet(&pkt);
/* stop if the -t recording limit has been reached */
913 if (!check_recording_time(ost))
/* without a pts (or with -async < 0) continue from the running counter */
916 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
917 frame->pts = ost->sync_opts;
918 ost->sync_opts = frame->pts + frame->nb_samples;
919 ost->samples_encoded += frame->nb_samples;
920 ost->frames_encoded++;
922 av_assert0(pkt.size || !pkt.data);
923 update_benchmark(NULL);
/* -debug_ts trace of what goes into the encoder */
925 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
926 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
927 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
928 enc->time_base.num, enc->time_base.den);
931 ret = avcodec_send_frame(enc, frame);
/* drain all packets the encoder has ready (loop header elided) */
936 ret = avcodec_receive_packet(enc, &pkt);
937 if (ret == AVERROR(EAGAIN))
942 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
944 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
947 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
948 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
949 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
950 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
953 output_packet(of, &pkt, ost, 0);
/* error path (label elided): any hard encoder failure is fatal */
958 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the result.  DVB subtitles are emitted
 * twice (one packet draws, one clears), so the loop below runs `nb` times.
 * NOTE(review): several lines (parameter list tail, nb assignment, exit
 * paths) are elided in this listing; visible code kept byte-identical. */
962 static void do_subtitle_out(OutputFile *of,
966 int subtitle_out_max_size = 1024 * 1024;
967 int subtitle_out_size, nb, i;
/* subtitles cannot be timed without a pts */
972 if (sub->pts == AV_NOPTS_VALUE) {
973 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the shared 1 MiB encode buffer */
982 subtitle_out = av_malloc(subtitle_out_max_size);
984 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
989 /* Note: DVB subtitle need one packet to draw them and one other
990 packet to clear them */
991 /* XXX: signal it in the codec context ? */
992 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
997 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
999 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1000 pts -= output_files[ost->file_index]->start_time;
1001 for (i = 0; i < nb; i++) {
1002 unsigned save_num_rects = sub->num_rects;
1004 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1005 if (!check_recording_time(ost))
1009 // start_display_time is required to be 0
1010 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1011 sub->end_display_time -= sub->start_display_time;
1012 sub->start_display_time = 0;
/* second DVB pass zeroes num_rects to emit the clearing packet
 * (that assignment line is elided in this listing) */
1016 ost->frames_encoded++;
1018 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1019 subtitle_out_max_size, sub);
/* restore the caller's rect count mutated for the clearing pass */
1021 sub->num_rects = save_num_rects;
1022 if (subtitle_out_size < 0) {
1023 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1027 av_init_packet(&pkt);
1028 pkt.data = subtitle_out;
1029 pkt.size = subtitle_out_size;
1030 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1031 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1032 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1033 /* XXX: the pts correction is handled here. Maybe handling
1034 it in the codec would be better */
/* draw packet at start_display_time, clear packet at end_display_time */
1036 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1038 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1041 output_packet(of, &pkt, ost, 0);
/* Encode one filtered video frame: decide how many times to emit (or drop)
 * it according to the active vsync mode, handle forced keyframes and
 * interlacing flags, run the send/receive encode loop, and keep a ref to
 * the last frame for duplication.  next_picture == NULL means flush.
 * NOTE(review): many lines (braces, case labels, declarations, exit paths)
 * are elided in this listing; visible code kept byte-identical. */
1045 static void do_video_out(OutputFile *of,
1047 AVFrame *next_picture,
1050 int ret, format_video_sync;
1052 AVCodecContext *enc = ost->enc_ctx;
1053 AVCodecParameters *mux_par = ost->st->codecpar;
1054 AVRational frame_rate;
1055 int nb_frames, nb0_frames, i;
1056 double delta, delta0;
1057 double duration = 0;
1059 InputStream *ist = NULL;
1060 AVFilterContext *filter = ost->filter->filter;
1062 if (ost->source_index >= 0)
1063 ist = input_streams[ost->source_index];
/* --- estimate this frame's duration in encoder time-base units --- */
1065 frame_rate = av_buffersink_get_frame_rate(filter);
1066 if (frame_rate.num > 0 && frame_rate.den > 0)
1067 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1069 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1070 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* prefer the frame's own pkt_duration when no filters/fps rewrite it
 * (intermediate conditions elided in this listing) */
1072 if (!ost->filters_script &&
1076 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1077 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* --- flush: repeat the recent duplication pattern via a median --- */
1080 if (!next_picture) {
1082 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1083 ost->last_nb0_frames[1],
1084 ost->last_nb0_frames[2]);
1086 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1087 delta = delta0 + duration;
1089 /* by default, we output a single frame */
1090 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* --- resolve VSYNC_AUTO into a concrete sync mode --- */
1093 format_video_sync = video_sync_method;
1094 if (format_video_sync == VSYNC_AUTO) {
1095 if(!strcmp(of->ctx->oformat->name, "avi")) {
1096 format_video_sync = VSYNC_VFR;
1098 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
/* single-stream, no ts offset: CFR can start from the source timestamps */
1100 && format_video_sync == VSYNC_CFR
1101 && input_files[ist->file_index]->ctx->nb_streams == 1
1102 && input_files[ist->file_index]->input_ts_offset == 0) {
1103 format_video_sync = VSYNC_VSCFR;
1105 if (format_video_sync == VSYNC_CFR && copy_ts) {
1106 format_video_sync = VSYNC_VSCFR;
1109 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* clip frames that would land before the current output position */
1113 format_video_sync != VSYNC_PASSTHROUGH &&
1114 format_video_sync != VSYNC_DROP) {
1115 if (delta0 < -0.6) {
1116 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1118 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1119 sync_ipts = ost->sync_opts;
/* --- per-mode decision: how many frames to emit (case labels elided) --- */
1124 switch (format_video_sync) {
1126 if (ost->frame_number == 0 && delta0 >= 0.5) {
1127 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1130 ost->sync_opts = lrint(sync_ipts);
1133 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1134 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1136 } else if (delta < -1.1)
1138 else if (delta > 1.1) {
1139 nb_frames = lrintf(delta);
1141 nb0_frames = lrintf(delta0 - 0.6);
1147 else if (delta > 0.6)
1148 ost->sync_opts = lrint(sync_ipts);
1151 case VSYNC_PASSTHROUGH:
1152 ost->sync_opts = lrint(sync_ipts);
/* never exceed the -frames limit */
1159 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1160 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* shift the duplication history window */
1162 memmove(ost->last_nb0_frames + 1,
1163 ost->last_nb0_frames,
1164 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1165 ost->last_nb0_frames[0] = nb0_frames;
/* --- drop/dup accounting and user-facing warnings --- */
1167 if (nb0_frames == 0 && ost->last_dropped) {
1169 av_log(NULL, AV_LOG_VERBOSE,
1170 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1171 ost->frame_number, ost->st->index, ost->last_frame->pts);
1173 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1174 if (nb_frames > dts_error_threshold * 30) {
1175 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1179 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1180 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1181 if (nb_frames_dup > dup_warning) {
/* NOTE(review): dup_warning is `unsigned` but printed with %d -- upstream
 * later changed this to a 64-bit format; confirm against current source */
1182 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1186 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1188 /* duplicates frame if needed */
1189 for (i = 0; i < nb_frames; i++) {
1190 AVFrame *in_picture;
1191 av_init_packet(&pkt);
/* re-send the previous frame for the first nb0_frames iterations */
1195 if (i < nb0_frames && ost->last_frame) {
1196 in_picture = ost->last_frame;
1198 in_picture = next_picture;
1203 in_picture->pts = ost->sync_opts;
1206 if (!check_recording_time(ost))
1208 if (ost->frame_number >= ost->max_frames)
1213 int forced_keyframe = 0;
/* --- interlacing flags propagated to frame and muxer parameters --- */
1216 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1217 ost->top_field_first >= 0)
1218 in_picture->top_field_first = !!ost->top_field_first;
1220 if (in_picture->interlaced_frame) {
1221 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1222 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1224 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1226 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1228 in_picture->quality = enc->global_quality;
1229 in_picture->pict_type = 0;
/* --- forced keyframes: explicit pts list, expression, or "source" --- */
1231 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1232 in_picture->pts * av_q2d(enc->time_base) : NAN;
1233 if (ost->forced_kf_index < ost->forced_kf_count &&
1234 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1235 ost->forced_kf_index++;
1236 forced_keyframe = 1;
1237 } else if (ost->forced_keyframes_pexpr) {
1239 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1240 res = av_expr_eval(ost->forced_keyframes_pexpr,
1241 ost->forced_keyframes_expr_const_values, NULL);
1242 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1243 ost->forced_keyframes_expr_const_values[FKF_N],
1244 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1245 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1246 ost->forced_keyframes_expr_const_values[FKF_T],
1247 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1250 forced_keyframe = 1;
1251 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1252 ost->forced_keyframes_expr_const_values[FKF_N];
1253 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1254 ost->forced_keyframes_expr_const_values[FKF_T];
1255 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1258 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1259 } else if ( ost->forced_keyframes
1260 && !strncmp(ost->forced_keyframes, "source", 6)
1261 && in_picture->key_frame==1) {
1262 forced_keyframe = 1;
1265 if (forced_keyframe) {
1266 in_picture->pict_type = AV_PICTURE_TYPE_I;
1267 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
/* --- encode: send frame, drain packets (loop structure elided) --- */
1270 update_benchmark(NULL);
1272 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1273 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1274 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1275 enc->time_base.num, enc->time_base.den);
1278 ost->frames_encoded++;
1280 ret = avcodec_send_frame(enc, in_picture);
1285 ret = avcodec_receive_packet(enc, &pkt);
1286 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1287 if (ret == AVERROR(EAGAIN))
1293 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1294 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1295 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1296 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* codecs without delay may legitimately omit pts; use the running counter */
1299 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1300 pkt.pts = ost->sync_opts;
1302 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
/* -debug_ts trace after rescaling into the mux time base */
1305 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1306 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1308 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1311 frame_size = pkt.size;
1312 output_packet(of, &pkt, ost, 0);
1314 /* if two pass, output log */
1315 if (ost->logfile && enc->stats_out) {
1316 fprintf(ost->logfile, "%s", enc->stats_out);
1322 * For video, number of frames in == number of packets out.
1323 * But there may be reordering, so we can't throw away frames on encoder
1324 * flush, we need to limit them here, before they go into encoder.
1326 ost->frame_number++;
1328 if (vstats_filename && frame_size)
1329 do_video_stats(ost, frame_size);
/* --- keep a reference to this frame for future duplication --- */
1332 if (!ost->last_frame)
1333 ost->last_frame = av_frame_alloc();
1334 av_frame_unref(ost->last_frame);
1335 if (next_picture && ost->last_frame)
1336 av_frame_ref(ost->last_frame, next_picture);
1338 av_frame_free(&ost->last_frame);
/* error path (label elided): any hard encoder failure is fatal */
1342 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* Convert a normalized mean-squared-error ratio d to PSNR in decibels.
 * NOTE(review): the numbered listing elides the brace lines around this body. */
1346 static double psnr(double d)
1348 return -10.0 * log10(d);
/* Append one line of per-frame video statistics for ost to the global
 * vstats_file (opened lazily on first call from vstats_filename).
 * frame_size is the encoded packet size in bytes for the current frame.
 * NOTE(review): the listing elides several lines here (fopen error check,
 * the vstats_version >= 2 branch header, braces) — verify against the full file. */
1351 static void do_video_stats(OutputStream *ost, int frame_size)
1353 AVCodecContext *enc;
1355 double ti1, bitrate, avg_bitrate;
1357 /* this is executed just the first time do_video_stats is called */
1359 vstats_file = fopen(vstats_filename, "w");
1367 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1368 frame_number = ost->st->nb_frames;
/* Version 1 of the vstats format omits the output/stream indices. */
1369 if (vstats_version <= 1) {
1370 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1371 ost->quality / (float)FF_QP2LAMBDA);
1373 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1374 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only printed when the encoder was asked to compute error stats. */
1377 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1378 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1380 fprintf(vstats_file,"f_size= %6d ", frame_size);
1381 /* compute pts value */
1382 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate from this frame; average from cumulative bytes / time. */
1386 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1387 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1388 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1389 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1390 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1394 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/* Mark ost as fully finished (encoder and muxer), and likewise every other
 * output stream that belongs to the same output file.
 * NOTE(review): the listing elides the condition guarding the loop over the
 * file's sibling streams — confirm against the full source. */
1396 static void finish_output_stream(OutputStream *ost)
1398 OutputFile *of = output_files[ost->file_index];
1401 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1404 for (i = 0; i < of->ctx->nb_streams; i++)
1405 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1410 * Get and encode new output from any of the filtergraphs, without causing
1413 * @return 0 for success, <0 for severe errors
/* Drain every configured filtergraph sink: pull all frames currently
 * available (without requesting more input) and hand them to the video or
 * audio encoder path. With flush != 0, an EOF from a video sink triggers a
 * final do_video_out(NULL) to flush the fps/duplication logic.
 * Returns 0 on success, <0 on severe errors.
 * NOTE(review): the listing elides braces, `continue` statements and the
 * inner while(1) header — read alongside the full source. */
1415 static int reap_filters(int flush)
1417 AVFrame *filtered_frame = NULL;
1420 /* Reap all buffers present in the buffer sinks */
1421 for (i = 0; i < nb_output_streams; i++) {
1422 OutputStream *ost = output_streams[i];
1423 OutputFile *of = output_files[ost->file_index];
1424 AVFilterContext *filter;
1425 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph is not configured yet. */
1428 if (!ost->filter || !ost->filter->graph->graph)
1430 filter = ost->filter->filter;
/* Lazily initialize the output stream the first time a frame is available. */
1432 if (!ost->initialized) {
1433 char error[1024] = "";
1434 ret = init_output_stream(ost, error, sizeof(error));
1436 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1437 ost->file_index, ost->index, error);
1442 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1443 return AVERROR(ENOMEM);
1445 filtered_frame = ost->filtered_frame;
1448 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames the graph has already produced. */
1449 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1450 AV_BUFFERSINK_FLAG_NO_REQUEST);
1452 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1453 av_log(NULL, AV_LOG_WARNING,
1454 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1455 } else if (flush && ret == AVERROR_EOF) {
1456 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1457 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1461 if (ost->finished) {
1462 av_frame_unref(filtered_frame);
/* Rescale the filter-timebase pts into the encoder timebase, keeping a
 * higher-precision float copy (extra_bits of sub-unit resolution) for the
 * video fps logic. */
1465 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1466 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1467 AVRational filter_tb = av_buffersink_get_time_base(filter);
1468 AVRational tb = enc->time_base;
1469 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1471 tb.den <<= extra_bits;
1473 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1474 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1475 float_pts /= 1 << extra_bits;
1476 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1477 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1479 filtered_frame->pts =
1480 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1481 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1483 //if (ost->source_index >= 0)
1484 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1486 switch (av_buffersink_get_type(filter)) {
1487 case AVMEDIA_TYPE_VIDEO:
1488 if (!ost->frame_aspect_ratio.num)
1489 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1492 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1493 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1495 enc->time_base.num, enc->time_base.den);
1498 do_video_out(of, ost, filtered_frame, float_pts);
1500 case AVMEDIA_TYPE_AUDIO:
/* An encoder without PARAM_CHANGE cannot cope with a channel-count change
 * coming out of the graph. */
1501 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1502 enc->channels != filtered_frame->channels) {
1503 av_log(NULL, AV_LOG_ERROR,
1504 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1507 do_audio_out(of, ost, filtered_frame);
1510 // TODO support subtitle filters
1514 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type encoded sizes and muxing overhead
 * at INFO level, then per-input/per-output stream packet and frame counts at
 * VERBOSE level. total_size is the muxed output file size in bytes.
 * NOTE(review): the listing elides braces and a few conditional headers
 * (e.g. the pass1-only check consequence, percent >= 0 test). */
1521 static void print_final_stats(int64_t total_size)
1523 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1524 uint64_t subtitle_size = 0;
1525 uint64_t data_size = 0;
1526 float percent = -1.0;
/* Accumulate written bytes per codec type across all output streams. */
1530 for (i = 0; i < nb_output_streams; i++) {
1531 OutputStream *ost = output_streams[i];
1532 switch (ost->enc_ctx->codec_type) {
1533 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1534 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1535 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1536 default: other_size += ost->data_size; break;
1538 extra_size += ost->enc_ctx->extradata_size;
1539 data_size += ost->data_size;
1540 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1541 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead as a percentage of the payload bytes. */
1545 if (data_size && total_size>0 && total_size >= data_size)
1546 percent = 100.0 * (total_size - data_size) / data_size;
1548 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1549 video_size / 1024.0,
1550 audio_size / 1024.0,
1551 subtitle_size / 1024.0,
1552 other_size / 1024.0,
1553 extra_size / 1024.0);
1555 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1557 av_log(NULL, AV_LOG_INFO, "unknown");
1558 av_log(NULL, AV_LOG_INFO, "\n");
1560 /* print verbose per-stream stats */
1561 for (i = 0; i < nb_input_files; i++) {
1562 InputFile *f = input_files[i];
1563 uint64_t total_packets = 0, total_size = 0;
1565 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1568 for (j = 0; j < f->nb_streams; j++) {
1569 InputStream *ist = input_streams[f->ist_index + j];
1570 enum AVMediaType type = ist->dec_ctx->codec_type;
1572 total_size += ist->data_size;
1573 total_packets += ist->nb_packets;
1575 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1576 i, j, media_type_string(type));
1577 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1578 ist->nb_packets, ist->data_size);
1580 if (ist->decoding_needed) {
1581 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1582 ist->frames_decoded);
1583 if (type == AVMEDIA_TYPE_AUDIO)
1584 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1585 av_log(NULL, AV_LOG_VERBOSE, "; ");
1588 av_log(NULL, AV_LOG_VERBOSE, "\n");
1591 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1592 total_packets, total_size);
/* Symmetric report for each output file and its streams. */
1595 for (i = 0; i < nb_output_files; i++) {
1596 OutputFile *of = output_files[i];
1597 uint64_t total_packets = 0, total_size = 0;
1599 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1602 for (j = 0; j < of->ctx->nb_streams; j++) {
1603 OutputStream *ost = output_streams[of->ost_index + j];
1604 enum AVMediaType type = ost->enc_ctx->codec_type;
1606 total_size += ost->data_size;
1607 total_packets += ost->packets_written;
1609 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1610 i, j, media_type_string(type));
1611 if (ost->encoding_needed) {
1612 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1613 ost->frames_encoded);
1614 if (type == AVMEDIA_TYPE_AUDIO)
1615 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1616 av_log(NULL, AV_LOG_VERBOSE, "; ");
1619 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1620 ost->packets_written, ost->data_size);
1622 av_log(NULL, AV_LOG_VERBOSE, "\n");
1625 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1626 total_packets, total_size);
/* Warn loudly when nothing at all was encoded. */
1628 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1629 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1631 av_log(NULL, AV_LOG_WARNING, "\n");
1633 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic (and final) progress line: frame count, fps, q, size,
 * time, bitrate, dup/drop counts and speed. Also writes machine-readable
 * key=value pairs to progress_avio when -progress is active. Rate-limited
 * to one report per 500ms unless is_last_report.
 * NOTE(review): the listing elides many lines (returns, qp histogram update,
 * hours/mins computation, several conditionals) — read with the full source. */
1638 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1640 AVBPrint buf, buf_script;
1642 AVFormatContext *oc;
1644 AVCodecContext *enc;
1645 int frame_number, vid, i;
1648 int64_t pts = INT64_MIN + 1;
/* static: persists across calls to implement the 500ms rate limit. */
1649 static int64_t last_time = -1;
1650 static int qp_histogram[52];
1651 int hours, mins, secs, us;
1652 const char *hours_sign;
1656 if (!print_stats && !is_last_report && !progress_avio)
1659 if (!is_last_report) {
1660 if (last_time == -1) {
1661 last_time = cur_time;
1664 if ((cur_time - last_time) < 500000)
1666 last_time = cur_time;
/* Wall-clock seconds since transcode start. */
1669 t = (cur_time-timer_start) / 1000000.0;
1672 oc = output_files[0]->ctx;
1674 total_size = avio_size(oc->pb);
1675 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1676 total_size = avio_tell(oc->pb);
1679 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1680 av_bprint_init(&buf_script, 0, 1);
1681 for (i = 0; i < nb_output_streams; i++) {
1683 ost = output_streams[i];
1685 if (!ost->stream_copy)
1686 q = ost->quality / (float) FF_QP2LAMBDA;
1688 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1689 av_bprintf(&buf, "q=%2.1f ", q);
1690 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1691 ost->file_index, ost->index, q);
/* First video stream drives the frame/fps fields of the report. */
1693 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1696 frame_number = ost->frame_number;
1697 fps = t > 1 ? frame_number / t : 0;
1698 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1699 frame_number, fps < 9.95, fps, q);
1700 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1701 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1702 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1703 ost->file_index, ost->index, q);
1705 av_bprintf(&buf, "L");
/* qp_histogram: distribution of quantizer values, printed as hex digits. */
1709 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1711 for (j = 0; j < 32; j++)
1712 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1715 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1717 double error, error_sum = 0;
1718 double scale, scale_sum = 0;
1720 char type[3] = { 'Y','U','V' };
1721 av_bprintf(&buf, "PSNR=");
1722 for (j = 0; j < 3; j++) {
/* Final report uses accumulated error; interim uses per-frame error. */
1723 if (is_last_report) {
1724 error = enc->error[j];
1725 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1727 error = ost->error[j];
1728 scale = enc->width * enc->height * 255.0 * 255.0;
1734 p = psnr(error / scale);
1735 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1736 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1737 ost->file_index, ost->index, type[j] | 32, p);
1739 p = psnr(error_sum / scale_sum);
1740 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1741 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1742 ost->file_index, ost->index, p);
1746 /* compute min output value */
1747 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1748 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1749 ost->st->time_base, AV_TIME_BASE_Q));
1751 nb_frames_drop += ost->last_dropped;
1754 secs = FFABS(pts) / AV_TIME_BASE;
1755 us = FFABS(pts) % AV_TIME_BASE;
1760 hours_sign = (pts < 0) ? "-" : "";
1762 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1763 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1765 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1766 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1767 if (pts == AV_NOPTS_VALUE) {
1768 av_bprintf(&buf, "N/A ");
1770 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1771 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1775 av_bprintf(&buf, "bitrate=N/A");
1776 av_bprintf(&buf_script, "bitrate=N/A\n");
1778 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1779 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1782 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1783 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1784 if (pts == AV_NOPTS_VALUE) {
1785 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1786 av_bprintf(&buf_script, "out_time=N/A\n");
1788 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1789 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1790 hours_sign, hours, mins, secs, us);
1793 if (nb_frames_dup || nb_frames_drop)
1794 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1795 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1796 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1799 av_bprintf(&buf, " speed=N/A");
1800 av_bprintf(&buf_script, "speed=N/A\n");
1802 av_bprintf(&buf, " speed=%4.3gx", speed);
1803 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* Human-readable line goes to stderr directly when -stats would otherwise be
 * suppressed by the log level. */
1806 if (print_stats || is_last_report) {
1807 const char end = is_last_report ? '\n' : '\r';
1808 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1809 fprintf(stderr, "%s %c", buf.str, end);
1811 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1815 av_bprint_finalize(&buf, NULL);
1817 if (progress_avio) {
1818 av_bprintf(&buf_script, "progress=%s\n",
1819 is_last_report ? "end" : "continue");
1820 avio_write(progress_avio, buf_script.str,
1821 FFMIN(buf_script.len, buf_script.size - 1));
1822 avio_flush(progress_avio);
1823 av_bprint_finalize(&buf_script, NULL);
1824 if (is_last_report) {
1825 if ((ret = avio_closep(&progress_avio)) < 0)
1826 av_log(NULL, AV_LOG_ERROR,
1827 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1832 print_final_stats(total_size);
/* Flush all encoders at end of input: send a NULL (drain) frame to each
 * encoder that needs it and mux every remaining packet. Streams whose
 * filtergraph was never configured are given a fake input format so their
 * encoder can still be initialized before draining.
 * NOTE(review): the listing elides `continue` statements, desc assignments
 * per codec type, and several braces. */
1835 static void flush_encoders(void)
1839 for (i = 0; i < nb_output_streams; i++) {
1840 OutputStream *ost = output_streams[i];
1841 AVCodecContext *enc = ost->enc_ctx;
1842 OutputFile *of = output_files[ost->file_index];
1844 if (!ost->encoding_needed)
1847 // Try to enable encoding with no input frames.
1848 // Maybe we should just let encoding fail instead.
1849 if (!ost->initialized) {
1850 FilterGraph *fg = ost->filter->graph;
1851 char error[1024] = "";
1853 av_log(NULL, AV_LOG_WARNING,
1854 "Finishing stream %d:%d without any data written to it.\n",
1855 ost->file_index, ost->st->index);
1857 if (ost->filter && !fg->graph) {
1859 for (x = 0; x < fg->nb_inputs; x++) {
1860 InputFilter *ifilter = fg->inputs[x];
1861 if (ifilter->format < 0) {
1862 AVCodecParameters *par = ifilter->ist->st->codecpar;
1863 // We never got any input. Set a fake format, which will
1864 // come from libavformat.
1865 ifilter->format = par->format;
1866 ifilter->sample_rate = par->sample_rate;
1867 ifilter->channels = par->channels;
1868 ifilter->channel_layout = par->channel_layout;
1869 ifilter->width = par->width;
1870 ifilter->height = par->height;
1871 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1875 if (!ifilter_has_all_input_formats(fg))
1878 ret = configure_filtergraph(fg);
1880 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1884 finish_output_stream(ost);
1887 ret = init_output_stream(ost, error, sizeof(error));
1889 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1890 ost->file_index, ost->index, error);
/* Raw (frame_size <= 1) audio and non-A/V streams need no draining. */
1895 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1898 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1902 const char *desc = NULL;
1906 switch (enc->codec_type) {
1907 case AVMEDIA_TYPE_AUDIO:
1910 case AVMEDIA_TYPE_VIDEO:
1917 av_init_packet(&pkt);
1921 update_benchmark(NULL);
/* Drain loop: send NULL once EAGAIN is seen, then keep receiving packets
 * until AVERROR_EOF. */
1923 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1924 ret = avcodec_send_frame(enc, NULL);
1926 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1933 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1934 if (ret < 0 && ret != AVERROR_EOF) {
1935 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass log output for the flushed packets as well. */
1940 if (ost->logfile && enc->stats_out) {
1941 fprintf(ost->logfile, "%s", enc->stats_out);
1943 if (ret == AVERROR_EOF) {
1944 output_packet(of, &pkt, ost, 1);
1947 if (ost->finished & MUXER_FINISHED) {
1948 av_packet_unref(&pkt);
1951 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1952 pkt_size = pkt.size;
1953 output_packet(of, &pkt, ost, 0);
1954 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1955 do_video_stats(ost, pkt_size);
1962 * Check whether a packet from ist should be written into ost at this time
/* Decide whether a packet from ist may currently be copied into ost:
 * the stream must be ost's source, and packets before the output file's
 * start time are rejected.
 * NOTE(review): the listing elides the return statements and at least one
 * additional condition between these checks. */
1964 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1966 OutputFile *of = output_files[ost->file_index];
1967 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1969 if (ost->source_index != ist_index)
1975 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to the output without re-encoding: rescale its
 * timestamps into ost's muxing timebase, honor -ss/-t style start/recording
 * limits, optionally run the legacy parser-based bitstream conversion, and
 * hand the result to output_packet().
 * NOTE(review): the listing elides `return`s, the EOF (!pkt) test before the
 * flush, and several braces. */
1981 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1983 OutputFile *of = output_files[ost->file_index];
1984 InputFile *f = input_files [ist->file_index];
1985 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1986 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1987 AVPacket opkt = { 0 };
1989 av_init_packet(&opkt);
1991 // EOF: flush output bitstream filters.
1993 output_packet(of, &opkt, ost, 1);
/* Drop leading non-keyframes unless explicitly allowed. */
1997 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1998 !ost->copy_initial_nonkeyframes)
2001 if (!ost->frame_number && !ost->copy_prior_start) {
2002 int64_t comp_start = start_time;
2003 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2004 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2005 if (pkt->pts == AV_NOPTS_VALUE ?
2006 ist->pts < comp_start :
2007 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop copying once the output file's recording time is reached. */
2011 if (of->recording_time != INT64_MAX &&
2012 ist->pts >= of->recording_time + start_time) {
2013 close_output_stream(ost);
2017 if (f->recording_time != INT64_MAX) {
2018 start_time = f->ctx->start_time;
2019 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2020 start_time += f->start_time;
2021 if (ist->pts >= f->recording_time + start_time) {
2022 close_output_stream(ost);
2027 /* force the input stream PTS */
2028 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* Timestamps are rescaled from the input stream timebase into the muxing
 * timebase, offset by the output start time. */
2031 if (pkt->pts != AV_NOPTS_VALUE)
2032 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2034 opkt.pts = AV_NOPTS_VALUE;
2036 if (pkt->dts == AV_NOPTS_VALUE)
2037 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2039 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2040 opkt.dts -= ost_tb_start_time;
/* Audio: derive a sample-accurate dts/pts from the packet duration. */
2042 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2043 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2045 duration = ist->dec_ctx->frame_size;
2046 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2047 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2048 ost->mux_timebase) - ost_tb_start_time;
2051 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2053 opkt.flags = pkt->flags;
2054 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2055 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2056 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2057 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2058 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2060 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2061 &opkt.data, &opkt.size,
2062 pkt->data, pkt->size,
2063 pkt->flags & AV_PKT_FLAG_KEY);
2065 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Parser may have allocated new data; wrap it in a refcounted buffer. */
2070 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2075 opkt.data = pkt->data;
2076 opkt.size = pkt->size;
2078 av_copy_packet_side_data(&opkt, pkt);
2080 output_packet(of, &opkt, ost, 0);
/* If the decoder reports no channel layout, guess a default one from the
 * channel count (capped by -guess_layout_max) and warn about the guess.
 * NOTE(review): the listing elides the return statements; presumably this
 * returns success/failure of the guess — confirm against the full source. */
2083 int guess_input_channel_layout(InputStream *ist)
2085 AVCodecContext *dec = ist->dec_ctx;
2087 if (!dec->channel_layout) {
2088 char layout_name[256];
2090 if (dec->channels > ist->guess_layout_max)
2092 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2093 if (!dec->channel_layout)
2095 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2096 dec->channels, dec->channel_layout);
2097 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2098 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode success/failure statistics and, with -xerror, abort on decode
 * errors or on frames the decoder flagged as corrupt. */
2103 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2105 if (*got_output || ret<0)
2106 decode_error_stat[ret<0] ++;
2108 if (ret < 0 && exit_on_error)
2111 if (exit_on_error && *got_output && ist) {
2112 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2113 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2119 // Filters can be configured only if the formats of all inputs are known.
/* Return whether every audio/video input of fg has a known format (>= 0),
 * i.e. whether the filtergraph can be configured.
 * NOTE(review): the listing elides the return statements. */
2120 static int ifilter_has_all_input_formats(FilterGraph *fg)
2123 for (i = 0; i < fg->nb_inputs; i++) {
2124 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2125 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Feed one decoded frame into an input filter. If the frame's parameters
 * differ from what the graph was configured with (format, size, sample rate,
 * channel layout, hw frames context), the graph is reconfigured first; if
 * other inputs are still missing their format, the frame is buffered in the
 * ifilter's FIFO instead.
 * NOTE(review): the listing elides braces and `goto`/`return` lines. */
2131 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2133 FilterGraph *fg = ifilter->graph;
2134 int need_reinit, ret, i;
2136 /* determine if the parameters for this input changed */
2137 need_reinit = ifilter->format != frame->format;
2138 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2139 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2142 switch (ifilter->ist->st->codecpar->codec_type) {
2143 case AVMEDIA_TYPE_AUDIO:
2144 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2145 ifilter->channels != frame->channels ||
2146 ifilter->channel_layout != frame->channel_layout;
2148 case AVMEDIA_TYPE_VIDEO:
2149 need_reinit |= ifilter->width != frame->width ||
2150 ifilter->height != frame->height;
/* Record the new parameters on the ifilter before reconfiguring. */
2155 ret = ifilter_parameters_from_frame(ifilter, frame);
2160 /* (re)init the graph if possible, otherwise buffer the frame and return */
2161 if (need_reinit || !fg->graph) {
2162 for (i = 0; i < fg->nb_inputs; i++) {
2163 if (!ifilter_has_all_input_formats(fg)) {
/* Clone because the caller still owns `frame`; the clone is queued. */
2164 AVFrame *tmp = av_frame_clone(frame);
2166 return AVERROR(ENOMEM);
2167 av_frame_unref(frame);
2169 if (!av_fifo_space(ifilter->frame_queue)) {
2170 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2172 av_frame_free(&tmp);
2176 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain the old graph's pending output before tearing it down. */
2181 ret = reap_filters(1);
2182 if (ret < 0 && ret != AVERROR_EOF) {
2183 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2187 ret = configure_filtergraph(fg);
2189 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2194 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2196 if (ret != AVERROR_EOF)
2197 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/* Signal EOF (at timestamp pts) on one filtergraph input. If the graph was
 * never configured and every input has now reached EOF, mark all of the
 * graph's output streams finished instead. */
2204 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2210 if (ifilter->filter) {
2211 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2215 // the filtergraph was never configured
2216 FilterGraph *fg = ifilter->graph;
2217 for (i = 0; i < fg->nb_inputs; i++)
2218 if (!fg->inputs[i]->eof)
2220 if (i == fg->nb_inputs) {
2221 // All the input streams have finished without the filtergraph
2222 // ever being configured.
2223 // Mark the output streams as finished.
2224 for (j = 0; j < fg->nb_outputs; j++)
2225 finish_output_stream(fg->outputs[j]->ost);
2232 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2233 // There is the following difference: if you got a frame, you must call
2234 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2235 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin wrapper mapping the send/receive decode API onto the old
 * got_frame-style contract (see the comment block above this function).
 * Sends pkt (if non-NULL), then tries to receive one frame.
 * NOTE(review): the listing elides the *got_frame assignments and returns. */
2236 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2243 ret = avcodec_send_packet(avctx, pkt);
2244 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2245 // decoded frames with avcodec_receive_frame() until done.
2246 if (ret < 0 && ret != AVERROR_EOF)
2250 ret = avcodec_receive_frame(avctx, frame);
2251 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Push a decoded frame into every filtergraph input fed by ist. All but the
 * last filter receive a reference (ist->filter_frame); the last consumes the
 * frame itself. EOF from a filter is ignored; other errors are logged. */
2259 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2264 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2265 for (i = 0; i < ist->nb_filters; i++) {
2266 if (i < ist->nb_filters - 1) {
2267 f = ist->filter_frame;
2268 ret = av_frame_ref(f, decoded_frame);
2273 ret = ifilter_send_frame(ist->filters[i], f);
2274 if (ret == AVERROR_EOF)
2275 ret = 0; /* ignore */
2277 av_log(NULL, AV_LOG_ERROR,
2278 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet for ist, update timing state (next_pts/next_dts
 * advanced by the decoded sample count), pick a pts/timebase for the decoded
 * frame, rescale it to the sample-rate timebase, and forward the frame to
 * the filtergraphs.
 * NOTE(review): the listing elides braces, early returns, and the divisor of
 * the next_pts/next_dts increments (presumably the decoder sample rate). */
2285 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2288 AVFrame *decoded_frame;
2289 AVCodecContext *avctx = ist->dec_ctx;
2291 AVRational decoded_frame_tb;
/* Lazily allocate the reusable decode/filter frames. */
2293 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2294 return AVERROR(ENOMEM);
2295 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2296 return AVERROR(ENOMEM);
2297 decoded_frame = ist->decoded_frame;
2299 update_benchmark(NULL);
2300 ret = decode(avctx, decoded_frame, got_output, pkt);
2301 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2305 if (ret >= 0 && avctx->sample_rate <= 0) {
2306 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2307 ret = AVERROR_INVALIDDATA;
2310 if (ret != AVERROR_EOF)
2311 check_decode_result(ist, got_output, ret);
2313 if (!*got_output || ret < 0)
2316 ist->samples_decoded += decoded_frame->nb_samples;
2317 ist->frames_decoded++;
2320 /* increment next_dts to use for the case where the input stream does not
2321 have timestamps or there are multiple frames in the packet */
2322 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2324 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Choose the best available pts source: frame pts, then packet pts, then
 * the stream's running dts estimate. */
2328 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2329 decoded_frame_tb = ist->st->time_base;
2330 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2331 decoded_frame->pts = pkt->pts;
2332 decoded_frame_tb = ist->st->time_base;
2334 decoded_frame->pts = ist->dts;
2335 decoded_frame_tb = AV_TIME_BASE_Q;
2337 if (decoded_frame->pts != AV_NOPTS_VALUE)
2338 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2339 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2340 (AVRational){1, avctx->sample_rate});
2341 ist->nb_samples = decoded_frame->nb_samples;
2342 err = send_frame_to_filters(ist, decoded_frame);
2344 av_frame_unref(ist->filter_frame);
2345 av_frame_unref(decoded_frame);
2346 return err < 0 ? err : ret;
/* Decode one video packet for ist: maintain a small dts queue so timestamps
 * survive the drain at EOF, derive a best-effort timestamp for the decoded
 * frame, update ist->pts/next_pts, and forward the frame to the
 * filtergraphs. duration_pts receives the decoded frame's pkt_duration.
 * NOTE(review): the listing elides braces, early returns, the local avpkt
 * setup, and an error-exit path — read alongside the full source. */
2349 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2352 AVFrame *decoded_frame;
2353 int i, ret = 0, err = 0;
2354 int64_t best_effort_timestamp;
2355 int64_t dts = AV_NOPTS_VALUE;
2358 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2359 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2361 if (!eof && pkt && pkt->size == 0)
2364 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2365 return AVERROR(ENOMEM);
2366 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2367 return AVERROR(ENOMEM);
2368 decoded_frame = ist->decoded_frame;
2369 if (ist->dts != AV_NOPTS_VALUE)
2370 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2373 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2376 // The old code used to set dts on the drain packet, which does not work
2377 // with the new API anymore.
/* Queue the dts so it can serve as a fallback timestamp during draining. */
2379 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2381 return AVERROR(ENOMEM);
2382 ist->dts_buffer = new;
2383 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2386 update_benchmark(NULL);
2387 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2388 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2392 // The following line may be required in some cases where there is no parser
2393 // or the parser does not has_b_frames correctly
2394 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2395 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2396 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2398 av_log(ist->dec_ctx, AV_LOG_WARNING,
2399 "video_delay is larger in decoder than demuxer %d > %d.\n"
2400 "If you want to help, upload a sample "
2401 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2402 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2403 ist->dec_ctx->has_b_frames,
2404 ist->st->codecpar->video_delay);
2407 if (ret != AVERROR_EOF)
2408 check_decode_result(ist, got_output, ret);
/* Sanity check: the frame should match the decoder context dimensions. */
2410 if (*got_output && ret >= 0) {
2411 if (ist->dec_ctx->width != decoded_frame->width ||
2412 ist->dec_ctx->height != decoded_frame->height ||
2413 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2414 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2415 decoded_frame->width,
2416 decoded_frame->height,
2417 decoded_frame->format,
2418 ist->dec_ctx->width,
2419 ist->dec_ctx->height,
2420 ist->dec_ctx->pix_fmt);
2424 if (!*got_output || ret < 0)
2427 if(ist->top_field_first>=0)
2428 decoded_frame->top_field_first = ist->top_field_first;
2430 ist->frames_decoded++;
/* Download the frame from the hwaccel surface if required. */
2432 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2433 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2437 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2439 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2440 *duration_pts = decoded_frame->pkt_duration;
/* With a forced input framerate, timestamps are simply sequential. */
2442 if (ist->framerate.num)
2443 best_effort_timestamp = ist->cfr_next_pts++;
2445 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2446 best_effort_timestamp = ist->dts_buffer[0];
2448 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2449 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2450 ist->nb_dts_buffer--;
2453 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2454 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2456 if (ts != AV_NOPTS_VALUE)
2457 ist->next_pts = ist->pts = ts;
2461 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2462 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2463 ist->st->index, av_ts2str(decoded_frame->pts),
2464 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2465 best_effort_timestamp,
2466 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2467 decoded_frame->key_frame, decoded_frame->pict_type,
2468 ist->st->time_base.num, ist->st->time_base.den);
2471 if (ist->st->sample_aspect_ratio.num)
2472 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2474 err = send_frame_to_filters(ist, decoded_frame);
2477 av_frame_unref(ist->filter_frame);
2478 av_frame_unref(decoded_frame);
2479 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet for input stream 'ist' and fan the result out:
 * optionally rescale the previous subtitle's duration (-fix_sub_duration),
 * feed sub2video filtergraphs, and send the subtitle to every
 * subtitle-encoding output stream. *got_output is set by
 * avcodec_decode_subtitle2(). NOTE(review): this listing is elided (gaps in
 * the source numbering) — return statements and some cleanup are not visible.
 */
2482 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2485 AVSubtitle subtitle;
2487 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2488 &subtitle, got_output, pkt);
2490 check_decode_result(NULL, got_output, ret);
// On decode error or no decoded subtitle: flush pending sub2video state
// (the rest of this early-out branch is elided).
2492 if (ret < 0 || !*got_output) {
2495 sub2video_flush(ist);
// -fix_sub_duration: clamp the previous subtitle so it stops displaying when
// the current one starts. Output is delayed by one subtitle via the FFSWAPs.
2499 if (ist->fix_sub_duration) {
2501 if (ist->prev_sub.got_output) {
// 'end' is in milliseconds (1000 units/s) rescaled from the AV_TIME_BASE delta.
2502 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2503 1000, AV_TIME_BASE);
2504 if (end < ist->prev_sub.subtitle.end_display_time) {
2505 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2506 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2507 ist->prev_sub.subtitle.end_display_time, end,
2508 end <= 0 ? ", dropping it" : "");
2509 ist->prev_sub.subtitle.end_display_time = end;
// Swap current and previous: this call emits the *previous* subtitle.
2512 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2513 FFSWAP(int, ret, ist->prev_sub.ret);
2514 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
// sub2video: render onto the dedicated frame if configured; otherwise queue
// the subtitle in a FIFO (grown by doubling) until the filtergraph is ready.
2522 if (ist->sub2video.frame) {
2523 sub2video_update(ist, &subtitle);
2524 } else if (ist->nb_filters) {
2525 if (!ist->sub2video.sub_queue)
2526 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2527 if (!ist->sub2video.sub_queue)
2529 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2530 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2534 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
// Subtitles without rects are not forwarded to encoders (branch body elided).
2538 if (!subtitle.num_rects)
2541 ist->frames_decoded++;
// Forward the decoded subtitle to every matching subtitle encoder.
2543 for (i = 0; i < nb_output_streams; i++) {
2544 OutputStream *ost = output_streams[i];
2546 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2547 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2550 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2555 avsubtitle_free(&subtitle);
/*
 * Signal EOF (carrying the stream's final pts, converted back to the stream
 * time base) to every filtergraph input fed by this input stream so the
 * filters can flush. NOTE(review): error handling inside the loop and the
 * final return are elided from this listing.
 */
2559 static int send_filter_eof(InputStream *ist)
2562 /* TODO keep pts also in stream time base to avoid converting back */
2563 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2564 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2566 for (i = 0; i < ist->nb_filters; i++) {
2567 ret = ifilter_send_eof(ist->filters[i], pts);
/*
 * Central per-packet dispatcher: decode 'pkt' (or drain on pkt == NULL) for
 * input stream 'ist', maintain the stream's pts/dts estimates, send filter
 * EOFs after flushing, and handle the stream-copy (no decoding) path.
 * Returns !eof_reached, i.e. non-zero while the stream still produces data.
 * NOTE(review): this listing is elided (gaps in the source numbering) —
 * several declarations, braces and branches are not visible below.
 */
2574 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2575 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2579 int eof_reached = 0;
// First packet: seed ist->dts. With a known avg frame rate, pre-roll by the
// decoder's B-frame delay (negative start); otherwise start at 0.
2582 if (!ist->saw_first_ts) {
2583 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2585 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2586 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2587 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2589 ist->saw_first_ts = 1;
2592 if (ist->next_dts == AV_NOPTS_VALUE)
2593 ist->next_dts = ist->dts;
2594 if (ist->next_pts == AV_NOPTS_VALUE)
2595 ist->next_pts = ist->pts;
2599 av_init_packet(&avpkt);
// A packet dts resynchronizes our running estimates (in AV_TIME_BASE units).
2606 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2607 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2608 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2609 ist->next_pts = ist->pts = ist->dts;
2612 // while we have more to decode or while the decoder did output something on EOF
2613 while (ist->decoding_needed) {
2614 int64_t duration_dts = 0;
2615 int64_t duration_pts = 0;
2617 int decode_failed = 0;
2619 ist->pts = ist->next_pts;
2620 ist->dts = ist->next_dts;
// Dispatch on media type; 'repeating' iterations pass NULL so the decoder
// only drains frames it has already buffered.
2622 switch (ist->dec_ctx->codec_type) {
2623 case AVMEDIA_TYPE_AUDIO:
2624 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2627 case AVMEDIA_TYPE_VIDEO:
2628 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2630 if (!repeating || !pkt || got_output) {
// Estimate the frame duration in dts terms: prefer the packet duration,
// else derive one from the decoder frame rate and repeat_pict ticks.
2631 if (pkt && pkt->duration) {
2632 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2633 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2634 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2635 duration_dts = ((int64_t)AV_TIME_BASE *
2636 ist->dec_ctx->framerate.den * ticks) /
2637 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2640 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2641 ist->next_dts += duration_dts;
2643 ist->next_dts = AV_NOPTS_VALUE;
// Advance next_pts by the decoded frame's duration when known, else fall
// back to the dts-based estimate.
2647 if (duration_pts > 0) {
2648 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2650 ist->next_pts += duration_dts;
2654 case AVMEDIA_TYPE_SUBTITLE:
2657 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2658 if (!pkt && ret >= 0)
2665 if (ret == AVERROR_EOF) {
// Decode errors are logged; post-decode processing errors are fatal.
2671 if (decode_failed) {
2672 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2673 ist->file_index, ist->st->index, av_err2str(ret));
2675 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2676 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2678 if (!decode_failed || exit_on_error)
2684 ist->got_output = 1;
2689 // During draining, we might get multiple output frames in this loop.
2690 // ffmpeg.c does not drain the filter chain on configuration changes,
2691 // which means if we send multiple frames at once to the filters, and
2692 // one of those frames changes configuration, the buffered frames will
2693 // be lost. This can upset certain FATE tests.
2694 // Decode only 1 frame per call on EOF to appease these FATE tests.
2695 // The ideal solution would be to rewrite decoding to use the new
2696 // decoding API in a better way.
2703 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2704 /* except when looping we need to flush but not to send an EOF */
2705 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2706 int ret = send_filter_eof(ist);
2708 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2713 /* handle stream copy */
2714 if (!ist->decoding_needed && pkt) {
2715 ist->dts = ist->next_dts;
// Without decoding, advance next_dts from codec parameters: audio by
// frame_size/sample_rate, video by -r, packet duration, or frame rate.
2716 switch (ist->dec_ctx->codec_type) {
2717 case AVMEDIA_TYPE_AUDIO:
2718 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2719 ist->dec_ctx->sample_rate;
2721 case AVMEDIA_TYPE_VIDEO:
2722 if (ist->framerate.num) {
2723 // TODO: Remove work-around for c99-to-c89 issue 7
2724 AVRational time_base_q = AV_TIME_BASE_Q;
2725 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2726 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2727 } else if (pkt->duration) {
2728 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2729 } else if(ist->dec_ctx->framerate.num != 0) {
2730 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2731 ist->next_dts += ((int64_t)AV_TIME_BASE *
2732 ist->dec_ctx->framerate.den * ticks) /
2733 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2737 ist->pts = ist->dts;
2738 ist->next_pts = ist->next_dts;
// Copy the packet verbatim into every stream-copy output it maps to.
2740 for (i = 0; i < nb_output_streams; i++) {
2741 OutputStream *ost = output_streams[i];
2743 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2746 do_streamcopy(ist, ost, pkt);
2749 return !eof_reached;
/*
 * Generate an SDP description covering all RTP output muxers and either
 * print it to stdout or write it to the file given with -sdp_file. Only
 * runs once every output file's header has been written. NOTE(review):
 * several lines (allocation checks, frees, early outs) are elided here.
 */
2752 static void print_sdp(void)
2757 AVIOContext *sdp_pb;
2758 AVFormatContext **avc;
// Defer until all output headers are written (the early return is elided).
2760 for (i = 0; i < nb_output_files; i++) {
2761 if (!output_files[i]->header_written)
2765 avc = av_malloc_array(nb_output_files, sizeof(*avc));
// Collect only the RTP muxer contexts; 'j' counts how many were found.
2768 for (i = 0, j = 0; i < nb_output_files; i++) {
2769 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2770 avc[j] = output_files[i]->ctx;
2778 av_sdp_create(avc, j, sdp, sizeof(sdp));
// Print to stdout by default, or write to -sdp_file when it was given.
2780 if (!sdp_filename) {
2781 printf("SDP:\n%s\n", sdp);
2784 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2785 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2787 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2788 avio_closep(&sdp_pb);
// Free the filename so the SDP is only written once.
2789 av_freep(&sdp_filename);
/*
 * AVCodecContext.get_format callback: choose a decoder pixel format from
 * the offered list. For hwaccel formats it tries to initialize the hwaccel
 * requested via -hwaccel: the generic/auto path consults the codec's hw
 * configs, the legacy path the static hwaccels[] table. Records the chosen
 * format in ist->hwaccel_pix_fmt. NOTE(review): loop bodies, 'continue's
 * and the final return are partly elided from this listing.
 */
2797 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2799 InputStream *ist = s->opaque;
2800 const enum AVPixelFormat *p;
2803 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2804 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2805 const AVCodecHWConfig *config = NULL;
// Non-hwaccel (software) formats skip the hwaccel negotiation below.
2808 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
// Generic/auto hwaccel: search the codec's hw configs for one that supports
// a device context and offers this pixel format.
2811 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2812 ist->hwaccel_id == HWACCEL_AUTO) {
2814 config = avcodec_get_hw_config(s->codec, i);
2817 if (!(config->methods &
2818 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2820 if (config->pix_fmt == *p)
2825 if (config->device_type != ist->hwaccel_device_type) {
2826 // Different hwaccel offered, ignore.
2830 ret = hwaccel_decode_init(s);
// An explicitly requested hwaccel that fails to initialize is fatal.
2832 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2833 av_log(NULL, AV_LOG_FATAL,
2834 "%s hwaccel requested for input stream #%d:%d, "
2835 "but cannot be initialized.\n",
2836 av_hwdevice_get_type_name(config->device_type),
2837 ist->file_index, ist->st->index);
2838 return AV_PIX_FMT_NONE;
// Legacy path: look this pixel format up in the static hwaccels[] table.
2843 const HWAccel *hwaccel = NULL;
2845 for (i = 0; hwaccels[i].name; i++) {
2846 if (hwaccels[i].pix_fmt == *p) {
2847 hwaccel = &hwaccels[i];
2852 // No hwaccel supporting this pixfmt.
2855 if (hwaccel->id != ist->hwaccel_id) {
2856 // Does not match requested hwaccel.
2860 ret = hwaccel->init(s);
2862 av_log(NULL, AV_LOG_FATAL,
2863 "%s hwaccel requested for input stream #%d:%d, "
2864 "but cannot be initialized.\n", hwaccel->name,
2865 ist->file_index, ist->st->index);
2866 return AV_PIX_FMT_NONE;
// Propagate a pre-existing hw frames context to the decoder context.
2870 if (ist->hw_frames_ctx) {
2871 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2872 if (!s->hw_frames_ctx)
2873 return AV_PIX_FMT_NONE;
// Remember the accepted hwaccel format (the return of *p is elided).
2876 ist->hwaccel_pix_fmt = *p;
2883 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2885 InputStream *ist = s->opaque;
2887 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2888 return ist->hwaccel_get_buffer(s, frame, flags);
2890 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream 'ist_index' when decoding is needed:
 * install the get_format/get_buffer2 callbacks, apply decoder options
 * (threads, DVB-subtitle compute_edt, sub_text_format), perform hw device
 * setup, then call avcodec_open2(). On failure an explanation is written
 * into 'error'/'error_len' and a negative code returned. NOTE(review):
 * several guards, returns and closing braces are elided in this listing.
 */
2893 static int init_input_stream(int ist_index, char *error, int error_len)
2896 InputStream *ist = input_streams[ist_index];
2898 if (ist->decoding_needed) {
2899 AVCodec *codec = ist->dec;
// No decoder found for this codec id (the guard condition is elided).
2901 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2902 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2903 return AVERROR(EINVAL);
// Hook up ffmpeg.c's callbacks; 'opaque' carries the InputStream back to them.
2906 ist->dec_ctx->opaque = ist;
2907 ist->dec_ctx->get_format = get_format;
2908 ist->dec_ctx->get_buffer2 = get_buffer;
2909 ist->dec_ctx->thread_safe_callbacks = 1;
2911 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2912 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2913 (ist->decoding_needed & DECODING_FOR_OST)) {
2914 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2915 if (ist->decoding_needed & DECODING_FOR_FILTER)
2916 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2919 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2921 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2922 * audio, and video decoders such as cuvid or mediacodec */
2923 ist->dec_ctx->pkt_timebase = ist->st->time_base;
// Default to auto thread count unless the user chose one explicitly.
2925 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2926 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2927 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2928 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2929 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2931 ret = hw_device_setup_for_decode(ist);
2933 snprintf(error, error_len, "Device setup failed for "
2934 "decoder on input stream #%d:%d : %s",
2935 ist->file_index, ist->st->index, av_err2str(ret));
2939 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2940 if (ret == AVERROR_EXPERIMENTAL)
2941 abort_codec_experimental(codec, 0);
2943 snprintf(error, error_len,
2944 "Error while opening decoder for input stream "
2946 ist->file_index, ist->st->index, av_err2str(ret));
// Any decoder option left unconsumed by avcodec_open2() is reported here.
2949 assert_avoptions(ist->decoder_opts);
// Timestamp estimates start unknown; process_input_packet() fills them in.
2952 ist->next_pts = AV_NOPTS_VALUE;
2953 ist->next_dts = AV_NOPTS_VALUE;
// Return the input stream that directly feeds this output stream
// (source_index >= 0); the case of an unmapped/filtered-only output
// (presumably returning NULL) is elided from this listing.
2958 static InputStream *get_input_stream(OutputStream *ost)
2960 if (ost->source_index >= 0)
2961 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values: returns negative, zero or positive
 * as *a is less than, equal to or greater than *b, without risking overflow
 * from a plain subtraction.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    /* Sign-of-difference without subtracting: equivalent to FFDIFFSIGN. */
    return (lhs > rhs) - (lhs < rhs);
}
/*
 * Write the muxer header for output file 'of' once every one of its streams
 * has been initialized, then drain any packets that were queued while the
 * muxer was not yet ready. NOTE(review): early returns, error returns and
 * closing braces are elided from this listing.
 */
2970 /* open the muxer when all the streams are initialized */
2971 static int check_init_output_file(OutputFile *of, int file_index)
// Bail out while any stream of this file is still uninitialized (elided).
2975 for (i = 0; i < of->ctx->nb_streams; i++) {
2976 OutputStream *ost = output_streams[of->ost_index + i];
2977 if (!ost->initialized)
2981 of->ctx->interrupt_callback = int_cb;
2983 ret = avformat_write_header(of->ctx, &of->opts);
2985 av_log(NULL, AV_LOG_ERROR,
2986 "Could not write header for output file #%d "
2987 "(incorrect codec parameters ?): %s\n",
2988 file_index, av_err2str(ret));
2991 //assert_avoptions(of->opts);
2992 of->header_written = 1;
2994 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
// Re-emit the SDP if one was requested (call target elided).
2996 if (sdp_filename || want_sdp)
2999 /* flush the muxing queues */
3000 for (i = 0; i < of->ctx->nb_streams; i++) {
3001 OutputStream *ost = output_streams[of->ost_index + i];
3003 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3004 if (!av_fifo_size(ost->muxing_queue))
3005 ost->mux_timebase = ost->st->time_base;
// Write out packets buffered before the header existed.
3007 while (av_fifo_size(ost->muxing_queue)) {
3009 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3010 write_packet(of, &pkt, ost, 1);
/*
 * Initialize the chain of bitstream filters attached to 'ost': propagate
 * codec parameters and time base through each filter in order, then copy
 * the final filter's output parameters and time base back onto the stream.
 * No-op when there are no filters. NOTE(review): error returns and the
 * success return are elided from this listing.
 */
3017 static int init_output_bsfs(OutputStream *ost)
3022 if (!ost->nb_bitstream_filters)
3025 for (i = 0; i < ost->nb_bitstream_filters; i++) {
3026 ctx = ost->bsf_ctx[i];
// Input of filter i is the output of filter i-1 (or the stream itself for i==0).
3028 ret = avcodec_parameters_copy(ctx->par_in,
3029 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3033 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3035 ret = av_bsf_init(ctx);
3037 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3038 ost->bsf_ctx[i]->filter->name);
// The last filter's output becomes the stream's effective parameters.
3043 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3044 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3048 ost->st->time_base = ctx->time_base_out;
/*
 * Initialize an output stream that copies its input without re-encoding:
 * copy codec parameters from the input stream (via a temporary codec
 * context so -c:v copy options still apply), pick a codec tag valid for the
 * output container, carry over frame rate, timing, disposition and side
 * data, and apply codec-type specific fixups. NOTE(review): error checks,
 * returns and closing braces are elided throughout this listing.
 */
3053 static int init_output_stream_streamcopy(OutputStream *ost)
3055 OutputFile *of = output_files[ost->file_index];
3056 InputStream *ist = get_input_stream(ost);
3057 AVCodecParameters *par_dst = ost->st->codecpar;
3058 AVCodecParameters *par_src = ost->ref_par;
3061 uint32_t codec_tag = par_dst->codec_tag;
3063 av_assert0(ist && !ost->filter);
// Round-trip the input parameters through enc_ctx so user-supplied encoder
// options can still be applied to the copied parameters.
3065 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3067 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3069 av_log(NULL, AV_LOG_FATAL,
3070 "Error setting up codec context options.\n");
3073 avcodec_parameters_from_context(par_src, ost->enc_ctx);
// Keep the source codec tag only when the output container accepts it
// (or has no tag table / no alternative tag of its own).
3076 unsigned int codec_tag_tmp;
3077 if (!of->ctx->oformat->codec_tag ||
3078 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3079 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3080 codec_tag = par_src->codec_tag;
3083 ret = avcodec_parameters_copy(par_dst, par_src);
3087 par_dst->codec_tag = codec_tag;
3089 if (!ost->frame_rate.num)
3090 ost->frame_rate = ist->framerate;
3091 ost->st->avg_frame_rate = ost->frame_rate;
3093 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3097 // copy timebase while removing common factors
3098 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3099 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3101 // copy estimated duration as a hint to the muxer
3102 if (ost->st->duration <= 0 && ist->st->duration > 0)
3103 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3106 ost->st->disposition = ist->st->disposition;
// Duplicate every packet side-data entry from the input stream.
3108 if (ist->st->nb_side_data) {
3109 for (i = 0; i < ist->st->nb_side_data; i++) {
3110 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3113 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3115 return AVERROR(ENOMEM);
3116 memcpy(dst_data, sd_src->data, sd_src->size);
// A user-forced rotation replaces the display matrix side data.
3120 if (ost->rotate_overridden) {
3121 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3122 sizeof(int32_t) * 9);
3124 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3127 ost->parser = av_parser_init(par_dst->codec_id);
3128 ost->parser_avctx = avcodec_alloc_context3(NULL);
3129 if (!ost->parser_avctx)
3130 return AVERROR(ENOMEM);
3132 switch (par_dst->codec_type) {
3133 case AVMEDIA_TYPE_AUDIO:
3134 if (audio_volume != 256) {
3135 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
// Clear bogus fixed block_align values for MP3/AC-3 so remuxing works.
3138 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3139 par_dst->block_align= 0;
3140 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3141 par_dst->block_align= 0;
3143 case AVMEDIA_TYPE_VIDEO:
// Sample aspect ratio: -aspect override wins, then the input stream's
// value, then the codec-level value.
3144 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3146 av_mul_q(ost->frame_aspect_ratio,
3147 (AVRational){ par_dst->height, par_dst->width });
3148 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3149 "with stream copy may produce invalid files\n");
3151 else if (ist->st->sample_aspect_ratio.num)
3152 sar = ist->st->sample_aspect_ratio;
3154 sar = par_src->sample_aspect_ratio;
3155 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3156 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3157 ost->st->r_frame_rate = ist->st->r_frame_rate;
3161 ost->mux_timebase = ist->st->time_base;
/*
 * Set the "encoder" metadata tag on the output stream to identify
 * libavcodec and the encoder in use, unless the user already set one.
 * When bitexact output is requested (via -fflags or codec -flags) only
 * "Lavc <encoder>" is written, without the version string.
 */
3166 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3168 AVDictionaryEntry *e;
3170 uint8_t *encoder_string;
3171 int encoder_string_len;
3172 int format_flags = 0;
3173 int codec_flags = ost->enc_ctx->flags;
// Respect a user-provided encoder tag.
3175 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
// Evaluate -fflags / -flags option strings to detect 'bitexact'.
3178 e = av_dict_get(of->opts, "fflags", NULL, 0);
3180 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3183 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3185 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3187 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3190 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3193 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3194 encoder_string = av_mallocz(encoder_string_len);
3195 if (!encoder_string)
3198 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3199 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3201 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3202 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
// DONT_STRDUP_VAL hands ownership of encoder_string to the dictionary.
3203 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3204 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse a -force_key_frames specification 'kf' (comma-separated times) into
 * a sorted array of pts values in avctx->time_base units, stored on 'ost'
 * as forced_kf_pts/forced_kf_count. A "chapters[+offset]" entry expands to
 * one keyframe per chapter of the output file. Exits fatally on allocation
 * failure. NOTE(review): some counting and loop-advance lines are elided
 * from this listing.
 */
3207 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3208 AVCodecContext *avctx)
3211 int n = 1, i, size, index = 0;
// Count entries by scanning for commas (the increment is elided).
3214 for (p = kf; *p; p++)
3218 pts = av_malloc_array(size, sizeof(*pts));
3220 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3225 for (i = 0; i < n; i++) {
3226 char *next = strchr(p, ',');
// "chapters[+offset]": add a keyframe at each chapter start plus offset.
3231 if (!memcmp(p, "chapters", 8)) {
3233 AVFormatContext *avf = output_files[ost->file_index]->ctx;
// Grow the array to fit one extra entry per chapter (overflow-checked).
3236 if (avf->nb_chapters > INT_MAX - size ||
3237 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3239 av_log(NULL, AV_LOG_FATAL,
3240 "Could not allocate forced key frames array.\n");
3243 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3244 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3246 for (j = 0; j < avf->nb_chapters; j++) {
3247 AVChapter *c = avf->chapters[j];
3248 av_assert1(index < size);
3249 pts[index++] = av_rescale_q(c->start, c->time_base,
3250 avctx->time_base) + t;
// Plain entry: parse one timestamp and rescale to the encoder time base.
3255 t = parse_time_or_die("force_key_frames", p, 1);
3256 av_assert1(index < size);
3257 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
// Keyframe times must be sorted for the lookup done at encode time.
3264 av_assert0(index == size);
3265 qsort(pts, size, sizeof(*pts), compare_int64);
3266 ost->forced_kf_count = size;
3267 ost->forced_kf_pts = pts;
/*
 * Choose the encoder time base: an explicit positive -enc_time_base wins;
 * a negative value means "copy the input stream's time base" (falls back to
 * 'default_time_base' with a warning when no input stream is available);
 * otherwise 'default_time_base' is used. NOTE(review): the returns between
 * branches are elided from this listing.
 */
3270 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3272 InputStream *ist = get_input_stream(ost);
3273 AVCodecContext *enc_ctx = ost->enc_ctx;
3274 AVFormatContext *oc;
3276 if (ost->enc_timebase.num > 0) {
3277 enc_ctx->time_base = ost->enc_timebase;
3281 if (ost->enc_timebase.num < 0) {
3283 enc_ctx->time_base = ist->st->time_base;
3287 oc = output_files[ost->file_index]->ctx;
3288 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3291 enc_ctx->time_base = default_time_base;
/*
 * Configure the encoder context for an output stream that re-encodes:
 * set metadata/disposition, derive the frame rate, then fill codec-type
 * specific parameters (sample format/rate/channels for audio; size, SAR,
 * pixel format, time base and forced keyframes for video; sizes for
 * subtitles) mostly from the buffersink feeding the encoder. NOTE(review):
 * this listing is elided — returns, braces and some branches are missing.
 */
3294 static int init_output_stream_encode(OutputStream *ost)
3296 InputStream *ist = get_input_stream(ost);
3297 AVCodecContext *enc_ctx = ost->enc_ctx;
3298 AVCodecContext *dec_ctx = NULL;
3299 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3302 set_encoder_id(output_files[ost->file_index], ost);
3304 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3305 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3306 // which have to be filtered out to prevent leaking them to output files.
3307 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3310 ost->st->disposition = ist->st->disposition;
3312 dec_ctx = ist->dec_ctx;
3314 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
// If this is the only stream of its media type in the file, mark audio and
// video streams as the default disposition.
3316 for (j = 0; j < oc->nb_streams; j++) {
3317 AVStream *st = oc->streams[j];
3318 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3321 if (j == oc->nb_streams)
3322 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3323 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3324 ost->st->disposition = AV_DISPOSITION_DEFAULT;
// Frame rate fallback chain: buffersink -> -r on input -> r_frame_rate ->
// hard-coded 25 fps, then clamp to the encoder's supported list.
3327 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3328 if (!ost->frame_rate.num)
3329 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3330 if (ist && !ost->frame_rate.num)
3331 ost->frame_rate = ist->framerate;
3332 if (ist && !ost->frame_rate.num)
3333 ost->frame_rate = ist->st->r_frame_rate;
3334 if (ist && !ost->frame_rate.num) {
3335 ost->frame_rate = (AVRational){25, 1};
3336 av_log(NULL, AV_LOG_WARNING,
3338 "about the input framerate is available. Falling "
3339 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3340 "if you want a different framerate.\n",
3341 ost->file_index, ost->index);
3343 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3344 if (ost->enc->supported_framerates && !ost->force_fps) {
3345 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3346 ost->frame_rate = ost->enc->supported_framerates[idx];
3348 // reduce frame rate for mpeg4 to be within the spec limits
3349 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3350 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3351 ost->frame_rate.num, ost->frame_rate.den, 65535);
3355 switch (enc_ctx->codec_type) {
3356 case AVMEDIA_TYPE_AUDIO:
// Audio parameters come from the buffersink feeding this encoder.
3357 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3359 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3360 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3361 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3362 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3363 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3365 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3368 case AVMEDIA_TYPE_VIDEO:
3369 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3371 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3372 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3373 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3374 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3375 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3376 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
// Forced keyframe times were parsed in AV_TIME_BASE; move them to the
// encoder time base now that it is known.
3378 for (j = 0; j < ost->forced_kf_count; j++)
3379 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3381 enc_ctx->time_base);
3383 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3384 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3385 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3386 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3387 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3388 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3390 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3392 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3393 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3395 enc_ctx->framerate = ost->frame_rate;
3397 ost->st->avg_frame_rate = ost->frame_rate;
// When the frame geometry/format changed relative to the decoder, reset
// bits_per_raw_sample to the user-specified value (condition partly elided).
3400 enc_ctx->width != dec_ctx->width ||
3401 enc_ctx->height != dec_ctx->height ||
3402 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3403 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
// -force_key_frames: "expr:" compiles an expression, "source" keeps the
// input keyframes, anything else is a static time list.
3406 if (ost->forced_keyframes) {
3407 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3408 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3409 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3411 av_log(NULL, AV_LOG_ERROR,
3412 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3415 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3416 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3417 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3418 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3420 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3421 // parse it only for static kf timings
3422 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3423 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3427 case AVMEDIA_TYPE_SUBTITLE:
3428 enc_ctx->time_base = AV_TIME_BASE_Q;
3429 if (!enc_ctx->width) {
3430 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3431 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3434 case AVMEDIA_TYPE_DATA:
3441 ost->mux_timebase = enc_ctx->time_base;
3446 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3450 if (ost->encoding_needed) {
3451 AVCodec *codec = ost->enc;
3452 AVCodecContext *dec = NULL;
3455 ret = init_output_stream_encode(ost);
3459 if ((ist = get_input_stream(ost)))
3461 if (dec && dec->subtitle_header) {
3462 /* ASS code assumes this buffer is null terminated so add extra byte. */
3463 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3464 if (!ost->enc_ctx->subtitle_header)
3465 return AVERROR(ENOMEM);
3466 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3467 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3469 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3470 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3471 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3473 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3474 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3475 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3477 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3478 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3479 av_buffersink_get_format(ost->filter->filter)) {
3480 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3481 if (!ost->enc_ctx->hw_frames_ctx)
3482 return AVERROR(ENOMEM);
3484 ret = hw_device_setup_for_encode(ost);
3486 snprintf(error, error_len, "Device setup failed for "
3487 "encoder on output stream #%d:%d : %s",
3488 ost->file_index, ost->index, av_err2str(ret));
3493 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3494 if (ret == AVERROR_EXPERIMENTAL)
3495 abort_codec_experimental(codec, 1);
3496 snprintf(error, error_len,
3497 "Error while opening encoder for output stream #%d:%d - "
3498 "maybe incorrect parameters such as bit_rate, rate, width or height",
3499 ost->file_index, ost->index);
3502 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3503 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3504 av_buffersink_set_frame_size(ost->filter->filter,
3505 ost->enc_ctx->frame_size);
3506 assert_avoptions(ost->encoder_opts);
3507 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3508 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3509 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3510 " It takes bits/s as argument, not kbits/s\n");
3512 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3514 av_log(NULL, AV_LOG_FATAL,
3515 "Error initializing the output stream codec context.\n");
3519 * FIXME: ost->st->codec should't be needed here anymore.
3521 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3525 if (ost->enc_ctx->nb_coded_side_data) {
3528 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3529 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3532 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3534 return AVERROR(ENOMEM);
3535 memcpy(dst_data, sd_src->data, sd_src->size);
3540 * Add global input side data. For now this is naive, and copies it
3541 * from the input stream's global side data. All side data should
3542 * really be funneled over AVFrame and libavfilter, then added back to
3543 * packet side data, and then potentially using the first packet for
3548 for (i = 0; i < ist->st->nb_side_data; i++) {
3549 AVPacketSideData *sd = &ist->st->side_data[i];
3550 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3552 return AVERROR(ENOMEM);
3553 memcpy(dst, sd->data, sd->size);
3554 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3555 av_display_rotation_set((uint32_t *)dst, 0);
3559 // copy timebase while removing common factors
3560 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3561 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3563 // copy estimated duration as a hint to the muxer
3564 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3565 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3567 ost->st->codec->codec= ost->enc_ctx->codec;
3568 } else if (ost->stream_copy) {
3569 ret = init_output_stream_streamcopy(ost);
3574 * FIXME: will the codec context used by the parser during streamcopy
3575 * This should go away with the new parser API.
3577 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3582 // parse user provided disposition, and update stream values
3583 if (ost->disposition) {
3584 static const AVOption opts[] = {
3585 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3586 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3587 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3588 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3589 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3590 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3591 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3592 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3593 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3594 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3595 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3596 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3597 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3598 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3599 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3602 static const AVClass class = {
3604 .item_name = av_default_item_name,
3606 .version = LIBAVUTIL_VERSION_INT,
3608 const AVClass *pclass = &class;
3610 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3615 /* initialize bitstream filters for the output stream
3616 * needs to be done here, because the codec id for streamcopy is not
3617 * known until now */
3618 ret = init_output_bsfs(ost);
3622 ost->initialized = 1;
3624 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn once when a packet arrives for a stream index that appeared after the
 * input file was opened; such dynamically-appearing streams are ignored.
 * NOTE(review): intermediate lines are elided in this extract (e.g. the body
 * of the early-out below), so exact control flow should be confirmed. */
3631 static void report_new_stream(int input_index, AVPacket *pkt)
3633 InputFile *file = input_files[input_index];
3634 AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this index (or a higher one) — presumably returns here */
3636 if (pkt->stream_index < file->nb_streams_warn)
3638 av_log(file->ctx, AV_LOG_WARNING,
3639 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3640 av_get_media_type_string(st->codecpar->codec_type),
3641 input_index, pkt->stream_index,
3642 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* remember the highest stream index seen so each new stream is reported once */
3643 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to a source input stream, start framerate emulation clocks, open
 * decoders and (non-filter-fed) encoders, discard unused programs, write
 * headers for stream-less outputs, and print the stream mapping.
 * Returns 0 on success or a negative AVERROR (error path elided here).
 * NOTE(review): many intermediate lines are elided in this extract. */
3646 static int transcode_init(void)
3648 int ret = 0, i, j, k;
3649 AVFormatContext *oc;
3652 char error[1024] = {0};
/* For complex-graph outputs with no explicit source, pick the (single)
 * graph input's stream as source_index so mapping/metadata can refer to it. */
3654 for (i = 0; i < nb_filtergraphs; i++) {
3655 FilterGraph *fg = filtergraphs[i];
3656 for (j = 0; j < fg->nb_outputs; j++) {
3657 OutputFilter *ofilter = fg->outputs[j];
3658 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3660 if (fg->nb_inputs != 1)
3662 for (k = nb_input_streams-1; k >= 0 ; k--)
3663 if (fg->inputs[0]->ist == input_streams[k])
3665 ofilter->ost->source_index = k;
3669 /* init framerate emulation */
3670 for (i = 0; i < nb_input_files; i++) {
3671 InputFile *ifile = input_files[i];
3672 if (ifile->rate_emu)
3673 for (j = 0; j < ifile->nb_streams; j++)
/* record wall-clock start so -re can pace packets against real time */
3674 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3677 /* init input streams */
3678 for (i = 0; i < nb_input_streams; i++)
3679 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close any encoder contexts before bailing out */
3680 for (i = 0; i < nb_output_streams; i++) {
3681 ost = output_streams[i];
3682 avcodec_close(ost->enc_ctx);
3687 /* open each encoder */
3688 for (i = 0; i < nb_output_streams; i++) {
3689 // skip streams fed from filtergraphs until we have a frame for them
3690 if (output_streams[i]->filter)
3693 ret = init_output_stream(output_streams[i], error, sizeof(error));
3698 /* discard unused programs */
3699 for (i = 0; i < nb_input_files; i++) {
3700 InputFile *ifile = input_files[i];
3701 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3702 AVProgram *p = ifile->ctx->programs[j];
3703 int discard = AVDISCARD_ALL;
/* keep the program if any of its streams is actually used */
3705 for (k = 0; k < p->nb_stream_indexes; k++)
3706 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3707 discard = AVDISCARD_DEFAULT;
3710 p->discard = discard;
3714 /* write headers for files with no streams */
3715 for (i = 0; i < nb_output_files; i++) {
3716 oc = output_files[i]->ctx;
3717 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3718 ret = check_init_output_file(output_files[i], i);
3725 /* dump the stream mapping */
3726 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3727 for (i = 0; i < nb_input_streams; i++) {
3728 ist = input_streams[i];
/* complex filtergraph inputs are reported as "stream -> filter" */
3730 for (j = 0; j < ist->nb_filters; j++) {
3731 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3732 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3733 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3734 ist->filters[j]->name);
3735 if (nb_filtergraphs > 1)
3736 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3737 av_log(NULL, AV_LOG_INFO, "\n");
3742 for (i = 0; i < nb_output_streams; i++) {
3743 ost = output_streams[i];
3745 if (ost->attachment_filename) {
3746 /* an attached file */
3747 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3748 ost->attachment_filename, ost->file_index, ost->index);
3752 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3753 /* output from a complex graph */
3754 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3755 if (nb_filtergraphs > 1)
3756 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3758 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3759 ost->index, ost->enc ? ost->enc->name : "?");
/* plain input-stream -> output-stream mapping */
3763 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3764 input_streams[ost->source_index]->file_index,
3765 input_streams[ost->source_index]->st->index,
3768 if (ost->sync_ist != input_streams[ost->source_index])
3769 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3770 ost->sync_ist->file_index,
3771 ost->sync_ist->st->index);
3772 if (ost->stream_copy)
3773 av_log(NULL, AV_LOG_INFO, " (copy)");
/* transcoding: show "in_codec (decoder) -> out_codec (encoder)";
 * "native" means the decoder/encoder name matches the codec name */
3775 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3776 const AVCodec *out_codec = ost->enc;
3777 const char *decoder_name = "?";
3778 const char *in_codec_name = "?";
3779 const char *encoder_name = "?";
3780 const char *out_codec_name = "?";
3781 const AVCodecDescriptor *desc;
3784 decoder_name = in_codec->name;
3785 desc = avcodec_descriptor_get(in_codec->id);
3787 in_codec_name = desc->name;
3788 if (!strcmp(decoder_name, in_codec_name))
3789 decoder_name = "native";
3793 encoder_name = out_codec->name;
3794 desc = avcodec_descriptor_get(out_codec->id);
3796 out_codec_name = desc->name;
3797 if (!strcmp(encoder_name, out_codec_name))
3798 encoder_name = "native";
3801 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3802 in_codec_name, decoder_name,
3803 out_codec_name, encoder_name);
3805 av_log(NULL, AV_LOG_INFO, "\n");
3809 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* mark init done so signal handlers / status reporting behave accordingly */
3813 atomic_store(&transcode_init_done, 1);
3818 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3819 static int need_output(void)
3823 for (i = 0; i < nb_output_streams; i++) {
3824 OutputStream *ost = output_streams[i];
3825 OutputFile *of = output_files[ost->file_index];
3826 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* skip streams that are finished or whose file hit -fs (limit_filesize) */
3828 if (ost->finished ||
3829 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames reached: close every stream of this output file */
3831 if (ost->frame_number >= ost->max_frames) {
3833 for (j = 0; j < of->ctx->nb_streams; j++)
3834 close_output_stream(output_streams[of->ost_index + j]);
3845 * Select the output stream to process.
3847 * @return selected output stream, or NULL if none available
/* Pick the output stream with the smallest current dts (rescaled to a common
 * timebase) so muxing stays roughly interleaved; streams with no dts yet sort
 * first (INT64_MIN).  Returns NULL when the best candidate is unavailable.
 * NOTE(review): elided lines hide the rescale target timebase and loop exit. */
3849 static OutputStream *choose_output(void)
3852 int64_t opts_min = INT64_MAX;
3853 OutputStream *ost_min = NULL;
3855 for (i = 0; i < nb_output_streams; i++) {
3856 OutputStream *ost = output_streams[i];
3857 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3858 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3860 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3861 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* streams not yet initialized (and whose inputs are not done) are skipped */
3863 if (!ost->initialized && !ost->inputs_done)
3866 if (!ost->finished && opts < opts_min) {
3868 ost_min = ost->unavailable ? NULL : ost;
/* Enable/disable terminal echo on stdin via termios (used around interactive
 * command entry so typed commands are/aren't shown). */
3874 static void set_tty_echo(int on)
3878 if (tcgetattr(0, &tty) == 0) {
3879 if (on) tty.c_lflag |= ECHO;
3880 else tty.c_lflag &= ~ECHO;
3881 tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms, never in daemon mode) and act on
 * single-key commands: q/quit, +/- verbosity, s QP histogram, c/C filter
 * commands, d/D debug modes, ? / h help.  Returns AVERROR_EXIT to stop
 * transcoding, 0 otherwise (returns elided in this extract). */
3889 static int64_t last_time;
3890 if (received_nb_signals)
3891 return AVERROR_EXIT;
3892 /* read_key() returns 0 on EOF */
3893 if(cur_time - last_time >= 100000 && !run_as_daemon){
3895 last_time = cur_time;
3899 return AVERROR_EXIT;
3900 if (key == '+') av_log_set_level(av_log_get_level()+10);
3901 if (key == '-') av_log_set_level(av_log_get_level()-10);
3902 if (key == 's') qp_hist ^= 1;
/* 'h' cycles: no dump -> packet dump -> packet+payload hex dump -> off */
3905 do_hex_dump = do_pkt_dump = 0;
3906 } else if(do_pkt_dump){
3910 av_log_set_level(AV_LOG_DEBUG);
3912 if (key == 'c' || key == 'C'){
3913 char buf[4096], target[64], command[256], arg[256] = {0};
3916 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* read one line of input, echo enabled while typing */
3919 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3924 fprintf(stderr, "\n");
3926 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3927 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3928 target, time, command, arg);
3929 for (i = 0; i < nb_filtergraphs; i++) {
3930 FilterGraph *fg = filtergraphs[i];
/* 'c' sends to the first matching filter only; 'C' to all / queued */
3933 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3934 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3935 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3936 } else if (key == 'c') {
3937 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3938 ret = AVERROR_PATCHWELCOME;
3940 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3942 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3947 av_log(NULL, AV_LOG_ERROR,
3948 "Parse error, at least 3 arguments were expected, "
3949 "only %d given in string '%s'\n", n, buf);
3952 if (key == 'd' || key == 'D'){
/* 'D' cycles through debug flags (skipping ones that would crash),
 * 'd' prompts for an explicit numeric debug value */
3955 debug = input_streams[0]->st->codec->debug<<1;
3956 if(!debug) debug = 1;
3957 while(debug & (FF_DEBUG_DCT_COEFF
3959 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3961 )) //unsupported, would just crash
3968 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3973 fprintf(stderr, "\n");
3974 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3975 fprintf(stderr,"error parsing debug value\n");
/* apply the chosen debug value to every decoder and encoder context */
3977 for(i=0;i<nb_input_streams;i++) {
3978 input_streams[i]->st->codec->debug = debug;
3980 for(i=0;i<nb_output_streams;i++) {
3981 OutputStream *ost = output_streams[i];
3982 ost->enc_ctx->debug = debug;
3984 if(debug) av_log_set_level(AV_LOG_DEBUG);
3985 fprintf(stderr,"debug=%d\n", debug);
3988 fprintf(stderr, "key function\n"
3989 "? show this help\n"
3990 "+ increase verbosity\n"
3991 "- decrease verbosity\n"
3992 "c Send command to first matching filter supporting it\n"
3993 "C Send/Queue command to all matching filters\n"
3994 "D cycle through available debug modes\n"
3995 "h dump packets/hex press to cycle through the 3 states\n"
3997 "s Show QP histogram\n"
/* Demuxer thread (one per input file when several inputs are used): reads
 * packets with av_read_frame() and forwards them to the main thread through
 * f->in_thread_queue.  Non-blocking files retry once in blocking mode and
 * warn that thread_queue_size may be too small.  On error/EOF the error is
 * stored on the queue's receive side so the main thread sees it. */
4004 static void *input_thread(void *arg)
4007 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4012 ret = av_read_frame(f->ctx, &pkt);
4014 if (ret == AVERROR(EAGAIN)) {
4019 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4022 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: fall back to a blocking send and warn */
4023 if (flags && ret == AVERROR(EAGAIN)) {
4025 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4026 av_log(f->ctx, AV_LOG_WARNING,
4027 "Thread message queue blocking; consider raising the "
4028 "thread_queue_size option (current value: %d)\n",
4029 f->thread_queue_size);
4032 if (ret != AVERROR_EOF)
4033 av_log(f->ctx, AV_LOG_ERROR,
4034 "Unable to send packet to main thread: %s\n",
/* packet could not be queued — drop it and propagate the error */
4036 av_packet_unref(&pkt);
4037 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Tear down all demuxer threads: signal EOF on the send side, drain any
 * queued packets, join the thread, then free the message queue. */
4045 static void free_input_threads(void)
4049 for (i = 0; i < nb_input_files; i++) {
4050 InputFile *f = input_files[i];
4053 if (!f || !f->in_thread_queue)
4055 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4056 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4057 av_packet_unref(&pkt);
4059 pthread_join(f->thread, NULL);
4061 av_thread_message_queue_free(&f->in_thread_queue);
/* Spawn one demuxer thread per input file (skipped entirely for a single
 * input, which is read directly).  Seekable inputs and lavfi block; everything
 * else is marked non-blocking so a stalled input cannot starve the others. */
4065 static int init_input_threads(void)
4069 if (nb_input_files == 1)
4072 for (i = 0; i < nb_input_files; i++) {
4073 InputFile *f = input_files[i];
4075 if (f->ctx->pb ? !f->ctx->pb->seekable :
4076 strcmp(f->ctx->iformat->name, "lavfi"))
4077 f->non_blocking = 1;
4078 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4079 f->thread_queue_size, sizeof(AVPacket));
4083 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4084 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4085 av_thread_message_queue_free(&f->in_thread_queue);
4086 return AVERROR(ret);
/* Receive one packet from the input file's demuxer-thread queue;
 * non-blocking when the file is marked non_blocking (condition elided). */
4092 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4094 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4096 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from an input file.  With -re (rate emulation),
 * return EAGAIN while any stream's dts is ahead of elapsed wall-clock time.
 * Multiple inputs go through the per-file thread queue, a single input reads
 * the demuxer directly. */
4100 static int get_input_packet(InputFile *f, AVPacket *pkt)
4104 for (i = 0; i < f->nb_streams; i++) {
4105 InputStream *ist = input_streams[f->ist_index + i];
4106 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4107 int64_t now = av_gettime_relative() - ist->start;
4109 return AVERROR(EAGAIN);
4114 if (nb_input_files > 1)
4115 return get_input_packet_mt(f, pkt);
4117 return av_read_frame(f->ctx, pkt);
/* True if any output stream was marked unavailable during this iteration
 * (i.e. its input returned EAGAIN); return statements elided in extract. */
4120 static int got_eagain(void)
4123 for (i = 0; i < nb_output_streams; i++)
4124 if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-stream unavailable flags before
 * attempting another round of reads. */
4129 static void reset_eagain(void)
4132 for (i = 0; i < nb_input_files; i++)
4133 input_files[i]->eagain = 0;
4134 for (i = 0; i < nb_output_streams; i++)
4135 output_streams[i]->unavailable = 0;
4138 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
// set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares tmp (in tmp_time_base) against *duration (in time_base) with
 * av_compare_ts and keeps the larger; returns the timebase of the kept value.
 * NOTE(review): the branch bodies are elided in this extract. */
4139 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4140 AVRational time_base)
4146 return tmp_time_base;
4149 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4152 return tmp_time_base;
/* Implementation of -stream_loop: seek the input back to its start, flush all
 * active decoders, then estimate the duration of the just-finished iteration
 * (including the last frame, whose length is derived from audio sample count
 * or the video frame rate) and fold it into ifile->duration so timestamps of
 * the next loop iteration can be offset correctly. */
4158 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4161 AVCodecContext *avctx;
4162 int i, ret, has_audio = 0;
4163 int64_t duration = 0;
4165 ret = av_seek_frame(is, -1, is->start_time, 0);
4169 for (i = 0; i < ifile->nb_streams; i++) {
4170 ist = input_streams[ifile->ist_index + i];
4171 avctx = ist->dec_ctx;
/* flush: send a NULL packet then reset decoder state */
4174 if (ist->decoding_needed) {
4175 process_input_packet(ist, NULL, 1);
4176 avcodec_flush_buffers(avctx);
4179 /* duration is the length of the last frame in a stream
4180 * when audio stream is present we don't care about
4181 * last video frame length because it's not defined exactly */
4182 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4186 for (i = 0; i < ifile->nb_streams; i++) {
4187 ist = input_streams[ifile->ist_index + i];
4188 avctx = ist->dec_ctx;
/* audio: last-frame duration from decoded sample count */
4191 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4192 AVRational sample_rate = {1, avctx->sample_rate};
4194 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video (no audio present): one frame at the configured or average rate */
4199 if (ist->framerate.num) {
4200 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4201 } else if (ist->st->avg_frame_rate.num) {
4202 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4207 if (!ifile->duration)
4208 ifile->time_base = ist->st->time_base;
4209 /* the total duration of the stream, max_pts - min_pts is
4210 * the duration of the stream without the last frame */
4211 duration += ist->max_pts - ist->min_pts;
4212 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* finite loop count: decrement remaining iterations (body elided) */
4216 if (ifile->loop > 0)
4224 * - 0 -- one packet was read and processed
4225 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4226 * this function should be called again
4227 * - AVERROR_EOF -- this function should not be called again
/* Read and process one packet from input file file_index.
 * Handles: EAGAIN pacing, EOF (with optional -stream_loop restart and decoder
 * flushing), late-appearing streams, timestamp wrap correction, stream-global
 * side data injection, -itsoffset/-itsscale application, inter- and
 * intra-stream timestamp discontinuity correction, and finally hands the
 * packet to process_input_packet().  Returns 0, AVERROR(EAGAIN) or
 * AVERROR_EOF as documented above the function (comment partly elided). */
4229 static int process_input(int file_index)
4231 InputFile *ifile = input_files[file_index];
4232 AVFormatContext *is;
4240 ret = get_input_packet(ifile, &pkt);
4242 if (ret == AVERROR(EAGAIN)) {
/* EOF/error with -stream_loop active: rewind and try reading again */
4246 if (ret < 0 && ifile->loop) {
4247 ret = seek_to_start(ifile, is);
4249 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4251 ret = get_input_packet(ifile, &pkt);
4252 if (ret == AVERROR(EAGAIN)) {
4258 if (ret != AVERROR_EOF) {
4259 print_error(is->url, ret);
/* true EOF: flush every decoder of this file ... */
4264 for (i = 0; i < ifile->nb_streams; i++) {
4265 ist = input_streams[ifile->ist_index + i];
4266 if (ist->decoding_needed) {
4267 ret = process_input_packet(ist, NULL, 0);
4272 /* mark all outputs that don't go through lavfi as finished */
4273 for (j = 0; j < nb_output_streams; j++) {
4274 OutputStream *ost = output_streams[j];
4276 if (ost->source_index == ifile->ist_index + i &&
4277 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4278 finish_output_stream(ost);
4282 ifile->eof_reached = 1;
4283 return AVERROR(EAGAIN);
4289 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4290 is->streams[pkt.stream_index]);
4292 /* the following test is needed in case new streams appear
4293 dynamically in stream : we ignore them */
4294 if (pkt.stream_index >= ifile->nb_streams) {
4295 report_new_stream(file_index, &pkt);
4296 goto discard_packet;
4299 ist = input_streams[ifile->ist_index + pkt.stream_index];
4301 ist->data_size += pkt.size;
4305 goto discard_packet;
4307 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4308 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
/* verbose trace of raw demuxer timestamps before any correction */
4313 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4314 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4315 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4316 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4317 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4318 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4319 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4320 av_ts2str(input_files[ist->file_index]->ts_offset),
4321 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wraparound handling for streams with < 64 pts_wrap_bits */
4324 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4325 int64_t stime, stime2;
4326 // Correcting starttime based on the enabled streams
4327 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4328 // so we instead do it here as part of discontinuity handling
4329 if ( ist->next_dts == AV_NOPTS_VALUE
4330 && ifile->ts_offset == -is->start_time
4331 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4332 int64_t new_start_time = INT64_MAX;
4333 for (i=0; i<is->nb_streams; i++) {
4334 AVStream *st = is->streams[i];
4335 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4337 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4339 if (new_start_time > is->start_time) {
4340 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4341 ifile->ts_offset = -new_start_time;
4345 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4346 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4347 ist->wrap_correction_done = 1;
/* unwrap dts/pts that jumped past the wrap point */
4349 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4350 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4351 ist->wrap_correction_done = 0;
4353 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4354 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4355 ist->wrap_correction_done = 0;
4359 /* add the stream-global side data to the first packet */
4360 if (ist->nb_packets == 1) {
4361 for (i = 0; i < ist->st->nb_side_data; i++) {
4362 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* display matrix is handled via the autorotate path, not copied here */
4365 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4368 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4371 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4375 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file timestamp offset, then the per-stream -itsscale */
4379 if (pkt.dts != AV_NOPTS_VALUE)
4380 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4381 if (pkt.pts != AV_NOPTS_VALUE)
4382 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4384 if (pkt.pts != AV_NOPTS_VALUE)
4385 pkt.pts *= ist->ts_scale;
4386 if (pkt.dts != AV_NOPTS_VALUE)
4387 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity: first dts of a stream vs the file's last ts */
4389 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4390 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4391 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4392 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4393 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4394 int64_t delta = pkt_dts - ifile->last_ts;
4395 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4396 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4397 ifile->ts_offset -= delta;
4398 av_log(NULL, AV_LOG_DEBUG,
4399 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4400 delta, ifile->ts_offset);
4401 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4402 if (pkt.pts != AV_NOPTS_VALUE)
4403 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* shift timestamps by accumulated loop duration (-stream_loop) */
4407 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4408 if (pkt.pts != AV_NOPTS_VALUE) {
4409 pkt.pts += duration;
4410 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4411 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4414 if (pkt.dts != AV_NOPTS_VALUE)
4415 pkt.dts += duration;
/* intra-stream discontinuity: dts vs the predicted next_dts */
4417 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4418 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4419 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4420 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4422 int64_t delta = pkt_dts - ist->next_dts;
4423 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4424 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4425 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4426 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4427 ifile->ts_offset -= delta;
4428 av_log(NULL, AV_LOG_DEBUG,
4429 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4430 delta, ifile->ts_offset);
4431 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4432 if (pkt.pts != AV_NOPTS_VALUE)
4433 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: drop wildly out-of-range dts/pts instead of shifting */
4436 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4437 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4438 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4439 pkt.dts = AV_NOPTS_VALUE;
4441 if (pkt.pts != AV_NOPTS_VALUE){
4442 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4443 delta = pkt_pts - ist->next_dts;
4444 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4445 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4446 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4447 pkt.pts = AV_NOPTS_VALUE;
4453 if (pkt.dts != AV_NOPTS_VALUE)
4454 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4457 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4458 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4459 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4460 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4461 av_ts2str(input_files[ist->file_index]->ts_offset),
4462 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* keep sub2video streams alive, then decode/copy the packet */
4465 sub2video_heartbeat(ist, pkt.pts);
4467 process_input_packet(ist, &pkt, 0);
4470 av_packet_unref(&pkt);
4476 * Perform a step of transcoding for the specified filter graph.
4478 * @param[in] graph filter graph to consider
4479 * @param[out] best_ist input stream where a frame would allow to continue
4480 * @return 0 for success, <0 for error
/* Run one step on a filtergraph: ask the graph for its oldest pending frame.
 * On success reap filters; on EOF reap and close all graph outputs; on EAGAIN
 * pick the input filter with the most failed buffer requests as *best_ist so
 * the caller knows which input stream to feed next (or mark outputs
 * unavailable when no input can make progress). */
4482 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4485 int nb_requests, nb_requests_max = 0;
4486 InputFilter *ifilter;
4490 ret = avfilter_graph_request_oldest(graph->graph);
4492 return reap_filters(0);
4494 if (ret == AVERROR_EOF) {
4495 ret = reap_filters(1);
4496 for (i = 0; i < graph->nb_outputs; i++)
4497 close_output_stream(graph->outputs[i]->ost);
4500 if (ret != AVERROR(EAGAIN))
4503 for (i = 0; i < graph->nb_inputs; i++) {
4504 ifilter = graph->inputs[i];
/* skip inputs whose file is stalled (eagain) or exhausted */
4506 if (input_files[ist->file_index]->eagain ||
4507 input_files[ist->file_index]->eof_reached)
4509 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4510 if (nb_requests > nb_requests_max) {
4511 nb_requests_max = nb_requests;
/* no feedable input found: every output of this graph is unavailable */
4517 for (i = 0; i < graph->nb_outputs; i++)
4518 graph->outputs[i]->ost->unavailable = 1;
4524 * Run a single step of transcoding.
4526 * @return 0 for success, <0 for error
/* Run a single transcoding step: choose the neediest output stream,
 * lazily configure/initialize its filtergraph and encoder once all input
 * formats are known, determine which input stream to read from, read and
 * process one packet, then reap filtered frames. */
4528 static int transcode_step(void)
4531 InputStream *ist = NULL;
4534 ost = choose_output();
4541 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* graph exists but is not configured yet: configure once all inputs known */
4545 if (ost->filter && !ost->filter->graph->graph) {
4546 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4547 ret = configure_filtergraph(ost->filter->graph);
4549 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4555 if (ost->filter && ost->filter->graph->graph) {
4556 if (!ost->initialized) {
4557 char error[1024] = {0};
4558 ret = init_output_stream(ost, error, sizeof(error));
4560 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4561 ost->file_index, ost->index, error);
4565 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* unconfigured graph: find an input that can still deliver data */
4569 } else if (ost->filter) {
4571 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4572 InputFilter *ifilter = ost->filter->graph->inputs[i];
4573 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* all graph inputs are done — mark this output accordingly */
4579 ost->inputs_done = 1;
4583 av_assert0(ost->source_index >= 0);
4584 ist = input_streams[ost->source_index];
4587 ret = process_input(ist->file_index);
4588 if (ret == AVERROR(EAGAIN)) {
4589 if (input_files[ist->file_index]->eagain)
4590 ost->unavailable = 1;
4595 return ret == AVERROR_EOF ? 0 : ret;
4597 return reap_filters(0);
4601 * The following code is the main loop of the file converter
/* Main conversion loop: initialize, iterate transcode_step() until no output
 * needs data or the user quits, flush decoders at EOF, write trailers, close
 * encoders/decoders, free HW contexts and per-stream resources.
 * NOTE(review): cleanup-label lines are elided in this extract. */
4603 static int transcode(void)
4606 AVFormatContext *os;
4609 int64_t timer_start;
4610 int64_t total_packets_written = 0;
4612 ret = transcode_init();
4616 if (stdin_interaction) {
4617 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4620 timer_start = av_gettime_relative();
4623 if ((ret = init_input_threads()) < 0)
4627 while (!received_sigterm) {
4628 int64_t cur_time= av_gettime_relative();
4630 /* if 'q' pressed, exits */
4631 if (stdin_interaction)
4632 if (check_keyboard_interaction(cur_time) < 0)
4635 /* check if there's any stream where output is still needed */
4636 if (!need_output()) {
4637 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4641 ret = transcode_step();
4642 if (ret < 0 && ret != AVERROR_EOF) {
4643 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4647 /* dump report by using the output first video and audio streams */
4648 print_report(0, timer_start, cur_time);
4651 free_input_threads();
4654 /* at the end of stream, we must flush the decoder buffers */
4655 for (i = 0; i < nb_input_streams; i++) {
4656 ist = input_streams[i];
4657 if (!input_files[ist->file_index]->eof_reached) {
4658 process_input_packet(ist, NULL, 0);
4665 /* write the trailer if needed and close file */
4666 for (i = 0; i < nb_output_files; i++) {
4667 os = output_files[i]->ctx;
4668 if (!output_files[i]->header_written) {
4669 av_log(NULL, AV_LOG_ERROR,
4670 "Nothing was written into output file %d (%s), because "
4671 "at least one of its streams received no packets.\n",
4675 if ((ret = av_write_trailer(os)) < 0) {
4676 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4682 /* dump report by using the first video and audio streams */
4683 print_report(1, timer_start, av_gettime_relative());
4685 /* close each encoder */
4686 for (i = 0; i < nb_output_streams; i++) {
4687 ost = output_streams[i];
4688 if (ost->encoding_needed) {
4689 av_freep(&ost->enc_ctx->stats_in);
4691 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was muxed at all */
4694 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4695 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4699 /* close each decoder */
4700 for (i = 0; i < nb_input_streams; i++) {
4701 ist = input_streams[i];
4702 if (ist->decoding_needed) {
4703 avcodec_close(ist->dec_ctx);
4704 if (ist->hwaccel_uninit)
4705 ist->hwaccel_uninit(ist->dec_ctx);
4709 av_buffer_unref(&hw_device_ctx);
4710 hw_device_free_all();
/* error path also tears down the demuxer threads (label elided) */
4717 free_input_threads();
4720 if (output_streams) {
4721 for (i = 0; i < nb_output_streams; i++) {
4722 ost = output_streams[i];
4725 if (fclose(ost->logfile))
4726 av_log(NULL, AV_LOG_ERROR,
4727 "Error closing logfile, loss of information possible: %s\n",
4728 av_err2str(AVERROR(errno)));
4729 ost->logfile = NULL;
4731 av_freep(&ost->forced_kf_pts);
4732 av_freep(&ost->apad);
4733 av_freep(&ost->disposition);
4734 av_dict_free(&ost->encoder_opts);
4735 av_dict_free(&ost->sws_dict);
4736 av_dict_free(&ost->swr_opts);
4737 av_dict_free(&ost->resample_opts);
/**
 * Return the user CPU time consumed by this process, in microseconds.
 *
 * Uses getrusage(RUSAGE_SELF) where available, or GetProcessTimes() on
 * Windows (FILETIME counts 100 ns units, hence the division by 10).  When
 * neither API exists it falls back to av_gettime_relative(), i.e. wall-clock
 * time, which only approximates CPU time.  Used by main() for the -benchmark
 * report.
 */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
/**
 * Return the peak memory usage of this process, in bytes.
 *
 * Uses getrusage(RUSAGE_SELF) where ru_maxrss is available (reported in
 * kibibytes, hence the multiplication by 1024), GetProcessMemoryInfo() on
 * Windows (PeakPagefileUsage is already in bytes), and 0 when no suitable
 * API exists.  Used for the "maxrss" figure in the final report.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
/**
 * av_log callback that discards every message.
 *
 * Installed via av_log_set_callback() when ffmpeg is started with the hidden
 * "-d" flag (see main()), so that no log output reaches stderr.  All
 * parameters are intentionally unused.
 */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
/*
 * NOTE(review): ffmpeg's entry point.  The leading integer on each line is
 * an original-file line number left over from extraction; gaps in the
 * numbering mark elided lines (error-path bodies, exit_program() calls and
 * the closing brace are among them).  Code bytes left untouched.
 */
4785 int main(int argc, char **argv)
/* Register the global cleanup hook so every exit_program() path frees
 * state before terminating. */
4792 register_exit(ffmpeg_cleanup);
4794 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
/* Apply any -loglevel/-v from the command line before anything logs. */
4796 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4797 parse_loglevel(argc, argv, options);
/* Hidden "-d" flag: silence all logging via the no-op callback
 * (presumably a debug/daemon mode — the elided body may do more). */
4799 if(argc>1 && !strcmp(argv[1], "-d")){
4801 av_log_set_callback(log_callback_null);
/* Register all codecs, devices, filters and (de)muxers, and init
 * networking.  These register_all() calls mark this as a pre-4.0-style
 * libav* API usage. */
4806 avcodec_register_all();
4808 avdevice_register_all();
4810 avfilter_register_all();
4812 avformat_network_init();
4814 show_banner(argc, argv, options);
4816 /* parse options and open all input/output files */
4817 ret = ffmpeg_parse_options(argc, argv);
/* Nothing to do at all: point the user at the help instead of failing
 * with a less useful message. */
4821 if (nb_output_files <= 0 && nb_input_files == 0) {
4823 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4827 /* file converter / grab */
4828 if (nb_output_files <= 0) {
4829 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Inputless runs are deliberately allowed (e.g. pure lavfi graphs);
 * the old rejection is kept commented out. */
4833 // if (nb_input_files == 0) {
4834 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Scan outputs for the "rtp" muxer — the elided branch presumably flags
 * RTP-specific handling (e.g. SDP output); confirm against full source. */
4838 for (i = 0; i < nb_output_files; i++) {
4839 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Run the transcode, timing user CPU for the -benchmark report. */
4843 current_time = ti = getutime();
4844 if (transcode() < 0)
4846 ti = getutime() - ti;
4848 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
/* Fail the run when the decode-error ratio exceeds -max_error_rate. */
4850 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4851 decode_error_stat[0], decode_error_stat[1]);
4852 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals interruption by the user (SIGINT/SIGTERM received). */
4855 exit_program(received_nb_signals ? 255 : main_return_code);
4856 return main_return_code;