2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* NOTE(review): the leading integer on each line is the original source
 * line number left over from extraction; gaps in the numbering indicate
 * lines omitted from this view. */
/* Program identity consumed by the shared cmdutils helpers (banner/version). */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Destination file for -vstats per-frame statistics (opened on demand). */
112 static FILE *vstats_file;
/* Constant names usable inside -force_key_frames expressions. */
114 const char *const forced_keyframes_const_names[] = {
/* user/sys/real timestamps captured for -benchmark reporting. */
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
/* Forward declarations for helpers defined later in this file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Global transcode state and frame dup/drop counters. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
/* threshold at which the "More than N frames duplicated" warning fires */
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
140 static int want_sdp = 1;
/* benchmark reference point updated by update_benchmark() */
142 static BenchmarkTimeStamps current_time;
143 AVIOContext *progress_avio = NULL;
/* scratch buffer shared by subtitle encoding in do_subtitle_out() */
145 static uint8_t *subtitle_out;
/* Global arrays of input/output files and streams, owned by this file
 * and released in ffmpeg_cleanup(). */
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;
162 /* init terminal so that we can grab keys */
/* Saved tty attributes; restored by term_exit_sigsafe() when restore_tty
 * is set. */
163 static struct termios oldtty;
164 static int restore_tty;
168 static void free_input_threads(void);
172 Convert subtitles to video with alpha to insert them in filter graphs.
173 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a blank (all-zero, i.e. fully transparent)
 * RGB32 canvas sized from the decoder output, falling back to the
 * configured sub2video dimensions. Returns 0 or a negative AVERROR from
 * av_frame_get_buffer(). NOTE(review): intermediate lines (ret decl,
 * return statements) are missing from this extraction. */
176 static int sub2video_get_blank_frame(InputStream *ist)
179 AVFrame *frame = ist->sub2video.frame;
181 av_frame_unref(frame);
182 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment so the buffer is usable by SIMD filter code */
185 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* zero fill == transparent canvas for RGB32 */
187 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas.
 * dst/dst_linesize/w/h describe the destination frame; the AVSubtitleRect
 * parameter "r" is declared on a continuation line not visible in this
 * extraction. */
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194 uint32_t *pal, *dst2;
/* Only bitmap subtitles can be rendered into pixels; other types are
 * skipped with a warning. */
198 if (r->type != SUBTITLE_BITMAP) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Reject rectangles that would write outside the canvas. */
202 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204 r->x, r->y, r->w, r->h, w, h
/* advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel) */
209 dst += r->y * dst_linesize + r->x * 4;
/* data[1] holds the palette; expand each 8-bit index to 32-bit RGBA */
211 pal = (uint32_t *)r->data[1];
212 for (y = 0; y < r->h; y++) {
213 dst2 = (uint32_t *)dst;
215 for (x = 0; x < r->w; x++)
216 *(dst2++) = pal[*(src2++)];
218 src += r->linesize[0];
/* Push the current sub2video canvas, stamped with the given pts, into every
 * filtergraph input fed by this stream. KEEP_REF is used because the same
 * frame is reused for later heartbeats. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 AVFrame *frame = ist->sub2video.frame;
228 av_assert1(frame->data[0]);
229 ist->sub2video.last_pts = frame->pts = pts;
230 for (i = 0; i < ist->nb_filters; i++) {
231 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232 AV_BUFFERSRC_FLAG_KEEP_REF |
233 AV_BUFFERSRC_FLAG_PUSH);
/* EOF from an already-closed buffersrc is expected; only warn on real
 * errors */
234 if (ret != AVERROR_EOF && ret < 0)
235 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render subtitle "sub" onto a fresh blank canvas and push it downstream.
 * A NULL sub clears the canvas (used by heartbeat/flush). The display
 * window [pts, end_pts) is derived from the subtitle's ms-based
 * start/end_display_time, rescaled into the input stream time base. */
240 void sub2video_update(InputStream *ist, AVSubtitle *sub)
242 AVFrame *frame = ist->sub2video.frame;
246 int64_t pts, end_pts;
251 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252 AV_TIME_BASE_Q, ist->st->time_base);
253 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254 AV_TIME_BASE_Q, ist->st->time_base);
255 num_rects = sub->num_rects;
/* NULL sub: reuse the previous end time as the pts of the blank frame
 * (branch structure partially missing from this extraction) */
257 pts = ist->sub2video.end_pts;
261 if (sub2video_get_blank_frame(ist) < 0) {
262 av_log(ist->dec_ctx, AV_LOG_ERROR,
263 "Impossible to get a blank canvas.\n");
266 dst = frame->data [0];
267 dst_linesize = frame->linesize[0];
/* composite every rectangle onto the canvas */
268 for (i = 0; i < num_rects; i++)
269 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270 sub2video_push_ref(ist, pts);
271 ist->sub2video.end_pts = end_pts;
/* Called whenever a frame is read from the file containing ist: re-send the
 * last sub2video frame on sibling subtitle streams so video filters
 * (e.g. overlay) waiting for subtitle input do not stall the graph. */
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
276 InputFile *infile = input_files[ist->file_index];
280 /* When a frame is read from a file, examine all sub2video streams in
281 the same file and send the sub2video frame again. Otherwise, decoded
282 video frames could be accumulating in the filter graph while a filter
283 (possibly overlay) is desperately waiting for a subtitle frame. */
284 for (i = 0; i < infile->nb_streams; i++) {
285 InputStream *ist2 = input_streams[infile->ist_index + i];
/* skip streams that are not sub2video sources */
286 if (!ist2->sub2video.frame)
288 /* subtitles seem to be usually muxed ahead of other streams;
289 if not, subtracting a larger time here is necessary */
290 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291 /* do not send the heartbeat frame if the subtitle is already ahead */
292 if (pts2 <= ist2->sub2video.last_pts)
/* refresh (clear) the canvas when the display window has ended */
294 if (pts2 >= ist2->sub2video.end_pts ||
295 (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296 sub2video_update(ist2, NULL);
/* only push when some downstream filter actually failed a request */
297 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
300 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: clear the canvas one last
 * time, then signal EOF to every buffer source it feeds. */
304 static void sub2video_flush(InputStream *ist)
309 if (ist->sub2video.end_pts < INT64_MAX)
310 sub2video_update(ist, NULL);
311 for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame == EOF marker for av_buffersrc_add_frame() */
312 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313 if (ret != AVERROR_EOF && ret < 0)
314 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
318 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the saved tty attributes.
 * The empty av_log() call presumably resets av_log's internal line state —
 * TODO confirm against the full source (guard lines are missing here). */
320 static void term_exit_sigsafe(void)
324 tcsetattr (0, TCSANOW, &oldtty);
330 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal-handling state: written from signal handlers (hence volatile /
 * atomic), read by the main transcode loop and decode_interrupt_cb(). */
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
336 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
/* POSIX signal handler: record the signal number/count; after more than 3
 * signals, emit a message with write() (async-signal-safe, unlike stdio)
 * before the hard exit. The return-type line is missing from this view. */
341 sigterm_handler(int sig)
344 received_sigterm = sig;
345 received_nb_signals++;
347 if(received_nb_signals > 3) {
348 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349 strlen("Received > 3 system signals, hard exiting\n"));
/* write() result checked only to silence warn_unused_result */
350 if (ret < 0) { /* Do nothing */ };
355 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * path as POSIX signals via sigterm_handler(). */
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
358 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
363 case CTRL_BREAK_EVENT:
364 sigterm_handler(SIGINT);
367 case CTRL_CLOSE_EVENT:
368 case CTRL_LOGOFF_EVENT:
369 case CTRL_SHUTDOWN_EVENT:
370 sigterm_handler(SIGTERM);
371 /* Basically, with these 3 events, when we return from this method the
372 process is hard terminated, so stall as long as we need to
373 to try and let the main thread(s) clean up and gracefully terminate
374 (we have at most 5 seconds, but should be done far before that). */
375 while (!ffmpeg_exited) {
381 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): this fragment is the interior of terminal/signal
 * initialization (term_init in upstream ffmpeg.c); its opening lines are
 * not visible in this extraction. It switches the tty into a raw-ish mode
 * for interactive key grabbing and installs the signal handlers. */
390 if (!run_as_daemon && stdin_interaction) {
392 if (tcgetattr (0, &tty) == 0) {
/* disable input translation, echo and canonical mode so single
 * keypresses are delivered immediately */
396 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397 |INLCR|IGNCR|ICRNL|IXON);
398 tty.c_oflag |= OPOST;
399 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400 tty.c_cflag &= ~(CSIZE|PARENB);
405 tcsetattr (0, TCSANOW, &tty);
407 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
411 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
414 signal(SIGXCPU, sigterm_handler);
417 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
419 #if HAVE_SETCONSOLECTRLHANDLER
420 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
424 /* read a key without blocking */
/* NOTE(review): large parts of this function's body are missing from this
 * extraction; it polls stdin via select() on POSIX and PeekNamedPipe /
 * console APIs on Windows. */
425 static int read_key(void)
437 n = select(1, &rfds, NULL, NULL, &tv);
446 # if HAVE_PEEKNAMEDPIPE
448 static HANDLE input_handle;
451 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes, distinguishing console vs pipe input */
452 is_pipe = !GetConsoleMode(input_handle, &dw);
456 /* When running under a GUI, you will end here. */
457 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
458 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once the number of received
 * signals exceeds the transcode-init flag (so one signal aborts I/O after
 * init is done, two are needed during init). */
476 static int decode_interrupt_cb(void *ctx)
478 return received_nb_signals > atomic_load(&transcode_init_done);
/* shared interrupt callback installed on every AVFormatContext */
481 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Free all global transcode state — filtergraphs (with queued frames and
 * subtitles), output files/streams (with queued packets), input
 * files/streams — then report final status. "ret" is the would-be process
 * return code; only used for the failure log here. */
483 static void ffmpeg_cleanup(int ret)
488 int maxrss = getmaxrss() / 1024;
489 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: drain per-input frame/subtitle queues, free names --- */
492 for (i = 0; i < nb_filtergraphs; i++) {
493 FilterGraph *fg = filtergraphs[i];
494 avfilter_graph_free(&fg->graph);
495 for (j = 0; j < fg->nb_inputs; j++) {
496 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
498 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499 sizeof(frame), NULL);
500 av_frame_free(&frame);
502 av_fifo_freep(&fg->inputs[j]->frame_queue);
503 if (fg->inputs[j]->ist->sub2video.sub_queue) {
504 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
506 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
507 &sub, sizeof(sub), NULL);
508 avsubtitle_free(&sub);
510 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
512 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
513 av_freep(&fg->inputs[j]->name);
514 av_freep(&fg->inputs[j]);
516 av_freep(&fg->inputs);
517 for (j = 0; j < fg->nb_outputs; j++) {
518 av_freep(&fg->outputs[j]->name);
519 av_freep(&fg->outputs[j]->formats);
520 av_freep(&fg->outputs[j]->channel_layouts);
521 av_freep(&fg->outputs[j]->sample_rates);
522 av_freep(&fg->outputs[j]);
524 av_freep(&fg->outputs);
525 av_freep(&fg->graph_desc);
527 av_freep(&filtergraphs[i]);
529 av_freep(&filtergraphs);
531 av_freep(&subtitle_out);
/* --- output files: close muxer contexts and their options --- */
534 for (i = 0; i < nb_output_files; i++) {
535 OutputFile *of = output_files[i];
540 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
542 avformat_free_context(s);
543 av_dict_free(&of->opts);
545 av_freep(&output_files[i]);
/* --- output streams: bitstream filters, frames, queued packets --- */
547 for (i = 0; i < nb_output_streams; i++) {
548 OutputStream *ost = output_streams[i];
553 for (j = 0; j < ost->nb_bitstream_filters; j++)
554 av_bsf_free(&ost->bsf_ctx[j]);
555 av_freep(&ost->bsf_ctx);
557 av_frame_free(&ost->filtered_frame);
558 av_frame_free(&ost->last_frame);
559 av_dict_free(&ost->encoder_opts);
561 av_freep(&ost->forced_keyframes);
562 av_expr_free(ost->forced_keyframes_pexpr);
563 av_freep(&ost->avfilter);
564 av_freep(&ost->logfile_prefix);
566 av_freep(&ost->audio_channels_map);
567 ost->audio_channels_mapped = 0;
569 av_dict_free(&ost->sws_dict);
571 avcodec_free_context(&ost->enc_ctx);
572 avcodec_parameters_free(&ost->ref_par);
574 if (ost->muxing_queue) {
575 while (av_fifo_size(ost->muxing_queue)) {
577 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
578 av_packet_unref(&pkt);
580 av_fifo_freep(&ost->muxing_queue);
583 av_freep(&output_streams[i]);
/* input threads must be stopped before closing the demuxers they feed */
586 free_input_threads();
588 for (i = 0; i < nb_input_files; i++) {
589 avformat_close_input(&input_files[i]->ctx);
590 av_freep(&input_files[i]);
592 for (i = 0; i < nb_input_streams; i++) {
593 InputStream *ist = input_streams[i];
595 av_frame_free(&ist->decoded_frame);
596 av_frame_free(&ist->filter_frame);
597 av_dict_free(&ist->decoder_opts);
598 avsubtitle_free(&ist->prev_sub.subtitle);
599 av_frame_free(&ist->sub2video.frame);
600 av_freep(&ist->filters);
601 av_freep(&ist->hwaccel_device);
602 av_freep(&ist->dts_buffer);
604 avcodec_free_context(&ist->dec_ctx);
606 av_freep(&input_streams[i]);
/* close the -vstats file; fclose flushes, so failure can mean data loss */
610 if (fclose(vstats_file))
611 av_log(NULL, AV_LOG_ERROR,
612 "Error closing vstats file, loss of information possible: %s\n",
613 av_err2str(AVERROR(errno)));
615 av_freep(&vstats_filename);
617 av_freep(&input_streams);
618 av_freep(&input_files);
619 av_freep(&output_streams);
620 av_freep(&output_files);
624 avformat_network_deinit();
/* final status: signal-triggered exits are considered "normal" */
626 if (received_sigterm) {
627 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
628 (int) received_sigterm);
629 } else if (ret && atomic_load(&transcode_init_done)) {
630 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every option key that also appears in b (case-sensitive
 * match). Used to drop already-consumed options before assert_avoptions()
 * checks for leftovers. */
636 void remove_avoptions(AVDictionary **a, AVDictionary *b)
638 AVDictionaryEntry *t = NULL;
640 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
641 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fail fatally if any option in m was not consumed by the library — i.e.
 * the user supplied an option that matched nothing. */
645 void assert_avoptions(AVDictionary *m)
647 AVDictionaryEntry *t;
648 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
649 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort helper for experimental codecs requested without -strict; the body
 * is not visible in this extraction. */
654 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: print user/sys/real time deltas since the previous
 * call, labelled by the printf-style fmt (NULL fmt lines and the
 * current_time refresh are partially missing from this extraction). */
659 static void update_benchmark(const char *fmt, ...)
661 if (do_benchmark_all) {
662 BenchmarkTimeStamps t = get_benchmark_time_stamps();
668 vsnprintf(buf, sizeof(buf), fmt, va);
670 av_log(NULL, AV_LOG_INFO,
671 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
672 t.user_usec - current_time.user_usec,
673 t.sys_usec - current_time.sys_usec,
674 t.real_usec - current_time.real_usec, buf);
/* Mark every output stream finished: "this_stream" flags go to ost itself,
 * "others" to all remaining streams. Used after a fatal muxing error. */
680 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
683 for (i = 0; i < nb_output_streams; i++) {
684 OutputStream *ost2 = output_streams[i];
685 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one finished packet to the muxer: enforce -frames limits, buffer the
 * packet while the muxer header is not yet written, rescale timestamps to
 * the stream time base, sanitize non-monotonic DTS, then interleave-write.
 * "unqueue" is nonzero when the packet is being replayed from the muxing
 * queue (so it must not be counted twice). Consumes pkt. */
689 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
691 AVFormatContext *s = of->ctx;
692 AVStream *st = ost->st;
696 * Audio encoders may split the packets -- #frames in != #packets out.
697 * But there is no reordering, so we can limit the number of output packets
698 * by simply dropping them here.
699 * Counting encoded video frames needs to be done separately because of
700 * reordering, see do_video_out().
701 * Do not count the packet when unqueued because it has been counted when queued.
703 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
704 if (ost->frame_number >= ost->max_frames) {
705 av_packet_unref(pkt);
711 if (!of->header_written) {
712 AVPacket tmp_pkt = {0};
713 /* the muxer is not initialized yet, buffer the packet */
714 if (!av_fifo_space(ost->muxing_queue)) {
/* grow the queue geometrically, capped by -max_muxing_queue_size */
715 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
716 ost->max_muxing_queue_size);
717 if (new_size <= av_fifo_size(ost->muxing_queue)) {
718 av_log(NULL, AV_LOG_ERROR,
719 "Too many packets buffered for output stream %d:%d.\n",
720 ost->file_index, ost->st->index);
723 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* the queue stores packets by value, so they must be refcounted */
727 ret = av_packet_make_refcounted(pkt);
730 av_packet_move_ref(&tmp_pkt, pkt);
731 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* -vsync drop / negative -async: strip timestamps entirely */
735 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
736 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
737 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
739 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* pull encoder quality/PSNR stats out of packet side data */
741 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
743 ost->quality = sd ? AV_RL32(sd) : -1;
744 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
746 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
748 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: packet duration must come from the frame rate */
753 if (ost->frame_rate.num && ost->is_cfr) {
754 if (pkt->duration > 0)
755 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
756 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
761 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
763 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts must never exceed pts; rebuild both from a best guess */
764 if (pkt->dts != AV_NOPTS_VALUE &&
765 pkt->pts != AV_NOPTS_VALUE &&
766 pkt->dts > pkt->pts) {
767 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
769 ost->file_index, ost->st->index);
/* set both to the median of {pts, dts, last_mux_dts + 1} */
771 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
772 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
773 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
775 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
776 pkt->dts != AV_NOPTS_VALUE &&
777 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
778 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* non-strict muxers tolerate equal DTS, strict ones need +1 */
779 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
780 if (pkt->dts < max) {
781 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
782 av_log(s, loglevel, "Non-monotonous DTS in output stream "
783 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
784 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
786 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
789 av_log(s, loglevel, "changing to %"PRId64". This may result "
790 "in incorrect timestamps in the output file.\n",
792 if (pkt->pts >= pkt->dts)
793 pkt->pts = FFMAX(pkt->pts, max);
798 ost->last_mux_dts = pkt->dts;
800 ost->data_size += pkt->size;
801 ost->packets_written++;
803 pkt->stream_index = ost->index;
/* -debug_ts style trace of what goes into the muxer */
806 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
807 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
808 av_get_media_type_string(ost->enc_ctx->codec_type),
809 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
810 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
815 ret = av_interleaved_write_frame(s, pkt);
/* on write failure, finish all streams so the transcode loop stops */
817 print_error("av_interleaved_write_frame()", ret);
818 main_return_code = 1;
819 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
821 av_packet_unref(pkt);
/* Mark ost's encoder finished; clamp the owning file's recording time to
 * the stream's current end so other streams stop at the same point. */
824 static void close_output_stream(OutputStream *ost)
826 OutputFile *of = output_files[ost->file_index];
828 ost->finished |= ENCODER_FINISHED;
830 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
831 of->recording_time = FFMIN(of->recording_time, end);
836 * Send a single packet to the output, applying any bitstream filters
837 * associated with the output stream. This may result in any number
838 * of packets actually being written, depending on what bitstream
839 * filters are applied. The supplied packet is consumed and will be
840 * blank (as if newly-allocated) when this function returns.
842 * If eof is set, instead indicate EOF to all bitstream filters and
843 * therefore flush any delayed packets to the output. A blank packet
844 * must be supplied in this case.
846 static void output_packet(OutputFile *of, AVPacket *pkt,
847 OutputStream *ost, int eof)
851 /* apply the output bitstream filters, if any */
852 if (ost->nb_bitstream_filters) {
/* feed the head of the bsf chain (NULL == EOF/flush) */
855 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
862 /* get a packet from the previous filter up the chain */
863 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
864 if (ret == AVERROR(EAGAIN)) {
868 } else if (ret == AVERROR_EOF) {
873 /* send it to the next filter down the chain or to the muxer */
874 if (idx < ost->nb_bitstream_filters) {
875 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
883 write_packet(of, pkt, ost, 0);
/* no bitstream filters: write the packet directly */
886 write_packet(of, pkt, ost, 0);
889 if (ret < 0 && ret != AVERROR_EOF) {
890 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
891 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether ost may still emit frames under -t: closes the stream and
 * (per upstream) returns 0 once its position reaches the file's recording
 * time. The return statements are missing from this extraction. */
897 static int check_recording_time(OutputStream *ost)
899 OutputFile *of = output_files[ost->file_index];
901 if (of->recording_time != INT64_MAX &&
902 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
903 AV_TIME_BASE_Q) >= 0) {
904 close_output_stream(ost);
/* Encode one audio frame (send_frame/receive_packet loop) and pass every
 * resulting packet to output_packet(). Keeps sync_opts tracking the next
 * expected audio pts in samples. */
910 static void do_audio_out(OutputFile *of, OutputStream *ost,
913 AVCodecContext *enc = ost->enc_ctx;
917 av_init_packet(&pkt);
/* stop encoding once -t recording time is reached */
921 if (!check_recording_time(ost))
/* with no pts (or negative -async) generate pts from sample count */
924 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
925 frame->pts = ost->sync_opts;
926 ost->sync_opts = frame->pts + frame->nb_samples;
927 ost->samples_encoded += frame->nb_samples;
928 ost->frames_encoded++;
930 av_assert0(pkt.size || !pkt.data);
931 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder */
933 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
934 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
935 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
936 enc->time_base.num, enc->time_base.den);
939 ret = avcodec_send_frame(enc, frame);
/* drain all packets the encoder has ready */
944 ret = avcodec_receive_packet(enc, &pkt);
945 if (ret == AVERROR(EAGAIN))
950 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
952 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
955 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
956 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
957 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
958 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
961 output_packet(of, &pkt, ost, 0);
966 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one subtitle and mux it. DVB subtitles are emitted twice (one
 * packet draws, one clears), which is what the nb loop below implements. */
970 static void do_subtitle_out(OutputFile *of,
974 int subtitle_out_max_size = 1024 * 1024;
975 int subtitle_out_size, nb, i;
/* subtitles without a pts cannot be placed on the timeline */
980 if (sub->pts == AV_NOPTS_VALUE) {
981 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the shared 1 MiB encode buffer */
990 subtitle_out = av_malloc(subtitle_out_max_size);
992 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
997 /* Note: DVB subtitle need one packet to draw them and one other
998 packet to clear them */
999 /* XXX: signal it in the codec context ? */
1000 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1005 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1007 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1008 pts -= output_files[ost->file_index]->start_time;
1009 for (i = 0; i < nb; i++) {
1010 unsigned save_num_rects = sub->num_rects;
1012 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1013 if (!check_recording_time(ost))
1017 // start_display_time is required to be 0
1018 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1019 sub->end_display_time -= sub->start_display_time;
1020 sub->start_display_time = 0;
1024 ost->frames_encoded++;
1026 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1027 subtitle_out_max_size, sub);
/* restore num_rects (the clear pass may have zeroed it) */
1029 sub->num_rects = save_num_rects;
1030 if (subtitle_out_size < 0) {
1031 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1035 av_init_packet(&pkt);
1036 pkt.data = subtitle_out;
1037 pkt.size = subtitle_out_size;
1038 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1039 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1040 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1041 /* XXX: the pts correction is handled here. Maybe handling
1042 it in the codec would be better */
/* first (draw) packet keeps start time; second (clear) packet is
 * shifted to the end of the display window */
1044 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1046 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1049 output_packet(of, &pkt, ost, 0);
/* Encode one video frame, implementing -vsync frame duplication/dropping:
 * decide how many times to emit the current (and previous) picture based on
 * the drift between input and output timelines, handle forced keyframes,
 * then run the send_frame/receive_packet loop and mux the results.
 * next_picture == NULL signals a flush. */
1053 static void do_video_out(OutputFile *of,
1055 AVFrame *next_picture,
1058 int ret, format_video_sync;
1060 AVCodecContext *enc = ost->enc_ctx;
1061 AVCodecParameters *mux_par = ost->st->codecpar;
1062 AVRational frame_rate;
1063 int nb_frames, nb0_frames, i;
1064 double delta, delta0;
1065 double duration = 0;
1067 InputStream *ist = NULL;
1068 AVFilterContext *filter = ost->filter->filter;
1070 if (ost->source_index >= 0)
1071 ist = input_streams[ost->source_index];
/* frame duration in encoder time-base units, from the filter's rate */
1073 frame_rate = av_buffersink_get_frame_rate(filter);
1074 if (frame_rate.num > 0 && frame_rate.den > 0)
1075 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1077 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1078 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* prefer the frame's own pkt_duration when no filter script rewrites it */
1080 if (!ost->filters_script &&
1084 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1085 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1088 if (!next_picture) {
/* flushing: estimate remaining dups from the recent history median */
1090 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1091 ost->last_nb0_frames[1],
1092 ost->last_nb0_frames[2]);
1094 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1095 delta = delta0 + duration;
1097 /* by default, we output a single frame */
1098 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* resolve -vsync auto into a concrete sync mode per muxer flags */
1101 format_video_sync = video_sync_method;
1102 if (format_video_sync == VSYNC_AUTO) {
1103 if(!strcmp(of->ctx->oformat->name, "avi")) {
1104 format_video_sync = VSYNC_VFR;
1106 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1108 && format_video_sync == VSYNC_CFR
1109 && input_files[ist->file_index]->ctx->nb_streams == 1
1110 && input_files[ist->file_index]->input_ts_offset == 0) {
1111 format_video_sync = VSYNC_VSCFR;
1113 if (format_video_sync == VSYNC_CFR && copy_ts) {
1114 format_video_sync = VSYNC_VSCFR;
1117 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* clamp frames that arrive too far in the past */
1121 format_video_sync != VSYNC_PASSTHROUGH &&
1122 format_video_sync != VSYNC_DROP) {
1123 if (delta0 < -0.6) {
1124 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1126 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1127 sync_ipts = ost->sync_opts;
/* per-mode dup/drop decision */
1132 switch (format_video_sync) {
1134 if (ost->frame_number == 0 && delta0 >= 0.5) {
1135 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1138 ost->sync_opts = lrint(sync_ipts);
1141 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1142 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1144 } else if (delta < -1.1)
1146 else if (delta > 1.1) {
1147 nb_frames = lrintf(delta);
1149 nb0_frames = lrintf(delta0 - 0.6);
1155 else if (delta > 0.6)
1156 ost->sync_opts = lrint(sync_ipts);
1159 case VSYNC_PASSTHROUGH:
1160 ost->sync_opts = lrint(sync_ipts);
/* never exceed the -frames limit */
1167 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1168 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* shift the dup-history window and record this decision */
1170 memmove(ost->last_nb0_frames + 1,
1171 ost->last_nb0_frames,
1172 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1173 ost->last_nb0_frames[0] = nb0_frames;
1175 if (nb0_frames == 0 && ost->last_dropped) {
1177 av_log(NULL, AV_LOG_VERBOSE,
1178 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1179 ost->frame_number, ost->st->index, ost->last_frame->pts);
1181 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
/* refuse absurd duplication counts caused by broken timestamps */
1182 if (nb_frames > dts_error_threshold * 30) {
1183 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1187 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1188 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1189 if (nb_frames_dup > dup_warning) {
1190 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1194 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1196 /* duplicates frame if needed */
1197 for (i = 0; i < nb_frames; i++) {
1198 AVFrame *in_picture;
1199 av_init_packet(&pkt);
/* the first nb0_frames iterations re-encode the previous picture */
1203 if (i < nb0_frames && ost->last_frame) {
1204 in_picture = ost->last_frame;
1206 in_picture = next_picture;
1211 in_picture->pts = ost->sync_opts;
1214 if (!check_recording_time(ost))
1216 if (ost->frame_number >= ost->max_frames)
1221 int forced_keyframe = 0;
/* propagate interlacing choices into the frame / muxer params */
1224 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1225 ost->top_field_first >= 0)
1226 in_picture->top_field_first = !!ost->top_field_first;
1228 if (in_picture->interlaced_frame) {
1229 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1230 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1232 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1234 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1236 in_picture->quality = enc->global_quality;
1237 in_picture->pict_type = 0;
/* forced keyframes: by explicit pts list, by expression, or "source" */
1239 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1240 in_picture->pts != AV_NOPTS_VALUE)
1241 ost->forced_kf_ref_pts = in_picture->pts;
1243 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1244 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1245 if (ost->forced_kf_index < ost->forced_kf_count &&
1246 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1247 ost->forced_kf_index++;
1248 forced_keyframe = 1;
1249 } else if (ost->forced_keyframes_pexpr) {
1251 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1252 res = av_expr_eval(ost->forced_keyframes_pexpr,
1253 ost->forced_keyframes_expr_const_values, NULL);
1254 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1255 ost->forced_keyframes_expr_const_values[FKF_N],
1256 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1257 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1258 ost->forced_keyframes_expr_const_values[FKF_T],
1259 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1262 forced_keyframe = 1;
1263 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1264 ost->forced_keyframes_expr_const_values[FKF_N];
1265 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1266 ost->forced_keyframes_expr_const_values[FKF_T];
1267 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1270 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1271 } else if ( ost->forced_keyframes
1272 && !strncmp(ost->forced_keyframes, "source", 6)
1273 && in_picture->key_frame==1) {
1274 forced_keyframe = 1;
1277 if (forced_keyframe) {
1278 in_picture->pict_type = AV_PICTURE_TYPE_I;
1279 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1282 update_benchmark(NULL);
1284 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1285 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1286 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1287 enc->time_base.num, enc->time_base.den);
1290 ost->frames_encoded++;
1292 ret = avcodec_send_frame(enc, in_picture);
/* drain every packet the encoder has ready */
1297 ret = avcodec_receive_packet(enc, &pkt);
1298 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1299 if (ret == AVERROR(EAGAIN))
1305 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1306 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1308 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* non-delaying encoders may omit pts; synthesize from sync_opts */
1311 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1312 pkt.pts = ost->sync_opts;
1314 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1317 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1318 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1319 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1320 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1323 frame_size = pkt.size;
1324 output_packet(of, &pkt, ost, 0);
1326 /* if two pass, output log */
1327 if (ost->logfile && enc->stats_out) {
1328 fprintf(ost->logfile, "%s", enc->stats_out);
1334 * For video, number of frames in == number of packets out.
1335 * But there may be reordering, so we can't throw away frames on encoder
1336 * flush, we need to limit them here, before they go into encoder.
1338 ost->frame_number++;
1340 if (vstats_filename && frame_size)
1341 do_video_stats(ost, frame_size);
/* remember this picture so it can be duplicated next call */
1344 if (!ost->last_frame)
1345 ost->last_frame = av_frame_alloc();
1346 av_frame_unref(ost->last_frame);
1347 if (next_picture && ost->last_frame)
1348 av_frame_ref(ost->last_frame, next_picture);
1350 av_frame_free(&ost->last_frame);
1354 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* Convert a normalized mean-squared error into a PSNR value in dB.
 * Callers divide the accumulated error by (width*height*255^2[*frames])
 * before calling, so d is already scaled to peak signal. */
1358 static double psnr(double d)
1360 return -10.0 * log10(d);
/* Append one line of per-frame video statistics (frame number, quantizer,
 * optional PSNR, frame size, time, bitrates, picture type) to the file named
 * by -vstats_file. Lazily opens vstats_file on first call.
 * NOTE(review): some original lines are elided in this extract (error handling
 * around fopen, the vstats_version >= 2 branch header, etc.). */
1363 static void do_video_stats(OutputStream *ost, int frame_size)
1365 AVCodecContext *enc;
1367 double ti1, bitrate, avg_bitrate;
1369 /* this is executed just the first time do_video_stats is called */
1371 vstats_file = fopen(vstats_filename, "w");
1379 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1380 frame_number = ost->st->nb_frames;
1381 if (vstats_version <= 1) {
1382 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1383 ost->quality / (float)FF_QP2LAMBDA);
1385 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1386 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only meaningful when the encoder was asked to compute error
 * (AV_CODEC_FLAG_PSNR) and the stored error is valid (>= 0). */
1389 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1390 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1392 fprintf(vstats_file,"f_size= %6d ", frame_size);
1393 /* compute pts value */
1394 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate from this frame; average from total bytes / time. */
1398 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1399 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1400 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1401 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1402 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1406 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/* Mark a stream — and every other stream of the same output file — as fully
 * finished for both the encoder and the muxer, so no further packets are
 * produced or written for that file. */
1408 static void finish_output_stream(OutputStream *ost)
1410 OutputFile *of = output_files[ost->file_index];
1413 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* Shutting down one stream implies shutting down all its siblings in the
 * same output container (presumably because -shortest-style constraints
 * apply per file — TODO confirm against caller). */
1416 for (i = 0; i < of->ctx->nb_streams; i++)
1417 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1422 * Get and encode new output from any of the filtergraphs, without causing
1425 * @return 0 for success, <0 for severe errors
/* Drain every configured output filtergraph sink: pull filtered frames with
 * AV_BUFFERSINK_FLAG_NO_REQUEST (never forcing the graph to run) and hand
 * them to do_video_out()/do_audio_out(). When flush is nonzero, an EOF from
 * a video sink triggers a final do_video_out(NULL) to flush the encoder.
 * Returns 0 on success, <0 on severe errors.
 * NOTE(review): loop/branch closing lines are elided in this extract. */
1427 static int reap_filters(int flush)
1429 AVFrame *filtered_frame = NULL;
1432 /* Reap all buffers present in the buffer sinks */
1433 for (i = 0; i < nb_output_streams; i++) {
1434 OutputStream *ost = output_streams[i];
1435 OutputFile *of = output_files[ost->file_index];
1436 AVFilterContext *filter;
1437 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph has not been configured yet. */
1440 if (!ost->filter || !ost->filter->graph->graph)
1442 filter = ost->filter->filter;
/* The output stream may need late initialization once the graph is known. */
1444 if (!ost->initialized) {
1445 char error[1024] = "";
1446 ret = init_output_stream(ost, error, sizeof(error));
1448 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1449 ost->file_index, ost->index, error);
1454 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1455 return AVERROR(ENOMEM);
1457 filtered_frame = ost->filtered_frame;
1460 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1461 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1462 AV_BUFFERSINK_FLAG_NO_REQUEST);
1464 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1465 av_log(NULL, AV_LOG_WARNING,
1466 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1467 } else if (flush && ret == AVERROR_EOF) {
1468 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1469 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1473 if (ost->finished) {
1474 av_frame_unref(filtered_frame);
/* Rescale the frame pts into encoder time base, keeping a higher-precision
 * double (float_pts) computed in a temporarily finer time base (extra_bits
 * of extra denominator resolution) for the fps logic downstream. */
1477 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1478 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1479 AVRational filter_tb = av_buffersink_get_time_base(filter);
1480 AVRational tb = enc->time_base;
1481 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1483 tb.den <<= extra_bits;
1485 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1486 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1487 float_pts /= 1 << extra_bits;
1488 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1489 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1491 filtered_frame->pts =
1492 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1493 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1495 //if (ost->source_index >= 0)
1496 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1498 switch (av_buffersink_get_type(filter)) {
1499 case AVMEDIA_TYPE_VIDEO:
1500 if (!ost->frame_aspect_ratio.num)
1501 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1504 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1505 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1507 enc->time_base.num, enc->time_base.den);
1510 do_video_out(of, ost, filtered_frame, float_pts);
1512 case AVMEDIA_TYPE_AUDIO:
/* A filtergraph may legally change audio parameters mid-stream; refuse it
 * when the encoder cannot adapt (no AV_CODEC_CAP_PARAM_CHANGE). */
1513 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1514 enc->channels != filtered_frame->channels) {
1515 av_log(NULL, AV_LOG_ERROR,
1516 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1519 do_audio_out(of, ost, filtered_frame);
1522 // TODO support subtitle filters
1526 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type byte totals, muxing overhead
 * percentage, and (at verbose level) per-input/per-output stream packet and
 * frame statistics. Also warns when nothing at all was encoded.
 * @param total_size  total bytes written to the (first) output, or <0 if
 *                    unknown. */
1533 static void print_final_stats(int64_t total_size)
1535 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1536 uint64_t subtitle_size = 0;
1537 uint64_t data_size = 0;
1538 float percent = -1.0;
/* Aggregate output bytes by codec type; extra_size counts global headers. */
1542 for (i = 0; i < nb_output_streams; i++) {
1543 OutputStream *ost = output_streams[i];
1544 switch (ost->enc_ctx->codec_type) {
1545 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1546 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1547 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1548 default: other_size += ost->data_size; break;
1550 extra_size += ost->enc_ctx->extradata_size;
1551 data_size += ost->data_size;
1552 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1553 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead: container bytes beyond raw elementary-stream bytes. */
1557 if (data_size && total_size>0 && total_size >= data_size)
1558 percent = 100.0 * (total_size - data_size) / data_size;
1560 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1561 video_size / 1024.0,
1562 audio_size / 1024.0,
1563 subtitle_size / 1024.0,
1564 other_size / 1024.0,
1565 extra_size / 1024.0);
1567 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1569 av_log(NULL, AV_LOG_INFO, "unknown");
1570 av_log(NULL, AV_LOG_INFO, "\n");
1572 /* print verbose per-stream stats */
1573 for (i = 0; i < nb_input_files; i++) {
1574 InputFile *f = input_files[i];
1575 uint64_t total_packets = 0, total_size = 0;
1577 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1580 for (j = 0; j < f->nb_streams; j++) {
1581 InputStream *ist = input_streams[f->ist_index + j];
1582 enum AVMediaType type = ist->dec_ctx->codec_type;
1584 total_size += ist->data_size;
1585 total_packets += ist->nb_packets;
1587 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1588 i, j, media_type_string(type));
1589 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1590 ist->nb_packets, ist->data_size);
1592 if (ist->decoding_needed) {
1593 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1594 ist->frames_decoded);
1595 if (type == AVMEDIA_TYPE_AUDIO)
1596 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1597 av_log(NULL, AV_LOG_VERBOSE, "; ");
1600 av_log(NULL, AV_LOG_VERBOSE, "\n");
1603 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1604 total_packets, total_size);
1607 for (i = 0; i < nb_output_files; i++) {
1608 OutputFile *of = output_files[i];
1609 uint64_t total_packets = 0, total_size = 0;
1611 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1614 for (j = 0; j < of->ctx->nb_streams; j++) {
1615 OutputStream *ost = output_streams[of->ost_index + j];
1616 enum AVMediaType type = ost->enc_ctx->codec_type;
1618 total_size += ost->data_size;
1619 total_packets += ost->packets_written;
1621 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1622 i, j, media_type_string(type));
1623 if (ost->encoding_needed) {
1624 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1625 ost->frames_encoded);
1626 if (type == AVMEDIA_TYPE_AUDIO)
1627 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1628 av_log(NULL, AV_LOG_VERBOSE, "; ");
1631 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1632 ost->packets_written, ost->data_size);
1634 av_log(NULL, AV_LOG_VERBOSE, "\n");
1637 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1638 total_packets, total_size);
/* An all-zero total means nothing was ever encoded — likely a user option
 * problem, so point at the usual suspects. */
1640 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1641 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1643 av_log(NULL, AV_LOG_WARNING, "\n");
1645 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic status line ("frame= ... fps= ... bitrate= ...") to
 * stderr and, when -progress is in use, a machine-readable key=value report
 * to progress_avio. Rate-limited to one update per 500ms unless this is the
 * final report, in which case print_final_stats() is also called.
 * @param is_last_report  nonzero on the final call at end of transcode
 * @param timer_start     start wall-clock time in microseconds
 * @param cur_time        current wall-clock time in microseconds
 * NOTE(review): uses function-local static state (last_time, qp_histogram),
 * so this is not reentrant. Some original lines are elided in this extract. */
1650 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1652 AVBPrint buf, buf_script;
1654 AVFormatContext *oc;
1656 AVCodecContext *enc;
1657 int frame_number, vid, i;
1660 int64_t pts = INT64_MIN + 1;
1661 static int64_t last_time = -1;
1662 static int qp_histogram[52];
1663 int hours, mins, secs, us;
1664 const char *hours_sign;
1668 if (!print_stats && !is_last_report && !progress_avio)
/* Throttle intermediate reports to at most one every 500ms. */
1671 if (!is_last_report) {
1672 if (last_time == -1) {
1673 last_time = cur_time;
1676 if ((cur_time - last_time) < 500000)
1678 last_time = cur_time;
1681 t = (cur_time-timer_start) / 1000000.0;
1684 oc = output_files[0]->ctx;
1686 total_size = avio_size(oc->pb);
1687 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1688 total_size = avio_tell(oc->pb);
1691 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1692 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1693 for (i = 0; i < nb_output_streams; i++) {
1695 ost = output_streams[i];
1697 if (!ost->stream_copy)
1698 q = ost->quality / (float) FF_QP2LAMBDA;
/* Only the first video stream drives the frame/fps fields (vid flag). */
1700 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1701 av_bprintf(&buf, "q=%2.1f ", q);
1702 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1703 ost->file_index, ost->index, q);
1705 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1708 frame_number = ost->frame_number;
1709 fps = t > 1 ? frame_number / t : 0;
1710 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1711 frame_number, fps < 9.95, fps, q);
1712 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1713 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1714 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1715 ost->file_index, ost->index, q);
1717 av_bprintf(&buf, "L");
/* qp histogram (enabled elsewhere): log2-compressed per-QP counts. */
1721 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1723 for (j = 0; j < 32; j++)
1724 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1727 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1729 double error, error_sum = 0;
1730 double scale, scale_sum = 0;
1732 char type[3] = { 'Y','U','V' };
1733 av_bprintf(&buf, "PSNR=");
1734 for (j = 0; j < 3; j++) {
/* Last report uses the cumulative encoder error (scaled by frame count);
 * intermediate reports use the per-frame error stored on the stream. */
1735 if (is_last_report) {
1736 error = enc->error[j];
1737 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1739 error = ost->error[j];
1740 scale = enc->width * enc->height * 255.0 * 255.0;
1746 p = psnr(error / scale);
1747 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1748 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1749 ost->file_index, ost->index, type[j] | 32, p);
1751 p = psnr(error_sum / scale_sum);
1752 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1753 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1754 ost->file_index, ost->index, p);
1758 /* compute min output value */
1759 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1760 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1761 ost->st->time_base, AV_TIME_BASE_Q));
1763 nb_frames_drop += ost->last_dropped;
/* Split |pts| (microseconds) into h:m:s.us for display. */
1766 secs = FFABS(pts) / AV_TIME_BASE;
1767 us = FFABS(pts) % AV_TIME_BASE;
1772 hours_sign = (pts < 0) ? "-" : "";
1774 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1775 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1777 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1778 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1779 if (pts == AV_NOPTS_VALUE) {
1780 av_bprintf(&buf, "N/A ");
1782 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1783 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1787 av_bprintf(&buf, "bitrate=N/A");
1788 av_bprintf(&buf_script, "bitrate=N/A\n");
1790 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1791 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1794 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1795 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1796 if (pts == AV_NOPTS_VALUE) {
1797 av_bprintf(&buf_script, "out_time_us=N/A\n");
1798 av_bprintf(&buf_script, "out_time_ms=N/A\n");
1799 av_bprintf(&buf_script, "out_time=N/A\n");
1801 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1802 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1803 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1804 hours_sign, hours, mins, secs, us);
1807 if (nb_frames_dup || nb_frames_drop)
1808 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1809 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1810 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1813 av_bprintf(&buf, " speed=N/A");
1814 av_bprintf(&buf_script, "speed=N/A\n");
1816 av_bprintf(&buf, " speed=%4.3gx", speed);
1817 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one terminal line; '\n' finalizes. */
1820 if (print_stats || is_last_report) {
1821 const char end = is_last_report ? '\n' : '\r';
1822 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1823 fprintf(stderr, "%s %c", buf.str, end);
1825 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1829 av_bprint_finalize(&buf, NULL);
1831 if (progress_avio) {
1832 av_bprintf(&buf_script, "progress=%s\n",
1833 is_last_report ? "end" : "continue");
1834 avio_write(progress_avio, buf_script.str,
1835 FFMIN(buf_script.len, buf_script.size - 1));
1836 avio_flush(progress_avio);
1837 av_bprint_finalize(&buf_script, NULL);
1838 if (is_last_report) {
1839 if ((ret = avio_closep(&progress_avio)) < 0)
1840 av_log(NULL, AV_LOG_ERROR,
1841 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1846 print_final_stats(total_size);
/* Seed an input filter's media parameters from the stream's codec parameters.
 * Used when no frame was ever decoded, so the filtergraph can still be
 * configured from demuxer-level information. */
1849 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1851 // We never got any input. Set a fake format, which will
1852 // come from libavformat.
1853 ifilter->format = par->format;
1854 ifilter->sample_rate = par->sample_rate;
1855 ifilter->channels = par->channels;
1856 ifilter->channel_layout = par->channel_layout;
1857 ifilter->width = par->width;
1858 ifilter->height = par->height;
1859 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/* Drain all encoders at end of input: for each encoding output stream, send
 * a NULL frame (flush) and write out every remaining packet. Streams that
 * never received data get a last-chance filtergraph configuration and
 * stream initialization so the output file is still valid.
 * NOTE(review): error-handling and loop-closing lines are elided in this
 * extract; the send/receive loop shape should be confirmed against the full
 * source. */
1862 static void flush_encoders(void)
1866 for (i = 0; i < nb_output_streams; i++) {
1867 OutputStream *ost = output_streams[i];
1868 AVCodecContext *enc = ost->enc_ctx;
1869 OutputFile *of = output_files[ost->file_index];
1871 if (!ost->encoding_needed)
1874 // Try to enable encoding with no input frames.
1875 // Maybe we should just let encoding fail instead.
1876 if (!ost->initialized) {
1877 FilterGraph *fg = ost->filter->graph;
1878 char error[1024] = "";
1880 av_log(NULL, AV_LOG_WARNING,
1881 "Finishing stream %d:%d without any data written to it.\n",
1882 ost->file_index, ost->st->index);
1884 if (ost->filter && !fg->graph) {
/* Fall back to codecpar-derived parameters for any input whose format
 * was never learned from an actual frame. */
1886 for (x = 0; x < fg->nb_inputs; x++) {
1887 InputFilter *ifilter = fg->inputs[x];
1888 if (ifilter->format < 0)
1889 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1892 if (!ifilter_has_all_input_formats(fg))
1895 ret = configure_filtergraph(fg);
1897 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1901 finish_output_stream(ost);
1904 ret = init_output_stream(ost, error, sizeof(error));
1906 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1907 ost->file_index, ost->index, error);
/* Raw/PCM-style audio (frame_size <= 1) needs no flush; neither do
 * non-audio/video streams. */
1912 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1915 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1919 const char *desc = NULL;
1923 switch (enc->codec_type) {
1924 case AVMEDIA_TYPE_AUDIO:
1927 case AVMEDIA_TYPE_VIDEO:
1934 av_init_packet(&pkt);
1938 update_benchmark(NULL);
/* avcodec_send_frame(enc, NULL) enters drain mode; keep receiving until
 * the encoder signals EOF. */
1940 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1941 ret = avcodec_send_frame(enc, NULL);
1943 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1950 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1951 if (ret < 0 && ret != AVERROR_EOF) {
1952 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1957 if (ost->logfile && enc->stats_out) {
1958 fprintf(ost->logfile, "%s", enc->stats_out);
1960 if (ret == AVERROR_EOF) {
1961 output_packet(of, &pkt, ost, 1);
1964 if (ost->finished & MUXER_FINISHED) {
1965 av_packet_unref(&pkt);
1968 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1969 pkt_size = pkt.size;
1970 output_packet(of, &pkt, ost, 0);
1971 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1972 do_video_stats(ost, pkt_size);
1979 * Check whether a packet from ist should be written into ost at this time
/* Decide whether a packet from ist may be written to ost right now:
 * the stream must actually be ost's source, and the packet must not fall
 * before the output file's -ss start time. Returns nonzero when writing is
 * allowed (some decision lines are elided in this extract). */
1981 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1983 OutputFile *of = output_files[ost->file_index];
1984 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1986 if (ost->source_index != ist_index)
1992 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy a demuxed packet straight to the output (the "-c copy" path):
 * rescale timestamps into the mux time base (offset by the output start
 * time), honor -copyts / copy_prior_start / copy_initial_nonkeyframes, stop
 * the stream once -t recording time is exceeded, and forward the payload
 * by referencing the source packet's buffer.
 * NOTE(review): several early-return lines are elided in this extract. */
1998 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2000 OutputFile *of = output_files[ost->file_index];
2001 InputFile *f = input_files [ist->file_index];
2002 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2003 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2004 AVPacket opkt = { 0 };
2006 av_init_packet(&opkt);
2008 // EOF: flush output bitstream filters.
2010 output_packet(of, &opkt, ost, 1);
/* Before the first keyframe, drop packets unless the user asked to keep
 * initial non-keyframes. */
2014 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2015 !ost->copy_initial_nonkeyframes)
2018 if (!ost->frame_number && !ost->copy_prior_start) {
2019 int64_t comp_start = start_time;
2020 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2021 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2022 if (pkt->pts == AV_NOPTS_VALUE ?
2023 ist->pts < comp_start :
2024 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* -t on the output: close the stream once past recording_time. */
2028 if (of->recording_time != INT64_MAX &&
2029 ist->pts >= of->recording_time + start_time) {
2030 close_output_stream(ost);
2034 if (f->recording_time != INT64_MAX) {
2035 start_time = f->ctx->start_time;
2036 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2037 start_time += f->start_time;
2038 if (ist->pts >= f->recording_time + start_time) {
2039 close_output_stream(ost);
2044 /* force the input stream PTS */
2045 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2048 if (pkt->pts != AV_NOPTS_VALUE)
2049 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2051 opkt.pts = AV_NOPTS_VALUE;
/* Missing DTS: substitute the input stream's tracked DTS (AV_TIME_BASE_Q). */
2053 if (pkt->dts == AV_NOPTS_VALUE)
2054 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2056 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2057 opkt.dts -= ost_tb_start_time;
/* Audio: derive pts=dts from the sample count so rounding errors do not
 * accumulate across packets (av_rescale_delta keeps a running remainder). */
2059 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2060 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2062 duration = ist->dec_ctx->frame_size;
2063 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2064 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2065 ost->mux_timebase) - ost_tb_start_time;
2068 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2070 opkt.flags = pkt->flags;
2073 opkt.buf = av_buffer_ref(pkt->buf);
2077 opkt.data = pkt->data;
2078 opkt.size = pkt->size;
2080 av_copy_packet_side_data(&opkt, pkt);
2082 output_packet(of, &opkt, ost, 0);
/* If the decoder reported no channel layout, guess a default one from the
 * channel count (capped by -guess_layout_max) and warn the user. Returns a
 * success/failure flag (return lines are elided in this extract). */
2085 int guess_input_channel_layout(InputStream *ist)
2087 AVCodecContext *dec = ist->dec_ctx;
2089 if (!dec->channel_layout) {
2090 char layout_name[256];
2092 if (dec->channels > ist->guess_layout_max)
2094 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2095 if (!dec->channel_layout)
2097 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2098 dec->channels, dec->channel_layout);
2099 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2100 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Record decode success/failure statistics and, when -xerror is set, abort
 * on decode errors or on frames the decoder flagged as corrupt. */
2105 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2107 if (*got_output || ret<0)
2108 decode_error_stat[ret<0] ++;
2110 if (ret < 0 && exit_on_error)
2113 if (exit_on_error && *got_output && ist) {
2114 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2115 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2121 // Filters can be configured only if the formats of all inputs are known.
/* Return whether every audio/video input of the filtergraph has a known
 * format (format >= 0); the graph can only be configured once this holds. */
2122 static int ifilter_has_all_input_formats(FilterGraph *fg)
2125 for (i = 0; i < fg->nb_inputs; i++) {
2126 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2127 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Push one decoded frame into an input filter. If the frame's parameters
 * (format, dimensions, sample rate/layout, hw frames context) differ from
 * what the graph was built with, reconfigure the graph first — buffering the
 * frame in a FIFO if other inputs' formats are still unknown.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): several closing/early-return lines are elided in this
 * extract. */
2133 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2135 FilterGraph *fg = ifilter->graph;
2136 int need_reinit, ret, i;
2138 /* determine if the parameters for this input changed */
2139 need_reinit = ifilter->format != frame->format;
/* A change of hw frames context also forces reinit. */
2140 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2141 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2144 switch (ifilter->ist->st->codecpar->codec_type) {
2145 case AVMEDIA_TYPE_AUDIO:
2146 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2147 ifilter->channels != frame->channels ||
2148 ifilter->channel_layout != frame->channel_layout;
2150 case AVMEDIA_TYPE_VIDEO:
2151 need_reinit |= ifilter->width != frame->width ||
2152 ifilter->height != frame->height;
2157 ret = ifilter_parameters_from_frame(ifilter, frame);
2162 /* (re)init the graph if possible, otherwise buffer the frame and return */
2163 if (need_reinit || !fg->graph) {
2164 for (i = 0; i < fg->nb_inputs; i++) {
2165 if (!ifilter_has_all_input_formats(fg)) {
/* Cannot configure yet: clone the frame into this input's FIFO and wait
 * for the remaining inputs to learn their formats. */
2166 AVFrame *tmp = av_frame_clone(frame);
2168 return AVERROR(ENOMEM);
2169 av_frame_unref(frame);
2171 if (!av_fifo_space(ifilter->frame_queue)) {
2172 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2174 av_frame_free(&tmp);
2178 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain the old graph's pending output before tearing it down. */
2183 ret = reap_filters(1);
2184 if (ret < 0 && ret != AVERROR_EOF) {
2185 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2189 ret = configure_filtergraph(fg);
2191 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2196 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2198 if (ret != AVERROR_EOF)
2199 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/* Signal end-of-stream on an input filter at timestamp pts. If the
 * filtergraph was never configured, fall back to codecpar-derived
 * parameters; failing that, an audio/video input with no known format is a
 * hard error since the graph can never be built. */
2206 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2212 if (ifilter->filter) {
2213 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2217 // the filtergraph was never configured
2218 if (ifilter->format < 0)
2219 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2220 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2221 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2222 return AVERROR_INVALIDDATA;
2229 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2230 // There is the following difference: if you got a frame, you must call
2231 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2232 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin wrapper mapping the send/receive decode API onto the older
 * got_frame-style contract used by the callers in this file (see the
 * comment block above for the pkt==NULL vs pkt->size==0 distinction). */
2233 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2240 ret = avcodec_send_packet(avctx, pkt);
2241 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2242 // decoded frames with avcodec_receive_frame() until done.
2243 if (ret < 0 && ret != AVERROR_EOF)
2247 ret = avcodec_receive_frame(avctx, frame);
2248 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Fan a decoded frame out to every filter fed by this input stream. All but
 * the last filter receive a reference copy (via ist->filter_frame) so the
 * original frame can be consumed by the final one. AVERROR_EOF from a filter
 * is ignored. Returns 0 or the first fatal error. */
2256 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2261 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2262 for (i = 0; i < ist->nb_filters; i++) {
2263 if (i < ist->nb_filters - 1) {
2264 f = ist->filter_frame;
2265 ret = av_frame_ref(f, decoded_frame);
2270 ret = ifilter_send_frame(ist->filters[i], f);
2271 if (ret == AVERROR_EOF)
2272 ret = 0; /* ignore */
2274 av_log(NULL, AV_LOG_ERROR,
2275 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet, fix up the frame's pts (preferring the frame's
 * own timestamp, then the packet pts, then the tracked stream dts), advance
 * the input stream's predicted next pts/dts by the decoded sample count, and
 * send the frame into the filter network.
 * Returns the decode result or a filter error; *got_output is set when a
 * frame was produced. NOTE(review): some lines (sample_rate divisors of the
 * next_pts updates, early-exit braces) are elided in this extract. */
2282 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2285 AVFrame *decoded_frame;
2286 AVCodecContext *avctx = ist->dec_ctx;
2288 AVRational decoded_frame_tb;
2290 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2291 return AVERROR(ENOMEM);
2292 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2293 return AVERROR(ENOMEM);
2294 decoded_frame = ist->decoded_frame;
2296 update_benchmark(NULL);
2297 ret = decode(avctx, decoded_frame, got_output, pkt);
2298 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* Guard against decoders reporting a nonsensical sample rate. */
2302 if (ret >= 0 && avctx->sample_rate <= 0) {
2303 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2304 ret = AVERROR_INVALIDDATA;
2307 if (ret != AVERROR_EOF)
2308 check_decode_result(ist, got_output, ret);
2310 if (!*got_output || ret < 0)
2313 ist->samples_decoded += decoded_frame->nb_samples;
2314 ist->frames_decoded++;
2317 /* increment next_dts to use for the case where the input stream does not
2318 have timestamps or there are multiple frames in the packet */
2319 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2321 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Timestamp priority: frame pts > packet pts > tracked stream dts. */
2325 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2326 decoded_frame_tb = ist->st->time_base;
2327 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2328 decoded_frame->pts = pkt->pts;
2329 decoded_frame_tb = ist->st->time_base;
2331 decoded_frame->pts = ist->dts;
2332 decoded_frame_tb = AV_TIME_BASE_Q;
/* av_rescale_delta carries a remainder across calls to avoid cumulative
 * rounding drift when converting to the 1/sample_rate time base. */
2334 if (decoded_frame->pts != AV_NOPTS_VALUE)
2335 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2336 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2337 (AVRational){1, avctx->sample_rate});
2338 ist->nb_samples = decoded_frame->nb_samples;
2339 err = send_frame_to_filters(ist, decoded_frame);
2341 av_frame_unref(ist->filter_frame);
2342 av_frame_unref(decoded_frame);
2343 return err < 0 ? err : ret;
/* Decode one video packet: work around drain-packet dts handling by queueing
 * dts values in ist->dts_buffer, pick a best-effort timestamp for the frame
 * (decoder estimate, -r CFR counter, or buffered dts at EOF), propagate
 * stream-level sample aspect ratio, and send the frame to the filters.
 * @param duration_pts  out: the decoded frame's pkt_duration
 * @param eof           nonzero when called to drain at end of input
 * Returns the decode result or a filter error; *got_output set on a frame.
 * NOTE(review): some lines (avpkt setup, early-return braces) are elided in
 * this extract. */
2346 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2349 AVFrame *decoded_frame;
2350 int i, ret = 0, err = 0;
2351 int64_t best_effort_timestamp;
2352 int64_t dts = AV_NOPTS_VALUE;
2355 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2356 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2358 if (!eof && pkt && pkt->size == 0)
2361 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2362 return AVERROR(ENOMEM);
2363 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2364 return AVERROR(ENOMEM);
2365 decoded_frame = ist->decoded_frame;
2366 if (ist->dts != AV_NOPTS_VALUE)
2367 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2370 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2373 // The old code used to set dts on the drain packet, which does not work
2374 // with the new API anymore.
/* Remember each packet dts so a timestamp can still be assigned to frames
 * emitted during the drain phase. */
2376 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2378 return AVERROR(ENOMEM);
2379 ist->dts_buffer = new;
2380 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2383 update_benchmark(NULL);
2384 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2385 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2389 // The following line may be required in some cases where there is no parser
2390 // or the parser does not has_b_frames correctly
2391 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2392 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2393 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2395 av_log(ist->dec_ctx, AV_LOG_WARNING,
2396 "video_delay is larger in decoder than demuxer %d > %d.\n"
2397 "If you want to help, upload a sample "
2398 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2399 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2400 ist->dec_ctx->has_b_frames,
2401 ist->st->codecpar->video_delay);
2404 if (ret != AVERROR_EOF)
2405 check_decode_result(ist, got_output, ret);
/* Diagnostic only: decoders may legitimately emit frames whose dimensions
 * or pixel format differ from the context (e.g. cropping, format changes). */
2407 if (*got_output && ret >= 0) {
2408 if (ist->dec_ctx->width != decoded_frame->width ||
2409 ist->dec_ctx->height != decoded_frame->height ||
2410 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2411 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2412 decoded_frame->width,
2413 decoded_frame->height,
2414 decoded_frame->format,
2415 ist->dec_ctx->width,
2416 ist->dec_ctx->height,
2417 ist->dec_ctx->pix_fmt);
2421 if (!*got_output || ret < 0)
2424 if(ist->top_field_first>=0)
2425 decoded_frame->top_field_first = ist->top_field_first;
2427 ist->frames_decoded++;
/* Download the frame from hardware when a hwaccel retrieve hook is set. */
2429 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2430 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2434 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2436 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2437 *duration_pts = decoded_frame->pkt_duration;
/* With -r (forced framerate) timestamps are simply a frame counter. */
2439 if (ist->framerate.num)
2440 best_effort_timestamp = ist->cfr_next_pts++;
2442 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2443 best_effort_timestamp = ist->dts_buffer[0];
/* Pop the consumed dts from the front of the buffer. */
2445 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2446 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2447 ist->nb_dts_buffer--;
2450 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2451 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2453 if (ts != AV_NOPTS_VALUE)
2454 ist->next_pts = ist->pts = ts;
2458 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2459 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2460 ist->st->index, av_ts2str(decoded_frame->pts),
2461 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2462 best_effort_timestamp,
2463 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2464 decoded_frame->key_frame, decoded_frame->pict_type,
2465 ist->st->time_base.num, ist->st->time_base.den);
2468 if (ist->st->sample_aspect_ratio.num)
2469 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2471 err = send_frame_to_filters(ist, decoded_frame);
2474 av_frame_unref(ist->filter_frame);
2475 av_frame_unref(decoded_frame);
2476 return err < 0 ? err : ret;
/* Decode one subtitle packet and dispatch the result: feed sub2video
 * filtering (immediately or via a queue) and/or every subtitle encoder
 * attached to this input stream. NOTE(review): this view of the file is
 * sampled — some lines (braces, early returns) are not visible here. */
2479 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2482 AVSubtitle subtitle;
2484 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2485 &subtitle, got_output, pkt);
2487 check_decode_result(NULL, got_output, ret);
/* On error or no output: flush any pending sub2video state (flush path
 * presumably only on EOF — surrounding lines not visible). */
2489 if (ret < 0 || !*got_output) {
2492 sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle's display time so it ends
 * when the current one starts, then swap current/previous state so the
 * *previous* (now duration-fixed) subtitle is the one emitted below. */
2496 if (ist->fix_sub_duration) {
2498 if (ist->prev_sub.got_output) {
/* Gap between the two subtitles, converted to milliseconds. */
2499 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2500 1000, AV_TIME_BASE);
2501 if (end < ist->prev_sub.subtitle.end_display_time) {
2502 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2503 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2504 ist->prev_sub.subtitle.end_display_time, end,
2505 end <= 0 ? ", dropping it" : "");
2506 ist->prev_sub.subtitle.end_display_time = end;
2509 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2510 FFSWAP(int, ret, ist->prev_sub.ret);
2511 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video: render now if the frame exists, otherwise queue the subtitle
 * until the filtergraph is configured. */
2519 if (ist->sub2video.frame) {
2520 sub2video_update(ist, &subtitle);
2521 } else if (ist->nb_filters) {
2522 if (!ist->sub2video.sub_queue)
2523 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2524 if (!ist->sub2video.sub_queue)
/* Grow the FIFO (doubling) when full before writing. */
2526 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2527 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2531 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
/* Empty subtitles carry no rectangles — nothing to encode. */
2535 if (!subtitle.num_rects)
2538 ist->frames_decoded++;
/* Send the subtitle to every output stream that encodes subtitles from
 * this input. */
2540 for (i = 0; i < nb_output_streams; i++) {
2541 OutputStream *ost = output_streams[i];
2543 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2544 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2547 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2552 avsubtitle_free(&subtitle);
/* Mark every filtergraph input fed by this input stream as finished,
 * passing the stream's last pts (rescaled back to stream time base) so the
 * filters know the final timestamp. Returns the first error from
 * ifilter_send_eof(), presumably 0 on success (tail not visible here). */
2556 static int send_filter_eof(InputStream *ist)
2559 /* TODO keep pts also in stream time base to avoid converting back */
2560 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2561 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2563 for (i = 0; i < ist->nb_filters; i++) {
2564 ret = ifilter_send_eof(ist->filters[i], pts);
2571 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Core per-packet driver: decode (or stream-copy) one input packet for an
 * input stream, maintaining the dts/pts prediction state (ist->dts/pts and
 * ist->next_dts/next_pts, all in AV_TIME_BASE units). pkt == NULL means EOF
 * (flush the decoders); no_eof suppresses sending EOF to the filters (used
 * when looping input). Returns nonzero while the stream has not reached EOF.
 * NOTE(review): this view is sampled — braces and some statements between
 * the visible lines are not shown. */
2572 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2576 int eof_reached = 0;
/* First packet: seed ist->dts with an estimate of the decoder delay
 * (has_b_frames frames before the first output), negative so that the
 * first output frame lands at ~0. */
2579 if (!ist->saw_first_ts) {
2580 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2582 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2583 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2584 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2586 ist->saw_first_ts = 1;
2589 if (ist->next_dts == AV_NOPTS_VALUE)
2590 ist->next_dts = ist->dts;
2591 if (ist->next_pts == AV_NOPTS_VALUE)
2592 ist->next_pts = ist->pts;
2596 av_init_packet(&avpkt);
/* Resynchronize predictions from the packet's own dts when present. */
2603 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2604 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2605 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2606 ist->next_pts = ist->pts = ist->dts;
2609 // while we have more to decode or while the decoder did output something on EOF
2610 while (ist->decoding_needed) {
2611 int64_t duration_dts = 0;
2612 int64_t duration_pts = 0;
2614 int decode_failed = 0;
2616 ist->pts = ist->next_pts;
2617 ist->dts = ist->next_dts;
/* 'repeating' iterations re-poll the decoder without resending the packet. */
2619 switch (ist->dec_ctx->codec_type) {
2620 case AVMEDIA_TYPE_AUDIO:
2621 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2624 case AVMEDIA_TYPE_VIDEO:
2625 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2627 if (!repeating || !pkt || got_output) {
/* Estimate the frame duration in dts terms: prefer the packet duration,
 * else derive it from the decoder framerate and repeat_pict. */
2628 if (pkt && pkt->duration) {
2629 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2630 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2631 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2632 duration_dts = ((int64_t)AV_TIME_BASE *
2633 ist->dec_ctx->framerate.den * ticks) /
2634 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2637 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2638 ist->next_dts += duration_dts;
2640 ist->next_dts = AV_NOPTS_VALUE;
/* Advance pts by the decoded frame's own duration when known, otherwise
 * fall back to the dts-based estimate. */
2644 if (duration_pts > 0) {
2645 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2647 ist->next_pts += duration_dts;
2651 case AVMEDIA_TYPE_SUBTITLE:
2654 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2655 if (!pkt && ret >= 0)
2662 if (ret == AVERROR_EOF) {
2668 if (decode_failed) {
2669 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2670 ist->file_index, ist->st->index, av_err2str(ret));
2672 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2673 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2675 if (!decode_failed || exit_on_error)
2681 ist->got_output = 1;
2686 // During draining, we might get multiple output frames in this loop.
2687 // ffmpeg.c does not drain the filter chain on configuration changes,
2688 // which means if we send multiple frames at once to the filters, and
2689 // one of those frames changes configuration, the buffered frames will
2690 // be lost. This can upset certain FATE tests.
2691 // Decode only 1 frame per call on EOF to appease these FATE tests.
2692 // The ideal solution would be to rewrite decoding to use the new
2693 // decoding API in a better way.
2700 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2701 /* except when looping we need to flush but not to send an EOF */
2702 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2703 int ret = send_filter_eof(ist);
2705 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2710 /* handle stream copy */
2711 if (!ist->decoding_needed && pkt) {
2712 ist->dts = ist->next_dts;
2713 switch (ist->dec_ctx->codec_type) {
2714 case AVMEDIA_TYPE_AUDIO:
2715 av_assert1(pkt->duration >= 0);
2716 if (ist->dec_ctx->sample_rate) {
2717 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2718 ist->dec_ctx->sample_rate;
2720 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2723 case AVMEDIA_TYPE_VIDEO:
2724 if (ist->framerate.num) {
/* Forced CFR: snap next_dts to the next frame boundary of ist->framerate. */
2725 // TODO: Remove work-around for c99-to-c89 issue 7
2726 AVRational time_base_q = AV_TIME_BASE_Q;
2727 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2728 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2729 } else if (pkt->duration) {
2730 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2731 } else if(ist->dec_ctx->framerate.num != 0) {
2732 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2733 ist->next_dts += ((int64_t)AV_TIME_BASE *
2734 ist->dec_ctx->framerate.den * ticks) /
2735 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2739 ist->pts = ist->dts;
2740 ist->next_pts = ist->next_dts;
/* Forward the packet unchanged to every stream-copy output it maps to. */
2742 for (i = 0; i < nb_output_streams; i++) {
2743 OutputStream *ost = output_streams[i];
2745 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2748 do_streamcopy(ist, ost, pkt);
2751 return !eof_reached;
/* Generate an SDP description covering all RTP output files and either
 * print it to stdout or write it to -sdp_file. Waits until every output
 * file header has been written (returns early otherwise — return lines not
 * visible in this sampled view). */
2754 static void print_sdp(void)
2759 AVIOContext *sdp_pb;
2760 AVFormatContext **avc;
/* All muxers must be initialized before an SDP can be produced. */
2762 for (i = 0; i < nb_output_files; i++) {
2763 if (!output_files[i]->header_written)
2767 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP outputs; j counts how many were found. */
2770 for (i = 0, j = 0; i < nb_output_files; i++) {
2771 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2772 avc[j] = output_files[i]->ctx;
2780 av_sdp_create(avc, j, sdp, sizeof(sdp));
2782 if (!sdp_filename) {
2783 printf("SDP:\n%s\n", sdp);
2786 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2787 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2789 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2790 avio_closep(&sdp_pb);
2791 av_freep(&sdp_filename);
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick the one matching the user's requested hwaccel,
 * initializing the hwaccel as a side effect. Software formats and
 * unmatched hw formats are skipped (continue lines not visible in this
 * sampled view). */
2799 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2801 InputStream *ist = s->opaque;
2802 const enum AVPixelFormat *p;
2805 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2806 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2807 const AVCodecHWConfig *config = NULL;
/* Only consider hardware-accelerated formats here. */
2810 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Generic/auto hwaccel: match against the codec's own hw configs. */
2813 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2814 ist->hwaccel_id == HWACCEL_AUTO) {
2816 config = avcodec_get_hw_config(s->codec, i);
2819 if (!(config->methods &
2820 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2822 if (config->pix_fmt == *p)
2827 if (config->device_type != ist->hwaccel_device_type) {
2828 // Different hwaccel offered, ignore.
2832 ret = hwaccel_decode_init(s);
/* Explicitly requested generic hwaccel that fails to init is fatal. */
2834 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2835 av_log(NULL, AV_LOG_FATAL,
2836 "%s hwaccel requested for input stream #%d:%d, "
2837 "but cannot be initialized.\n",
2838 av_hwdevice_get_type_name(config->device_type),
2839 ist->file_index, ist->st->index);
2840 return AV_PIX_FMT_NONE;
/* Legacy path: look up this pixel format in the static hwaccels[] table. */
2845 const HWAccel *hwaccel = NULL;
2847 for (i = 0; hwaccels[i].name; i++) {
2848 if (hwaccels[i].pix_fmt == *p) {
2849 hwaccel = &hwaccels[i];
2854 // No hwaccel supporting this pixfmt.
2857 if (hwaccel->id != ist->hwaccel_id) {
2858 // Does not match requested hwaccel.
2862 ret = hwaccel->init(s);
2864 av_log(NULL, AV_LOG_FATAL,
2865 "%s hwaccel requested for input stream #%d:%d, "
2866 "but cannot be initialized.\n", hwaccel->name,
2867 ist->file_index, ist->st->index);
2868 return AV_PIX_FMT_NONE;
/* Propagate a pre-existing hw frames context into the decoder. */
2872 if (ist->hw_frames_ctx) {
2873 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2874 if (!s->hw_frames_ctx)
2875 return AV_PIX_FMT_NONE;
2878 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * hwaccel when one is active for this frame's format, otherwise fall back
 * to the default libavcodec allocator. */
2885 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2887 InputStream *ist = s->opaque;
2889 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2890 return ist->hwaccel_get_buffer(s, frame, flags);
2892 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for input stream ist_index (when decoding is needed):
 * install the get_format/get_buffer callbacks, set decoder options, set up
 * any hardware device, and call avcodec_open2(). On failure writes a
 * human-readable message into 'error' and returns a negative AVERROR. */
2895 static int init_input_stream(int ist_index, char *error, int error_len)
2898 InputStream *ist = input_streams[ist_index];
2900 if (ist->decoding_needed) {
2901 AVCodec *codec = ist->dec;
/* No decoder was found for this codec id. */
2903 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2904 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2905 return AVERROR(EINVAL);
/* Hook this InputStream into the codec context so the callbacks can
 * recover it via s->opaque. */
2908 ist->dec_ctx->opaque = ist;
2909 ist->dec_ctx->get_format = get_format;
2910 ist->dec_ctx->get_buffer2 = get_buffer;
2911 ist->dec_ctx->thread_safe_callbacks = 1;
2913 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2914 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2915 (ist->decoding_needed & DECODING_FOR_OST)) {
2916 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2917 if (ist->decoding_needed & DECODING_FOR_FILTER)
2918 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n")
;
2921 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2923 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2924 * audio, and video decoders such as cuvid or mediacodec */
2925 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2927 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2928 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2929 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2930 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2931 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2933 ret = hw_device_setup_for_decode(ist);
2935 snprintf(error, error_len, "Device setup failed for "
2936 "decoder on input stream #%d:%d : %s",
2937 ist->file_index, ist->st->index, av_err2str(ret));
2941 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2942 if (ret == AVERROR_EXPERIMENTAL)
2943 abort_codec_experimental(codec, 0);
2945 snprintf(error, error_len,
2946 "Error while opening decoder for input stream "
2948 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left unconsumed are a user error. */
2951 assert_avoptions(ist->decoder_opts);
2954 ist->next_pts = AV_NOPTS_VALUE;
2955 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream an output stream is mapped to, or presumably
 * NULL when the output is fed purely by a filtergraph (source_index < 0;
 * the fallthrough return is not visible in this sampled view). */
2960 static InputStream *get_input_stream(OutputStream *ost)
2962 if (ost->source_index >= 0)
2963 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values, ascending (overflow-safe via
 * FFDIFFSIGN rather than subtraction). */
2967 static int compare_int64(const void *a, const void *b)
2969 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2972 /* open the muxer when all the streams are initialized */
2973 static int check_init_output_file(OutputFile *of, int file_index)
/* Return early (without writing the header) while any stream of this
 * output file is still uninitialized. */
2977 for (i = 0; i < of->ctx->nb_streams; i++) {
2978 OutputStream *ost = output_streams[of->ost_index + i];
2979 if (!ost->initialized)
2983 of->ctx->interrupt_callback = int_cb;
2985 ret = avformat_write_header(of->ctx, &of->opts);
2987 av_log(NULL, AV_LOG_ERROR,
2988 "Could not write header for output file #%d "
2989 "(incorrect codec parameters ?): %s\n",
2990 file_index, av_err2str(ret));
2993 //assert_avoptions(of->opts);
2994 of->header_written = 1;
2996 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2998 if (sdp_filename || want_sdp)
3001 /* flush the muxing queues */
3002 for (i = 0; i < of->ctx->nb_streams; i++) {
3003 OutputStream *ost = output_streams[of->ost_index + i];
3005 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3006 if (!av_fifo_size(ost->muxing_queue))
3007 ost->mux_timebase = ost->st->time_base;
/* Drain packets that were queued while the muxer was not yet open. */
3009 while (av_fifo_size(ost->muxing_queue)) {
3011 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3012 write_packet(of, &pkt, ost, 1);
/* Initialize the chain of bitstream filters attached to an output stream:
 * wire each filter's input parameters/time base to its predecessor's
 * output (or the stream itself for the first one), then copy the final
 * filter's output parameters back onto the stream. */
3019 static int init_output_bsfs(OutputStream *ost)
3024 if (!ost->nb_bitstream_filters)
3027 for (i = 0; i < ost->nb_bitstream_filters; i++) {
3028 ctx = ost->bsf_ctx[i];
3030 ret = avcodec_parameters_copy(ctx->par_in,
3031 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3035 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3037 ret = av_bsf_init(ctx);
3039 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3040 ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines what the muxer will see. */
3045 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3046 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3050 ost->st->time_base = ctx->time_base_out;
/* Initialize an output stream in stream-copy (-c copy) mode: propagate
 * codec parameters, codec tag, frame rate, time base, duration, side data
 * and disposition from the corresponding input stream, with per-media-type
 * fixups. NOTE(review): sampled view — error-handling lines between the
 * visible statements are not shown. */
3055 static int init_output_stream_streamcopy(OutputStream *ost)
3057 OutputFile *of = output_files[ost->file_index];
3058 InputStream *ist = get_input_stream(ost);
3059 AVCodecParameters *par_dst = ost->st->codecpar;
3060 AVCodecParameters *par_src = ost->ref_par;
3063 uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy requires a mapped input and must not have a filtergraph. */
3065 av_assert0(ist && !ost->filter);
3067 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3069 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3071 av_log(NULL, AV_LOG_FATAL,
3072 "Error setting up codec context options.\n");
3075 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec tag only when the output container can represent
 * it (or has no tag table / no tag of its own for this codec id). */
3078 unsigned int codec_tag_tmp;
3079 if (!of->ctx->oformat->codec_tag ||
3080 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3081 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3082 codec_tag = par_src->codec_tag;
3085 ret = avcodec_parameters_copy(par_dst, par_src);
3089 par_dst->codec_tag = codec_tag;
3091 if (!ost->frame_rate.num)
3092 ost->frame_rate = ist->framerate;
3093 ost->st->avg_frame_rate = ost->frame_rate;
3095 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3099 // copy timebase while removing common factors
3100 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3101 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3103 // copy estimated duration as a hint to the muxer
3104 if (ost->st->duration <= 0 && ist->st->duration > 0)
3105 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3108 ost->st->disposition = ist->st->disposition;
3110 if (ist->st->nb_side_data) {
3111 for (i = 0; i < ist->st->nb_side_data; i++) {
3112 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3115 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3117 return AVERROR(ENOMEM);
3118 memcpy(dst_data, sd_src->data, sd_src->size);
/* A user-forced rotation overrides any copied display matrix. */
3122 if (ost->rotate_overridden) {
3123 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3124 sizeof(int32_t) * 9);
3126 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3129 switch (par_dst->codec_type) {
3130 case AVMEDIA_TYPE_AUDIO:
3131 if (audio_volume != 256) {
3132 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Drop bogus block_align values that some demuxers report for MP3/AC3. */
3135 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3136 par_dst->block_align= 0;
3137 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3138 par_dst->block_align= 0;
3140 case AVMEDIA_TYPE_VIDEO:
3141 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3143 av_mul_q(ost->frame_aspect_ratio,
3144 (AVRational){ par_dst->height, par_dst->width });
3145 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3146 "with stream copy may produce invalid files\n");
3148 else if (ist->st->sample_aspect_ratio.num)
3149 sar = ist->st->sample_aspect_ratio;
3151 sar = par_src->sample_aspect_ratio;
3152 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3153 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3154 ost->st->r_frame_rate = ist->st->r_frame_rate;
3158 ost->mux_timebase = ist->st->time_base;
/* Set the "encoder" metadata tag on the output stream to identify Lavc and
 * the encoder name — unless the user already set one, or bitexact mode
 * (format or codec flag) requests a reproducible "Lavc <name>" string. */
3163 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3165 AVDictionaryEntry *e;
3167 uint8_t *encoder_string;
3168 int encoder_string_len;
3169 int format_flags = 0;
3170 int codec_flags = ost->enc_ctx->flags;
/* User-provided encoder tag wins; don't overwrite it. */
3172 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* The flags may still be unparsed option strings at this point; evaluate
 * them to detect AVFMT_FLAG_BITEXACT / AV_CODEC_FLAG_BITEXACT. */
3175 e = av_dict_get(of->opts, "fflags", NULL, 0);
3177 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3180 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3182 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3184 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3187 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3190 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3191 encoder_string = av_mallocz(encoder_string_len);
3192 if (!encoder_string)
3195 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3196 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3198 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3199 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Ownership of encoder_string transfers to the dictionary. */
3200 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3201 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames argument: a comma-separated list of times
 * and/or "chapters[+offset]" entries, converted into a sorted array of pts
 * (in avctx->time_base) stored in ost->forced_kf_pts/forced_kf_count. */
3204 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3205 AVCodecContext *avctx)
3208 int n = 1, i, size, index = 0;
/* Count entries: commas + 1 (counting loop body not visible in this
 * sampled view). */
3211 for (p = kf; *p; p++)
3215 pts = av_malloc_array(size, sizeof(*pts));
3217 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3222 for (i = 0; i < n; i++) {
3223 char *next = strchr(p, ',');
/* "chapters[+offset]" expands into one keyframe per chapter start. */
3228 if (!memcmp(p, "chapters", 8)) {
3230 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Replace the single "chapters" slot with nb_chapters entries,
 * guarding against int overflow of the array size. */
3233 if (avf->nb_chapters > INT_MAX - size ||
3234 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3236 av_log(NULL, AV_LOG_FATAL,
3237 "Could not allocate forced key frames array.\n");
3240 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3241 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3243 for (j = 0; j < avf->nb_chapters; j++) {
3244 AVChapter *c = avf->chapters[j];
3245 av_assert1(index < size);
3246 pts[index++] = av_rescale_q(c->start, c->time_base,
3247 avctx->time_base) + t;
/* Plain timestamp entry. */
3252 t = parse_time_or_die("force_key_frames", p, 1);
3253 av_assert1(index < size);
3254 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3261 av_assert0(index == size);
/* Keyframe times must be sorted for the lookup done at encode time. */
3262 qsort(pts, size, sizeof(*pts), compare_int64);
3263 ost->forced_kf_count = size;
3264 ost->forced_kf_pts = pts;
/* Choose the encoder time base for an output stream: an explicit
 * -enc_time_base value wins; a negative value means "copy the input
 * stream's time base" (falling back to the default with a warning when no
 * input stream is available); otherwise use the supplied default. */
3267 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3269 InputStream *ist = get_input_stream(ost);
3270 AVCodecContext *enc_ctx = ost->enc_ctx;
3271 AVFormatContext *oc;
3273 if (ost->enc_timebase.num > 0) {
3274 enc_ctx->time_base = ost->enc_timebase;
3278 if (ost->enc_timebase.num < 0) {
3280 enc_ctx->time_base = ist->st->time_base;
3284 oc = output_files[ost->file_index]->ctx;
3285 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3288 enc_ctx->time_base = default_time_base;
/* Configure the encoder context of an output stream before opening it:
 * disposition, frame rate selection, and per-media-type parameters taken
 * from the buffersink of the attached filtergraph (format, rate, layout,
 * dimensions, SAR, time base), plus forced-keyframe setup for video.
 * NOTE(review): sampled view — some lines between the visible statements
 * are not shown. */
3291 static int init_output_stream_encode(OutputStream *ost)
3293 InputStream *ist = get_input_stream(ost);
3294 AVCodecContext *enc_ctx = ost->enc_ctx;
3295 AVCodecContext *dec_ctx = NULL;
3296 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3299 set_encoder_id(output_files[ost->file_index], ost);
3301 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3302 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3303 // which have to be filtered out to prevent leaking them to output files.
3304 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3307 ost->st->disposition = ist->st->disposition;
3309 dec_ctx = ist->dec_ctx;
3311 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the file, mark it as
 * the default audio/video stream. */
3313 for (j = 0; j < oc->nb_streams; j++) {
3314 AVStream *st = oc->streams[j];
3315 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3318 if (j == oc->nb_streams)
3319 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3320 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3321 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Video frame rate: -r option > filtergraph rate > input framerate >
 * input r_frame_rate > hard-coded 25 fps fallback. */
3324 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3325 if (!ost->frame_rate.num)
3326 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3327 if (ist && !ost->frame_rate.num)
3328 ost->frame_rate = ist->framerate;
3329 if (ist && !ost->frame_rate.num)
3330 ost->frame_rate = ist->st->r_frame_rate;
3331 if (ist && !ost->frame_rate.num) {
3332 ost->frame_rate = (AVRational){25, 1};
3333 av_log(NULL, AV_LOG_WARNING,
3335 "about the input framerate is available. Falling "
3336 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3337 "if you want a different framerate.\n",
3338 ost->file_index, ost->index);
3340 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the encoder's supported framerate list unless -force_fps. */
3341 if (ost->enc->supported_framerates && !ost->force_fps) {
3342 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3343 ost->frame_rate = ost->enc->supported_framerates[idx];
3345 // reduce frame rate for mpeg4 to be within the spec limits
3346 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3347 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3348 ost->frame_rate.num, ost->frame_rate.den, 65535);
3352 switch (enc_ctx->codec_type) {
3353 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the buffersink feeding the encoder. */
3354 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3356 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3357 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3358 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3359 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3360 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3362 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3365 case AVMEDIA_TYPE_VIDEO:
3366 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3368 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3369 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3370 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3371 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3372 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3373 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale forced keyframe times now that the time base is known. */
3375 for (j = 0; j < ost->forced_kf_count; j++)
3376 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3378 enc_ctx->time_base);
3380 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3381 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3382 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3383 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3384 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3385 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3387 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3389 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3390 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3392 enc_ctx->framerate = ost->frame_rate;
3394 ost->st->avg_frame_rate = ost->frame_rate;
/* Keep the requested raw-sample depth only when the picture geometry or
 * format actually changed (condition head not visible in this view). */
3397 enc_ctx->width != dec_ctx->width ||
3398 enc_ctx->height != dec_ctx->height ||
3399 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3400 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3403 if (ost->top_field_first == 0) {
3404 enc_ctx->field_order = AV_FIELD_BB;
3405 } else if (ost->top_field_first == 1) {
3406 enc_ctx->field_order = AV_FIELD_TT;
/* -force_key_frames: "expr:" prefix parses an expression; "source" keeps
 * input keyframes; anything else is a static timestamp list. */
3409 if (ost->forced_keyframes) {
3410 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3411 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3412 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3414 av_log(NULL, AV_LOG_ERROR,
3415 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3418 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3419 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3420 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3421 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3423 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3424 // parse it only for static kf timings
3425 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3426 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3430 case AVMEDIA_TYPE_SUBTITLE:
3431 enc_ctx->time_base = AV_TIME_BASE_Q;
3432 if (!enc_ctx->width) {
3433 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3434 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3437 case AVMEDIA_TYPE_DATA:
3444 ost->mux_timebase = enc_ctx->time_base;
3449 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3453 if (ost->encoding_needed) {
3454 AVCodec *codec = ost->enc;
3455 AVCodecContext *dec = NULL;
3458 ret = init_output_stream_encode(ost);
3462 if ((ist = get_input_stream(ost)))
3464 if (dec && dec->subtitle_header) {
3465 /* ASS code assumes this buffer is null terminated so add extra byte. */
3466 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3467 if (!ost->enc_ctx->subtitle_header)
3468 return AVERROR(ENOMEM);
3469 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3470 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3472 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3473 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3474 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3476 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3477 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3478 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3480 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3481 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3482 av_buffersink_get_format(ost->filter->filter)) {
3483 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3484 if (!ost->enc_ctx->hw_frames_ctx)
3485 return AVERROR(ENOMEM);
3487 ret = hw_device_setup_for_encode(ost);
3489 snprintf(error, error_len, "Device setup failed for "
3490 "encoder on output stream #%d:%d : %s",
3491 ost->file_index, ost->index, av_err2str(ret));
3495 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3496 int input_props = 0, output_props = 0;
3497 AVCodecDescriptor const *input_descriptor =
3498 avcodec_descriptor_get(dec->codec_id);
3499 AVCodecDescriptor const *output_descriptor =
3500 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3501 if (input_descriptor)
3502 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3503 if (output_descriptor)
3504 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3505 if (input_props && output_props && input_props != output_props) {
3506 snprintf(error, error_len,
3507 "Subtitle encoding currently only possible from text to text "
3508 "or bitmap to bitmap");
3509 return AVERROR_INVALIDDATA;
3513 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3514 if (ret == AVERROR_EXPERIMENTAL)
3515 abort_codec_experimental(codec, 1);
3516 snprintf(error, error_len,
3517 "Error while opening encoder for output stream #%d:%d - "
3518 "maybe incorrect parameters such as bit_rate, rate, width or height",
3519 ost->file_index, ost->index);
3522 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3523 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3524 av_buffersink_set_frame_size(ost->filter->filter,
3525 ost->enc_ctx->frame_size);
3526 assert_avoptions(ost->encoder_opts);
3527 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3528 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3529 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3530 " It takes bits/s as argument, not kbits/s\n");
3532 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3534 av_log(NULL, AV_LOG_FATAL,
3535 "Error initializing the output stream codec context.\n");
3539 * FIXME: ost->st->codec should't be needed here anymore.
3541 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3545 if (ost->enc_ctx->nb_coded_side_data) {
3548 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3549 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3552 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3554 return AVERROR(ENOMEM);
3555 memcpy(dst_data, sd_src->data, sd_src->size);
3560 * Add global input side data. For now this is naive, and copies it
3561 * from the input stream's global side data. All side data should
3562 * really be funneled over AVFrame and libavfilter, then added back to
3563 * packet side data, and then potentially using the first packet for
3568 for (i = 0; i < ist->st->nb_side_data; i++) {
3569 AVPacketSideData *sd = &ist->st->side_data[i];
3570 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3572 return AVERROR(ENOMEM);
3573 memcpy(dst, sd->data, sd->size);
3574 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3575 av_display_rotation_set((uint32_t *)dst, 0);
3579 // copy timebase while removing common factors
3580 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3581 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3583 // copy estimated duration as a hint to the muxer
3584 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3585 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3587 ost->st->codec->codec= ost->enc_ctx->codec;
3588 } else if (ost->stream_copy) {
3589 ret = init_output_stream_streamcopy(ost);
3594 // parse user provided disposition, and update stream values
3595 if (ost->disposition) {
3596 static const AVOption opts[] = {
3597 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3598 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3599 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3600 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3601 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3602 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3603 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3604 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3605 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3606 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3607 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3608 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3609 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3610 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3611 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3612 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3615 static const AVClass class = {
3617 .item_name = av_default_item_name,
3619 .version = LIBAVUTIL_VERSION_INT,
3621 const AVClass *pclass = &class;
3623 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3628 /* initialize bitstream filters for the output stream
3629 * needs to be done here, because the codec id for streamcopy is not
3630 * known until now */
3631 ret = init_output_bsfs(ost);
3635 ost->initialized = 1;
3637 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Log a one-time warning when a packet references a stream index that did not
 * exist when the input file was opened. Such late-appearing streams are
 * ignored by the rest of the pipeline; nb_streams_warn tracks the highest
 * index already warned about so each new stream is reported only once.
 * NOTE(review): this excerpt elides some lines (e.g. the early return when
 * the index was already warned about). */
3644 static void report_new_stream(int input_index, AVPacket *pkt)
3646 InputFile *file = input_files[input_index];
3647 AVStream *st = file->ctx->streams[pkt->stream_index];
3649 if (pkt->stream_index < file->nb_streams_warn)
3651 av_log(file->ctx, AV_LOG_WARNING,
3652 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3653 av_get_media_type_string(st->codecpar->codec_type),
3654 input_index, pkt->stream_index,
3655 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember that indices up to and including this one were reported. */
3656 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time global initialization before the main transcode loop:
 *  - bind complex-filtergraph outputs back to a source input stream,
 *  - arm frame-rate emulation timers,
 *  - open all decoders (init_input_stream) and non-filter-fed encoders
 *    (init_output_stream),
 *  - discard programs with no used streams,
 *  - write headers for outputs that legitimately have zero streams,
 *  - print the human-readable "Stream mapping:" report,
 * then marks transcode_init_done so signal handlers / status reporting know
 * setup is complete. Returns 0 on success, a negative AVERROR on failure
 * (with a message in `error`). */
3659 static int transcode_init(void)
3661 int ret = 0, i, j, k;
3662 AVFormatContext *oc;
3665 char error[1024] = {0};
/* For complex filtergraph outputs with no explicit source, adopt the
 * graph's single input stream as source_index (only when nb_inputs == 1). */
3667 for (i = 0; i < nb_filtergraphs; i++) {
3668 FilterGraph *fg = filtergraphs[i];
3669 for (j = 0; j < fg->nb_outputs; j++) {
3670 OutputFilter *ofilter = fg->outputs[j];
3671 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3673 if (fg->nb_inputs != 1)
3675 for (k = nb_input_streams-1; k >= 0 ; k--)
3676 if (fg->inputs[0]->ist == input_streams[k])
3678 ofilter->ost->source_index = k;
3682 /* init framerate emulation */
3683 for (i = 0; i < nb_input_files; i++) {
3684 InputFile *ifile = input_files[i];
3685 if (ifile->rate_emu)
3686 for (j = 0; j < ifile->nb_streams; j++)
3687 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3690 /* init input streams */
3691 for (i = 0; i < nb_input_streams; i++)
3692 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On decoder-open failure, close every encoder context before bailing. */
3693 for (i = 0; i < nb_output_streams; i++) {
3694 ost = output_streams[i];
3695 avcodec_close(ost->enc_ctx);
3700 /* open each encoder */
3701 for (i = 0; i < nb_output_streams; i++) {
3702 // skip streams fed from filtergraphs until we have a frame for them
3703 if (output_streams[i]->filter)
3706 ret = init_output_stream(output_streams[i], error, sizeof(error));
3711 /* discard unused programs */
3712 for (i = 0; i < nb_input_files; i++) {
3713 InputFile *ifile = input_files[i];
3714 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3715 AVProgram *p = ifile->ctx->programs[j];
3716 int discard = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is actually used. */
3718 for (k = 0; k < p->nb_stream_indexes; k++)
3719 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3720 discard = AVDISCARD_DEFAULT;
3723 p->discard = discard;
3727 /* write headers for files with no streams */
3728 for (i = 0; i < nb_output_files; i++) {
3729 oc = output_files[i]->ctx;
3730 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3731 ret = check_init_output_file(output_files[i], i);
3738 /* dump the stream mapping */
3739 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3740 for (i = 0; i < nb_input_streams; i++) {
3741 ist = input_streams[i];
/* Report input streams feeding complex (non-simple) filtergraphs. */
3743 for (j = 0; j < ist->nb_filters; j++) {
3744 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3745 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3746 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3747 ist->filters[j]->name);
3748 if (nb_filtergraphs > 1)
3749 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3750 av_log(NULL, AV_LOG_INFO, "\n");
3755 for (i = 0; i < nb_output_streams; i++) {
3756 ost = output_streams[i];
3758 if (ost->attachment_filename) {
3759 /* an attached file */
3760 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3761 ost->attachment_filename, ost->file_index, ost->index);
3765 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3766 /* output from a complex graph */
3767 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3768 if (nb_filtergraphs > 1)
3769 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3771 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3772 ost->index, ost->enc ? ost->enc->name : "?");
/* Plain input->output mapping (streamcopy or simple transcode). */
3776 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3777 input_streams[ost->source_index]->file_index,
3778 input_streams[ost->source_index]->st->index,
3781 if (ost->sync_ist != input_streams[ost->source_index])
3782 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3783 ost->sync_ist->file_index,
3784 ost->sync_ist->st->index);
3785 if (ost->stream_copy)
3786 av_log(NULL, AV_LOG_INFO, " (copy)");
3788 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3789 const AVCodec *out_codec = ost->enc;
3790 const char *decoder_name = "?";
3791 const char *in_codec_name = "?";
3792 const char *encoder_name = "?";
3793 const char *out_codec_name = "?";
3794 const AVCodecDescriptor *desc;
/* Show "native" when the chosen decoder/encoder name matches the codec's
 * canonical descriptor name (i.e. not an external implementation). */
3797 decoder_name = in_codec->name;
3798 desc = avcodec_descriptor_get(in_codec->id);
3800 in_codec_name = desc->name;
3801 if (!strcmp(decoder_name, in_codec_name))
3802 decoder_name = "native";
3806 encoder_name = out_codec->name;
3807 desc = avcodec_descriptor_get(out_codec->id);
3809 out_codec_name = desc->name;
3810 if (!strcmp(encoder_name, out_codec_name))
3811 encoder_name = "native";
3814 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3815 in_codec_name, decoder_name,
3816 out_codec_name, encoder_name);
3818 av_log(NULL, AV_LOG_INFO, "\n");
3822 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Signal (to other threads / handlers) that initialization completed. */
3826 atomic_store(&transcode_init_done, 1);
3831 /* Return 1 if there remain streams where more output is wanted, 0 otherwise.
 * A stream no longer wants output when it is finished, its file hit the
 * -fs size limit, or it reached -frames (max_frames); hitting max_frames
 * closes every stream of that output file. */
3832 static int need_output(void)
3836 for (i = 0; i < nb_output_streams; i++) {
3837 OutputStream *ost = output_streams[i];
3838 OutputFile *of = output_files[ost->file_index];
3839 AVFormatContext *os = output_files[ost->file_index]->ctx;
3841 if (ost->finished ||
3842 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3844 if (ost->frame_number >= ost->max_frames) {
/* Frame limit reached: close all sibling streams of this output file. */
3846 for (j = 0; j < of->ctx->nb_streams; j++)
3847 close_output_stream(output_streams[of->ost_index + j]);
/**
3858 * Select the output stream to process.
 *
 * Picks the unfinished stream with the smallest rescaled cur_dts so output
 * files are interleaved fairly; a stream with no dts yet sorts first
 * (INT64_MIN). Streams that are neither initialized nor done with their
 * inputs are skipped; an "unavailable" winner yields NULL.
 *
3860 * @return selected output stream, or NULL if none available
 */
3862 static OutputStream *choose_output(void)
3865 int64_t opts_min = INT64_MAX;
3866 OutputStream *ost_min = NULL;
3868 for (i = 0; i < nb_output_streams; i++) {
3869 OutputStream *ost = output_streams[i];
3870 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3871 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3873 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3874 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n")
3876 if (!ost->initialized && !ost->inputs_done)
3879 if (!ost->finished && opts < opts_min) {
3881 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios; used so
 * interactive command entry ('c'/'C'/'D' keys) can show what is typed.
 * Silently does nothing if stdin is not a tty (tcgetattr fails). */
3887 static void set_tty_echo(int on)
3891 if (tcgetattr(0, &tty) == 0) {
3892 if (on) tty.c_lflag |= ECHO;
3893 else tty.c_lflag &= ~ECHO;
3894 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin (at most every 100 ms, and not when running as a daemon) for a
 * single interactive key and act on it:
 *   q            -> quit (AVERROR_EXIT; also on pending signals)
 *   + / -        -> raise / lower log verbosity
 *   s            -> toggle QP histogram
 *   c / C        -> send (c) or queue (C) a command to matching filters
 *   d / D        -> cycle / prompt for codec debug flags
 *   ?            -> print key help
 * Returns 0 to continue or AVERROR_EXIT to stop transcoding.
 * NOTE(review): several lines (key reading, early returns, loop braces)
 * are elided in this excerpt. */
3899 static int check_keyboard_interaction(int64_t cur_time)
3902 static int64_t last_time;
3903 if (received_nb_signals)
3904 return AVERROR_EXIT;
3905 /* read_key() returns 0 on EOF */
3906 if(cur_time - last_time >= 100000 && !run_as_daemon){
3908 last_time = cur_time;
3912 return AVERROR_EXIT;
3913 if (key == '+') av_log_set_level(av_log_get_level()+10);
3914 if (key == '-') av_log_set_level(av_log_get_level()-10);
3915 if (key == 's') qp_hist ^= 1;
3918 do_hex_dump = do_pkt_dump = 0;
3919 } else if(do_pkt_dump){
3923 av_log_set_level(AV_LOG_DEBUG);
3925 if (key == 'c' || key == 'C'){
3926 char buf[4096], target[64], command[256], arg[256] = {0};
3929 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read one command line, character by character, until newline/CR. */
3932 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3937 fprintf(stderr, "\n");
3939 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3940 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3941 target, time, command, arg);
3942 for (i = 0; i < nb_filtergraphs; i++) {
3943 FilterGraph *fg = filtergraphs[i];
/* 'c' sends to the first matching filter only; 'C' queues for all. */
3946 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3947 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3948 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3949 } else if (key == 'c') {
3950 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3951 ret = AVERROR_PATCHWELCOME;
3953 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3955 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3960 av_log(NULL, AV_LOG_ERROR,
3961 "Parse error, at least 3 arguments were expected, "
3962 "only %d given in string '%s'\n", n, buf);
3965 if (key == 'd' || key == 'D'){
/* 'd': cycle to the next debug flag; flags known to crash are skipped. */
3968 debug = input_streams[0]->st->codec->debug<<1;
3969 if(!debug) debug = 1;
3970 while(debug & (FF_DEBUG_DCT_COEFF
3972 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3974 )) //unsupported, would just crash
/* 'D': prompt for an explicit numeric debug value. */
3981 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3986 fprintf(stderr, "\n");
3987 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3988 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3990 for(i=0;i<nb_input_streams;i++) {
3991 input_streams[i]->st->codec->debug = debug;
3993 for(i=0;i<nb_output_streams;i++) {
3994 OutputStream *ost = output_streams[i];
3995 ost->enc_ctx->debug = debug;
3997 if(debug) av_log_set_level(AV_LOG_DEBUG);
3998 fprintf(stderr,"debug=%d\n", debug);
4001 fprintf(stderr, "key function\n"
4002 "? show this help\n"
4003 "+ increase verbosity\n"
4004 "- decrease verbosity\n"
4005 "c Send command to first matching filter supporting it\n"
4006 "C Send/Queue command to all matching filters\n"
4007 "D cycle through available debug modes\n"
4008 "h dump packets/hex press to cycle through the 3 states\n"
4010 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. In
 * non-blocking mode a full queue triggers one blocking retry plus a warning
 * suggesting a larger -thread_queue_size. Read errors (and EOF) are
 * propagated to the receiver via the queue's error state. */
4017 static void *input_thread(void *arg)
4020 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4025 ret = av_read_frame(f->ctx, &pkt);
4027 if (ret == AVERROR(EAGAIN)) {
/* Fatal read error or EOF: tell the consumer side and stop reading. */
4032 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4035 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4036 if (flags && ret == AVERROR(EAGAIN)) {
/* Queue full in non-blocking mode: retry (blocking) and warn the user. */
4038 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4039 av_log(f->ctx, AV_LOG_WARNING,
4040 "Thread message queue blocking; consider raising the "
4041 "thread_queue_size option (current value: %d)\n",
4042 f->thread_queue_size);
4045 if (ret != AVERROR_EOF)
4046 av_log(f->ctx, AV_LOG_ERROR,
4047 "Unable to send packet to main thread: %s\n",
/* Packet could not be queued: drop it and propagate the error. */
4049 av_packet_unref(&pkt);
4050 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and clean up the demuxer thread of input file i: signal EOF to the
 * sender, drain and unref any queued packets, join the thread, then free
 * the message queue. No-op if the file has no thread queue. */
4058 static void free_input_thread(int i)
4060 InputFile *f = input_files[i];
4063 if (!f || !f->in_thread_queue)
4065 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4066 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4067 av_packet_unref(&pkt);
4069 pthread_join(f->thread, NULL);
4071 av_thread_message_queue_free(&f->in_thread_queue);
/* Tear down the demuxer threads of all input files. */
4074 static void free_input_threads(void)
4078 for (i = 0; i < nb_input_files; i++)
4079 free_input_thread(i);
/* Start the demuxer thread for input file i. Skipped entirely when there is
 * only one input file (packets are then read directly). Unseekable inputs
 * other than lavfi are marked non-blocking so one slow input cannot stall
 * the others. Returns 0 on success or a negative AVERROR. */
4082 static int init_input_thread(int i)
4085 InputFile *f = input_files[i];
4087 if (nb_input_files == 1)
4090 if (f->ctx->pb ? !f->ctx->pb->seekable :
4091 strcmp(f->ctx->iformat->name, "lavfi"))
4092 f->non_blocking = 1;
4093 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4094 f->thread_queue_size, sizeof(AVPacket));
4098 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4099 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4100 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, not -1/errno. */
4101 return AVERROR(ret);
/* Start a demuxer thread for every input file; stops at the first failure.
 * NOTE(review): the error-propagation/return lines are elided here. */
4107 static int init_input_threads(void)
4111 for (i = 0; i < nb_input_files; i++) {
4112 ret = init_input_thread(i);
/* Receive one packet from the input file's demuxer-thread queue,
 * non-blocking when the file was flagged f->non_blocking. */
4119 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4121 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4123 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from input file f. Under -re (rate emulation),
 * returns EAGAIN while any stream's dts is still ahead of wall-clock time
 * since the stream started, throttling reads to realtime. With multiple
 * input files the packet comes from the demuxer thread queue, otherwise
 * directly from av_read_frame(). */
4127 static int get_input_packet(InputFile *f, AVPacket *pkt)
4131 for (i = 0; i < f->nb_streams; i++) {
4132 InputStream *ist = input_streams[f->ist_index + i];
/* Convert stream dts to microseconds to compare against elapsed time. */
4133 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4134 int64_t now = av_gettime_relative() - ist->start;
4136 return AVERROR(EAGAIN);
4141 if (nb_input_files > 1)
4142 return get_input_packet_mt(f, pkt);
4144 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input would have blocked this iteration). */
4147 static int got_eagain(void)
4150 for (i = 0; i < nb_output_streams; i++)
4151 if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-stream unavailable flags so the next
 * loop iteration retries every input/output. */
4156 static void reset_eagain(void)
4159 for (i = 0; i < nb_input_files; i++)
4160 input_files[i]->eagain = 0;
4161 for (i = 0; i < nb_output_streams; i++)
4162 output_streams[i]->unavailable = 0;
4165 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
// Comparison is done with av_compare_ts() so the two values may be in
// different time bases; when tmp wins, the caller's duration/time_base are
// replaced by tmp's (elided assignment lines in this excerpt).
4166 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4167 AVRational time_base)
4173 return tmp_time_base;
4176 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4179 return tmp_time_base;
/* Rewind input file `is` to its start for -stream_loop handling, and update
 * ifile->duration/time_base with the duration of the just-finished pass so
 * timestamps of the next pass can be offset past it. The last-frame duration
 * is estimated from nb_samples/sample_rate for audio, else from the stream
 * frame rate; when any audio stream is present, video last-frame length is
 * ignored (it is not well defined). */
4185 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4188 AVCodecContext *avctx;
4189 int i, ret, has_audio = 0;
4190 int64_t duration = 0;
4192 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: detect whether any decoded audio samples exist. */
4196 for (i = 0; i < ifile->nb_streams; i++) {
4197 ist = input_streams[ifile->ist_index + i];
4198 avctx = ist->dec_ctx;
4200 /* duration is the length of the last frame in a stream
4201 * when audio stream is present we don't care about
4202 * last video frame length because it's not defined exactly */
4203 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's total duration for this loop. */
4207 for (i = 0; i < ifile->nb_streams; i++) {
4208 ist = input_streams[ifile->ist_index + i];
4209 avctx = ist->dec_ctx;
4212 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4213 AVRational sample_rate = {1, avctx->sample_rate};
4215 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4220 if (ist->framerate.num) {
4221 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4222 } else if (ist->st->avg_frame_rate.num) {
4223 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4228 if (!ifile->duration)
4229 ifile->time_base = ist->st->time_base;
4230 /* the total duration of the stream, max_pts - min_pts is
4231 * the duration of the stream without the last frame */
4232 duration += ist->max_pts - ist->min_pts;
4233 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Positive -stream_loop counts down one iteration per rewind. */
4237 if (ifile->loop > 0)
/**
 * Read one packet from the given input file and feed it to the pipeline.
 *
 * Handles: EOF (flush decoders, optionally rewind for -stream_loop, mark
 * outputs finished), late-appearing streams, timestamp wrap correction,
 * ts_offset/ts_scale application, discontinuity detection against both the
 * file's last_ts and the stream's predicted next_dts, and loop-duration
 * offsetting. Return values:
4245 * - 0 -- one packet was read and processed
4246 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4247 * this function should be called again
4248 * - AVERROR_EOF -- this function should not be called again
 */
4250 static int process_input(int file_index)
4252 InputFile *ifile = input_files[file_index];
4253 AVFormatContext *is;
4256 int ret, thread_ret, i, j;
4261 ret = get_input_packet(ifile, &pkt);
4263 if (ret == AVERROR(EAGAIN)) {
/* EOF (or error) with -stream_loop active: flush decoders, rewind, and
 * restart reading from the beginning of the file. */
4267 if (ret < 0 && ifile->loop) {
4268 AVCodecContext *avctx;
4269 for (i = 0; i < ifile->nb_streams; i++) {
4270 ist = input_streams[ifile->ist_index + i];
4271 avctx = ist->dec_ctx;
4272 if (ist->decoding_needed) {
4273 ret = process_input_packet(ist, NULL, 1);
4276 avcodec_flush_buffers(avctx);
/* The demuxer thread must be stopped around the seek. */
4280 free_input_thread(file_index);
4282 ret = seek_to_start(ifile, is);
4284 thread_ret = init_input_thread(file_index);
4289 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4291 ret = get_input_packet(ifile, &pkt);
4292 if (ret == AVERROR(EAGAIN)) {
4298 if (ret != AVERROR_EOF) {
4299 print_error(is->url, ret);
/* Genuine EOF: send a final NULL packet to each active decoder and mark
 * non-lavfi outputs fed by this file as finished. */
4304 for (i = 0; i < ifile->nb_streams; i++) {
4305 ist = input_streams[ifile->ist_index + i];
4306 if (ist->decoding_needed) {
4307 ret = process_input_packet(ist, NULL, 0);
4312 /* mark all outputs that don't go through lavfi as finished */
4313 for (j = 0; j < nb_output_streams; j++) {
4314 OutputStream *ost = output_streams[j];
4316 if (ost->source_index == ifile->ist_index + i &&
4317 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4318 finish_output_stream(ost);
/* EAGAIN (not EOF) so the caller retries; eof_reached gates reuse. */
4322 ifile->eof_reached = 1;
4323 return AVERROR(EAGAIN);
4329 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4330 is->streams[pkt.stream_index]);
4332 /* the following test is needed in case new streams appear
4333 dynamically in stream : we ignore them */
4334 if (pkt.stream_index >= ifile->nb_streams) {
4335 report_new_stream(file_index, &pkt);
4336 goto discard_packet;
4339 ist = input_streams[ifile->ist_index + pkt.stream_index];
4341 ist->data_size += pkt.size;
4345 goto discard_packet;
4347 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4348 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4353 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4354 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4355 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4356 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4357 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4358 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4359 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4360 av_ts2str(input_files[ist->file_index]->ts_offset),
4361 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for containers with < 64-bit pts fields. */
4364 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4365 int64_t stime, stime2;
4366 // Correcting starttime based on the enabled streams
4367 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4368 // so we instead do it here as part of discontinuity handling
4369 if ( ist->next_dts == AV_NOPTS_VALUE
4370 && ifile->ts_offset == -is->start_time
4371 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4372 int64_t new_start_time = INT64_MAX;
4373 for (i=0; i<is->nb_streams; i++) {
4374 AVStream *st = is->streams[i];
4375 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4377 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4379 if (new_start_time > is->start_time) {
4380 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4381 ifile->ts_offset = -new_start_time;
4385 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4386 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4387 ist->wrap_correction_done = 1;
/* Timestamps more than half the wrap period ahead of start are assumed to
 * be pre-wrap values and are shifted back by one full wrap period. */
4389 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4390 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4391 ist->wrap_correction_done = 0;
4393 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4394 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4395 ist->wrap_correction_done = 0;
4399 /* add the stream-global side data to the first packet */
4400 if (ist->nb_packets == 1) {
4401 for (i = 0; i < ist->st->nb_side_data; i++) {
4402 AVPacketSideData *src_sd = &ist->st->side_data[i];
4405 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4408 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4411 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4415 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the file-level timestamp offset, then the -itsscale factor. */
4419 if (pkt.dts != AV_NOPTS_VALUE)
4420 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4421 if (pkt.pts != AV_NOPTS_VALUE)
4422 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4424 if (pkt.pts != AV_NOPTS_VALUE)
4425 pkt.pts *= ist->ts_scale;
4426 if (pkt.dts != AV_NOPTS_VALUE)
4427 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check: first dts of this stream vs. the last
 * timestamp seen on the whole file (discontinuous formats only). */
4429 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4430 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4431 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4432 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4433 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4434 int64_t delta = pkt_dts - ifile->last_ts;
4435 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4436 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4437 ifile->ts_offset -= delta;
4438 av_log(NULL, AV_LOG_DEBUG,
4439 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4440 delta, ifile->ts_offset);
4441 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4442 if (pkt.pts != AV_NOPTS_VALUE)
4443 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Offset timestamps by the accumulated loop duration (-stream_loop). */
4447 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4448 if (pkt.pts != AV_NOPTS_VALUE) {
4449 pkt.pts += duration;
4450 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4451 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4454 if (pkt.dts != AV_NOPTS_VALUE)
4455 pkt.dts += duration;
/* Per-stream discontinuity check: packet dts vs. predicted next_dts. */
4457 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4458 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4459 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4460 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4462 int64_t delta = pkt_dts - ist->next_dts;
4463 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4464 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4465 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4466 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4467 ifile->ts_offset -= delta;
4468 av_log(NULL, AV_LOG_DEBUG,
4469 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4470 delta, ifile->ts_offset);
4471 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4472 if (pkt.pts != AV_NOPTS_VALUE)
4473 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-discontinuous formats: timestamps past dts_error_threshold are
 * considered bogus and dropped rather than offset. */
4476 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4477 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4478 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4479 pkt.dts = AV_NOPTS_VALUE;
4481 if (pkt.pts != AV_NOPTS_VALUE){
4482 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4483 delta = pkt_pts - ist->next_dts;
4484 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4485 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4486 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4487 pkt.pts = AV_NOPTS_VALUE;
4493 if (pkt.dts != AV_NOPTS_VALUE)
4494 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4497 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4498 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4499 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4500 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4501 av_ts2str(input_files[ist->file_index]->ts_offset),
4502 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4505 sub2video_heartbeat(ist, pkt.pts);
4507 process_input_packet(ist, &pkt, 0);
4510 av_packet_unref(&pkt);
/**
4516 * Perform a step of transcoding for the specified filter graph.
 *
 * Asks the graph for its oldest pending frame; on success the reaped frames
 * are sent to encoders, on EOF all graph outputs are closed, and on EAGAIN
 * the input filter with the most failed source requests is chosen as the
 * stream to read next. If no input can make progress (its file is in
 * eagain/eof state), all graph outputs are marked unavailable.
 *
4518 * @param[in] graph filter graph to consider
4519 * @param[out] best_ist input stream where a frame would allow to continue
4520 * @return 0 for success, <0 for error
 */
4522 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4525 int nb_requests, nb_requests_max = 0;
4526 InputFilter *ifilter;
4530 ret = avfilter_graph_request_oldest(graph->graph);
4532 return reap_filters(0);
4534 if (ret == AVERROR_EOF) {
4535 ret = reap_filters(1);
4536 for (i = 0; i < graph->nb_outputs; i++)
4537 close_output_stream(graph->outputs[i]->ost);
4540 if (ret != AVERROR(EAGAIN))
4543 for (i = 0; i < graph->nb_inputs; i++) {
4544 ifilter = graph->inputs[i];
/* Skip inputs whose file cannot currently deliver packets. */
4546 if (input_files[ist->file_index]->eagain ||
4547 input_files[ist->file_index]->eof_reached)
4549 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4550 if (nb_requests > nb_requests_max) {
4551 nb_requests_max = nb_requests;
/* No usable input: stall every output of this graph for now. */
4557 for (i = 0; i < graph->nb_outputs; i++)
4558 graph->outputs[i]->ost->unavailable = 1;
/**
4564 * Run a single step of transcoding.
 *
 * Chooses an output stream, (re)configures and lazily initializes its
 * filtergraph/encoder when all input formats are known, selects the input
 * stream that would unblock it, reads and processes one packet from that
 * input, and finally reaps any frames the filters produced.
 *
4566 * @return 0 for success, <0 for error
 */
4568 static int transcode_step(void)
4571 InputStream *ist = NULL;
4574 ost = choose_output();
4581 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Graph exists but is not configured yet: configure once every input
 * filter has seen a frame (so formats are known). */
4585 if (ost->filter && !ost->filter->graph->graph) {
4586 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4587 ret = configure_filtergraph(ost->filter->graph);
4589 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4595 if (ost->filter && ost->filter->graph->graph) {
/* Encoder init was deferred until the graph could provide parameters. */
4596 if (!ost->initialized) {
4597 char error[1024] = {0};
4598 ret = init_output_stream(ost, error, sizeof(error));
4600 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4601 ost->file_index, ost->index, error);
4605 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4609 } else if (ost->filter) {
/* Unconfigured graph: look for an input that can still produce data. */
4611 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4612 InputFilter *ifilter = ost->filter->graph->inputs[i];
4613 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4619 ost->inputs_done = 1;
4623 av_assert0(ost->source_index >= 0);
4624 ist = input_streams[ost->source_index];
4627 ret = process_input(ist->file_index);
4628 if (ret == AVERROR(EAGAIN)) {
4629 if (input_files[ist->file_index]->eagain)
4630 ost->unavailable = 1;
/* EOF from the chosen input is not an error for the step as a whole. */
4635 return ret == AVERROR_EOF ? 0 : ret;
4637 return reap_filters(0);
/*
4641 * The following code is the main loop of the file converter
 *
 * Drives the whole conversion: transcode_init(), optional stdin interaction,
 * repeated transcode_step() until no output is needed or a termination
 * signal arrives, then flushes decoders, writes trailers, closes encoders
 * and decoders, and frees per-stream resources. Returns 0 on success or a
 * negative AVERROR.
 */
4643 static int transcode(void)
4646 AVFormatContext *os;
4649 int64_t timer_start;
4650 int64_t total_packets_written = 0;
4652 ret = transcode_init();
4656 if (stdin_interaction) {
4657 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4660 timer_start = av_gettime_relative();
4663 if ((ret = init_input_threads()) < 0)
4667 while (!received_sigterm) {
4668 int64_t cur_time= av_gettime_relative();
4670 /* if 'q' pressed, exits */
4671 if (stdin_interaction)
4672 if (check_keyboard_interaction(cur_time) < 0)
4675 /* check if there's any stream where output is still needed */
4676 if (!need_output()) {
4677 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n")
4681 ret = transcode_step();
4682 if (ret < 0 && ret != AVERROR_EOF) {
4683 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4687 /* dump report by using the output first video and audio streams */
4688 print_report(0, timer_start, cur_time);
4691 free_input_threads();
4694 /* at the end of stream, we must flush the decoder buffers */
4695 for (i = 0; i < nb_input_streams; i++) {
4696 ist = input_streams[i];
4697 if (!input_files[ist->file_index]->eof_reached) {
4698 process_input_packet(ist, NULL, 0);
4705 /* write the trailer if needed and close file */
4706 for (i = 0; i < nb_output_files; i++) {
4707 os = output_files[i]->ctx;
4708 if (!output_files[i]->header_written) {
4709 av_log(NULL, AV_LOG_ERROR,
4710 "Nothing was written into output file %d (%s), because "
4711 "at least one of its streams received no packets.\n",
4715 if ((ret = av_write_trailer(os)) < 0) {
4716 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4722 /* dump report by using the first video and audio streams */
4723 print_report(1, timer_start, av_gettime_relative());
4725 /* close each encoder */
4726 for (i = 0; i < nb_output_streams; i++) {
4727 ost = output_streams[i];
4728 if (ost->encoding_needed) {
4729 av_freep(&ost->enc_ctx->stats_in);
4731 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was produced at all. */
4734 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4735 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4739 /* close each decoder */
4740 for (i = 0; i < nb_input_streams; i++) {
4741 ist = input_streams[i];
4742 if (ist->decoding_needed) {
4743 avcodec_close(ist->dec_ctx);
4744 if (ist->hwaccel_uninit)
4745 ist->hwaccel_uninit(ist->dec_ctx);
4749 av_buffer_unref(&hw_device_ctx);
4750 hw_device_free_all();
/* Cleanup path (also reached on error): stop threads, free per-stream
 * state; two-pass logfiles are flushed and checked for close errors. */
4757 free_input_threads();
4760 if (output_streams) {
4761 for (i = 0; i < nb_output_streams; i++) {
4762 ost = output_streams[i];
4765 if (fclose(ost->logfile))
4766 av_log(NULL, AV_LOG_ERROR,
4767 "Error closing logfile, loss of information possible: %s\n",
4768 av_err2str(AVERROR(errno)));
4769 ost->logfile = NULL;
4771 av_freep(&ost->forced_kf_pts);
4772 av_freep(&ost->apad);
4773 av_freep(&ost->disposition);
4774 av_dict_free(&ost->encoder_opts);
4775 av_dict_free(&ost->sws_dict);
4776 av_dict_free(&ost->swr_opts);
4777 av_dict_free(&ost->resample_opts);
/*
 * Snapshot real/user/system CPU time for the -benchmark option.
 * real_usec is always taken from av_gettime_relative(); user/sys come
 * from getrusage(RUSAGE_SELF) where available, GetProcessTimes() on
 * Windows, and are zeroed otherwise.
 *
 * NOTE(review): the #if HAVE_GETRUSAGE / #else / #endif preprocessor
 * lines are elided from this excerpt (embedded line numbers jump).
 */
4784 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
4786 BenchmarkTimeStamps time_stamps = { av_gettime_relative() }; /* first member: real_usec */
4788 struct rusage rusage;
4790 getrusage(RUSAGE_SELF, &rusage);
4791 time_stamps.user_usec =
4792 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec; /* sec+usec -> microseconds */
4793 time_stamps.sys_usec =
4794 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4795 #elif HAVE_GETPROCESSTIMES
4797 FILETIME c, e, k, u;
4798 proc = GetCurrentProcess();
4799 GetProcessTimes(proc, &c, &e, &k, &u); /* c=creation, e=exit, k=kernel, u=user */
4800 time_stamps.user_usec =
4801 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10; /* FILETIME is 100ns units */
4802 time_stamps.sys_usec =
4803 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4805 time_stamps.user_usec = time_stamps.sys_usec = 0; /* fallback: no CPU-time API available */
/*
 * Report peak memory usage of this process, in bytes, for the
 * -benchmark report: ru_maxrss via getrusage() (KiB scaled by 1024),
 * or PeakPagefileUsage via GetProcessMemoryInfo() on Windows.
 *
 * NOTE(review): the trailing #else (fallback return) and #endif are
 * elided from this excerpt — embedded line numbers jump past 4822.
 */
4810 static int64_t getmaxrss(void)
4812 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4813 struct rusage rusage;
4814 getrusage(RUSAGE_SELF, &rusage);
4815 return (int64_t)rusage.ru_maxrss * 1024; /* ru_maxrss is in kilobytes on Linux; cast before multiply */
4816 #elif HAVE_GETPROCESSMEMORYINFO
4818 PROCESS_MEMORY_COUNTERS memcounters;
4819 proc = GetCurrentProcess();
4820 memcounters.cb = sizeof(memcounters); /* struct size must be set before the call */
4821 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4822 return memcounters.PeakPagefileUsage;
/*
 * av_log callback installed in main() when ffmpeg is invoked with -d
 * (see the strcmp(argv[1], "-d") branch below); body is elided in this
 * excerpt — presumably discards all log output. TODO confirm in full file.
 */
4828 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: configure logging, parse command-line options and
 * open all inputs/outputs via ffmpeg_parse_options(), run transcode(),
 * then print -benchmark statistics and decode-error counters, and leave
 * through exit_program() (cleanup handler ffmpeg_cleanup registered at
 * startup).
 *
 * NOTE(review): this excerpt is elided — the embedded original line
 * numbers jump, so braces and statements between the shown lines are
 * missing from this view.
 */
4832 int main(int argc, char **argv)
4835 BenchmarkTimeStamps ti;
4839 register_exit(ffmpeg_cleanup); /* all exits go through exit_program() -> this cleanup */
4841 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4843 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4844 parse_loglevel(argc, argv, options); /* honour -loglevel/-report before full parsing */
4846 if(argc>1 && !strcmp(argv[1], "-d")){ /* legacy "-d" flag: silence logging */
4848 av_log_set_callback(log_callback_null);
4854 avdevice_register_all();
4856 avformat_network_init();
4858 show_banner(argc, argv, options);
4860 /* parse options and open all input/output files */
4861 ret = ffmpeg_parse_options(argc, argv);
4865 if (nb_output_files <= 0 && nb_input_files == 0) { /* bare "ffmpeg": point user at help */
4867 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4871 /* file converter / grab */
4872 if (nb_output_files <= 0) {
4873 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4877 //     if (nb_input_files == 0) {
4878 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4882 for (i = 0; i < nb_output_files; i++) { /* scan outputs; rtp muxer handled specially (branch elided) */
4883 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4887 current_time = ti = get_benchmark_time_stamps(); /* baseline for -benchmark deltas */
4888 if (transcode() < 0)
4891 int64_t utime, stime, rtime;
4892 current_time = get_benchmark_time_stamps();
4893 utime = current_time.user_usec - ti.user_usec; /* CPU/wall deltas over the whole run */
4894 stime = current_time.sys_usec - ti.sys_usec;
4895 rtime = current_time.real_usec - ti.real_usec;
4896 av_log(NULL, AV_LOG_INFO,
4897 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4898 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4900 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4901 decode_error_stat[0], decode_error_stat[1]);
4902 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1]) /* -max_error_rate threshold exceeded */
4905 exit_program(received_nb_signals ? 255 : main_return_code); /* 255 when interrupted by a signal */
4906 return main_return_code;