2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity consumed by cmdutils for banner/version/help output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Destination for per-frame video statistics (-vstats/-vstats_file); NULL until opened. */
112 static FILE *vstats_file;
/* Constant names usable inside -force_key_frames expressions.
   NOTE(review): the initializer list is truncated in this excerpt. */
114 const char *const forced_keyframes_const_names[] = {
/* Snapshot of user/system/real time used by -benchmark reporting.
   NOTE(review): member declarations are missing from this excerpt. */
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
/* Forward declarations for helpers defined later in this file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Global transcode state. */
134 static int run_as_daemon = 0;
/* Counters for duplicated/dropped frames during video sync. */
135 static int nb_frames_dup = 0;
/* Threshold at which a "more than N frames duplicated" warning is emitted. */
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
/* decode_error_stat: error statistics — exact index semantics not visible here. */
138 static int64_t decode_error_stat[2];
140 static int want_sdp = 1;
/* Reference timestamps for -benchmark_all deltas (see update_benchmark()). */
142 static BenchmarkTimeStamps current_time;
/* Destination for -progress output; non-static, shared with ffmpeg_opt.c. */
143 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, lazily allocated in do_subtitle_out(). */
145 static uint8_t *subtitle_out;
/* Global tables of all input/output streams and files, shared with the
   option-parsing and filtering translation units (hence non-static). */
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;
162 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored on exit by term_exit_sigsafe(). */
163 static struct termios oldtty;
/* Nonzero once oldtty holds valid state that must be restored. */
164 static int restore_tty;
168 static void free_input_threads(void);
172 Convert subtitles to video with alpha to insert them in filter graphs.
173 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare ist->sub2video.frame as a blank (all-zero, i.e. fully transparent
   RGB32) canvas onto which subtitle rectangles can be blitted.
   Canvas size follows the decoder's dimensions when known, otherwise the
   pre-configured sub2video width/height.
   NOTE(review): excerpt is truncated — braces, `int ret;` and the return
   paths are missing here. */
176 static int sub2video_get_blank_frame(InputStream *ist)
179 AVFrame *frame = ist->sub2video.frame;
181 av_frame_unref(frame);
182 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the frame buffer; zeroing data[0] clears the canvas. */
185 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
187 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle onto an RGB32 canvas of size
   w x h. Rectangles that are not SUBTITLE_BITMAP or that fall outside the
   canvas are warned about and skipped.
   NOTE(review): excerpt is truncated — the AVSubtitleRect *r parameter,
   src/src2/x/y declarations and early returns are missing here. */
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194 uint32_t *pal, *dst2;
198 if (r->type != SUBTITLE_BITMAP) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Reject rectangles that would write outside the destination canvas. */
202 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
209 dst += r->y * dst_linesize + r->x * 4;
211 pal = (uint32_t *)r->data[1];
/* Expand each palette index through pal[] into a 32-bit pixel, row by row. */
212 for (y = 0; y < r->h; y++) {
213 dst2 = (uint32_t *)dst;
215 for (x = 0; x < r->w; x++)
216 *(dst2++) = pal[*(src2++)];
218 src += r->linesize[0];
/* Stamp the prepared sub2video frame with `pts` and push it (by reference,
   AV_BUFFERSRC_FLAG_KEEP_REF) into every buffer source fed by this input
   stream, also recording the pts as sub2video.last_pts. AVERROR_EOF from a
   closed filter input is tolerated silently; other errors only warn.
   NOTE(review): excerpt is truncated — `int i, ret;`, braces and the
   warning's `av_err2str(ret)` argument are missing here. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 AVFrame *frame = ist->sub2video.frame;
228 av_assert1(frame->data[0]);
229 ist->sub2video.last_pts = frame->pts = pts;
230 for (i = 0; i < ist->nb_filters; i++) {
231 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232 AV_BUFFERSRC_FLAG_KEEP_REF |
233 AV_BUFFERSRC_FLAG_PUSH);
234 if (ret != AVERROR_EOF && ret < 0)
/* Fixed grammar of the warning text ("while add" -> "while adding"). */
235 av_log(NULL, AV_LOG_WARNING, "Error while adding the frame to buffer source(%s).\n",
/* Render an AVSubtitle onto the sub2video canvas and push the result into
   the filtergraph. A NULL `sub` produces/continues a blank (cleared) frame.
   Display times are converted from the AV_TIME_BASE-relative subtitle
   timestamps into the input stream's time base.
   NOTE(review): excerpt is truncated — declarations (dst, dst_linesize, i,
   num_rects), the NULL-sub branch and several braces are missing here. */
240 void sub2video_update(InputStream *ist, AVSubtitle *sub)
242 AVFrame *frame = ist->sub2video.frame;
246 int64_t pts, end_pts;
/* start/end_display_time are in ms relative to sub->pts (AV_TIME_BASE units). */
251 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252 AV_TIME_BASE_Q, ist->st->time_base);
253 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254 AV_TIME_BASE_Q, ist->st->time_base);
255 num_rects = sub->num_rects;
/* NULL sub path: keep the previous end pts as the new frame's pts. */
257 pts = ist->sub2video.end_pts;
261 if (sub2video_get_blank_frame(ist) < 0) {
262 av_log(ist->dec_ctx, AV_LOG_ERROR,
263 "Impossible to get a blank canvas.\n");
266 dst = frame->data [0];
267 dst_linesize = frame->linesize[0];
/* Blit every rectangle, then hand the composed frame to the filters. */
268 for (i = 0; i < num_rects; i++)
269 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270 sub2video_push_ref(ist, pts);
271 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame to sibling subtitle streams of the
   same input file so that filters waiting on a subtitle input (e.g. overlay)
   are not starved while video frames accumulate.
   NOTE(review): excerpt is truncated — declarations (i, j, nb_reqs, pts2),
   `continue` statements and the nb_reqs guard before the push are missing. */
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
276 InputFile *infile = input_files[ist->file_index];
280 /* When a frame is read from a file, examine all sub2video streams in
281 the same file and send the sub2video frame again. Otherwise, decoded
282 video frames could be accumulating in the filter graph while a filter
283 (possibly overlay) is desperately waiting for a subtitle frame. */
284 for (i = 0; i < infile->nb_streams; i++) {
285 InputStream *ist2 = input_streams[infile->ist_index + i];
286 if (!ist2->sub2video.frame)
288 /* subtitles seem to be usually muxed ahead of other streams;
289 if not, subtracting a larger time here is necessary */
290 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291 /* do not send the heartbeat frame if the subtitle is already ahead */
292 if (pts2 <= ist2->sub2video.last_pts)
/* Past the current subtitle's end, or no canvas yet: refresh with a blank. */
294 if (pts2 >= ist2->sub2video.end_pts ||
295 (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296 sub2video_update(ist2, NULL);
/* Only push when some downstream buffer source actually failed a request. */
297 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
300 sub2video_push_ref(ist2, pts2);
/* Terminate a sub2video stream: emit a final blank frame if a subtitle was
   still on screen, then signal EOF (NULL frame) to every buffer source.
   NOTE(review): excerpt is truncated — `int i, ret;` and braces missing. */
304 static void sub2video_flush(InputStream *ist)
309 if (ist->sub2video.end_pts < INT64_MAX)
310 sub2video_update(ist, NULL);
311 for (i = 0; i < ist->nb_filters; i++) {
/* A NULL frame marks EOF on the buffer source; EOF twice is harmless. */
312 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313 if (ret != AVERROR_EOF && ret < 0)
314 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
318 /* end of sub2video hack */
/* Restore the saved terminal mode; must stay async-signal-safe since it is
   also invoked from signal-handling paths (hence no allocation or stdio).
   NOTE(review): excerpt is truncated — the restore_tty guard is missing. */
320 static void term_exit_sigsafe(void)
324 tcsetattr (0, TCSANOW, &oldtty);
/* Emit an empty AV_LOG_QUIET message — presumably to flush log state; verify. */
330 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping. The volatile ints are written from signal
   handlers; transcode_init_done is a proper atomic read elsewhere
   (decode_interrupt_cb). */
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
336 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
/* Async-signal-safe termination handler: record the signal, and after more
   than 3 signals force a hard exit, reporting via write(2) only (stdio and
   av_log are not signal-safe).
   NOTE(review): excerpt is truncated — the `static void` specifier, `int ret;`
   and the exit_program/abort call after the write are missing here. */
341 sigterm_handler(int sig)
344 received_sigterm = sig;
345 received_nb_signals++;
347 if(received_nb_signals > 3) {
/* write() return value is captured only to silence warn_unused_result. */
348 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349 strlen("Received > 3 system signals, hard exiting\n"));
350 if (ret < 0) { /* Do nothing */ };
355 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the POSIX-style
   sigterm_handler so shutdown follows one code path on all platforms.
   NOTE(review): excerpt is truncated — the switch statement opening,
   CTRL_C_EVENT case, sleep inside the wait loop and return statements
   are missing here. */
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
358 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
363 case CTRL_BREAK_EVENT:
364 sigterm_handler(SIGINT);
367 case CTRL_CLOSE_EVENT:
368 case CTRL_LOGOFF_EVENT:
369 case CTRL_SHUTDOWN_EVENT:
370 sigterm_handler(SIGTERM);
371 /* Basically, with these 3 events, when we return from this method the
372 process is hard terminated, so stall as long as we need to
373 to try and let the main thread(s) clean up and gracefully terminate
374 (we have at most 5 seconds, but should be done far before that). */
375 while (!ffmpeg_exited) {
381 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Terminal/signal initialization fragment.
   NOTE(review): the enclosing function's signature (term_init) is not
   visible in this excerpt; several lines, including the `struct termios tty`
   declaration and #if guards, are missing. */
/* Put the controlling terminal into raw-ish mode so single keypresses can
   be read without echo, but only for interactive, non-daemon runs. */
390 if (!run_as_daemon && stdin_interaction) {
392 if (tcgetattr (0, &tty) == 0) {
396 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397 |INLCR|IGNCR|ICRNL|IXON);
398 tty.c_oflag |= OPOST;
399 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400 tty.c_cflag &= ~(CSIZE|PARENB);
405 tcsetattr (0, TCSANOW, &tty);
/* Route termination-style signals through the common handler. */
407 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
411 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
414 signal(SIGXCPU, sigterm_handler);
417 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
419 #if HAVE_SETCONSOLECTRLHANDLER
420 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
424 /* read a key without blocking */
/* Poll stdin for one keypress: select() with zero timeout on POSIX, and a
   PeekNamedPipe/console probe on Windows.
   NOTE(review): excerpt is heavily truncated — fd_set/timeval setup, the
   actual read of the character and all return paths are missing here. */
425 static int read_key(void)
437 n = select(1, &rfds, NULL, NULL, &tv);
446 # if HAVE_PEEKNAMEDPIPE
448 static HANDLE input_handle;
451 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for redirected stdin, i.e. a pipe rather than a console. */
452 is_pipe = !GetConsoleMode(input_handle, &dw);
456 /* When running under a GUI, you will end here. */
457 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
458 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once the user has sent more
   signals than the init state allows (before init is done any signal
   interrupts; afterwards a second signal is required).
   NOTE(review): braces around the body are missing in this excerpt. */
476 static int decode_interrupt_cb(void *ctx)
478 return received_nb_signals > atomic_load(&transcode_init_done);
481 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, registered as the exit handler: drains and frees every
   filtergraph, output file/stream, input file/stream and auxiliary buffer,
   then reports the exit reason. `ret` is the pending process exit status.
   NOTE(review): excerpt is truncated throughout — loop-variable
   declarations, several braces and #if guards are missing. */
483 static void ffmpeg_cleanup(int ret)
488 int maxrss = getmaxrss() / 1024;
489 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- Filtergraphs: free the graph, then drain per-input frame and
       subtitle FIFOs before releasing the input/output descriptors. --- */
492 for (i = 0; i < nb_filtergraphs; i++) {
493 FilterGraph *fg = filtergraphs[i];
494 avfilter_graph_free(&fg->graph);
495 for (j = 0; j < fg->nb_inputs; j++) {
496 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
498 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499 sizeof(frame), NULL);
500 av_frame_free(&frame);
502 av_fifo_freep(&fg->inputs[j]->frame_queue);
503 if (fg->inputs[j]->ist->sub2video.sub_queue) {
504 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
506 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
507 &sub, sizeof(sub), NULL);
508 avsubtitle_free(&sub);
510 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
512 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
513 av_freep(&fg->inputs[j]->name);
514 av_freep(&fg->inputs[j]);
516 av_freep(&fg->inputs);
517 for (j = 0; j < fg->nb_outputs; j++) {
518 av_freep(&fg->outputs[j]->name);
519 av_freep(&fg->outputs[j]->formats);
520 av_freep(&fg->outputs[j]->channel_layouts);
521 av_freep(&fg->outputs[j]->sample_rates);
522 av_freep(&fg->outputs[j]);
524 av_freep(&fg->outputs);
525 av_freep(&fg->graph_desc);
527 av_freep(&filtergraphs[i]);
529 av_freep(&filtergraphs);
531 av_freep(&subtitle_out);
/* --- Output files: close the muxer's pb when the format owns a file,
       then release the context, its options and the OutputFile. --- */
534 for (i = 0; i < nb_output_files; i++) {
535 OutputFile *of = output_files[i];
540 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
542 avformat_free_context(s);
543 av_dict_free(&of->opts);
545 av_freep(&output_files[i]);
/* --- Output streams: bitstream filters, cached frames, expressions and
       the still-queued (never muxed) packets. --- */
547 for (i = 0; i < nb_output_streams; i++) {
548 OutputStream *ost = output_streams[i];
553 for (j = 0; j < ost->nb_bitstream_filters; j++)
554 av_bsf_free(&ost->bsf_ctx[j]);
555 av_freep(&ost->bsf_ctx);
557 av_frame_free(&ost->filtered_frame);
558 av_frame_free(&ost->last_frame);
559 av_dict_free(&ost->encoder_opts);
561 av_freep(&ost->forced_keyframes);
562 av_expr_free(ost->forced_keyframes_pexpr);
563 av_freep(&ost->avfilter);
564 av_freep(&ost->logfile_prefix);
566 av_freep(&ost->audio_channels_map);
567 ost->audio_channels_mapped = 0;
569 av_dict_free(&ost->sws_dict);
571 avcodec_free_context(&ost->enc_ctx);
572 avcodec_parameters_free(&ost->ref_par);
574 if (ost->muxing_queue) {
575 while (av_fifo_size(ost->muxing_queue)) {
577 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
578 av_packet_unref(&pkt);
580 av_fifo_freep(&ost->muxing_queue);
583 av_freep(&output_streams[i]);
/* --- Inputs: stop reader threads before closing demuxers and decoders. --- */
586 free_input_threads();
588 for (i = 0; i < nb_input_files; i++) {
589 avformat_close_input(&input_files[i]->ctx);
590 av_freep(&input_files[i]);
592 for (i = 0; i < nb_input_streams; i++) {
593 InputStream *ist = input_streams[i];
595 av_frame_free(&ist->decoded_frame);
596 av_frame_free(&ist->filter_frame);
597 av_dict_free(&ist->decoder_opts);
598 avsubtitle_free(&ist->prev_sub.subtitle);
599 av_frame_free(&ist->sub2video.frame);
600 av_freep(&ist->filters);
601 av_freep(&ist->hwaccel_device);
602 av_freep(&ist->dts_buffer);
604 avcodec_free_context(&ist->dec_ctx);
606 av_freep(&input_streams[i]);
/* fclose is checked: a failed close can lose buffered vstats data. */
610 if (fclose(vstats_file))
611 av_log(NULL, AV_LOG_ERROR,
612 "Error closing vstats file, loss of information possible: %s\n",
613 av_err2str(AVERROR(errno)));
615 av_freep(&vstats_filename);
617 av_freep(&input_streams);
618 av_freep(&input_files);
619 av_freep(&output_streams);
620 av_freep(&output_files);
624 avformat_network_deinit();
/* Report why we are exiting: user signal vs. conversion failure. */
626 if (received_sigterm) {
627 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
628 (int) received_sigterm);
629 } else if (ret && atomic_load(&transcode_init_done)) {
630 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b (case-
   sensitive match) — used to drop codec options already consumed elsewhere.
   NOTE(review): braces are missing in this excerpt. */
636 void remove_avoptions(AVDictionary **a, AVDictionary *b)
638 AVDictionaryEntry *t = NULL;
640 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
641 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort (AV_LOG_FATAL) if any user option in m was never consumed —
   i.e. the user passed an option no component recognized.
   NOTE(review): the exit_program call and braces are missing here. */
645 void assert_avoptions(AVDictionary *m)
647 AVDictionaryEntry *t;
648 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
649 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
654 static void abort_codec_experimental(AVCodec *c, int encoder)
/* -benchmark_all support: when fmt is non-NULL, log user/sys/real time
   elapsed since the previous call under the label built from fmt; in all
   cases current_time is reset as the new reference point.
   NOTE(review): excerpt is truncated — buf/va declarations, va_start/va_end
   and the current_time assignment are missing here. */
659 static void update_benchmark(const char *fmt, ...)
661 if (do_benchmark_all) {
662 BenchmarkTimeStamps t = get_benchmark_time_stamps();
668 vsnprintf(buf, sizeof(buf), fmt, va);
670 av_log(NULL, AV_LOG_INFO,
671 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
672 t.user_usec - current_time.user_usec,
673 t.sys_usec - current_time.sys_usec,
674 t.real_usec - current_time.real_usec, buf);
/* Mark every output stream finished: `this_stream` flags are OR-ed into the
   given ost, `others` into every remaining stream (used after a fatal mux
   error). NOTE(review): `int i;` and braces are missing in this excerpt. */
680 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
683 for (i = 0; i < nb_output_streams; i++) {
684 OutputStream *ost2 = output_streams[i];
685 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer: enforces -frames limits, buffers packets
   while the muxer header is not yet written, rescales timestamps into the
   stream time base, repairs invalid/non-monotonic DTS, accounts statistics
   and finally calls av_interleaved_write_frame(). `unqueue` is nonzero when
   the packet comes back out of the muxing queue (avoid double counting).
   NOTE(review): excerpt is truncated — `int ret; int i;`, several braces,
   exit paths and some #if guards are missing. */
689 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
691 AVFormatContext *s = of->ctx;
692 AVStream *st = ost->st;
696 * Audio encoders may split the packets -- #frames in != #packets out.
697 * But there is no reordering, so we can limit the number of output packets
698 * by simply dropping them here.
699 * Counting encoded video frames needs to be done separately because of
700 * reordering, see do_video_out().
701 * Do not count the packet when unqueued because it has been counted when queued.
703 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
704 if (ost->frame_number >= ost->max_frames) {
705 av_packet_unref(pkt);
/* Muxer not initialized yet: grow the FIFO (bounded by
   max_muxing_queue_size) and park a refcounted copy of the packet. */
711 if (!of->header_written) {
712 AVPacket tmp_pkt = {0};
713 /* the muxer is not initialized yet, buffer the packet */
714 if (!av_fifo_space(ost->muxing_queue)) {
715 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
716 ost->max_muxing_queue_size);
717 if (new_size <= av_fifo_size(ost->muxing_queue)) {
718 av_log(NULL, AV_LOG_ERROR,
719 "Too many packets buffered for output stream %d:%d.\n",
720 ost->file_index, ost->st->index);
723 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
727 ret = av_packet_make_refcounted(pkt);
730 av_packet_move_ref(&tmp_pkt, pkt);
731 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* VSYNC_DROP / negative audio sync: strip timestamps entirely. */
735 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
736 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
737 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
/* Pull encoder quality/error stats out of packet side data for reporting. */
739 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
741 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
743 ost->quality = sd ? AV_RL32(sd) : -1;
744 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
746 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
748 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force the packet duration to exactly one frame interval. */
753 if (ost->frame_rate.num && ost->is_cfr) {
754 if (pkt->duration > 0)
755 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
756 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
761 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
763 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
764 if (pkt->dts != AV_NOPTS_VALUE &&
765 pkt->pts != AV_NOPTS_VALUE &&
766 pkt->dts > pkt->pts) {
767 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
769 ost->file_index, ost->st->index);
/* sum - min - max == median of {pts, dts, last_mux_dts + 1}:
   a plausible monotone replacement for the bogus dts. */
771 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
772 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
773 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless AVFMT_TS_NONSTRICT) increasing DTS; VP9
   stream copy is exempt — presumably due to superframe splitting; verify. */
775 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
776 pkt->dts != AV_NOPTS_VALUE &&
777 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
778 ost->last_mux_dts != AV_NOPTS_VALUE) {
779 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
780 if (pkt->dts < max) {
781 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
782 av_log(s, loglevel, "Non-monotonous DTS in output stream "
783 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
784 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
786 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
789 av_log(s, loglevel, "changing to %"PRId64". This may result "
790 "in incorrect timestamps in the output file.\n",
792 if (pkt->pts >= pkt->dts)
793 pkt->pts = FFMAX(pkt->pts, max);
798 ost->last_mux_dts = pkt->dts;
800 ost->data_size += pkt->size;
801 ost->packets_written++;
803 pkt->stream_index = ost->index;
/* -debug_ts trace of what actually reaches the muxer. */
806 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
807 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
808 av_get_media_type_string(ost->enc_ctx->codec_type),
809 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
810 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
815 ret = av_interleaved_write_frame(s, pkt);
/* A mux failure finishes all streams but lets already-encoded data drain. */
817 print_error("av_interleaved_write_frame()", ret);
818 main_return_code = 1;
819 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
821 av_packet_unref(pkt);
/* Mark one output stream's encoder finished and clamp the owning file's
   recording time to the stream's current end timestamp.
   NOTE(review): excerpt is truncated — the shutdown-condition guard around
   the recording-time update and braces are missing here. */
824 static void close_output_stream(OutputStream *ost)
826 OutputFile *of = output_files[ost->file_index];
828 ost->finished |= ENCODER_FINISHED;
830 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
831 of->recording_time = FFMIN(of->recording_time, end);
836 * Send a single packet to the output, applying any bitstream filters
837 * associated with the output stream. This may result in any number
838 * of packets actually being written, depending on what bitstream
839 * filters are applied. The supplied packet is consumed and will be
840 * blank (as if newly-allocated) when this function returns.
842 * If eof is set, instead indicate EOF to all bitstream filters and
843 * therefore flush any delayed packets to the output. A blank packet
844 * must be supplied in this case.
/* Run the packet through the stream's bitstream-filter chain (if any) and
   write every resulting packet; with no filters it writes directly.
   eof != 0 flushes the chain with NULL sends. See the contract comment
   immediately above this function.
   NOTE(review): excerpt is truncated — `int ret = 0; int idx;`, the
   idx bookkeeping, goto finish paths and braces are missing here. */
846 static void output_packet(OutputFile *of, AVPacket *pkt,
847 OutputStream *ost, int eof)
851 /* apply the output bitstream filters, if any */
852 if (ost->nb_bitstream_filters) {
855 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
862 /* get a packet from the previous filter up the chain */
863 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
864 if (ret == AVERROR(EAGAIN)) {
868 } else if (ret == AVERROR_EOF) {
873 /* send it to the next filter down the chain or to the muxer */
874 if (idx < ost->nb_bitstream_filters) {
875 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
883 write_packet(of, pkt, ost, 0);
/* No bitstream filters: pass straight through to the muxer. */
886 write_packet(of, pkt, ost, 0);
889 if (ret < 0 && ret != AVERROR_EOF) {
890 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
891 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether this stream may still emit frames: closes the stream and
   (per truncated sources, presumably) returns 0 once the output file's
   -t recording time has been reached; otherwise returns nonzero.
   NOTE(review): return statements are missing from this excerpt. */
897 static int check_recording_time(OutputStream *ost)
899 OutputFile *of = output_files[ost->file_index];
/* Compare stream position (encoder time base) against recording_time
   (AV_TIME_BASE units) without converting either to the other's base. */
901 if (of->recording_time != INT64_MAX &&
902 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
903 AV_TIME_BASE_Q) >= 0) {
904 close_output_stream(ost);
/* Encode one filtered audio frame via the send-frame/receive-packet API and
   feed each produced packet to output_packet(); updates sync_opts and the
   samples/frames encoded counters.
   NOTE(review): excerpt is truncated — the AVFrame *frame parameter,
   `AVPacket pkt; int ret;`, the receive loop construct, goto error and
   braces are missing here. */
910 static void do_audio_out(OutputFile *of, OutputStream *ost,
913 AVCodecContext *enc = ost->enc_ctx;
917 av_init_packet(&pkt);
921 if (!check_recording_time(ost))
/* Without a usable pts (or with forced audio sync) continue from our own
   running sample counter; then advance it by this frame's samples. */
924 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
925 frame->pts = ost->sync_opts;
926 ost->sync_opts = frame->pts + frame->nb_samples;
927 ost->samples_encoded += frame->nb_samples;
928 ost->frames_encoded++;
930 av_assert0(pkt.size || !pkt.data);
931 update_benchmark(NULL);
/* -debug_ts trace of what enters the encoder. */
933 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
934 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
935 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
936 enc->time_base.num, enc->time_base.den);
939 ret = avcodec_send_frame(enc, frame);
/* Drain all packets the encoder can currently produce. */
944 ret = avcodec_receive_packet(enc, &pkt);
945 if (ret == AVERROR(EAGAIN))
950 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
952 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
955 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
956 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
957 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
958 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
961 output_packet(of, &pkt, ost, 0);
966 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and emit the resulting packet(s). DVB subtitles
   are encoded twice (draw packet + clear packet); timestamps are shifted by
   the output file's start time so -ss/-t behave, and start_display_time is
   normalized to 0 before encoding.
   NOTE(review): excerpt is truncated — the ost/sub parameters, AVCodecContext
   *enc, `AVPacket pkt; int64_t pts;`, nb initialization for the non-DVB case
   and several braces are missing here. */
970 static void do_subtitle_out(OutputFile *of,
974 int subtitle_out_max_size = 1024 * 1024;
975 int subtitle_out_size, nb, i;
980 if (sub->pts == AV_NOPTS_VALUE) {
981 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer (freed in ffmpeg_cleanup). */
990 subtitle_out = av_malloc(subtitle_out_max_size);
992 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
997 /* Note: DVB subtitle need one packet to draw them and one other
998 packet to clear them */
999 /* XXX: signal it in the codec context ? */
1000 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1005 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1007 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1008 pts -= output_files[ost->file_index]->start_time;
1009 for (i = 0; i < nb; i++) {
1010 unsigned save_num_rects = sub->num_rects;
1012 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1013 if (!check_recording_time(ost))
/* Fold start_display_time into pts so the encoder sees it as 0. */
1017 // start_display_time is required to be 0
1018 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1019 sub->end_display_time -= sub->start_display_time;
1020 sub->start_display_time = 0;
1024 ost->frames_encoded++;
1026 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1027 subtitle_out_max_size, sub);
/* The encoder may clobber num_rects (second DVB pass sets it to 0); restore. */
1029 sub->num_rects = save_num_rects;
1030 if (subtitle_out_size < 0) {
1031 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1035 av_init_packet(&pkt);
1036 pkt.data = subtitle_out;
1037 pkt.size = subtitle_out_size;
1038 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1039 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1040 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1041 /* XXX: the pts correction is handled here. Maybe handling
1042 it in the codec would be better */
/* First (draw) packet at start_display_time, second (clear) at end. */
1044 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1046 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1049 output_packet(of, &pkt, ost, 0);
1053 static void do_video_out(OutputFile *of,
1055 AVFrame *next_picture,
1058 int ret, format_video_sync;
1060 AVCodecContext *enc = ost->enc_ctx;
1061 AVCodecParameters *mux_par = ost->st->codecpar;
1062 AVRational frame_rate;
1063 int nb_frames, nb0_frames, i;
1064 double delta, delta0;
1065 double duration = 0;
1067 InputStream *ist = NULL;
1068 AVFilterContext *filter = ost->filter->filter;
1070 if (ost->source_index >= 0)
1071 ist = input_streams[ost->source_index];
1073 frame_rate = av_buffersink_get_frame_rate(filter);
1074 if (frame_rate.num > 0 && frame_rate.den > 0)
1075 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1077 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1078 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1080 if (!ost->filters_script &&
1084 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1085 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1088 if (!next_picture) {
1090 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1091 ost->last_nb0_frames[1],
1092 ost->last_nb0_frames[2]);
1094 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1095 delta = delta0 + duration;
1097 /* by default, we output a single frame */
1098 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1101 format_video_sync = video_sync_method;
1102 if (format_video_sync == VSYNC_AUTO) {
1103 if(!strcmp(of->ctx->oformat->name, "avi")) {
1104 format_video_sync = VSYNC_VFR;
1106 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1108 && format_video_sync == VSYNC_CFR
1109 && input_files[ist->file_index]->ctx->nb_streams == 1
1110 && input_files[ist->file_index]->input_ts_offset == 0) {
1111 format_video_sync = VSYNC_VSCFR;
1113 if (format_video_sync == VSYNC_CFR && copy_ts) {
1114 format_video_sync = VSYNC_VSCFR;
1117 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1121 format_video_sync != VSYNC_PASSTHROUGH &&
1122 format_video_sync != VSYNC_DROP) {
1123 if (delta0 < -0.6) {
1124 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1126 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1127 sync_ipts = ost->sync_opts;
1132 switch (format_video_sync) {
1134 if (ost->frame_number == 0 && delta0 >= 0.5) {
1135 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1138 ost->sync_opts = lrint(sync_ipts);
1141 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1142 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1144 } else if (delta < -1.1)
1146 else if (delta > 1.1) {
1147 nb_frames = lrintf(delta);
1149 nb0_frames = lrintf(delta0 - 0.6);
1155 else if (delta > 0.6)
1156 ost->sync_opts = lrint(sync_ipts);
1159 case VSYNC_PASSTHROUGH:
1160 ost->sync_opts = lrint(sync_ipts);
1167 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1168 nb0_frames = FFMIN(nb0_frames, nb_frames);
1170 memmove(ost->last_nb0_frames + 1,
1171 ost->last_nb0_frames,
1172 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1173 ost->last_nb0_frames[0] = nb0_frames;
1175 if (nb0_frames == 0 && ost->last_dropped) {
1177 av_log(NULL, AV_LOG_VERBOSE,
1178 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1179 ost->frame_number, ost->st->index, ost->last_frame->pts);
1181 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1182 if (nb_frames > dts_error_threshold * 30) {
1183 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1187 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1188 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1189 if (nb_frames_dup > dup_warning) {
1190 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1194 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1196 /* duplicates frame if needed */
1197 for (i = 0; i < nb_frames; i++) {
1198 AVFrame *in_picture;
1199 int forced_keyframe = 0;
1201 av_init_packet(&pkt);
1205 if (i < nb0_frames && ost->last_frame) {
1206 in_picture = ost->last_frame;
1208 in_picture = next_picture;
1213 in_picture->pts = ost->sync_opts;
1215 if (!check_recording_time(ost))
1218 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1219 ost->top_field_first >= 0)
1220 in_picture->top_field_first = !!ost->top_field_first;
1222 if (in_picture->interlaced_frame) {
1223 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1224 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1226 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1228 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1230 in_picture->quality = enc->global_quality;
1231 in_picture->pict_type = 0;
1233 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1234 in_picture->pts != AV_NOPTS_VALUE)
1235 ost->forced_kf_ref_pts = in_picture->pts;
1237 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1238 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1239 if (ost->forced_kf_index < ost->forced_kf_count &&
1240 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1241 ost->forced_kf_index++;
1242 forced_keyframe = 1;
1243 } else if (ost->forced_keyframes_pexpr) {
1245 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1246 res = av_expr_eval(ost->forced_keyframes_pexpr,
1247 ost->forced_keyframes_expr_const_values, NULL);
1248 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1249 ost->forced_keyframes_expr_const_values[FKF_N],
1250 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1251 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1252 ost->forced_keyframes_expr_const_values[FKF_T],
1253 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1256 forced_keyframe = 1;
1257 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1258 ost->forced_keyframes_expr_const_values[FKF_N];
1259 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1260 ost->forced_keyframes_expr_const_values[FKF_T];
1261 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1264 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1265 } else if ( ost->forced_keyframes
1266 && !strncmp(ost->forced_keyframes, "source", 6)
1267 && in_picture->key_frame==1) {
1268 forced_keyframe = 1;
1271 if (forced_keyframe) {
1272 in_picture->pict_type = AV_PICTURE_TYPE_I;
1273 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1276 update_benchmark(NULL);
1278 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1279 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1280 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1281 enc->time_base.num, enc->time_base.den);
1284 ost->frames_encoded++;
1286 ret = avcodec_send_frame(enc, in_picture);
1289 // Make sure Closed Captions will not be duplicated
1290 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1293 ret = avcodec_receive_packet(enc, &pkt);
1294 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1295 if (ret == AVERROR(EAGAIN))
1301 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1302 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1303 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1304 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1307 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1308 pkt.pts = ost->sync_opts;
1310 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1313 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1314 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1315 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1316 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1319 frame_size = pkt.size;
1320 output_packet(of, &pkt, ost, 0);
1322 /* if two pass, output log */
1323 if (ost->logfile && enc->stats_out) {
1324 fprintf(ost->logfile, "%s", enc->stats_out);
1329 * For video, number of frames in == number of packets out.
1330 * But there may be reordering, so we can't throw away frames on encoder
1331 * flush, we need to limit them here, before they go into encoder.
1333 ost->frame_number++;
1335 if (vstats_filename && frame_size)
1336 do_video_stats(ost, frame_size);
1339 if (!ost->last_frame)
1340 ost->last_frame = av_frame_alloc();
1341 av_frame_unref(ost->last_frame);
1342 if (next_picture && ost->last_frame)
1343 av_frame_ref(ost->last_frame, next_picture);
1345 av_frame_free(&ost->last_frame);
1349 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1353 static double psnr(double d)
1355 return -10.0 * log10(d);
/*
 * do_video_stats(): append one line of per-frame video statistics
 * (quality, frame size, bitrates and optionally PSNR) to the file
 * named by vstats_filename (-vstats / -vstats_file option).
 */
1358 static void do_video_stats(OutputStream *ost, int frame_size)
1360     AVCodecContext *enc;
1362     double ti1, bitrate, avg_bitrate;
1364     /* this is executed just the first time do_video_stats is called */
1366         vstats_file = fopen(vstats_filename, "w");
/* NOTE(review): assignment of enc and the fopen-failure path are on lines
 * not visible here — assumes vstats_file is valid below; confirm upstream. */
1374     if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1375         frame_number = ost->st->nb_frames;
/* vstats_version selects the legacy (<=1) or extended line format, the
 * latter prefixing the output file and stream indices. */
1376         if (vstats_version <= 1) {
1377             fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1378                     ost->quality / (float)FF_QP2LAMBDA);
1380             fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1381                     ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is derived from the encoder-reported squared error, normalized by
 * the frame area and the 8-bit peak value (255^2). */
1384         if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1385             fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1387         fprintf(vstats_file,"f_size= %6d ", frame_size);
1388         /* compute pts value */
1389         ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate from this frame's size over one encoder tick;
 * average bitrate from total muxed bytes over elapsed stream time. */
1393         bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1394         avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1395         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1396                 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1397         fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1401 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * finish_output_stream(): mark a stream as fully finished for both the
 * encoder and the muxer so no further data is fed to it.
 */
1403 static void finish_output_stream(OutputStream *ost)
1405     OutputFile *of = output_files[ost->file_index];
1408     ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* Under a condition on a line not visible here (presumably the output
 * file's -shortest handling — TODO confirm), every stream of the owning
 * output file is finished as well. */
1411     for (i = 0; i < of->ctx->nb_streams; i++)
1412         output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1417  * Get and encode new output from any of the filtergraphs, without causing
1420  * @return 0 for success, <0 for severe errors
1422 static int reap_filters(int flush)
1424     AVFrame *filtered_frame = NULL;
1427     /* Reap all buffers present in the buffer sinks */
1428     for (i = 0; i < nb_output_streams; i++) {
1429         OutputStream *ost = output_streams[i];
1430         OutputFile    *of = output_files[ost->file_index];
1431         AVFilterContext *filter;
1432         AVCodecContext *enc = ost->enc_ctx;
/* Skip streams with no (configured) filtergraph attached. */
1435         if (!ost->filter || !ost->filter->graph->graph)
1437         filter = ost->filter->filter;
/* Lazily initialize the output stream the first time its sink has data. */
1439         if (!ost->initialized) {
1440             char error[1024] = "";
1441             ret = init_output_stream(ost, error, sizeof(error));
1443                 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1444                        ost->file_index, ost->index, error);
/* Reusable per-stream frame; allocated once, unref'd after each use. */
1449         if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1450             return AVERROR(ENOMEM);
1452         filtered_frame = ost->filtered_frame;
1455             double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* Pull without requesting: only frames already queued in the sink. */
1456             ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1457                                                 AV_BUFFERSINK_FLAG_NO_REQUEST);
1459                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1460                     av_log(NULL, AV_LOG_WARNING,
1461                            "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1462                 } else if (flush && ret == AVERROR_EOF) {
/* On EOF during flush, push a NULL frame so the video path drains. */
1463                     if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1464                         do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1468             if (ost->finished) {
1469                 av_frame_unref(filtered_frame);
/* Rescale the frame PTS from the filter time base to the encoder time
 * base, with extra fractional bits kept in float_pts for the fps code. */
1472             if (filtered_frame->pts != AV_NOPTS_VALUE) {
1473                 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1474                 AVRational filter_tb = av_buffersink_get_time_base(filter);
1475                 AVRational tb = enc->time_base;
1476                 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1478                 tb.den <<= extra_bits;
1480                     av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1481                     av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1482                 float_pts /= 1 << extra_bits;
1483                 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1484                 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1486                 filtered_frame->pts =
1487                     av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1488                     av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
/* Dispatch the frame to the matching per-media-type encode path. */
1491             switch (av_buffersink_get_type(filter)) {
1492             case AVMEDIA_TYPE_VIDEO:
1493                 if (!ost->frame_aspect_ratio.num)
1494                     enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1497                     av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1498                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1500                            enc->time_base.num, enc->time_base.den);
1503                 do_video_out(of, ost, filtered_frame, float_pts);
1505             case AVMEDIA_TYPE_AUDIO:
/* The audio path cannot handle parameter changes unless the encoder
 * explicitly supports them. */
1506                 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1507                     enc->channels != filtered_frame->channels) {
1508                     av_log(NULL, AV_LOG_ERROR,
1509                            "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1512                 do_audio_out(of, ost, filtered_frame);
1515                 // TODO support subtitle filters
1519             av_frame_unref(filtered_frame);
/*
 * print_final_stats(): print the end-of-run summary — total bytes per media
 * type, muxing overhead, and (at verbose level) per-stream packet/frame
 * counts for every input and output file.
 */
1526 static void print_final_stats(int64_t total_size)
1528     uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1529     uint64_t subtitle_size = 0;
1530     uint64_t data_size = 0;
1531     float percent = -1.0;
/* Accumulate muxed byte counts per media type across all output streams. */
1535     for (i = 0; i < nb_output_streams; i++) {
1536         OutputStream *ost = output_streams[i];
1537         switch (ost->enc_ctx->codec_type) {
1538             case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1539             case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1540             case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1541             default:                 other_size += ost->data_size; break;
1543         extra_size += ost->enc_ctx->extradata_size;
1544         data_size  += ost->data_size;
/* First-pass-only streams produce no real output (condition's branch body
 * is on a line not visible here — TODO confirm). */
1545         if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1546             != AV_CODEC_FLAG_PASS1)
/* Muxing overhead: container bytes beyond the raw payload, in percent. */
1550     if (data_size && total_size>0 && total_size >= data_size)
1551         percent = 100.0 * (total_size - data_size) / data_size;
1553     av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1554            video_size / 1024.0,
1555            audio_size / 1024.0,
1556            subtitle_size / 1024.0,
1557            other_size / 1024.0,
1558            extra_size / 1024.0);
1560         av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1562         av_log(NULL, AV_LOG_INFO, "unknown");
1563     av_log(NULL, AV_LOG_INFO, "\n");
1565     /* print verbose per-stream stats */
1566     for (i = 0; i < nb_input_files; i++) {
1567         InputFile *f = input_files[i];
/* Note: shadows the total_size parameter for per-file accumulation. */
1568         uint64_t total_packets = 0, total_size = 0;
1570         av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1573         for (j = 0; j < f->nb_streams; j++) {
1574             InputStream *ist = input_streams[f->ist_index + j];
1575             enum AVMediaType type = ist->dec_ctx->codec_type;
1577             total_size    += ist->data_size;
1578             total_packets += ist->nb_packets;
1580             av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
1581                    i, j, media_type_string(type));
1582             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1583                    ist->nb_packets, ist->data_size);
1585             if (ist->decoding_needed) {
1586                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1587                        ist->frames_decoded);
1588                 if (type == AVMEDIA_TYPE_AUDIO)
1589                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1590                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1593             av_log(NULL, AV_LOG_VERBOSE, "\n");
1596         av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1597                total_packets, total_size);
/* Mirror of the loop above for the output side (muxed/encoded counters). */
1600     for (i = 0; i < nb_output_files; i++) {
1601         OutputFile *of = output_files[i];
1602         uint64_t total_packets = 0, total_size = 0;
1604         av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1607         for (j = 0; j < of->ctx->nb_streams; j++) {
1608             OutputStream *ost = output_streams[of->ost_index + j];
1609             enum AVMediaType type = ost->enc_ctx->codec_type;
1611             total_size    += ost->data_size;
1612             total_packets += ost->packets_written;
1614             av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
1615                    i, j, media_type_string(type));
1616             if (ost->encoding_needed) {
1617                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1618                        ost->frames_encoded);
1619                 if (type == AVMEDIA_TYPE_AUDIO)
1620                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1621                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1624             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1625                    ost->packets_written, ost->data_size);
1627             av_log(NULL, AV_LOG_VERBOSE, "\n");
1630         av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1631                total_packets, total_size);
/* Warn loudly when absolutely nothing was produced. */
1633     if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1634         av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1636             av_log(NULL, AV_LOG_WARNING, "\n");
1638             av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * print_report(): emit the periodic (or final) progress line — frame count,
 * fps, quality, size, time, bitrate, speed — to stderr/log, and a
 * machine-readable key=value version to the -progress destination.
 * Called from the main loop; rate-limited to twice per second unless this
 * is the last report.
 */
1643 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1645     AVBPrint buf, buf_script;
1647     AVFormatContext *oc;
1649     AVCodecContext *enc;
1650     int frame_number, vid, i;
1653     int64_t pts = INT64_MIN + 1;
1654     static int64_t last_time = -1;
1655     static int qp_histogram[52];
1656     int hours, mins, secs, us;
1657     const char *hours_sign;
1661     if (!print_stats && !is_last_report && !progress_avio)
/* Throttle: print at most every 500ms, except for the final report. */
1664     if (!is_last_report) {
1665         if (last_time == -1) {
1666             last_time = cur_time;
1669         if ((cur_time - last_time) < 500000)
1671         last_time = cur_time;
1674     t = (cur_time-timer_start) / 1000000.0;
1677     oc = output_files[0]->ctx;
1679     total_size = avio_size(oc->pb);
1680     if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1681         total_size = avio_tell(oc->pb);
1684     av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1685     av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1686     for (i = 0; i < nb_output_streams; i++) {
1688         ost = output_streams[i];
1690         if (!ost->stream_copy)
1691             q = ost->quality / (float) FF_QP2LAMBDA;
/* Only the first video stream contributes frame/fps/q to the report. */
1693         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1694             av_bprintf(&buf, "q=%2.1f ", q);
1695             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1696                        ost->file_index, ost->index, q);
1698         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1701             frame_number = ost->frame_number;
1702             fps = t > 1 ? frame_number / t : 0;
1703             av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1704                        frame_number, fps < 9.95, fps, q);
1705             av_bprintf(&buf_script, "frame=%d\n", frame_number);
1706             av_bprintf(&buf_script, "fps=%.2f\n", fps);
1707             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1708                        ost->file_index, ost->index, q);
1710                 av_bprintf(&buf, "L");
/* qp_histogram: per-QP frame counts, printed as one hex digit per bucket
 * (log2-compressed) when qp reporting is enabled. */
1714                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1716                 for (j = 0; j < 32; j++)
1717                     av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1720             if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1722                 double error, error_sum = 0;
1723                 double scale, scale_sum = 0;
1725                 char type[3] = { 'Y','U','V' };
1726                 av_bprintf(&buf, "PSNR=");
/* Per-plane PSNR: cumulative over all frames on the last report,
 * per-frame otherwise. */
1727                 for (j = 0; j < 3; j++) {
1728                     if (is_last_report) {
1729                         error = enc->error[j];
1730                         scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1732                         error = ost->error[j];
1733                         scale = enc->width * enc->height * 255.0 * 255.0;
1739                     p = psnr(error / scale);
1740                     av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1741                     av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1742                                ost->file_index, ost->index, type[j] | 32, p);
1744                 p = psnr(error_sum / scale_sum);
1745                 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1746                 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1747                            ost->file_index, ost->index, p);
1751         /* compute min output value */
1752         if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1753             pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1754                                           ost->st->time_base, AV_TIME_BASE_Q));
1756             nb_frames_drop += ost->last_dropped;
/* Split |pts| (in AV_TIME_BASE units) into h:m:s.us for display. */
1759     secs = FFABS(pts) / AV_TIME_BASE;
1760     us = FFABS(pts) % AV_TIME_BASE;
1765     hours_sign = (pts < 0) ? "-" : "";
1767     bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1768     speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1770     if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1771     else                av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1772     if (pts == AV_NOPTS_VALUE) {
1773         av_bprintf(&buf, "N/A ");
1775         av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1776                    hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1780         av_bprintf(&buf, "bitrate=N/A");
1781         av_bprintf(&buf_script, "bitrate=N/A\n");
1783         av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1784         av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
/* buf_script mirrors the human-readable line as key=value pairs for the
 * -progress consumer. */
1787     if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1788     else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1789     if (pts == AV_NOPTS_VALUE) {
1790         av_bprintf(&buf_script, "out_time_us=N/A\n");
1791         av_bprintf(&buf_script, "out_time_ms=N/A\n");
1792         av_bprintf(&buf_script, "out_time=N/A\n");
1794         av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1795         av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1796         av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1797                    hours_sign, hours, mins, secs, us);
1800     if (nb_frames_dup || nb_frames_drop)
1801         av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1802     av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1803     av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1806         av_bprintf(&buf, " speed=N/A");
1807         av_bprintf(&buf_script, "speed=N/A\n");
1809         av_bprintf(&buf, " speed=%4.3gx", speed);
1810         av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1813     if (print_stats || is_last_report) {
/* '\r' keeps updating one console line; '\n' terminates on the last one. */
1814         const char end = is_last_report ? '\n' : '\r';
1815         if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1816             fprintf(stderr, "%s    %c", buf.str, end);
1818             av_log(NULL, AV_LOG_INFO, "%s    %c", buf.str, end);
1822     av_bprint_finalize(&buf, NULL);
1824     if (progress_avio) {
1825         av_bprintf(&buf_script, "progress=%s\n",
1826                    is_last_report ? "end" : "continue");
1827         avio_write(progress_avio, buf_script.str,
1828                    FFMIN(buf_script.len, buf_script.size - 1));
1829         avio_flush(progress_avio);
1830         av_bprint_finalize(&buf_script, NULL);
1831         if (is_last_report) {
1832             if ((ret = avio_closep(&progress_avio)) < 0)
1833                 av_log(NULL, AV_LOG_ERROR,
1834                        "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1839         print_final_stats(total_size);
1842 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1844 // We never got any input. Set a fake format, which will
1845 // come from libavformat.
1846 ifilter->format = par->format;
1847 ifilter->sample_rate = par->sample_rate;
1848 ifilter->channels = par->channels;
1849 ifilter->channel_layout = par->channel_layout;
1850 ifilter->width = par->width;
1851 ifilter->height = par->height;
1852 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/*
 * flush_encoders(): at end of input, drain every encoder by sending a NULL
 * frame and muxing all remaining packets until AVERROR_EOF.
 */
1855 static void flush_encoders(void)
1859     for (i = 0; i < nb_output_streams; i++) {
1860         OutputStream   *ost = output_streams[i];
1861         AVCodecContext *enc = ost->enc_ctx;
1862         OutputFile      *of = output_files[ost->file_index];
1864         if (!ost->encoding_needed)
1867         // Try to enable encoding with no input frames.
1868         // Maybe we should just let encoding fail instead.
1869         if (!ost->initialized) {
1870             FilterGraph *fg = ost->filter->graph;
1871             char error[1024] = "";
1873             av_log(NULL, AV_LOG_WARNING,
1874                    "Finishing stream %d:%d without any data written to it.\n",
1875                    ost->file_index, ost->st->index);
/* The filtergraph was never configured (no frames arrived); fall back to
 * codecpar-derived parameters so it can still be set up. */
1877             if (ost->filter && !fg->graph) {
1879                 for (x = 0; x < fg->nb_inputs; x++) {
1880                     InputFilter *ifilter = fg->inputs[x];
1881                     if (ifilter->format < 0)
1882                         ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1885                 if (!ifilter_has_all_input_formats(fg))
1888                 ret = configure_filtergraph(fg);
1890                     av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1894                 finish_output_stream(ost);
1897             ret = init_output_stream(ost, error, sizeof(error));
1899                 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1900                        ost->file_index, ost->index, error);
/* PCM-style audio (frame_size <= 1) and non-A/V streams need no drain. */
1905         if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1908         if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1912             const char *desc = NULL;
1916             switch (enc->codec_type) {
1917             case AVMEDIA_TYPE_AUDIO:
1920             case AVMEDIA_TYPE_VIDEO:
1927             av_init_packet(&pkt);
1931             update_benchmark(NULL);
/* Drain loop: feed a NULL frame (flush) whenever the encoder signals it
 * wants more input, then collect the pending packets. */
1933             while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1934                 ret = avcodec_send_frame(enc, NULL);
1936                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1943             update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1944             if (ret < 0 && ret != AVERROR_EOF) {
1945                 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1950             if (ost->logfile && enc->stats_out) {
1951                 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: emit an empty "eof" packet so the muxer flushes its bitstream
 * filters, then stop for this stream. */
1953             if (ret == AVERROR_EOF) {
1954                 output_packet(of, &pkt, ost, 1);
1957             if (ost->finished & MUXER_FINISHED) {
1958                 av_packet_unref(&pkt);
1961             av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1962             pkt_size = pkt.size;
1963             output_packet(of, &pkt, ost, 0);
1964             if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1965                 do_video_stats(ost, pkt_size);
1972  * Check whether a packet from ist should be written into ost at this time
1974 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1976     OutputFile *of = output_files[ost->file_index];
/* Global index of ist among all input streams of all files. */
1977     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
/* Reject packets from streams other than this output's source ... */
1979     if (ost->source_index != ist_index)
/* ... and packets arriving before the output file's requested start. */
1985     if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * do_streamcopy(): pass a demuxed packet straight to the muxer (-c copy),
 * rescaling its timestamps from the input stream time base to the output
 * mux time base and applying start-time / recording-time trimming.
 * pkt == NULL (handled on elided lines) signals EOF and flushes the
 * output bitstream filters.
 */
1991 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1993     OutputFile *of = output_files[ost->file_index];
1994     InputFile   *f = input_files [ist->file_index];
1995     int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1996     int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1997     AVPacket opkt = { 0 };
1999     av_init_packet(&opkt);
2001     // EOF: flush output bitstream filters.
2003         output_packet(of, &opkt, ost, 1);
/* Drop leading non-keyframes unless -copyinkf was given. */
2007     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2008         !ost->copy_initial_nonkeyframes)
/* Drop packets dated before the effective start point unless
 * -copypriorss was given. */
2011     if (!ost->frame_number && !ost->copy_prior_start) {
2012         int64_t comp_start = start_time;
2013         if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2014             comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2015         if (pkt->pts == AV_NOPTS_VALUE ?
2016             ist->pts < comp_start :
2017             pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Output-file -t / input-file -t limits: close the stream once reached. */
2021     if (of->recording_time != INT64_MAX &&
2022         ist->pts >= of->recording_time + start_time) {
2023         close_output_stream(ost);
2027     if (f->recording_time != INT64_MAX) {
2028         start_time = f->ctx->start_time;
2029         if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2030             start_time += f->start_time;
2031         if (ist->pts >= f->recording_time + start_time) {
2032             close_output_stream(ost);
2037     /* force the input stream PTS */
2038     if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* Rescale pts/dts into the muxer time base, offset by the start time. */
2041     if (pkt->pts != AV_NOPTS_VALUE)
2042         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2044         opkt.pts = AV_NOPTS_VALUE;
2046     if (pkt->dts == AV_NOPTS_VALUE)
2047         opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2049         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2050     opkt.dts -= ost_tb_start_time;
/* Audio special case: derive sample-accurate timestamps from the frame
 * duration to avoid rounding drift between time bases. */
2052     if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2053         int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2055             duration = ist->dec_ctx->frame_size;
2056         opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2057                                                (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2058                                                ost->mux_timebase) - ost_tb_start_time;
2061     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2063     opkt.flags    = pkt->flags;
/* The data buffer is shared (ref'd), not copied. */
2066         opkt.buf = av_buffer_ref(pkt->buf);
2070     opkt.data = pkt->data;
2071     opkt.size = pkt->size;
2073     av_copy_packet_side_data(&opkt, pkt);
2075     output_packet(of, &opkt, ost, 0);
/*
 * guess_input_channel_layout(): if the decoder reported no channel layout,
 * pick the default layout for the channel count (bounded by
 * -guess_layout_max) and warn the user about the guess.
 * Return value semantics (success/failure) are on lines not visible here.
 */
2078 int guess_input_channel_layout(InputStream *ist)
2080     AVCodecContext *dec = ist->dec_ctx;
2082     if (!dec->channel_layout) {
2083         char layout_name[256];
/* Refuse to guess for more channels than the user allows. */
2085         if (dec->channels > ist->guess_layout_max)
2087         dec->channel_layout = av_get_default_channel_layout(dec->channels);
2088         if (!dec->channel_layout)
2090         av_get_channel_layout_string(layout_name, sizeof(layout_name),
2091                                      dec->channels, dec->channel_layout);
2092         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2093                "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * check_decode_result(): update the global decode-error statistics and,
 * when -xerror is set, abort on decode errors or corrupt frames.
 */
2098 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
2100     if (*got_output || ret<0)
2101         decode_error_stat[ret<0] ++;
2103     if (ret < 0 && exit_on_error)
2106     if (*got_output && ist) {
/* A frame may decode "successfully" yet still be flagged corrupt. */
2107         if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2108             av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2109                    "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2116 // Filters can be configured only if the formats of all inputs are known.
2117 static int ifilter_has_all_input_formats(FilterGraph *fg)
2120     for (i = 0; i < fg->nb_inputs; i++) {
/* format < 0 means "not yet known"; only audio and video inputs require
 * a negotiated format before the graph can be configured. */
2121         if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2122                                           fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * ifilter_send_frame(): push one decoded frame into a filtergraph input,
 * (re)configuring the graph first if the input parameters changed or the
 * graph was never built. Frames arriving before all inputs have known
 * formats are buffered in the input's frame queue.
 */
2128 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2130     FilterGraph *fg = ifilter->graph;
2131     int need_reinit, ret, i;
2133     /* determine if the parameters for this input changed */
2134     need_reinit = ifilter->format != frame->format;
2136     switch (ifilter->ist->st->codecpar->codec_type) {
2137     case AVMEDIA_TYPE_AUDIO:
2138         need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
2139                        ifilter->channels       != frame->channels ||
2140                        ifilter->channel_layout != frame->channel_layout;
2142     case AVMEDIA_TYPE_VIDEO:
2143         need_reinit |= ifilter->width  != frame->width ||
2144                        ifilter->height != frame->height;
/* -reinit_filter 0 suppresses reconfiguration once the graph exists. */
2148     if (!ifilter->ist->reinit_filters && fg->graph)
/* A change of hw frames context always forces a reinit. */
2151     if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2152         (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2156         ret = ifilter_parameters_from_frame(ifilter, frame);
2161     /* (re)init the graph if possible, otherwise buffer the frame and return */
2162     if (need_reinit || !fg->graph) {
2163         for (i = 0; i < fg->nb_inputs; i++) {
2164             if (!ifilter_has_all_input_formats(fg)) {
/* Not all inputs known yet: queue a clone of the frame (ownership moves
 * to the FIFO) and try again when more formats arrive. */
2165                 AVFrame *tmp = av_frame_clone(frame);
2167                     return AVERROR(ENOMEM);
2168                 av_frame_unref(frame);
2170                 if (!av_fifo_space(ifilter->frame_queue)) {
2171                     ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2173                         av_frame_free(&tmp);
2177                 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain the old graph before tearing it down for reconfiguration. */
2182         ret = reap_filters(1);
2183         if (ret < 0 && ret != AVERROR_EOF) {
2184             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2188         ret = configure_filtergraph(fg);
2190             av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2195     ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2197         if (ret != AVERROR_EOF)
2198             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * ifilter_send_eof(): signal end-of-stream on one filtergraph input at the
 * given pts; if the graph was never configured, fall back to codecpar
 * parameters so configuration can still succeed, and fail if the format
 * remains unknown for an audio/video input.
 */
2205 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2211     if (ifilter->filter) {
2212         ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2216         // the filtergraph was never configured
2217         if (ifilter->format < 0)
2218             ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2219         if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2220             av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2221             return AVERROR_INVALIDDATA;
2228 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2229 // There is the following difference: if you got a frame, you must call
2230 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2231 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin wrapper bridging the old got_frame-style call sites to the
 * send/receive decoding API. */
2232 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2239         ret = avcodec_send_packet(avctx, pkt);
2240         // In particular, we don't expect AVERROR(EAGAIN), because we read all
2241         // decoded frames with avcodec_receive_frame() until done.
2242         if (ret < 0 && ret != AVERROR_EOF)
2246     ret = avcodec_receive_frame(avctx, frame);
/* EAGAIN here simply means "no frame available yet", not an error;
 * handling of that case is on lines not visible here. */
2247     if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * send_frame_to_filters(): feed one decoded frame to every filtergraph
 * input fed by this stream. All but the last consumer receive a reference
 * (via ist->filter_frame); the last consumes the frame itself.
 */
2255 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2260     av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2261     for (i = 0; i < ist->nb_filters; i++) {
2262         if (i < ist->nb_filters - 1) {
2263             f = ist->filter_frame;
2264             ret = av_frame_ref(f, decoded_frame);
2269         ret = ifilter_send_frame(ist->filters[i], f);
/* EOF from one filter input is not fatal for the others. */
2270         if (ret == AVERROR_EOF)
2271             ret = 0; /* ignore */
2273             av_log(NULL, AV_LOG_ERROR,
2274                    "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * decode_audio(): decode one audio packet, fix up the frame's timestamp
 * (preferring the frame pts, then the packet pts, then the stream dts),
 * and forward the frame to the stream's filtergraphs.
 */
2281 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2284     AVFrame *decoded_frame;
2285     AVCodecContext *avctx = ist->dec_ctx;
2287     AVRational decoded_frame_tb;
/* Lazily allocate the reusable frames owned by the InputStream. */
2289     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2290         return AVERROR(ENOMEM);
2291     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2292         return AVERROR(ENOMEM);
2293     decoded_frame = ist->decoded_frame;
2295     update_benchmark(NULL);
2296     ret = decode(avctx, decoded_frame, got_output, pkt);
2297     update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2301     if (ret >= 0 && avctx->sample_rate <= 0) {
2302         av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2303         ret = AVERROR_INVALIDDATA;
2306     if (ret != AVERROR_EOF)
2307         check_decode_result(ist, got_output, ret);
2309     if (!*got_output || ret < 0)
2312     ist->samples_decoded += decoded_frame->nb_samples;
2313     ist->frames_decoded++;
2315     /* increment next_dts to use for the case where the input stream does not
2316        have timestamps or there are multiple frames in the packet */
2317     ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2319     ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Choose the best available timestamp source and remember its time base. */
2322     if (decoded_frame->pts != AV_NOPTS_VALUE) {
2323         decoded_frame_tb   = ist->st->time_base;
2324     } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2325         decoded_frame->pts = pkt->pts;
2326         decoded_frame_tb   = ist->st->time_base;
2328         decoded_frame->pts = ist->dts;
2329         decoded_frame_tb   = AV_TIME_BASE_Q;
/* Rescale to the sample-rate time base with duration-aware rounding to
 * keep sample-accurate continuity across frames. */
2331     if (decoded_frame->pts != AV_NOPTS_VALUE)
2332         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2333                                               (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2334                                               (AVRational){1, avctx->sample_rate});
2335     ist->nb_samples = decoded_frame->nb_samples;
2336     err = send_frame_to_filters(ist, decoded_frame);
2338     av_frame_unref(ist->filter_frame);
2339     av_frame_unref(decoded_frame);
2340     return err < 0 ? err : ret;
/*
 * decode_video(): decode one video packet, derive a best-effort timestamp
 * (falling back to buffered packet DTS values at EOF), and forward the
 * frame to the stream's filtergraphs.
 */
2343 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2346     AVFrame *decoded_frame;
2347     int i, ret = 0, err = 0;
2348     int64_t best_effort_timestamp;
2349     int64_t dts = AV_NOPTS_VALUE;
2352     // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2353     // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2355     if (!eof && pkt && pkt->size == 0)
2358     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2359         return AVERROR(ENOMEM);
2360     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2361         return AVERROR(ENOMEM);
2362     decoded_frame = ist->decoded_frame;
2363     if (ist->dts != AV_NOPTS_VALUE)
2364         dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2367         avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2370     // The old code used to set dts on the drain packet, which does not work
2371     // with the new API anymore.
/* Buffer each packet's dts so it can serve as a timestamp fallback when
 * draining at EOF. */
2373         void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2375             return AVERROR(ENOMEM);
2376         ist->dts_buffer = new;
2377         ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2380     update_benchmark(NULL);
2381     ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2382     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2386     // The following line may be required in some cases where there is no parser
2387     // or the parser does not has_b_frames correctly
2388     if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2389         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2390             ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2392             av_log(ist->dec_ctx, AV_LOG_WARNING,
2393                    "video_delay is larger in decoder than demuxer %d > %d.\n"
2394                    "If you want to help, upload a sample "
2395                    "of this file to ftp://upload.ffmpeg.org/incoming/ "
2396                    "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2397                    ist->dec_ctx->has_b_frames,
2398                    ist->st->codecpar->video_delay);
2401     if (ret != AVERROR_EOF)
2402         check_decode_result(ist, got_output, ret);
/* Frame/context size mismatch is only reported, not treated as an error. */
2404     if (*got_output && ret >= 0) {
2405         if (ist->dec_ctx->width  != decoded_frame->width ||
2406             ist->dec_ctx->height != decoded_frame->height ||
2407             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2408             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2409                    decoded_frame->width,
2410                    decoded_frame->height,
2411                    decoded_frame->format,
2412                    ist->dec_ctx->width,
2413                    ist->dec_ctx->height,
2414                    ist->dec_ctx->pix_fmt);
2418     if (!*got_output || ret < 0)
/* Honor a user-forced field order (-top option). */
2421     if(ist->top_field_first>=0)
2422         decoded_frame->top_field_first = ist->top_field_first;
2424     ist->frames_decoded++;
/* Download the frame from hw memory when a hwaccel retrieve hook is set. */
2426     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2427         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2431     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2433     best_effort_timestamp= decoded_frame->best_effort_timestamp;
2434     *duration_pts = decoded_frame->pkt_duration;
/* CFR input (-r on input): synthesize timestamps from a frame counter. */
2436     if (ist->framerate.num)
2437         best_effort_timestamp = ist->cfr_next_pts++;
/* At EOF, fall back to the oldest buffered packet dts (FIFO pop). */
2439     if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2440         best_effort_timestamp = ist->dts_buffer[0];
2442         for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2443             ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2444         ist->nb_dts_buffer--;
2447     if(best_effort_timestamp != AV_NOPTS_VALUE) {
2448         int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2450         if (ts != AV_NOPTS_VALUE)
2451             ist->next_pts = ist->pts = ts;
2455         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2456                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2457                ist->st->index, av_ts2str(decoded_frame->pts),
2458                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2459                best_effort_timestamp,
2460                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2461                decoded_frame->key_frame, decoded_frame->pict_type,
2462                ist->st->time_base.num, ist->st->time_base.den);
2465     if (ist->st->sample_aspect_ratio.num)
2466         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2468     err = send_frame_to_filters(ist, decoded_frame);
2471     av_frame_unref(ist->filter_frame);
2472     av_frame_unref(decoded_frame);
2473     return err < 0 ? err : ret;
/* Decode one subtitle packet on input stream ist, optionally fix up the
 * previous subtitle's display duration, feed sub2video, and send the
 * decoded AVSubtitle to all matching subtitle encoders.
 * *got_output is set by the decoder; returns the decoder result code. */
2476 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2479 AVSubtitle subtitle;
2481 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2482 &subtitle, got_output, pkt);
2484 check_decode_result(NULL, got_output, ret);
2486 if (ret < 0 || !*got_output) {
/* decode failure or EOF drain with no output: flush any queued sub2video state */
2489 sub2video_flush(ist);
/* -fix_sub_duration: clamp the PREVIOUS subtitle's end_display_time so it
 * does not overlap the newly decoded one (delays output by one subtitle). */
2493 if (ist->fix_sub_duration) {
2495 if (ist->prev_sub.got_output) {
/* elapsed time between the two subtitles, in milliseconds */
2496 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2497 1000, AV_TIME_BASE);
2498 if (end < ist->prev_sub.subtitle.end_display_time) {
2499 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2500 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2501 ist->prev_sub.subtitle.end_display_time, end,
2502 end <= 0 ? ", dropping it" : "");
2503 ist->prev_sub.subtitle.end_display_time = end;
/* swap current decode result with the buffered previous one, so the
 * (now duration-fixed) previous subtitle is the one processed below */
2506 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2507 FFSWAP(int, ret, ist->prev_sub.ret);
2508 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video: either render immediately onto the heartbeat frame, or queue
 * the subtitle until the filtergraph is configured */
2516 if (ist->sub2video.frame) {
2517 sub2video_update(ist, &subtitle);
2518 } else if (ist->nb_filters) {
2519 if (!ist->sub2video.sub_queue)
2520 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2521 if (!ist->sub2video.sub_queue)
/* grow the FIFO when full before writing */
2523 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2524 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2528 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
/* empty subtitles (no rects) are not sent to encoders */
2532 if (!subtitle.num_rects)
2535 ist->frames_decoded++;
/* hand the subtitle to every output stream that encodes subtitles from this input */
2537 for (i = 0; i < nb_output_streams; i++) {
2538 OutputStream *ost = output_streams[i];
2540 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2541 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2544 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2549 avsubtitle_free(&subtitle);
/* Signal EOF (with the stream's last pts, rescaled to the stream time base)
 * to every filtergraph input fed by this input stream. */
2553 static int send_filter_eof(InputStream *ist)
2556 /* TODO keep pts also in stream time base to avoid converting back */
2557 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2558 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2560 for (i = 0; i < ist->nb_filters; i++) {
2561 ret = ifilter_send_eof(ist->filters[i], pts);
2568 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet dispatcher: maintains the input stream's dts/pts clocks,
 * runs the appropriate decoder (audio/video/subtitle) when decoding is
 * needed, sends filter EOF on drain, and handles the stream-copy path.
 * no_eof suppresses the filter EOF (used when looping input).
 * Returns !eof_reached, i.e. nonzero while the stream still has data. */
2569 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2573 int eof_reached = 0;
/* first packet: seed ist->dts; for streams with B-frames start slightly
 * negative so that dts of the first displayed frame lands near 0 */
2576 if (!ist->saw_first_ts) {
2577 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2579 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2580 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2581 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2583 ist->saw_first_ts = 1;
2586 if (ist->next_dts == AV_NOPTS_VALUE)
2587 ist->next_dts = ist->dts;
2588 if (ist->next_pts == AV_NOPTS_VALUE)
2589 ist->next_pts = ist->pts;
2593 av_init_packet(&avpkt);
/* resync the stream clock to the packet dts (in AV_TIME_BASE units) */
2600 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2601 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2602 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2603 ist->next_pts = ist->pts = ist->dts;
2606 // while we have more to decode or while the decoder did output something on EOF
2607 while (ist->decoding_needed) {
2608 int64_t duration_dts = 0;
2609 int64_t duration_pts = 0;
2611 int decode_failed = 0;
2613 ist->pts = ist->next_pts;
2614 ist->dts = ist->next_dts;
2616 switch (ist->dec_ctx->codec_type) {
2617 case AVMEDIA_TYPE_AUDIO:
/* on repeat iterations pass NULL to drain buffered decoder output */
2618 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2621 case AVMEDIA_TYPE_VIDEO:
2622 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
/* estimate the frame duration: prefer the packet duration, else derive it
 * from the decoder framerate and repeat_pict/ticks_per_frame */
2624 if (!repeating || !pkt || got_output) {
2625 if (pkt && pkt->duration) {
2626 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2627 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2628 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2629 duration_dts = ((int64_t)AV_TIME_BASE *
2630 ist->dec_ctx->framerate.den * ticks) /
2631 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2634 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2635 ist->next_dts += duration_dts;
2637 ist->next_dts = AV_NOPTS_VALUE;
/* advance the pts clock by the decoded frame's duration when known,
 * otherwise fall back to the dts-derived duration */
2641 if (duration_pts > 0) {
2642 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2644 ist->next_pts += duration_dts;
2648 case AVMEDIA_TYPE_SUBTITLE:
2651 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2652 if (!pkt && ret >= 0)
2659 if (ret == AVERROR_EOF) {
2665 if (decode_failed) {
2666 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2667 ist->file_index, ist->st->index, av_err2str(ret));
2669 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2670 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2672 if (!decode_failed || exit_on_error)
2678 ist->got_output = 1;
2683 // During draining, we might get multiple output frames in this loop.
2684 // ffmpeg.c does not drain the filter chain on configuration changes,
2685 // which means if we send multiple frames at once to the filters, and
2686 // one of those frames changes configuration, the buffered frames will
2687 // be lost. This can upset certain FATE tests.
2688 // Decode only 1 frame per call on EOF to appease these FATE tests.
2689 // The ideal solution would be to rewrite decoding to use the new
2690 // decoding API in a better way.
2697 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2698 /* except when looping we need to flush but not to send an EOF */
2699 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2700 int ret = send_filter_eof(ist);
2702 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2707 /* handle stream copy */
2708 if (!ist->decoding_needed && pkt) {
2709 ist->dts = ist->next_dts;
2710 switch (ist->dec_ctx->codec_type) {
2711 case AVMEDIA_TYPE_AUDIO:
2712 av_assert1(pkt->duration >= 0);
/* advance dts by one audio frame (preferred) or by the packet duration */
2713 if (ist->dec_ctx->sample_rate) {
2714 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2715 ist->dec_ctx->sample_rate;
2717 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2720 case AVMEDIA_TYPE_VIDEO:
2721 if (ist->framerate.num) {
2722 // TODO: Remove work-around for c99-to-c89 issue 7
2723 AVRational time_base_q = AV_TIME_BASE_Q;
/* quantize to frame index in the forced framerate, then step one frame */
2724 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2725 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2726 } else if (pkt->duration) {
2727 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2728 } else if(ist->dec_ctx->framerate.num != 0) {
2729 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2730 ist->next_dts += ((int64_t)AV_TIME_BASE *
2731 ist->dec_ctx->framerate.den * ticks) /
2732 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
/* for stream copy, pts simply tracks dts */
2736 ist->pts = ist->dts;
2737 ist->next_pts = ist->next_dts;
/* forward the packet to every stream-copy output fed by this input */
2739 for (i = 0; i < nb_output_streams; i++) {
2740 OutputStream *ost = output_streams[i];
2742 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2745 do_streamcopy(ist, ost, pkt);
2748 return !eof_reached;
/* Generate an SDP description covering all RTP output files and either print
 * it to stdout or write it to -sdp_file. Waits until every output file has
 * written its header before generating. */
2751 static void print_sdp(void)
2756 AVIOContext *sdp_pb;
2757 AVFormatContext **avc;
/* bail out if any output header is not yet written; try again later */
2759 for (i = 0; i < nb_output_files; i++) {
2760 if (!output_files[i]->header_written)
2764 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* collect only the RTP muxer contexts */
2767 for (i = 0, j = 0; i < nb_output_files; i++) {
2768 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2769 avc[j] = output_files[i]->ctx;
2777 av_sdp_create(avc, j, sdp, sizeof(sdp));
2779 if (!sdp_filename) {
2780 printf("SDP:\n%s\n", sdp);
2783 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2784 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2786 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2787 avio_closep(&sdp_pb);
2788 av_freep(&sdp_filename);
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick the first hwaccel format matching the user's -hwaccel
 * selection, initializing the hwaccel; falls through to the first
 * non-hwaccel format otherwise. Records the chosen format in
 * ist->hwaccel_pix_fmt. */
2796 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2798 InputStream *ist = s->opaque;
2799 const enum AVPixelFormat *p;
2802 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2803 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2804 const AVCodecHWConfig *config = NULL;
/* software formats need no setup; stop at the first one */
2807 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* generic/auto hwaccel: query the codec's hw configs for this pix_fmt */
2810 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2811 ist->hwaccel_id == HWACCEL_AUTO) {
2813 config = avcodec_get_hw_config(s->codec, i);
2816 if (!(config->methods &
2817 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2819 if (config->pix_fmt == *p)
2824 if (config->device_type != ist->hwaccel_device_type) {
2825 // Different hwaccel offered, ignore.
2829 ret = hwaccel_decode_init(s);
2831 if (ist->hwaccel_id == HWACCEL_GENERIC) {
/* explicit request failed: fatal; auto mode silently falls back */
2832 av_log(NULL, AV_LOG_FATAL,
2833 "%s hwaccel requested for input stream #%d:%d, "
2834 "but cannot be initialized.\n",
2835 av_hwdevice_get_type_name(config->device_type),
2836 ist->file_index, ist->st->index);
2837 return AV_PIX_FMT_NONE;
/* legacy path: look the pix_fmt up in the static hwaccels[] table */
2842 const HWAccel *hwaccel = NULL;
2844 for (i = 0; hwaccels[i].name; i++) {
2845 if (hwaccels[i].pix_fmt == *p) {
2846 hwaccel = &hwaccels[i];
2851 // No hwaccel supporting this pixfmt.
2854 if (hwaccel->id != ist->hwaccel_id) {
2855 // Does not match requested hwaccel.
2859 ret = hwaccel->init(s);
2861 av_log(NULL, AV_LOG_FATAL,
2862 "%s hwaccel requested for input stream #%d:%d, "
2863 "but cannot be initialized.\n", hwaccel->name,
2864 ist->file_index, ist->st->index);
2865 return AV_PIX_FMT_NONE;
/* propagate a pre-existing hw frames context to the decoder */
2869 if (ist->hw_frames_ctx) {
2870 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2871 if (!s->hw_frames_ctx)
2872 return AV_PIX_FMT_NONE;
2875 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator when the frame uses the negotiated hwaccel pixel format,
 * otherwise use the default allocator. */
2882 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2884 InputStream *ist = s->opaque;
2886 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2887 return ist->hwaccel_get_buffer(s, frame, flags);
2889 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for input stream ist_index (if decoding is needed),
 * installing the get_format/get_buffer2 callbacks, hw device setup and
 * per-codec decoder options. On failure, writes a message into error[]
 * and returns a negative error code. */
2892 static int init_input_stream(int ist_index, char *error, int error_len)
2895 InputStream *ist = input_streams[ist_index];
2897 if (ist->decoding_needed) {
2898 AVCodec *codec = ist->dec;
2900 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2901 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2902 return AVERROR(EINVAL);
/* hook the hwaccel negotiation and buffer callbacks into the decoder */
2905 ist->dec_ctx->opaque = ist;
2906 ist->dec_ctx->get_format = get_format;
2907 ist->dec_ctx->get_buffer2 = get_buffer;
2908 ist->dec_ctx->thread_safe_callbacks = 1;
2910 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2911 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2912 (ist->decoding_needed & DECODING_FOR_OST)) {
2913 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2914 if (ist->decoding_needed & DECODING_FOR_FILTER)
2915 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2918 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2920 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2921 * audio, and video decoders such as cuvid or mediacodec */
2922 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2924 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2925 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2926 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2927 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2928 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2930 ret = hw_device_setup_for_decode(ist);
2932 snprintf(error, error_len, "Device setup failed for "
2933 "decoder on input stream #%d:%d : %s",
2934 ist->file_index, ist->st->index, av_err2str(ret));
2938 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2939 if (ret == AVERROR_EXPERIMENTAL)
2940 abort_codec_experimental(codec, 0);
2942 snprintf(error, error_len,
2943 "Error while opening decoder for input stream "
2945 ist->file_index, ist->st->index, av_err2str(ret));
/* reject decoder options that were not consumed by avcodec_open2() */
2948 assert_avoptions(ist->decoder_opts);
2951 ist->next_pts = AV_NOPTS_VALUE;
2952 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream an output stream maps 1:1 to, or NULL when the
 * output is fed by a filtergraph / has no direct source. */
2957 static InputStream *get_input_stream(OutputStream *ost)
2959 if (ost->source_index >= 0)
2960 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values (used for forced keyframe pts). */
2964 static int compare_int64(const void *a, const void *b)
2966 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2969 /* open the muxer when all the streams are initialized */
/* Write the output file header once every stream in it is initialized, then
 * flush packets that were queued in the per-stream muxing FIFOs while
 * waiting. No-op until the last stream reports initialized. */
2970 static int check_init_output_file(OutputFile *of, int file_index)
2974 for (i = 0; i < of->ctx->nb_streams; i++) {
2975 OutputStream *ost = output_streams[of->ost_index + i];
2976 if (!ost->initialized)
2980 of->ctx->interrupt_callback = int_cb;
2982 ret = avformat_write_header(of->ctx, &of->opts);
2984 av_log(NULL, AV_LOG_ERROR,
2985 "Could not write header for output file #%d "
2986 "(incorrect codec parameters ?): %s\n",
2987 file_index, av_err2str(ret));
2990 //assert_avoptions(of->opts);
2991 of->header_written = 1;
2993 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2995 if (sdp_filename || want_sdp)
2998 /* flush the muxing queues */
2999 for (i = 0; i < of->ctx->nb_streams; i++) {
3000 OutputStream *ost = output_streams[of->ost_index + i];
3002 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3003 if (!av_fifo_size(ost->muxing_queue))
3004 ost->mux_timebase = ost->st->time_base;
3006 while (av_fifo_size(ost->muxing_queue)) {
3008 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3009 write_packet(of, &pkt, ost, 1);
/* Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base through the chain, then copy the
 * final output parameters/time base back onto the stream. */
3016 static int init_output_bsfs(OutputStream *ost)
3021 if (!ost->nb_bitstream_filters)
3024 for (i = 0; i < ost->nb_bitstream_filters; i++) {
3025 ctx = ost->bsf_ctx[i];
/* each filter's input params come from the previous filter's output,
 * or from the stream itself for the first filter */
3027 ret = avcodec_parameters_copy(ctx->par_in,
3028 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3032 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3034 ret = av_bsf_init(ctx);
3036 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3037 ost->bsf_ctx[i]->filter->name);
/* the last filter in the chain defines the stream's final parameters */
3042 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3043 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3047 ost->st->time_base = ctx->time_base_out;
/* Initialize an output stream in stream-copy (-c copy) mode: copy codec
 * parameters, codec tag, timing info, disposition and side data from the
 * source input stream, with per-codec-type fixups. */
3052 static int init_output_stream_streamcopy(OutputStream *ost)
3054 OutputFile *of = output_files[ost->file_index];
3055 InputStream *ist = get_input_stream(ost);
3056 AVCodecParameters *par_dst = ost->st->codecpar;
3057 AVCodecParameters *par_src = ost->ref_par;
3060 uint32_t codec_tag = par_dst->codec_tag;
3062 av_assert0(ist && !ost->filter);
/* run the input parameters through enc_ctx so user codec options apply */
3064 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3066 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3068 av_log(NULL, AV_LOG_FATAL,
3069 "Error setting up codec context options.\n");
3073 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3075 av_log(NULL, AV_LOG_FATAL,
3076 "Error getting reference codec parameters.\n");
/* keep the source codec_tag only if the output format has no tag table,
 * already maps this tag to the same codec id, or cannot supply one */
3081 unsigned int codec_tag_tmp;
3082 if (!of->ctx->oformat->codec_tag ||
3083 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3084 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3085 codec_tag = par_src->codec_tag;
3088 ret = avcodec_parameters_copy(par_dst, par_src);
3092 par_dst->codec_tag = codec_tag;
3094 if (!ost->frame_rate.num)
3095 ost->frame_rate = ist->framerate;
3096 ost->st->avg_frame_rate = ost->frame_rate;
3098 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3102 // copy timebase while removing common factors
3103 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3104 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3106 // copy estimated duration as a hint to the muxer
3107 if (ost->st->duration <= 0 && ist->st->duration > 0)
3108 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3111 ost->st->disposition = ist->st->disposition;
/* clone all stream-level side data from input to output */
3113 if (ist->st->nb_side_data) {
3114 for (i = 0; i < ist->st->nb_side_data; i++) {
3115 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3118 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3120 return AVERROR(ENOMEM);
3121 memcpy(dst_data, sd_src->data, sd_src->size);
/* a user-specified rotation overrides any copied display matrix */
3125 if (ost->rotate_overridden) {
3126 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3127 sizeof(int32_t) * 9);
3129 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3132 switch (par_dst->codec_type) {
3133 case AVMEDIA_TYPE_AUDIO:
3134 if (audio_volume != 256) {
3135 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* muxer quirk: these block_align values are bogus for MP3/AC3 streams */
3138 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3139 par_dst->block_align= 0;
3140 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3141 par_dst->block_align= 0;
3143 case AVMEDIA_TYPE_VIDEO:
3144 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3146 av_mul_q(ost->frame_aspect_ratio,
3147 (AVRational){ par_dst->height, par_dst->width });
3148 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3149 "with stream copy may produce invalid files\n");
3151 else if (ist->st->sample_aspect_ratio.num)
3152 sar = ist->st->sample_aspect_ratio;
3154 sar = par_src->sample_aspect_ratio;
3155 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3156 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3157 ost->st->r_frame_rate = ist->st->r_frame_rate;
3161 ost->mux_timebase = ist->st->time_base;
/* Set the "encoder" metadata tag on an output stream to the libavcodec
 * identification string plus the encoder name, unless the tag already
 * exists or bitexact mode is requested (then only "Lavc <name>"). */
3166 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3168 AVDictionaryEntry *e;
3170 uint8_t *encoder_string;
3171 int encoder_string_len;
3172 int format_flags = 0;
3173 int codec_flags = ost->enc_ctx->flags;
/* respect a user-provided encoder tag */
3175 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate -fflags / codec flags option strings to detect +bitexact */
3178 e = av_dict_get(of->opts, "fflags", NULL, 0);
3180 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3183 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3185 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3187 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3190 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3193 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3194 encoder_string = av_mallocz(encoder_string_len);
3195 if (!encoder_string)
3198 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3199 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3201 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3202 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3203 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3204 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames specification string kf: a comma-separated
 * list of timestamps, where an entry "chapters[+offset]" expands to every
 * chapter start of the output file. Fills ost->forced_kf_pts (sorted,
 * in avctx->time_base units) and ost->forced_kf_count. */
3207 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3208 AVCodecContext *avctx)
3211 int n = 1, i, size, index = 0;
/* count entries by counting commas */
3214 for (p = kf; *p; p++)
3218 pts = av_malloc_array(size, sizeof(*pts));
3220 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3225 for (i = 0; i < n; i++) {
3226 char *next = strchr(p, ',');
3231 if (!memcmp(p, "chapters", 8)) {
3233 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* grow the array to hold one entry per chapter (minus the placeholder) */
3236 if (avf->nb_chapters > INT_MAX - size ||
3237 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3239 av_log(NULL, AV_LOG_FATAL,
3240 "Could not allocate forced key frames array.\n");
/* optional offset after "chapters", e.g. "chapters-0.1" */
3243 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3244 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3246 for (j = 0; j < avf->nb_chapters; j++) {
3247 AVChapter *c = avf->chapters[j];
3248 av_assert1(index < size);
3249 pts[index++] = av_rescale_q(c->start, c->time_base,
3250 avctx->time_base) + t;
3255 t = parse_time_or_die("force_key_frames", p, 1);
3256 av_assert1(index < size);
3257 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3264 av_assert0(index == size);
3265 qsort(pts, size, sizeof(*pts), compare_int64);
3266 ost->forced_kf_count = size;
3267 ost->forced_kf_pts = pts;
/* Choose the encoder time base: an explicit -enc_time_base value wins;
 * a negative value means "copy the input stream's time base" (warning and
 * fallback when no input stream is available); otherwise use the supplied
 * codec-type default. */
3270 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3272 InputStream *ist = get_input_stream(ost);
3273 AVCodecContext *enc_ctx = ost->enc_ctx;
3274 AVFormatContext *oc;
3276 if (ost->enc_timebase.num > 0) {
3277 enc_ctx->time_base = ost->enc_timebase;
3281 if (ost->enc_timebase.num < 0) {
3283 enc_ctx->time_base = ist->st->time_base;
3287 oc = output_files[ost->file_index]->ctx;
3288 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3291 enc_ctx->time_base = default_time_base;
/* Initialize an output stream's encoder context from the filtergraph sink
 * and (when available) the source input stream: frame rate selection,
 * time base, sample/pixel format, dimensions, aspect ratio, forced
 * keyframes, and per-codec-type parameters. */
3294 static int init_output_stream_encode(OutputStream *ost)
3296 InputStream *ist = get_input_stream(ost);
3297 AVCodecContext *enc_ctx = ost->enc_ctx;
3298 AVCodecContext *dec_ctx = NULL;
3299 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3302 set_encoder_id(output_files[ost->file_index], ost);
3304 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3305 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3306 // which have to be filtered out to prevent leaking them to output files.
3307 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3310 ost->st->disposition = ist->st->disposition;
3312 dec_ctx = ist->dec_ctx;
3314 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* if this is the only stream of its media type in the file, mark it as
 * the default audio/video stream */
3316 for (j = 0; j < oc->nb_streams; j++) {
3317 AVStream *st = oc->streams[j];
3318 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3321 if (j == oc->nb_streams)
3322 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3323 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3324 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* frame rate selection: buffersink -> -r option -> input framerate ->
 * input r_frame_rate -> hard 25fps fallback */
3327 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3328 if (!ost->frame_rate.num)
3329 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3330 if (ist && !ost->frame_rate.num)
3331 ost->frame_rate = ist->framerate;
3332 if (ist && !ost->frame_rate.num)
3333 ost->frame_rate = ist->st->r_frame_rate;
3334 if (ist && !ost->frame_rate.num) {
3335 ost->frame_rate = (AVRational){25, 1};
3336 av_log(NULL, AV_LOG_WARNING,
3338 "about the input framerate is available. Falling "
3339 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3340 "if you want a different framerate.\n",
3341 ost->file_index, ost->index);
/* snap to the nearest framerate the encoder supports, unless -force_fps */
3344 if (ost->enc->supported_framerates && !ost->force_fps) {
3345 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3346 ost->frame_rate = ost->enc->supported_framerates[idx];
3348 // reduce frame rate for mpeg4 to be within the spec limits
3349 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3350 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3351 ost->frame_rate.num, ost->frame_rate.den, 65535);
3355 switch (enc_ctx->codec_type) {
3356 case AVMEDIA_TYPE_AUDIO:
/* audio parameters come from the filtergraph sink */
3357 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3359 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3360 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3361 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3362 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3363 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3365 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3368 case AVMEDIA_TYPE_VIDEO:
3369 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3371 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3372 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3373 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3374 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3375 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3376 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* convert forced keyframe timestamps into the encoder time base */
3378 for (j = 0; j < ost->forced_kf_count; j++)
3379 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3381 enc_ctx->time_base);
3383 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3384 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3385 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3386 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3387 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3388 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3390 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3392 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3393 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3395 enc_ctx->framerate = ost->frame_rate;
3397 ost->st->avg_frame_rate = ost->frame_rate;
/* parameters changed relative to the decoder: raw sample depth comes
 * from the CLI option instead of the decoder */
3400 enc_ctx->width != dec_ctx->width ||
3401 enc_ctx->height != dec_ctx->height ||
3402 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3403 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -top option: 0 = bottom field first, 1 = top field first */
3406 if (ost->top_field_first == 0) {
3407 enc_ctx->field_order = AV_FIELD_BB;
3408 } else if (ost->top_field_first == 1) {
3409 enc_ctx->field_order = AV_FIELD_TT;
3412 if (ost->forced_keyframes) {
3413 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3414 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3415 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3417 av_log(NULL, AV_LOG_ERROR,
3418 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3421 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3422 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3423 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3424 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3426 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3427 // parse it only for static kf timings
3428 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3429 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3433 case AVMEDIA_TYPE_SUBTITLE:
3434 enc_ctx->time_base = AV_TIME_BASE_Q;
/* inherit dimensions from the source for bitmap subtitles */
3435 if (!enc_ctx->width) {
3436 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3437 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3440 case AVMEDIA_TYPE_DATA:
3447 ost->mux_timebase = enc_ctx->time_base;
3452 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3456 if (ost->encoding_needed) {
3457 AVCodec *codec = ost->enc;
3458 AVCodecContext *dec = NULL;
3461 ret = init_output_stream_encode(ost);
3465 if ((ist = get_input_stream(ost)))
3467 if (dec && dec->subtitle_header) {
3468 /* ASS code assumes this buffer is null terminated so add extra byte. */
3469 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3470 if (!ost->enc_ctx->subtitle_header)
3471 return AVERROR(ENOMEM);
3472 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3473 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3475 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3476 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3477 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3479 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3480 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3481 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3483 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3484 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3485 av_buffersink_get_format(ost->filter->filter)) {
3486 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3487 if (!ost->enc_ctx->hw_frames_ctx)
3488 return AVERROR(ENOMEM);
3490 ret = hw_device_setup_for_encode(ost);
3492 snprintf(error, error_len, "Device setup failed for "
3493 "encoder on output stream #%d:%d : %s",
3494 ost->file_index, ost->index, av_err2str(ret));
3498 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3499 int input_props = 0, output_props = 0;
3500 AVCodecDescriptor const *input_descriptor =
3501 avcodec_descriptor_get(dec->codec_id);
3502 AVCodecDescriptor const *output_descriptor =
3503 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3504 if (input_descriptor)
3505 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3506 if (output_descriptor)
3507 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3508 if (input_props && output_props && input_props != output_props) {
3509 snprintf(error, error_len,
3510 "Subtitle encoding currently only possible from text to text "
3511 "or bitmap to bitmap");
3512 return AVERROR_INVALIDDATA;
3516 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3517 if (ret == AVERROR_EXPERIMENTAL)
3518 abort_codec_experimental(codec, 1);
3519 snprintf(error, error_len,
3520 "Error while opening encoder for output stream #%d:%d - "
3521 "maybe incorrect parameters such as bit_rate, rate, width or height",
3522 ost->file_index, ost->index);
3525 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3526 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3527 av_buffersink_set_frame_size(ost->filter->filter,
3528 ost->enc_ctx->frame_size);
3529 assert_avoptions(ost->encoder_opts);
3530 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3531 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3532 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3533 " It takes bits/s as argument, not kbits/s\n");
3535 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3537 av_log(NULL, AV_LOG_FATAL,
3538 "Error initializing the output stream codec context.\n");
3542 * FIXME: ost->st->codec should't be needed here anymore.
3544 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3548 if (ost->enc_ctx->nb_coded_side_data) {
3551 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3552 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3555 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3557 return AVERROR(ENOMEM);
3558 memcpy(dst_data, sd_src->data, sd_src->size);
3563 * Add global input side data. For now this is naive, and copies it
3564 * from the input stream's global side data. All side data should
3565 * really be funneled over AVFrame and libavfilter, then added back to
3566 * packet side data, and then potentially using the first packet for
3571 for (i = 0; i < ist->st->nb_side_data; i++) {
3572 AVPacketSideData *sd = &ist->st->side_data[i];
3573 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3575 return AVERROR(ENOMEM);
3576 memcpy(dst, sd->data, sd->size);
3577 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3578 av_display_rotation_set((uint32_t *)dst, 0);
3582 // copy timebase while removing common factors
3583 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3584 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3586 // copy estimated duration as a hint to the muxer
3587 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3588 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3590 ost->st->codec->codec= ost->enc_ctx->codec;
3591 } else if (ost->stream_copy) {
3592 ret = init_output_stream_streamcopy(ost);
3597 // parse user provided disposition, and update stream values
3598 if (ost->disposition) {
3599 static const AVOption opts[] = {
3600 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3601 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3602 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3603 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3604 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3605 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3606 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3607 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3608 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3609 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3610 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3611 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3612 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3613 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3614 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3615 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3618 static const AVClass class = {
3620 .item_name = av_default_item_name,
3622 .version = LIBAVUTIL_VERSION_INT,
3624 const AVClass *pclass = &class;
3626 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3631 /* initialize bitstream filters for the output stream
3632 * needs to be done here, because the codec id for streamcopy is not
3633 * known until now */
3634 ret = init_output_bsfs(ost);
3638 ost->initialized = 1;
3640 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/*
 * Warn (at most once per stream index) that a stream not present at open
 * time appeared in input file 'input_index' while demuxing.
 * file->nb_streams_warn acts as a high-water mark suppressing repeats.
 * NOTE(review): some original lines are elided in this extract (gaps in
 * the embedded numbering); comments describe only the visible code.
 */
3647 static void report_new_stream(int input_index, AVPacket *pkt)
3649 InputFile *file = input_files[input_index];
3650 AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this (or a later) stream index — nothing to do */
3652 if (pkt->stream_index < file->nb_streams_warn)
3654 av_log(file->ctx, AV_LOG_WARNING,
3655 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3656 av_get_media_type_string(st->codecpar->codec_type),
3657 input_index, pkt->stream_index,
3658 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* raise the high-water mark past the stream we just warned about */
3659 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * One-time initialisation before the main transcode loop:
 *   - bind complex-filtergraph outputs to a source input stream,
 *   - arm frame-rate emulation timestamps for -re inputs,
 *   - init all input streams and the output streams not fed by a
 *     filtergraph (those are initialised lazily on first frame),
 *   - discard programs with no used streams,
 *   - write headers of outputs that have no streams at all,
 *   - dump the stream mapping to the log.
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): this extract elides some original source lines (gaps in
 * the embedded numbering); comments cover only the visible code.
 */
3662 static int transcode_init(void)
3664 int ret = 0, i, j, k;
3665 AVFormatContext *oc;
3668 char error[1024] = {0};
/* attach complex-graph outputs that have no source yet to the (single)
 * input stream feeding their graph, searching from the last stream */
3670 for (i = 0; i < nb_filtergraphs; i++) {
3671 FilterGraph *fg = filtergraphs[i];
3672 for (j = 0; j < fg->nb_outputs; j++) {
3673 OutputFilter *ofilter = fg->outputs[j];
3674 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3676 if (fg->nb_inputs != 1)
3678 for (k = nb_input_streams-1; k >= 0 ; k--)
3679 if (fg->inputs[0]->ist == input_streams[k])
3681 ofilter->ost->source_index = k;
3685 /* init framerate emulation */
3686 for (i = 0; i < nb_input_files; i++) {
3687 InputFile *ifile = input_files[i];
3688 if (ifile->rate_emu)
3689 for (j = 0; j < ifile->nb_streams; j++)
3690 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3693 /* init input streams */
3694 for (i = 0; i < nb_input_streams; i++)
3695 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close any encoder contexts before bailing out */
3696 for (i = 0; i < nb_output_streams; i++) {
3697 ost = output_streams[i];
3698 avcodec_close(ost->enc_ctx);
3703 /* open each encoder */
3704 for (i = 0; i < nb_output_streams; i++) {
3705 // skip streams fed from filtergraphs until we have a frame for them
3706 if (output_streams[i]->filter)
3709 ret = init_output_stream(output_streams[i], error, sizeof(error));
3714 /* discard unused programs */
3715 for (i = 0; i < nb_input_files; i++) {
3716 InputFile *ifile = input_files[i];
3717 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3718 AVProgram *p = ifile->ctx->programs[j];
3719 int discard = AVDISCARD_ALL;
/* keep the program if at least one of its streams is used */
3721 for (k = 0; k < p->nb_stream_indexes; k++)
3722 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3723 discard = AVDISCARD_DEFAULT;
3726 p->discard = discard;
3730 /* write headers for files with no streams */
3731 for (i = 0; i < nb_output_files; i++) {
3732 oc = output_files[i]->ctx;
3733 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3734 ret = check_init_output_file(output_files[i], i);
3741 /* dump the stream mapping */
3742 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3743 for (i = 0; i < nb_input_streams; i++) {
3744 ist = input_streams[i];
/* report inputs feeding complex (non-simple) filtergraphs */
3746 for (j = 0; j < ist->nb_filters; j++) {
3747 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3748 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3749 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3750 ist->filters[j]->name);
3751 if (nb_filtergraphs > 1)
3752 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3753 av_log(NULL, AV_LOG_INFO, "\n");
3758 for (i = 0; i < nb_output_streams; i++) {
3759 ost = output_streams[i];
3761 if (ost->attachment_filename) {
3762 /* an attached file */
3763 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3764 ost->attachment_filename, ost->file_index, ost->index);
3768 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3769 /* output from a complex graph */
3770 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3771 if (nb_filtergraphs > 1)
3772 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3774 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3775 ost->index, ost->enc ? ost->enc->name : "?");
/* plain input-stream -> output-stream mapping */
3779 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3780 input_streams[ost->source_index]->file_index,
3781 input_streams[ost->source_index]->st->index,
3784 if (ost->sync_ist != input_streams[ost->source_index])
3785 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3786 ost->sync_ist->file_index,
3787 ost->sync_ist->st->index);
3788 if (ost->stream_copy)
3789 av_log(NULL, AV_LOG_INFO, " (copy)");
3791 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3792 const AVCodec *out_codec = ost->enc;
3793 const char *decoder_name = "?";
3794 const char *in_codec_name = "?";
3795 const char *encoder_name = "?";
3796 const char *out_codec_name = "?";
3797 const AVCodecDescriptor *desc;
/* print "native" when the implementation name equals the codec name */
3800 decoder_name = in_codec->name;
3801 desc = avcodec_descriptor_get(in_codec->id);
3803 in_codec_name = desc->name;
3804 if (!strcmp(decoder_name, in_codec_name))
3805 decoder_name = "native";
3809 encoder_name = out_codec->name;
3810 desc = avcodec_descriptor_get(out_codec->id);
3812 out_codec_name = desc->name;
3813 if (!strcmp(encoder_name, out_codec_name))
3814 encoder_name = "native";
3817 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3818 in_codec_name, decoder_name,
3819 out_codec_name, encoder_name);
3821 av_log(NULL, AV_LOG_INFO, "\n");
3825 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* publish completion so signal handlers / other threads can see it */
3829 atomic_store(&transcode_init_done, 1);
3834 /* Return 1 if there remain streams where more output is wanted, 0 otherwise.
 * A stream counts as wanting output unless it is finished, its file hit
 * -fs (limit_filesize), or it reached -frames (max_frames) — in the last
 * case every stream of that output file is closed. */
3835 static int need_output(void)
3839 for (i = 0; i < nb_output_streams; i++) {
3840 OutputStream *ost = output_streams[i];
3841 OutputFile *of = output_files[ost->file_index];
3842 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* finished, or output file already at/over its size limit */
3844 if (ost->finished ||
3845 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3847 if (ost->frame_number >= ost->max_frames) {
/* frame-count limit reached: close the whole output file's streams */
3849 for (j = 0; j < of->ctx->nb_streams; j++)
3850 close_output_stream(output_streams[of->ost_index + j]);
3861 * Select the output stream to process.
/* Picks the unfinished output stream with the smallest current DTS
 * (rescaled to a common time base); streams with no DTS yet sort first
 * (INT64_MIN). An "unavailable" winner yields NULL so the caller retries. */
3863 * @return selected output stream, or NULL if none available
3865 static OutputStream *choose_output(void)
3868 int64_t opts_min = INT64_MAX;
3869 OutputStream *ost_min = NULL;
3871 for (i = 0; i < nb_output_streams; i++) {
3872 OutputStream *ost = output_streams[i];
/* streams without a valid DTS are given maximal priority */
3873 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3874 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3876 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3877 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n")ERRFIX;
3879 if (!ost->initialized && !ost->inputs_done)
3882 if (!ost->finished && opts < opts_min) {
3884 ost_min = ost->unavailable ? NULL : ost;
/*
 * Enable (on != 0) or disable terminal echo on stdin via termios;
 * silently does nothing if stdin is not a tty (tcgetattr fails).
 */
3890 static void set_tty_echo(int on)
3894 if (tcgetattr(0, &tty) == 0) {
3895 if (on) tty.c_lflag |= ECHO;
3896 else tty.c_lflag &= ~ECHO;
3897 tcsetattr(0, TCSANOW, &tty);
/*
 * Poll for interactive single-key commands on stdin (at most every
 * 100 ms, skipped when running as a daemon). Handles verbosity (+/-),
 * QP histogram toggle (s), hex/packet dump cycling, filter commands
 * (c/C), debug-flag cycling (d/D) and help (?/h-like output).
 * Returns AVERROR_EXIT to request termination, 0 otherwise.
 * NOTE(review): some original lines are elided in this extract; comments
 * describe only the visible code.
 */
3902 static int check_keyboard_interaction(int64_t cur_time)
3905 static int64_t last_time;
3906 if (received_nb_signals)
3907 return AVERROR_EXIT;
3908 /* read_key() returns 0 on EOF */
/* rate-limit polling to once per 100 ms */
3909 if(cur_time - last_time >= 100000 && !run_as_daemon){
3911 last_time = cur_time;
3915 return AVERROR_EXIT;
3916 if (key == '+') av_log_set_level(av_log_get_level()+10);
3917 if (key == '-') av_log_set_level(av_log_get_level()-10);
3918 if (key == 's') qp_hist ^= 1;
3921 do_hex_dump = do_pkt_dump = 0;
3922 } else if(do_pkt_dump){
3926 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read an interactive filter command line from the tty */
3928 if (key == 'c' || key == 'C'){
3929 char buf[4096], target[64], command[256], arg[256] = {0};
3932 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3935 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3940 fprintf(stderr, "\n");
3942 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3943 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3944 target, time, command, arg);
3945 for (i = 0; i < nb_filtergraphs; i++) {
3946 FilterGraph *fg = filtergraphs[i];
/* 'c' sends to the first matching filter only; 'C' to all */
3949 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3950 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3951 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3952 } else if (key == 'c') {
3953 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3954 ret = AVERROR_PATCHWELCOME;
3956 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3958 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3963 av_log(NULL, AV_LOG_ERROR,
3964 "Parse error, at least 3 arguments were expected, "
3965 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle or prompt for codec debug flags */
3968 if (key == 'd' || key == 'D'){
3971 debug = input_streams[0]->st->codec->debug<<1;
3972 if(!debug) debug = 1;
/* skip visualisation flags that would crash here */
3973 while(debug & (FF_DEBUG_DCT_COEFF
3975 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3977 )) //unsupported, would just crash
3984 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3989 fprintf(stderr, "\n");
3990 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3991 fprintf(stderr,"error parsing debug value\n");
/* apply the chosen debug flags to every decoder and encoder */
3993 for(i=0;i<nb_input_streams;i++) {
3994 input_streams[i]->st->codec->debug = debug;
3996 for(i=0;i<nb_output_streams;i++) {
3997 OutputStream *ost = output_streams[i];
3998 ost->enc_ctx->debug = debug;
4000 if(debug) av_log_set_level(AV_LOG_DEBUG);
4001 fprintf(stderr,"debug=%d\n", debug);
4004 fprintf(stderr, "key function\n"
4005 "? show this help\n"
4006 "+ increase verbosity\n"
4007 "- decrease verbosity\n"
4008 "c Send command to first matching filter supporting it\n"
4009 "C Send/Queue command to all matching filters\n"
4010 "D cycle through available debug modes\n"
4011 "h dump packets/hex press to cycle through the 3 states\n"
4013 "s Show QP histogram\n"
/*
 * Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. In
 * non-blocking mode a full queue triggers a one-time warning suggesting
 * a larger -thread_queue_size before retrying in blocking mode
 * (presumably — some lines are elided in this extract; confirm against
 * the full source). On error/EOF the error is propagated to the
 * receiving side via av_thread_message_queue_set_err_recv().
 */
4020 static void *input_thread(void *arg)
4023 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4028 ret = av_read_frame(f->ctx, &pkt);
4030 if (ret == AVERROR(EAGAIN)) {
4035 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4038 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: warn once about thread_queue_size */
4039 if (flags && ret == AVERROR(EAGAIN)) {
4041 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4042 av_log(f->ctx, AV_LOG_WARNING,
4043 "Thread message queue blocking; consider raising the "
4044 "thread_queue_size option (current value: %d)\n",
4045 f->thread_queue_size);
4048 if (ret != AVERROR_EOF)
4049 av_log(f->ctx, AV_LOG_ERROR,
4050 "Unable to send packet to main thread: %s\n",
/* packet could not be delivered — drop it and signal the receiver */
4052 av_packet_unref(&pkt);
4053 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Stop and reap the demuxer thread of input file i: signal EOF to the
 * sending side, drain any queued packets, join the thread and free the
 * message queue. No-op when the file has no thread queue.
 */
4061 static void free_input_thread(int i)
4063 InputFile *f = input_files[i];
4066 if (!f || !f->in_thread_queue)
4068 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain remaining packets so the thread can exit */
4069 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4070 av_packet_unref(&pkt);
4072 pthread_join(f->thread, NULL);
4074 av_thread_message_queue_free(&f->in_thread_queue);
/* Tear down the demuxer threads of all input files. */
4077 static void free_input_threads(void)
4081 for (i = 0; i < nb_input_files; i++)
4082 free_input_thread(i);
/*
 * Create the demuxer thread for input file i (skipped when there is
 * only one input file). Non-seekable inputs other than lavfi are put in
 * non-blocking mode. Allocates the packet message queue and spawns
 * input_thread(); returns 0 or a negative AVERROR.
 */
4085 static int init_input_thread(int i)
4088 InputFile *f = input_files[i];
4090 if (nb_input_files == 1)
/* non-seekable (or pb-less non-lavfi) inputs must not block the reader */
4093 if (f->ctx->pb ? !f->ctx->pb->seekable :
4094 strcmp(f->ctx->iformat->name, "lavfi"))
4095 f->non_blocking = 1;
4096 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4097 f->thread_queue_size, sizeof(AVPacket));
4101 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4102 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4103 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, not -errno */
4104 return AVERROR(ret);
/* Spawn a demuxer thread for every input file; returns 0 or the first
 * error from init_input_thread(). */
4110 static int init_input_threads(void)
4114 for (i = 0; i < nb_input_files; i++) {
4115 ret = init_input_thread(i);
/* Receive one packet from the file's demuxer-thread queue; non-blocking
 * when the file is in non-blocking mode. */
4122 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4124 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4126 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Fetch the next packet from input file f. With -re (rate emulation,
 * checked by the caller per the visible loop) a stream whose decoded
 * DTS is ahead of wall-clock time yields EAGAIN to throttle reading.
 * Multiple input files read via the per-file thread queue; a single
 * file reads directly with av_read_frame().
 */
4130 static int get_input_packet(InputFile *f, AVPacket *pkt)
4134 for (i = 0; i < f->nb_streams; i++) {
4135 InputStream *ist = input_streams[f->ist_index + i];
/* compare stream time (µs) against elapsed wall-clock since start */
4136 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4137 int64_t now = av_gettime_relative() - ist->start;
4139 return AVERROR(EAGAIN);
4144 if (nb_input_files > 1)
4145 return get_input_packet_mt(f, pkt);
4147 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input returned EAGAIN). */
4150 static int got_eagain(void)
4153 for (i = 0; i < nb_output_streams; i++)
4154 if (output_streams[i]->unavailable)
/* Clear the EAGAIN markers on all input files and the 'unavailable'
 * flag on all output streams before the next scheduling round. */
4159 static void reset_eagain(void)
4162 for (i = 0; i < nb_input_files; i++)
4163 input_files[i]->eagain = 0;
4164 for (i = 0; i < nb_output_streams; i++)
4165 output_streams[i]->unavailable = 0;
4168 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares tmp (in tmp_time_base) with *duration (in time_base) via
 * av_compare_ts(); the larger value wins and its time base is returned.
 * NOTE(review): the assignment lines are elided in this extract. */
4169 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4170 AVRational time_base)
4176 return tmp_time_base;
4179 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4182 return tmp_time_base;
/*
 * Rewind input file 'ifile' to its start for -stream_loop, and extend
 * ifile->duration by the length of the material just played so output
 * timestamps keep increasing across loop iterations. Audio stream
 * durations (exact, from nb_samples) take precedence over video frame
 * durations (estimated from framerate) — see the has_audio logic.
 * Returns the av_seek_frame() result on failure.
 */
4188 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4191 AVCodecContext *avctx;
4192 int i, ret, has_audio = 0;
4193 int64_t duration = 0;
4195 ret = av_seek_frame(is, -1, is->start_time, 0);
/* first pass: detect whether any audio stream produced samples */
4199 for (i = 0; i < ifile->nb_streams; i++) {
4200 ist = input_streams[ifile->ist_index + i];
4201 avctx = ist->dec_ctx;
4203 /* duration is the length of the last frame in a stream
4204 * when audio stream is present we don't care about
4205 * last video frame length because it's not defined exactly */
4206 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* second pass: compute each stream's total duration */
4210 for (i = 0; i < ifile->nb_streams; i++) {
4211 ist = input_streams[ifile->ist_index + i];
4212 avctx = ist->dec_ctx;
/* audio: exact last-frame length from the decoded sample count */
4215 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4216 AVRational sample_rate = {1, avctx->sample_rate};
4218 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: estimate one frame duration from (avg_)framerate */
4223 if (ist->framerate.num) {
4224 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4225 } else if (ist->st->avg_frame_rate.num) {
4226 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4231 if (!ifile->duration)
4232 ifile->time_base = ist->st->time_base;
4233 /* the total duration of the stream, max_pts - min_pts is
4234 * the duration of the stream without the last frame */
4235 duration += ist->max_pts - ist->min_pts;
4236 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* decrement the remaining loop count when it is finite */
4240 if (ifile->loop > 0)
/* Read one packet from input file 'file_index' and feed it to the
 * decoders / stream-copy path. Return values (continuing the original
 * doc comment below):
 * NOTE(review): this extract elides some original source lines (gaps in
 * the embedded numbering); comments cover only the visible code. */
4248 * - 0 -- one packet was read and processed
4249 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4250 * this function should be called again
4251 * - AVERROR_EOF -- this function should not be called again
4253 static int process_input(int file_index)
4255 InputFile *ifile = input_files[file_index];
4256 AVFormatContext *is;
4259 int ret, thread_ret, i, j;
4264 ret = get_input_packet(ifile, &pkt);
4266 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop handling: flush decoders, seek back to the start and
 * restart the demuxer thread, then try reading again */
4270 if (ret < 0 && ifile->loop) {
4271 AVCodecContext *avctx;
4272 for (i = 0; i < ifile->nb_streams; i++) {
4273 ist = input_streams[ifile->ist_index + i];
4274 avctx = ist->dec_ctx;
4275 if (ist->decoding_needed) {
/* no_eof=1: flush without signalling final EOF downstream */
4276 ret = process_input_packet(ist, NULL, 1);
4279 avcodec_flush_buffers(avctx);
4283 free_input_thread(file_index);
4285 ret = seek_to_start(ifile, is);
4287 thread_ret = init_input_thread(file_index);
4292 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4294 ret = get_input_packet(ifile, &pkt);
4295 if (ret == AVERROR(EAGAIN)) {
/* genuine error (not EOF): report it */
4301 if (ret != AVERROR_EOF) {
4302 print_error(is->url, ret);
/* EOF: flush every decoder of this file and finish the outputs that
 * do not go through libavfilter */
4307 for (i = 0; i < ifile->nb_streams; i++) {
4308 ist = input_streams[ifile->ist_index + i];
4309 if (ist->decoding_needed) {
4310 ret = process_input_packet(ist, NULL, 0);
4315 /* mark all outputs that don't go through lavfi as finished */
4316 for (j = 0; j < nb_output_streams; j++) {
4317 OutputStream *ost = output_streams[j];
4319 if (ost->source_index == ifile->ist_index + i &&
4320 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4321 finish_output_stream(ost);
/* report EOF as EAGAIN so the scheduler tries other inputs */
4325 ifile->eof_reached = 1;
4326 return AVERROR(EAGAIN);
4332 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4333 is->streams[pkt.stream_index]);
4335 /* the following test is needed in case new streams appear
4336 dynamically in stream : we ignore them */
4337 if (pkt.stream_index >= ifile->nb_streams) {
4338 report_new_stream(file_index, &pkt);
4339 goto discard_packet;
4342 ist = input_streams[ifile->ist_index + pkt.stream_index];
4344 ist->data_size += pkt.size;
4348 goto discard_packet;
4350 if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4351 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4352 "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
/* verbose trace of the raw demuxed timestamps */
4358 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4359 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4360 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4361 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4362 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4363 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4364 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4365 av_ts2str(input_files[ist->file_index]->ts_offset),
4366 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for streams with < 64 pts_wrap_bits */
4369 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4370 int64_t stime, stime2;
4371 // Correcting starttime based on the enabled streams
4372 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4373 // so we instead do it here as part of discontinuity handling
4374 if ( ist->next_dts == AV_NOPTS_VALUE
4375 && ifile->ts_offset == -is->start_time
4376 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4377 int64_t new_start_time = INT64_MAX;
4378 for (i=0; i<is->nb_streams; i++) {
4379 AVStream *st = is->streams[i];
4380 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4382 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4384 if (new_start_time > is->start_time) {
4385 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4386 ifile->ts_offset = -new_start_time;
4390 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4391 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4392 ist->wrap_correction_done = 1;
/* unwrap dts/pts that landed past the wrap point; keep correcting
 * (wrap_correction_done=0) while wrapped timestamps keep arriving */
4394 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4395 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4396 ist->wrap_correction_done = 0;
4398 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4399 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4400 ist->wrap_correction_done = 0;
4404 /* add the stream-global side data to the first packet */
4405 if (ist->nb_packets == 1) {
4406 for (i = 0; i < ist->st->nb_side_data; i++) {
4407 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* display matrix is handled via autorotate elsewhere — skip it here */
4410 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4413 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4416 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4420 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the accumulated file-level timestamp offset and -itsscale */
4424 if (pkt.dts != AV_NOPTS_VALUE)
4425 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4426 if (pkt.pts != AV_NOPTS_VALUE)
4427 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4429 if (pkt.pts != AV_NOPTS_VALUE)
4430 pkt.pts *= ist->ts_scale;
4431 if (pkt.dts != AV_NOPTS_VALUE)
4432 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity: first dts of this stream vs. the file's
 * last seen timestamp, for formats flagged AVFMT_TS_DISCONT */
4434 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4435 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4436 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4437 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4438 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4439 int64_t delta = pkt_dts - ifile->last_ts;
4440 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4441 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4442 ifile->ts_offset -= delta;
4443 av_log(NULL, AV_LOG_DEBUG,
4444 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4445 delta, ifile->ts_offset);
4446 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4447 if (pkt.pts != AV_NOPTS_VALUE)
4448 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* offset timestamps by the duration accumulated from -stream_loop */
4452 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4453 if (pkt.pts != AV_NOPTS_VALUE) {
4454 pkt.pts += duration;
4455 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4456 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4459 if (pkt.dts != AV_NOPTS_VALUE)
4460 pkt.dts += duration;
/* intra-stream discontinuity: packet dts vs. the predicted next_dts */
4462 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4463 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4464 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4465 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4467 int64_t delta = pkt_dts - ist->next_dts;
4468 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4469 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4470 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4471 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4472 ifile->ts_offset -= delta;
4473 av_log(NULL, AV_LOG_DEBUG,
4474 "timestamp discontinuity for stream #%d:%d "
4475 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4476 ist->file_index, ist->st->index, ist->st->id,
4477 av_get_media_type_string(ist->dec_ctx->codec_type),
4478 delta, ifile->ts_offset);
4479 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4480 if (pkt.pts != AV_NOPTS_VALUE)
4481 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: drop timestamps beyond the error threshold */
4484 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4485 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4486 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4487 pkt.dts = AV_NOPTS_VALUE;
4489 if (pkt.pts != AV_NOPTS_VALUE){
4490 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4491 delta = pkt_pts - ist->next_dts;
4492 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4493 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4494 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4495 pkt.pts = AV_NOPTS_VALUE;
/* remember the last dts for inter-stream discontinuity detection */
4501 if (pkt.dts != AV_NOPTS_VALUE)
4502 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4505 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4506 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4507 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4508 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4509 av_ts2str(input_files[ist->file_index]->ts_offset),
4510 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4513 sub2video_heartbeat(ist, pkt.pts);
4515 process_input_packet(ist, &pkt, 0);
4518 av_packet_unref(&pkt);
/* Request output from a configured filtergraph; on EAGAIN, pick the
 * input stream whose buffersrc reported the most failed requests as the
 * best candidate to feed next (continuing the original doc comment): */
4524 * Perform a step of transcoding for the specified filter graph.
4526 * @param[in] graph filter graph to consider
4527 * @param[out] best_ist input stream where a frame would allow to continue
4528 * @return 0 for success, <0 for error
4530 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4533 int nb_requests, nb_requests_max = 0;
4534 InputFilter *ifilter;
4538 ret = avfilter_graph_request_oldest(graph->graph);
4540 return reap_filters(0);
/* graph fully drained: flush the sinks and close its outputs */
4542 if (ret == AVERROR_EOF) {
4543 ret = reap_filters(1);
4544 for (i = 0; i < graph->nb_outputs; i++)
4545 close_output_stream(graph->outputs[i]->ost);
4548 if (ret != AVERROR(EAGAIN))
/* EAGAIN: find which input the graph is starving for */
4551 for (i = 0; i < graph->nb_inputs; i++) {
4552 ifilter = graph->inputs[i];
/* skip inputs whose file is stalled or exhausted */
4554 if (input_files[ist->file_index]->eagain ||
4555 input_files[ist->file_index]->eof_reached)
4557 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4558 if (nb_requests > nb_requests_max) {
4559 nb_requests_max = nb_requests;
/* no feedable input found: mark the graph's outputs unavailable */
4565 for (i = 0; i < graph->nb_outputs; i++)
4566 graph->outputs[i]->ost->unavailable = 1;
/* Choose an output stream, (re)configure and initialise its filtergraph
 * and encoder if needed, then pull one packet from the corresponding
 * input (continuing the original doc comment below):
 * NOTE(review): some original lines are elided in this extract. */
4572 * Run a single step of transcoding.
4574 * @return 0 for success, <0 for error
4576 static int transcode_step(void)
4579 InputStream *ist = NULL;
4582 ost = choose_output();
4589 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* filtergraph exists but is not configured yet: configure it once all
 * of its inputs know their format */
4593 if (ost->filter && !ost->filter->graph->graph) {
4594 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4595 ret = configure_filtergraph(ost->filter->graph);
4597 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4603 if (ost->filter && ost->filter->graph->graph) {
/* lazily initialise the encoder now that the graph is configured */
4604 if (!ost->initialized) {
4605 char error[1024] = {0};
4606 ret = init_output_stream(ost, error, sizeof(error));
4608 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4609 ost->file_index, ost->index, error);
4613 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4617 } else if (ost->filter) {
/* unconfigured graph: check whether any input can still deliver */
4619 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4620 InputFilter *ifilter = ost->filter->graph->inputs[i];
4621 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* all graph inputs exhausted without output — give up on this ost */
4627 ost->inputs_done = 1;
4631 av_assert0(ost->source_index >= 0);
4632 ist = input_streams[ost->source_index];
4635 ret = process_input(ist->file_index);
4636 if (ret == AVERROR(EAGAIN)) {
4637 if (input_files[ist->file_index]->eagain)
4638 ost->unavailable = 1;
4643 return ret == AVERROR_EOF ? 0 : ret;
4645 return reap_filters(0);
4649 * The following code is the main loop of the file converter
/* Drives the whole conversion: init, the step loop (with keyboard
 * interaction and progress reports), decoder flushing, trailer writing
 * and final cleanup of encoders, decoders and per-stream allocations.
 * NOTE(review): some original lines are elided in this extract; comments
 * describe only the visible code. */
4651 static int transcode(void)
4654 AVFormatContext *os;
4657 int64_t timer_start;
4658 int64_t total_packets_written = 0;
4660 ret = transcode_init();
4664 if (stdin_interaction) {
4665 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4668 timer_start = av_gettime_relative();
4671 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode_step() per iteration until told to stop */
4675 while (!received_sigterm) {
4676 int64_t cur_time= av_gettime_relative();
4678 /* if 'q' pressed, exits */
4679 if (stdin_interaction)
4680 if (check_keyboard_interaction(cur_time) < 0)
4683 /* check if there's any stream where output is still needed */
4684 if (!need_output()) {
4685 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4689 ret = transcode_step();
4690 if (ret < 0 && ret != AVERROR_EOF) {
4691 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4695 /* dump report by using the output first video and audio streams */
4696 print_report(0, timer_start, cur_time);
4699 free_input_threads();
4702 /* at the end of stream, we must flush the decoder buffers */
4703 for (i = 0; i < nb_input_streams; i++) {
4704 ist = input_streams[i];
4705 if (!input_files[ist->file_index]->eof_reached) {
4706 process_input_packet(ist, NULL, 0);
4713 /* write the trailer if needed and close file */
4714 for (i = 0; i < nb_output_files; i++) {
4715 os = output_files[i]->ctx;
4716 if (!output_files[i]->header_written) {
4717 av_log(NULL, AV_LOG_ERROR,
4718 "Nothing was written into output file %d (%s), because "
4719 "at least one of its streams received no packets.\n",
4723 if ((ret = av_write_trailer(os)) < 0) {
4724 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4730 /* dump report by using the first video and audio streams */
4731 print_report(1, timer_start, av_gettime_relative());
4733 /* close each encoder */
4734 for (i = 0; i < nb_output_streams; i++) {
4735 ost = output_streams[i];
4736 if (ost->encoding_needed) {
4737 av_freep(&ost->enc_ctx->stats_in);
4739 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was written anywhere */
4742 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4743 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4747 /* close each decoder */
4748 for (i = 0; i < nb_input_streams; i++) {
4749 ist = input_streams[i];
4750 if (ist->decoding_needed) {
4751 avcodec_close(ist->dec_ctx);
4752 if (ist->hwaccel_uninit)
4753 ist->hwaccel_uninit(ist->dec_ctx);
4757 av_buffer_unref(&hw_device_ctx);
4758 hw_device_free_all();
/* cleanup path (presumably reached via goto on errors too — the label
 * line is elided in this extract; confirm against the full source) */
4765 free_input_threads();
4768 if (output_streams) {
4769 for (i = 0; i < nb_output_streams; i++) {
4770 ost = output_streams[i];
/* fclose return checked: a failed close can lose buffered log data */
4773 if (fclose(ost->logfile))
4774 av_log(NULL, AV_LOG_ERROR,
4775 "Error closing logfile, loss of information possible: %s\n",
4776 av_err2str(AVERROR(errno)));
4777 ost->logfile = NULL;
4779 av_freep(&ost->forced_kf_pts);
4780 av_freep(&ost->apad);
4781 av_freep(&ost->disposition);
4782 av_dict_free(&ost->encoder_opts);
4783 av_dict_free(&ost->sws_dict);
4784 av_dict_free(&ost->swr_opts);
4785 av_dict_free(&ost->resample_opts);
/**
 * Sample the current wall-clock, user-CPU and system-CPU times for the
 * -benchmark option.
 *
 * real_usec comes from av_gettime_relative(); user_usec/sys_usec come from
 * getrusage() on POSIX, GetProcessTimes() on Windows, and are zeroed when
 * neither API is available. All values are in microseconds.
 *
 * NOTE(review): the `#if HAVE_GETRUSAGE` guard, `#endif` and the trailing
 * `return time_stamps;` are elided from this view.
 */
4792 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
/* Designated first member: real_usec is initialized from the monotonic
 * clock; the CPU-time members are filled in below. */
4794 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4796 struct rusage rusage;
4798 getrusage(RUSAGE_SELF, &rusage);
/* Convert timeval (seconds + microseconds) to a single microsecond count;
 * 1000000LL forces 64-bit arithmetic to avoid overflow on long runs. */
4799 time_stamps.user_usec =
4800 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4801 time_stamps.sys_usec =
4802 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4803 #elif HAVE_GETPROCESSTIMES
4805 FILETIME c, e, k, u;
4806 proc = GetCurrentProcess();
4807 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100-nanosecond ticks in a split 64-bit value;
 * reassemble high/low words and divide by 10 to get microseconds. */
4808 time_stamps.user_usec =
4809 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4810 time_stamps.sys_usec =
4811 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* Fallback when no CPU-time API is available on this platform. */
4813 time_stamps.user_usec = time_stamps.sys_usec = 0;
/**
 * Return the peak memory usage of the current process in bytes, for the
 * -benchmark report.
 *
 * Uses getrusage() where ru_maxrss is available, GetProcessMemoryInfo() on
 * Windows. NOTE(review): the fallback branch for platforms with neither
 * API (and the closing `#endif`/brace) is elided from this view.
 */
4818 static int64_t getmaxrss(void)
4820 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4821 struct rusage rusage;
4822 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes; scale to bytes. */
4823 return (int64_t)rusage.ru_maxrss * 1024;
4824 #elif HAVE_GETPROCESSMEMORYINFO
4826 PROCESS_MEMORY_COUNTERS memcounters;
4827 proc = GetCurrentProcess();
/* cb must be set to the structure size before the call. */
4828 memcounters.cb = sizeof(memcounters);
4829 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* Already in bytes. */
4830 return memcounters.PeakPagefileUsage;
/* No-op av_log callback; main() installs it when the program is started
 * with "-d" to suppress log output. NOTE(review): the (empty) body is
 * elided from this view. */
4836 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/**
 * Program entry point: set up logging, parse the command line, open all
 * inputs/outputs, run transcode(), print benchmark and decode-error
 * statistics, then terminate through exit_program().
 *
 * NOTE(review): error-exit jumps, several closing braces and the final
 * `}` are elided from this view; comments describe visible statements only.
 */
4840 int main(int argc, char **argv)
4843 BenchmarkTimeStamps ti;
/* Ensure ffmpeg_cleanup runs on every exit path (exit_program calls it). */
4847 register_exit(ffmpeg_cleanup);
4849 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4851 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Apply -loglevel/-v early so messages during option parsing honor it. */
4852 parse_loglevel(argc, argv, options);
/* "-d" as the first argument silences logging via the null callback;
 * NOTE(review): additional statements in this branch are elided here. */
4854 if(argc>1 && !strcmp(argv[1], "-d")){
4856 av_log_set_callback(log_callback_null);
4862 avdevice_register_all();
4864 avformat_network_init();
4866 show_banner(argc, argv, options);
4868 /* parse options and open all input/output files */
4869 ret = ffmpeg_parse_options(argc, argv);
/* Bare invocation with no files at all: point the user at the help. */
4873 if (nb_output_files <= 0 && nb_input_files == 0) {
4875 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4879 /* file converter / grab */
4880 if (nb_output_files <= 0) {
4881 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Scan for a non-RTP output; NOTE(review): the action taken when
 * strcmp() is non-zero is elided from this view. */
4885 for (i = 0; i < nb_output_files; i++) {
4886 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Snapshot timing baseline just before the conversion starts. */
4890 current_time = ti = get_benchmark_time_stamps();
4891 if (transcode() < 0)
/* -benchmark: report CPU and wall-clock time spent, as deltas from ti. */
4894 int64_t utime, stime, rtime;
4895 current_time = get_benchmark_time_stamps();
4896 utime = current_time.user_usec - ti.user_usec;
4897 stime = current_time.sys_usec - ti.sys_usec;
4898 rtime = current_time.real_usec - ti.real_usec;
4899 av_log(NULL, AV_LOG_INFO,
4900 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4901 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4903 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4904 decode_error_stat[0], decode_error_stat[1]);
/* Fail when the decode-error fraction exceeds max_error_rate, i.e.
 * errors > rate * total; NOTE(review): the consequent is elided here. */
4905 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals termination by an external signal; otherwise propagate the
 * accumulated return code. exit_program() runs ffmpeg_cleanup first. */
4908 exit_program(received_nb_signals ? 255 : main_return_code);
/* Not normally reached: exit_program() terminates the process. */
4909 return main_return_code;