2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* ---- program identity and global transcoding state ---- */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* file receiving per-frame video statistics (-vstats); opened lazily */
static FILE *vstats_file;

/* names of the constants usable in -force_key_frames expressions
 * (initializer elements elided in this view) */
const char *const forced_keyframes_const_names[] = {

static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);

static int run_as_daemon = 0;
/* frame duplication/drop bookkeeping used by the video sync code */
static int nb_frames_dup = 0;
static unsigned dup_warning = 1000;  /* threshold above which duplication is warned about */
static int nb_frames_drop = 0;
static int64_t decode_error_stat[2];

static int want_sdp = 1;

static int current_time;
AVIOContext *progress_avio = NULL;

/* scratch buffer for avcodec_encode_subtitle(); allocated on demand */
static uint8_t *subtitle_out;

/* global stream/file tables; non-static, so visible to the other
 * ffmpeg_* translation units */
InputStream **input_streams = NULL;
int nb_input_streams = 0;
InputFile **input_files = NULL;
int nb_input_files = 0;

OutputStream **output_streams = NULL;
int nb_output_streams = 0;
OutputFile **output_files = NULL;
int nb_output_files = 0;

FilterGraph **filtergraphs;

/* init terminal so that we can grab keys */
static struct termios oldtty;
static int restore_tty;

static void free_input_threads(void);
165 Convert subtitles to video with alpha to insert them in filter graphs.
166 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare ist->sub2video.frame as a blank (all-zero) RGB32 canvas.
 * Dimensions come from the decoder when known, otherwise from the
 * configured sub2video size. Returns a negative AVERROR from
 * av_frame_get_buffer() on allocation failure. */
static int sub2video_get_blank_frame(InputStream *ist)
    AVFrame *frame = ist->sub2video.frame;

    /* drop any previously held buffer before reconfiguring the frame */
    av_frame_unref(frame);
    ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    /* 32-byte alignment for SIMD-friendly linesizes */
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
    /* zero = fully transparent canvas */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle into an RGB32 canvas of
 * size w x h. Rectangles that are not bitmaps or do not fit inside the
 * canvas are rejected with a warning. r->data[1] is the PAL8 palette
 * (one uint32_t RGBA entry per palette index). */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
    uint32_t *pal, *dst2;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
               r->x, r->y, r->w, r->h, w, h

    /* advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel) */
    dst += r->y * dst_linesize + r->x * 4;
    pal = (uint32_t *)r->data[1];
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        /* expand each palette index to its 32-bit color */
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        src += r->linesize[0];
/* Push the current sub2video canvas (with the given pts) into every
 * filtergraph input fed by this stream; KEEP_REF because the same frame
 * is reused across pushes. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
    AVFrame *frame = ist->sub2video.frame;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
/* Render the subtitle 'sub' onto a fresh blank canvas and push it to the
 * filters. Display times (in ms relative to sub->pts, which is in
 * AV_TIME_BASE units) are converted into the input stream's time base.
 * When sub is NULL (heartbeat/flush path) the previous end_pts is reused
 * as the new frame's pts. */
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
    AVFrame *frame = ist->sub2video.frame;
    int64_t pts, end_pts;

    pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                       AV_TIME_BASE_Q, ist->st->time_base);
    end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
    num_rects = sub->num_rects;
    pts = ist->sub2video.end_pts;
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
/* Re-send the last sub2video frame on every sub2video stream of the same
 * input file so that filters waiting on a subtitle input do not stall
 * while video frames accumulate. 'pts' is in ist's stream time base. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    InputFile *infile = input_files[ist->file_index];

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        /* only streams that actually have a sub2video canvas participate */
        if (!ist2->sub2video.frame)
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
        /* past the display window (or never rendered): refresh the canvas */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for sub2video: emit a final canvas update if one
 * is still pending, then send NULL (EOF) into every attached buffersrc. */
static void sub2video_flush(InputStream *ist)
    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
/* Async-signal-safe terminal teardown: restore the saved tty attributes
 * and reset the log line state. Safe to call from a signal handler. */
static void term_exit_sigsafe(void)
    tcsetattr (0, TCSANOW, &oldtty);
    av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared with the signal handlers below; volatile
 * because they are written from async signal context. */
static volatile int received_sigterm = 0;     /* last termination signal number */
static volatile int received_nb_signals = 0;  /* total termination signals seen */
static volatile int transcode_init_done = 0;  /* set once transcode init completes */
static volatile int ffmpeg_exited = 0;        /* set when cleanup has finished */
static int main_return_code = 0;
/* Termination-signal handler: record the signal so the main loop can shut
 * down gracefully; after more than 3 signals, force a hard exit.
 * NOTE(review): the return-type line of this definition is elided in
 * this view. */
sigterm_handler(int sig)
    received_sigterm = sig;
    received_nb_signals++;
    if(received_nb_signals > 3) {
        /* write() is async-signal-safe, unlike stdio */
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: maps console events onto the POSIX
 * signal path (sigterm_handler) so shutdown behaves uniformly. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {

    av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    /* NOTE(review): this is the interior of the terminal-init function;
     * its enclosing definition header is elided in this view. */
    if (!run_as_daemon && stdin_interaction) {
        if (tcgetattr (0, &tty) == 0) {
            /* switch the tty into a raw-ish mode so single keypresses can
             * be read without echo or line buffering */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                            |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tcsetattr (0, TCSANOW, &tty);
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
    signal(SIGXCPU, sigterm_handler);
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
/* Returns the pending key press, if any, without blocking. POSIX path
 * uses select() on stdin; the Windows path below peeks the console or
 * pipe handle instead. */
static int read_key(void)
    /* zero timeout: poll stdin for readability only */
    n = select(1, &rfds, NULL, NULL, &tv);
# if HAVE_PEEKNAMEDPIPE
    static HANDLE input_handle;
    input_handle = GetStdHandle(STD_INPUT_HANDLE);
    /* GetConsoleMode fails when stdin is a pipe rather than a console */
    is_pipe = !GetConsoleMode(input_handle, &dw);
    /* When running under a GUI, you will end here. */
    if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
        // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: request that blocking I/O be aborted once more
 * termination signals have been received than the current init state
 * tolerates (1 during init, 2 afterwards). */
static int decode_interrupt_cb(void *ctx)
    return received_nb_signals > transcode_init_done;

const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Free all global resources before process exit: filtergraphs, output
 * files and streams, input threads/files/streams, the vstats file and
 * the global stream tables; finally log why we are exiting. 'ret' is
 * the pending process exit status. */
static void ffmpeg_cleanup(int ret)
    int maxrss = getmaxrss() / 1024;
    av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]->formats);
            av_freep(&fg->outputs[j]->channel_layouts);
            av_freep(&fg->outputs[j]->sample_rates);
            av_freep(&fg->outputs[j]);
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);
        av_freep(&filtergraphs[i]);
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close output files (muxer contexts and their options) */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* only close the AVIO handle for formats that own a file */
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
        avformat_free_context(s);
        av_dict_free(&of->opts);
        av_freep(&output_files[i]);

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);
        av_freep(&ost->bsf_extradata_updated);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_parser_close(ost->parser);
        avcodec_free_context(&ost->parser_avctx);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        /* drain packets still queued for a muxer that never initialized */
        while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            av_packet_unref(&pkt);
        av_fifo_freep(&ost->muxing_queue);

        av_freep(&output_streams[i]);

    free_input_threads();

    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);

    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);

    /* fclose() flushes buffered stats; a failure here can lose data */
    if (fclose(vstats_file))
        av_log(NULL, AV_LOG_ERROR,
               "Error closing vstats file, loss of information possible: %s\n",
               av_err2str(AVERROR(errno)));
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && transcode_init_done) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
601 void remove_avoptions(AVDictionary **a, AVDictionary *b)
603 AVDictionaryEntry *t = NULL;
605 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
606 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log message if any option in m was left unconsumed.
 * NOTE(review): the fatal-exit statement following the log call is
 * elided in this view. */
void assert_avoptions(AVDictionary *m)
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Report usage of an experimental codec and abort; 'encoder' selects the
 * encoder vs decoder wording. (Body elided in this view.) */
static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all active, log CPU time consumed since the previous
 * call, labelled by the printf-style arguments; fmt == NULL just resets
 * the reference time. */
static void update_benchmark(const char *fmt, ...)
    if (do_benchmark_all) {
        int64_t t = getutime();
        vsnprintf(buf, sizeof(buf), fmt, va);
        av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: 'this_stream' flags are applied to
 * ost itself, 'others' to every remaining stream. */
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
/* Final per-packet muxing step: buffer the packet if the muxer header is
 * not written yet, enforce frame limits and timestamp sanity (monotonic
 * DTS), then hand the packet to av_interleaved_write_frame(). Takes
 * ownership of pkt's payload (unreferenced on exit). */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;

    if (!of->header_written) {
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* grow the queue, but never past max_muxing_queue_size */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Too many packets buffered for output stream %d:%d.\n",
                       ost->file_index, ost->st->index);
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
        av_packet_move_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);

    if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* quality/PSNR stats attached by the encoder as packet side data */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            ost->error[i] = AV_RL64(sd + 8 + 8*i);

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   ost->file_index, ost->st->index);
            /* pick the median of {pts, dts, last_mux_dts + 1} as the guess */
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* strict formats require strictly increasing DTS; others allow equal */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),

    ret = av_interleaved_write_frame(s, pkt);
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    av_packet_unref(pkt);
/* Mark this stream's encoder as finished; clamps the owning file's
 * recording_time to the stream's current end so the other streams stop
 * at the same point. */
static void close_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
    int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
    of->recording_time = FFMIN(of->recording_time, end);
/* Run the packet through the stream's bitstream-filter chain (if any),
 * writing each produced packet via write_packet(); without filters the
 * packet is muxed directly. */
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
    /* apply the output bitstream filters, if any */
    if (ost->nb_bitstream_filters) {
        av_packet_split_side_data(pkt);
        ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
        /* get a packet from the previous filter up the chain */
        ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
        if (ret == AVERROR(EAGAIN)) {
        /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
         * the api states this shouldn't happen after init(). Propagate it here to the
         * muxer and to the next filters in the chain to workaround this.
         * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
         * par_out->extradata and adapt muxers accordingly to get rid of this. */
        if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
            ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
            ost->bsf_extradata_updated[idx - 1] |= 1;
        /* send it to the next filter down the chain or to the muxer */
        if (idx < ost->nb_bitstream_filters) {
            /* HACK/FIXME! - See above */
            if (!(ost->bsf_extradata_updated[idx] & 2)) {
                ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
                ost->bsf_extradata_updated[idx] |= 2;
            ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
            write_packet(of, pkt, ost);
        /* no bitstream filters: mux directly */
        write_packet(of, pkt, ost);

    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Returns 0 (and closes the stream) once the file's -t recording time has
 * been reached for this stream, 1 while encoding should continue. */
static int check_recording_time(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
/* Encode one audio frame: fix up its pts against the stream's sample
 * counter, feed it to the encoder and mux every packet the encoder
 * produces (send/receive API). */
static void do_audio_out(OutputFile *of, OutputStream *ost,
    AVCodecContext *enc = ost->enc_ctx;

    av_init_packet(&pkt);

    if (!check_recording_time(ost))

    /* without a usable pts, continue from the running sample count */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);

    ret = avcodec_send_frame(enc, frame);

        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))
        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        /* encoder works in enc->time_base, muxer in st->time_base */
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

        output_packet(of, &pkt, ost);

    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the resulting packet(s). DVB subtitles
 * are encoded twice (draw + clear). Timestamps are shifted to honor the
 * output file's start time, and display times are normalized so
 * start_display_time is 0. */
static void do_subtitle_out(OutputFile *of,
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");

        /* lazily allocate the shared encode buffer */
        subtitle_out = av_malloc(subtitle_out_max_size);
        av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))

        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            /* 90 kHz units: first pass draws, second pass clears */
            pkt.pts += 90 * sub->start_display_time;
            pkt.pts += 90 * sub->end_display_time;
        output_packet(of, &pkt, ost);
/* Encode one video frame, applying the selected video sync method
 * (CFR/VFR/passthrough/drop): computes how many times the frame (or the
 * previous one) must be duplicated or whether it must be dropped to hit
 * the target timeline, then encodes and muxes each instance. next_picture
 * == NULL means encoder flush. */
static void do_video_out(OutputFile *of,
    AVFrame *next_picture,
    int ret, format_video_sync;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecParameters *mux_par = ost->st->codecpar;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* derive the nominal frame duration (in encoder ticks) from the
     * filter output frame rate, the forced output rate, or the input
     * packet duration, in that order of preference */
    if (filter->inputs[0]->frame_rate.num > 0 &&
        filter->inputs[0]->frame_rate.den > 0)
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    if (!ost->filters_script &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));

    if (!next_picture) {
        /* flush: repeat the recent duplication pattern (median of last 3) */
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);

        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
        delta = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)

        format_video_sync = video_sync_method;
        if (format_video_sync == VSYNC_AUTO) {
            if(!strcmp(of->ctx->oformat->name, "avi")) {
                format_video_sync = VSYNC_VFR;
                format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
                && format_video_sync == VSYNC_CFR
                && input_files[ist->file_index]->ctx->nb_streams == 1
                && input_files[ist->file_index]->input_ts_offset == 0) {
                format_video_sync = VSYNC_VSCFR;
            if (format_video_sync == VSYNC_CFR && copy_ts) {
                format_video_sync = VSYNC_VSCFR;
        ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

            format_video_sync != VSYNC_PASSTHROUGH &&
            format_video_sync != VSYNC_DROP) {
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
            sync_ipts = ost->sync_opts;

        switch (format_video_sync) {
            if (ost->frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                ost->sync_opts = lrint(sync_ipts);
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
            } else if (delta < -1.1)
            else if (delta > 1.1) {
                nb_frames = lrintf(delta);
                nb0_frames = lrintf(delta0 - 0.6);
            else if (delta > 0.6)
                ost->sync_opts = lrint(sync_ipts);
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = lrint(sync_ipts);

    /* never exceed the remaining -frames budget */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* shift the duplication history window */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
    ost->last_dropped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        av_init_packet(&pkt);

        /* first nb0_frames iterations re-encode the previous frame */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
            in_picture = next_picture;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
        if (ost->frame_number >= ost->max_frames)

#if FF_API_LAVF_FMT_RAWPICTURE
        if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
            if (in_picture->interlaced_frame)
                mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_par->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data = (uint8_t *)in_picture;
            pkt.size = sizeof(AVPicture);
            pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            output_packet(of, &pkt, ost);
            int forced_keyframe = 0;

            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;

            if (in_picture->interlaced_frame) {
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                    mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_par->field_order = AV_FIELD_PROGRESSIVE;

            in_picture->quality = enc->global_quality;
            in_picture->pict_type = 0;

            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
            /* keyframe forcing: explicit pts list, expression, or "source" */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                ost->forced_kf_index++;
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    forced_keyframe = 1;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;

                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;

            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);

            update_benchmark(NULL);
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);

            ost->frames_encoded++;

            ret = avcodec_send_frame(enc, in_picture);

                ret = avcodec_receive_packet(enc, &pkt);
                update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
                if (ret == AVERROR(EAGAIN))

                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));

                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;

                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

                frame_size = pkt.size;
                output_packet(of, &pkt, ost);

                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);

        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;

        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);

    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
        av_frame_free(&ost->last_frame);

    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1326 static double psnr(double d)
1328 return -10.0 * log10(d);
/* Append one line of per-frame statistics for a video stream to the global
 * vstats file (lazily opened from vstats_filename on the first call).
 * frame_size is the encoded packet size in bytes for the frame just written. */
1331 static void do_video_stats(OutputStream *ost, int frame_size)
1333 AVCodecContext *enc;
1335 double ti1, bitrate, avg_bitrate;
1337 /* this is executed just the first time do_video_stats is called */
1339 vstats_file = fopen(vstats_filename, "w");
1347 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1348 frame_number = ost->st->nb_frames;
/* Quality is stored as a lambda value; divide by FF_QP2LAMBDA to print a QP. */
1349 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1350 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only meaningful when the encoder accumulated error stats
 * (AV_CODEC_FLAG_PSNR); error[0] is normalized to per-pixel MSE first. */
1352 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1353 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1355 fprintf(vstats_file,"f_size= %6d ", frame_size);
1356 /* compute pts value */
/* ti1: end timestamp of the stream so far, in seconds. */
1357 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate assumes one frame per encoder time_base tick;
 * avg_bitrate uses the total bytes muxed so far over elapsed stream time. */
1361 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1362 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1363 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1364 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1365 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark this output stream as completely finished (both encoder and muxer
 * side); the loop below additionally finishes every stream of the same
 * output file. NOTE(review): upstream guards this loop on a condition not
 * visible in this view — confirm before relying on the unconditional form. */
1369 static void finish_output_stream(OutputStream *ost)
1371 OutputFile *of = output_files[ost->file_index];
1374 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1377 for (i = 0; i < of->ctx->nb_streams; i++)
1378 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1383 * Get and encode new output from any of the filtergraphs, without causing
1386 * @return 0 for success, <0 for severe errors
1388 static int reap_filters(int flush)
1390 AVFrame *filtered_frame = NULL;
1393 /* Reap all buffers present in the buffer sinks */
1394 for (i = 0; i < nb_output_streams; i++) {
1395 OutputStream *ost = output_streams[i];
1396 OutputFile *of = output_files[ost->file_index];
1397 AVFilterContext *filter;
1398 AVCodecContext *enc = ost->enc_ctx;
1403 filter = ost->filter->filter;
/* Lazily allocate the reusable frame that receives filter output. */
1405 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1406 return AVERROR(ENOMEM);
1408 filtered_frame = ost->filtered_frame;
1411 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only drain frames already buffered in the sink, do not
 * pull more data through the graph. */
1412 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1413 AV_BUFFERSINK_FLAG_NO_REQUEST);
/* EAGAIN/EOF simply mean "no frame right now"; anything else is logged. */
1415 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1416 av_log(NULL, AV_LOG_WARNING,
1417 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1418 } else if (flush && ret == AVERROR_EOF) {
/* On flush+EOF, push a NULL frame to let do_video_out flush the encoder. */
1419 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1420 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1424 if (ost->finished) {
1425 av_frame_unref(filtered_frame);
1428 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1429 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1430 AVRational tb = enc->time_base;
/* Temporarily scale up the time base denominator so float_pts keeps
 * extra fractional precision; divided back out below. */
1431 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1433 tb.den <<= extra_bits;
1435 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1436 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1437 float_pts /= 1 << extra_bits;
1438 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1439 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1441 filtered_frame->pts =
1442 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1443 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1445 //if (ost->source_index >= 0)
1446 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1448 switch (filter->inputs[0]->type) {
1449 case AVMEDIA_TYPE_VIDEO:
1450 if (!ost->frame_aspect_ratio.num)
1451 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1454 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1455 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1457 enc->time_base.num, enc->time_base.den);
1460 do_video_out(of, ost, filtered_frame, float_pts);
1462 case AVMEDIA_TYPE_AUDIO:
/* Channel-count changes mid-stream are only valid if the encoder
 * advertises AV_CODEC_CAP_PARAM_CHANGE. */
1463 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1464 enc->channels != av_frame_get_channels(filtered_frame)) {
1465 av_log(NULL, AV_LOG_ERROR,
1466 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1469 do_audio_out(of, ost, filtered_frame);
1472 // TODO support subtitle filters
1476 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type payload sizes, muxing overhead
 * percentage, and (at verbose level) per-stream demux/decode and
 * encode/mux counters for every input and output file. */
1483 static void print_final_stats(int64_t total_size)
1485 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1486 uint64_t subtitle_size = 0;
1487 uint64_t data_size = 0;
/* percent stays negative ("unknown") unless a valid overhead can be computed. */
1488 float percent = -1.0;
1492 for (i = 0; i < nb_output_streams; i++) {
1493 OutputStream *ost = output_streams[i];
1494 switch (ost->enc_ctx->codec_type) {
1495 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1496 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1497 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1498 default: other_size += ost->data_size; break;
1500 extra_size += ost->enc_ctx->extradata_size;
1501 data_size += ost->data_size;
/* NOTE(review): mixes deprecated CODEC_FLAG_PASS2 with AV_CODEC_FLAG_PASS1;
 * should presumably be AV_CODEC_FLAG_PASS2 — same value, but confirm. */
1502 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1503 != AV_CODEC_FLAG_PASS1)
/* Overhead = bytes the container added on top of the raw stream payload. */
1507 if (data_size && total_size>0 && total_size >= data_size)
1508 percent = 100.0 * (total_size - data_size) / data_size;
1510 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1511 video_size / 1024.0,
1512 audio_size / 1024.0,
1513 subtitle_size / 1024.0,
1514 other_size / 1024.0,
1515 extra_size / 1024.0);
1517 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1519 av_log(NULL, AV_LOG_INFO, "unknown");
1520 av_log(NULL, AV_LOG_INFO, "\n");
1522 /* print verbose per-stream stats */
1523 for (i = 0; i < nb_input_files; i++) {
1524 InputFile *f = input_files[i];
/* Shadows the parameter intentionally: per-file totals. */
1525 uint64_t total_packets = 0, total_size = 0;
1527 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1528 i, f->ctx->filename);
1530 for (j = 0; j < f->nb_streams; j++) {
1531 InputStream *ist = input_streams[f->ist_index + j];
1532 enum AVMediaType type = ist->dec_ctx->codec_type;
1534 total_size += ist->data_size;
1535 total_packets += ist->nb_packets;
1537 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1538 i, j, media_type_string(type));
1539 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1540 ist->nb_packets, ist->data_size);
1542 if (ist->decoding_needed) {
1543 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1544 ist->frames_decoded);
1545 if (type == AVMEDIA_TYPE_AUDIO)
1546 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1547 av_log(NULL, AV_LOG_VERBOSE, "; ");
1550 av_log(NULL, AV_LOG_VERBOSE, "\n");
1553 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1554 total_packets, total_size);
/* Symmetric report for the output side. */
1557 for (i = 0; i < nb_output_files; i++) {
1558 OutputFile *of = output_files[i];
1559 uint64_t total_packets = 0, total_size = 0;
1561 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1562 i, of->ctx->filename);
1564 for (j = 0; j < of->ctx->nb_streams; j++) {
1565 OutputStream *ost = output_streams[of->ost_index + j];
1566 enum AVMediaType type = ost->enc_ctx->codec_type;
1568 total_size += ost->data_size;
1569 total_packets += ost->packets_written;
1571 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1572 i, j, media_type_string(type));
1573 if (ost->encoding_needed) {
1574 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1575 ost->frames_encoded);
1576 if (type == AVMEDIA_TYPE_AUDIO)
1577 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1578 av_log(NULL, AV_LOG_VERBOSE, "; ");
1581 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1582 ost->packets_written, ost->data_size);
1584 av_log(NULL, AV_LOG_VERBOSE, "\n");
1587 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1588 total_packets, total_size);
/* Warn loudly when nothing at all was produced. */
1590 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1591 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1593 av_log(NULL, AV_LOG_WARNING, "\n");
1595 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic one-line status report ("frame= ... fps= ... time= ...")
 * to stderr/log, and a machine-readable key=value block to progress_avio
 * when -progress is in use. Rate-limited to one update per 500 ms except
 * for the final report. Static locals keep state between invocations. */
1600 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1603 AVBPrint buf_script;
1605 AVFormatContext *oc;
1607 AVCodecContext *enc;
1608 int frame_number, vid, i;
/* pts tracks the furthest end timestamp across all output streams. */
1611 int64_t pts = INT64_MIN + 1;
1612 static int64_t last_time = -1;
1613 static int qp_histogram[52];
1614 int hours, mins, secs, us;
/* Nothing to print and nowhere to send progress: bail out early. */
1618 if (!print_stats && !is_last_report && !progress_avio)
1621 if (!is_last_report) {
1622 if (last_time == -1) {
1623 last_time = cur_time;
/* Throttle: at most one report every 500 ms of wall clock. */
1626 if ((cur_time - last_time) < 500000)
1628 last_time = cur_time;
/* t: elapsed wall-clock time in seconds since transcode start. */
1631 t = (cur_time-timer_start) / 1000000.0;
1634 oc = output_files[0]->ctx;
1636 total_size = avio_size(oc->pb);
1637 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1638 total_size = avio_tell(oc->pb);
1642 av_bprint_init(&buf_script, 0, 1);
1643 for (i = 0; i < nb_output_streams; i++) {
1645 ost = output_streams[i];
/* Encoder quality is a lambda; convert to a QP for display. */
1647 if (!ost->stream_copy)
1648 q = ost->quality / (float) FF_QP2LAMBDA;
1650 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1651 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1652 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1653 ost->file_index, ost->index, q);
/* First video stream drives the frame/fps fields. */
1655 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1658 frame_number = ost->frame_number;
1659 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" selects 1 decimal for small rates, 0 otherwise (via %.*f). */
1660 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1661 frame_number, fps < 9.95, fps, q);
1662 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1663 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1664 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1665 ost->file_index, ost->index, q);
1667 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* Optional QP histogram: one hex digit (log2 of count) per QP bucket. */
1671 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1673 for (j = 0; j < 32; j++)
1674 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1677 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1679 double error, error_sum = 0;
1680 double scale, scale_sum = 0;
1682 char type[3] = { 'Y','U','V' };
1683 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1684 for (j = 0; j < 3; j++) {
/* Final report averages accumulated error over all frames;
 * per-frame reports use the last frame's error only. */
1685 if (is_last_report) {
1686 error = enc->error[j];
1687 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1689 error = ost->error[j];
1690 scale = enc->width * enc->height * 255.0 * 255.0;
1696 p = psnr(error / scale);
1697 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1698 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1699 ost->file_index, ost->index, type[j] | 32, p);
1701 p = psnr(error_sum / scale_sum);
1702 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1703 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1704 ost->file_index, ost->index, p);
1708 /* compute min output value */
1709 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1710 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1711 ost->st->time_base, AV_TIME_BASE_Q));
1713 nb_frames_drop += ost->last_dropped;
/* Break |pts| (microseconds) into H:M:S.us components for display. */
1716 secs = FFABS(pts) / AV_TIME_BASE;
1717 us = FFABS(pts) % AV_TIME_BASE;
/* -1 marks "not available"; printed as N/A below. */
1723 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1724 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1726 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1728 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1729 "size=%8.0fkB time=", total_size / 1024.0);
1731 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1732 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1733 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1734 (100 * us) / AV_TIME_BASE);
1737 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1738 av_bprintf(&buf_script, "bitrate=N/A\n");
1740 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1741 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1744 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1745 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1746 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1747 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1748 hours, mins, secs, us);
1750 if (nb_frames_dup || nb_frames_drop)
1751 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1752 nb_frames_dup, nb_frames_drop);
1753 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1754 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1757 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1758 av_bprintf(&buf_script, "speed=N/A\n");
1760 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1761 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one terminal line; '\n' ends the last. */
1764 if (print_stats || is_last_report) {
1765 const char end = is_last_report ? '\n' : '\r';
1766 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1767 fprintf(stderr, "%s %c", buf, end);
1769 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1774 if (progress_avio) {
1775 av_bprintf(&buf_script, "progress=%s\n",
1776 is_last_report ? "end" : "continue");
1777 avio_write(progress_avio, buf_script.str,
1778 FFMIN(buf_script.len, buf_script.size - 1));
1779 avio_flush(progress_avio);
1780 av_bprint_finalize(&buf_script, NULL);
1781 if (is_last_report) {
1782 if ((ret = avio_closep(&progress_avio)) < 0)
1783 av_log(NULL, AV_LOG_ERROR,
1784 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1789 print_final_stats(total_size);
/* Drain every active encoder at end of input: send a NULL (flush) frame,
 * then pull packets with avcodec_receive_packet() until EOF, muxing each. */
1792 static void flush_encoders(void)
1796 for (i = 0; i < nb_output_streams; i++) {
1797 OutputStream *ost = output_streams[i];
1798 AVCodecContext *enc = ost->enc_ctx;
1799 OutputFile *of = output_files[ost->file_index];
1800 int stop_encoding = 0;
/* Stream-copied streams have no encoder to flush. */
1802 if (!ost->encoding_needed)
/* Audio codecs with frame_size <= 1 (e.g. raw PCM) buffer nothing. */
1805 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1807 #if FF_API_LAVF_FMT_RAWPICTURE
1808 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* Only video and audio encoders are drained here. */
1812 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame enters draining mode in the send/receive encode API. */
1815 avcodec_send_frame(enc, NULL);
1818 const char *desc = NULL;
1820 switch (enc->codec_type) {
1821 case AVMEDIA_TYPE_AUDIO:
1824 case AVMEDIA_TYPE_VIDEO:
1834 av_init_packet(&pkt);
1838 update_benchmark(NULL);
1839 ret = avcodec_receive_packet(enc, &pkt);
1840 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1841 if (ret < 0 && ret != AVERROR_EOF) {
1842 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass stats are still produced while draining. */
1847 if (ost->logfile && enc->stats_out) {
1848 fprintf(ost->logfile, "%s", enc->stats_out);
1850 if (ret == AVERROR_EOF) {
/* Muxer already closed for this stream: discard the drained packet. */
1854 if (ost->finished & MUXER_FINISHED) {
1855 av_packet_unref(&pkt);
1858 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1859 pkt_size = pkt.size;
1860 output_packet(of, &pkt, ost);
1861 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1862 do_video_stats(ost, pkt_size);
1873 * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero when the packet may be written: the output stream must be
 * fed by exactly this input stream, and the input timestamp must have
 * reached the output file's requested start time. */
1875 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1877 OutputFile *of = output_files[ost->file_index];
/* Global index of ist across all input files, for comparison below. */
1878 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1880 if (ost->source_index != ist_index)
1886 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding: applies
 * -ss/-t trimming, rescales timestamps into the output stream time base,
 * and runs the legacy parser-based bitstream fixups where still needed. */
1892 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1894 OutputFile *of = output_files[ost->file_index];
1895 InputFile *f = input_files [ist->file_index];
1896 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
/* The output -ss offset expressed in the output stream's time base. */
1897 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1901 av_init_packet(&opkt);
/* Drop leading non-keyframes unless -copyinkf was given. */
1903 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1904 !ost->copy_initial_nonkeyframes)
/* Drop packets timestamped before the requested start, unless
 * -copypriorss asked to keep them. */
1907 if (!ost->frame_number && !ost->copy_prior_start) {
1908 int64_t comp_start = start_time;
1909 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1910 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1911 if (pkt->pts == AV_NOPTS_VALUE ?
1912 ist->pts < comp_start :
1913 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* End of the output file's -t window: close the stream. */
1917 if (of->recording_time != INT64_MAX &&
1918 ist->pts >= of->recording_time + start_time) {
1919 close_output_stream(ost);
/* Per-input-file recording limit, relative to the input's start time. */
1923 if (f->recording_time != INT64_MAX) {
1924 start_time = f->ctx->start_time;
1925 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1926 start_time += f->start_time;
1927 if (ist->pts >= f->recording_time + start_time) {
1928 close_output_stream(ost);
1933 /* force the input stream PTS */
1934 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* Rescale pts/dts into the output stream time base, shifted by -ss. */
1937 if (pkt->pts != AV_NOPTS_VALUE)
1938 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1940 opkt.pts = AV_NOPTS_VALUE;
/* Fall back to the demuxer-tracked dts when the packet has none. */
1942 if (pkt->dts == AV_NOPTS_VALUE)
1943 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1945 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1946 opkt.dts -= ost_tb_start_time;
/* Audio: derive sample-accurate timestamps from the frame duration to
 * avoid rounding drift across time-base conversions. */
1948 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1949 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1951 duration = ist->dec_ctx->frame_size;
1952 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1953 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1954 ost->st->time_base) - ost_tb_start_time;
1957 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1958 opkt.flags = pkt->flags;
1959 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1960 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1961 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1962 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1963 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1965 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1966 &opkt.data, &opkt.size,
1967 pkt->data, pkt->size,
1968 pkt->flags & AV_PKT_FLAG_KEY);
1970 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Wrap parser-allocated data so the packet owns and frees it. */
1975 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1980 opkt.data = pkt->data;
1981 opkt.size = pkt->size;
1983 av_copy_packet_side_data(&opkt, pkt);
1985 #if FF_API_LAVF_FMT_RAWPICTURE
1986 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1987 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1988 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1989 /* store AVPicture in AVPacket, as expected by the output format */
1990 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1992 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1996 opkt.data = (uint8_t *)&pict;
1997 opkt.size = sizeof(AVPicture);
1998 opkt.flags |= AV_PKT_FLAG_KEY;
2002 output_packet(of, &opkt, ost);
/* If the decoder reports no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn about the
 * guess. Return value semantics not fully visible here — presumably
 * nonzero on success, 0 when no layout could be determined (see caller
 * in decode_audio, which treats 0 as fatal). */
2005 int guess_input_channel_layout(InputStream *ist)
2007 AVCodecContext *dec = ist->dec_ctx;
2009 if (!dec->channel_layout) {
2010 char layout_name[256];
/* Respect the user cap on how many channels we are willing to guess for. */
2012 if (dec->channels > ist->guess_layout_max)
2014 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2015 if (!dec->channel_layout)
2017 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2018 dec->channels, dec->channel_layout);
2019 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2020 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Update decode success/failure counters and, under -xerror, abort on
 * decode errors or on frames the decoder flagged as corrupt. */
2025 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successful decodes, [1] counts errors. */
2027 if (*got_output || ret<0)
2028 decode_error_stat[ret<0] ++;
2030 if (ret < 0 && exit_on_error)
2033 if (exit_on_error && *got_output && ist) {
2034 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2035 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2041 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2042 // There is the following difference: if you got a frame, you must call
2043 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2044 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Thin wrapper mapping the send/receive decode API onto the old
 * got_frame-style calling convention used by the callers in this file. */
2045 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2052 ret = avcodec_send_packet(avctx, pkt);
2053 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2054 // decoded frames with avcodec_receive_frame() until done.
/* EOF from send_packet just means the decoder is already draining. */
2055 if (ret < 0 && ret != AVERROR_EOF)
2059 ret = avcodec_receive_frame(avctx, frame);
/* EAGAIN means "no frame yet", which is not an error for the caller. */
2060 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Decode one audio packet for ist, update timing/bookkeeping, reconfigure
 * filtergraphs on format changes, and push the decoded frame into every
 * filter input fed by this stream. Sets *got_output when a frame came out. */
2068 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2070 AVFrame *decoded_frame, *f;
2071 AVCodecContext *avctx = ist->dec_ctx;
2072 int i, ret, err = 0, resample_changed;
2073 AVRational decoded_frame_tb;
/* Reusable frames, allocated once per input stream. */
2075 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2076 return AVERROR(ENOMEM);
2077 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2078 return AVERROR(ENOMEM);
2079 decoded_frame = ist->decoded_frame;
2081 update_benchmark(NULL);
2082 ret = decode(avctx, decoded_frame, got_output, pkt);
2083 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* A decoder that produced output without a valid sample rate is broken. */
2085 if (ret >= 0 && avctx->sample_rate <= 0) {
2086 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2087 ret = AVERROR_INVALIDDATA;
2090 if (ret != AVERROR_EOF)
2091 check_decode_result(ist, got_output, ret);
2093 if (!*got_output || ret < 0)
2096 ist->samples_decoded += decoded_frame->nb_samples;
2097 ist->frames_decoded++;
2100 /* increment next_dts to use for the case where the input stream does not
2101 have timestamps or there are multiple frames in the packet */
2102 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2104 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Detect any change in sample format/rate/channels/layout since the
 * filtergraph was configured. */
2108 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2109 ist->resample_channels != avctx->channels ||
2110 ist->resample_channel_layout != decoded_frame->channel_layout ||
2111 ist->resample_sample_rate != decoded_frame->sample_rate;
2112 if (resample_changed) {
2113 char layout1[64], layout2[64];
2115 if (!guess_input_channel_layout(ist)) {
2116 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2117 "layout for Input Stream #%d.%d\n", ist->file_index,
2121 decoded_frame->channel_layout = avctx->channel_layout;
2123 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2124 ist->resample_channel_layout);
2125 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2126 decoded_frame->channel_layout);
2128 av_log(NULL, AV_LOG_INFO,
2129 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2130 ist->file_index, ist->st->index,
2131 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2132 ist->resample_channels, layout1,
2133 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2134 avctx->channels, layout2);
/* Remember the new format and rebuild the affected filtergraphs. */
2136 ist->resample_sample_fmt = decoded_frame->format;
2137 ist->resample_sample_rate = decoded_frame->sample_rate;
2138 ist->resample_channel_layout = decoded_frame->channel_layout;
2139 ist->resample_channels = avctx->channels;
2141 for (i = 0; i < ist->nb_filters; i++) {
2142 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2144 av_log(NULL, AV_LOG_ERROR,
2145 "Error reconfiguring input stream %d:%d filter %d\n",
2146 ist->file_index, ist->st->index, i);
2151 for (i = 0; i < nb_filtergraphs; i++)
2152 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2153 FilterGraph *fg = filtergraphs[i];
2154 if (configure_filtergraph(fg) < 0) {
2155 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* Pick the best available timestamp source: frame pts, packet pts,
 * or the demuxer-tracked dts (already in AV_TIME_BASE units). */
2161 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2162 decoded_frame_tb = ist->st->time_base;
2163 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2164 decoded_frame->pts = pkt->pts;
2165 decoded_frame_tb = ist->st->time_base;
2167 decoded_frame->pts = ist->dts;
2168 decoded_frame_tb = AV_TIME_BASE_Q;
/* Rescale to the sample-rate time base with drift compensation. */
2170 if (decoded_frame->pts != AV_NOPTS_VALUE)
2171 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2172 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2173 (AVRational){1, avctx->sample_rate});
2174 ist->nb_samples = decoded_frame->nb_samples;
/* Feed each filter input; all but the last get a reference so the
 * frame can be handed to multiple graphs. */
2175 for (i = 0; i < ist->nb_filters; i++) {
2176 if (i < ist->nb_filters - 1) {
2177 f = ist->filter_frame;
2178 err = av_frame_ref(f, decoded_frame);
2183 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2184 AV_BUFFERSRC_FLAG_PUSH);
2185 if (err == AVERROR_EOF)
2186 err = 0; /* ignore */
2190 decoded_frame->pts = AV_NOPTS_VALUE;
2193 av_frame_unref(ist->filter_frame);
2194 av_frame_unref(decoded_frame);
2195 return err < 0 ? err : ret;
/* Decode one video packet (or drain on eof), recover timestamps via
 * best-effort logic and a buffered-dts fallback, handle hwaccel frame
 * download and format changes, then push the frame into all filter inputs.
 * Sets *got_output when a frame was produced. */
2198 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2200 AVFrame *decoded_frame, *f;
2201 int i, ret = 0, err = 0, resample_changed;
2202 int64_t best_effort_timestamp;
2203 int64_t dts = AV_NOPTS_VALUE;
2204 AVRational *frame_sample_aspect;
2207 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2208 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2210 if (!eof && pkt && pkt->size == 0)
2213 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2214 return AVERROR(ENOMEM);
2215 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2216 return AVERROR(ENOMEM);
2217 decoded_frame = ist->decoded_frame;
2218 if (ist->dts != AV_NOPTS_VALUE)
2219 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2222 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2225 // The old code used to set dts on the drain packet, which does not work
2226 // with the new API anymore.
/* Queue the dts so it can serve as a timestamp fallback for frames the
 * decoder emits later (e.g. during draining). */
2228 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2230 return AVERROR(ENOMEM);
2231 ist->dts_buffer = new;
2232 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2235 update_benchmark(NULL);
2236 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2237 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2239 // The following line may be required in some cases where there is no parser
2240 // or the parser does not set has_b_frames correctly
2241 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2242 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2243 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2245 av_log(ist->dec_ctx, AV_LOG_WARNING,
2246 "video_delay is larger in decoder than demuxer %d > %d.\n"
2247 "If you want to help, upload a sample "
2248 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2249 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2250 ist->dec_ctx->has_b_frames,
2251 ist->st->codecpar->video_delay);
2254 if (ret != AVERROR_EOF)
2255 check_decode_result(ist, got_output, ret);
/* Sanity check: the frame should match the context's advertised geometry. */
2257 if (*got_output && ret >= 0) {
2258 if (ist->dec_ctx->width != decoded_frame->width ||
2259 ist->dec_ctx->height != decoded_frame->height ||
2260 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2261 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2262 decoded_frame->width,
2263 decoded_frame->height,
2264 decoded_frame->format,
2265 ist->dec_ctx->width,
2266 ist->dec_ctx->height,
2267 ist->dec_ctx->pix_fmt);
2271 if (!*got_output || ret < 0)
/* -top user override for field order. */
2274 if(ist->top_field_first>=0)
2275 decoded_frame->top_field_first = ist->top_field_first;
2277 ist->frames_decoded++;
/* Download hw-decoded surfaces to system memory when required. */
2279 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2280 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2284 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2286 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* While draining, fall back to the oldest queued packet dts (FIFO pop). */
2288 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2289 best_effort_timestamp = ist->dts_buffer[0];
2291 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2292 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2293 ist->nb_dts_buffer--;
2296 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2297 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2299 if (ts != AV_NOPTS_VALUE)
2300 ist->next_pts = ist->pts = ts;
2304 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2305 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2306 ist->st->index, av_ts2str(decoded_frame->pts),
2307 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2308 best_effort_timestamp,
2309 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2310 decoded_frame->key_frame, decoded_frame->pict_type,
2311 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level aspect ratio overrides the codec-level one. */
2314 if (ist->st->sample_aspect_ratio.num)
2315 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* Rebuild filtergraphs when the frame geometry or pixel format changed. */
2317 resample_changed = ist->resample_width != decoded_frame->width ||
2318 ist->resample_height != decoded_frame->height ||
2319 ist->resample_pix_fmt != decoded_frame->format;
2320 if (resample_changed) {
2321 av_log(NULL, AV_LOG_INFO,
2322 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2323 ist->file_index, ist->st->index,
2324 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2325 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2327 ist->resample_width = decoded_frame->width;
2328 ist->resample_height = decoded_frame->height;
2329 ist->resample_pix_fmt = decoded_frame->format;
2331 for (i = 0; i < ist->nb_filters; i++) {
2332 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2334 av_log(NULL, AV_LOG_ERROR,
2335 "Error reconfiguring input stream %d:%d filter %d\n",
2336 ist->file_index, ist->st->index, i);
2341 for (i = 0; i < nb_filtergraphs; i++) {
2342 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2343 configure_filtergraph(filtergraphs[i]) < 0) {
2344 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2350 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* Feed each filter input; all but the last receive a reference. */
2351 for (i = 0; i < ist->nb_filters; i++) {
2352 if (!frame_sample_aspect->num)
2353 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2355 if (i < ist->nb_filters - 1) {
2356 f = ist->filter_frame;
2357 err = av_frame_ref(f, decoded_frame);
2362 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2363 if (err == AVERROR_EOF) {
2364 err = 0; /* ignore */
2365 } else if (err < 0) {
2366 av_log(NULL, AV_LOG_FATAL,
2367 "Failed to inject frame into filter network: %s\n", av_err2str(err));
2373 av_frame_unref(ist->filter_frame);
2374 av_frame_unref(decoded_frame);
2375 return err < 0 ? err : ret;
/* Decode one subtitle packet from an input stream, optionally patch the
 * previous subtitle's display duration (-fix_sub_duration), feed the
 * sub2video filter inputs, and emit the subtitle to every output stream
 * that encodes subtitles from this input.
 * NOTE(review): intermediate lines are missing from this extract, so some
 * braces/early-returns are not visible here. */
2378 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2380 AVSubtitle subtitle;
2381 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2382 &subtitle, got_output, pkt);
2384 check_decode_result(NULL, got_output, ret);
/* decode failed or produced nothing: flush sub2video state (on EOF,
 * presumably — the guarding condition is not fully visible) */
2386 if (ret < 0 || !*got_output) {
2388 sub2video_flush(ist);
2392 if (ist->fix_sub_duration) {
2394 if (ist->prev_sub.got_output) {
/* clamp the previous subtitle so it ends when the new one starts
 * (delta rescaled to milliseconds) */
2395 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2396 1000, AV_TIME_BASE);
2397 if (end < ist->prev_sub.subtitle.end_display_time) {
2398 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2399 "Subtitle duration reduced from %d to %d%s\n",
2400 ist->prev_sub.subtitle.end_display_time, end,
2401 end <= 0 ? ", dropping it" : "");
2402 ist->prev_sub.subtitle.end_display_time = end;
/* delay output by one subtitle: swap current decode result with the
 * stashed previous one so durations can be fixed retroactively */
2405 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2406 FFSWAP(int, ret, ist->prev_sub.ret);
2407 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2415 sub2video_update(ist, &subtitle);
2417 if (!subtitle.num_rects)
2420 ist->frames_decoded++;
2422 for (i = 0; i < nb_output_streams; i++) {
2423 OutputStream *ost = output_streams[i];
2425 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2426 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2429 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2433 avsubtitle_free(&subtitle);
/* Signal EOF to every filtergraph input fed by this input stream by
 * pushing a NULL frame into each buffersrc. Error handling of `ret`
 * is not visible in this extract. */
2437 static int send_filter_eof(InputStream *ist)
2440 for (i = 0; i < ist->nb_filters; i++) {
2441 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
/* Core per-packet input handler: maintains the input stream's pts/dts
 * bookkeeping, runs the appropriate decoder (audio/video/subtitle) when
 * decoding is needed, propagates EOF to the filters, and performs plain
 * stream copy for non-decoded streams.
 * NOTE(review): this extract is missing many intermediate lines
 * (declarations of avpkt/ret/got_output/duration/repeating, break
 * statements, closing braces), so the comments below describe only what
 * the visible lines establish. */
/* pkt = NULL means EOF (needed to flush decoder buffers) */
2449 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2453 int eof_reached = 0;
/* first packet seen on this stream: establish initial dts/pts */
2456 if (!ist->saw_first_ts) {
/* start dts before zero by the decoder delay (B-frame reordering) */
2457 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2459 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2460 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2461 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2463 ist->saw_first_ts = 1;
2466 if (ist->next_dts == AV_NOPTS_VALUE)
2467 ist->next_dts = ist->dts;
2468 if (ist->next_pts == AV_NOPTS_VALUE)
2469 ist->next_pts = ist->pts;
2473 av_init_packet(&avpkt);
/* resync our timestamp predictions to the packet's dts (rescaled to
 * AV_TIME_BASE_Q) */
2480 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2481 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2482 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2483 ist->next_pts = ist->pts = ist->dts;
2486 // while we have more to decode or while the decoder did output something on EOF
2487 while (ist->decoding_needed) {
2491 ist->pts = ist->next_pts;
2492 ist->dts = ist->next_dts;
2494 switch (ist->dec_ctx->codec_type) {
2495 case AVMEDIA_TYPE_AUDIO:
2496 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2498 case AVMEDIA_TYPE_VIDEO:
2499 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
/* estimate the frame duration: prefer the packet duration, else derive
 * it from the decoder framerate and repeat_pict/ticks_per_frame */
2500 if (!repeating || !pkt || got_output) {
2501 if (pkt && pkt->duration) {
2502 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2503 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2504 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2505 duration = ((int64_t)AV_TIME_BASE *
2506 ist->dec_ctx->framerate.den * ticks) /
2507 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2510 if(ist->dts != AV_NOPTS_VALUE && duration) {
2511 ist->next_dts += duration;
2513 ist->next_dts = AV_NOPTS_VALUE;
2517 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2519 case AVMEDIA_TYPE_SUBTITLE:
2522 ret = transcode_subtitles(ist, &avpkt, &got_output);
2523 if (!pkt && ret >= 0)
2530 if (ret == AVERROR_EOF) {
2536 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2537 ist->file_index, ist->st->index, av_err2str(ret));
2540 // Decoding might not terminate if we're draining the decoder, and
2541 // the decoder keeps returning an error.
2542 // This should probably be considered a libavcodec issue.
2543 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2552 // During draining, we might get multiple output frames in this loop.
2553 // ffmpeg.c does not drain the filter chain on configuration changes,
2554 // which means if we send multiple frames at once to the filters, and
2555 // one of those frames changes configuration, the buffered frames will
2556 // be lost. This can upset certain FATE tests.
2557 // Decode only 1 frame per call on EOF to appease these FATE tests.
2558 // The ideal solution would be to rewrite decoding to use the new
2559 // decoding API in a better way.
2566 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2567 /* except when looping we need to flush but not to send an EOF */
2568 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2569 int ret = send_filter_eof(ist);
2571 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2576 /* handle stream copy */
2577 if (!ist->decoding_needed) {
2578 ist->dts = ist->next_dts;
2579 switch (ist->dec_ctx->codec_type) {
2580 case AVMEDIA_TYPE_AUDIO:
/* advance dts by one audio frame's duration */
2581 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2582 ist->dec_ctx->sample_rate;
2584 case AVMEDIA_TYPE_VIDEO:
2585 if (ist->framerate.num) {
2586 // TODO: Remove work-around for c99-to-c89 issue 7
2587 AVRational time_base_q = AV_TIME_BASE_Q;
/* quantize next_dts to the forced framerate grid, then step one frame */
2588 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2589 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2590 } else if (pkt->duration) {
2591 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2592 } else if(ist->dec_ctx->framerate.num != 0) {
2593 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2594 ist->next_dts += ((int64_t)AV_TIME_BASE *
2595 ist->dec_ctx->framerate.den * ticks) /
2596 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2600 ist->pts = ist->dts;
2601 ist->next_pts = ist->next_dts;
/* forward the packet unchanged to every matching stream-copy output */
2603 for (i = 0; pkt && i < nb_output_streams; i++) {
2604 OutputStream *ost = output_streams[i];
2606 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2609 do_streamcopy(ist, ost, pkt);
/* returns nonzero while more input is expected, 0 once EOF was reached */
2612 return !eof_reached;
/* Build an SDP description covering every RTP output file and either print
 * it to stdout or write it to -sdp_file. Bails out (not visible here) until
 * all output file headers have been written. */
2615 static void print_sdp(void)
2620 AVIOContext *sdp_pb;
2621 AVFormatContext **avc;
/* wait until every output header is written before emitting the SDP */
2623 for (i = 0; i < nb_output_files; i++) {
2624 if (!output_files[i]->header_written)
2628 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* collect only the RTP muxer contexts; j counts them */
2631 for (i = 0, j = 0; i < nb_output_files; i++) {
2632 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2633 avc[j] = output_files[i]->ctx;
2641 av_sdp_create(avc, j, sdp, sizeof(sdp));
2643 if (!sdp_filename) {
2644 printf("SDP:\n%s\n", sdp);
2647 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2648 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2650 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2651 avio_closep(&sdp_pb);
2652 av_freep(&sdp_filename);
/* Look up the hwaccel descriptor registered for the given hardware pixel
 * format; the visible code returns a match from the global hwaccels[]
 * table (NULL-on-miss return is not visible in this extract). */
2660 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2663 for (i = 0; hwaccels[i].name; i++)
2664 if (hwaccels[i].pix_fmt == pix_fmt)
2665 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick/initialize a matching hwaccel for this input stream,
 * honoring the user's -hwaccel selection. Falls through to a software
 * format if no hwaccel applies (fallback path not visible here). */
2669 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2671 InputStream *ist = s->opaque;
2672 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE) */
2675 for (p = pix_fmts; *p != -1; p++) {
2676 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2677 const HWAccel *hwaccel;
/* only hardware formats are candidates for hwaccel setup */
2679 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2682 hwaccel = get_hwaccel(*p);
/* skip if another hwaccel is already active, or the user asked for a
 * specific different one */
2684 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2685 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2688 ret = hwaccel->init(s);
/* init failed: fatal only when this exact hwaccel was requested */
2690 if (ist->hwaccel_id == hwaccel->id) {
2691 av_log(NULL, AV_LOG_FATAL,
2692 "%s hwaccel requested for input stream #%d:%d, "
2693 "but cannot be initialized.\n", hwaccel->name,
2694 ist->file_index, ist->st->index);
2695 return AV_PIX_FMT_NONE;
2700 if (ist->hw_frames_ctx) {
2701 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2702 if (!s->hw_frames_ctx)
2703 return AV_PIX_FMT_NONE;
2706 ist->active_hwaccel_id = hwaccel->id;
2707 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * active hwaccel when the frame uses its pixel format, otherwise fall back
 * to libavcodec's default allocator. */
2714 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2716 InputStream *ist = s->opaque;
2718 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2719 return ist->hwaccel_get_buffer(s, frame, flags);
2721 return avcodec_default_get_buffer2(s, frame, flags);
/* Initialize one input stream: propagate decoder parameters to its filter
 * inputs and, when decoding is needed, configure and open the decoder
 * (callbacks, refcounted frames, subtitle options, packet timebase,
 * threading). On failure writes a message into `error`. */
2724 static int init_input_stream(int ist_index, char *error, int error_len)
2727 InputStream *ist = input_streams[ist_index];
2729 for (i = 0; i < ist->nb_filters; i++) {
2730 ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
2732 av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
2737 if (ist->decoding_needed) {
2738 AVCodec *codec = ist->dec;
/* no decoder available for this codec id (the guarding `if (!codec)`
 * is not visible in this extract) */
2740 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2741 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2742 return AVERROR(EINVAL);
/* hook our hwaccel-aware callbacks into the decoder context */
2745 ist->dec_ctx->opaque = ist;
2746 ist->dec_ctx->get_format = get_format;
2747 ist->dec_ctx->get_buffer2 = get_buffer;
2748 ist->dec_ctx->thread_safe_callbacks = 1;
2750 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2751 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2752 (ist->decoding_needed & DECODING_FOR_OST)) {
2753 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2754 if (ist->decoding_needed & DECODING_FOR_FILTER)
2755 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2758 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2760 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2761 * audio, and video decoders such as cuvid or mediacodec */
2762 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* default to automatic thread count unless the user overrode it */
2764 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2765 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2766 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2767 if (ret == AVERROR_EXPERIMENTAL)
2768 abort_codec_experimental(codec, 0);
2770 snprintf(error, error_len,
2771 "Error while opening decoder for input stream "
2773 ist->file_index, ist->st->index, av_err2str(ret));
/* any leftover entries in decoder_opts were not consumed by the codec */
2776 assert_avoptions(ist->decoder_opts);
2779 ist->next_pts = AV_NOPTS_VALUE;
2780 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream an output stream copies/encodes from, based on
 * its source_index (the no-source return path is not visible here). */
2785 static InputStream *get_input_stream(OutputStream *ost)
2787 if (ost->source_index >= 0)
2788 return input_streams[ost->source_index];
/* qsort comparator for int64_t values; FFDIFFSIGN avoids the overflow a
 * plain subtraction comparator would risk. */
2792 static int compare_int64(const void *a, const void *b)
2794 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/* open the muxer when all the streams are initialized */
/* Once every stream of this output file is initialized, write the muxer
 * header, dump the format, trigger SDP printing if requested, and flush
 * any packets that were queued while the muxer was not yet open. */
2798 static int check_init_output_file(OutputFile *of, int file_index)
/* if any stream is still uninitialized, do nothing yet */
2802 for (i = 0; i < of->ctx->nb_streams; i++) {
2803 OutputStream *ost = output_streams[of->ost_index + i];
2804 if (!ost->initialized)
2808 of->ctx->interrupt_callback = int_cb;
2810 ret = avformat_write_header(of->ctx, &of->opts);
2812 av_log(NULL, AV_LOG_ERROR,
2813 "Could not write header for output file #%d "
2814 "(incorrect codec parameters ?): %s\n",
2815 file_index, av_err2str(ret));
2818 //assert_avoptions(of->opts);
2819 of->header_written = 1;
2821 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2823 if (sdp_filename || want_sdp)
2826 /* flush the muxing queues */
2827 for (i = 0; i < of->ctx->nb_streams; i++) {
2828 OutputStream *ost = output_streams[of->ost_index + i];
2830 while (av_fifo_size(ost->muxing_queue)) {
2832 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2833 write_packet(of, &pkt, ost);
/* Initialize the chain of bitstream filters attached to an output stream:
 * wire each filter's input parameters/timebase to the previous filter's
 * output (or the stream itself for the first), then copy the final
 * filter's output parameters back onto the stream. */
2840 static int init_output_bsfs(OutputStream *ost)
2845 if (!ost->nb_bitstream_filters)
2848 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2849 ctx = ost->bsf_ctx[i];
2851 ret = avcodec_parameters_copy(ctx->par_in,
2852 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2856 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2858 ret = av_bsf_init(ctx);
2860 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2861 ost->bsf_ctx[i]->filter->name);
/* the stream's parameters/timebase become the last filter's output */
2866 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2867 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2871 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream for stream copy (no re-encode): duplicate the
 * input stream's codec parameters, extradata, side data, disposition,
 * framerate and timing into the output stream, with per-media-type
 * adjustments. NOTE(review): intermediate lines (error checks, breaks,
 * closing braces) are missing from this extract. */
2876 static int init_output_stream_streamcopy(OutputStream *ost)
2878 OutputFile *of = output_files[ost->file_index];
2879 InputStream *ist = get_input_stream(ost);
2880 AVCodecParameters *par_dst = ost->st->codecpar;
2881 AVCodecParameters *par_src = ost->ref_par;
2884 uint64_t extra_size;
2886 av_assert0(ist && !ost->filter);
/* route input codecpar through enc_ctx so user -c:copy options apply */
2888 avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2889 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2891 av_log(NULL, AV_LOG_FATAL,
2892 "Error setting up codec context options.\n");
2895 avcodec_parameters_from_context(par_src, ost->enc_ctx);
2897 extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
/* guard against overflow before allocating extradata below */
2899 if (extra_size > INT_MAX) {
2900 return AVERROR(EINVAL);
2903 /* if stream_copy is selected, no need to decode or encode */
2904 par_dst->codec_id = par_src->codec_id;
2905 par_dst->codec_type = par_src->codec_type;
/* keep the source tag only when the output container can represent it */
2907 if (!par_dst->codec_tag) {
2908 unsigned int codec_tag;
2909 if (!of->ctx->oformat->codec_tag ||
2910 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
2911 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag))
2912 par_dst->codec_tag = par_src->codec_tag;
2915 par_dst->bit_rate = par_src->bit_rate;
2916 par_dst->field_order = par_src->field_order;
2917 par_dst->chroma_location = par_src->chroma_location;
2919 if (par_src->extradata_size) {
2920 par_dst->extradata = av_mallocz(extra_size);
2921 if (!par_dst->extradata) {
2922 return AVERROR(ENOMEM);
2924 memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
2925 par_dst->extradata_size = par_src->extradata_size;
2927 par_dst->bits_per_coded_sample = par_src->bits_per_coded_sample;
2928 par_dst->bits_per_raw_sample = par_src->bits_per_raw_sample;
2930 if (!ost->frame_rate.num)
2931 ost->frame_rate = ist->framerate;
2932 ost->st->avg_frame_rate = ost->frame_rate;
2934 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2938 // copy timebase while removing common factors
2939 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
2942 ost->st->disposition = ist->st->disposition;
/* deep-copy stream side data, skipping a display matrix that a user
 * rotation override would contradict */
2944 if (ist->st->nb_side_data) {
2945 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2946 sizeof(*ist->st->side_data));
2947 if (!ost->st->side_data)
2948 return AVERROR(ENOMEM);
2950 ost->st->nb_side_data = 0;
2951 for (i = 0; i < ist->st->nb_side_data; i++) {
2952 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2953 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2955 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2958 sd_dst->data = av_malloc(sd_src->size);
2960 return AVERROR(ENOMEM);
2961 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2962 sd_dst->size = sd_src->size;
2963 sd_dst->type = sd_src->type;
2964 ost->st->nb_side_data++;
2968 ost->parser = av_parser_init(par_dst->codec_id);
2969 ost->parser_avctx = avcodec_alloc_context3(NULL);
2970 if (!ost->parser_avctx)
2971 return AVERROR(ENOMEM);
2973 switch (par_dst->codec_type) {
2974 case AVMEDIA_TYPE_AUDIO:
/* -vol requires decoding; incompatible with audio stream copy */
2975 if (audio_volume != 256) {
2976 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2979 par_dst->channel_layout = par_src->channel_layout;
2980 par_dst->sample_rate = par_src->sample_rate;
2981 par_dst->channels = par_src->channels;
2982 par_dst->frame_size = par_src->frame_size;
2983 par_dst->block_align = par_src->block_align;
2984 par_dst->initial_padding = par_src->initial_padding;
2985 par_dst->trailing_padding = par_src->trailing_padding;
2986 par_dst->profile = par_src->profile;
/* MP3/AC3: these block_align values are bogus for muxing, clear them */
2987 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2988 par_dst->block_align= 0;
2989 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2990 par_dst->block_align= 0;
2992 case AVMEDIA_TYPE_VIDEO:
2993 par_dst->format = par_src->format;
2994 par_dst->color_space = par_src->color_space;
2995 par_dst->color_range = par_src->color_range;
2996 par_dst->color_primaries = par_src->color_primaries;
2997 par_dst->color_trc = par_src->color_trc;
2998 par_dst->width = par_src->width;
2999 par_dst->height = par_src->height;
3000 par_dst->video_delay = par_src->video_delay;
3001 par_dst->profile = par_src->profile;
/* sample aspect ratio: -aspect override wins, then the stream's SAR,
 * then the codec-level SAR */
3002 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3004 av_mul_q(ost->frame_aspect_ratio,
3005 (AVRational){ par_dst->height, par_dst->width });
3006 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3007 "with stream copy may produce invalid files\n");
3009 else if (ist->st->sample_aspect_ratio.num)
3010 sar = ist->st->sample_aspect_ratio;
3012 sar = par_src->sample_aspect_ratio;
3013 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3014 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3015 ost->st->r_frame_rate = ist->st->r_frame_rate;
3017 case AVMEDIA_TYPE_SUBTITLE:
3018 par_dst->width = par_src->width;
3019 par_dst->height = par_src->height;
3021 case AVMEDIA_TYPE_UNKNOWN:
3022 case AVMEDIA_TYPE_DATA:
3023 case AVMEDIA_TYPE_ATTACHMENT:
/* Write an "encoder" metadata tag on the output stream identifying the
 * encoder used; honors an existing user-set tag and emits the generic
 * "Lavc" form when bitexact output is requested (so FATE refs stay
 * version-independent). */
3032 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3034 AVDictionaryEntry *e;
3036 uint8_t *encoder_string;
3037 int encoder_string_len;
3038 int format_flags = 0;
3039 int codec_flags = 0;
/* respect an encoder tag already set by the user */
3041 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate -fflags/-flags option strings to detect +bitexact */
3044 e = av_dict_get(of->opts, "fflags", NULL, 0);
3046 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3049 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3051 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3053 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3056 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3059 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3060 encoder_string = av_mallocz(encoder_string_len);
3061 if (!encoder_string)
3064 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3065 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3067 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3068 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL: the dictionary takes ownership of encoder_string */
3069 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3070 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames specification (a comma-separated list of
 * times, optionally "chapters[+offset]") into a sorted array of pts in
 * the encoder timebase, stored on ost->forced_kf_pts/count. */
3073 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3074 AVCodecContext *avctx)
3077 int n = 1, i, size, index = 0;
/* count commas to size the initial pts array (n entries) */
3080 for (p = kf; *p; p++)
3084 pts = av_malloc_array(size, sizeof(*pts));
3086 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3091 for (i = 0; i < n; i++) {
3092 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at each chapter start */
3097 if (!memcmp(p, "chapters", 8)) {
3099 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* grow the array by nb_chapters-1 (the entry itself is replaced) */
3102 if (avf->nb_chapters > INT_MAX - size ||
3103 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3105 av_log(NULL, AV_LOG_FATAL,
3106 "Could not allocate forced key frames array.\n");
3109 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3110 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3112 for (j = 0; j < avf->nb_chapters; j++) {
3113 AVChapter *c = avf->chapters[j];
3114 av_assert1(index < size);
3115 pts[index++] = av_rescale_q(c->start, c->time_base,
3116 avctx->time_base) + t;
/* plain timestamp entry */
3121 t = parse_time_or_die("force_key_frames", p, 1);
3122 av_assert1(index < size);
3123 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3130 av_assert0(index == size);
3131 qsort(pts, size, sizeof(*pts), compare_int64);
3132 ost->forced_kf_count = size;
3133 ost->forced_kf_pts = pts;
/* Configure the encoder context of an output stream that will be encoded:
 * disposition, filtergraph setup, frame rate selection, and per-media-type
 * parameters (sample/pixel format, dimensions, timebase, forced
 * keyframes). NOTE(review): this extract is missing intermediate lines;
 * comments describe only the visible code. */
3136 static int init_output_stream_encode(OutputStream *ost)
3138 InputStream *ist = get_input_stream(ost);
3139 AVCodecContext *enc_ctx = ost->enc_ctx;
3140 AVCodecContext *dec_ctx = NULL;
3141 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3144 set_encoder_id(output_files[ost->file_index], ost);
3147 ost->st->disposition = ist->st->disposition;
3149 dec_ctx = ist->dec_ctx;
3151 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* if this is the only stream of its media type in the file, mark the
 * audio/video stream as the default one */
3153 for (j = 0; j < oc->nb_streams; j++) {
3154 AVStream *st = oc->streams[j];
3155 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3158 if (j == oc->nb_streams)
3159 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3160 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3161 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3164 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3165 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3166 filtergraph_is_simple(ost->filter->graph)) {
3167 FilterGraph *fg = ost->filter->graph;
3169 if (configure_filtergraph(fg)) {
3170 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* frame rate: user option > filter sink > -r on input > input
 * r_frame_rate > hard-coded 25 fps fallback */
3175 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3176 if (!ost->frame_rate.num)
3177 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3178 if (ist && !ost->frame_rate.num)
3179 ost->frame_rate = ist->framerate;
3180 if (ist && !ost->frame_rate.num)
3181 ost->frame_rate = ist->st->r_frame_rate;
3182 if (ist && !ost->frame_rate.num) {
3183 ost->frame_rate = (AVRational){25, 1};
3184 av_log(NULL, AV_LOG_WARNING,
3186 "about the input framerate is available. Falling "
3187 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3188 "if you want a different framerate.\n",
3189 ost->file_index, ost->index);
3191 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* snap to the nearest framerate the encoder supports (unless -vsync
 * forced exact fps via -force_fps) */
3192 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3193 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3194 ost->frame_rate = ost->enc->supported_framerates[idx];
3196 // reduce frame rate for mpeg4 to be within the spec limits
3197 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3198 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3199 ost->frame_rate.num, ost->frame_rate.den, 65535);
3203 switch (enc_ctx->codec_type) {
3204 case AVMEDIA_TYPE_AUDIO:
/* audio parameters come from the buffersink input link */
3205 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3207 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3208 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3209 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3210 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3211 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3212 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3214 case AVMEDIA_TYPE_VIDEO:
3215 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3216 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3217 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3218 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3219 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3220 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3221 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* rescale forced keyframe timestamps into the final encoder timebase */
3223 for (j = 0; j < ost->forced_kf_count; j++)
3224 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3226 enc_ctx->time_base);
3228 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3229 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3230 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3231 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3232 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3233 ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* warn about compatibility-hostile auto-selected pixel formats */
3234 if (!strncmp(ost->enc->name, "libx264", 7) &&
3235 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3236 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3237 av_log(NULL, AV_LOG_WARNING,
3238 "No pixel format specified, %s for H.264 encoding chosen.\n"
3239 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3240 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3241 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3242 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3243 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3244 av_log(NULL, AV_LOG_WARNING,
3245 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3246 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3247 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3248 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3250 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3251 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3253 ost->st->avg_frame_rate = ost->frame_rate;
3256 enc_ctx->width != dec_ctx->width ||
3257 enc_ctx->height != dec_ctx->height ||
3258 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3259 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3262 if (ost->forced_keyframes) {
/* "expr:" form is evaluated per-frame at encode time */
3263 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3264 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3265 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3267 av_log(NULL, AV_LOG_ERROR,
3268 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3271 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3272 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3273 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3274 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3276 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3277 // parse it only for static kf timings
3278 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3279 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3283 case AVMEDIA_TYPE_SUBTITLE:
3284 enc_ctx->time_base = (AVRational){1, 1000};
3285 if (!enc_ctx->width) {
3286 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3287 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3290 case AVMEDIA_TYPE_DATA:
/* Fully initialize one output stream: run the encode or streamcopy setup,
 * open the encoder, copy resulting parameters/side data onto the stream,
 * apply a user -disposition override, initialize bitstream filters, and
 * finally try to open the muxer once all of the file's streams are ready.
 * NOTE(review): intermediate lines are missing from this extract. */
3300 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3304 if (ost->encoding_needed) {
3305 AVCodec *codec = ost->enc;
3306 AVCodecContext *dec = NULL;
3309 ret = init_output_stream_encode(ost);
3313 if ((ist = get_input_stream(ost)))
/* propagate the decoder's ASS subtitle header to the encoder */
3315 if (dec && dec->subtitle_header) {
3316 /* ASS code assumes this buffer is null terminated so add extra byte. */
3317 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3318 if (!ost->enc_ctx->subtitle_header)
3319 return AVERROR(ENOMEM);
3320 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3321 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3323 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3324 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* default audio bitrate of 128k unless the user set b/ab */
3325 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3327 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3328 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3329 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* hand the filter chain's hw frames context to the encoder */
3331 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
3332 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
3333 if (!ost->enc_ctx->hw_frames_ctx)
3334 return AVERROR(ENOMEM);
3337 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3338 if (ret == AVERROR_EXPERIMENTAL)
3339 abort_codec_experimental(codec, 1);
3340 snprintf(error, error_len,
3341 "Error while opening encoder for output stream #%d:%d - "
3342 "maybe incorrect parameters such as bit_rate, rate, width or height",
3343 ost->file_index, ost->index);
/* fixed-frame-size audio encoders need the buffersink to emit frames
 * of exactly frame_size samples */
3346 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3347 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3348 av_buffersink_set_frame_size(ost->filter->filter,
3349 ost->enc_ctx->frame_size);
3350 assert_avoptions(ost->encoder_opts);
3351 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3352 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3353 " It takes bits/s as argument, not kbits/s\n");
3355 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3357 av_log(NULL, AV_LOG_FATAL,
3358 "Error initializing the output stream codec context.\n");
3362 * FIXME: ost->st->codec should't be needed here anymore.
3364 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* copy encoder-produced coded side data onto the stream */
3368 if (ost->enc_ctx->nb_coded_side_data) {
3371 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3372 sizeof(*ost->st->side_data));
3373 if (!ost->st->side_data)
3374 return AVERROR(ENOMEM);
3376 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3377 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3378 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3380 sd_dst->data = av_malloc(sd_src->size);
3382 return AVERROR(ENOMEM);
3383 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3384 sd_dst->size = sd_src->size;
3385 sd_dst->type = sd_src->type;
3386 ost->st->nb_side_data++;
3390 // copy timebase while removing common factors
3391 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3392 ost->st->codec->codec= ost->enc_ctx->codec;
3393 } else if (ost->stream_copy) {
3394 ret = init_output_stream_streamcopy(ost);
3399 * FIXME: will the codec context used by the parser during streamcopy
3400 * This should go away with the new parser API.
3402 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3407 // parse user provided disposition, and update stream values
3408 if (ost->disposition) {
/* local option table so av_opt_eval_flags can parse the -disposition
 * flag string into AV_DISPOSITION_* bits */
3409 static const AVOption opts[] = {
3410 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3411 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3412 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3413 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3414 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3415 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3416 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3417 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3418 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3419 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3420 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3421 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3422 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3423 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3426 static const AVClass class = {
3428 .item_name = av_default_item_name,
3430 .version = LIBAVUTIL_VERSION_INT,
3432 const AVClass *pclass = &class;
3434 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3439 /* initialize bitstream filters for the output stream
3440 * needs to be done here, because the codec id for streamcopy is not
3441 * known until now */
3442 ret = init_output_bsfs(ost);
3446 ost->initialized = 1;
/* this may write the muxer header if this was the last stream */
3448 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn, once per stream index, when a packet references an input stream
 * that was not present when the file was opened (some containers can grow
 * new streams mid-stream).  nb_streams_warn tracks the highest index
 * already reported so each new stream is announced only once.
 * NOTE(review): this excerpt elides some original lines (braces / early
 * return); comments describe only the visible statements. */
3455 static void report_new_stream(int input_index, AVPacket *pkt)
3457 InputFile *file = input_files[input_index];
3458 AVStream *st = file->ctx->streams[pkt->stream_index];
/* index already covered by an earlier warning -> stay silent */
3460 if (pkt->stream_index < file->nb_streams_warn)
3462 av_log(file->ctx, AV_LOG_WARNING,
3463 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3464 av_get_media_type_string(st->codecpar->codec_type),
3465 input_index, pkt->stream_index,
3466 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* remember that all stream indices up to this one have been reported */
3467 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * One-time setup before the main transcoding loop: bind complex
 * filtergraph outputs to their source input streams, arm framerate
 * emulation, run hwaccel-specific init (QSV/CUVID), open all decoders
 * and encoders, mark unused programs as discarded, write headers for
 * stream-less outputs, and dump the stream mapping to the log.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): this excerpt elides some original lines (braces,
 * continue/goto statements, #if guards); comments cover visible code only.
 */
3470 static int transcode_init(void)
3472 int ret = 0, i, j, k;
3473 AVFormatContext *oc;
3476 char error[1024] = {0};
/* for complex filtergraph outputs with no explicit mapping, record the
 * single feeding input stream as the output's source_index (used for
 * stream-mapping display below) */
3478 for (i = 0; i < nb_filtergraphs; i++) {
3479 FilterGraph *fg = filtergraphs[i];
3480 for (j = 0; j < fg->nb_outputs; j++) {
3481 OutputFilter *ofilter = fg->outputs[j];
3482 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3484 if (fg->nb_inputs != 1)
3486 for (k = nb_input_streams-1; k >= 0 ; k--)
3487 if (fg->inputs[0]->ist == input_streams[k])
3489 ofilter->ost->source_index = k;
3493 /* init framerate emulation */
3494 for (i = 0; i < nb_input_files; i++) {
3495 InputFile *ifile = input_files[i];
3496 if (ifile->rate_emu)
/* stamp each stream with the wall-clock start used by rate emulation */
3497 for (j = 0; j < ifile->nb_streams; j++)
3498 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3501 /* hwaccel transcoding */
3502 for (i = 0; i < nb_output_streams; i++) {
3503 ost = output_streams[i];
3505 if (!ost->stream_copy) {
3507 if (qsv_transcode_init(ost))
3512 if (cuvid_transcode_init(ost))
3518 /* init input streams */
3519 for (i = 0; i < nb_input_streams; i++)
3520 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on decoder-init failure, close every encoder context before bailing */
3521 for (i = 0; i < nb_output_streams; i++) {
3522 ost = output_streams[i];
3523 avcodec_close(ost->enc_ctx);
3528 /* open each encoder */
3529 for (i = 0; i < nb_output_streams; i++) {
3530 ret = init_output_stream(output_streams[i], error, sizeof(error));
3535 /* discard unused programs */
3536 for (i = 0; i < nb_input_files; i++) {
3537 InputFile *ifile = input_files[i];
3538 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3539 AVProgram *p = ifile->ctx->programs[j];
3540 int discard = AVDISCARD_ALL;
/* keep the program if at least one of its streams is actually used */
3542 for (k = 0; k < p->nb_stream_indexes; k++)
3543 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3544 discard = AVDISCARD_DEFAULT;
3547 p->discard = discard;
3551 /* write headers for files with no streams */
3552 for (i = 0; i < nb_output_files; i++) {
3553 oc = output_files[i]->ctx;
3554 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3555 ret = check_init_output_file(output_files[i], i);
3562 /* dump the stream mapping */
3563 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3564 for (i = 0; i < nb_input_streams; i++) {
3565 ist = input_streams[i];
/* report inputs feeding complex (non-simple) filtergraphs */
3567 for (j = 0; j < ist->nb_filters; j++) {
3568 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3569 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3570 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3571 ist->filters[j]->name);
3572 if (nb_filtergraphs > 1)
3573 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3574 av_log(NULL, AV_LOG_INFO, "\n");
3579 for (i = 0; i < nb_output_streams; i++) {
3580 ost = output_streams[i];
3582 if (ost->attachment_filename) {
3583 /* an attached file */
3584 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3585 ost->attachment_filename, ost->file_index, ost->index);
3589 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3590 /* output from a complex graph */
3591 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3592 if (nb_filtergraphs > 1)
3593 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3595 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3596 ost->index, ost->enc ? ost->enc->name : "?");
/* simple input->output mapping line: "#in_file:in_idx -> #out_file:out_idx" */
3600 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3601 input_streams[ost->source_index]->file_index,
3602 input_streams[ost->source_index]->st->index,
3605 if (ost->sync_ist != input_streams[ost->source_index])
3606 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3607 ost->sync_ist->file_index,
3608 ost->sync_ist->st->index);
3609 if (ost->stream_copy)
3610 av_log(NULL, AV_LOG_INFO, " (copy)");
/* transcoding path: print "(codec (decoder) -> codec (encoder))", where
 * "native" means the ffmpeg-internal implementation for that codec */
3612 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3613 const AVCodec *out_codec = ost->enc;
3614 const char *decoder_name = "?";
3615 const char *in_codec_name = "?";
3616 const char *encoder_name = "?";
3617 const char *out_codec_name = "?";
3618 const AVCodecDescriptor *desc;
3621 decoder_name = in_codec->name;
3622 desc = avcodec_descriptor_get(in_codec->id);
3624 in_codec_name = desc->name;
3625 if (!strcmp(decoder_name, in_codec_name))
3626 decoder_name = "native";
3630 encoder_name = out_codec->name;
3631 desc = avcodec_descriptor_get(out_codec->id);
3633 out_codec_name = desc->name;
3634 if (!strcmp(encoder_name, out_codec_name))
3635 encoder_name = "native";
3638 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3639 in_codec_name, decoder_name,
3640 out_codec_name, encoder_name);
3642 av_log(NULL, AV_LOG_INFO, "\n");
/* error path: print the accumulated error message before returning */
3646 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3650 transcode_init_done = 1;
3655 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3656 static int need_output(void)
3660 for (i = 0; i < nb_output_streams; i++) {
3661 OutputStream *ost = output_streams[i];
3662 OutputFile *of = output_files[ost->file_index];
3663 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* skip streams that are finished or whose file hit its size limit */
3665 if (ost->finished ||
3666 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* frame-count limit reached: close every stream of this output file */
3668 if (ost->frame_number >= ost->max_frames) {
3670 for (j = 0; j < of->ctx->nb_streams; j++)
3671 close_output_stream(output_streams[of->ost_index + j]);
3682 * Select the output stream to process.
3684 * @return selected output stream, or NULL if none available
/* Pick the unfinished output stream with the smallest current DTS
 * (rescaled for comparison), i.e. the one furthest behind and therefore
 * most in need of data.  A stream with no DTS yet sorts first.  Returns
 * NULL when the best candidate is currently unavailable (or none exist),
 * signalling the caller to wait/retry. */
3686 static OutputStream *choose_output(void)
3689 int64_t opts_min = INT64_MAX;
3690 OutputStream *ost_min = NULL;
3692 for (i = 0; i < nb_output_streams; i++) {
3693 OutputStream *ost = output_streams[i];
/* INT64_MIN forces streams without any timestamp yet to win the race */
3694 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3695 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3697 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3698 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3700 if (!ost->finished && opts < opts_min) {
/* NULL (not the stream) when the winner is temporarily unavailable */
3702 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios; used
 * around interactive command entry so typed characters are visible.
 * Silently does nothing if stdin is not a tty (tcgetattr fails). */
3708 static void set_tty_echo(int on)
3712 if (tcgetattr(0, &tty) == 0) {
3713 if (on) tty.c_lflag |= ECHO;
3714 else tty.c_lflag &= ~ECHO;
3715 tcsetattr(0, TCSANOW, &tty);
/*
 * Poll the terminal for single-key interactive commands: '+'/'-' adjust
 * log verbosity, 's' toggles the QP histogram, 'c'/'C' send or queue a
 * command to matching filters, 'd'/'D' cycle/set codec debug flags, '?'
 * prints the key help.  Rate-limited to one poll per 100ms of cur_time.
 * Returns AVERROR_EXIT to request termination (signal received or quit
 * key), 0 otherwise.
 * NOTE(review): this excerpt elides some original lines (braces, the
 * read_key() call, 'q' handling); comments cover visible code only.
 */
3720 static int check_keyboard_interaction(int64_t cur_time)
3723 static int64_t last_time;
3724 if (received_nb_signals)
3725 return AVERROR_EXIT;
3726 /* read_key() returns 0 on EOF */
3727 if(cur_time - last_time >= 100000 && !run_as_daemon){
3729 last_time = cur_time;
3733 return AVERROR_EXIT;
3734 if (key == '+') av_log_set_level(av_log_get_level()+10);
3735 if (key == '-') av_log_set_level(av_log_get_level()-10);
3736 if (key == 's') qp_hist ^= 1;
3739 do_hex_dump = do_pkt_dump = 0;
3740 } else if(do_pkt_dump){
3744 av_log_set_level(AV_LOG_DEBUG);
/* 'c' = send command to first matching filter, 'C' = send/queue to all */
3746 if (key == 'c' || key == 'C'){
3747 char buf[4096], target[64], command[256], arg[256] = {0};
3750 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* read one command line, character by character, until EOL or full */
3753 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3758 fprintf(stderr, "\n");
3760 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3761 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3762 target, time, command, arg);
3763 for (i = 0; i < nb_filtergraphs; i++) {
3764 FilterGraph *fg = filtergraphs[i];
/* immediate send ('c' limits to the first filter supporting it) */
3767 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3768 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3769 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3770 } else if (key == 'c') {
3771 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3772 ret = AVERROR_PATCHWELCOME;
/* deferred execution: queue the command for the given time */
3774 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3776 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3781 av_log(NULL, AV_LOG_ERROR,
3782 "Parse error, at least 3 arguments were expected, "
3783 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles debug flags; 'D' prompts for an explicit numeric value */
3786 if (key == 'd' || key == 'D'){
3789 debug = input_streams[0]->st->codec->debug<<1;
3790 if(!debug) debug = 1;
3791 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3798 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3803 fprintf(stderr, "\n");
3804 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3805 fprintf(stderr,"error parsing debug value\n");
/* propagate the new debug value to all decoder and encoder contexts */
3807 for(i=0;i<nb_input_streams;i++) {
3808 input_streams[i]->st->codec->debug = debug;
3810 for(i=0;i<nb_output_streams;i++) {
3811 OutputStream *ost = output_streams[i];
3812 ost->enc_ctx->debug = debug;
3814 if(debug) av_log_set_level(AV_LOG_DEBUG);
3815 fprintf(stderr,"debug=%d\n", debug);
3818 fprintf(stderr, "key function\n"
3819 "? show this help\n"
3820 "+ increase verbosity\n"
3821 "- decrease verbosity\n"
3822 "c Send command to first matching filter supporting it\n"
3823 "C Send/Queue command to all matching filters\n"
3824 "D cycle through available debug modes\n"
3825 "h dump packets/hex press to cycle through the 3 states\n"
3827 "s Show QP histogram\n"
/*
 * Per-input-file demuxer thread (arg is the InputFile*): reads packets
 * with av_read_frame() and forwards them to the main thread through the
 * file's thread message queue.  In non-blocking mode a full queue first
 * triggers a one-time warning suggesting a larger -thread_queue_size,
 * then falls back to a blocking send.  On read error/EOF the error code
 * is recorded on the queue so the receiver sees it.
 * NOTE(review): this excerpt elides some original lines (loop header,
 * braces); comments cover visible code only.
 */
3834 static void *input_thread(void *arg)
3837 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3842 ret = av_read_frame(f->ctx, &pkt);
3844 if (ret == AVERROR(EAGAIN)) {
/* demuxer error or EOF: publish it to the receiving side and stop */
3849 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3852 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3853 if (flags && ret == AVERROR(EAGAIN)) {
/* queue full in non-blocking mode: retry as a blocking send and warn */
3855 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3856 av_log(f->ctx, AV_LOG_WARNING,
3857 "Thread message queue blocking; consider raising the "
3858 "thread_queue_size option (current value: %d)\n",
3859 f->thread_queue_size);
3862 if (ret != AVERROR_EOF)
3863 av_log(f->ctx, AV_LOG_ERROR,
3864 "Unable to send packet to main thread: %s\n",
/* send failed: drop the packet we still own and propagate the error */
3866 av_packet_unref(&pkt);
3867 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down and join all input demuxer threads: signal EOF on each
 * queue's send side, drain any packets still queued (unref'ing them),
 * join the thread and free the queue.  Safe to call when some files
 * never had a thread (NULL queue is skipped). */
3875 static void free_input_threads(void)
3879 for (i = 0; i < nb_input_files; i++) {
3880 InputFile *f = input_files[i];
3883 if (!f || !f->in_thread_queue)
/* make any blocked sender in input_thread() fail out with EOF */
3885 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain remaining packets so their buffers are released */
3886 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3887 av_packet_unref(&pkt);
3889 pthread_join(f->thread, NULL);
3891 av_thread_message_queue_free(&f->in_thread_queue);
/*
 * Start one demuxer thread per input file (skipped entirely when there
 * is only a single input, where threading buys nothing).  Each file
 * gets a message queue of thread_queue_size packets; non-seekable /
 * non-lavfi inputs are read in non-blocking mode.  Returns 0 or a
 * negative AVERROR on allocation/thread-creation failure.
 */
3895 static int init_input_threads(void)
3899 if (nb_input_files == 1)
3902 for (i = 0; i < nb_input_files; i++) {
3903 InputFile *f = input_files[i];
/* live/pipe-like inputs (unseekable, not lavfi) must not block reads */
3905 if (f->ctx->pb ? !f->ctx->pb->seekable :
3906 strcmp(f->ctx->iformat->name, "lavfi"))
3907 f->non_blocking = 1;
3908 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3909 f->thread_queue_size, sizeof(AVPacket));
3913 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3914 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3915 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno-style code, wrap it */
3916 return AVERROR(ret);
/* Receive the next packet for file f from its demuxer thread's message
 * queue; non-blocking receive when the file is in non-blocking mode.
 * NOTE(review): the line selecting the flag condition is elided in this
 * excerpt. */
3922 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3924 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3926 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next demuxed packet for input file f.  Under -re style rate
 * emulation, return EAGAIN while any stream of the file is ahead of
 * wall-clock time, throttling reads to realtime.  With multiple input
 * files the packet comes from the per-file demuxer thread, otherwise
 * directly from av_read_frame(). */
3930 static int get_input_packet(InputFile *f, AVPacket *pkt)
3934 for (i = 0; i < f->nb_streams; i++) {
3935 InputStream *ist = input_streams[f->ist_index + i];
/* compare stream DTS (scaled to microseconds) against elapsed time */
3936 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3937 int64_t now = av_gettime_relative() - ist->start;
3939 return AVERROR(EAGAIN);
3944 if (nb_input_files > 1)
3945 return get_input_packet_mt(f, pkt);
3947 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. a previous step ended in EAGAIN somewhere.
 * NOTE(review): the return statements are elided in this excerpt. */
3950 static int got_eagain(void)
3953 for (i = 0; i < nb_output_streams; i++)
3954 if (output_streams[i]->unavailable)
/* Clear all EAGAIN bookkeeping: every input file's eagain flag and every
 * output stream's unavailable flag, so the next scheduling pass starts
 * fresh. */
3959 static void reset_eagain(void)
3962 for (i = 0; i < nb_input_files; i++)
3963 input_files[i]->eagain = 0;
3964 for (i = 0; i < nb_output_streams; i++)
3965 output_streams[i]->unavailable = 0;
3968 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Helper for -stream_loop duration tracking: compares tmp (in
 * tmp_time_base) against *duration (in time_base) with av_compare_ts and
 * keeps the larger, returning the time base that *duration is now
 * expressed in.  NOTE(review): the assignment branch updating *duration
 * is elided in this excerpt. */
3969 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3970 AVRational time_base)
3976 return tmp_time_base;
3979 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3982 return tmp_time_base;
/*
 * Rewind an input file for -stream_loop: seek back to its start time,
 * flush every active decoder, then estimate the file's total duration
 * (max_pts - min_pts plus the length of the last frame) so subsequent
 * loop iterations can offset timestamps.  When an audio stream is
 * present its sample count gives the authoritative last-frame length;
 * video falls back to framerate / avg_frame_rate / 1 tick.
 * NOTE(review): this excerpt elides some original lines (braces,
 * early returns, has_audio branching); comments cover visible code only.
 */
3988 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3991 AVCodecContext *avctx;
3992 int i, ret, has_audio = 0;
3993 int64_t duration = 0;
3995 ret = av_seek_frame(is, -1, is->start_time, 0);
3999 for (i = 0; i < ifile->nb_streams; i++) {
4000 ist = input_streams[ifile->ist_index + i];
4001 avctx = ist->dec_ctx;
4004 if (ist->decoding_needed) {
/* drain the decoder (NULL packet = flush) then reset its state */
4005 process_input_packet(ist, NULL, 1);
4006 avcodec_flush_buffers(avctx);
4009 /* duration is the length of the last frame in a stream
4010 * when audio stream is present we don't care about
4011 * last video frame length because it's not defined exactly */
4012 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4016 for (i = 0; i < ifile->nb_streams; i++) {
4017 ist = input_streams[ifile->ist_index + i];
4018 avctx = ist->dec_ctx;
/* audio: last-frame duration derived from the decoded sample count */
4021 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4022 AVRational sample_rate = {1, avctx->sample_rate};
4024 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: one frame at the configured or average frame rate */
4028 if (ist->framerate.num) {
4029 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4030 } else if (ist->st->avg_frame_rate.num) {
4031 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4032 } else duration = 1;
4034 if (!ifile->duration)
4035 ifile->time_base = ist->st->time_base;
4036 /* the total duration of the stream, max_pts - min_pts is
4037 * the duration of the stream without the last frame */
4038 duration += ist->max_pts - ist->min_pts;
4039 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* finite loop count: decrement remaining iterations */
4043 if (ifile->loop > 0)
4051 * - 0 -- one packet was read and processed
4052 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4053 * this function should be called again
4054 * - AVERROR_EOF -- this function should not be called again
/*
 * Read and process exactly one packet from input file file_index:
 * fetch it (possibly via the demuxer thread or looping back with
 * seek_to_start), handle EOF by flushing decoders and finishing
 * stream-copy outputs, then apply all timestamp fixups (start-time
 * correction, pts_wrap, ts_offset/ts_scale, loop-duration offset,
 * discontinuity detection) before handing the packet to
 * process_input_packet().  Returns 0, AVERROR(EAGAIN) or AVERROR_EOF
 * as described in the comment above this function.
 * NOTE(review): this excerpt elides some original lines (braces,
 * returns, some conditions); comments cover visible code only.
 */
4056 static int process_input(int file_index)
4058 InputFile *ifile = input_files[file_index];
4059 AVFormatContext *is;
4067 ret = get_input_packet(ifile, &pkt);
4069 if (ret == AVERROR(EAGAIN)) {
/* error/EOF with -stream_loop active: rewind and read again */
4073 if (ret < 0 && ifile->loop) {
4074 if ((ret = seek_to_start(ifile, is)) < 0)
4076 ret = get_input_packet(ifile, &pkt);
4077 if (ret == AVERROR(EAGAIN)) {
4083 if (ret != AVERROR_EOF) {
4084 print_error(is->filename, ret);
/* EOF: flush each active decoder with a NULL packet */
4089 for (i = 0; i < ifile->nb_streams; i++) {
4090 ist = input_streams[ifile->ist_index + i];
4091 if (ist->decoding_needed) {
4092 ret = process_input_packet(ist, NULL, 0);
4097 /* mark all outputs that don't go through lavfi as finished */
4098 for (j = 0; j < nb_output_streams; j++) {
4099 OutputStream *ost = output_streams[j];
4101 if (ost->source_index == ifile->ist_index + i &&
4102 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4103 finish_output_stream(ost);
4107 ifile->eof_reached = 1;
4108 return AVERROR(EAGAIN);
4114 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4115 is->streams[pkt.stream_index]);
4117 /* the following test is needed in case new streams appear
4118 dynamically in stream : we ignore them */
4119 if (pkt.stream_index >= ifile->nb_streams) {
4120 report_new_stream(file_index, &pkt);
4121 goto discard_packet;
4124 ist = input_streams[ifile->ist_index + pkt.stream_index];
4126 ist->data_size += pkt.size;
4130 goto discard_packet;
4132 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4133 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4138 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4139 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4140 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4141 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4142 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4143 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4144 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4145 av_ts2str(input_files[ist->file_index]->ts_offset),
4146 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* one-time timestamp wrap correction for streams with <64 pts bits */
4149 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4150 int64_t stime, stime2;
4151 // Correcting starttime based on the enabled streams
4152 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4153 // so we instead do it here as part of discontinuity handling
4154 if ( ist->next_dts == AV_NOPTS_VALUE
4155 && ifile->ts_offset == -is->start_time
4156 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4157 int64_t new_start_time = INT64_MAX;
4158 for (i=0; i<is->nb_streams; i++) {
4159 AVStream *st = is->streams[i];
4160 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4162 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4164 if (new_start_time > is->start_time) {
4165 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4166 ifile->ts_offset = -new_start_time;
4170 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4171 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4172 ist->wrap_correction_done = 1;
/* undo a wrap if the timestamp is more than half a wrap past start */
4174 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4175 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4176 ist->wrap_correction_done = 0;
4178 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4179 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4180 ist->wrap_correction_done = 0;
4184 /* add the stream-global side data to the first packet */
4185 if (ist->nb_packets == 1) {
4186 if (ist->st->nb_side_data)
4187 av_packet_split_side_data(&pkt);
4188 for (i = 0; i < ist->st->nb_side_data; i++) {
4189 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* skip side data the packet already carries */
4192 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* display matrix is consumed by autorotate instead of forwarded */
4194 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4197 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4201 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file timestamp offset, then the user ts_scale */
4205 if (pkt.dts != AV_NOPTS_VALUE)
4206 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4207 if (pkt.pts != AV_NOPTS_VALUE)
4208 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4210 if (pkt.pts != AV_NOPTS_VALUE)
4211 pkt.pts *= ist->ts_scale;
4212 if (pkt.dts != AV_NOPTS_VALUE)
4213 pkt.dts *= ist->ts_scale;
4215 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* first packet of an A/V stream in a discontinuous format: align its
 * DTS with the last timestamp seen across the whole file */
4216 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4217 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4218 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4219 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4220 int64_t delta = pkt_dts - ifile->last_ts;
4221 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4222 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4223 ifile->ts_offset -= delta;
4224 av_log(NULL, AV_LOG_DEBUG,
4225 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4226 delta, ifile->ts_offset);
4227 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4228 if (pkt.pts != AV_NOPTS_VALUE)
4229 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and
 * track min/max pts for the next duration estimate */
4233 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4234 if (pkt.pts != AV_NOPTS_VALUE) {
4235 pkt.pts += duration;
4236 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4237 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4240 if (pkt.dts != AV_NOPTS_VALUE)
4241 pkt.dts += duration;
4243 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* mid-stream discontinuity handling against the predicted next_dts */
4244 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4245 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4246 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4248 int64_t delta = pkt_dts - ist->next_dts;
4249 if (is->iformat->flags & AVFMT_TS_DISCONT) {
/* discontinuous formats: absorb large jumps into ts_offset */
4250 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4251 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4252 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4253 ifile->ts_offset -= delta;
4254 av_log(NULL, AV_LOG_DEBUG,
4255 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4256 delta, ifile->ts_offset);
4257 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4258 if (pkt.pts != AV_NOPTS_VALUE)
4259 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* continuous formats: drop wildly out-of-range timestamps instead */
4262 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4263 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4264 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4265 pkt.dts = AV_NOPTS_VALUE;
4267 if (pkt.pts != AV_NOPTS_VALUE){
4268 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4269 delta = pkt_pts - ist->next_dts;
4270 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4271 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4272 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4273 pkt.pts = AV_NOPTS_VALUE;
/* remember the last DTS seen for inter-stream discontinuity checks */
4279 if (pkt.dts != AV_NOPTS_VALUE)
4280 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4283 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4284 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4285 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4286 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4287 av_ts2str(input_files[ist->file_index]->ts_offset),
4288 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4291 sub2video_heartbeat(ist, pkt.pts);
4293 process_input_packet(ist, &pkt, 0);
4296 av_packet_unref(&pkt);
4302 * Perform a step of transcoding for the specified filter graph.
4304 * @param[in] graph filter graph to consider
4305 * @param[out] best_ist input stream where a frame would allow to continue
4306 * @return 0 for success, <0 for error
/*
 * Ask the filtergraph for output (avfilter_graph_request_oldest):
 * on success reap finished frames; on EOF reap and close all of the
 * graph's output streams; on EAGAIN pick the input filter with the most
 * failed buffer requests and report its input stream via *best_ist so
 * the caller knows which input to feed next.  If no input can currently
 * deliver (all EAGAIN/EOF), mark the graph's outputs unavailable.
 * NOTE(review): this excerpt elides some original lines (braces,
 * *best_ist assignment); comments cover visible code only.
 */
4308 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4311 int nb_requests, nb_requests_max = 0;
4312 InputFilter *ifilter;
4316 ret = avfilter_graph_request_oldest(graph->graph);
4318 return reap_filters(0);
4320 if (ret == AVERROR_EOF) {
/* flush: reap everything remaining, then close the graph's outputs */
4321 ret = reap_filters(1);
4322 for (i = 0; i < graph->nb_outputs; i++)
4323 close_output_stream(graph->outputs[i]->ost);
4326 if (ret != AVERROR(EAGAIN))
4329 for (i = 0; i < graph->nb_inputs; i++) {
4330 ifilter = graph->inputs[i];
/* skip inputs whose file cannot deliver right now */
4332 if (input_files[ist->file_index]->eagain ||
4333 input_files[ist->file_index]->eof_reached)
/* prefer the input the graph has requested most often and been refused */
4335 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4336 if (nb_requests > nb_requests_max) {
4337 nb_requests_max = nb_requests;
/* nothing can feed the graph: stall its outputs until inputs recover */
4343 for (i = 0; i < graph->nb_outputs; i++)
4344 graph->outputs[i]->ost->unavailable = 1;
4350 * Run a single step of transcoding.
4352 * @return 0 for success, <0 for error
/*
 * One scheduling step: choose the neediest output stream; if it is fed
 * by a complex filtergraph, let the graph pick which input stream to
 * read, otherwise use the stream's direct source.  Then read/process one
 * packet from that input and reap any frames the filters produced.
 * NOTE(review): this excerpt elides some original lines (braces,
 * intermediate returns); comments cover visible code only.
 */
4354 static int transcode_step(void)
4360 ost = choose_output();
/* no runnable output and no pending EAGAIN: we are done */
4367 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* complex graph: the graph decides which input stream it needs next */
4372 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4377 av_assert0(ost->source_index >= 0);
4378 ist = input_streams[ost->source_index];
4381 ret = process_input(ist->file_index);
4382 if (ret == AVERROR(EAGAIN)) {
/* input temporarily dry: park this output until data arrives */
4383 if (input_files[ist->file_index]->eagain)
4384 ost->unavailable = 1;
4389 return ret == AVERROR_EOF ? 0 : ret;
4391 return reap_filters(0);
4395 * The following code is the main loop of the file converter
/*
 * Main conversion driver: run transcode_init(), then loop calling
 * transcode_step() until no output is needed or a termination signal /
 * quit key arrives; afterwards flush decoders, write output trailers,
 * close encoders and decoders, and free per-output-stream resources.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): this excerpt elides some original lines (braces,
 * gotos, #if HAVE_PTHREADS guards); comments cover visible code only.
 */
4397 static int transcode(void)
4400 AVFormatContext *os;
4403 int64_t timer_start;
4404 int64_t total_packets_written = 0;
4406 ret = transcode_init();
4410 if (stdin_interaction) {
4411 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4414 timer_start = av_gettime_relative();
4417 if ((ret = init_input_threads()) < 0)
4421 while (!received_sigterm) {
4422 int64_t cur_time= av_gettime_relative();
4424 /* if 'q' pressed, exits */
4425 if (stdin_interaction)
4426 if (check_keyboard_interaction(cur_time) < 0)
4429 /* check if there's any stream where output is still needed */
4430 if (!need_output()) {
4431 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4435 ret = transcode_step();
4436 if (ret < 0 && ret != AVERROR_EOF) {
4438 av_strerror(ret, errbuf, sizeof(errbuf));
4440 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4444 /* dump report by using the output first video and audio streams */
4445 print_report(0, timer_start, cur_time);
4448 free_input_threads();
4451 /* at the end of stream, we must flush the decoder buffers */
4452 for (i = 0; i < nb_input_streams; i++) {
4453 ist = input_streams[i];
4454 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4455 process_input_packet(ist, NULL, 0);
4462 /* write the trailer if needed and close file */
4463 for (i = 0; i < nb_output_files; i++) {
4464 os = output_files[i]->ctx;
4465 if (!output_files[i]->header_written) {
4466 av_log(NULL, AV_LOG_ERROR,
4467 "Nothing was written into output file %d (%s), because "
4468 "at least one of its streams received no packets.\n",
4472 if ((ret = av_write_trailer(os)) < 0) {
4473 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4479 /* dump report by using the first video and audio streams */
4480 print_report(1, timer_start, av_gettime_relative());
4482 /* close each encoder */
4483 for (i = 0; i < nb_output_streams; i++) {
4484 ost = output_streams[i];
4485 if (ost->encoding_needed) {
4486 av_freep(&ost->enc_ctx->stats_in);
4488 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was ever written */
4491 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4492 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4496 /* close each decoder */
4497 for (i = 0; i < nb_input_streams; i++) {
4498 ist = input_streams[i];
4499 if (ist->decoding_needed) {
4500 avcodec_close(ist->dec_ctx);
4501 if (ist->hwaccel_uninit)
4502 ist->hwaccel_uninit(ist->dec_ctx);
4506 av_buffer_unref(&hw_device_ctx);
4513 free_input_threads();
/* release per-output-stream resources (logfiles, option dicts, etc.) */
4516 if (output_streams) {
4517 for (i = 0; i < nb_output_streams; i++) {
4518 ost = output_streams[i];
4521 if (fclose(ost->logfile))
4522 av_log(NULL, AV_LOG_ERROR,
4523 "Error closing logfile, loss of information possible: %s\n",
4524 av_err2str(AVERROR(errno)));
4525 ost->logfile = NULL;
4527 av_freep(&ost->forced_kf_pts);
4528 av_freep(&ost->apad);
4529 av_freep(&ost->disposition);
4530 av_dict_free(&ost->encoder_opts);
4531 av_dict_free(&ost->sws_dict);
4532 av_dict_free(&ost->swr_opts);
4533 av_dict_free(&ost->resample_opts);
/* Return the process's consumed user CPU time in microseconds,
 * via getrusage() on POSIX, GetProcessTimes() on Windows (FILETIME is
 * in 100ns units, hence the /10), or wall-clock time as a last resort. */
4541 static int64_t getutime(void)
4544 struct rusage rusage;
4546 getrusage(RUSAGE_SELF, &rusage);
4547 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4548 #elif HAVE_GETPROCESSTIMES
4550 FILETIME c, e, k, u;
4551 proc = GetCurrentProcess();
4552 GetProcessTimes(proc, &c, &e, &k, &u);
4553 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4555 return av_gettime_relative();
/* Return the peak memory usage of the process in bytes: ru_maxrss (KiB,
 * hence *1024) where struct rusage provides it, PeakPagefileUsage on
 * Windows; used for the final benchmark report. */
4559 static int64_t getmaxrss(void)
4561 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4562 struct rusage rusage;
4563 getrusage(RUSAGE_SELF, &rusage);
4564 return (int64_t)rusage.ru_maxrss * 1024;
4565 #elif HAVE_GETPROCESSMEMORYINFO
4567 PROCESS_MEMORY_COUNTERS memcounters;
4568 proc = GetCurrentProcess();
4569 memcounters.cb = sizeof(memcounters);
4570 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4571 return memcounters.PeakPagefileUsage;
/* No-op log callback installed by the (undocumented) -d daemon flag to
 * silence all library logging.  Body is elided in this excerpt. */
4577 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4581 int main(int argc, char **argv)
4588 register_exit(ffmpeg_cleanup);
4590 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4592 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4593 parse_loglevel(argc, argv, options);
4595 if(argc>1 && !strcmp(argv[1], "-d")){
4597 av_log_set_callback(log_callback_null);
4602 avcodec_register_all();
4604 avdevice_register_all();
4606 avfilter_register_all();
4608 avformat_network_init();
4610 show_banner(argc, argv, options);
4612 /* parse options and open all input/output files */
4613 ret = ffmpeg_parse_options(argc, argv);
4617 if (nb_output_files <= 0 && nb_input_files == 0) {
4619 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4623 /* file converter / grab */
4624 if (nb_output_files <= 0) {
4625 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4629 // if (nb_input_files == 0) {
4630 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4634 for (i = 0; i < nb_output_files; i++) {
4635 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4639 current_time = ti = getutime();
4640 if (transcode() < 0)
4642 ti = getutime() - ti;
4644 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4646 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4647 decode_error_stat[0], decode_error_stat[1]);
4648 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4651 exit_program(received_nb_signals ? 255 : main_return_code);
4652 return main_return_code;