2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* File-scope state of the ffmpeg CLI tool.
 * NOTE(review): this excerpt is line-sampled; intermediate lines are elided. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log file handle for -vstats; lazily opened in do_video_stats(). */
112 static FILE *vstats_file;
/* Names of the constants usable in -force_key_frames expressions. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
/* Frame duplication/drop bookkeeping for the vsync code in do_video_out(). */
128 static int nb_frames_dup = 0;
129 static unsigned dup_warning = 1000;
130 static int nb_frames_drop = 0;
131 static int64_t decode_error_stat[2];
133 static int want_sdp = 1;
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, allocated on demand in do_subtitle_out(). */
138 static uint8_t *subtitle_out;
/* Global registries of input/output streams and files (owned here,
 * freed in ffmpeg_cleanup()). */
140 InputStream **input_streams = NULL;
141 int nb_input_streams = 0;
142 InputFile **input_files = NULL;
143 int nb_input_files = 0;
145 OutputStream **output_streams = NULL;
146 int nb_output_streams = 0;
147 OutputFile **output_files = NULL;
148 int nb_output_files = 0;
150 FilterGraph **filtergraphs;
155 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored by term_exit_sigsafe() when restore_tty is set. */
156 static struct termios oldtty;
157 static int restore_tty;
161 static void free_input_threads(void);
165 Convert subtitles to video with alpha to insert them in filter graphs.
166 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a fully transparent RGB32 canvas sized from
 * the decoder (falling back to the configured sub2video w/h) and zero its
 * pixel data. Returns <0 on allocation failure (elided return path).
 * NOTE(review): interior lines are elided in this excerpt. */
169 static int sub2video_get_blank_frame(InputStream *ist)
172     AVFrame *frame = ist->sub2video.frame;
174     av_frame_unref(frame);
/* Prefer decoder dimensions when known; otherwise use the sub2video defaults. */
175 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the frame buffer. */
178 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Clear to transparent black: height * linesize bytes of plane 0. */
180 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle (paletted, PAL8 + palette in data[1])
 * into the RGB32 sub2video canvas, expanding palette indices to 32-bit pixels.
 * Rejects non-bitmap subtitles and rectangles that overflow the w x h canvas.
 * NOTE(review): interior lines are elided in this excerpt. */
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
187     uint32_t *pal, *dst2;
191     if (r->type != SUBTITLE_BITMAP) {
192         av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds-check the rectangle against the destination canvas. */
195 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196     av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197         r->x, r->y, r->w, r->h, w, h
/* Advance to the top-left corner of the rectangle (4 bytes per RGB32 pixel). */
202 dst += r->y * dst_linesize + r->x * 4;
/* data[1] holds the AVPALETTE for the paletted source bitmap. */
204 pal = (uint32_t *)r->data[1];
205 for (y = 0; y < r->h; y++) {
206     dst2 = (uint32_t *)dst;
/* Palette lookup per source pixel. */
208 for (x = 0; x < r->w; x++)
209     *(dst2++) = pal[*(src2++)];
211 src += r->linesize[0];
/* Push the current sub2video canvas (stamped with pts) into every buffersrc
 * filter fed by this input stream, keeping our own reference to the frame.
 * NOTE(review): interior lines are elided in this excerpt. */
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217     AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been rendered (data allocated) before pushing. */
220 av_assert1(frame->data[0]);
221 ist->sub2video.last_pts = frame->pts = pts;
222 for (i = 0; i < ist->nb_filters; i++)
/* KEEP_REF: the same frame is reused for later heartbeats; PUSH: deliver now. */
223 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224     AV_BUFFERSRC_FLAG_KEEP_REF |
225     AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto a fresh blank canvas and push it to the filters.
 * When sub is NULL (heartbeat/flush path, per callers) an empty canvas is
 * pushed using the previously recorded end_pts. pts/end_pts are rescaled
 * from AV_TIME_BASE to the stream time base.
 * NOTE(review): interior lines (including the sub==NULL branch) are elided. */
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
230     AVFrame *frame = ist->sub2video.frame;
234     int64_t pts, end_pts;
/* start/end_display_time are in ms relative to sub->pts (AV_TIME_BASE units). */
239 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240     AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242     AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
/* Elided branch: with no subtitle, reuse the last end_pts as the frame pts. */
245 pts = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250     av_log(ist->dec_ctx, AV_LOG_ERROR,
251         "Impossible to get a blank canvas.\n");
254 dst = frame->data [0];
255 dst_linesize = frame->linesize[0];
256 for (i = 0; i < num_rects; i++)
257     sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
259 ist->sub2video.end_pts = end_pts;
/* Called when a frame is read from the file containing ist: re-send the
 * current sub2video frame on every sub2video stream of the same file, so
 * filters (e.g. overlay) waiting on a subtitle input do not stall the graph.
 * NOTE(review): interior lines are elided in this excerpt. */
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264     InputFile *infile = input_files[ist->file_index];
268     /* When a frame is read from a file, examine all sub2video streams in
269        the same file and send the sub2video frame again. Otherwise, decoded
270        video frames could be accumulating in the filter graph while a filter
271        (possibly overlay) is desperately waiting for a subtitle frame. */
272     for (i = 0; i < infile->nb_streams; i++) {
273         InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video streams (no canvas allocated). */
274 if (!ist2->sub2video.frame)
276 /* subtitles seem to be usually muxed ahead of other streams;
277    if not, subtracting a larger time here is necessary */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
/* Refresh the canvas if the current subtitle has expired or was never drawn. */
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283     sub2video_update(ist2, NULL);
/* Only push when some downstream buffersrc actually failed a frame request. */
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285     nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: push a final empty canvas if
 * the last subtitle had a finite end time, then send NULL (EOF) to each filter.
 * NOTE(review): interior lines are elided in this excerpt. */
291 static void sub2video_flush(InputStream *ist)
295     if (ist->sub2video.end_pts < INT64_MAX)
296         sub2video_update(ist, NULL);
/* NULL frame signals EOF to the buffersrc. */
297 for (i = 0; i < ist->nb_filters; i++)
298     av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the tty attributes saved in
 * term_init(). Called from signal handlers, so only signal-safe calls here.
 * NOTE(review): interior lines (restore_tty guard) are elided in this excerpt. */
303 static void term_exit_sigsafe(void)
307     tcsetattr (0, TCSANOW, &oldtty);
/* AV_LOG_QUIET with an empty string flushes/teminates any progress line. */
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal-driven shutdown state. volatile: written from signal handlers,
 * read from the main loop. */
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
/* Record which signal arrived and how many times; after more than 3 signals,
 * hard-exit (elided) since the user clearly wants out NOW.
 * NOTE(review): interior lines are elided in this excerpt. */
324 sigterm_handler(int sig)
326     received_sigterm = sig;
327     received_nb_signals++;
329     if(received_nb_signals > 3) {
/* write() is async-signal-safe, unlike fprintf. */
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331     strlen("Received > 3 system signals, hard exiting\n"));
337 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * sigterm_handler() path used on POSIX.
 * NOTE(review): interior lines (switch header, returns) are elided. */
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340     av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT. */
345 case CTRL_BREAK_EVENT:
346     sigterm_handler(SIGINT);
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352     sigterm_handler(SIGTERM);
353     /* Basically, with these 3 events, when we return from this method the
354        process is hard terminated, so stall as long as we need to
355        to try and let the main thread(s) clean up and gracefully terminate
356        (we have at most 5 seconds, but should be done far before that). */
357     while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): this is the body of term_init() — the function header (around
 * original line 367) is elided in this excerpt.
 * Puts the tty into raw-ish mode for interactive key handling and installs
 * the signal handlers used for graceful shutdown. */
372 if (!run_as_daemon && stdin_interaction) {
374     if (tcgetattr (0, &tty) == 0) {
/* Classic raw-mode setup: disable input translation, echo and canonical mode.
 * (oldtty is saved in elided lines and restored by term_exit_sigsafe().) */
378 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
379     |INLCR|IGNCR|ICRNL|IXON);
380 tty.c_oflag |= OPOST;
381 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
382 tty.c_cflag &= ~(CSIZE|PARENB);
387 tcsetattr (0, TCSANOW, &tty);
389 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
393 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
394 signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
/* SIGXCPU: CPU time limit exceeded — treat like a termination request. */
396 signal(SIGXCPU, sigterm_handler);
398 #if HAVE_SETCONSOLECTRLHANDLER
399 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
403 /* read a key without blocking */
/* POSIX path: select() with (elided) zero timeout on stdin; Windows path:
 * PeekNamedPipe when stdin is a pipe rather than a console.
 * NOTE(review): most of this function is elided in this excerpt. */
404 static int read_key(void)
416     n = select(1, &rfds, NULL, NULL, &tv);
425 #    if HAVE_PEEKNAMEDPIPE
427     static HANDLE input_handle;
/* GetConsoleMode fails for pipes — use that to detect pipe-redirected stdin. */
430 input_handle = GetStdHandle(STD_INPUT_HANDLE);
431 is_pipe = !GetConsoleMode(input_handle, &dw);
435 /* When running under a GUI, you will end here. */
436 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
437     // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has been received
 * (during init, a single signal suffices; after init, transcode_init_done==1
 * so a second signal is required to interrupt I/O).
 * NOTE(review): opening brace line elided in this excerpt. */
455 static int decode_interrupt_cb(void *ctx)
457     return received_nb_signals > transcode_init_done;
/* Shared interrupt callback handed to all AVFormatContexts. */
460 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down every global structure before exit: filtergraphs, output files &
 * streams, input files & streams, the vstats file, and libavformat's network
 * layer, then report why we are exiting. Registered as the exit handler.
 * NOTE(review): this excerpt is line-sampled; many lines are elided. */
462 static void ffmpeg_cleanup(int ret)
467     int maxrss = getmaxrss() / 1024;
468     av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graph, then each input/output pad's resources --- */
471 for (i = 0; i < nb_filtergraphs; i++) {
472     FilterGraph *fg = filtergraphs[i];
473     avfilter_graph_free(&fg->graph);
474     for (j = 0; j < fg->nb_inputs; j++) {
475         av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
476         av_freep(&fg->inputs[j]->name);
477         av_freep(&fg->inputs[j]);
479     av_freep(&fg->inputs);
480     for (j = 0; j < fg->nb_outputs; j++) {
481         av_freep(&fg->outputs[j]->name);
482         av_freep(&fg->outputs[j]->formats);
483         av_freep(&fg->outputs[j]->channel_layouts);
484         av_freep(&fg->outputs[j]->sample_rates);
485         av_freep(&fg->outputs[j]);
487     av_freep(&fg->outputs);
488     av_freep(&fg->graph_desc);
490     av_freep(&filtergraphs[i]);
492 av_freep(&filtergraphs);
494 av_freep(&subtitle_out);
/* --- output files: close pb (elided) unless AVFMT_NOFILE, free contexts --- */
497 for (i = 0; i < nb_output_files; i++) {
498     OutputFile *of = output_files[i];
503     if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
505     avformat_free_context(s);
506     av_dict_free(&of->opts);
508     av_freep(&output_files[i]);
/* --- output streams: bitstream filters, cached frames, parsers, queues --- */
510 for (i = 0; i < nb_output_streams; i++) {
511     OutputStream *ost = output_streams[i];
516     for (j = 0; j < ost->nb_bitstream_filters; j++)
517         av_bsf_free(&ost->bsf_ctx[j]);
518     av_freep(&ost->bsf_ctx);
519     av_freep(&ost->bsf_extradata_updated);
521     av_frame_free(&ost->filtered_frame);
522     av_frame_free(&ost->last_frame);
523     av_dict_free(&ost->encoder_opts);
525     av_parser_close(ost->parser);
526     avcodec_free_context(&ost->parser_avctx);
528     av_freep(&ost->forced_keyframes);
529     av_expr_free(ost->forced_keyframes_pexpr);
530     av_freep(&ost->avfilter);
531     av_freep(&ost->logfile_prefix);
533     av_freep(&ost->audio_channels_map);
534     ost->audio_channels_mapped = 0;
536     av_dict_free(&ost->sws_dict);
538     avcodec_free_context(&ost->enc_ctx);
539     avcodec_parameters_free(&ost->ref_par);
/* Drain and free any packets still waiting in the muxing queue. */
541 while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
543     av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
544     av_packet_unref(&pkt);
546 av_fifo_freep(&ost->muxing_queue);
548 av_freep(&output_streams[i]);
/* Input threads must be stopped before closing input files. */
551 free_input_threads();
553 for (i = 0; i < nb_input_files; i++) {
554     avformat_close_input(&input_files[i]->ctx);
555     av_freep(&input_files[i]);
557 for (i = 0; i < nb_input_streams; i++) {
558     InputStream *ist = input_streams[i];
560     av_frame_free(&ist->decoded_frame);
561     av_frame_free(&ist->filter_frame);
562     av_dict_free(&ist->decoder_opts);
563     avsubtitle_free(&ist->prev_sub.subtitle);
564     av_frame_free(&ist->sub2video.frame);
565     av_freep(&ist->filters);
566     av_freep(&ist->hwaccel_device);
567     av_freep(&ist->dts_buffer);
569     avcodec_free_context(&ist->dec_ctx);
571     av_freep(&input_streams[i]);
/* fclose can fail on buffered write errors — report, don't ignore. */
575 if (fclose(vstats_file))
576     av_log(NULL, AV_LOG_ERROR,
577         "Error closing vstats file, loss of information possible: %s\n",
578         av_err2str(AVERROR(errno)));
580 av_freep(&vstats_filename);
582 av_freep(&input_streams);
583 av_freep(&input_files);
584 av_freep(&output_streams);
585 av_freep(&output_files);
589 avformat_network_deinit();
/* Explain the exit: signal-driven shutdown vs. transcode failure. */
591 if (received_sigterm) {
592     av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
593         (int) received_sigterm);
594 } else if (ret && transcode_init_done) {
595     av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to drop options already consumed by one component from another's dict.
 * NOTE(review): opening brace line elided in this excerpt. */
601 void remove_avoptions(AVDictionary **a, AVDictionary *b)
603     AVDictionaryEntry *t = NULL;
605     while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
606         av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fatal error if any option remains unconsumed in m: the first leftover key
 * is reported as an unknown option (exit path elided).
 * NOTE(review): interior lines are elided in this excerpt. */
610 void assert_avoptions(AVDictionary *m)
612     AVDictionaryEntry *t;
613     if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
614         av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Report that an experimental codec was requested without -strict (body elided). */
619 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, log the user CPU time elapsed since the previous call
 * (stored in current_time, updated in elided lines) tagged with a printf-style
 * label; call with fmt==NULL to only reset the timer.
 * NOTE(review): interior lines are elided in this excerpt. */
624 static void update_benchmark(const char *fmt, ...)
626     if (do_benchmark_all) {
627         int64_t t = getutime();
633         vsnprintf(buf, sizeof(buf), fmt, va);
635         av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `this_stream` flags for ost itself,
 * `others` flags for all remaining streams. Used on fatal mux errors.
 * NOTE(review): opening brace line elided in this excerpt. */
641 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
644     for (i = 0; i < nb_output_streams; i++) {
645         OutputStream *ost2 = output_streams[i];
646         ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer: buffer it while the muxer header is not yet
 * written, enforce -frames limits, collect stats/side data, sanitize DTS/PTS
 * monotonicity, then av_interleaved_write_frame(). Consumes the packet.
 * NOTE(review): this excerpt is line-sampled; many lines are elided, so the
 * exact control flow between the visible statements is not fully shown. */
650 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
652     AVFormatContext *s = of->ctx;
653     AVStream *st = ost->st;
656     if (!of->header_written) {
658         /* the muxer is not initialized yet, buffer the packet */
/* Grow the FIFO geometrically, capped by -max_muxing_queue_size. */
659 if (!av_fifo_space(ost->muxing_queue)) {
660     int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
661         ost->max_muxing_queue_size);
662     if (new_size <= av_fifo_size(ost->muxing_queue)) {
663         av_log(NULL, AV_LOG_ERROR,
664             "Too many packets buffered for output stream %d:%d.\n",
665             ost->file_index, ost->st->index);
668     ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Move ownership of the packet into the queue. */
672 av_packet_move_ref(&tmp_pkt, pkt);
673 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* -vsync drop / negative async: strip timestamps entirely. */
677 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
678     (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
679     pkt->pts = pkt->dts = AV_NOPTS_VALUE;
682 /*
683  * Audio encoders may split the packets --  #frames in != #packets out.
684  * But there is no reordering, so we can limit the number of output packets
685  * by simply dropping them here.
686  * Counting encoded video frames needs to be done separately because of
687  * reordering, see do_video_out()
688  */
688 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
689     if (ost->frame_number >= ost->max_frames) {
690         av_packet_unref(pkt);
/* Harvest encoder quality/error stats from QUALITY_STATS side data. */
695 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
697     uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
699     ost->quality = sd ? AV_RL32(sd) : -1;
700     ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
702     for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
704         ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: the duration is dictated by the frame rate, not the encoder. */
709 if (ost->frame_rate.num && ost->is_cfr) {
710     if (pkt->duration > 0)
711         av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
712     pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
717 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts > pts is invalid; replace both with the median of
 * {pts, dts, last_mux_dts+1} (the three-way min/max trick below). */
718 if (pkt->dts != AV_NOPTS_VALUE &&
719     pkt->pts != AV_NOPTS_VALUE &&
720     pkt->dts > pkt->pts) {
721     av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
723         ost->file_index, ost->st->index);
725     pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
726              - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
727              - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless AVFMT_TS_NONSTRICT) increasing DTS per stream. */
729 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
730     pkt->dts != AV_NOPTS_VALUE &&
731     !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
732     ost->last_mux_dts != AV_NOPTS_VALUE) {
733     int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
734     if (pkt->dts < max) {
735         int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
736         av_log(s, loglevel, "Non-monotonous DTS in output stream "
737             "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
738             ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
740         av_log(NULL, AV_LOG_FATAL, "aborting.\n");
743         av_log(s, loglevel, "changing to %"PRId64". This may result "
744             "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
746 if (pkt->pts >= pkt->dts)
747     pkt->pts = FFMAX(pkt->pts, max);
752 ost->last_mux_dts = pkt->dts;
754 ost->data_size += pkt->size;
755 ost->packets_written++;
757 pkt->stream_index = ost->index;
/* -debug_ts style trace of what goes into the muxer (guard elided). */
760 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
761     "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
762     av_get_media_type_string(ost->enc_ctx->codec_type),
763     av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
764     av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
769 ret = av_interleaved_write_frame(s, pkt);
/* On mux failure, finish all streams so the transcode loop can wind down. */
771 print_error("av_interleaved_write_frame()", ret);
772 main_return_code = 1;
773 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
775 av_packet_unref(pkt);
/* Mark ost's encoder finished and clamp the file's recording time to the
 * stream's current end position so the other streams stop at the same point.
 * NOTE(review): interior lines are elided in this excerpt. */
778 static void close_output_stream(OutputStream *ost)
780     OutputFile *of = output_files[ost->file_index];
782     ost->finished |= ENCODER_FINISHED;
784     int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
785     of->recording_time = FFMIN(of->recording_time, end);
/* Run a packet through the output stream's bitstream-filter chain (if any)
 * and hand each resulting packet to write_packet(); with no BSFs the packet
 * goes straight to write_packet().
 * NOTE(review): line-sampled excerpt — the loop structure around the visible
 * statements is elided. */
789 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
793     /* apply the output bitstream filters, if any */
794     if (ost->nb_bitstream_filters) {
797         av_packet_split_side_data(pkt);
798         ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
804         /* get a packet from the previous filter up the chain */
805         ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
/* EAGAIN: the previous filter needs more input before it can output. */
806 if (ret == AVERROR(EAGAIN)) {
812 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
813  * the api states this shouldn't happen after init(). Propagate it here to the
814  * muxer and to the next filters in the chain to workaround this.
815  * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
816  * par_out->extradata and adapt muxers accordingly to get rid of this. */
817 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
818     ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
821     ost->bsf_extradata_updated[idx - 1] |= 1;
824 /* send it to the next filter down the chain or to the muxer */
825 if (idx < ost->nb_bitstream_filters) {
826     /* HACK/FIXME! - See above */
827     if (!(ost->bsf_extradata_updated[idx] & 2)) {
828         ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
831         ost->bsf_extradata_updated[idx] |= 2;
833     ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
838 write_packet(of, pkt, ost);
/* No bitstream filters: write directly. */
841 write_packet(of, pkt, ost);
844 if (ret < 0 && ret != AVERROR_EOF) {
845     av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
846         "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether encoding should continue for ost: closes the stream and
 * (per the visible comparison) signals stop once the stream's position
 * passes the output file's -t recording_time.
 * NOTE(review): return statements are elided in this excerpt. */
852 static int check_recording_time(OutputStream *ost)
854     OutputFile *of = output_files[ost->file_index];
856     if (of->recording_time != INT64_MAX &&
857         av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
858                       AV_TIME_BASE_Q) >= 0) {
859         close_output_stream(ost);
/* Encode one audio frame with the send_frame/receive_packet API, advance
 * sync_opts by nb_samples, rescale packet timestamps to the stream time base
 * and pass packets on to output_packet().
 * NOTE(review): line-sampled excerpt — receive loop structure is elided. */
865 static void do_audio_out(OutputFile *of, OutputStream *ost,
868     AVCodecContext *enc = ost->enc_ctx;
872     av_init_packet(&pkt);
/* Respect -t: stop once past the recording time. */
876 if (!check_recording_time(ost))
/* Without reliable pts (or with forced resync), stamp from our own counter. */
879 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
880     frame->pts = ost->sync_opts;
881 ost->sync_opts = frame->pts + frame->nb_samples;
882 ost->samples_encoded += frame->nb_samples;
883 ost->frames_encoded++;
885 av_assert0(pkt.size || !pkt.data);
886 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder (guard elided). */
888 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
889     "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
890     av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
891     enc->time_base.num, enc->time_base.den);
894 ret = avcodec_send_frame(enc, frame);
/* Drain all packets the encoder has ready. */
899 ret = avcodec_receive_packet(enc, &pkt);
900 if (ret == AVERROR(EAGAIN))
905 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
907 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
910 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
911     "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
912     av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
913     av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
916 output_packet(of, &pkt, ost);
921 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode an AVSubtitle into one or two packets (DVB subtitles need a second
 * "clear" packet), applying -ss/-t shifting and start_display_time folding.
 * NOTE(review): line-sampled excerpt — several declarations/branches elided. */
925 static void do_subtitle_out(OutputFile *of,
929     int subtitle_out_max_size = 1024 * 1024;
930     int subtitle_out_size, nb, i;
935     if (sub->pts == AV_NOPTS_VALUE) {
936         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared encode buffer (freed in ffmpeg_cleanup()). */
945 subtitle_out = av_malloc(subtitle_out_max_size);
947 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
952 /* Note: DVB subtitle need one packet to draw them and one other
953    packet to clear them */
954 /* XXX: signal it in the codec context ? */
955 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
960 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
962 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
963     pts -= output_files[ost->file_index]->start_time;
/* nb == 2 for DVB (draw + clear), 1 otherwise (set in elided lines). */
964 for (i = 0; i < nb; i++) {
965     unsigned save_num_rects = sub->num_rects;
967     ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
968     if (!check_recording_time(ost))
972     // start_display_time is required to be 0
/* Fold start_display_time (ms) into the pts so it can be zeroed. */
973 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
974 sub->end_display_time -= sub->start_display_time;
975 sub->start_display_time = 0;
979 ost->frames_encoded++;
981 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
982     subtitle_out_max_size, sub);
/* The encoder may have clobbered num_rects (clear pass); restore it. */
984 sub->num_rects = save_num_rects;
985 if (subtitle_out_size < 0) {
986     av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
990 av_init_packet(&pkt);
991 pkt.data = subtitle_out;
992 pkt.size = subtitle_out_size;
993 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
994 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
995 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
996     /* XXX: the pts correction is handled here. Maybe handling
997        it in the codec would be better */
/* 90 kHz clock: 90 ticks per millisecond of display time. */
999 pkt.pts += 90 * sub->start_display_time;
1001 pkt.pts += 90 * sub->end_display_time;
1004 output_packet(of, &pkt, ost);
/* Core video output path: apply the selected vsync policy (CFR/VFR/
 * passthrough/drop) to decide how many times to emit or drop the incoming
 * filtered frame, handle forced keyframes, encode, and pass packets on.
 * NOTE(review): this excerpt is line-sampled; substantial logic between the
 * visible statements is elided, so the statements below must not be reordered
 * or assumed adjacent. */
1008 static void do_video_out(OutputFile *of,
1010     AVFrame *next_picture,
1013     int ret, format_video_sync;
1015     AVCodecContext *enc = ost->enc_ctx;
1016     AVCodecParameters *mux_par = ost->st->codecpar;
1017     AVRational frame_rate;
1018     int nb_frames, nb0_frames, i;
1019     double delta, delta0;
1020     double duration = 0;
1022     InputStream *ist = NULL;
1023     AVFilterContext *filter = ost->filter->filter;
1025     if (ost->source_index >= 0)
1026         ist = input_streams[ost->source_index];
/* Derive the nominal frame duration (in encoder time-base units). */
1028 frame_rate = av_buffersink_get_frame_rate(filter);
1029 if (frame_rate.num > 0 && frame_rate.den > 0)
1030     duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1032 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1033     duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* With a plain 1:1 filter chain, trust the packet duration if positive. */
1035 if (!ost->filters_script &&
1039     lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1040     duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* EOF flush: repeat the median of the last dup counts (elided branch). */
1043 if (!next_picture) {
1045     nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1046         ost->last_nb0_frames[1],
1047         ost->last_nb0_frames[2]);
1049 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1050 delta  = delta0 + duration;
1052 /* by default, we output a single frame */
1053 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO to a concrete policy from the muxer's capabilities. */
1056 format_video_sync = video_sync_method;
1057 if (format_video_sync == VSYNC_AUTO) {
1058     if(!strcmp(of->ctx->oformat->name, "avi")) {
1059         format_video_sync = VSYNC_VFR;
1061     format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1063     && format_video_sync == VSYNC_CFR
1064     && input_files[ist->file_index]->ctx->nb_streams == 1
1065     && input_files[ist->file_index]->input_ts_offset == 0) {
1066     format_video_sync = VSYNC_VSCFR;
1068 if (format_video_sync == VSYNC_CFR && copy_ts) {
1069     format_video_sync = VSYNC_VSCFR;
1072 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Large negative drift: clip the frame's position back to the output clock. */
1076 format_video_sync != VSYNC_PASSTHROUGH &&
1077 format_video_sync != VSYNC_DROP) {
1078 if (delta0 < -0.6) {
1079     av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1081     av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1082 sync_ipts = ost->sync_opts;
/* Per-policy decision of nb_frames / nb0_frames (cases elided). */
1087 switch (format_video_sync) {
1089 if (ost->frame_number == 0 && delta0 >= 0.5) {
1090     av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1093     ost->sync_opts = lrint(sync_ipts);
1096 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1097 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1099 } else if (delta < -1.1)
1101 else if (delta > 1.1) {
1102     nb_frames = lrintf(delta);
1104     nb0_frames = lrintf(delta0 - 0.6);
1110 else if (delta > 0.6)
1111     ost->sync_opts = lrint(sync_ipts);
1114 case VSYNC_PASSTHROUGH:
1115     ost->sync_opts = lrint(sync_ipts);
/* Respect -frames and record the dup count history for the EOF flush. */
1122 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1123 nb0_frames = FFMIN(nb0_frames, nb_frames);
1125 memmove(ost->last_nb0_frames + 1,
1126     ost->last_nb0_frames,
1127     sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1128 ost->last_nb0_frames[0] = nb0_frames;
1130 if (nb0_frames == 0 && ost->last_dropped) {
1132     av_log(NULL, AV_LOG_VERBOSE,
1133         "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1134         ost->frame_number, ost->st->index, ost->last_frame->pts);
1136 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1137     if (nb_frames > dts_error_threshold * 30) {
1138         av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1142     nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1143     av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
/* NOTE(review): dup_warning is declared `unsigned` but printed with %d here —
 * benign in practice, but %u would match; cannot patch safely in this
 * elided excerpt. */
1144 if (nb_frames_dup > dup_warning) {
1145     av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1149 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1151 /* duplicates frame if needed */
1152 for (i = 0; i < nb_frames; i++) {
1153     AVFrame *in_picture;
1154     av_init_packet(&pkt);
/* The first nb0_frames iterations re-emit the previous frame. */
1158 if (i < nb0_frames && ost->last_frame) {
1159     in_picture = ost->last_frame;
1161     in_picture = next_picture;
1166 in_picture->pts = ost->sync_opts;
1169 if (!check_recording_time(ost))
1171 if (ost->frame_number >= ost->max_frames)
1175 #if FF_API_LAVF_FMT_RAWPICTURE
1176 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1177     enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1178     /* raw pictures are written as AVPicture structure to
1179        avoid any copies. We support temporarily the older
1181     if (in_picture->interlaced_frame)
1182         mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1184     mux_par->field_order = AV_FIELD_PROGRESSIVE;
1185     pkt.data   = (uint8_t *)in_picture;
1186     pkt.size   = sizeof(AVPicture);
1187     pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1188     pkt.flags |= AV_PKT_FLAG_KEY;
1190     output_packet(of, &pkt, ost);
1194 int forced_keyframe = 0;
/* Interlacing bookkeeping for the encoder and the muxer. */
1197 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1198     ost->top_field_first >= 0)
1199     in_picture->top_field_first = !!ost->top_field_first;
1201 if (in_picture->interlaced_frame) {
1202     if (enc->codec->id == AV_CODEC_ID_MJPEG)
1203         mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1205         mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1207     mux_par->field_order = AV_FIELD_PROGRESSIVE;
1209 in_picture->quality = enc->global_quality;
1210 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, expression, or "source" key frames. */
1212 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1213     in_picture->pts * av_q2d(enc->time_base) : NAN;
1214 if (ost->forced_kf_index < ost->forced_kf_count &&
1215     in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1216     ost->forced_kf_index++;
1217     forced_keyframe = 1;
1218 } else if (ost->forced_keyframes_pexpr) {
1220     ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1221     res = av_expr_eval(ost->forced_keyframes_pexpr,
1222         ost->forced_keyframes_expr_const_values, NULL);
1223     ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1224         ost->forced_keyframes_expr_const_values[FKF_N],
1225         ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1226         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1227         ost->forced_keyframes_expr_const_values[FKF_T],
1228         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1231     forced_keyframe = 1;
1232     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1233         ost->forced_keyframes_expr_const_values[FKF_N];
1234     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1235         ost->forced_keyframes_expr_const_values[FKF_T];
1236     ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1239     ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1240 } else if (   ost->forced_keyframes
1241     && !strncmp(ost->forced_keyframes, "source", 6)
1242     && in_picture->key_frame==1) {
1243     forced_keyframe = 1;
1246 if (forced_keyframe) {
1247     in_picture->pict_type = AV_PICTURE_TYPE_I;
1248     av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1251 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder (guard elided). */
1253 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1254     "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1255     av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1256     enc->time_base.num, enc->time_base.den);
1259 ost->frames_encoded++;
1261 ret = avcodec_send_frame(enc, in_picture);
/* Drain all available packets from the encoder. */
1266 ret = avcodec_receive_packet(enc, &pkt);
1267 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1268 if (ret == AVERROR(EAGAIN))
1274 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1275     "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1276     av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1277     av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Encoders without delay may omit pts; substitute our running counter. */
1280 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1281     pkt.pts = ost->sync_opts;
1283 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1286 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1287     "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1288     av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1289     av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1292 frame_size = pkt.size;
1293 output_packet(of, &pkt, ost);
1295 /* if two pass, output log */
1296 if (ost->logfile && enc->stats_out) {
1297     fprintf(ost->logfile, "%s", enc->stats_out);
1303 /*
1304  * For video, number of frames in == number of packets out.
1305  * But there may be reordering, so we can't throw away frames on encoder
1306  * flush, we need to limit them here, before they go into encoder.
1307  */
1307 ost->frame_number++;
1309 if (vstats_filename && frame_size)
1310     do_video_stats(ost, frame_size);
/* Cache the frame so later iterations can duplicate it (nb0_frames). */
1313 if (!ost->last_frame)
1314     ost->last_frame = av_frame_alloc();
1315 av_frame_unref(ost->last_frame);
1316 if (next_picture && ost->last_frame)
1317     av_frame_ref(ost->last_frame, next_picture);
1319     av_frame_free(&ost->last_frame);
1323 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1327 static double psnr(double d)
1329 return -10.0 * log10(d);
/* Append one line of per-frame statistics for a video stream to the -vstats
 * log file (opened lazily on first use of vstats_filename).
 * NOTE(review): several lines of this function are elided in this extract
 * (variable declarations, the fopen error path, enc assignment, closing
 * braces) -- consult the full source before editing. */
1332 static void do_video_stats(OutputStream *ost, int frame_size)
1334 AVCodecContext *enc;
1336 double ti1, bitrate, avg_bitrate;
1338 /* this is executed just the first time do_video_stats is called */
1340 vstats_file = fopen(vstats_filename, "w");
1348 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1349 frame_number = ost->st->nb_frames;
/* quality is stored scaled by FF_QP2LAMBDA; convert back to a QP-like value */
1350 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1351 ost->quality / (float)FF_QP2LAMBDA);
/* per-frame PSNR from the accumulated squared error, normalized per pixel */
1353 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1354 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1356 fprintf(vstats_file,"f_size= %6d ", frame_size);
1357 /* compute pts value */
1358 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate of this frame, and average over the stream so far */
1362 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1363 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1364 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1365 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1366 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark an output stream as completely finished: no further encoding and no
 * further muxing will happen for it.
 * NOTE(review): a guard line appears to be elided before the loop below in
 * this extract -- in the full source the propagation to the file's other
 * streams is likely conditional; confirm before relying on it. */
1370 static void finish_output_stream(OutputStream *ost)
1372 OutputFile *of = output_files[ost->file_index];
1375 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1378 for (i = 0; i < of->ctx->nb_streams; i++)
1379 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* Drain every output stream's buffersink and feed the frames to the video /
 * audio encoders. With flush set, an EOF from a video sink triggers a final
 * do_video_out(NULL) to flush the encoder.
 * NOTE(review): numerous lines are elided in this extract (loop-variable
 * declarations, continue/break statements, closing braces). */
1384 * Get and encode new output from any of the filtergraphs, without causing
1387 * @return 0 for success, <0 for severe errors
1389 static int reap_filters(int flush)
1391 AVFrame *filtered_frame = NULL;
1394 /* Reap all buffers present in the buffer sinks */
1395 for (i = 0; i < nb_output_streams; i++) {
1396 OutputStream *ost = output_streams[i];
1397 OutputFile *of = output_files[ost->file_index];
1398 AVFilterContext *filter;
1399 AVCodecContext *enc = ost->enc_ctx;
1404 filter = ost->filter->filter;
/* lazily allocate a reusable frame for sink output */
1406 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1407 return AVERROR(ENOMEM);
1409 filtered_frame = ost->filtered_frame;
1412 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: NO_REQUEST means do not ask filters for more input */
1413 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1414 AV_BUFFERSINK_FLAG_NO_REQUEST);
1416 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1417 av_log(NULL, AV_LOG_WARNING,
1418 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1419 } else if (flush && ret == AVERROR_EOF) {
1420 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1421 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1425 if (ost->finished) {
1426 av_frame_unref(filtered_frame);
1429 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1430 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1431 AVRational filter_tb = av_buffersink_get_time_base(filter);
1432 AVRational tb = enc->time_base;
/* widen the timebase denominator to compute a higher-precision float pts */
1433 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1435 tb.den <<= extra_bits;
1437 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1438 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1439 float_pts /= 1 << extra_bits;
1440 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1441 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1443 filtered_frame->pts =
1444 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1445 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1447 //if (ost->source_index >= 0)
1448 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1450 switch (av_buffersink_get_type(filter)) {
1451 case AVMEDIA_TYPE_VIDEO:
1452 if (!ost->frame_aspect_ratio.num)
1453 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1456 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1457 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1459 enc->time_base.num, enc->time_base.den);
1462 do_video_out(of, ost, filtered_frame, float_pts);
1464 case AVMEDIA_TYPE_AUDIO:
/* refuse mid-stream channel-count changes unless the encoder supports them */
1465 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1466 enc->channels != av_frame_get_channels(filtered_frame)) {
1467 av_log(NULL, AV_LOG_ERROR,
1468 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1471 do_audio_out(of, ost, filtered_frame);
1474 // TODO support subtitle filters
1478 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type output byte totals, muxing overhead
 * percentage, and (at verbose level) per-stream demux/decode and encode/mux
 * counters. Warns when nothing at all was encoded.
 * Fix: the two-pass flag mask previously mixed the deprecated CODEC_FLAG_PASS2
 * with the new-style AV_CODEC_FLAG_PASS1; both macros have the same value, so
 * behavior is unchanged, but the deprecated name would break when removed.
 * NOTE(review): several lines are elided in this extract (declarations of i/j,
 * some braces, pass-logging branch bodies). */
1485 static void print_final_stats(int64_t total_size)
1487 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1488 uint64_t subtitle_size = 0;
1489 uint64_t data_size = 0;
1490 float percent = -1.0;
/* accumulate per-media-type output sizes over all output streams */
1494 for (i = 0; i < nb_output_streams; i++) {
1495 OutputStream *ost = output_streams[i];
1496 switch (ost->enc_ctx->codec_type) {
1497 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1498 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1499 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1500 default: other_size += ost->data_size; break;
1502 extra_size += ost->enc_ctx->extradata_size;
1503 data_size += ost->data_size;
1504 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1505 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw stream payload */
1509 if (data_size && total_size>0 && total_size >= data_size)
1510 percent = 100.0 * (total_size - data_size) / data_size;
1512 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1513 video_size / 1024.0,
1514 audio_size / 1024.0,
1515 subtitle_size / 1024.0,
1516 other_size / 1024.0,
1517 extra_size / 1024.0);
1519 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1521 av_log(NULL, AV_LOG_INFO, "unknown");
1522 av_log(NULL, AV_LOG_INFO, "\n");
1524 /* print verbose per-stream stats */
1525 for (i = 0; i < nb_input_files; i++) {
1526 InputFile *f = input_files[i];
1527 uint64_t total_packets = 0, total_size = 0;
1529 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1530 i, f->ctx->filename);
1532 for (j = 0; j < f->nb_streams; j++) {
1533 InputStream *ist = input_streams[f->ist_index + j];
1534 enum AVMediaType type = ist->dec_ctx->codec_type;
1536 total_size += ist->data_size;
1537 total_packets += ist->nb_packets;
1539 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1540 i, j, media_type_string(type));
1541 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1542 ist->nb_packets, ist->data_size);
1544 if (ist->decoding_needed) {
1545 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1546 ist->frames_decoded);
1547 if (type == AVMEDIA_TYPE_AUDIO)
1548 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1549 av_log(NULL, AV_LOG_VERBOSE, "; ");
1552 av_log(NULL, AV_LOG_VERBOSE, "\n");
1555 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1556 total_packets, total_size);
1559 for (i = 0; i < nb_output_files; i++) {
1560 OutputFile *of = output_files[i];
1561 uint64_t total_packets = 0, total_size = 0;
1563 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1564 i, of->ctx->filename);
1566 for (j = 0; j < of->ctx->nb_streams; j++) {
1567 OutputStream *ost = output_streams[of->ost_index + j];
1568 enum AVMediaType type = ost->enc_ctx->codec_type;
1570 total_size += ost->data_size;
1571 total_packets += ost->packets_written;
1573 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1574 i, j, media_type_string(type));
1575 if (ost->encoding_needed) {
1576 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1577 ost->frames_encoded);
1578 if (type == AVMEDIA_TYPE_AUDIO)
1579 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1580 av_log(NULL, AV_LOG_VERBOSE, "; ");
1583 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1584 ost->packets_written, ost->data_size);
1586 av_log(NULL, AV_LOG_VERBOSE, "\n");
1589 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1590 total_packets, total_size);
1592 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1593 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1595 av_log(NULL, AV_LOG_WARNING, "\n");
1597 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic one-line status (frame/fps/q/size/time/bitrate/speed) to
 * stderr / the log, and the machine-readable key=value form to the -progress
 * AVIO stream. Throttled to one update per 500ms unless is_last_report.
 * Uses function-static state (last_time, qp_histogram) -- not reentrant.
 * NOTE(review): many lines are elided in this extract (buf declaration,
 * returns, else branches, hours/mins computation, braces). */
1602 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1605 AVBPrint buf_script;
1607 AVFormatContext *oc;
1609 AVCodecContext *enc;
1610 int frame_number, vid, i;
1613 int64_t pts = INT64_MIN + 1;
1614 static int64_t last_time = -1;
1615 static int qp_histogram[52];
1616 int hours, mins, secs, us;
/* nothing to report to: no console stats and no -progress output */
1620 if (!print_stats && !is_last_report && !progress_avio)
1623 if (!is_last_report) {
1624 if (last_time == -1) {
1625 last_time = cur_time;
/* rate-limit intermediate reports to one every 500ms */
1628 if ((cur_time - last_time) < 500000)
1630 last_time = cur_time;
1633 t = (cur_time-timer_start) / 1000000.0;
1636 oc = output_files[0]->ctx;
1638 total_size = avio_size(oc->pb);
1639 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1640 total_size = avio_tell(oc->pb);
1644 av_bprint_init(&buf_script, 0, 1);
1645 for (i = 0; i < nb_output_streams; i++) {
1647 ost = output_streams[i];
1649 if (!ost->stream_copy)
1650 q = ost->quality / (float) FF_QP2LAMBDA;
1652 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1653 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1654 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1655 ost->file_index, ost->index, q);
/* only the first video stream drives the frame/fps columns (vid flag) */
1657 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1660 frame_number = ost->frame_number;
1661 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" evaluates to 0 or 1 and is used as the * precision of %3.*f:
 * one decimal for small fps values, none otherwise */
1662 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1663 frame_number, fps < 9.95, fps, q);
1664 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1665 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1666 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1667 ost->file_index, ost->index, q);
1669 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram rendered as one hex digit per bucket (log2-compressed count) */
1673 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1675 for (j = 0; j < 32; j++)
1676 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1679 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1681 double error, error_sum = 0;
1682 double scale, scale_sum = 0;
1684 char type[3] = { 'Y','U','V' };
1685 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1686 for (j = 0; j < 3; j++) {
/* last report: cumulative error over all frames; otherwise per-frame error */
1687 if (is_last_report) {
1688 error = enc->error[j];
1689 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1691 error = ost->error[j];
1692 scale = enc->width * enc->height * 255.0 * 255.0;
1698 p = psnr(error / scale);
1699 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1700 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1701 ost->file_index, ost->index, type[j] | 32, p);
1703 p = psnr(error_sum / scale_sum);
1704 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1705 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1706 ost->file_index, ost->index, p);
1710 /* compute min output value */
1711 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1712 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1713 ost->st->time_base, AV_TIME_BASE_Q));
1715 nb_frames_drop += ost->last_dropped;
1718 secs = FFABS(pts) / AV_TIME_BASE;
1719 us = FFABS(pts) % AV_TIME_BASE;
1725 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1726 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1728 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1730 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1731 "size=%8.0fkB time=", total_size / 1024.0);
1733 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1734 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1735 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1736 (100 * us) / AV_TIME_BASE);
1739 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1740 av_bprintf(&buf_script, "bitrate=N/A\n");
1742 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1743 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1746 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1747 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1748 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1749 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1750 hours, mins, secs, us);
1752 if (nb_frames_dup || nb_frames_drop)
1753 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1754 nb_frames_dup, nb_frames_drop);
1755 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1756 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1759 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1760 av_bprintf(&buf_script, "speed=N/A\n");
1762 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1763 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1766 if (print_stats || is_last_report) {
/* '\r' overwrites the status line in place; '\n' finalizes it */
1767 const char end = is_last_report ? '\n' : '\r';
1768 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1769 fprintf(stderr, "%s %c", buf, end);
1771 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1776 if (progress_avio) {
1777 av_bprintf(&buf_script, "progress=%s\n",
1778 is_last_report ? "end" : "continue");
1779 avio_write(progress_avio, buf_script.str,
1780 FFMIN(buf_script.len, buf_script.size - 1));
1781 avio_flush(progress_avio);
1782 av_bprint_finalize(&buf_script, NULL);
1783 if (is_last_report) {
1784 if ((ret = avio_closep(&progress_avio)) < 0)
1785 av_log(NULL, AV_LOG_ERROR,
1786 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1791 print_final_stats(total_size);
/* Drain every active encoder at end of input: send a NULL (flush) frame,
 * then loop receiving the delayed packets and mux them until AVERROR_EOF.
 * NOTE(review): several lines are elided in this extract (continue/break
 * statements, the packet declaration, the receive loop header, braces). */
1794 static void flush_encoders(void)
1798 for (i = 0; i < nb_output_streams; i++) {
1799 OutputStream *ost = output_streams[i];
1800 AVCodecContext *enc = ost->enc_ctx;
1801 OutputFile *of = output_files[ost->file_index];
1803 if (!ost->encoding_needed)
/* raw (frame_size <= 1) audio encoders have no delayed data to flush */
1806 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1808 #if FF_API_LAVF_FMT_RAWPICTURE
1809 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1813 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame enters draining mode (new send/receive encode API) */
1816 avcodec_send_frame(enc, NULL);
1819 const char *desc = NULL;
1823 switch (enc->codec_type) {
1824 case AVMEDIA_TYPE_AUDIO:
1827 case AVMEDIA_TYPE_VIDEO:
1834 av_init_packet(&pkt);
1838 update_benchmark(NULL);
1839 ret = avcodec_receive_packet(enc, &pkt);
1840 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1841 if (ret < 0 && ret != AVERROR_EOF) {
1842 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass: append encoder stats to the pass log while flushing */
1847 if (ost->logfile && enc->stats_out) {
1848 fprintf(ost->logfile, "%s", enc->stats_out);
1850 if (ret == AVERROR_EOF) {
1853 if (ost->finished & MUXER_FINISHED) {
1854 av_packet_unref(&pkt);
1857 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1858 pkt_size = pkt.size;
1859 output_packet(of, &pkt, ost);
1860 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1861 do_video_stats(ost, pkt_size);
/* Returns nonzero when a packet from ist may be stream-copied to ost now:
 * ost must be fed from this exact input stream, and the packet must not
 * precede the output file's start time.
 * NOTE(review): the return statements and braces are elided in this
 * extract; only the two rejection conditions are visible. */
1868 * Check whether a packet from ist should be written into ost at this time
1870 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1872 OutputFile *of = output_files[ost->file_index];
1873 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1875 if (ost->source_index != ist_index)
1881 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy a demuxed packet to an output stream without re-encoding: applies
 * -ss/-t trimming, rescales pts/dts/duration into the output timebase,
 * optionally runs the bitstream through av_parser_change, and muxes it.
 * NOTE(review): many lines are elided in this extract (opkt declaration,
 * return statements, else branches, braces, the pict declaration). */
1887 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1889 OutputFile *of = output_files[ost->file_index];
1890 InputFile *f = input_files [ist->file_index];
1891 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1892 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1896 av_init_packet(&opkt);
/* by default drop leading non-keyframes so the output starts decodable */
1898 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1899 !ost->copy_initial_nonkeyframes)
1902 if (!ost->frame_number && !ost->copy_prior_start) {
1903 int64_t comp_start = start_time;
1904 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1905 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1906 if (pkt->pts == AV_NOPTS_VALUE ?
1907 ist->pts < comp_start :
1908 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop copying once the output file's -t limit is reached */
1912 if (of->recording_time != INT64_MAX &&
1913 ist->pts >= of->recording_time + start_time) {
1914 close_output_stream(ost);
1918 if (f->recording_time != INT64_MAX) {
1919 start_time = f->ctx->start_time;
1920 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1921 start_time += f->start_time;
1922 if (ist->pts >= f->recording_time + start_time) {
1923 close_output_stream(ost);
1928 /* force the input stream PTS */
1929 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1932 if (pkt->pts != AV_NOPTS_VALUE)
1933 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1935 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the input stream's tracked dts when the packet has none */
1937 if (pkt->dts == AV_NOPTS_VALUE)
1938 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1940 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1941 opkt.dts -= ost_tb_start_time;
1943 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1944 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1946 duration = ist->dec_ctx->frame_size;
/* av_rescale_delta keeps sample-accurate timestamps across rescaling */
1947 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1948 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1949 ost->st->time_base) - ost_tb_start_time;
1952 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1953 opkt.flags = pkt->flags;
1954 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1955 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1956 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1957 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1958 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1960 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1961 &opkt.data, &opkt.size,
1962 pkt->data, pkt->size,
1963 pkt->flags & AV_PKT_FLAG_KEY);
1965 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* parser allocated a new buffer; give opkt ownership of it */
1970 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1975 opkt.data = pkt->data;
1976 opkt.size = pkt->size;
1978 av_copy_packet_side_data(&opkt, pkt);
1980 #if FF_API_LAVF_FMT_RAWPICTURE
1981 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1982 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1983 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1984 /* store AVPicture in AVPacket, as expected by the output format */
1985 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1987 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1991 opkt.data = (uint8_t *)&pict;
1992 opkt.size = sizeof(AVPicture);
1993 opkt.flags |= AV_PKT_FLAG_KEY;
1997 output_packet(of, &opkt, ost);
/* If the decoder reports no channel layout, pick the default layout for its
 * channel count (bounded by -guess_layout_max) and warn that it was guessed.
 * NOTE(review): the return statements and braces are elided in this extract;
 * presumably returns nonzero on success, 0 when no layout could be chosen
 * -- confirm against the full source. */
2000 int guess_input_channel_layout(InputStream *ist)
2002 AVCodecContext *dec = ist->dec_ctx;
2004 if (!dec->channel_layout) {
2005 char layout_name[256];
2007 if (dec->channels > ist->guess_layout_max)
2009 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2010 if (!dec->channel_layout)
2012 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2013 dec->channels, dec->channel_layout);
2014 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2015 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Update the global decode success/failure counters and, with -xerror,
 * abort on a decode error or on a corrupt decoded frame.
 * ist may be NULL (subtitle path) -- the corrupt-frame check is skipped then.
 * NOTE(review): the exit/abort calls and braces are elided in this extract. */
2020 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
2022 if (*got_output || ret<0)
2023 decode_error_stat[ret<0] ++;
2025 if (ret < 0 && exit_on_error)
2028 if (exit_on_error && *got_output && ist) {
2029 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2030 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/* Thin wrapper mapping the old got_frame-style decode calls onto the new
 * avcodec_send_packet / avcodec_receive_frame API.
 * NOTE(review): the declarations, got_frame handling and return statements
 * are elided in this extract. */
2036 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2037 // There is the following difference: if you got a frame, you must call
2038 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2039 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2040 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2047 ret = avcodec_send_packet(avctx, pkt);
2048 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2049 // decoded frames with avcodec_receive_frame() until done.
2050 if (ret < 0 && ret != AVERROR_EOF)
2054 ret = avcodec_receive_frame(avctx, frame);
2055 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Push a decoded frame into every filtergraph input fed by this stream.
 * All but the last filter receive a reference (av_frame_ref into the
 * reusable ist->filter_frame); the last consumes the frame itself.
 * AVERROR_EOF from a closed buffersrc is deliberately ignored.
 * NOTE(review): declarations, the else branch assigning f, break/return
 * statements and braces are elided in this extract. */
2063 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2068 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2069 for (i = 0; i < ist->nb_filters; i++) {
2070 if (i < ist->nb_filters - 1) {
2071 f = ist->filter_frame;
2072 ret = av_frame_ref(f, decoded_frame);
2077 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2078 AV_BUFFERSRC_FLAG_PUSH);
2079 if (ret == AVERROR_EOF)
2080 ret = 0; /* ignore */
2082 av_log(NULL, AV_LOG_ERROR,
2083 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet: validate the sample rate, advance the stream's
 * predicted timestamps, reconfigure filtergraphs on format/layout changes,
 * derive a frame pts, and forward the frame to the filters.
 * NOTE(review): several lines are elided in this extract (goto/return
 * targets, divisors of the next_pts/next_dts increments, else keywords,
 * braces); statement order is load-bearing -- do not restructure blindly. */
2090 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2092 AVFrame *decoded_frame;
2093 AVCodecContext *avctx = ist->dec_ctx;
2094 int i, ret, err = 0, resample_changed;
2095 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
2097 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2098 return AVERROR(ENOMEM);
2099 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2100 return AVERROR(ENOMEM);
2101 decoded_frame = ist->decoded_frame;
2103 update_benchmark(NULL);
2104 ret = decode(avctx, decoded_frame, got_output, pkt);
2105 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2107 if (ret >= 0 && avctx->sample_rate <= 0) {
2108 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2109 ret = AVERROR_INVALIDDATA;
2112 if (ret != AVERROR_EOF)
2113 check_decode_result(ist, got_output, ret);
2115 if (!*got_output || ret < 0)
2118 ist->samples_decoded += decoded_frame->nb_samples;
2119 ist->frames_decoded++;
2122 /* increment next_dts to use for the case where the input stream does not
2123 have timestamps or there are multiple frames in the packet */
2124 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2126 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect any change in sample format, rate, channel count or layout */
2130 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2131 ist->resample_channels != avctx->channels ||
2132 ist->resample_channel_layout != decoded_frame->channel_layout ||
2133 ist->resample_sample_rate != decoded_frame->sample_rate;
2134 if (resample_changed) {
2135 char layout1[64], layout2[64];
2137 if (!guess_input_channel_layout(ist)) {
2138 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2139 "layout for Input Stream #%d.%d\n", ist->file_index,
2143 decoded_frame->channel_layout = avctx->channel_layout;
2145 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2146 ist->resample_channel_layout);
2147 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2148 decoded_frame->channel_layout);
2150 av_log(NULL, AV_LOG_INFO,
2151 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2152 ist->file_index, ist->st->index,
2153 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2154 ist->resample_channels, layout1,
2155 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2156 avctx->channels, layout2);
/* remember the new parameters, then push them into the filter chain */
2158 ist->resample_sample_fmt = decoded_frame->format;
2159 ist->resample_sample_rate = decoded_frame->sample_rate;
2160 ist->resample_channel_layout = decoded_frame->channel_layout;
2161 ist->resample_channels = avctx->channels;
2163 for (i = 0; i < ist->nb_filters; i++) {
2164 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2166 av_log(NULL, AV_LOG_ERROR,
2167 "Error reconfiguring input stream %d:%d filter %d\n",
2168 ist->file_index, ist->st->index, i);
2173 for (i = 0; i < nb_filtergraphs; i++)
2174 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2175 FilterGraph *fg = filtergraphs[i];
2176 if (configure_filtergraph(fg) < 0) {
2177 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* choose a pts source: frame pts, then packet pts, then tracked dts */
2183 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2184 decoded_frame_tb = ist->st->time_base;
2185 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2186 decoded_frame->pts = pkt->pts;
2187 decoded_frame_tb = ist->st->time_base;
2189 decoded_frame->pts = ist->dts;
2190 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale into a 1/sample_rate timebase, keeping sample accuracy */
2192 if (decoded_frame->pts != AV_NOPTS_VALUE)
2193 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2194 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2195 (AVRational){1, avctx->sample_rate});
2196 ist->nb_samples = decoded_frame->nb_samples;
2197 err = send_frame_to_filters(ist, decoded_frame);
2198 decoded_frame->pts = AV_NOPTS_VALUE;
2201 av_frame_unref(ist->filter_frame);
2202 av_frame_unref(decoded_frame);
2203 return err < 0 ? err : ret;
/* Decode one video packet: track dts values for EOF timestamp recovery,
 * validate decoder vs frame parameters, retrieve hwaccel frames, pick a
 * best-effort timestamp, handle resolution/pixfmt changes, and forward the
 * frame to the filters. With eof set, pkt may be NULL (drain).
 * NOTE(review): several lines are elided in this extract (avpkt declaration
 * and setup, the eof guard around the dts_buffer append, goto/return
 * targets, else keywords, braces). */
2206 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2208 AVFrame *decoded_frame;
2209 int i, ret = 0, err = 0, resample_changed;
2210 int64_t best_effort_timestamp;
2211 int64_t dts = AV_NOPTS_VALUE;
2214 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2215 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2217 if (!eof && pkt && pkt->size == 0)
2220 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2221 return AVERROR(ENOMEM);
2222 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2223 return AVERROR(ENOMEM);
2224 decoded_frame = ist->decoded_frame;
2225 if (ist->dts != AV_NOPTS_VALUE)
2226 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2229 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2232 // The old code used to set dts on the drain packet, which does not work
2233 // with the new API anymore.
/* queue this dts so a timestamp can be recovered for delayed frames at EOF */
2235 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2237 return AVERROR(ENOMEM);
2238 ist->dts_buffer = new;
2239 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2242 update_benchmark(NULL);
2243 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2244 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2246 // The following line may be required in some cases where there is no parser
2247 // or the parser does not has_b_frames correctly
2248 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2249 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2250 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2252 av_log(ist->dec_ctx, AV_LOG_WARNING,
2253 "video_delay is larger in decoder than demuxer %d > %d.\n"
2254 "If you want to help, upload a sample "
2255 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2256 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2257 ist->dec_ctx->has_b_frames,
2258 ist->st->codecpar->video_delay);
2261 if (ret != AVERROR_EOF)
2262 check_decode_result(ist, got_output, ret);
/* debug aid: decoder context and produced frame should agree on geometry */
2264 if (*got_output && ret >= 0) {
2265 if (ist->dec_ctx->width != decoded_frame->width ||
2266 ist->dec_ctx->height != decoded_frame->height ||
2267 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2268 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2269 decoded_frame->width,
2270 decoded_frame->height,
2271 decoded_frame->format,
2272 ist->dec_ctx->width,
2273 ist->dec_ctx->height,
2274 ist->dec_ctx->pix_fmt);
2278 if (!*got_output || ret < 0)
2281 if(ist->top_field_first>=0)
2282 decoded_frame->top_field_first = ist->top_field_first;
2284 ist->frames_decoded++;
/* download the frame from the hardware surface into a software frame */
2286 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2287 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2291 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2293 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* at EOF, fall back to the oldest queued dts for delayed frames */
2295 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2296 best_effort_timestamp = ist->dts_buffer[0];
2298 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2299 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2300 ist->nb_dts_buffer--;
2303 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2304 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2306 if (ts != AV_NOPTS_VALUE)
2307 ist->next_pts = ist->pts = ts;
2311 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2312 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2313 ist->st->index, av_ts2str(decoded_frame->pts),
2314 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2315 best_effort_timestamp,
2316 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2317 decoded_frame->key_frame, decoded_frame->pict_type,
2318 ist->st->time_base.num, ist->st->time_base.den);
2321 if (ist->st->sample_aspect_ratio.num)
2322 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream resolution or pixel-format changes */
2324 resample_changed = ist->resample_width != decoded_frame->width ||
2325 ist->resample_height != decoded_frame->height ||
2326 ist->resample_pix_fmt != decoded_frame->format;
2327 if (resample_changed) {
2328 av_log(NULL, AV_LOG_INFO,
2329 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2330 ist->file_index, ist->st->index,
2331 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2332 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2334 ist->resample_width = decoded_frame->width;
2335 ist->resample_height = decoded_frame->height;
2336 ist->resample_pix_fmt = decoded_frame->format;
2338 for (i = 0; i < ist->nb_filters; i++) {
2339 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2341 av_log(NULL, AV_LOG_ERROR,
2342 "Error reconfiguring input stream %d:%d filter %d\n",
2343 ist->file_index, ist->st->index, i);
2348 for (i = 0; i < nb_filtergraphs; i++) {
2349 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2350 configure_filtergraph(filtergraphs[i]) < 0) {
2351 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2357 err = send_frame_to_filters(ist, decoded_frame);
2360 av_frame_unref(ist->filter_frame);
2361 av_frame_unref(decoded_frame);
2362 return err < 0 ? err : ret;
/* Decode one subtitle packet with avcodec_decode_subtitle2() and forward the
 * decoded AVSubtitle to every subtitle encoder whose output constraints match
 * this input stream. Also feeds sub2video and, with -fix_sub_duration,
 * clamps the previous subtitle's display time to the new subtitle's pts.
 * *got_output is set when a subtitle was produced.
 * NOTE(review): several original lines (braces, returns) were lost in
 * extraction; comments below describe only the visible logic. */
2365 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2367 AVSubtitle subtitle;
2368 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2369 &subtitle, got_output, pkt);
2371 check_decode_result(NULL, got_output, ret);
2373 if (ret < 0 || !*got_output) {
/* error or no output: flush pending sub2video state (presumably on EOF --
 * the guarding condition was dropped by extraction; confirm upstream) */
2375 sub2video_flush(ist);
/* -fix_sub_duration: shorten the PREVIOUS subtitle so it ends when the
 * current one starts; pts delta is rescaled to milliseconds */
2379 if (ist->fix_sub_duration) {
2381 if (ist->prev_sub.got_output) {
2382 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2383 1000, AV_TIME_BASE);
2384 if (end < ist->prev_sub.subtitle.end_display_time) {
2385 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2386 "Subtitle duration reduced from %d to %d%s\n",
2387 ist->prev_sub.subtitle.end_display_time, end,
2388 end <= 0 ? ", dropping it" : "");
2389 ist->prev_sub.subtitle.end_display_time = end;
/* swap current decode result with the buffered previous one: we always emit
 * the PREVIOUS subtitle (now with a possibly fixed duration) */
2392 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2393 FFSWAP(int, ret, ist->prev_sub.ret);
2394 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2402 sub2video_update(ist, &subtitle);
/* empty subtitles (no rectangles) are not sent to encoders */
2404 if (!subtitle.num_rects)
2407 ist->frames_decoded++;
/* fan the subtitle out to every matching subtitle encoder */
2409 for (i = 0; i < nb_output_streams; i++) {
2410 OutputStream *ost = output_streams[i];
2412 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2413 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2416 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
/* the AVSubtitle owns allocated rects; always free it before returning */
2420 avsubtitle_free(&subtitle);
/* Mark every filtergraph input fed by this input stream as finished by
 * pushing a NULL frame (EOF) into each buffersrc.
 * NOTE(review): the error-propagation/return lines were lost in extraction;
 * presumably a negative av_buffersrc_add_frame() result is returned. */
2424 static int send_filter_eof(InputStream *ist)
2427 for (i = 0; i < ist->nb_filters; i++) {
2428 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2435 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet input dispatcher: maintains the input stream's dts/pts
 * timeline, runs the appropriate decoder (audio/video/subtitle) while the
 * stream needs decoding, sends filter EOF on drain, and advances timestamps
 * plus forwards the packet for pure stream copy.
 * Returns !eof_reached (non-zero while more input is expected).
 * NOTE(review): extraction dropped many interior lines (braces, the
 * repeating/got_output bookkeeping, error handling); comments cover only
 * what is visible. */
2436 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2440 int eof_reached = 0;
/* first packet: seed ist->dts from B-frame delay (negative start) when an
 * average frame rate is known, else 0 */
2443 if (!ist->saw_first_ts) {
2444 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2446 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2447 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2448 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2450 ist->saw_first_ts = 1;
2453 if (ist->next_dts == AV_NOPTS_VALUE)
2454 ist->next_dts = ist->dts;
2455 if (ist->next_pts == AV_NOPTS_VALUE)
2456 ist->next_pts = ist->pts;
2460 av_init_packet(&avpkt);
/* resync the internal AV_TIME_BASE_Q timeline from the packet dts */
2467 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2468 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2469 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2470 ist->next_pts = ist->pts = ist->dts;
2473 // while we have more to decode or while the decoder did output something on EOF
2474 while (ist->decoding_needed) {
2478 ist->pts = ist->next_pts;
2479 ist->dts = ist->next_dts;
2481 switch (ist->dec_ctx->codec_type) {
2482 case AVMEDIA_TYPE_AUDIO:
2483 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2485 case AVMEDIA_TYPE_VIDEO:
2486 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
/* estimate this frame's duration: packet duration first, else derive it
 * from the codec frame rate and the parser's repeat_pict */
2487 if (!repeating || !pkt || got_output) {
2488 if (pkt && pkt->duration) {
2489 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2490 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2491 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2492 duration = ((int64_t)AV_TIME_BASE *
2493 ist->dec_ctx->framerate.den * ticks) /
2494 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2497 if(ist->dts != AV_NOPTS_VALUE && duration) {
2498 ist->next_dts += duration;
2500 ist->next_dts = AV_NOPTS_VALUE;
2504 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2506 case AVMEDIA_TYPE_SUBTITLE:
2509 ret = transcode_subtitles(ist, &avpkt, &got_output);
2510 if (!pkt && ret >= 0)
2517 if (ret == AVERROR_EOF) {
2523 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2524 ist->file_index, ist->st->index, av_err2str(ret));
2527 // Decoding might not terminate if we're draining the decoder, and
2528 // the decoder keeps returning an error.
2529 // This should probably be considered a libavcodec issue.
2530 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2539 // During draining, we might get multiple output frames in this loop.
2540 // ffmpeg.c does not drain the filter chain on configuration changes,
2541 // which means if we send multiple frames at once to the filters, and
2542 // one of those frames changes configuration, the buffered frames will
2543 // be lost. This can upset certain FATE tests.
2544 // Decode only 1 frame per call on EOF to appease these FATE tests.
2545 // The ideal solution would be to rewrite decoding to use the new
2546 // decoding API in a better way.
2553 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2554 /* except when looping we need to flush but not to send an EOF */
2555 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2556 int ret = send_filter_eof(ist);
2558 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2563 /* handle stream copy */
2564 if (!ist->decoding_needed) {
2565 ist->dts = ist->next_dts;
2566 switch (ist->dec_ctx->codec_type) {
2567 case AVMEDIA_TYPE_AUDIO:
/* audio copy: advance dts by one codec frame worth of samples */
2568 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2569 ist->dec_ctx->sample_rate;
2571 case AVMEDIA_TYPE_VIDEO:
2572 if (ist->framerate.num) {
2573 // TODO: Remove work-around for c99-to-c89 issue 7
2574 AVRational time_base_q = AV_TIME_BASE_Q;
2575 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2576 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2577 } else if (pkt->duration) {
2578 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2579 } else if(ist->dec_ctx->framerate.num != 0) {
2580 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2581 ist->next_dts += ((int64_t)AV_TIME_BASE *
2582 ist->dec_ctx->framerate.den * ticks) /
2583 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2587 ist->pts = ist->dts;
2588 ist->next_pts = ist->next_dts;
/* forward the packet to every output stream doing stream copy */
2590 for (i = 0; pkt && i < nb_output_streams; i++) {
2591 OutputStream *ost = output_streams[i];
2593 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2596 do_streamcopy(ist, ost, pkt);
2599 return !eof_reached;
/* Build and emit an SDP description covering all RTP output files.
 * Waits until every output file's header has been written, then either
 * prints the SDP to stdout or writes it to -sdp_file. */
2602 static void print_sdp(void)
2607 AVIOContext *sdp_pb;
2608 AVFormatContext **avc;
/* bail out until all muxers have written their headers */
2610 for (i = 0; i < nb_output_files; i++) {
2611 if (!output_files[i]->header_written)
2615 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* collect only the RTP muxer contexts; j counts them */
2618 for (i = 0, j = 0; i < nb_output_files; i++) {
2619 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2620 avc[j] = output_files[i]->ctx;
2628 av_sdp_create(avc, j, sdp, sizeof(sdp));
2630 if (!sdp_filename) {
2631 printf("SDP:\n%s\n", sdp);
2634 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2635 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2637 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2638 avio_closep(&sdp_pb);
/* only write the SDP file once */
2639 av_freep(&sdp_filename);
/* Look up the HWAccel table entry whose pixel format matches pix_fmt.
 * The hwaccels[] table is terminated by an entry with a NULL name.
 * NOTE(review): the no-match return line was lost in extraction;
 * presumably NULL is returned when nothing matches. */
2647 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2650 for (i = 0; hwaccels[i].name; i++)
2651 if (hwaccels[i].pix_fmt == pix_fmt)
2652 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * format list and pick the first hwaccel format we can initialize that is
 * compatible with the user's -hwaccel selection. Falls through to software
 * formats (non-HWACCEL flags) implicitly.
 * NOTE(review): extraction dropped some lines (e.g. the init-failure
 * "continue" path and the final return); comments cover visible logic. */
2656 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2658 InputStream *ist = s->opaque;
2659 const enum AVPixelFormat *p;
2662 for (p = pix_fmts; *p != -1; p++) {
2663 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2664 const HWAccel *hwaccel;
/* only consider hardware-accelerated formats in this loop */
2666 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2669 hwaccel = get_hwaccel(*p);
/* skip hwaccels that conflict with an already-active or user-selected one */
2671 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2672 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2675 ret = hwaccel->init(s);
/* init failed: fatal only if this hwaccel was explicitly requested */
2677 if (ist->hwaccel_id == hwaccel->id) {
2678 av_log(NULL, AV_LOG_FATAL,
2679 "%s hwaccel requested for input stream #%d:%d, "
2680 "but cannot be initialized.\n", hwaccel->name,
2681 ist->file_index, ist->st->index);
2682 return AV_PIX_FMT_NONE;
/* propagate the hw frames context set up by the hwaccel init */
2687 if (ist->hw_frames_ctx) {
2688 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2689 if (!s->hw_frames_ctx)
2690 return AV_PIX_FMT_NONE;
2693 ist->active_hwaccel_id = hwaccel->id;
2694 ist->hwaccel_pix_fmt = *p;
2701 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2703 InputStream *ist = s->opaque;
2705 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2706 return ist->hwaccel_get_buffer(s, frame, flags);
2708 return avcodec_default_get_buffer2(s, frame, flags);
/* Prepare input stream ist_index for use: push decoder parameters into its
 * filtergraph inputs and, when decoding is needed, configure the decoder
 * context (hwaccel callbacks, refcounted frames, subtitle options, packet
 * timebase, threads) and open it with avcodec_open2().
 * On failure an explanatory message is written into error/error_len and a
 * negative AVERROR code is returned. */
2711 static int init_input_stream(int ist_index, char *error, int error_len)
2714 InputStream *ist = input_streams[ist_index];
2716 for (i = 0; i < ist->nb_filters; i++) {
2717 ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
2719 av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
2724 if (ist->decoding_needed) {
2725 AVCodec *codec = ist->dec;
2727 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2728 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2729 return AVERROR(EINVAL);
/* install our hwaccel-aware callbacks; thread_safe_callbacks allows
 * frame-threaded decoders to call them from worker threads */
2732 ist->dec_ctx->opaque = ist;
2733 ist->dec_ctx->get_format = get_format;
2734 ist->dec_ctx->get_buffer2 = get_buffer;
2735 ist->dec_ctx->thread_safe_callbacks = 1;
2737 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2738 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2739 (ist->decoding_needed & DECODING_FOR_OST)) {
2740 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2741 if (ist->decoding_needed & DECODING_FOR_FILTER)
2742 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2745 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2747 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2748 * audio, and video decoders such as cuvid or mediacodec */
2749 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* default to automatic thread count unless the user specified one */
2751 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2752 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2753 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2754 if (ret == AVERROR_EXPERIMENTAL)
2755 abort_codec_experimental(codec, 0);
2757 snprintf(error, error_len,
2758 "Error while opening decoder for input stream "
2760 ist->file_index, ist->st->index, av_err2str(ret));
/* any decoder_opts entries not consumed by avcodec_open2 are errors */
2763 assert_avoptions(ist->decoder_opts);
2766 ist->next_pts = AV_NOPTS_VALUE;
2767 ist->next_dts = AV_NOPTS_VALUE;
2772 static InputStream *get_input_stream(OutputStream *ost)
2774 if (ost->source_index >= 0)
2775 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values.
 * Returns -1/0/1 without risking overflow from subtraction; this is the
 * expansion of FFmpeg's FFDIFFSIGN(a, b) macro. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2784 /* open the muxer when all the streams are initialized */
/* If every stream of output file 'of' is initialized, write the muxer
 * header, optionally print the SDP, and flush packets that were queued in
 * each stream's muxing_queue while the muxer was not yet open.
 * Returns 0 (or early, untouched) when some stream is still uninitialized;
 * negative AVERROR on header failure. */
2785 static int check_init_output_file(OutputFile *of, int file_index)
2789 for (i = 0; i < of->ctx->nb_streams; i++) {
2790 OutputStream *ost = output_streams[of->ost_index + i];
2791 if (!ost->initialized)
2795 of->ctx->interrupt_callback = int_cb;
2797 ret = avformat_write_header(of->ctx, &of->opts);
2799 av_log(NULL, AV_LOG_ERROR,
2800 "Could not write header for output file #%d "
2801 "(incorrect codec parameters ?): %s\n",
2802 file_index, av_err2str(ret));
2805 //assert_avoptions(of->opts);
2806 of->header_written = 1;
2808 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2810 if (sdp_filename || want_sdp)
2813 /* flush the muxing queues */
2814 for (i = 0; i < of->ctx->nb_streams; i++) {
2815 OutputStream *ost = output_streams[of->ost_index + i];
/* drain packets buffered before the header could be written */
2817 while (av_fifo_size(ost->muxing_queue)) {
2819 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2820 write_packet(of, &pkt, ost);
/* Initialize the chain of bitstream filters attached to this output stream:
 * propagate codec parameters and timebase through the chain with
 * avcodec_parameters_copy()/av_bsf_init(), then copy the final filter's
 * output parameters and timebase back onto the output stream. */
2827 static int init_output_bsfs(OutputStream *ost)
2832 if (!ost->nb_bitstream_filters)
2835 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2836 ctx = ost->bsf_ctx[i];
/* input of filter i comes from filter i-1's output (or the stream itself) */
2838 ret = avcodec_parameters_copy(ctx->par_in,
2839 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2843 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2845 ret = av_bsf_init(ctx);
2847 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2848 ost->bsf_ctx[i]->filter->name);
/* the last filter in the chain defines what the muxer will see */
2853 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2854 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2858 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream in stream-copy mode: transfer codec parameters
 * from the input stream (optionally remapping the codec tag for the output
 * container), copy framerate/timebase/duration/disposition/side data, and
 * apply per-media-type fixups (block_align reset, sample aspect ratio).
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): various error-check lines were lost in extraction. */
2863 static int init_output_stream_streamcopy(OutputStream *ost)
2865 OutputFile *of = output_files[ost->file_index];
2866 InputStream *ist = get_input_stream(ost);
2867 AVCodecParameters *par_dst = ost->st->codecpar;
2868 AVCodecParameters *par_src = ost->ref_par;
2871 uint32_t codec_tag = par_dst->codec_tag;
2873 av_assert0(ist && !ost->filter);
/* route input params through enc_ctx so user -c:copy options can apply */
2875 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2877 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2879 av_log(NULL, AV_LOG_FATAL,
2880 "Error setting up codec context options.\n");
2883 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* keep the source codec_tag only if the output container accepts it */
2886 unsigned int codec_tag_tmp;
2887 if (!of->ctx->oformat->codec_tag ||
2888 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2889 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
2890 codec_tag = par_src->codec_tag;
2893 ret = avcodec_parameters_copy(par_dst, par_src);
2897 par_dst->codec_tag = codec_tag;
2899 if (!ost->frame_rate.num)
2900 ost->frame_rate = ist->framerate;
2901 ost->st->avg_frame_rate = ost->frame_rate;
2903 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2907 // copy timebase while removing common factors
2908 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
2909 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
2911 // copy estimated duration as a hint to the muxer
2912 if (ost->st->duration <= 0 && ist->st->duration > 0)
2913 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
2916 ost->st->disposition = ist->st->disposition;
/* deep-copy stream-level side data, skipping the display matrix when the
 * user overrode rotation on the command line */
2918 if (ist->st->nb_side_data) {
2919 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2920 sizeof(*ist->st->side_data));
2921 if (!ost->st->side_data)
2922 return AVERROR(ENOMEM);
2924 ost->st->nb_side_data = 0;
2925 for (i = 0; i < ist->st->nb_side_data; i++) {
2926 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2927 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2929 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2932 sd_dst->data = av_malloc(sd_src->size);
2934 return AVERROR(ENOMEM);
2935 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2936 sd_dst->size = sd_src->size;
2937 sd_dst->type = sd_src->type;
2938 ost->st->nb_side_data++;
/* a parser is used during copy to extract timing/keyframe info */
2942 ost->parser = av_parser_init(par_dst->codec_id);
2943 ost->parser_avctx = avcodec_alloc_context3(NULL);
2944 if (!ost->parser_avctx)
2945 return AVERROR(ENOMEM);
2947 switch (par_dst->codec_type) {
2948 case AVMEDIA_TYPE_AUDIO:
2949 if (audio_volume != 256) {
2950 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* MP3/AC3: reset bogus block_align values coming from some demuxers */
2953 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2954 par_dst->block_align= 0;
2955 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2956 par_dst->block_align= 0;
2958 case AVMEDIA_TYPE_VIDEO:
2959 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2961 av_mul_q(ost->frame_aspect_ratio,
2962 (AVRational){ par_dst->height, par_dst->width });
2963 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2964 "with stream copy may produce invalid files\n");
2966 else if (ist->st->sample_aspect_ratio.num)
2967 sar = ist->st->sample_aspect_ratio;
2969 sar = par_src->sample_aspect_ratio;
2970 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2971 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2972 ost->st->r_frame_rate = ist->st->r_frame_rate;
/* Set the "encoder" metadata tag on the output stream to identify the
 * encoder used (e.g. "Lavc... libx264"). Honors an existing user-provided
 * tag, and drops the version string when bitexact output was requested via
 * -fflags or codec flags. */
2979 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2981 AVDictionaryEntry *e;
2983 uint8_t *encoder_string;
2984 int encoder_string_len;
2985 int format_flags = 0;
2986 int codec_flags = 0;
/* user already set an encoder tag explicitly: keep it */
2988 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate -fflags string to detect AVFMT_FLAG_BITEXACT */
2991 e = av_dict_get(of->opts, "fflags", NULL, 0);
2993 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2996 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* same for per-codec -flags to detect AV_CODEC_FLAG_BITEXACT */
2998 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3000 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3003 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3006 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3007 encoder_string = av_mallocz(encoder_string_len);
3008 if (!encoder_string)
/* bitexact mode must not embed a version-dependent string */
3011 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3012 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3014 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3015 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict */
3016 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3017 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames specification 'kf' (a comma-separated list of
 * timestamps, where an entry "chapters[+offset]" expands to all chapter
 * start times) into a sorted array of pts values in avctx->time_base,
 * stored in ost->forced_kf_pts / ost->forced_kf_count. */
3020 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3021 AVCodecContext *avctx)
3024 int n = 1, i, size, index = 0;
/* count entries: commas + 1 (counting loop body lost in extraction) */
3027 for (p = kf; *p; p++)
3031 pts = av_malloc_array(size, sizeof(*pts));
3033 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3038 for (i = 0; i < n; i++) {
3039 char *next = strchr(p, ',');
/* "chapters" entry: grow the array and append every chapter start,
 * shifted by an optional offset parsed after the keyword */
3044 if (!memcmp(p, "chapters", 8)) {
3046 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3049 if (avf->nb_chapters > INT_MAX - size ||
3050 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3052 av_log(NULL, AV_LOG_FATAL,
3053 "Could not allocate forced key frames array.\n");
3056 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3057 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3059 for (j = 0; j < avf->nb_chapters; j++) {
3060 AVChapter *c = avf->chapters[j];
3061 av_assert1(index < size);
3062 pts[index++] = av_rescale_q(c->start, c->time_base,
3063 avctx->time_base) + t;
/* plain timestamp entry */
3068 t = parse_time_or_die("force_key_frames", p, 1);
3069 av_assert1(index < size);
3070 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3077 av_assert0(index == size);
/* keep the list sorted so do_video_out can scan it linearly */
3078 qsort(pts, size, sizeof(*pts), compare_int64);
3079 ost->forced_kf_count = size;
3080 ost->forced_kf_pts = pts;
/* Configure the encoder context of an output stream that will be encoded:
 * disposition inheritance, simple-filtergraph configuration, frame rate
 * selection, and per-media-type parameters (sample format/rate/channels for
 * audio; size, SAR, pix_fmt, timebase and forced keyframes for video;
 * timebase and dimensions for subtitles).
 * NOTE(review): extraction dropped interior lines (braces, some guards);
 * comments describe only the visible logic. */
3083 static int init_output_stream_encode(OutputStream *ost)
3085 InputStream *ist = get_input_stream(ost);
3086 AVCodecContext *enc_ctx = ost->enc_ctx;
3087 AVCodecContext *dec_ctx = NULL;
3088 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3091 set_encoder_id(output_files[ost->file_index], ost);
3094 ost->st->disposition = ist->st->disposition;
3096 dec_ctx = ist->dec_ctx;
3098 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* if this is the only stream of its media type, make it the default */
3100 for (j = 0; j < oc->nb_streams; j++) {
3101 AVStream *st = oc->streams[j];
3102 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3105 if (j == oc->nb_streams)
3106 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3107 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3108 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* simple filtergraphs are configured here, just before the encoder opens */
3111 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3112 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3113 filtergraph_is_simple(ost->filter->graph)) {
3114 FilterGraph *fg = ost->filter->graph;
3116 if (configure_filtergraph(fg)) {
3117 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* frame rate fallback chain: buffersink -> -r input -> r_frame_rate -> 25 */
3122 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3123 if (!ost->frame_rate.num)
3124 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3125 if (ist && !ost->frame_rate.num)
3126 ost->frame_rate = ist->framerate;
3127 if (ist && !ost->frame_rate.num)
3128 ost->frame_rate = ist->st->r_frame_rate;
3129 if (ist && !ost->frame_rate.num) {
3130 ost->frame_rate = (AVRational){25, 1};
3131 av_log(NULL, AV_LOG_WARNING,
3133 "about the input framerate is available. Falling "
3134 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3135 "if you want a different framerate.\n",
3136 ost->file_index, ost->index);
3138 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* snap to the encoder's supported frame rates unless -force_fps */
3139 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3140 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3141 ost->frame_rate = ost->enc->supported_framerates[idx];
3143 // reduce frame rate for mpeg4 to be within the spec limits
3144 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3145 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3146 ost->frame_rate.num, ost->frame_rate.den, 65535);
3150 switch (enc_ctx->codec_type) {
3151 case AVMEDIA_TYPE_AUDIO:
/* audio parameters come from the filtergraph's buffersink */
3152 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3154 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3155 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3156 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3157 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3158 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3159 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3161 case AVMEDIA_TYPE_VIDEO:
3162 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3163 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3164 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3165 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3166 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3167 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3168 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* rescale already-parsed forced keyframe times to the encoder timebase */
3170 for (j = 0; j < ost->forced_kf_count; j++)
3171 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3173 enc_ctx->time_base);
3175 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3176 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3177 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3178 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3179 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3180 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
/* warn about implicit non-yuv420p pix_fmt choices for common encoders */
3181 if (!strncmp(ost->enc->name, "libx264", 7) &&
3182 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3183 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3184 av_log(NULL, AV_LOG_WARNING,
3185 "No pixel format specified, %s for H.264 encoding chosen.\n"
3186 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3187 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3188 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3189 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3190 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3191 av_log(NULL, AV_LOG_WARNING,
3192 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3193 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3194 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3195 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3197 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3198 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3200 ost->st->avg_frame_rate = ost->frame_rate;
3203 enc_ctx->width != dec_ctx->width ||
3204 enc_ctx->height != dec_ctx->height ||
3205 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3206 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either an expression ("expr:..."), the literal
 * "source" (handled elsewhere), or a static timestamp list */
3209 if (ost->forced_keyframes) {
3210 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3211 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3212 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3214 av_log(NULL, AV_LOG_ERROR,
3215 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3218 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3219 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3220 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3221 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3223 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3224 // parse it only for static kf timings
3225 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3226 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3230 case AVMEDIA_TYPE_SUBTITLE:
3231 enc_ctx->time_base = (AVRational){1, 1000};
3232 if (!enc_ctx->width) {
3233 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3234 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3237 case AVMEDIA_TYPE_DATA:
/* Fully initialize one output stream: for encoded streams, run
 * init_output_stream_encode(), open the encoder, copy parameters and coded
 * side data to the AVStream; for stream copy, run
 * init_output_stream_streamcopy(). Then apply a user -disposition override,
 * initialize bitstream filters, mark the stream initialized, and try to
 * open the owning muxer via check_init_output_file().
 * On failure an explanatory message is written into error/error_len. */
3247 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3251 if (ost->encoding_needed) {
3252 AVCodec *codec = ost->enc;
3253 AVCodecContext *dec = NULL;
3256 ret = init_output_stream_encode(ost);
3260 if ((ist = get_input_stream(ost)))
/* propagate the decoder's ASS subtitle header to the encoder */
3262 if (dec && dec->subtitle_header) {
3263 /* ASS code assumes this buffer is null terminated so add extra byte. */
3264 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3265 if (!ost->enc_ctx->subtitle_header)
3266 return AVERROR(ENOMEM);
3267 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3268 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3270 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3271 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* default audio bitrate of 128k when the user gave none */
3272 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3274 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3275 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3276 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3278 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3279 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3280 if (!ost->enc_ctx->hw_frames_ctx)
3281 return AVERROR(ENOMEM);
3284 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3285 if (ret == AVERROR_EXPERIMENTAL)
3286 abort_codec_experimental(codec, 1);
3287 snprintf(error, error_len,
3288 "Error while opening encoder for output stream #%d:%d - "
3289 "maybe incorrect parameters such as bit_rate, rate, width or height",
3290 ost->file_index, ost->index);
/* fixed-frame-size audio encoders need the buffersink to batch samples */
3293 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3294 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3295 av_buffersink_set_frame_size(ost->filter->filter,
3296 ost->enc_ctx->frame_size);
3297 assert_avoptions(ost->encoder_opts);
3298 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3299 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3300 " It takes bits/s as argument, not kbits/s\n");
3302 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3304 av_log(NULL, AV_LOG_FATAL,
3305 "Error initializing the output stream codec context.\n");
3309 * FIXME: ost->st->codec should't be needed here anymore.
3311 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* copy the encoder's coded side data onto the stream for the muxer */
3315 if (ost->enc_ctx->nb_coded_side_data) {
3318 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3319 sizeof(*ost->st->side_data));
3320 if (!ost->st->side_data)
3321 return AVERROR(ENOMEM);
3323 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3324 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3325 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3327 sd_dst->data = av_malloc(sd_src->size);
3329 return AVERROR(ENOMEM);
3330 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3331 sd_dst->size = sd_src->size;
3332 sd_dst->type = sd_src->type;
3333 ost->st->nb_side_data++;
3337 // copy timebase while removing common factors
3338 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3339 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3341 // copy estimated duration as a hint to the muxer
3342 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3343 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3345 ost->st->codec->codec= ost->enc_ctx->codec;
3346 } else if (ost->stream_copy) {
3347 ret = init_output_stream_streamcopy(ost);
3352 * FIXME: will the codec context used by the parser during streamcopy
3353 * This should go away with the new parser API.
3355 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3360 // parse user provided disposition, and update stream values
3361 if (ost->disposition) {
/* local option table so av_opt_eval_flags can parse "-disposition" names */
3362 static const AVOption opts[] = {
3363 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3364 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3365 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3366 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3367 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3368 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3369 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3370 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3371 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3372 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3373 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3374 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3375 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3376 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3379 static const AVClass class = {
3381 .item_name = av_default_item_name,
3383 .version = LIBAVUTIL_VERSION_INT,
3385 const AVClass *pclass = &class;
3387 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3392 /* initialize bitstream filters for the output stream
3393 * needs to be done here, because the codec id for streamcopy is not
3394 * known until now */
3395 ret = init_output_bsfs(ost);
3399 ost->initialized = 1;
3401 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn once per stream index about a stream that appeared mid-file in an
 * input (e.g. a new MPEG-TS program). nb_streams_warn remembers the highest
 * stream index already reported so the warning is not repeated. */
3408 static void report_new_stream(int input_index, AVPacket *pkt)
3410 InputFile *file = input_files[input_index];
3411 AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this (or a later) stream index */
3413 if (pkt->stream_index < file->nb_streams_warn)
3415 av_log(file->ctx, AV_LOG_WARNING,
3416 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3417 av_get_media_type_string(st->codecpar->codec_type),
3418 input_index, pkt->stream_index,
3419 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3420 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time global initialization before the transcode main loop:
 *  - resolve source_index for outputs fed by single-input filtergraphs,
 *  - set up framerate emulation clocks (-re),
 *  - special-case hwaccel (QSV/CUVID) transcoding setup,
 *  - open decoders, then encoders,
 *  - discard programs with no used streams,
 *  - write headers for stream-less outputs,
 *  - dump the final stream mapping at AV_LOG_INFO.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): several guard/brace lines are elided in this listing. */
3423 static int transcode_init(void)
3425     int ret = 0, i, j, k;
3426     AVFormatContext *oc;
3429     char error[1024] = {0};
/* For outputs driven by a complex filtergraph with exactly one input,
 * record which input stream ultimately feeds them (used for reporting). */
3431     for (i = 0; i < nb_filtergraphs; i++) {
3432         FilterGraph *fg = filtergraphs[i];
3433         for (j = 0; j < fg->nb_outputs; j++) {
3434             OutputFilter *ofilter = fg->outputs[j];
3435             if (!ofilter->ost || ofilter->ost->source_index >= 0)
3437             if (fg->nb_inputs != 1)
3439             for (k = nb_input_streams-1; k >= 0 ; k--)
3440                 if (fg->inputs[0]->ist == input_streams[k])
3442             ofilter->ost->source_index = k;
3446     /* init framerate emulation */
3447     for (i = 0; i < nb_input_files; i++) {
3448         InputFile *ifile = input_files[i];
3449         if (ifile->rate_emu)
3450             for (j = 0; j < ifile->nb_streams; j++)
/* Stamp the wall-clock start so get_input_packet() can throttle reads. */
3451                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3454     /* hwaccel transcoding */
3455     for (i = 0; i < nb_output_streams; i++) {
3456         ost = output_streams[i];
3458         if (!ost->stream_copy) {
3460             if (qsv_transcode_init(ost))
3465             if (cuvid_transcode_init(ost))
3471     /* init input streams */
3472     for (i = 0; i < nb_input_streams; i++)
3473         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On decoder-open failure, close any encoder contexts before bailing out. */
3474             for (i = 0; i < nb_output_streams; i++) {
3475                 ost = output_streams[i];
3476                 avcodec_close(ost->enc_ctx);
3481     /* open each encoder */
3482     for (i = 0; i < nb_output_streams; i++) {
3483         ret = init_output_stream(output_streams[i], error, sizeof(error));
3488     /* discard unused programs */
3489     for (i = 0; i < nb_input_files; i++) {
3490         InputFile *ifile = input_files[i];
3491         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3492             AVProgram *p = ifile->ctx->programs[j];
3493             int discard = AVDISCARD_ALL;
/* Keep the program if any of its streams is actually used. */
3495             for (k = 0; k < p->nb_stream_indexes; k++)
3496                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3497                     discard = AVDISCARD_DEFAULT;
3500             p->discard = discard;
3504     /* write headers for files with no streams */
3505     for (i = 0; i < nb_output_files; i++) {
3506         oc = output_files[i]->ctx;
3507         if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3508             ret = check_init_output_file(output_files[i], i);
3515     /* dump the stream mapping */
3516     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3517     for (i = 0; i < nb_input_streams; i++) {
3518         ist = input_streams[i];
3520         for (j = 0; j < ist->nb_filters; j++) {
/* Complex filtergraph inputs: "Stream #f:s (dec) -> filtername". */
3521             if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3522                 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3523                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3524                        ist->filters[j]->name);
3525                 if (nb_filtergraphs > 1)
3526                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3527                 av_log(NULL, AV_LOG_INFO, "\n");
3532     for (i = 0; i < nb_output_streams; i++) {
3533         ost = output_streams[i];
3535         if (ost->attachment_filename) {
3536             /* an attached file */
3537             av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3538                    ost->attachment_filename, ost->file_index, ost->index);
3542         if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3543             /* output from a complex graph */
3544             av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3545             if (nb_filtergraphs > 1)
3546                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3548             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3549                    ost->index, ost->enc ? ost->enc->name : "?");
/* Simple 1:1 mapping: "Stream #f:s -> #f:s". */
3553         av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3554                input_streams[ost->source_index]->file_index,
3555                input_streams[ost->source_index]->st->index,
3558         if (ost->sync_ist != input_streams[ost->source_index])
3559             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3560                    ost->sync_ist->file_index,
3561                    ost->sync_ist->st->index);
3562         if (ost->stream_copy)
3563             av_log(NULL, AV_LOG_INFO, " (copy)");
/* Transcoding: report codec (id) vs. implementation names; when they
 * match, print the implementation as "native". */
3565             const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3566             const AVCodec *out_codec = ost->enc;
3567             const char *decoder_name = "?";
3568             const char *in_codec_name = "?";
3569             const char *encoder_name = "?";
3570             const char *out_codec_name = "?";
3571             const AVCodecDescriptor *desc;
3574                 decoder_name = in_codec->name;
3575                 desc = avcodec_descriptor_get(in_codec->id);
3577                     in_codec_name = desc->name;
3578                 if (!strcmp(decoder_name, in_codec_name))
3579                     decoder_name = "native";
3583                 encoder_name = out_codec->name;
3584                 desc = avcodec_descriptor_get(out_codec->id);
3586                     out_codec_name = desc->name;
3587                 if (!strcmp(encoder_name, out_codec_name))
3588                     encoder_name = "native";
3591             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3592                    in_codec_name, decoder_name,
3593                    out_codec_name, encoder_name);
3595         av_log(NULL, AV_LOG_INFO, "\n");
/* Error path target (label elided in this listing): report and return. */
3599         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3603     transcode_init_done = 1;
3608 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3609 static int need_output(void)
3613     for (i = 0; i < nb_output_streams; i++) {
3614         OutputStream *ost = output_streams[i];
3615         OutputFile *of = output_files[ost->file_index];
3616         AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are finished or whose file hit -fs (limit_filesize). */
3618         if (ost->finished ||
3619             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames reached: close every stream of this output file.
 * NOTE(review): the "return 1" for still-active streams is elided here. */
3621         if (ost->frame_number >= ost->max_frames) {
3623             for (j = 0; j < of->ctx->nb_streams; j++)
3624                 close_output_stream(output_streams[of->ost_index + j]);
3635  * Select the output stream to process.
3637  * @return selected output stream, or NULL if none available
3639 static OutputStream *choose_output(void)
3642     int64_t opts_min = INT64_MAX;
3643     OutputStream *ost_min = NULL;
3645     for (i = 0; i < nb_output_streams; i++) {
3646         OutputStream *ost = output_streams[i];
/* Compare streams by current DTS rescaled to a common time base;
 * a stream with no DTS yet sorts first (INT64_MIN). */
3647         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3648                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3650         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3651             av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Pick the unfinished stream that is furthest behind; an "unavailable"
 * stream (input temporarily EAGAIN) yields NULL so the caller waits. */
3653         if (!ost->finished && opts < opts_min) {
3655             ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin; used around
 * interactive command entry. Silently a no-op if stdin is not a tty. */
3661 static void set_tty_echo(int on)
3665     if (tcgetattr(0, &tty) == 0) {
3666         if (on) tty.c_lflag |= ECHO;
3667         else    tty.c_lflag &= ~ECHO;
3668         tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms) and act on interactive keys:
 * q quits, +/- change log level, s toggles QP histogram, c/C send or
 * queue filtergraph commands, d/D cycle codec debug flags, ? / h print
 * help. Returns AVERROR_EXIT to stop transcoding, 0 otherwise.
 * NOTE(review): several guards/braces are elided in this listing. */
3673 static int check_keyboard_interaction(int64_t cur_time)
3676     static int64_t last_time;
3677     if (received_nb_signals)
3678         return AVERROR_EXIT;
3679     /* read_key() returns 0 on EOF */
3680     if(cur_time - last_time >= 100000 && !run_as_daemon){
3682         last_time = cur_time;
3686         return AVERROR_EXIT;
3687     if (key == '+') av_log_set_level(av_log_get_level()+10);
3688     if (key == '-') av_log_set_level(av_log_get_level()-10);
3689     if (key == 's') qp_hist     ^= 1;
/* 'h' cycles: dump off -> packet dump -> packet+payload hex dump. */
3692             do_hex_dump = do_pkt_dump = 0;
3693         } else if(do_pkt_dump){
3697         av_log_set_level(AV_LOG_DEBUG);
3699     if (key == 'c' || key == 'C'){
3700         char buf[4096], target[64], command[256], arg[256] = {0};
3703         fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read one command line, echoing enabled via set_tty_echo() (elided). */
3706         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3711         fprintf(stderr, "\n");
3713             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3714             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3715                    target, time, command, arg);
3716             for (i = 0; i < nb_filtergraphs; i++) {
3717                 FilterGraph *fg = filtergraphs[i];
/* time < 0: send immediately ('c' targets one filter, 'C' all). */
3720                         ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3721                                                           key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3722                         fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3723                     } else if (key == 'c') {
3724                         fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3725                         ret = AVERROR_PATCHWELCOME;
/* time >= 0: queue the command for later execution at that timestamp. */
3727                         ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3729                             fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3734             av_log(NULL, AV_LOG_ERROR,
3735                    "Parse error, at least 3 arguments were expected, "
3736                    "only %d given in string '%s'\n", n, buf);
3739     if (key == 'd' || key == 'D'){
/* 'D' cycles to the next debug flag bit; visualization flags that would
 * crash are skipped below. */
3742             debug = input_streams[0]->st->codec->debug<<1;
3743             if(!debug) debug = 1;
3744             while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
/* 'd' prompts for a numeric debug value instead. */
3751             while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3756             fprintf(stderr, "\n");
3757             if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3758                 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3760         for(i=0;i<nb_input_streams;i++) {
3761             input_streams[i]->st->codec->debug = debug;
3763         for(i=0;i<nb_output_streams;i++) {
3764             OutputStream *ost = output_streams[i];
3765             ost->enc_ctx->debug = debug;
3767         if(debug) av_log_set_level(AV_LOG_DEBUG);
3768         fprintf(stderr,"debug=%d\n", debug);
3771         fprintf(stderr, "key    function\n"
3772                         "?      show this help\n"
3773                         "+      increase verbosity\n"
3774                         "-      decrease verbosity\n"
3775                         "c      Send command to first matching filter supporting it\n"
3776                         "C      Send/Queue command to all matching filters\n"
3777                         "D      cycle through available debug modes\n"
3778                         "h      dump packets/hex press to cycle through the 3 states\n"
3780                         "s      Show QP histogram\n"
/* Demuxer thread body (one per input file when several inputs are used):
 * reads packets with av_read_frame() and forwards them to the main thread
 * through f->in_thread_queue. Read errors (including EOF) are propagated
 * to the receiver via av_thread_message_queue_set_err_recv(). */
3787 static void *input_thread(void *arg)
3790     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3795         ret = av_read_frame(f->ctx, &pkt);
3797         if (ret == AVERROR(EAGAIN)) {
/* Fatal/EOF read error: tell the receiving side and exit the loop
 * (loop boundaries are elided in this listing). */
3802             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3805         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send found the queue full: fall back to a blocking send
 * and warn the user to raise -thread_queue_size. */
3806         if (flags && ret == AVERROR(EAGAIN)) {
3808             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3809             av_log(f->ctx, AV_LOG_WARNING,
3810                    "Thread message queue blocking; consider raising the "
3811                    "thread_queue_size option (current value: %d)\n",
3812                    f->thread_queue_size);
/* Send failed (e.g. receiver signalled EOF): drop the packet and stop. */
3815             if (ret != AVERROR_EOF)
3816                 av_log(f->ctx, AV_LOG_ERROR,
3817                        "Unable to send packet to main thread: %s\n",
3819             av_packet_unref(&pkt);
3820             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and reap all demuxer threads: signal EOF to the senders, drain and
 * unref any queued packets, join each thread, then free the queues. */
3828 static void free_input_threads(void)
3832     for (i = 0; i < nb_input_files; i++) {
3833         InputFile *f = input_files[i];
3836         if (!f || !f->in_thread_queue)
3838         av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3839         while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3840             av_packet_unref(&pkt);
3842         pthread_join(f->thread, NULL);
3844         av_thread_message_queue_free(&f->in_thread_queue);
/* Spawn one demuxer thread per input file. With a single input file no
 * threads are needed (the early return is elided in this listing).
 * Returns 0 on success or a negative AVERROR. */
3852 static int init_input_threads(void)
3855     for (i = 0; i < nb_input_files; i++) {
3856         InputFile *f = input_files[i];
/* Non-seekable (live) inputs — except lavfi — use non-blocking queue
 * sends so a slow consumer does not stall the demuxer. */
3858         if (f->ctx->pb ? !f->ctx->pb->seekable :
3859             strcmp(f->ctx->iformat->name, "lavfi"))
3860             f->non_blocking = 1;
3861         ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3862                                             f->thread_queue_size, sizeof(AVPacket));
3866         if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3867             av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3868             av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, hence AVERROR(ret). */
3869             return AVERROR(ret);
/* Multi-threaded variant of get_input_packet(): receive one packet from
 * this file's demuxer-thread queue, non-blocking when f->non_blocking
 * (the ternary's condition line is elided in this listing). */
3875 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3877     return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3879                                         AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Read the next packet from input file f. With -re (rate emulation),
 * return EAGAIN while any stream is ahead of wall-clock time so the
 * input is consumed at its native rate. */
3883 static int get_input_packet(InputFile *f, AVPacket *pkt)
3887         for (i = 0; i < f->nb_streams; i++) {
3888             InputStream *ist = input_streams[f->ist_index + i];
/* Compare stream DTS (in microseconds) against elapsed wall clock;
 * the pts > now comparison itself is elided in this listing. */
3889             int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3890             int64_t now = av_gettime_relative() - ist->start;
3892                 return AVERROR(EAGAIN);
/* Multiple inputs use per-file demuxer threads; single input reads
 * directly from the demuxer. */
3897     if (nb_input_files > 1)
3898         return get_input_packet_mt(f, pkt);
3900     return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream was marked unavailable (its input
 * returned EAGAIN this round); the returns themselves are elided here. */
3903 static int got_eagain(void)
3906     for (i = 0; i < nb_output_streams; i++)
3907         if (output_streams[i]->unavailable)
/* Clear all per-file eagain and per-stream unavailable flags before the
 * next scheduling round of the main loop. */
3912 static void reset_eagain(void)
3915     for (i = 0; i < nb_input_files; i++)
3916         input_files[i]->eagain = 0;
3917     for (i = 0; i < nb_output_streams; i++)
3918         output_streams[i]->unavailable = 0;
3921 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3922 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3923                                AVRational time_base)
/* Fast path (guard elided): if *duration is unset, take tmp directly. */
3929         return tmp_time_base;
/* Otherwise keep whichever of the two durations is larger when compared
 * in their respective time bases. */
3932     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3935         return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to start_time, flush
 * all active decoders, then estimate the file's total duration (max_pts -
 * min_pts plus the last frame's length) so looped timestamps can be
 * offset. When an audio stream is present, video last-frame length is
 * ignored since it is not well defined. Returns 0 or a negative AVERROR. */
3941 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3944     AVCodecContext *avctx;
3945     int i, ret, has_audio = 0;
3946     int64_t duration = 0;
3948     ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush decoders and detect whether decoded audio exists. */
3952     for (i = 0; i < ifile->nb_streams; i++) {
3953         ist   = input_streams[ifile->ist_index + i];
3954         avctx = ist->dec_ctx;
3957         if (ist->decoding_needed) {
3958             process_input_packet(ist, NULL, 1);
3959             avcodec_flush_buffers(avctx);
3962         /* duration is the length of the last frame in a stream
3963          * when audio stream is present we don't care about
3964          * last video frame length because it's not defined exactly */
3965         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's last-frame duration and fold the
 * stream's total duration into ifile->duration via duration_max(). */
3969     for (i = 0; i < ifile->nb_streams; i++) {
3970         ist   = input_streams[ifile->ist_index + i];
3971         avctx = ist->dec_ctx;
3974             if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3975                 AVRational sample_rate = {1, avctx->sample_rate};
3977                 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: derive last-frame length from -r, then avg_frame_rate, else 1. */
3981                 if (ist->framerate.num) {
3982                     duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3983                 } else if (ist->st->avg_frame_rate.num) {
3984                     duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3985                 } else duration = 1;
3987         if (!ifile->duration)
3988             ifile->time_base = ist->st->time_base;
3989         /* the total duration of the stream, max_pts - min_pts is
3990          * the duration of the stream without the last frame */
3991         duration += ist->max_pts - ist->min_pts;
3992         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Finite loop count: decrement remaining iterations (statement elided). */
3996     if (ifile->loop > 0)
/* Read and process one packet from input file file_index: handle EOF and
 * -stream_loop rewinds, timestamp wrap correction, stream-global side
 * data injection, ts_offset / ts_scale application, and DTS/PTS
 * discontinuity detection, then hand the packet to process_input_packet().
 * NOTE(review): this logic is highly order-sensitive; several guard and
 * brace lines are elided in this listing. */
4004  *   - 0 -- one packet was read and processed
4005  *   - AVERROR(EAGAIN) -- no packets were available for selected file,
4006  *     this function should be called again
4007  *   - AVERROR_EOF -- this function should not be called again
4009 static int process_input(int file_index)
4011     InputFile *ifile = input_files[file_index];
4012     AVFormatContext *is;
4020     ret = get_input_packet(ifile, &pkt);
4022     if (ret == AVERROR(EAGAIN)) {
/* Read failed/EOF: with -stream_loop still pending, seek back to the
 * start and retry the read once. */
4026     if (ret < 0 && ifile->loop) {
4027         if ((ret = seek_to_start(ifile, is)) < 0)
4029         ret = get_input_packet(ifile, &pkt);
4030         if (ret == AVERROR(EAGAIN)) {
4036         if (ret != AVERROR_EOF) {
4037             print_error(is->filename, ret);
/* True EOF: flush every decoder of this file ... */
4042         for (i = 0; i < ifile->nb_streams; i++) {
4043             ist = input_streams[ifile->ist_index + i];
4044             if (ist->decoding_needed) {
4045                 ret = process_input_packet(ist, NULL, 0);
4050             /* mark all outputs that don't go through lavfi as finished */
4051             for (j = 0; j < nb_output_streams; j++) {
4052                 OutputStream *ost = output_streams[j];
4054                 if (ost->source_index == ifile->ist_index + i &&
4055                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4056                     finish_output_stream(ost);
/* ... then report EOF as EAGAIN so other inputs keep being serviced. */
4060         ifile->eof_reached = 1;
4061         return AVERROR(EAGAIN);
4067         av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4068                          is->streams[pkt.stream_index]);
4070     /* the following test is needed in case new streams appear
4071        dynamically in stream : we ignore them */
4072     if (pkt.stream_index >= ifile->nb_streams) {
4073         report_new_stream(file_index, &pkt);
4074         goto discard_packet;
4077     ist = input_streams[ifile->ist_index + pkt.stream_index];
4079     ist->data_size += pkt.size;
/* Stream is unselected/discarded (guard elided): drop the packet. */
4083         goto discard_packet;
4085     if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4086         av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4091         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4092                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4093                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4094                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4095                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4096                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4097                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4098                av_ts2str(input_files[ist->file_index]->ts_offset),
4099                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64 pts_wrap_bits. */
4102     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits<64){
4103         int64_t stime, stime2;
4104         // Correcting starttime based on the enabled streams
4105         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4106         //       so we instead do it here as part of discontinuity handling
4107         if (   ist->next_dts == AV_NOPTS_VALUE
4108             && ifile->ts_offset == -is->start_time
4109             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4110             int64_t new_start_time = INT64_MAX;
4111             for (i=0; i<is->nb_streams; i++) {
4112                 AVStream *st = is->streams[i];
4113                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4115                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4117             if (new_start_time > is->start_time) {
4118                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4119                 ifile->ts_offset = -new_start_time;
4123         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4124         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4125         ist->wrap_correction_done = 1;
/* If a timestamp is more than half the wrap range past start, unwrap it
 * and keep correction pending for subsequent packets. */
4127         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4128             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4129             ist->wrap_correction_done = 0;
4131         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4132             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4133             ist->wrap_correction_done = 0;
4137     /* add the stream-global side data to the first packet */
4138     if (ist->nb_packets == 1) {
4139         if (ist->st->nb_side_data)
4140             av_packet_split_side_data(&pkt);
4141         for (i = 0; i < ist->st->nb_side_data; i++) {
4142             AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Skip side data the packet already carries, and display matrices when
 * autorotate is handled by the filter chain. */
4145             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4147             if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4150             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4154             memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset, then per-stream -itsscale. */
4158     if (pkt.dts != AV_NOPTS_VALUE)
4159         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4160     if (pkt.pts != AV_NOPTS_VALUE)
4161         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4163     if (pkt.pts != AV_NOPTS_VALUE)
4164         pkt.pts *= ist->ts_scale;
4165     if (pkt.dts != AV_NOPTS_VALUE)
4166         pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity: first DTS of this stream vs. the file's
 * last seen timestamp (only for AVFMT_TS_DISCONT formats, without -copyts). */
4168     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4169     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4170          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4171         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4172         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4173         int64_t delta   = pkt_dts - ifile->last_ts;
4174         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4175             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
4176             ifile->ts_offset -= delta;
4177             av_log(NULL, AV_LOG_DEBUG,
4178                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4179                    delta, ifile->ts_offset);
4180             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4181             if (pkt.pts != AV_NOPTS_VALUE)
4182                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and
 * track min/max pts for the next loop's duration estimate. */
4186     duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4187     if (pkt.pts != AV_NOPTS_VALUE) {
4188         pkt.pts += duration;
4189         ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4190         ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4193     if (pkt.dts != AV_NOPTS_VALUE)
4194         pkt.dts += duration;
/* Intra-stream discontinuity: packet DTS vs. the predicted next DTS. */
4196     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4197     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4198          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4199         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4201         int64_t delta   = pkt_dts - ist->next_dts;
4202         if (is->iformat->flags & AVFMT_TS_DISCONT) {
4203             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4204                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
4205                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4206                 ifile->ts_offset -= delta;
4207                 av_log(NULL, AV_LOG_DEBUG,
4208                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4209                        delta, ifile->ts_offset);
4210                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4211                 if (pkt.pts != AV_NOPTS_VALUE)
4212                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-DISCONT formats: drop timestamps that are wildly out of range. */
4215             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4216                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4217                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4218                 pkt.dts = AV_NOPTS_VALUE;
4220             if (pkt.pts != AV_NOPTS_VALUE){
4221                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4222                 delta   = pkt_pts - ist->next_dts;
4223                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4224                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4225                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4226                     pkt.pts = AV_NOPTS_VALUE;
/* Remember the last DTS of this file for inter-stream checks above. */
4232     if (pkt.dts != AV_NOPTS_VALUE)
4233         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4236         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4237                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4238                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4239                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4240                av_ts2str(input_files[ist->file_index]->ts_offset),
4241                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4244     sub2video_heartbeat(ist, pkt.pts);
4246     process_input_packet(ist, &pkt, 0);
/* discard_packet: (label elided) release the packet in all cases. */
4249     av_packet_unref(&pkt);
4255  * Perform a step of transcoding for the specified filter graph.
4257  * @param[in]  graph     filter graph to consider
4258  * @param[out] best_ist  input stream where a frame would allow to continue
4259  * @return  0 for success, <0 for error
4261 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4264     int nb_requests, nb_requests_max = 0;
4265     InputFilter *ifilter;
/* Ask the graph to produce output from its oldest queued frame.
 * Success -> reap filtered frames; EOF -> final reap and close outputs;
 * anything but EAGAIN is a hard error. */
4269     ret = avfilter_graph_request_oldest(graph->graph);
4271         return reap_filters(0);
4273     if (ret == AVERROR_EOF) {
4274         ret = reap_filters(1);
4275         for (i = 0; i < graph->nb_outputs; i++)
4276             close_output_stream(graph->outputs[i]->ost);
4279     if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph is starved. Pick the input (not itself stalled or at
 * EOF) with the most failed buffersrc requests as the best one to feed. */
4282     for (i = 0; i < graph->nb_inputs; i++) {
4283         ifilter = graph->inputs[i];
4285         if (input_files[ist->file_index]->eagain ||
4286             input_files[ist->file_index]->eof_reached)
4288         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4289         if (nb_requests > nb_requests_max) {
4290             nb_requests_max = nb_requests;
/* No feedable input found: mark all this graph's outputs unavailable so
 * the scheduler moves on. */
4296         for (i = 0; i < graph->nb_outputs; i++)
4297             graph->outputs[i]->ost->unavailable = 1;
4303  * Run a single step of transcoding.
4305  * @return  0 for success, <0 for error
4307 static int transcode_step(void)
/* Pick the most-behind output stream; NULL means nothing is currently
 * schedulable (either wait for EAGAIN inputs or we are done). */
4313     ost = choose_output();
4320         av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Output fed by a filtergraph: let the graph decide which input to read. */
4325         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Otherwise read directly from the output's mapped source stream. */
4330         av_assert0(ost->source_index >= 0);
4331         ist = input_streams[ost->source_index];
4334     ret = process_input(ist->file_index);
4335     if (ret == AVERROR(EAGAIN)) {
4336         if (input_files[ist->file_index]->eagain)
4337             ost->unavailable = 1;
/* EOF from process_input is not an error at this level. */
4342         return ret == AVERROR_EOF ? 0 : ret;
4344     return reap_filters(0);
4348  * The following code is the main loop of the file converter
4350 static int transcode(void)
4353     AVFormatContext *os;
4356     int64_t timer_start;
4357     int64_t total_packets_written = 0;
4359     ret = transcode_init();
4363     if (stdin_interaction) {
4364         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4367     timer_start = av_gettime_relative();
/* With multiple inputs, start one demuxer thread per file (guarded by
 * HAVE_PTHREADS in the full source; the #if lines are elided here). */
4370     if ((ret = init_input_threads()) < 0)
/* Main scheduling loop: run until SIGTERM, user quit, no output needed,
 * or a fatal error from transcode_step(). */
4374     while (!received_sigterm) {
4375         int64_t cur_time= av_gettime_relative();
4377         /* if 'q' pressed, exits */
4378         if (stdin_interaction)
4379             if (check_keyboard_interaction(cur_time) < 0)
4382         /* check if there's any stream where output is still needed */
4383         if (!need_output()) {
4384             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4388         ret = transcode_step();
4389         if (ret < 0 && ret != AVERROR_EOF) {
4391             av_strerror(ret, errbuf, sizeof(errbuf));
4393             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4397         /* dump report by using the output first video and audio streams */
4398         print_report(0, timer_start, cur_time);
4401     free_input_threads();
4404     /* at the end of stream, we must flush the decoder buffers */
4405     for (i = 0; i < nb_input_streams; i++) {
4406         ist = input_streams[i];
4407         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4408             process_input_packet(ist, NULL, 0);
4415     /* write the trailer if needed and close file */
4416     for (i = 0; i < nb_output_files; i++) {
4417         os = output_files[i]->ctx;
4418         if (!output_files[i]->header_written) {
4419             av_log(NULL, AV_LOG_ERROR,
4420                    "Nothing was written into output file %d (%s), because "
4421                    "at least one of its streams received no packets.\n",
4425         if ((ret = av_write_trailer(os)) < 0) {
4426             av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4432     /* dump report by using the first video and audio streams */
4433     print_report(1, timer_start, av_gettime_relative());
4435     /* close each encoder */
4436     for (i = 0; i < nb_output_streams; i++) {
4437         ost = output_streams[i];
4438         if (ost->encoding_needed) {
4439             av_freep(&ost->enc_ctx->stats_in);
4441         total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing at all was muxed. */
4444     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4445         av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4449     /* close each decoder */
4450     for (i = 0; i < nb_input_streams; i++) {
4451         ist = input_streams[i];
4452         if (ist->decoding_needed) {
4453             avcodec_close(ist->dec_ctx);
4454             if (ist->hwaccel_uninit)
4455                 ist->hwaccel_uninit(ist->dec_ctx);
4459     av_buffer_unref(&hw_device_ctx);
/* Error-path cleanup (label elided): stop threads, free per-output-stream
 * allocations; most remaining teardown happens in ffmpeg_cleanup(). */
4466     free_input_threads();
4469     if (output_streams) {
4470         for (i = 0; i < nb_output_streams; i++) {
4471             ost = output_streams[i];
4474                 if (fclose(ost->logfile))
4475                     av_log(NULL, AV_LOG_ERROR,
4476                            "Error closing logfile, loss of information possible: %s\n",
4477                            av_err2str(AVERROR(errno)));
4478                 ost->logfile = NULL;
4480             av_freep(&ost->forced_kf_pts);
4481             av_freep(&ost->apad);
4482             av_freep(&ost->disposition);
4483             av_dict_free(&ost->encoder_opts);
4484             av_dict_free(&ost->sws_dict);
4485             av_dict_free(&ost->swr_opts);
4486             av_dict_free(&ost->resample_opts);
/* User-mode CPU time of this process in microseconds: getrusage() on
 * POSIX, GetProcessTimes() on Windows (100ns ticks / 10), otherwise fall
 * back to wall-clock time. Used for the -benchmark report. */
4494 static int64_t getutime(void)
4497     struct rusage rusage;
4499     getrusage(RUSAGE_SELF, &rusage);
4500     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4501 #elif HAVE_GETPROCESSTIMES
4503     FILETIME c, e, k, u;
4504     proc = GetCurrentProcess();
4505     GetProcessTimes(proc, &c, &e, &k, &u);
4506     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4508     return av_gettime_relative();
/* Peak memory usage of this process in bytes: ru_maxrss (KiB -> bytes) on
 * POSIX, PeakPagefileUsage on Windows; -1 when neither API is available
 * (the fallback branch is elided in this listing). */
4512 static int64_t getmaxrss(void)
4514 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4515     struct rusage rusage;
4516     getrusage(RUSAGE_SELF, &rusage);
4517     return (int64_t)rusage.ru_maxrss * 1024;
4518 #elif HAVE_GETPROCESSMEMORYINFO
4520     PROCESS_MEMORY_COUNTERS memcounters;
4521     proc = GetCurrentProcess();
4522     memcounters.cb = sizeof(memcounters);
4523     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4524     return memcounters.PeakPagefileUsage;
/* No-op log callback installed in -d (daemon) mode to silence av_log
 * output (the empty body is elided in this listing). */
4530 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register cleanup, set up logging, register all
 * codecs/devices/filters/network, parse the command line, validate the
 * input/output configuration, run transcode(), then report benchmarks and
 * decode-error statistics before exiting. */
4534 int main(int argc, char **argv)
4541     register_exit(ffmpeg_cleanup);
4543     setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4545     av_log_set_flags(AV_LOG_SKIP_REPEATED);
4546     parse_loglevel(argc, argv, options);
/* "-d": run as daemon — silence logging via the null callback. */
4548     if(argc>1 && !strcmp(argv[1], "-d")){
4550         av_log_set_callback(log_callback_null);
4555     avcodec_register_all();
4557     avdevice_register_all();
4559     avfilter_register_all();
4561     avformat_network_init();
4563     show_banner(argc, argv, options);
4565     /* parse options and open all input/output files */
4566     ret = ffmpeg_parse_options(argc, argv);
4570     if (nb_output_files <= 0 && nb_input_files == 0) {
4572         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4576     /* file converter / grab */
4577     if (nb_output_files <= 0) {
4578         av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4582 //     if (nb_input_files == 0) {
4583 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Presumably disables stdin interaction for RTP outputs — the body of
 * this loop is elided here; TODO confirm against full source. */
4587     for (i = 0; i < nb_output_files; i++) {
4588         if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4592     current_time = ti = getutime();
4593     if (transcode() < 0)
4595     ti = getutime() - ti;
4597         av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4599     av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4600            decode_error_stat[0], decode_error_stat[1]);
/* Fail if the decode-error ratio exceeds -max_error_rate. */
4601     if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit 255 if terminated by signals; exit_program runs ffmpeg_cleanup. */
4604     exit_program(received_nb_signals ? 255 : main_return_code);
4605     return main_return_code;