2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static unsigned dup_warning = 1000;
130 static int nb_frames_drop = 0;
131 static int64_t decode_error_stat[2];
133 static int want_sdp = 1;
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
138 static uint8_t *subtitle_out;
140 InputStream **input_streams = NULL;
141 int nb_input_streams = 0;
142 InputFile **input_files = NULL;
143 int nb_input_files = 0;
145 OutputStream **output_streams = NULL;
146 int nb_output_streams = 0;
147 OutputFile **output_files = NULL;
148 int nb_output_files = 0;
150 FilterGraph **filtergraphs;
155 /* init terminal so that we can grab keys */
156 static struct termios oldtty;
157 static int restore_tty;
161 static void free_input_threads(void);
165 Convert subtitles to video with alpha to insert them in filter graphs.
166 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare ist->sub2video.frame as a blank RGB32 canvas onto which bitmap
 * subtitles will be rendered.  The canvas is sized from the subtitle
 * decoder's dimensions, falling back to the configured sub2video size.
 * Returns 0 on success, a negative AVERROR on allocation failure.
 * NOTE(review): some lines of this function are elided in this view. */
static int sub2video_get_blank_frame(InputStream *ist)
    AVFrame *frame = ist->sub2video.frame;
    /* drop any buffer still referenced from the previous update */
    av_frame_unref(frame);
    ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
    /* clear to fully transparent (all-zero RGB32) */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one subtitle rectangle onto the RGB32 canvas.
 * dst/dst_linesize describe the canvas of size w x h; the rectangle r must
 * be a paletted SUBTITLE_BITMAP whose data[1] holds the RGB32 palette.
 * Rectangles that are not bitmaps or fall outside the canvas are skipped
 * with a warning. */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
    uint32_t *pal, *dst2;
    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
            r->x, r->y, r->w, r->h, w, h
    /* advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel) */
    dst += r->y * dst_linesize + r->x * 4;
    pal = (uint32_t *)r->data[1];
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        /* expand each palette index to its RGB32 palette entry */
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        src += r->linesize[0];
/* Send the current sub2video canvas frame, stamped with pts, into every
 * filtergraph input fed by this stream.  KEEP_REF preserves our reference
 * so the same frame can be re-sent by the heartbeat mechanism. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
    AVFrame *frame = ist->sub2video.frame;
    av_assert1(frame->data[0]);
    /* remember the last pts pushed so heartbeats don't go backwards */
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto a fresh blank canvas and push it into the
 * filtergraphs.  A NULL sub produces a clearing (empty) frame; its pts is
 * taken from the previous subtitle's end time.  Display times are given in
 * ms relative to sub->pts and are rescaled to the stream time base.
 * NOTE(review): some lines are elided in this view. */
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
    AVFrame *frame = ist->sub2video.frame;
    int64_t pts, end_pts;
    pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                             AV_TIME_BASE_Q, ist->st->time_base);
    end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                             AV_TIME_BASE_Q, ist->st->time_base);
    num_rects = sub->num_rects;
        /* sub == NULL: emit a clearing frame at the previous end time */
        pts = ist->sub2video.end_pts;
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    /* paint every rectangle of the subtitle onto the canvas */
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
/* Keep subtitle filter inputs fed while other streams of the same input
 * file advance, so filters (e.g. overlay) waiting on a subtitle input do
 * not stall the graph.  pts is in ist's stream time base.
 * NOTE(review): some lines are elided in this view. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    InputFile *infile = input_files[ist->file_index];
    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
        /* current subtitle expired, or no frame ever rendered: emit a clear */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        /* only re-push if the downstream filters actually asked for input */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
            sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: emit one final clearing
 * frame if a subtitle is still displayed, then signal EOF (NULL frame) to
 * every filtergraph input fed by this stream. */
static void sub2video_flush(InputStream *ist)
    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
/* Restore the saved terminal attributes; async-signal-safe so it can run
 * from a signal handler.  The trailing empty av_log flushes any partial
 * log line. */
static void term_exit_sigsafe(void)
    tcsetattr (0, TCSANOW, &oldtty);
    av_log(NULL, AV_LOG_QUIET, "%s", "");
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
/* Signal handler: record which signal was received and how many times.
 * After more than 3 signals, abandon graceful shutdown and hard-exit;
 * only async-signal-safe calls (write) are used here. */
sigterm_handler(int sig)
    received_sigterm = sig;
    received_nb_signals++;
    if(received_nb_signals > 3) {
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signal path as sigterm_handler.
 * NOTE(review): some case labels/returns are elided in this view. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    /* (interior of term_init) Put the controlling terminal into raw-ish mode
     * for single-key interaction, then install signal handlers so Ctrl-C &co.
     * trigger a graceful shutdown.  NOTE(review): the function's opening and
     * several lines are elided in this view. */
    if (!run_as_daemon && stdin_interaction) {
        if (tcgetattr (0, &tty) == 0) {
            /* disable input translation/flow control; keep output processing */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                            |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tcsetattr (0, TCSANOW, &tty);
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
    signal(SIGXCPU, sigterm_handler);
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
/* Poll stdin for a single keypress without blocking.  Uses select() on
 * POSIX and PeekNamedPipe/console APIs on Windows.
 * NOTE(review): most of this function is elided in this view. */
static int read_key(void)
    n = select(1, &rfds, NULL, NULL, &tv);
# if HAVE_PEEKNAMEDPIPE
    static HANDLE input_handle;
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode fails when stdin is a pipe rather than a console */
        is_pipe = !GetConsoleMode(input_handle, &dw);
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has been
 * received (two signals are required after init has completed, since
 * transcode_init_done is then 1). */
static int decode_interrupt_cb(void *ctx)
    return received_nb_signals > transcode_init_done;
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, run at exit: free filtergraphs, output files/streams,
 * input files/streams and all per-stream buffers, close the vstats file,
 * and log the final status.  ret is the intended process exit status.
 * NOTE(review): some lines are elided in this view. */
static void ffmpeg_cleanup(int ret)
    int maxrss = getmaxrss() / 1024;
    av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    /* filtergraphs: free graph, then each input/output pad's resources */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]->formats);
            av_freep(&fg->outputs[j]->channel_layouts);
            av_freep(&fg->outputs[j]->sample_rates);
            av_freep(&fg->outputs[j]);
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);
        av_freep(&filtergraphs[i]);
    av_freep(&filtergraphs);
    av_freep(&subtitle_out);
    /* close output files (closing the underlying I/O only when the muxer
       owns a real file) */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
        avformat_free_context(s);
        av_dict_free(&of->opts);
        av_freep(&output_files[i]);
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);
        av_freep(&ost->bsf_extradata_updated);
        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);
        av_parser_close(ost->parser);
        avcodec_free_context(&ost->parser_avctx);
        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);
        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;
        av_dict_free(&ost->sws_dict);
        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);
        /* drain and free any packets still buffered for the muxer */
        while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            av_packet_unref(&pkt);
        av_fifo_freep(&ost->muxing_queue);
        av_freep(&output_streams[i]);
    free_input_threads();
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];
        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);
        avcodec_free_context(&ist->dec_ctx);
        av_freep(&input_streams[i]);
    /* fclose flushes; a failure here can mean lost statistics */
    if (fclose(vstats_file))
        av_log(NULL, AV_LOG_ERROR,
               "Error closing vstats file, loss of information possible: %s\n",
               av_err2str(AVERROR(errno)));
    av_freep(&vstats_filename);
    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);
    avformat_network_deinit();
    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && transcode_init_done) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to strip options already consumed by one component before passing
 * the remainder on. */
void remove_avoptions(AVDictionary **a, AVDictionary *b)
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
        av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed — i.e. the
 * user supplied an option no component recognized. */
void assert_avoptions(AVDictionary *m)
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Fatal-exit helper used when an experimental codec is selected without
 * the required strictness level.  NOTE(review): body elided in this view. */
static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the user-CPU time consumed since the previous
 * call, labelled by the printf-style fmt.  A NULL fmt only resets the
 * reference time.  NOTE(review): some lines are elided in this view. */
static void update_benchmark(const char *fmt, ...)
    if (do_benchmark_all) {
        int64_t t = getutime();
            vsnprintf(buf, sizeof(buf), fmt, va);
            av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: the given stream with this_stream,
 * all remaining streams with others. */
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one encoded/copied packet to the muxer.  Before the header is
 * written, packets are buffered in a bounded FIFO; afterwards timestamps
 * are sanity-checked (DTS<=PTS, monotonic DTS) and rescaled, and the
 * packet is interleaved-written.  Takes ownership of pkt's payload.
 * NOTE(review): some lines are elided in this view. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    if (!of->header_written) {
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* grow the queue, but never beyond max_muxing_queue_size */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Too many packets buffered for output stream %d:%d.\n",
                       ost->file_index, ost->st->index);
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
        av_packet_move_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
    /* in drop/async modes the muxer must generate timestamps itself */
    if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* pull encoder quality/PSNR stats out of packet side data */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   ost->file_index, ost->st->index);
            /* replace dts by the median of {pts, dts, last_mux_dts+1} */
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* strict muxers require strictly increasing dts */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
    ost->last_mux_dts = pkt->dts;
    ost->data_size += pkt->size;
    ost->packets_written++;
    pkt->stream_index = ost->index;
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
    ret = av_interleaved_write_frame(s, pkt);
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    av_packet_unref(pkt);
/* Mark one output stream's encoder finished and, where applicable, clamp
 * the file's recording time to this stream's current end time so other
 * streams stop at the same point.
 * NOTE(review): the guard around the clamp is elided in this view. */
static void close_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];
    ost->finished |= ENCODER_FINISHED;
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
/* Run a packet through the output stream's bitstream-filter chain (if any)
 * and hand each resulting packet to write_packet().  With no filters the
 * packet goes straight to the muxer.
 * NOTE(review): some lines (loop structure, error exits) are elided. */
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
    /* apply the output bitstream filters, if any */
    if (ost->nb_bitstream_filters) {
        av_packet_split_side_data(pkt);
        ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
            /* get a packet from the previous filter up the chain */
            ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
            if (ret == AVERROR(EAGAIN)) {
            /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
             * the api states this shouldn't happen after init(). Propagate it here to the
             * muxer and to the next filters in the chain to workaround this.
             * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
             * par_out->extradata and adapt muxers accordingly to get rid of this. */
            if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
                ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
                ost->bsf_extradata_updated[idx - 1] |= 1;
            /* send it to the next filter down the chain or to the muxer */
            if (idx < ost->nb_bitstream_filters) {
                /* HACK/FIXME! - See above */
                if (!(ost->bsf_extradata_updated[idx] & 2)) {
                    ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
                    ost->bsf_extradata_updated[idx] |= 2;
                ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
                write_packet(of, pkt, ost);
        write_packet(of, pkt, ost);
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return 0 and close the stream if its current position (sync_opts,
 * relative to first_pts) has reached the output file's -t recording time;
 * otherwise keep encoding. */
static int check_recording_time(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];
    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
/* Encode one audio frame with the send/receive API and mux every packet
 * the encoder produces.  sync_opts is advanced in samples so that frames
 * lacking a pts get monotonically increasing timestamps.
 * NOTE(review): some lines are elided in this view. */
static void do_audio_out(OutputFile *of, OutputStream *ost,
    AVCodecContext *enc = ost->enc_ctx;
    av_init_packet(&pkt);
        if (!check_recording_time(ost))
    /* derive a pts from the running sample count when none is present */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;
    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    ret = avcodec_send_frame(enc, frame);
        /* drain every packet the encoder has ready */
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))
        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
        output_packet(of, &pkt, ost);
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one subtitle and mux it.  DVB subtitles are encoded twice (one
 * packet draws, a second clears); display times are normalized so that
 * start_display_time is 0 as the encoders require.
 * NOTE(review): some lines are elided in this view. */
static void do_subtitle_out(OutputFile *of,
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        /* lazily allocate the shared 1 MiB encode buffer */
        subtitle_out = av_malloc(subtitle_out_max_size);
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;
        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        ost->frames_encoded++;
        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
                pkt.pts += 90 * sub->start_display_time;
                pkt.pts += 90 * sub->end_display_time;
        output_packet(of, &pkt, ost);
/* Encode one video frame, implementing the -vsync policies: decide how
 * many times to emit (or drop) the frame based on the drift between the
 * filtered frame's timestamp (sync_ipts) and the encoder clock
 * (ost->sync_opts), handle forced keyframes, then send/receive with the
 * encoder and mux the packets.  A NULL next_picture flushes by replaying
 * the recent duplication pattern.
 * NOTE(review): many lines are elided in this view. */
static void do_video_out(OutputFile *of,
                         AVFrame *next_picture,
    int ret, format_video_sync;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecParameters *mux_par = ost->st->codecpar;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;
    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];
    /* best-effort estimate of one output frame's duration, in encoder ticks */
    if (filter->inputs[0]->frame_rate.num > 0 &&
        filter->inputs[0]->frame_rate.den > 0)
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
    if (!ost->filters_script &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    if (!next_picture) {
        /* flushing: repeat the median of the recent duplication counts */
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
        delta = delta0 + duration;
        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
        format_video_sync = video_sync_method;
        if (format_video_sync == VSYNC_AUTO) {
            if(!strcmp(of->ctx->oformat->name, "avi")) {
                format_video_sync = VSYNC_VFR;
                format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
                && format_video_sync == VSYNC_CFR
                && input_files[ist->file_index]->ctx->nb_streams == 1
                && input_files[ist->file_index]->input_ts_offset == 0) {
                format_video_sync = VSYNC_VSCFR;
            if (format_video_sync == VSYNC_CFR && copy_ts) {
                format_video_sync = VSYNC_VSCFR;
        ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
            format_video_sync != VSYNC_PASSTHROUGH &&
            format_video_sync != VSYNC_DROP) {
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
                /* frame is in the past: snap it onto the output clock */
                sync_ipts = ost->sync_opts;
        switch (format_video_sync) {
            if (ost->frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                ost->sync_opts = lrint(sync_ipts);
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
            } else if (delta < -1.1)
            else if (delta > 1.1) {
                nb_frames = lrintf(delta);
                    nb0_frames = lrintf(delta0 - 0.6);
            else if (delta > 0.6)
                ost->sync_opts = lrint(sync_ipts);
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = lrint(sync_ipts);
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);
    /* shift the recent-duplication history window */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;
    if (nb0_frames == 0 && ost->last_dropped) {
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        /* NOTE(review): dup_warning is unsigned; %d here relies on the value
           staying within int range — confirm against upstream. */
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
    ost->last_dropped = nb_frames == nb0_frames && next_picture;
    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        av_init_packet(&pkt);
        if (i < nb0_frames && ost->last_frame) {
            /* duplicate of the previous frame */
            in_picture = ost->last_frame;
            in_picture = next_picture;
        in_picture->pts = ost->sync_opts;
        if (!check_recording_time(ost))
        if (ost->frame_number >= ost->max_frames)
#if FF_API_LAVF_FMT_RAWPICTURE
        if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
            if (in_picture->interlaced_frame)
                mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_par->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data = (uint8_t *)in_picture;
            pkt.size = sizeof(AVPicture);
            pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;
            output_packet(of, &pkt, ost);
            int forced_keyframe = 0;
            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;
            if (in_picture->interlaced_frame) {
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                    mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_par->field_order = AV_FIELD_PROGRESSIVE;
            in_picture->quality = enc->global_quality;
            in_picture->pict_type = 0;
            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
            /* forced keyframes: explicit pts list, expression, or "source" */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                ost->forced_kf_index++;
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    forced_keyframe = 1;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;
            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
            update_benchmark(NULL);
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);
            ost->frames_encoded++;
            ret = avcodec_send_frame(enc, in_picture);
                /* drain every packet the encoder has ready */
                ret = avcodec_receive_packet(enc, &pkt);
                update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
                if (ret == AVERROR(EAGAIN))
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
                frame_size = pkt.size;
                output_packet(of, &pkt, ost);
                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
        ost->frame_number++;
        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);
    /* remember this frame so it can be duplicated for VFR gaps */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
        av_frame_free(&ost->last_frame);
    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1326 static double psnr(double d)
1328 return -10.0 * log10(d);
/* Append one line of per-frame statistics for a video stream to the global
 * vstats file (enabled via the -vstats / -vstats_file options).
 * NOTE(review): several interior lines (declarations, error handling, braces)
 * are missing from this extraction; documented from the visible code only. */
1331 static void do_video_stats(OutputStream *ost, int frame_size)
1333 AVCodecContext *enc;
1335 double ti1, bitrate, avg_bitrate;
1337 /* this is executed just the first time do_video_stats is called */
1339 vstats_file = fopen(vstats_filename, "w");
/* Only video streams produce vstats output. */
1347 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1348 frame_number = ost->st->nb_frames;
/* quality is stored as a lambda value; scale back to a QP-like figure */
1349 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1350 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR only printed when the encoder computed error (AV_CODEC_FLAG_PSNR)
 * and the stored error value is valid (non-negative). */
1352 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1353 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1355 fprintf(vstats_file,"f_size= %6d ", frame_size);
1356 /* compute pts value */
1357 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate assumes one frame per encoder time_base tick;
 * avg_bitrate is total stream bytes over elapsed stream time */
1361 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1362 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1363 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1364 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1365 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark a stream as completely finished for both encoding and muxing, and
 * propagate the finished state to every stream of the same output file. */
1369 static void finish_output_stream(OutputStream *ost)
1371 OutputFile *of = output_files[ost->file_index];
1374 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* finishing one stream finishes the whole output file: flag all siblings */
1377 for (i = 0; i < of->ctx->nb_streams; i++)
1378 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1383 * Get and encode new output from any of the filtergraphs, without causing
1386 * @return 0 for success, <0 for severe errors
/* Drain every output stream's buffersink and feed the frames to the video
 * or audio encoder.  With flush set, EOF from a sink triggers a final
 * NULL-frame do_video_out() call to flush the fps conversion logic. */
1388 static int reap_filters(int flush)
1390 AVFrame *filtered_frame = NULL;
1393 /* Reap all buffers present in the buffer sinks */
1394 for (i = 0; i < nb_output_streams; i++) {
1395 OutputStream *ost = output_streams[i];
1396 OutputFile *of = output_files[ost->file_index];
1397 AVFilterContext *filter;
1398 AVCodecContext *enc = ost->enc_ctx;
1403 filter = ost->filter->filter;
/* lazily allocate the per-stream reusable frame */
1405 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1406 return AVERROR(ENOMEM);
1408 filtered_frame = ost->filtered_frame;
1411 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already queued, do not run the graph */
1412 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1413 AV_BUFFERSINK_FLAG_NO_REQUEST);
/* EAGAIN/EOF are expected terminations of the drain loop, not errors */
1415 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1416 av_log(NULL, AV_LOG_WARNING,
1417 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1418 } else if (flush && ret == AVERROR_EOF) {
1419 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1420 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
/* discard frames for streams that already terminated */
1424 if (ost->finished) {
1425 av_frame_unref(filtered_frame);
1428 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1429 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1430 AVRational tb = enc->time_base;
/* widen the time base denominator to keep extra fractional precision
 * for float_pts (used by the fps conversion code) */
1431 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1433 tb.den <<= extra_bits;
1435 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1436 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1437 float_pts /= 1 << extra_bits;
1438 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1439 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* integer pts is rescaled to the encoder time base directly */
1441 filtered_frame->pts =
1442 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1443 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1445 //if (ost->source_index >= 0)
1446 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1448 switch (filter->inputs[0]->type) {
1449 case AVMEDIA_TYPE_VIDEO:
1450 if (!ost->frame_aspect_ratio.num)
1451 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1454 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1455 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1457 enc->time_base.num, enc->time_base.den);
1460 do_video_out(of, ost, filtered_frame, float_pts);
1462 case AVMEDIA_TYPE_AUDIO:
/* the encoder cannot follow a channel-count change unless it advertises
 * AV_CODEC_CAP_PARAM_CHANGE */
1463 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1464 enc->channels != av_frame_get_channels(filtered_frame)) {
1465 av_log(NULL, AV_LOG_ERROR,
1466 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1469 do_audio_out(of, ost, filtered_frame);
1472 // TODO support subtitle filters
1476 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type output sizes and muxing overhead at
 * AV_LOG_INFO, then detailed per-stream demux/decode/encode/mux counters at
 * AV_LOG_VERBOSE.  total_size is the muxed output size in bytes. */
1483 static void print_final_stats(int64_t total_size)
1485 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1486 uint64_t subtitle_size = 0;
1487 uint64_t data_size = 0;
/* percent stays -1.0 (printed as "unknown") if overhead cannot be computed */
1488 float percent = -1.0;
/* accumulate payload bytes per media type across all output streams */
1492 for (i = 0; i < nb_output_streams; i++) {
1493 OutputStream *ost = output_streams[i];
1494 switch (ost->enc_ctx->codec_type) {
1495 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1496 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1497 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1498 default: other_size += ost->data_size; break;
1500 extra_size += ost->enc_ctx->extradata_size;
1501 data_size += ost->data_size;
/* NOTE(review): mixes AV_CODEC_FLAG_PASS1 with deprecated CODEC_FLAG_PASS2
 * in the mask -- presumably intentional compatibility, verify upstream */
1502 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1503 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw payload */
1507 if (data_size && total_size>0 && total_size >= data_size)
1508 percent = 100.0 * (total_size - data_size) / data_size;
1510 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1511 video_size / 1024.0,
1512 audio_size / 1024.0,
1513 subtitle_size / 1024.0,
1514 other_size / 1024.0,
1515 extra_size / 1024.0);
1517 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1519 av_log(NULL, AV_LOG_INFO, "unknown");
1520 av_log(NULL, AV_LOG_INFO, "\n");
1522 /* print verbose per-stream stats */
1523 for (i = 0; i < nb_input_files; i++) {
1524 InputFile *f = input_files[i];
1525 uint64_t total_packets = 0, total_size = 0;
1527 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1528 i, f->ctx->filename);
1530 for (j = 0; j < f->nb_streams; j++) {
1531 InputStream *ist = input_streams[f->ist_index + j];
1532 enum AVMediaType type = ist->dec_ctx->codec_type;
1534 total_size += ist->data_size;
1535 total_packets += ist->nb_packets;
1537 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1538 i, j, media_type_string(type));
1539 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1540 ist->nb_packets, ist->data_size);
/* decode counters are only meaningful for streams that were decoded */
1542 if (ist->decoding_needed) {
1543 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1544 ist->frames_decoded);
1545 if (type == AVMEDIA_TYPE_AUDIO)
1546 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1547 av_log(NULL, AV_LOG_VERBOSE, "; ");
1550 av_log(NULL, AV_LOG_VERBOSE, "\n");
1553 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1554 total_packets, total_size);
/* same breakdown for the output side */
1557 for (i = 0; i < nb_output_files; i++) {
1558 OutputFile *of = output_files[i];
1559 uint64_t total_packets = 0, total_size = 0;
1561 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1562 i, of->ctx->filename);
1564 for (j = 0; j < of->ctx->nb_streams; j++) {
1565 OutputStream *ost = output_streams[of->ost_index + j];
1566 enum AVMediaType type = ost->enc_ctx->codec_type;
1568 total_size += ost->data_size;
1569 total_packets += ost->packets_written;
1571 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1572 i, j, media_type_string(type));
1573 if (ost->encoding_needed) {
1574 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1575 ost->frames_encoded);
1576 if (type == AVMEDIA_TYPE_AUDIO)
1577 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1578 av_log(NULL, AV_LOG_VERBOSE, "; ");
1581 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1582 ost->packets_written, ost->data_size);
1584 av_log(NULL, AV_LOG_VERBOSE, "\n");
1587 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1588 total_packets, total_size);
/* warn when the run produced no encoded data at all */
1590 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1591 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1593 av_log(NULL, AV_LOG_WARNING, "\n");
1595 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic progress line ("frame= ... fps= ... bitrate= ...") to
 * stderr/log and, when -progress is active, a machine-readable key=value
 * report to progress_avio.  Called repeatedly during transcode and once more
 * with is_last_report set, which also triggers print_final_stats(). */
1600 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1603 AVBPrint buf_script;
1605 AVFormatContext *oc;
1607 AVCodecContext *enc;
1608 int frame_number, vid, i;
1611 int64_t pts = INT64_MIN + 1;
/* static state: rate-limits intermediate reports (see 500000 us below) */
1612 static int64_t last_time = -1;
1613 static int qp_histogram[52];
1614 int hours, mins, secs, us;
/* nothing to do if neither console stats nor -progress output is wanted */
1618 if (!print_stats && !is_last_report && !progress_avio)
1621 if (!is_last_report) {
1622 if (last_time == -1) {
1623 last_time = cur_time;
/* throttle: at most one intermediate report per 0.5 s */
1626 if ((cur_time - last_time) < 500000)
1628 last_time = cur_time;
1631 t = (cur_time-timer_start) / 1000000.0;
1634 oc = output_files[0]->ctx;
1636 total_size = avio_size(oc->pb);
1637 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1638 total_size = avio_tell(oc->pb);
1642 av_bprint_init(&buf_script, 0, 1);
1643 for (i = 0; i < nb_output_streams; i++) {
1645 ost = output_streams[i];
1647 if (!ost->stream_copy)
1648 q = ost->quality / (float) FF_QP2LAMBDA;
/* a second (or later) video stream only contributes its q value */
1650 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1651 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1652 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1653 ost->file_index, ost->index, q);
/* the first video stream drives frame/fps/q reporting */
1655 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1658 frame_number = ost->frame_number;
1659 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" picks 1 decimal for small rates, 0 otherwise */
1660 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1661 frame_number, fps < 9.95, fps, q);
1662 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1663 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1664 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1665 ost->file_index, ost->index, q);
1667 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram: one hex digit (log2 count) per QP bucket */
1671 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1673 for (j = 0; j < 32; j++)
1674 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1677 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1679 double error, error_sum = 0;
1680 double scale, scale_sum = 0;
1682 char type[3] = { 'Y','U','V' };
1683 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
/* per-plane PSNR: cumulative (enc->error) for the final report,
 * per-frame (ost->error) for intermediate reports */
1684 for (j = 0; j < 3; j++) {
1685 if (is_last_report) {
1686 error = enc->error[j];
1687 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1689 error = ost->error[j];
1690 scale = enc->width * enc->height * 255.0 * 255.0;
1696 p = psnr(error / scale);
1697 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1698 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1699 ost->file_index, ost->index, type[j] | 32, p);
1701 p = psnr(error_sum / scale_sum);
1702 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1703 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1704 ost->file_index, ost->index, p);
1708 /* compute min output value */
1709 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1710 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1711 ost->st->time_base, AV_TIME_BASE_Q));
1713 nb_frames_drop += ost->last_dropped;
/* split the microsecond pts into h:m:s.cs for display */
1716 secs = FFABS(pts) / AV_TIME_BASE;
1717 us = FFABS(pts) % AV_TIME_BASE;
/* -1 sentinels mean "N/A" below */
1723 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1724 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1726 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1728 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1729 "size=%8.0fkB time=", total_size / 1024.0);
1731 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1732 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1733 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1734 (100 * us) / AV_TIME_BASE);
1737 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1738 av_bprintf(&buf_script, "bitrate=N/A\n");
1740 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1741 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1744 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1745 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1746 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1747 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1748 hours, mins, secs, us);
1750 if (nb_frames_dup || nb_frames_drop)
1751 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1752 nb_frames_dup, nb_frames_drop);
1753 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1754 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1757 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1758 av_bprintf(&buf_script, "speed=N/A\n");
1760 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1761 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one console line; '\n' for the last */
1764 if (print_stats || is_last_report) {
1765 const char end = is_last_report ? '\n' : '\r';
1766 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1767 fprintf(stderr, "%s %c", buf, end);
1769 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
/* machine-readable -progress channel */
1774 if (progress_avio) {
1775 av_bprintf(&buf_script, "progress=%s\n",
1776 is_last_report ? "end" : "continue");
1777 avio_write(progress_avio, buf_script.str,
1778 FFMIN(buf_script.len, buf_script.size - 1));
1779 avio_flush(progress_avio);
1780 av_bprint_finalize(&buf_script, NULL);
1781 if (is_last_report) {
1782 if ((ret = avio_closep(&progress_avio)) < 0)
1783 av_log(NULL, AV_LOG_ERROR,
1784 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1789 print_final_stats(total_size);
/* Drain every active encoder at end of stream: send a NULL frame to enter
 * draining mode, then receive and mux all buffered packets until EOF. */
1792 static void flush_encoders(void)
1796 for (i = 0; i < nb_output_streams; i++) {
1797 OutputStream *ost = output_streams[i];
1798 AVCodecContext *enc = ost->enc_ctx;
1799 OutputFile *of = output_files[ost->file_index];
1800 int stop_encoding = 0;
/* stream-copied streams have no encoder to flush */
1802 if (!ost->encoding_needed)
/* frame_size <= 1 audio encoders (e.g. PCM) buffer nothing */
1805 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1807 #if FF_API_LAVF_FMT_RAWPICTURE
1808 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1812 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame puts the encoder into draining mode */
1815 avcodec_send_frame(enc, NULL);
1818 const char *desc = NULL;
1820 switch (enc->codec_type) {
1821 case AVMEDIA_TYPE_AUDIO:
1824 case AVMEDIA_TYPE_VIDEO:
1834 av_init_packet(&pkt);
1838 update_benchmark(NULL);
1839 ret = avcodec_receive_packet(enc, &pkt);
1840 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1841 if (ret < 0 && ret != AVERROR_EOF) {
1842 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass: append encoder stats to the pass log */
1847 if (ost->logfile && enc->stats_out) {
1848 fprintf(ost->logfile, "%s", enc->stats_out);
1850 if (ret == AVERROR_EOF) {
/* muxer already closed: discard the drained packet */
1854 if (ost->finished & MUXER_FINISHED) {
1855 av_packet_unref(&pkt);
1858 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1859 pkt_size = pkt.size;
1860 output_packet(of, &pkt, ost);
1861 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1862 do_video_stats(ost, pkt_size);
1873 * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero when ost is fed by ist (matching source index) and the
 * output file's start time has been reached. */
1875 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1877 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files */
1878 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1880 if (ost->source_index != ist_index)
/* drop packets before the requested output start time (-ss on output) */
1886 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding: rescale
 * timestamps into the output time base, honor start/recording-time limits,
 * optionally run the parser for header conversion, then mux the packet. */
1892 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1894 OutputFile *of = output_files[ost->file_index];
1895 InputFile *f = input_files [ist->file_index];
1896 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1897 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1901 av_init_packet(&opkt);
/* by default, start a copied stream on a keyframe */
1903 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1904 !ost->copy_initial_nonkeyframes)
/* drop packets timestamped before the effective start point */
1907 if (!ost->frame_number && !ost->copy_prior_start) {
1908 int64_t comp_start = start_time;
1909 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1910 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1911 if (pkt->pts == AV_NOPTS_VALUE ?
1912 ist->pts < comp_start :
1913 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* output-file -t limit reached: close the stream */
1917 if (of->recording_time != INT64_MAX &&
1918 ist->pts >= of->recording_time + start_time) {
1919 close_output_stream(ost);
/* input-file -t limit, measured from the input's own start time */
1923 if (f->recording_time != INT64_MAX) {
1924 start_time = f->ctx->start_time;
1925 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1926 start_time += f->start_time;
1927 if (ist->pts >= f->recording_time + start_time) {
1928 close_output_stream(ost);
1933 /* force the input stream PTS */
1934 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale pts/dts into the output stream time base, shifted by start time */
1937 if (pkt->pts != AV_NOPTS_VALUE)
1938 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1940 opkt.pts = AV_NOPTS_VALUE;
1942 if (pkt->dts == AV_NOPTS_VALUE)
1943 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1945 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1946 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps via av_rescale_delta */
1948 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1949 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1951 duration = ist->dec_ctx->frame_size;
1952 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1953 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1954 ost->st->time_base) - ost_tb_start_time;
1957 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1958 opkt.flags = pkt->flags;
1959 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1960 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1961 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1962 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1963 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
/* parser may produce a new data buffer (header conversion) */
1965 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1966 &opkt.data, &opkt.size,
1967 pkt->data, pkt->size,
1968 pkt->flags & AV_PKT_FLAG_KEY);
1970 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap the parser-allocated buffer so the packet owns it */
1975 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1980 opkt.data = pkt->data;
1981 opkt.size = pkt->size;
1983 av_copy_packet_side_data(&opkt, pkt);
1985 #if FF_API_LAVF_FMT_RAWPICTURE
1986 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1987 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1988 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1989 /* store AVPicture in AVPacket, as expected by the output format */
1990 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1992 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1996 opkt.data = (uint8_t *)&pict;
1997 opkt.size = sizeof(AVPicture);
1998 opkt.flags |= AV_PKT_FLAG_KEY;
2002 output_packet(of, &opkt, ost);
/* Fill in a default channel layout for an input stream whose decoder did not
 * report one, based on the channel count (capped by -guess_layout_max).
 * Logs a warning so the user knows the layout was guessed. */
2005 int guess_input_channel_layout(InputStream *ist)
2007 AVCodecContext *dec = ist->dec_ctx;
2009 if (!dec->channel_layout) {
2010 char layout_name[256];
/* do not guess beyond the user-configured channel-count limit */
2012 if (dec->channels > ist->guess_layout_max)
2014 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2015 if (!dec->channel_layout)
2017 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2018 dec->channels, dec->channel_layout);
2019 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2020 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode success/failure statistics and, with -xerror, abort on decode
 * errors or on frames flagged as corrupt.  ist may be NULL (subtitles). */
2025 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
2027 if (*got_output || ret<0)
2028 decode_error_stat[ret<0] ++;
2030 if (ret < 0 && exit_on_error)
2033 if (exit_on_error && *got_output && ist) {
2034 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2035 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2041 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2042 // There is the following difference: if you got a frame, you must call
2043 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2044 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Wrapper adapting the send/receive decoding API to the old got_frame
 * convention used by decode_audio()/decode_video(). */
2045 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2052 ret = avcodec_send_packet(avctx, pkt);
2053 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2054 // decoded frames with avcodec_receive_frame() until done.
/* EOF from send_packet just means the decoder is already draining */
2055 if (ret < 0 && ret != AVERROR_EOF)
2059 ret = avcodec_receive_frame(avctx, frame);
/* EAGAIN is "no frame yet", not an error */
2060 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Decode one audio packet: run the decoder, advance the input clock by the
 * decoded sample count, reconfigure filtergraphs when the audio parameters
 * change, compute a sample-accurate pts, and push the frame into all
 * filtergraph inputs fed by this stream. */
2068 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2070 AVFrame *decoded_frame, *f;
2071 AVCodecContext *avctx = ist->dec_ctx;
2072 int i, ret, err = 0, resample_changed;
2073 AVRational decoded_frame_tb;
/* lazily allocate the per-stream reusable frames */
2075 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2076 return AVERROR(ENOMEM);
2077 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2078 return AVERROR(ENOMEM);
2079 decoded_frame = ist->decoded_frame;
2081 update_benchmark(NULL);
2082 ret = decode(avctx, decoded_frame, got_output, pkt);
2083 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* guard against decoders reporting a bogus sample rate */
2085 if (ret >= 0 && avctx->sample_rate <= 0) {
2086 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2087 ret = AVERROR_INVALIDDATA;
2090 if (ret != AVERROR_EOF)
2091 check_decode_result(ist, got_output, ret);
2093 if (!*got_output || ret < 0)
2096 ist->samples_decoded += decoded_frame->nb_samples;
2097 ist->frames_decoded++;
2100 /* increment next_dts to use for the case where the input stream does not
2101 have timestamps or there are multiple frames in the packet */
2102 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2104 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect format/channel/rate changes that require filter reconfiguration */
2108 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2109 ist->resample_channels != avctx->channels ||
2110 ist->resample_channel_layout != decoded_frame->channel_layout ||
2111 ist->resample_sample_rate != decoded_frame->sample_rate;
2112 if (resample_changed) {
2113 char layout1[64], layout2[64];
2115 if (!guess_input_channel_layout(ist)) {
2116 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2117 "layout for Input Stream #%d.%d\n", ist->file_index,
2121 decoded_frame->channel_layout = avctx->channel_layout;
2123 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2124 ist->resample_channel_layout);
2125 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2126 decoded_frame->channel_layout);
2128 av_log(NULL, AV_LOG_INFO,
2129 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2130 ist->file_index, ist->st->index,
2131 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2132 ist->resample_channels, layout1,
2133 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2134 avctx->channels, layout2);
/* remember the new parameters so future changes are detected */
2136 ist->resample_sample_fmt = decoded_frame->format;
2137 ist->resample_sample_rate = decoded_frame->sample_rate;
2138 ist->resample_channel_layout = decoded_frame->channel_layout;
2139 ist->resample_channels = avctx->channels;
2141 for (i = 0; i < ist->nb_filters; i++) {
2142 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2144 av_log(NULL, AV_LOG_ERROR,
2145 "Error reconfiguring input stream %d:%d filter %d\n",
2146 ist->file_index, ist->st->index, i);
/* rebuild every filtergraph this stream feeds */
2151 for (i = 0; i < nb_filtergraphs; i++)
2152 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2153 FilterGraph *fg = filtergraphs[i];
2154 if (configure_filtergraph(fg) < 0) {
2155 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* pick a pts source: frame pts, then packet pts, then input dts clock */
2161 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2162 decoded_frame_tb = ist->st->time_base;
2163 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2164 decoded_frame->pts = pkt->pts;
2165 decoded_frame_tb = ist->st->time_base;
2167 decoded_frame->pts = ist->dts;
2168 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale into a 1/sample_rate time base, sample-accurately */
2170 if (decoded_frame->pts != AV_NOPTS_VALUE)
2171 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2172 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2173 (AVRational){1, avctx->sample_rate});
2174 ist->nb_samples = decoded_frame->nb_samples;
/* feed every filter input; use a ref for all but the last to avoid copies */
2175 for (i = 0; i < ist->nb_filters; i++) {
2176 if (i < ist->nb_filters - 1) {
2177 f = ist->filter_frame;
2178 err = av_frame_ref(f, decoded_frame);
2183 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2184 AV_BUFFERSRC_FLAG_PUSH);
2185 if (err == AVERROR_EOF)
2186 err = 0; /* ignore */
2190 decoded_frame->pts = AV_NOPTS_VALUE;
2193 av_frame_unref(ist->filter_frame);
2194 av_frame_unref(decoded_frame);
2195 return err < 0 ? err : ret;
/* Decode one video packet (or a drain call with eof set): run the decoder,
 * derive a best-effort timestamp (falling back to buffered dts values at
 * EOF), reconfigure filtergraphs on size/format changes, and push the frame
 * into all filtergraph inputs fed by this stream. */
2198 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2200 AVFrame *decoded_frame, *f;
2201 int i, ret = 0, err = 0, resample_changed;
2202 int64_t best_effort_timestamp;
2203 int64_t dts = AV_NOPTS_VALUE;
2204 AVRational *frame_sample_aspect;
2207 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2208 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2210 if (!eof && pkt && pkt->size == 0)
/* lazily allocate the per-stream reusable frames */
2213 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2214 return AVERROR(ENOMEM);
2215 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2216 return AVERROR(ENOMEM);
2217 decoded_frame = ist->decoded_frame;
2218 if (ist->dts != AV_NOPTS_VALUE)
2219 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2222 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2225 // The old code used to set dts on the drain packet, which does not work
2226 // with the new API anymore.
/* queue the dts so it can serve as a timestamp for frames drained at EOF */
2228 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2230 return AVERROR(ENOMEM);
2231 ist->dts_buffer = new;
2232 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2235 update_benchmark(NULL);
2236 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2237 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2239 // The following line may be required in some cases where there is no parser
2240 // or the parser does not has_b_frames correctly
2241 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2242 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2243 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2245 av_log(ist->dec_ctx, AV_LOG_WARNING,
2246 "video_delay is larger in decoder than demuxer %d > %d.\n"
2247 "If you want to help, upload a sample "
2248 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2249 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2250 ist->dec_ctx->has_b_frames,
2251 ist->st->codecpar->video_delay);
2254 if (ret != AVERROR_EOF)
2255 check_decode_result(ist, got_output, ret);
/* sanity check: frame parameters should match the decoder context */
2257 if (*got_output && ret >= 0) {
2258 if (ist->dec_ctx->width != decoded_frame->width ||
2259 ist->dec_ctx->height != decoded_frame->height ||
2260 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2261 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2262 decoded_frame->width,
2263 decoded_frame->height,
2264 decoded_frame->format,
2265 ist->dec_ctx->width,
2266 ist->dec_ctx->height,
2267 ist->dec_ctx->pix_fmt);
2271 if (!*got_output || ret < 0)
/* honor the user's -top override of field order */
2274 if(ist->top_field_first>=0)
2275 decoded_frame->top_field_first = ist->top_field_first;
2277 ist->frames_decoded++;
/* download the frame from hardware surfaces when a hwaccel is active */
2279 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2280 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2284 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2286 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* at EOF, fall back to the oldest queued dts for drained frames */
2288 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2289 best_effort_timestamp = ist->dts_buffer[0];
2291 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2292 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2293 ist->nb_dts_buffer--;
2296 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2297 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2299 if (ts != AV_NOPTS_VALUE)
2300 ist->next_pts = ist->pts = ts;
2304 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2305 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2306 ist->st->index, av_ts2str(decoded_frame->pts),
2307 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2308 best_effort_timestamp,
2309 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2310 decoded_frame->key_frame, decoded_frame->pict_type,
2311 ist->st->time_base.num, ist->st->time_base.den);
/* container-level aspect ratio overrides the codec-level one */
2314 if (ist->st->sample_aspect_ratio.num)
2315 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect size/format changes that require filter reconfiguration */
2317 resample_changed = ist->resample_width != decoded_frame->width ||
2318 ist->resample_height != decoded_frame->height ||
2319 ist->resample_pix_fmt != decoded_frame->format;
2320 if (resample_changed) {
2321 av_log(NULL, AV_LOG_INFO,
2322 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2323 ist->file_index, ist->st->index,
2324 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2325 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2327 ist->resample_width = decoded_frame->width;
2328 ist->resample_height = decoded_frame->height;
2329 ist->resample_pix_fmt = decoded_frame->format;
2331 for (i = 0; i < ist->nb_filters; i++) {
2332 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2334 av_log(NULL, AV_LOG_ERROR,
2335 "Error reconfiguring input stream %d:%d filter %d\n",
2336 ist->file_index, ist->st->index, i);
/* rebuild affected filtergraphs if the stream opted into reinit */
2341 for (i = 0; i < nb_filtergraphs; i++) {
2342 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2343 configure_filtergraph(filtergraphs[i]) < 0) {
2344 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2350 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* feed every filter input; use a ref for all but the last to avoid copies */
2351 for (i = 0; i < ist->nb_filters; i++) {
2352 if (!frame_sample_aspect->num)
2353 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2355 if (i < ist->nb_filters - 1) {
2356 f = ist->filter_frame;
2357 err = av_frame_ref(f, decoded_frame);
2362 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2363 if (err == AVERROR_EOF) {
2364 err = 0; /* ignore */
2365 } else if (err < 0) {
2366 av_log(NULL, AV_LOG_FATAL,
2367 "Failed to inject frame into filter network: %s\n", av_err2str(err));
2373 av_frame_unref(ist->filter_frame);
2374 av_frame_unref(decoded_frame);
2375 return err < 0 ? err : ret;
/* Decode one subtitle packet from 'ist' and hand the decoded AVSubtitle to
 * every output stream that encodes subtitles from this input.
 * NOTE(review): this listing is elided (embedded source line numbers are
 * non-contiguous); several statements/braces between the visible lines are
 * missing, so comments describe only what is visible. */
2378 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2380 AVSubtitle subtitle;
2381 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2382 &subtitle, got_output, pkt);
/* Records/reports the decode outcome (error counting lives elsewhere). */
2384 check_decode_result(NULL, got_output, ret);
2386 if (ret < 0 || !*got_output) {
/* On EOF/decode failure, flush any pending sub2video state — presumably
 * gated on !pkt in an elided condition; TODO confirm against full source. */
2388 sub2video_flush(ist);
/* -fix_sub_duration: clamp the PREVIOUS subtitle's display time so it ends
 * no later than the current subtitle's start. */
2392 if (ist->fix_sub_duration) {
2394 if (ist->prev_sub.got_output) {
/* Distance between consecutive subtitle PTS, rescaled to milliseconds
 * (end_display_time is expressed in ms). */
2395 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2396 1000, AV_TIME_BASE);
2397 if (end < ist->prev_sub.subtitle.end_display_time) {
2398 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2399 "Subtitle duration reduced from %d to %d%s\n",
2400 ist->prev_sub.subtitle.end_display_time, end,
2401 end <= 0 ? ", dropping it" : "");
2402 ist->prev_sub.subtitle.end_display_time = end;
/* Output lags one subtitle behind input: swap the freshly decoded subtitle
 * with the buffered previous one so the (now fixed) previous one is emitted. */
2405 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2406 FFSWAP(int, ret, ist->prev_sub.ret);
2407 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* Render the subtitle onto the sub2video stream (subtitle->video filtering). */
2415 sub2video_update(ist, &subtitle);
/* Empty subtitles carry no rectangles; visible code skips further work. */
2417 if (!subtitle.num_rects)
2420 ist->frames_decoded++;
/* Fan out to every subtitle-encoding output fed by this input stream. */
2422 for (i = 0; i < nb_output_streams; i++) {
2423 OutputStream *ost = output_streams[i];
2425 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2426 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2429 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
/* AVSubtitle owns heap rects; must always be freed after use. */
2433 avsubtitle_free(&subtitle);
/* Signal EOF to every filtergraph input attached to this input stream by
 * pushing a NULL frame into each buffersrc. Error handling of 'ret' is in
 * elided lines — TODO confirm it aborts on the first failure. */
2437 static int send_filter_eof(InputStream *ist)
2440 for (i = 0; i < ist->nb_filters; i++) {
2441 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
/* Central per-packet dispatcher: decodes (or stream-copies) one input packet
 * and maintains the input stream's dts/pts bookkeeping in AV_TIME_BASE units.
 * pkt == NULL drains the decoders at EOF; no_eof suppresses the filter-EOF
 * marker (used when looping input).
 * Returns !eof_reached (non-zero while more output may come).
 * NOTE(review): elided listing — loop bodies, 'break's and several branches
 * between the visible lines are missing from this view. */
2448 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2449 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2453 int eof_reached = 0;
/* First packet: seed ist->dts. For streams with a known average frame rate,
 * back off by the decoder delay (has_b_frames) so dts starts before 0. */
2456 if (!ist->saw_first_ts) {
2457 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2459 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2460 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2461 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2463 ist->saw_first_ts = 1;
2466 if (ist->next_dts == AV_NOPTS_VALUE)
2467 ist->next_dts = ist->dts;
2468 if (ist->next_pts == AV_NOPTS_VALUE)
2469 ist->next_pts = ist->pts;
/* avpkt is a local working copy; its setup from 'pkt' is in elided lines. */
2473 av_init_packet(&avpkt);
/* Trust the container dts for the current position (rescaled to the global
 * AV_TIME_BASE). For decoded video, pts is derived from frames instead. */
2480 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2481 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2482 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2483 ist->next_pts = ist->pts = ist->dts;
2486 // while we have more to decode or while the decoder did output something on EOF
2487 while (ist->decoding_needed) {
2491 ist->pts = ist->next_pts;
2492 ist->dts = ist->next_dts;
/* 'repeating' (set in elided code) means the packet was already submitted;
 * subsequent iterations only pull more decoder output (pass NULL). */
2494 switch (ist->dec_ctx->codec_type) {
2495 case AVMEDIA_TYPE_AUDIO:
2496 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2498 case AVMEDIA_TYPE_VIDEO:
2499 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2500 if (!repeating || !pkt || got_output) {
/* Frame duration: prefer the packet's own duration, else derive it from the
 * codec frame rate and parser repeat_pict/ticks_per_frame. */
2501 if (pkt && pkt->duration) {
2502 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2503 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2504 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2505 duration = ((int64_t)AV_TIME_BASE *
2506 ist->dec_ctx->framerate.den * ticks) /
2507 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2510 if(ist->dts != AV_NOPTS_VALUE && duration) {
2511 ist->next_dts += duration;
2513 ist->next_dts = AV_NOPTS_VALUE;
2517 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2519 case AVMEDIA_TYPE_SUBTITLE:
2522 ret = transcode_subtitles(ist, &avpkt, &got_output);
2523 if (!pkt && ret >= 0)
2530 if (ret == AVERROR_EOF) {
2536 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2537 ist->file_index, ist->st->index, av_err2str(ret));
2540 // Decoding might not terminate if we're draining the decoder, and
2541 // the decoder keeps returning an error.
2542 // This should probably be considered a libavcodec issue.
2543 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2552 // During draining, we might get multiple output frames in this loop.
2553 // ffmpeg.c does not drain the filter chain on configuration changes,
2554 // which means if we send multiple frames at once to the filters, and
2555 // one of those frames changes configuration, the buffered frames will
2556 // be lost. This can upset certain FATE tests.
2557 // Decode only 1 frame per call on EOF to appease these FATE tests.
2558 // The ideal solution would be to rewrite decoding to use the new
2559 // decoding API in a better way.
2566 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2567 /* except when looping we need to flush but not to send an EOF */
2568 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2569 int ret = send_filter_eof(ist);
2571 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
/* Stream copy path: advance next_dts synthetically since nothing is decoded. */
2576 /* handle stream copy */
2577 if (!ist->decoding_needed) {
2578 ist->dts = ist->next_dts;
2579 switch (ist->dec_ctx->codec_type) {
2580 case AVMEDIA_TYPE_AUDIO:
2581 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2582 ist->dec_ctx->sample_rate;
2584 case AVMEDIA_TYPE_VIDEO:
2585 if (ist->framerate.num) {
/* Round dts onto the forced-framerate grid, then step one frame forward. */
2586 // TODO: Remove work-around for c99-to-c89 issue 7
2587 AVRational time_base_q = AV_TIME_BASE_Q;
2588 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2589 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2590 } else if (pkt->duration) {
2591 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2592 } else if(ist->dec_ctx->framerate.num != 0) {
2593 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2594 ist->next_dts += ((int64_t)AV_TIME_BASE *
2595 ist->dec_ctx->framerate.den * ticks) /
2596 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2600 ist->pts = ist->dts;
2601 ist->next_pts = ist->next_dts;
/* Copy the packet verbatim to every matching non-encoding output stream. */
2603 for (i = 0; pkt && i < nb_output_streams; i++) {
2604 OutputStream *ost = output_streams[i];
2606 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2609 do_streamcopy(ist, ost, pkt);
2612 return !eof_reached;
/* Build an SDP description covering every RTP output file and either print it
 * to stdout or write it to -sdp_file. Bails out (in elided code) until all
 * output headers have been written. */
2615 static void print_sdp(void)
2620 AVIOContext *sdp_pb;
2621 AVFormatContext **avc;
/* SDP can only be generated once every muxer has written its header. */
2623 for (i = 0; i < nb_output_files; i++) {
2624 if (!output_files[i]->header_written)
2628 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts how many were found. */
2631 for (i = 0, j = 0; i < nb_output_files; i++) {
2632 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2633 avc[j] = output_files[i]->ctx;
2641 av_sdp_create(avc, j, sdp, sizeof(sdp));
2643 if (!sdp_filename) {
2644 printf("SDP:\n%s\n", sdp);
2647 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2648 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2650 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2651 avio_closep(&sdp_pb);
2652 av_freep(&sdp_filename);
/* Linear search of the global hwaccels[] table for an entry whose pixel
 * format matches; the table is name-terminated. Returns NULL (elided line)
 * when no hwaccel handles this format. */
2660 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2663 for (i = 0; hwaccels[i].name; i++)
2664 if (hwaccels[i].pix_fmt == pix_fmt)
2665 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick the first hwaccel format we can initialize; falls through
 * (elided) to a software format otherwise. s->opaque carries the InputStream. */
2669 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2671 InputStream *ist = s->opaque;
2672 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2675 for (p = pix_fmts; *p != -1; p++) {
2676 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2677 const HWAccel *hwaccel;
/* Only hwaccel-flagged formats are considered on this path. */
2679 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2682 hwaccel = get_hwaccel(*p);
/* Skip if another hwaccel is already active, or the user requested a
 * specific one that doesn't match. */
2684 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2685 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2688 ret = hwaccel->init(s);
/* An explicitly requested hwaccel failing to init is fatal; with AUTO the
 * loop presumably continues to the next candidate — elided here. */
2690 if (ist->hwaccel_id == hwaccel->id) {
2691 av_log(NULL, AV_LOG_FATAL,
2692 "%s hwaccel requested for input stream #%d:%d, "
2693 "but cannot be initialized.\n", hwaccel->name,
2694 ist->file_index, ist->st->index);
2695 return AV_PIX_FMT_NONE;
2700 if (ist->hw_frames_ctx) {
2701 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2702 if (!s->hw_frames_ctx)
2703 return AV_PIX_FMT_NONE;
/* Remember which hwaccel/pix_fmt won so get_buffer can route allocations. */
2706 ist->active_hwaccel_id = hwaccel->id;
2707 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * active hwaccel when the frame uses its pixel format, otherwise use the
 * default software allocator. */
2714 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2716 InputStream *ist = s->opaque;
2718 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2719 return ist->hwaccel_get_buffer(s, frame, flags);
2721 return avcodec_default_get_buffer2(s, frame, flags);
/* Prepare input stream ist_index: feed decoder parameters to its filters and,
 * if decoding is needed, configure and open the decoder. On failure an
 * explanation is written into 'error' and a negative code returned. */
2724 static int init_input_stream(int ist_index, char *error, int error_len)
2727 InputStream *ist = input_streams[ist_index];
/* Propagate decoder parameters to every filtergraph input of this stream. */
2729 for (i = 0; i < ist->nb_filters; i++) {
2730 ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
2732 av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
2737 if (ist->decoding_needed) {
2738 AVCodec *codec = ist->dec;
/* A NULL codec check is elided between these lines; this is the not-found
 * error path. */
2740 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2741 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2742 return AVERROR(EINVAL);
/* Install our hwaccel-aware callbacks; opaque carries the InputStream. */
2745 ist->dec_ctx->opaque = ist;
2746 ist->dec_ctx->get_format = get_format;
2747 ist->dec_ctx->get_buffer2 = get_buffer;
2748 ist->dec_ctx->thread_safe_callbacks = 1;
2750 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles need end-display-time computation when muxed to an output. */
2751 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2752 (ist->decoding_needed & DECODING_FOR_OST)) {
2753 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2754 if (ist->decoding_needed & DECODING_FOR_FILTER)
2755 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2758 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2760 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2761 * audio, and video decoders such as cuvid or mediacodec */
2762 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic threading unless the user chose a thread count. */
2764 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2765 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2766 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2767 if (ret == AVERROR_EXPERIMENTAL)
2768 abort_codec_experimental(codec, 0);
2770 snprintf(error, error_len,
2771 "Error while opening decoder for input stream "
2773 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option still left in the dict was not consumed — report it. */
2776 assert_avoptions(ist->decoder_opts);
2779 ist->next_pts = AV_NOPTS_VALUE;
2780 ist->next_dts = AV_NOPTS_VALUE;
/* Map an output stream back to its source InputStream; returns NULL (in an
 * elided line) for outputs with no direct source, e.g. pure filter outputs. */
2785 static InputStream *get_input_stream(OutputStream *ost)
2787 if (ost->source_index >= 0)
2788 return input_streams[ost->source_index];
/* qsort comparator for int64_t values; FFDIFFSIGN avoids the overflow a
 * plain subtraction comparator would risk. */
2792 static int compare_int64(const void *a, const void *b)
2794 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/* Once every stream of 'of' is initialized, write the muxer header, dump the
 * format, emit SDP if requested, and flush packets queued while waiting. */
2797 /* open the muxer when all the streams are initialized */
2798 static int check_init_output_file(OutputFile *of, int file_index)
/* Return early (elided) while any stream is still uninitialized. */
2802 for (i = 0; i < of->ctx->nb_streams; i++) {
2803 OutputStream *ost = output_streams[of->ost_index + i];
2804 if (!ost->initialized)
2808 of->ctx->interrupt_callback = int_cb;
2810 ret = avformat_write_header(of->ctx, &of->opts);
2812 av_log(NULL, AV_LOG_ERROR,
2813 "Could not write header for output file #%d "
2814 "(incorrect codec parameters ?): %s\n",
2815 file_index, av_err2str(ret));
2818 //assert_avoptions(of->opts);
2819 of->header_written = 1;
2821 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2823 if (sdp_filename || want_sdp)
/* Packets that arrived before the header was written were buffered per
 * stream; drain those FIFOs into the muxer now. */
2826 /* flush the muxing queues */
2827 for (i = 0; i < of->ctx->nb_streams; i++) {
2828 OutputStream *ost = output_streams[of->ost_index + i];
2830 while (av_fifo_size(ost->muxing_queue)) {
2832 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2833 write_packet(of, &pkt, ost);
/* Initialize the output stream's bitstream-filter chain: wire each filter's
 * input parameters/timebase to the previous filter's output (or to the
 * stream for the first), then adopt the final filter's codecpar/timebase. */
2840 static int init_output_bsfs(OutputStream *ost)
2845 if (!ost->nb_bitstream_filters)
2848 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2849 ctx = ost->bsf_ctx[i];
/* First filter reads from the stream; later ones chain off the predecessor. */
2851 ret = avcodec_parameters_copy(ctx->par_in,
2852 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2856 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2858 ret = av_bsf_init(ctx);
2860 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2861 ost->bsf_ctx[i]->filter->name);
/* The stream's final parameters are whatever the last filter emits. */
2866 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2867 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2871 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream in stream-copy mode: clone the input codec
 * parameters (via a temporary codec context so -c:x copy options apply),
 * pick a codec tag the output container accepts, transfer timing info,
 * duplicate stream side data, and apply per-codec-type fixups. */
2876 static int init_output_stream_streamcopy(OutputStream *ost)
2878 OutputFile *of = output_files[ost->file_index];
2879 InputStream *ist = get_input_stream(ost);
2880 AVCodecParameters *par_dst = ost->st->codecpar;
2881 AVCodecParameters *par_src = ost->ref_par;
2884 uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy requires a direct input source and no filtergraph. */
2886 av_assert0(ist && !ost->filter);
/* Round-trip through enc_ctx so user-supplied encoder options can override
 * the copied input parameters before they land in par_src. */
2888 avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2889 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2891 av_log(NULL, AV_LOG_FATAL,
2892 "Error setting up codec context options.\n");
2895 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source tag only when the output format can represent it; the
 * condition guarding this branch is partially elided. */
2898 unsigned int codec_tag_tmp;
2899 if (!of->ctx->oformat->codec_tag ||
2900 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2901 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
2902 codec_tag = par_src->codec_tag;
2905 ret = avcodec_parameters_copy(par_dst, par_src);
2909 par_dst->codec_tag = codec_tag;
2911 if (!ost->frame_rate.num)
2912 ost->frame_rate = ist->framerate;
2913 ost->st->avg_frame_rate = ost->frame_rate;
2915 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
/* av_add_q with 0/1 normalizes the rational (reduces common factors). */
2919 // copy timebase while removing common factors
2920 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
2923 ost->st->disposition = ist->st->disposition;
/* Deep-copy per-stream side data, skipping the display matrix when the user
 * overrode rotation. */
2925 if (ist->st->nb_side_data) {
2926 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2927 sizeof(*ist->st->side_data));
2928 if (!ost->st->side_data)
2929 return AVERROR(ENOMEM);
2931 ost->st->nb_side_data = 0;
2932 for (i = 0; i < ist->st->nb_side_data; i++) {
2933 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2934 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2936 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2939 sd_dst->data = av_malloc(sd_src->size);
2941 return AVERROR(ENOMEM);
2942 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2943 sd_dst->size = sd_src->size;
2944 sd_dst->type = sd_src->type;
2945 ost->st->nb_side_data++;
/* A parser is still needed on the copy path (see FIXME near the caller). */
2949 ost->parser = av_parser_init(par_dst->codec_id);
2950 ost->parser_avctx = avcodec_alloc_context3(NULL);
2951 if (!ost->parser_avctx)
2952 return AVERROR(ENOMEM);
2954 switch (par_dst->codec_type) {
2955 case AVMEDIA_TYPE_AUDIO:
/* -vol implies decoding; it cannot be combined with audio stream copy. */
2956 if (audio_volume != 256) {
2957 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Suspicious MP3/AC3 block_align values from some containers are dropped. */
2960 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2961 par_dst->block_align= 0;
2962 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2963 par_dst->block_align= 0;
2965 case AVMEDIA_TYPE_VIDEO:
/* SAR priority: -aspect override > input stream SAR > input codec SAR. */
2966 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2968 av_mul_q(ost->frame_aspect_ratio,
2969 (AVRational){ par_dst->height, par_dst->width });
2970 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2971 "with stream copy may produce invalid files\n");
2973 else if (ist->st->sample_aspect_ratio.num)
2974 sar = ist->st->sample_aspect_ratio;
2976 sar = par_src->sample_aspect_ratio;
2977 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2978 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2979 ost->st->r_frame_rate = ist->st->r_frame_rate;
/* Fill the stream's "encoder" metadata tag with the lavc ident + encoder
 * name, unless the user already set one. In bitexact mode the version string
 * is omitted so output files stay reproducible. */
2986 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2988 AVDictionaryEntry *e;
2990 uint8_t *encoder_string;
2991 int encoder_string_len;
2992 int format_flags = 0;
2993 int codec_flags = 0;
/* Respect an explicit user-provided encoder tag. */
2995 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / -flags option strings to detect +bitexact, since the
 * dictionaries hold unparsed text at this point. */
2998 e = av_dict_get(of->opts, "fflags", NULL, 0);
3000 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3003 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3005 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3007 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3010 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* " " separator + NUL accounted for by the +2. */
3013 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3014 encoder_string = av_mallocz(encoder_string_len);
3015 if (!encoder_string)
3018 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3019 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3021 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3022 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Dict takes ownership of encoder_string (DONT_STRDUP_VAL). */
3023 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3024 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames spec 'kf' (comma-separated times, plus the
 * "chapters[+offset]" shorthand) into a sorted array of PTS values in the
 * encoder timebase, stored on ost->forced_kf_pts/count. */
3027 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3028 AVCodecContext *avctx)
3031 int n = 1, i, size, index = 0;
/* Count entries: n = commas + 1 (the counting statement is elided). */
3034 for (p = kf; *p; p++)
3038 pts = av_malloc_array(size, sizeof(*pts));
3040 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3045 for (i = 0; i < n; i++) {
3046 char *next = strchr(p, ',');
/* "chapters" expands to one keyframe per chapter start, optionally shifted
 * by a time offset appended after the keyword. */
3051 if (!memcmp(p, "chapters", 8)) {
3053 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array in place; each chapter replaces the single "chapters"
 * entry, hence nb_chapters - 1 extra slots. */
3056 if (avf->nb_chapters > INT_MAX - size ||
3057 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3059 av_log(NULL, AV_LOG_FATAL,
3060 "Could not allocate forced key frames array.\n");
3063 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3064 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3066 for (j = 0; j < avf->nb_chapters; j++) {
3067 AVChapter *c = avf->chapters[j];
3068 av_assert1(index < size);
3069 pts[index++] = av_rescale_q(c->start, c->time_base,
3070 avctx->time_base) + t;
/* Plain entry: a single timestamp rescaled into the encoder timebase. */
3075 t = parse_time_or_die("force_key_frames", p, 1);
3076 av_assert1(index < size);
3077 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Sorted order lets the encoder-side lookup scan forward monotonically. */
3084 av_assert0(index == size);
3085 qsort(pts, size, sizeof(*pts), compare_int64);
3086 ost->forced_kf_count = size;
3087 ost->forced_kf_pts = pts;
/* Configure ost->enc_ctx for encoding: disposition, filtergraph setup, frame
 * rate selection, then per-media-type parameters (sample fmt/rate/layout for
 * audio; size, SAR, pix_fmt, timebase and forced keyframes for video; fixed
 * 1/1000 timebase for subtitles).
 * NOTE(review): elided listing — some guard conditions and 'break's between
 * visible lines are missing from this view. */
3090 static int init_output_stream_encode(OutputStream *ost)
3092 InputStream *ist = get_input_stream(ost);
3093 AVCodecContext *enc_ctx = ost->enc_ctx;
3094 AVCodecContext *dec_ctx = NULL;
3095 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3098 set_encoder_id(output_files[ost->file_index], ost);
/* Inherit disposition and chroma location from the source stream when one
 * exists (the ist guard is elided). */
3101 ost->st->disposition = ist->st->disposition;
3103 dec_ctx = ist->dec_ctx;
3105 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only audio/video stream of its type in the output, mark it
 * as the default track. */
3107 for (j = 0; j < oc->nb_streams; j++) {
3108 AVStream *st = oc->streams[j];
3109 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3112 if (j == oc->nb_streams)
3113 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3114 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3115 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Simple (1-in/1-out) filtergraphs are configured lazily, here. */
3118 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3119 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3120 filtergraph_is_simple(ost->filter->graph)) {
3121 FilterGraph *fg = ost->filter->graph;
3123 if (configure_filtergraph(fg)) {
3124 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Frame rate priority: user/-r > filter sink > input -r > input r_frame_rate
 * > hardcoded 25 fps fallback. */
3129 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3130 if (!ost->frame_rate.num)
3131 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3132 if (ist && !ost->frame_rate.num)
3133 ost->frame_rate = ist->framerate;
3134 if (ist && !ost->frame_rate.num)
3135 ost->frame_rate = ist->st->r_frame_rate;
3136 if (ist && !ost->frame_rate.num) {
3137 ost->frame_rate = (AVRational){25, 1};
3138 av_log(NULL, AV_LOG_WARNING,
3140 "about the input framerate is available. Falling "
3141 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3142 "if you want a different framerate.\n",
3143 ost->file_index, ost->index);
3145 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest framerate the encoder supports unless -force_fps. */
3146 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3147 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3148 ost->frame_rate = ost->enc->supported_framerates[idx];
3150 // reduce frame rate for mpeg4 to be within the spec limits
3151 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3152 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3153 ost->frame_rate.num, ost->frame_rate.den, 65535);
3157 switch (enc_ctx->codec_type) {
3158 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the configured filter sink link. */
3159 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3161 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3162 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3163 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3164 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3165 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3166 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3168 case AVMEDIA_TYPE_VIDEO:
3169 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3170 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3171 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3172 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3173 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3174 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3175 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed in AV_TIME_BASE; move them into the
 * final encoder timebase (source timebase argument is elided). */
3177 for (j = 0; j < ost->forced_kf_count; j++)
3178 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3180 enc_ctx->time_base);
3182 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3183 enc_ctx->height = ost->filter->filter->inputs[0]->h;
/* SAR: -aspect override (converted from DAR) wins over the filter output. */
3184 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3185 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3186 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3187 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3188 if (!strncmp(ost->enc->name, "libx264", 7) &&
3189 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3190 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3191 av_log(NULL, AV_LOG_WARNING,
3192 "No pixel format specified, %s for H.264 encoding chosen.\n"
3193 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3194 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3195 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3196 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3197 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3198 av_log(NULL, AV_LOG_WARNING,
3199 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3200 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3201 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3202 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3204 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3205 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3207 ost->st->avg_frame_rate = ost->frame_rate;
/* The surrounding condition is elided; this appears to restore the user's
 * -bits_per_raw_sample when the frame geometry/format changed — TODO confirm. */
3210 enc_ctx->width != dec_ctx->width ||
3211 enc_ctx->height != dec_ctx->height ||
3212 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3213 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" prefix → runtime expression; "source" keeps
 * input keyframes; anything else is a static time list. */
3216 if (ost->forced_keyframes) {
3217 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3218 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3219 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3221 av_log(NULL, AV_LOG_ERROR,
3222 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3225 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3226 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3227 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3228 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3230 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3231 // parse it only for static kf timings
3232 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3233 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3237 case AVMEDIA_TYPE_SUBTITLE:
/* Subtitles use a fixed millisecond timebase. */
3238 enc_ctx->time_base = (AVRational){1, 1000};
3239 if (!enc_ctx->width) {
3240 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3241 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3244 case AVMEDIA_TYPE_DATA:
/* Finish setting up one output stream: open the encoder (or run the
 * stream-copy path), copy side data, apply user -disposition flags, init the
 * bitstream-filter chain, mark the stream initialized, and try to open the
 * muxer if it was the last missing stream.
 * NOTE(review): elided listing — error checks between visible lines are
 * missing from this view. */
3254 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3258 if (ost->encoding_needed) {
3259 AVCodec *codec = ost->enc;
3260 AVCodecContext *dec = NULL;
3263 ret = init_output_stream_encode(ost);
3267 if ((ist = get_input_stream(ost)))
/* Propagate the decoder's subtitle header (ASS styles etc.) to the encoder. */
3269 if (dec && dec->subtitle_header) {
3270 /* ASS code assumes this buffer is null terminated so add extra byte. */
3271 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3272 if (!ost->enc_ctx->subtitle_header)
3273 return AVERROR(ENOMEM);
3274 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3275 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3277 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3278 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128k when the user specified none. */
3279 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3281 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3282 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3283 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* Hand the filter chain's hw frames context to the encoder for hw encoding. */
3285 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
3286 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
3287 if (!ost->enc_ctx->hw_frames_ctx)
3288 return AVERROR(ENOMEM);
3291 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3292 if (ret == AVERROR_EXPERIMENTAL)
3293 abort_codec_experimental(codec, 1);
3294 snprintf(error, error_len,
3295 "Error while opening encoder for output stream #%d:%d - "
3296 "maybe incorrect parameters such as bit_rate, rate, width or height",
3297 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit exactly
 * frame_size samples per frame. */
3300 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3301 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3302 av_buffersink_set_frame_size(ost->filter->filter,
3303 ost->enc_ctx->frame_size);
3304 assert_avoptions(ost->encoder_opts);
3305 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3306 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3307 " It takes bits/s as argument, not kbits/s\n");
3309 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3311 av_log(NULL, AV_LOG_FATAL,
3312 "Error initializing the output stream codec context.\n");
/* Legacy: keep the deprecated st->codec in sync for old consumers. */
3316 * FIXME: ost->st->codec should't be needed here anymore.
3318 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* Copy encoder-produced coded side data onto the stream. */
3322 if (ost->enc_ctx->nb_coded_side_data) {
3325 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3326 sizeof(*ost->st->side_data));
3327 if (!ost->st->side_data)
3328 return AVERROR(ENOMEM);
3330 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3331 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3332 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3334 sd_dst->data = av_malloc(sd_src->size);
3336 return AVERROR(ENOMEM);
3337 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3338 sd_dst->size = sd_src->size;
3339 sd_dst->type = sd_src->type;
3340 ost->st->nb_side_data++;
3344 // copy timebase while removing common factors
3345 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3346 ost->st->codec->codec= ost->enc_ctx->codec;
3347 } else if (ost->stream_copy) {
3348 ret = init_output_stream_streamcopy(ost);
3353 * FIXME: will the codec context used by the parser during streamcopy
3354 * This should go away with the new parser API.
3356 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
/* -disposition: evaluate the user's flag string against this local table. */
3361 // parse user provided disposition, and update stream values
3362 if (ost->disposition) {
3363 static const AVOption opts[] = {
3364 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3365 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3366 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3367 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3368 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3369 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3370 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3371 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3372 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3373 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3374 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3375 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3376 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3377 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3380 static const AVClass class = {
3382 .item_name = av_default_item_name,
3384 .version = LIBAVUTIL_VERSION_INT,
3386 const AVClass *pclass = &class;
3388 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3393 /* initialize bitstream filters for the output stream
3394 * needs to be done here, because the codec id for streamcopy is not
3395 * known until now */
3396 ret = init_output_bsfs(ost);
3400 ost->initialized = 1;
/* The muxer header is written once all of the file's streams reach here. */
3402 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the initial probe; nb_streams_warn is the high-water mark
 * of already-reported indices. */
3409 static void report_new_stream(int input_index, AVPacket *pkt)
3411 InputFile *file = input_files[input_index];
3412 AVStream *st = file->ctx->streams[pkt->stream_index];
3414 if (pkt->stream_index < file->nb_streams_warn)
3416 av_log(file->ctx, AV_LOG_WARNING,
3417 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3418 av_get_media_type_string(st->codecpar->codec_type),
3419 input_index, pkt->stream_index,
3420 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3421 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop:
 *  - bind complex-filtergraph outputs back to a source input stream,
 *  - arm framerate emulation (-re) timestamps,
 *  - hand streams to hwaccel-specific init (QSV/CUVID) where applicable,
 *  - open all decoders and encoders,
 *  - discard programs with no used streams, write headers for stream-less
 *    outputs, and log the human-readable stream mapping.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): interior lines are elided in this view; error paths and some
 * loop bodies are not fully visible. */
3424 static int transcode_init(void)
3426 int ret = 0, i, j, k;
3427 AVFormatContext *oc;
3430 char error[1024] = {0};
/* for complex filtergraphs with a single input, propagate the input stream
 * index to the output stream so downstream code knows its source */
3432 for (i = 0; i < nb_filtergraphs; i++) {
3433 FilterGraph *fg = filtergraphs[i];
3434 for (j = 0; j < fg->nb_outputs; j++) {
3435 OutputFilter *ofilter = fg->outputs[j];
3436 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3438 if (fg->nb_inputs != 1)
3440 for (k = nb_input_streams-1; k >= 0 ; k--)
3441 if (fg->inputs[0]->ist == input_streams[k])
3443 ofilter->ost->source_index = k;
3447 /* init framerate emulation */
3448 for (i = 0; i < nb_input_files; i++) {
3449 InputFile *ifile = input_files[i];
3450 if (ifile->rate_emu)
3451 for (j = 0; j < ifile->nb_streams; j++)
3452 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3455 /* hwaccel transcoding */
3456 for (i = 0; i < nb_output_streams; i++) {
3457 ost = output_streams[i];
3459 if (!ost->stream_copy) {
3461 if (qsv_transcode_init(ost))
3466 if (cuvid_transcode_init(ost))
3472 /* init input streams */
3473 for (i = 0; i < nb_input_streams; i++)
3474 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close any encoder contexts before bailing out */
3475 for (i = 0; i < nb_output_streams; i++) {
3476 ost = output_streams[i];
3477 avcodec_close(ost->enc_ctx);
3482 /* open each encoder */
3483 for (i = 0; i < nb_output_streams; i++) {
3484 ret = init_output_stream(output_streams[i], error, sizeof(error));
3489 /* discard unused programs */
3490 for (i = 0; i < nb_input_files; i++) {
3491 InputFile *ifile = input_files[i];
3492 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3493 AVProgram *p = ifile->ctx->programs[j];
3494 int discard = AVDISCARD_ALL;
/* keep the program if at least one of its streams is in use */
3496 for (k = 0; k < p->nb_stream_indexes; k++)
3497 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3498 discard = AVDISCARD_DEFAULT;
3501 p->discard = discard;
3505 /* write headers for files with no streams */
3506 for (i = 0; i < nb_output_files; i++) {
3507 oc = output_files[i]->ctx;
3508 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3509 ret = check_init_output_file(output_files[i], i);
3516 /* dump the stream mapping */
3517 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3518 for (i = 0; i < nb_input_streams; i++) {
3519 ist = input_streams[i];
3521 for (j = 0; j < ist->nb_filters; j++) {
3522 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3523 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3524 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3525 ist->filters[j]->name);
3526 if (nb_filtergraphs > 1)
3527 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3528 av_log(NULL, AV_LOG_INFO, "\n");
3533 for (i = 0; i < nb_output_streams; i++) {
3534 ost = output_streams[i];
3536 if (ost->attachment_filename) {
3537 /* an attached file */
3538 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3539 ost->attachment_filename, ost->file_index, ost->index);
3543 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3544 /* output from a complex graph */
3545 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3546 if (nb_filtergraphs > 1)
3547 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3549 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3550 ost->index, ost->enc ? ost->enc->name : "?");
3554 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3555 input_streams[ost->source_index]->file_index,
3556 input_streams[ost->source_index]->st->index,
3559 if (ost->sync_ist != input_streams[ost->source_index])
3560 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3561 ost->sync_ist->file_index,
3562 ost->sync_ist->st->index);
3563 if (ost->stream_copy)
3564 av_log(NULL, AV_LOG_INFO, " (copy)");
/* transcoding path: report "codec (coder)" pairs; a coder whose name equals
 * the codec descriptor name is printed as "native" */
3566 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3567 const AVCodec *out_codec = ost->enc;
3568 const char *decoder_name = "?";
3569 const char *in_codec_name = "?";
3570 const char *encoder_name = "?";
3571 const char *out_codec_name = "?";
3572 const AVCodecDescriptor *desc;
3575 decoder_name = in_codec->name;
3576 desc = avcodec_descriptor_get(in_codec->id);
3578 in_codec_name = desc->name;
3579 if (!strcmp(decoder_name, in_codec_name))
3580 decoder_name = "native";
3584 encoder_name = out_codec->name;
3585 desc = avcodec_descriptor_get(out_codec->id);
3587 out_codec_name = desc->name;
3588 if (!strcmp(encoder_name, out_codec_name))
3589 encoder_name = "native";
3592 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3593 in_codec_name, decoder_name,
3594 out_codec_name, encoder_name);
3596 av_log(NULL, AV_LOG_INFO, "\n");
3600 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3604 transcode_init_done = 1;
3609 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* A stream no longer wants output when it is finished, its file hit the
 * -fs size limit, or it reached -frames (max_frames) — in which case all
 * streams of the same output file are closed as well.
 * NOTE(review): interior lines are elided in this view. */
3610 static int need_output(void)
3614 for (i = 0; i < nb_output_streams; i++) {
3615 OutputStream *ost = output_streams[i];
3616 OutputFile *of = output_files[ost->file_index];
3617 AVFormatContext *os = output_files[ost->file_index]->ctx;
3619 if (ost->finished ||
3620 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3622 if (ost->frame_number >= ost->max_frames) {
/* frame limit reached: close every stream of this output file */
3624 for (j = 0; j < of->ctx->nb_streams; j++)
3625 close_output_stream(output_streams[of->ost_index + j]);
3636 * Select the output stream to process.
3638 * @return selected output stream, or NULL if none available
/* Picks the unfinished stream whose current dts (rescaled to a common time
 * base) is smallest, to keep the outputs progressing together; a stream with
 * no dts yet sorts first (INT64_MIN). An "unavailable" winner yields NULL.
 * NOTE(review): interior lines are elided in this view. */
3640 static OutputStream *choose_output(void)
3643 int64_t opts_min = INT64_MAX;
3644 OutputStream *ost_min = NULL;
3646 for (i = 0; i < nb_output_streams; i++) {
3647 OutputStream *ost = output_streams[i];
3648 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3649 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3651 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3652 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3654 if (!ost->finished && opts < opts_min) {
3656 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios.
 * Used around interactive command entry; silently does nothing if
 * tcgetattr() fails (e.g. stdin is not a tty). */
3662 static void set_tty_echo(int on)
3666 if (tcgetattr(0, &tty) == 0) {
3667 if (on) tty.c_lflag |= ECHO;
3668 else tty.c_lflag &= ~ECHO;
3669 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin for single-key interactive commands (at most once per 100 ms,
 * never in daemon mode). Handles verbosity (+/-), QP histogram (s), packet
 * dumping (h, presumably), filter commands (c/C), debug modes (d/D) and the
 * help listing. Returns AVERROR_EXIT when the user asked to quit or a signal
 * was received, otherwise a non-negative value.
 * NOTE(review): interior lines are elided in this view — several branches
 * (e.g. the 'q' and 'h' handling) are not fully visible. */
3674 static int check_keyboard_interaction(int64_t cur_time)
3677 static int64_t last_time;
3678 if (received_nb_signals)
3679 return AVERROR_EXIT;
3680 /* read_key() returns 0 on EOF */
/* rate-limit polling to once per 100 ms */
3681 if(cur_time - last_time >= 100000 && !run_as_daemon){
3683 last_time = cur_time;
3687 return AVERROR_EXIT;
3688 if (key == '+') av_log_set_level(av_log_get_level()+10);
3689 if (key == '-') av_log_set_level(av_log_get_level()-10);
3690 if (key == 's') qp_hist ^= 1;
3693 do_hex_dump = do_pkt_dump = 0;
3694 } else if(do_pkt_dump){
3698 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read "<target> <time> <command> [<arg>]" and send/queue it to
 * matching filters ('c' = first matching filter only) */
3700 if (key == 'c' || key == 'C'){
3701 char buf[4096], target[64], command[256], arg[256] = {0};
3704 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3707 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3712 fprintf(stderr, "\n");
3714 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3715 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3716 target, time, command, arg);
3717 for (i = 0; i < nb_filtergraphs; i++) {
3718 FilterGraph *fg = filtergraphs[i];
3721 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3722 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3723 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3724 } else if (key == 'c') {
3725 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3726 ret = AVERROR_PATCHWELCOME;
3728 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3730 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3735 av_log(NULL, AV_LOG_ERROR,
3736 "Parse error, at least 3 arguments were expected, "
3737 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle codec debug flags; 'D' (presumably) prompts for a value */
3740 if (key == 'd' || key == 'D'){
3743 debug = input_streams[0]->st->codec->debug<<1;
3744 if(!debug) debug = 1;
3745 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3752 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3757 fprintf(stderr, "\n");
3758 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3759 fprintf(stderr,"error parsing debug value\n");
/* apply the chosen debug flags to all decoder and encoder contexts */
3761 for(i=0;i<nb_input_streams;i++) {
3762 input_streams[i]->st->codec->debug = debug;
3764 for(i=0;i<nb_output_streams;i++) {
3765 OutputStream *ost = output_streams[i];
3766 ost->enc_ctx->debug = debug;
3768 if(debug) av_log_set_level(AV_LOG_DEBUG);
3769 fprintf(stderr,"debug=%d\n", debug);
3772 fprintf(stderr, "key function\n"
3773 "? show this help\n"
3774 "+ increase verbosity\n"
3775 "- decrease verbosity\n"
3776 "c Send command to first matching filter supporting it\n"
3777 "C Send/Queue command to all matching filters\n"
3778 "D cycle through available debug modes\n"
3779 "h dump packets/hex press to cycle through the 3 states\n"
3781 "s Show QP histogram\n"
/* Per-input-file demuxer thread (arg is the InputFile*). Reads packets with
 * av_read_frame() and forwards them to the main thread through the file's
 * thread message queue; on read failure the error is propagated to the
 * receiver side via av_thread_message_queue_set_err_recv().
 * NOTE(review): interior lines are elided in this view. */
3788 static void *input_thread(void *arg)
/* non-blocking sends are used for non-seekable inputs (see init_input_threads) */
3791 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3796 ret = av_read_frame(f->ctx, &pkt);
3798 if (ret == AVERROR(EAGAIN)) {
3803 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3806 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: warn, then retry (presumably blocking) */
3807 if (flags && ret == AVERROR(EAGAIN)) {
3809 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3810 av_log(f->ctx, AV_LOG_WARNING,
3811 "Thread message queue blocking; consider raising the "
3812 "thread_queue_size option (current value: %d)\n",
3813 f->thread_queue_size);
3816 if (ret != AVERROR_EOF)
3817 av_log(f->ctx, AV_LOG_ERROR,
3818 "Unable to send packet to main thread: %s\n",
/* send failed: the packet was not consumed, so unref it here */
3820 av_packet_unref(&pkt);
3821 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down and reclaim all input demuxer threads: signal EOF to senders,
 * drain and unref any packets still queued, join each thread, then free the
 * message queues. Safe to call when no threads were started.
 * NOTE(review): interior lines are elided in this view. */
3829 static void free_input_threads(void)
3833 for (i = 0; i < nb_input_files; i++) {
3834 InputFile *f = input_files[i];
3837 if (!f || !f->in_thread_queue)
3839 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain queued packets so the sender can terminate */
3840 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3841 av_packet_unref(&pkt);
3843 pthread_join(f->thread, NULL);
3845 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file (skipped entirely for a single
 * input). Non-seekable inputs (pipes, devices — except lavfi) are marked
 * non_blocking so the reader thread never stalls on a full queue.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): interior lines are elided in this view. */
3849 static int init_input_threads(void)
3853 if (nb_input_files == 1)
3856 for (i = 0; i < nb_input_files; i++) {
3857 InputFile *f = input_files[i];
3859 if (f->ctx->pb ? !f->ctx->pb->seekable :
3860 strcmp(f->ctx->iformat->name, "lavfi"))
3861 f->non_blocking = 1;
3862 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3863 f->thread_queue_size, sizeof(AVPacket));
3867 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3868 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3869 av_thread_message_queue_free(&f->in_thread_queue);
3870 return AVERROR(ret);
/* Multi-threaded packet fetch: receive the next packet for this input file
 * from its demuxer thread's message queue (non-blocking for non-seekable
 * inputs). NOTE(review): interior lines are elided in this view. */
3876 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3878 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3880 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from an input file. With -re (rate emulation,
 * presumably guarded by a condition elided from this view) it returns
 * EAGAIN while a stream's dts is still ahead of wall-clock time since
 * start. With multiple inputs the threaded queue is used, otherwise
 * av_read_frame() directly. */
3884 static int get_input_packet(InputFile *f, AVPacket *pkt)
3888 for (i = 0; i < f->nb_streams; i++) {
3889 InputStream *ist = input_streams[f->ist_index + i];
/* compare stream progress (µs) against elapsed real time */
3890 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3891 int64_t now = av_gettime_relative() - ist->start;
3893 return AVERROR(EAGAIN);
3898 if (nb_input_files > 1)
3899 return get_input_packet_mt(f, pkt);
3901 return av_read_frame(f->ctx, pkt);
/* Return non-zero if any output stream is currently marked unavailable
 * (i.e. its input returned EAGAIN). Return lines are elided in this view. */
3904 static int got_eagain(void)
3907 for (i = 0; i < nb_output_streams; i++)
3908 if (output_streams[i]->unavailable)
/* Clear the EAGAIN bookkeeping: every input file's eagain flag and every
 * output stream's unavailable flag, so the next loop iteration retries all. */
3913 static void reset_eagain(void)
3916 for (i = 0; i < nb_input_files; i++)
3917 input_files[i]->eagain = 0;
3918 for (i = 0; i < nb_output_streams; i++)
3919 output_streams[i]->unavailable = 0;
3922 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares tmp (in tmp_time_base) against *duration (in time_base) with
 * av_compare_ts() and keeps the larger; the returned AVRational is the time
 * base *duration is expressed in afterwards.
 * NOTE(review): interior lines are elided in this view. */
3923 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3924 AVRational time_base)
3930 return tmp_time_base;
3933 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3936 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to the start, flush all
 * active decoders, and update ifile->duration/time_base with the length of
 * one full pass so looped timestamps can be offset correctly.
 * Returns 0 on success or a negative AVERROR (seek failure).
 * NOTE(review): interior lines are elided in this view. */
3942 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3945 AVCodecContext *avctx;
3946 int i, ret, has_audio = 0;
3947 int64_t duration = 0;
3949 ret = av_seek_frame(is, -1, is->start_time, 0);
3953 for (i = 0; i < ifile->nb_streams; i++) {
3954 ist = input_streams[ifile->ist_index + i];
3955 avctx = ist->dec_ctx;
/* flush each active decoder so no stale frames leak into the next pass */
3958 if (ist->decoding_needed) {
3959 process_input_packet(ist, NULL, 1);
3960 avcodec_flush_buffers(avctx);
3963 /* duration is the length of the last frame in a stream
3964 * when audio stream is present we don't care about
3965 * last video frame length because it's not defined exactly */
3966 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3970 for (i = 0; i < ifile->nb_streams; i++) {
3971 ist = input_streams[ifile->ist_index + i];
3972 avctx = ist->dec_ctx;
/* last-frame duration: exact for audio (nb_samples / sample_rate),
 * estimated from the frame rate for video, 1 tick as a last resort */
3975 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3976 AVRational sample_rate = {1, avctx->sample_rate};
3978 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3982 if (ist->framerate.num) {
3983 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3984 } else if (ist->st->avg_frame_rate.num) {
3985 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3986 } else duration = 1;
3988 if (!ifile->duration)
3989 ifile->time_base = ist->st->time_base;
3990 /* the total duration of the stream, max_pts - min_pts is
3991 * the duration of the stream without the last frame */
3992 duration += ist->max_pts - ist->min_pts;
3993 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* decrement the remaining loop count when it is finite */
3997 if (ifile->loop > 0)
4005 * - 0 -- one packet was read and processed
4006 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4007 * this function should be called again
4008 * - AVERROR_EOF -- this function should not be called again
/* Reads one packet from the given input file and pushes it through
 * process_input_packet(). On EOF it optionally loops the input
 * (-stream_loop), flushes decoders and finishes streamcopy outputs; before
 * decoding it applies timestamp wrap correction, ts offsets, ts_scale,
 * per-loop duration offsets and discontinuity handling.
 * NOTE(review): interior lines are elided in this view. */
4010 static int process_input(int file_index)
4012 InputFile *ifile = input_files[file_index];
4013 AVFormatContext *is;
4021 ret = get_input_packet(ifile, &pkt);
4023 if (ret == AVERROR(EAGAIN)) {
/* EOF with -stream_loop: rewind and try reading again */
4027 if (ret < 0 && ifile->loop) {
4028 if ((ret = seek_to_start(ifile, is)) < 0)
4030 ret = get_input_packet(ifile, &pkt);
4031 if (ret == AVERROR(EAGAIN)) {
4037 if (ret != AVERROR_EOF) {
4038 print_error(is->filename, ret);
/* real EOF: flush decoders and finish non-lavfi outputs of this file */
4043 for (i = 0; i < ifile->nb_streams; i++) {
4044 ist = input_streams[ifile->ist_index + i];
4045 if (ist->decoding_needed) {
4046 ret = process_input_packet(ist, NULL, 0);
4051 /* mark all outputs that don't go through lavfi as finished */
4052 for (j = 0; j < nb_output_streams; j++) {
4053 OutputStream *ost = output_streams[j];
4055 if (ost->source_index == ifile->ist_index + i &&
4056 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4057 finish_output_stream(ost);
4061 ifile->eof_reached = 1;
4062 return AVERROR(EAGAIN);
4068 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4069 is->streams[pkt.stream_index]);
4071 /* the following test is needed in case new streams appear
4072 dynamically in stream : we ignore them */
4073 if (pkt.stream_index >= ifile->nb_streams) {
4074 report_new_stream(file_index, &pkt);
4075 goto discard_packet;
4078 ist = input_streams[ifile->ist_index + pkt.stream_index];
4080 ist->data_size += pkt.size;
4084 goto discard_packet;
4086 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4087 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4092 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4093 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4094 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4095 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4096 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4097 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4098 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4099 av_ts2str(input_files[ist->file_index]->ts_offset),
4100 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for streams with < 64 pts_wrap_bits */
4103 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4104 int64_t stime, stime2;
4105 // Correcting starttime based on the enabled streams
4106 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4107 // so we instead do it here as part of discontinuity handling
4108 if ( ist->next_dts == AV_NOPTS_VALUE
4109 && ifile->ts_offset == -is->start_time
4110 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4111 int64_t new_start_time = INT64_MAX;
4112 for (i=0; i<is->nb_streams; i++) {
4113 AVStream *st = is->streams[i];
4114 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4116 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4118 if (new_start_time > is->start_time) {
4119 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4120 ifile->ts_offset = -new_start_time;
4124 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4125 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4126 ist->wrap_correction_done = 1;
4128 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4129 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4130 ist->wrap_correction_done = 0;
4132 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4133 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4134 ist->wrap_correction_done = 0;
4138 /* add the stream-global side data to the first packet */
4139 if (ist->nb_packets == 1) {
4140 if (ist->st->nb_side_data)
4141 av_packet_split_side_data(&pkt);
4142 for (i = 0; i < ist->st->nb_side_data; i++) {
4143 AVPacketSideData *src_sd = &ist->st->side_data[i];
4146 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4148 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4151 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4155 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the input file's ts_offset, then the per-stream -itsscale factor */
4159 if (pkt.dts != AV_NOPTS_VALUE)
4160 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4161 if (pkt.pts != AV_NOPTS_VALUE)
4162 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4164 if (pkt.pts != AV_NOPTS_VALUE)
4165 pkt.pts *= ist->ts_scale;
4166 if (pkt.dts != AV_NOPTS_VALUE)
4167 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity check against the file's last seen ts */
4169 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4170 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4171 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4172 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4173 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4174 int64_t delta = pkt_dts - ifile->last_ts;
4175 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4176 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4177 ifile->ts_offset -= delta;
4178 av_log(NULL, AV_LOG_DEBUG,
4179 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4180 delta, ifile->ts_offset);
4181 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4182 if (pkt.pts != AV_NOPTS_VALUE)
4183 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* offset by the accumulated duration of previous -stream_loop passes */
4187 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4188 if (pkt.pts != AV_NOPTS_VALUE) {
4189 pkt.pts += duration;
4190 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4191 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4194 if (pkt.dts != AV_NOPTS_VALUE)
4195 pkt.dts += duration;
/* per-stream discontinuity check against the expected next_dts */
4197 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4198 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4199 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4200 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4202 int64_t delta = pkt_dts - ist->next_dts;
4203 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4204 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4205 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4206 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4207 ifile->ts_offset -= delta;
4208 av_log(NULL, AV_LOG_DEBUG,
4209 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4210 delta, ifile->ts_offset);
4211 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4212 if (pkt.pts != AV_NOPTS_VALUE)
4213 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: drop wildly out-of-range dts/pts instead */
4216 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4217 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4218 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4219 pkt.dts = AV_NOPTS_VALUE;
4221 if (pkt.pts != AV_NOPTS_VALUE){
4222 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4223 delta = pkt_pts - ist->next_dts;
4224 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4225 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4226 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4227 pkt.pts = AV_NOPTS_VALUE;
4233 if (pkt.dts != AV_NOPTS_VALUE)
4234 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4237 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4238 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4239 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4240 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4241 av_ts2str(input_files[ist->file_index]->ts_offset),
4242 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4245 sub2video_heartbeat(ist, pkt.pts);
4247 process_input_packet(ist, &pkt, 0);
4250 av_packet_unref(&pkt);
4256 * Perform a step of transcoding for the specified filter graph.
4258 * @param[in] graph filter graph to consider
4259 * @param[out] best_ist input stream where a frame would allow to continue
4260 * @return 0 for success, <0 for error
/* Requests the oldest frame from the graph; on EAGAIN it selects the input
 * whose buffersrc has the most failed requests (*best_ist, presumably set in
 * elided lines) so the caller knows which input to feed next. If every input
 * is stalled (eagain/eof), the graph's outputs are marked unavailable.
 * NOTE(review): interior lines are elided in this view. */
4262 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4265 int nb_requests, nb_requests_max = 0;
4266 InputFilter *ifilter;
4270 ret = avfilter_graph_request_oldest(graph->graph);
4272 return reap_filters(0);
4274 if (ret == AVERROR_EOF) {
/* graph fully drained: flush remaining frames and close its outputs */
4275 ret = reap_filters(1);
4276 for (i = 0; i < graph->nb_outputs; i++)
4277 close_output_stream(graph->outputs[i]->ost);
4280 if (ret != AVERROR(EAGAIN))
4283 for (i = 0; i < graph->nb_inputs; i++) {
4284 ifilter = graph->inputs[i];
4286 if (input_files[ist->file_index]->eagain ||
4287 input_files[ist->file_index]->eof_reached)
4289 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4290 if (nb_requests > nb_requests_max) {
4291 nb_requests_max = nb_requests;
4297 for (i = 0; i < graph->nb_outputs; i++)
4298 graph->outputs[i]->ost->unavailable = 1;
4304 * Run a single step of transcoding.
4306 * @return 0 for success, <0 for error
/* Chooses the neediest output stream, determines which input stream should
 * supply it (directly, or via transcode_from_filter() for filtered outputs),
 * reads/processes one packet from that input, then reaps finished filter
 * output. NOTE(review): interior lines are elided in this view. */
4308 static int transcode_step(void)
4314 ost = choose_output();
4321 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4326 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4331 av_assert0(ost->source_index >= 0);
4332 ist = input_streams[ost->source_index];
4335 ret = process_input(ist->file_index);
4336 if (ret == AVERROR(EAGAIN)) {
/* the whole file is stalled -> mark this output unavailable for now */
4337 if (input_files[ist->file_index]->eagain)
4338 ost->unavailable = 1;
4343 return ret == AVERROR_EOF ? 0 : ret;
4345 return reap_filters(0);
4349 * The following code is the main loop of the file converter
/* Drives the whole conversion: transcode_init(), optional input threads,
 * then transcode_step() until no output wants more data or a termination
 * signal arrives; afterwards flushes decoders, writes trailers, closes
 * encoders/decoders and frees per-stream resources.
 * NOTE(review): interior lines (including the return) are elided in this
 * view. */
4351 static int transcode(void)
4354 AVFormatContext *os;
4357 int64_t timer_start;
4358 int64_t total_packets_written = 0;
4360 ret = transcode_init();
4364 if (stdin_interaction) {
4365 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4368 timer_start = av_gettime_relative();
4371 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode step per iteration until done or signalled */
4375 while (!received_sigterm) {
4376 int64_t cur_time= av_gettime_relative();
4378 /* if 'q' pressed, exits */
4379 if (stdin_interaction)
4380 if (check_keyboard_interaction(cur_time) < 0)
4383 /* check if there's any stream where output is still needed */
4384 if (!need_output()) {
4385 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4389 ret = transcode_step();
4390 if (ret < 0 && ret != AVERROR_EOF) {
4392 av_strerror(ret, errbuf, sizeof(errbuf));
4394 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4398 /* dump report by using the output first video and audio streams */
4399 print_report(0, timer_start, cur_time);
4402 free_input_threads();
4405 /* at the end of stream, we must flush the decoder buffers */
4406 for (i = 0; i < nb_input_streams; i++) {
4407 ist = input_streams[i];
4408 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4409 process_input_packet(ist, NULL, 0);
4416 /* write the trailer if needed and close file */
4417 for (i = 0; i < nb_output_files; i++) {
4418 os = output_files[i]->ctx;
4419 if (!output_files[i]->header_written) {
4420 av_log(NULL, AV_LOG_ERROR,
4421 "Nothing was written into output file %d (%s), because "
4422 "at least one of its streams received no packets.\n",
4426 if ((ret = av_write_trailer(os)) < 0) {
4427 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4433 /* dump report by using the first video and audio streams */
4434 print_report(1, timer_start, av_gettime_relative());
4436 /* close each encoder */
4437 for (i = 0; i < nb_output_streams; i++) {
4438 ost = output_streams[i];
4439 if (ost->encoding_needed) {
4440 av_freep(&ost->enc_ctx->stats_in);
4442 total_packets_written += ost->packets_written;
4445 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4446 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4450 /* close each decoder */
4451 for (i = 0; i < nb_input_streams; i++) {
4452 ist = input_streams[i];
4453 if (ist->decoding_needed) {
4454 avcodec_close(ist->dec_ctx);
4455 if (ist->hwaccel_uninit)
4456 ist->hwaccel_uninit(ist->dec_ctx);
4460 av_buffer_unref(&hw_device_ctx);
/* error path (label elided): make sure input threads are torn down */
4467 free_input_threads();
/* release per-output-stream resources (2-pass logs, options, dicts) */
4470 if (output_streams) {
4471 for (i = 0; i < nb_output_streams; i++) {
4472 ost = output_streams[i];
4475 if (fclose(ost->logfile))
4476 av_log(NULL, AV_LOG_ERROR,
4477 "Error closing logfile, loss of information possible: %s\n",
4478 av_err2str(AVERROR(errno)));
4479 ost->logfile = NULL;
4481 av_freep(&ost->forced_kf_pts);
4482 av_freep(&ost->apad);
4483 av_freep(&ost->disposition);
4484 av_dict_free(&ost->encoder_opts);
4485 av_dict_free(&ost->sws_dict);
4486 av_dict_free(&ost->swr_opts);
4487 av_dict_free(&ost->resample_opts);
/* CPU user time consumed by this process, in microseconds.
 * Uses getrusage() on POSIX, GetProcessTimes() on Windows, and falls back
 * to wall-clock time where neither is available. */
4495 static int64_t getutime(void)
4498 struct rusage rusage;
4500 getrusage(RUSAGE_SELF, &rusage);
4501 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4502 #elif HAVE_GETPROCESSTIMES
4504 FILETIME c, e, k, u;
4505 proc = GetCurrentProcess();
4506 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100 ns units -> divide by 10 for microseconds */
4507 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4509 return av_gettime_relative();
/* Peak memory usage of this process in bytes (ru_maxrss on POSIX is in
 * kilobytes, hence the * 1024; PeakPagefileUsage on Windows). Fallback
 * (elided in this view) when neither API is available. */
4513 static int64_t getmaxrss(void)
4515 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4516 struct rusage rusage;
4517 getrusage(RUSAGE_SELF, &rusage);
4518 return (int64_t)rusage.ru_maxrss * 1024;
4519 #elif HAVE_GETPROCESSMEMORYINFO
4521 PROCESS_MEMORY_COUNTERS memcounters;
4522 proc = GetCurrentProcess();
4523 memcounters.cb = sizeof(memcounters);
4524 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4525 return memcounters.PeakPagefileUsage;
/* No-op log callback that discards all messages; installed in main() when
 * the "-d" (daemon) flag is given to silence console logging.
 * (Empty body elided in this view.) */
4531 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: registers cleanup, parses the log level, registers
 * all codecs/devices/filters/protocols, parses the command line into
 * input/output files, runs transcode(), and reports timing/decode-error
 * statistics before exiting. NOTE(review): interior lines are elided in
 * this view. */
4535 int main(int argc, char **argv)
4542 register_exit(ffmpeg_cleanup);
4544 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4546 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4547 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: run as daemon, suppress logging */
4549 if(argc>1 && !strcmp(argv[1], "-d")){
4551 av_log_set_callback(log_callback_null);
4556 avcodec_register_all();
4558 avdevice_register_all();
4560 avfilter_register_all();
4562 avformat_network_init();
4564 show_banner(argc, argv, options);
4566 /* parse options and open all input/output files */
4567 ret = ffmpeg_parse_options(argc, argv);
4571 if (nb_output_files <= 0 && nb_input_files == 0) {
4573 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4577 /* file converter / grab */
4578 if (nb_output_files <= 0) {
4579 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4583 // if (nb_input_files == 0) {
4584 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4588 for (i = 0; i < nb_output_files; i++) {
4589 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4593 current_time = ti = getutime();
4594 if (transcode() < 0)
4596 ti = getutime() - ti;
4598 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4600 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4601 decode_error_stat[0], decode_error_stat[1]);
/* fail if the decode error rate exceeds -max_error_rate */
4602 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4605 exit_program(received_nb_signals ? 255 : main_return_code);
4606 return main_return_code;