2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static unsigned dup_warning = 1000;
130 static int nb_frames_drop = 0;
131 static int64_t decode_error_stat[2];
133 static int want_sdp = 1;
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
138 static uint8_t *subtitle_out;
140 InputStream **input_streams = NULL;
141 int nb_input_streams = 0;
142 InputFile **input_files = NULL;
143 int nb_input_files = 0;
145 OutputStream **output_streams = NULL;
146 int nb_output_streams = 0;
147 OutputFile **output_files = NULL;
148 int nb_output_files = 0;
150 FilterGraph **filtergraphs;
155 /* init terminal so that we can grab keys */
156 static struct termios oldtty;
157 static int restore_tty;
161 static void free_input_threads(void);
165 Convert subtitles to video with alpha to insert them in filter graphs.
166 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank (fully transparent) RGB32 canvas in ist->sub2video.frame,
 * sized from the decoder when available, else from the recorded sub2video
 * dimensions. Returns <0 on buffer-allocation failure.
 * NOTE(review): this excerpt is missing interior lines (declaration of `ret`,
 * braces, final return) — do not treat it as a complete function body. */
169 static int sub2video_get_blank_frame(InputStream *ist)
172     AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before resizing/reallocating. */
174     av_frame_unref(frame);
175     ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176     ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177     ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
178     if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero the pixel data: RGB32 with all-zero bytes is fully transparent black. */
180     memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted (PAL8) subtitle rectangle onto the w x h RGB32 canvas at
 * dst/dst_linesize, expanding palette indices through the rect's palette.
 * Rects that are not bitmaps, or that fall outside the canvas, are skipped
 * with a warning. NOTE(review): interior lines are missing in this excerpt
 * (parameter list tail, src declarations, closing braces). */
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
187     uint32_t *pal, *dst2;
191     if (r->type != SUBTITLE_BITMAP) {
192         av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Reject rectangles overflowing the canvas rather than writing out of bounds. */
195     if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196         av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197             r->x, r->y, r->w, r->h, w, h
/* Advance to the rect's top-left corner; 4 bytes per RGB32 pixel. */
202     dst += r->y * dst_linesize + r->x * 4;
204     pal = (uint32_t *)r->data[1];
205     for (y = 0; y < r->h; y++) {
206         dst2 = (uint32_t *)dst;
208         for (x = 0; x < r->w; x++)
/* Palette lookup: each source byte indexes a 32-bit ARGB palette entry. */
209             *(dst2++) = pal[*(src2++)];
211         src += r->linesize[0];
/* Push the current sub2video frame (stamped with pts) into every filter
 * attached to this input stream, keeping our own reference to the frame. */
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217     AVFrame *frame = ist->sub2video.frame;
220     av_assert1(frame->data[0]);
/* Record the pts so the heartbeat logic can tell whether we are ahead. */
221     ist->sub2video.last_pts = frame->pts = pts;
222     for (i = 0; i < ist->nb_filters; i++)
/* KEEP_REF: the same frame is sent to several buffersrc filters. */
223         av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224                                      AV_BUFFERSRC_FLAG_KEEP_REF |
225                                      AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto a fresh blank canvas and push it to the filters.
 * When sub is non-NULL, display times (in ms relative to sub->pts, which is
 * in AV_TIME_BASE units) are rescaled into the stream time base; otherwise
 * the previous end_pts is reused (clearing heartbeat — interior lines for the
 * NULL branch are missing in this excerpt). */
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
230     AVFrame *frame = ist->sub2video.frame;
234     int64_t pts, end_pts;
239         pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240                                  AV_TIME_BASE_Q, ist->st->time_base);
241         end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242                                    AV_TIME_BASE_Q, ist->st->time_base);
243         num_rects = sub->num_rects;
245         pts = ist->sub2video.end_pts;
249     if (sub2video_get_blank_frame(ist) < 0) {
250         av_log(ist->dec_ctx, AV_LOG_ERROR,
251                "Impossible to get a blank canvas.\n");
254     dst = frame->data [0];
255     dst_linesize = frame->linesize[0];
/* Blit each rectangle of the subtitle onto the canvas. */
256     for (i = 0; i < num_rects; i++)
257         sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258     sub2video_push_ref(ist, pts);
259     ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame on every sibling subtitle stream of the
 * same input file, so filters (e.g. overlay) waiting on a subtitle input do
 * not stall while video frames accumulate in the graph. */
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264     InputFile *infile = input_files[ist->file_index];
268     /* When a frame is read from a file, examine all sub2video streams in
269        the same file and send the sub2video frame again. Otherwise, decoded
270        video frames could be accumulating in the filter graph while a filter
271        (possibly overlay) is desperately waiting for a subtitle frame. */
272     for (i = 0; i < infile->nb_streams; i++) {
273         InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video inputs. */
274         if (!ist2->sub2video.frame)
276         /* subtitles seem to be usually muxed ahead of other streams;
277            if not, subtracting a larger time here is necessary */
278         pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279         /* do not send the heartbeat frame if the subtitle is already ahead */
280         if (pts2 <= ist2->sub2video.last_pts)
/* Refresh the canvas when the current subtitle has expired or none exists. */
282         if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283             sub2video_update(ist2, NULL);
284         for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285             nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
/* NOTE(review): the condition guarding this push (presumably nb_reqs) is on a
 * line missing from this excerpt. */
287             sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: clear any still-displayed
 * subtitle, then send NULL (EOF) to every attached buffer source. */
291 static void sub2video_flush(InputStream *ist)
295     if (ist->sub2video.end_pts < INT64_MAX)
296         sub2video_update(ist, NULL);
297     for (i = 0; i < ist->nb_filters; i++)
298         av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the saved tty attributes.
 * (The guard on restore_tty is on a line missing from this excerpt.) */
303 static void term_exit_sigsafe(void)
307         tcsetattr (0, TCSANOW, &oldtty);
313     av_log(NULL, AV_LOG_QUIET, "%s", "");
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
/* Signal handler: records which signal arrived and counts deliveries; after
 * more than 3 signals it reports via write(2) (async-signal-safe) and the
 * missing trailing lines presumably hard-exit. */
324 sigterm_handler(int sig)
326     received_sigterm = sig;
327     received_nb_signals++;
329     if(received_nb_signals > 3) {
330         write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331               strlen("Received > 3 system signals, hard exiting\n"));
337 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the POSIX
 * signal handler. For close/logoff/shutdown the process is hard-terminated
 * once this handler returns, so it stalls until the main thread has cleaned
 * up (Windows allows at most ~5 seconds). */
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340     av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
345     case CTRL_BREAK_EVENT:
346         sigterm_handler(SIGINT);
349     case CTRL_CLOSE_EVENT:
350     case CTRL_LOGOFF_EVENT:
351     case CTRL_SHUTDOWN_EVENT:
352         sigterm_handler(SIGTERM);
353         /* Basically, with these 3 events, when we return from this method the
354            process is hard terminated, so stall as long as we need to
355            to try and let the main thread(s) clean up and gracefully terminate
356            (we have at most 5 seconds, but should be done far before that). */
357         while (!ffmpeg_exited) {
363         av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
372 if (!run_as_daemon && stdin_interaction) {
374 if (tcgetattr (0, &tty) == 0) {
378 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
379 |INLCR|IGNCR|ICRNL|IXON);
380 tty.c_oflag |= OPOST;
381 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
382 tty.c_cflag &= ~(CSIZE|PARENB);
387 tcsetattr (0, TCSANOW, &tty);
389 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
393 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
394 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
396 signal(SIGXCPU, sigterm_handler);
398 #if HAVE_SETCONSOLECTRLHANDLER
399 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
403 /* read a key without blocking */
/* POSIX path uses select() on stdin; Windows path uses PeekNamedPipe when
 * stdin is a pipe rather than a console. Large parts of both branches are
 * missing from this excerpt. */
404 static int read_key(void)
416     n = select(1, &rfds, NULL, NULL, &tv);
425 #    if HAVE_PEEKNAMEDPIPE
427     static HANDLE input_handle;
430         input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails on non-console handles, i.e. stdin is a pipe. */
431         is_pipe = !GetConsoleMode(input_handle, &dw);
435         /* When running under a GUI, you will end here. */
436         if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
437             // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has arrived.
 * Before transcode_init_done is set, a single signal is enough; afterwards a
 * second one is required (the comparison exploits both being 0/1-ish ints). */
455 static int decode_interrupt_cb(void *ctx)
457     return received_nb_signals > transcode_init_done;
460 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, registered to run at exit: frees filter graphs, output
 * files/streams, input files/streams, the vstats file and the global stream
 * arrays, then reports how the run ended. `ret` is the exit status being
 * propagated. NOTE(review): interior lines are missing throughout this
 * excerpt (loop braces, some conditionals). */
462 static void ffmpeg_cleanup(int ret)
467         int maxrss = getmaxrss() / 1024;
468         av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* Free every filtergraph: inputs, outputs, the graph itself. */
471     for (i = 0; i < nb_filtergraphs; i++) {
472         FilterGraph *fg = filtergraphs[i];
473         avfilter_graph_free(&fg->graph);
474         for (j = 0; j < fg->nb_inputs; j++) {
475             av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
476             av_freep(&fg->inputs[j]->name);
477             av_freep(&fg->inputs[j]);
479         av_freep(&fg->inputs);
480         for (j = 0; j < fg->nb_outputs; j++) {
481             av_freep(&fg->outputs[j]->name);
482             av_freep(&fg->outputs[j]->formats);
483             av_freep(&fg->outputs[j]->channel_layouts);
484             av_freep(&fg->outputs[j]->sample_rates);
485             av_freep(&fg->outputs[j]);
487         av_freep(&fg->outputs);
488         av_freep(&fg->graph_desc);
490         av_freep(&filtergraphs[i]);
492     av_freep(&filtergraphs);
494     av_freep(&subtitle_out);
/* Close and free each output file's muxer context and options. */
497     for (i = 0; i < nb_output_files; i++) {
498         OutputFile *of = output_files[i];
503         if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
505         avformat_free_context(s);
506         av_dict_free(&of->opts);
508         av_freep(&output_files[i]);
/* Free per-output-stream resources: bitstream filters, frames, encoder. */
510     for (i = 0; i < nb_output_streams; i++) {
511         OutputStream *ost = output_streams[i];
516         for (j = 0; j < ost->nb_bitstream_filters; j++)
517             av_bsf_free(&ost->bsf_ctx[j]);
518         av_freep(&ost->bsf_ctx);
519         av_freep(&ost->bsf_extradata_updated);
521         av_frame_free(&ost->filtered_frame);
522         av_frame_free(&ost->last_frame);
523         av_dict_free(&ost->encoder_opts);
525         av_parser_close(ost->parser);
526         avcodec_free_context(&ost->parser_avctx);
528         av_freep(&ost->forced_keyframes);
529         av_expr_free(ost->forced_keyframes_pexpr);
530         av_freep(&ost->avfilter);
531         av_freep(&ost->logfile_prefix);
533         av_freep(&ost->audio_channels_map);
534         ost->audio_channels_mapped = 0;
536         av_dict_free(&ost->sws_dict);
538         avcodec_free_context(&ost->enc_ctx);
539         avcodec_parameters_free(&ost->ref_par);
/* Drain and unref any packets still buffered for a muxer that never wrote
 * its header. */
541         while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
543             av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
544             av_packet_unref(&pkt);
546         av_fifo_freep(&ost->muxing_queue);
548         av_freep(&output_streams[i]);
551     free_input_threads();
553     for (i = 0; i < nb_input_files; i++) {
554         avformat_close_input(&input_files[i]->ctx);
555         av_freep(&input_files[i]);
557     for (i = 0; i < nb_input_streams; i++) {
558         InputStream *ist = input_streams[i];
560         av_frame_free(&ist->decoded_frame);
561         av_frame_free(&ist->filter_frame);
562         av_dict_free(&ist->decoder_opts);
563         avsubtitle_free(&ist->prev_sub.subtitle);
564         av_frame_free(&ist->sub2video.frame);
565         av_freep(&ist->filters);
566         av_freep(&ist->hwaccel_device);
567         av_freep(&ist->dts_buffer);
569         avcodec_free_context(&ist->dec_ctx);
571         av_freep(&input_streams[i]);
/* fclose can fail on flush; a failure may mean vstats data was lost. */
575         if (fclose(vstats_file))
576             av_log(NULL, AV_LOG_ERROR,
577                    "Error closing vstats file, loss of information possible: %s\n",
578                    av_err2str(AVERROR(errno)));
580     av_freep(&vstats_filename);
582     av_freep(&input_streams);
583     av_freep(&input_files);
584     av_freep(&output_streams);
585     av_freep(&output_files);
589     avformat_network_deinit();
591     if (received_sigterm) {
592         av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
593                (int) received_sigterm);
594     } else if (ret && transcode_init_done) {
595         av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to drop options that were consumed by another component. */
601 void remove_avoptions(AVDictionary **a, AVDictionary *b)
603     AVDictionaryEntry *t = NULL;
/* "" + AV_DICT_IGNORE_SUFFIX iterates over all entries of b. */
605     while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
606         av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed — i.e. the user
 * supplied an option no component recognized. */
610 void assert_avoptions(AVDictionary *m)
612     AVDictionaryEntry *t;
613     if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
614         av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort when an experimental codec is used without -strict experimental.
 * NOTE(review): only the signature is visible in this excerpt. */
619 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the user-CPU time elapsed since the previous call
 * under the printf-style label fmt, then reset the reference timestamp
 * (reset lines are missing from this excerpt). A NULL fmt only resets. */
624 static void update_benchmark(const char *fmt, ...)
626     if (do_benchmark_all) {
627         int64_t t = getutime();
633             vsnprintf(buf, sizeof(buf), fmt, va);
635             av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `ost` itself gets this_stream flags,
 * all other streams get the `others` flags. */
641 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
644     for (i = 0; i < nb_output_streams; i++) {
645         OutputStream *ost2 = output_streams[i];
646         ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer for stream ost of file of. Responsibilities
 * visible here: buffer packets while the muxer header is unwritten, enforce
 * -vsync drop / negative async by clearing timestamps, cap frame count for
 * non-encoded video, harvest encoder quality/error side data, fix DTS>PTS
 * and non-monotonic DTS, account sizes, then interleave-write. The packet is
 * always unreffed before return. NOTE(review): many interior lines are
 * missing (declarations, braces, some returns). */
650 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
652     AVFormatContext *s = of->ctx;
653     AVStream *st = ost->st;
656     if (!of->header_written) {
658         /* the muxer is not initialized yet, buffer the packet */
/* Grow the FIFO geometrically, but never past max_muxing_queue_size. */
659         if (!av_fifo_space(ost->muxing_queue)) {
660             int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
661                                  ost->max_muxing_queue_size);
662             if (new_size <= av_fifo_size(ost->muxing_queue)) {
663                 av_log(NULL, AV_LOG_ERROR,
664                        "Too many packets buffered for output stream %d:%d.\n",
665                        ost->file_index, ost->st->index);
668             ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Transfer ownership of the packet's buffers into the queue. */
672         av_packet_move_ref(&tmp_pkt, pkt);
673         av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
677     if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
678         (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
679         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
682      * Audio encoders may split the packets --  #frames in != #packets out.
683      * But there is no reordering, so we can limit the number of output packets
684      * by simply dropping them here.
685      * Counting encoded video frames needs to be done separately because of
686      * reordering, see do_video_out()
688     if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
689         if (ost->frame_number >= ost->max_frames) {
690             av_packet_unref(pkt);
695     if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* Pull encoder quality stats attached as packet side data. */
697         uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
699         ost->quality = sd ? AV_RL32(sd) : -1;
700         ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
702         for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
704                 ost->error[i] = AV_RL64(sd + 8 + 8*i);
709         if (ost->frame_rate.num && ost->is_cfr) {
710             if (pkt->duration > 0)
711                 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
712             pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
717     if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
718         if (pkt->dts != AV_NOPTS_VALUE &&
719             pkt->pts != AV_NOPTS_VALUE &&
720             pkt->dts > pkt->pts) {
721             av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
723                    ost->file_index, ost->st->index);
/* Median-of-three trick: keeps the middle value of pts, dts and
 * last_mux_dts+1 as the new dts. */
725             pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
726                      - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
727                      - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
729         if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
730             pkt->dts != AV_NOPTS_VALUE &&
731             !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
732             ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict formats require strictly increasing DTS; NONSTRICT allows equal. */
733             int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
734             if (pkt->dts < max) {
735                 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
736                 av_log(s, loglevel, "Non-monotonous DTS in output stream "
737                        "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
738                        ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
740                     av_log(NULL, AV_LOG_FATAL, "aborting.\n");
743                 av_log(s, loglevel, "changing to %"PRId64". This may result "
744                        "in incorrect timestamps in the output file.\n",
746                 if (pkt->pts >= pkt->dts)
747                     pkt->pts = FFMAX(pkt->pts, max);
752     ost->last_mux_dts = pkt->dts;
754     ost->data_size += pkt->size;
755     ost->packets_written++;
757     pkt->stream_index = ost->index;
760         av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
761                 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
762                 av_get_media_type_string(ost->enc_ctx->codec_type),
763                 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
764                 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
769     ret = av_interleaved_write_frame(s, pkt);
/* On a write error, finish every stream so the transcode loop stops. */
771         print_error("av_interleaved_write_frame()", ret);
772         main_return_code = 1;
773         close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
775     av_packet_unref(pkt);
/* Mark this encoder finished and shorten the file's recording time to the
 * stream's current end, so sibling streams stop at the same point.
 * (The guard around the recording-time update is missing in this excerpt.) */
778 static void close_output_stream(OutputStream *ost)
780     OutputFile *of = output_files[ost->file_index];
782     ost->finished |= ENCODER_FINISHED;
784         int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
785         of->recording_time = FFMIN(of->recording_time, end);
/* Route a packet through the stream's bitstream-filter chain (if any) and
 * then to write_packet(). Includes the aac_adtstoasc extradata-propagation
 * workaround described inline. NOTE(review): interior lines are missing
 * (declarations of ret/idx, loop structure, some error paths). */
789 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
793     /* apply the output bitstream filters, if any */
794     if (ost->nb_bitstream_filters) {
797         av_packet_split_side_data(pkt);
/* Feed the packet into the first filter of the chain. */
798         ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
804             /* get a packet from the previous filter up the chain */
805             ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
806             if (ret == AVERROR(EAGAIN)) {
812             /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
813              * the api states this shouldn't happen after init(). Propagate it here to the
814              * muxer and to the next filters in the chain to workaround this.
815              * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
816              * par_out->extradata and adapt muxers accordingly to get rid of this. */
817             if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
818                 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
821                 ost->bsf_extradata_updated[idx - 1] |= 1;
824             /* send it to the next filter down the chain or to the muxer */
825             if (idx < ost->nb_bitstream_filters) {
826                 /* HACK/FIXME! - See above */
827                 if (!(ost->bsf_extradata_updated[idx] & 2)) {
828                     ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
831                     ost->bsf_extradata_updated[idx] |= 2;
833                 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
838                 write_packet(of, pkt, ost);
/* No bitstream filters: pass the packet straight to the muxer. */
841         write_packet(of, pkt, ost);
844     if (ret < 0 && ret != AVERROR_EOF) {
845         av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
846                "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether this stream is still within the file's -t recording time;
 * closes the stream and (per the missing tail lines, presumably) returns 0
 * once the limit is reached. */
852 static int check_recording_time(OutputStream *ost)
854     OutputFile *of = output_files[ost->file_index];
856     if (of->recording_time != INT64_MAX &&
857         av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
858                       AV_TIME_BASE_Q) >= 0) {
859         close_output_stream(ost);
/* Encode one audio frame: stamp/advance sync_opts by nb_samples, send the
 * frame to the encoder, drain all resulting packets, rescale timestamps to
 * the stream time base and pass each packet to output_packet(). Uses the
 * send/receive (avcodec_send_frame / avcodec_receive_packet) API.
 * NOTE(review): interior lines are missing (loop braces, error labels). */
865 static void do_audio_out(OutputFile *of, OutputStream *ost,
868     AVCodecContext *enc = ost->enc_ctx;
872     av_init_packet(&pkt);
876     if (!check_recording_time(ost))
/* Without a usable pts (or with negative async), free-run on sync_opts. */
879     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
880         frame->pts = ost->sync_opts;
881     ost->sync_opts = frame->pts + frame->nb_samples;
882     ost->samples_encoded += frame->nb_samples;
883     ost->frames_encoded++;
885     av_assert0(pkt.size || !pkt.data);
886     update_benchmark(NULL);
888         av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
889                "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
890                av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
891                enc->time_base.num, enc->time_base.den);
894     ret = avcodec_send_frame(enc, frame);
/* Drain loop: EAGAIN means the encoder wants more input. */
899         ret = avcodec_receive_packet(enc, &pkt);
900         if (ret == AVERROR(EAGAIN))
905         update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
907         av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
910             av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
911                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
912                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
913                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
916         output_packet(of, &pkt, ost);
921     av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle into the (lazily allocated) 1 MiB subtitle_out
 * buffer and emit the resulting packet(s). DVB subtitles are encoded twice:
 * one packet draws the subtitle, the next clears it. Display times are
 * normalized so start_display_time is 0. NOTE(review): interior lines are
 * missing (enc/pts declarations, loop braces). */
925 static void do_subtitle_out(OutputFile *of,
929     int subtitle_out_max_size = 1024 * 1024;
930     int subtitle_out_size, nb, i;
935     if (sub->pts == AV_NOPTS_VALUE) {
936         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared output buffer on first use. */
945         subtitle_out = av_malloc(subtitle_out_max_size);
947             av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
952     /* Note: DVB subtitle need one packet to draw them and one other
953        packet to clear them */
954     /* XXX: signal it in the codec context ? */
955     if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
960     /* shift timestamp to honor -ss and make check_recording_time() work with -t */
962     if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
963         pts -= output_files[ost->file_index]->start_time;
964     for (i = 0; i < nb; i++) {
/* The encoder may clobber num_rects for the "clear" pass; restore after. */
965         unsigned save_num_rects = sub->num_rects;
967         ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
968         if (!check_recording_time(ost))
972         // start_display_time is required to be 0
973         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
974         sub->end_display_time  -= sub->start_display_time;
975         sub->start_display_time = 0;
979         ost->frames_encoded++;
981         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
982                                                     subtitle_out_max_size, sub);
984             sub->num_rects = save_num_rects;
985         if (subtitle_out_size < 0) {
986             av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
990         av_init_packet(&pkt);
991         pkt.data = subtitle_out;
992         pkt.size = subtitle_out_size;
993         pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
994         pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
995         if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
996             /* XXX: the pts correction is handled here. Maybe handling
997                it in the codec would be better */
/* 90 = 90 kHz MPEG clock ticks per millisecond of display time. */
999                 pkt.pts += 90 * sub->start_display_time;
1001                 pkt.pts += 90 * sub->end_display_time;
1004         output_packet(of, &pkt, ost);
/* Encode one video frame with frame-rate conversion (-vsync): computes how
 * many times to emit the current/previous picture (dup) or drop it, handles
 * forced keyframes (-force_key_frames list, expression, or "source"), sends
 * the frame to the encoder and drains packets into output_packet(), then
 * keeps a reference to the frame for possible duplication next call.
 * next_picture == NULL signals a flush-style heartbeat where the previous
 * frame count is median-predicted. NOTE(review): this excerpt is missing
 * many interior lines (sync_ipts computation, several case labels, error
 * handling); comments below describe only what is visible. */
1008 static void do_video_out(OutputFile *of,
1010                          AVFrame *next_picture,
1013     int ret, format_video_sync;
1015     AVCodecContext *enc = ost->enc_ctx;
1016     AVCodecParameters *mux_par = ost->st->codecpar;
1017     int nb_frames, nb0_frames, i;
1018     double delta, delta0;
1019     double duration = 0;
1021     InputStream *ist = NULL;
1022     AVFilterContext *filter = ost->filter->filter;
1024     if (ost->source_index >= 0)
1025         ist = input_streams[ost->source_index];
/* Derive one-frame duration (in encoder time-base units) from the filter's
 * advertised frame rate, then clamp by the forced output frame rate. */
1027     if (filter->inputs[0]->frame_rate.num > 0 &&
1028         filter->inputs[0]->frame_rate.den > 0)
1029         duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
1031     if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1032         duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1034     if (!ost->filters_script &&
1038         lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1039         duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1042     if (!next_picture) {
/* Flush: predict duplicate count from the last three observed values. */
1044         nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1045                                           ost->last_nb0_frames[1],
1046                                           ost->last_nb0_frames[2]);
1048         delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1049         delta  = delta0 + duration;
1051         /* by default, we output a single frame */
1052         nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1055         format_video_sync = video_sync_method;
1056         if (format_video_sync == VSYNC_AUTO) {
/* AVI historically gets VFR; otherwise infer from the muxer's flags. */
1057             if(!strcmp(of->ctx->oformat->name, "avi")) {
1058                 format_video_sync = VSYNC_VFR;
1060                 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1062                 && format_video_sync == VSYNC_CFR
1063                 && input_files[ist->file_index]->ctx->nb_streams == 1
1064                 && input_files[ist->file_index]->input_ts_offset == 0) {
1065                 format_video_sync = VSYNC_VSCFR;
1067             if (format_video_sync == VSYNC_CFR && copy_ts) {
1068                 format_video_sync = VSYNC_VSCFR;
1071         ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1075             format_video_sync != VSYNC_PASSTHROUGH &&
1076             format_video_sync != VSYNC_DROP) {
1077             if (delta0 < -0.6) {
1078                 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1080                 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1081             sync_ipts = ost->sync_opts;
1086         switch (format_video_sync) {
1088             if (ost->frame_number == 0 && delta0 >= 0.5) {
1089                 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1092                 ost->sync_opts = lrint(sync_ipts);
1095             // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1096             if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1098             } else if (delta < -1.1)
1100             else if (delta > 1.1) {
/* CFR: emit lrint(delta) copies to fill the timestamp gap. */
1101                 nb_frames = lrintf(delta);
1103                     nb0_frames = lrintf(delta0 - 0.6);
1109             else if (delta > 0.6)
1110                 ost->sync_opts = lrint(sync_ipts);
1113         case VSYNC_PASSTHROUGH:
1114             ost->sync_opts = lrint(sync_ipts);
1121         nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1122         nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the dup-count history window and record the newest value. */
1124     memmove(ost->last_nb0_frames + 1,
1125             ost->last_nb0_frames,
1126             sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1127     ost->last_nb0_frames[0] = nb0_frames;
1129     if (nb0_frames == 0 && ost->last_dropped) {
1131         av_log(NULL, AV_LOG_VERBOSE,
1132                "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1133                ost->frame_number, ost->st->index, ost->last_frame->pts);
1135     if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1136         if (nb_frames > dts_error_threshold * 30) {
1137             av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1141         nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1142         av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1143         if (nb_frames_dup > dup_warning) {
1144             av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1148     ost->last_dropped = nb_frames == nb0_frames && next_picture;
1150     /* duplicates frame if needed */
1151     for (i = 0; i < nb_frames; i++) {
1152         AVFrame *in_picture;
1153         av_init_packet(&pkt);
/* The first nb0_frames iterations re-encode the PREVIOUS frame. */
1157         if (i < nb0_frames && ost->last_frame) {
1158             in_picture = ost->last_frame;
1160             in_picture = next_picture;
1165         in_picture->pts = ost->sync_opts;
1168         if (!check_recording_time(ost))
1170         if (ost->frame_number >= ost->max_frames)
1174 #if FF_API_LAVF_FMT_RAWPICTURE
1175         if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1176             enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1177             /* raw pictures are written as AVPicture structure to
1178                avoid any copies. We support temporarily the older
1180             if (in_picture->interlaced_frame)
1181                 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1183                 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1184             pkt.data   = (uint8_t *)in_picture;
1185             pkt.size   =  sizeof(AVPicture);
1186             pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1187             pkt.flags |= AV_PKT_FLAG_KEY;
1189             output_packet(of, &pkt, ost);
1193             int forced_keyframe = 0;
1196             if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1197                 ost->top_field_first >= 0)
1198                 in_picture->top_field_first = !!ost->top_field_first;
1200             if (in_picture->interlaced_frame) {
1201                 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1202                     mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1204                     mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1206                 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1208             in_picture->quality = enc->global_quality;
1209             in_picture->pict_type = 0;
1211             pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1212                 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* Forced keyframe, source 1: explicit -force_key_frames timestamp list. */
1213             if (ost->forced_kf_index < ost->forced_kf_count &&
1214                 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1215                 ost->forced_kf_index++;
1216                 forced_keyframe = 1;
/* Source 2: -force_key_frames expr evaluated per frame. */
1217             } else if (ost->forced_keyframes_pexpr) {
1219                 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1220                 res = av_expr_eval(ost->forced_keyframes_pexpr,
1221                                    ost->forced_keyframes_expr_const_values, NULL);
1222                 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1223                         ost->forced_keyframes_expr_const_values[FKF_N],
1224                         ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1225                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1226                         ost->forced_keyframes_expr_const_values[FKF_T],
1227                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1230                     forced_keyframe = 1;
1231                     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1232                         ost->forced_keyframes_expr_const_values[FKF_N];
1233                     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1234                         ost->forced_keyframes_expr_const_values[FKF_T];
1235                     ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1238                 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
/* Source 3: "source" — mirror the input frame's keyframe flag. */
1239             } else if (   ost->forced_keyframes
1240                        && !strncmp(ost->forced_keyframes, "source", 6)
1241                        && in_picture->key_frame==1) {
1242                 forced_keyframe = 1;
1245             if (forced_keyframe) {
1246                 in_picture->pict_type = AV_PICTURE_TYPE_I;
1247                 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1250             update_benchmark(NULL);
1252                 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1253                        "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1254                        av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1255                        enc->time_base.num, enc->time_base.den);
1258             ost->frames_encoded++;
1260             ret = avcodec_send_frame(enc, in_picture);
/* Drain loop: collect every packet the encoder has ready. */
1265                 ret = avcodec_receive_packet(enc, &pkt);
1266                 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1267                 if (ret == AVERROR(EAGAIN))
1273                     av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1274                            "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1275                            av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1276                            av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1279                 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1280                     pkt.pts = ost->sync_opts;
1282                 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1285                     av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1286                            "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1287                            av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1288                            av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1291                 frame_size = pkt.size;
1292                 output_packet(of, &pkt, ost);
1294                 /* if two pass, output log */
1295                 if (ost->logfile && enc->stats_out) {
1296                     fprintf(ost->logfile, "%s", enc->stats_out);
1302          * For video, number of frames in == number of packets out.
1303          * But there may be reordering, so we can't throw away frames on encoder
1304          * flush, we need to limit them here, before they go into encoder.
1306         ost->frame_number++;
1308         if (vstats_filename && frame_size)
1309             do_video_stats(ost, frame_size);
/* Keep a reference to this picture so it can be duplicated next call. */
1312     if (!ost->last_frame)
1313         ost->last_frame = av_frame_alloc();
1314     av_frame_unref(ost->last_frame);
1315     if (next_picture && ost->last_frame)
1316         av_frame_ref(ost->last_frame, next_picture);
1318         av_frame_free(&ost->last_frame);
1322     av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1326 static double psnr(double d)
1328 return -10.0 * log10(d);
/*
 * do_video_stats(): append one line of per-frame statistics for a video
 * output stream to the global vstats file (opened from vstats_filename on
 * first use): frame number, quantizer, optional PSNR, frame size, total
 * stream size, timestamp, instantaneous and average bitrate, picture type.
 * NOTE(review): several original lines (fopen error handling, the 'enc'
 * initialization, some declarations) are elided in this chunk.
 */
1331 static void do_video_stats(OutputStream *ost, int frame_size)
1333 AVCodecContext *enc;
1335 double ti1, bitrate, avg_bitrate;
1337 /* this is executed just the first time do_video_stats is called */
1339 vstats_file = fopen(vstats_filename, "w");
/* Only video streams produce vstats entries. */
1347 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1348 frame_number = ost->st->nb_frames;
/* quality is stored in lambda units; convert back to a QP-like scale */
1349 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1350 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR only when the encoder was asked to collect error stats */
1352 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1353 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1355 fprintf(vstats_file,"f_size= %6d ", frame_size);
1356 /* compute pts value */
1357 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate assumes one frame per encoder time_base tick */
1361 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1362 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1363 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1364 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1365 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * finish_output_stream(): mark this output stream — and every other stream
 * belonging to the same output file — as finished for both the encoder and
 * the muxer, so no further packets are produced or written for them.
 * NOTE(review): some original lines (declarations, braces) are elided here.
 */
1369 static void finish_output_stream(OutputStream *ost)
1371 OutputFile *of = output_files[ost->file_index];
1374 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* propagate the finished state to all sibling streams of this file */
1377 for (i = 0; i < of->ctx->nb_streams; i++)
1378 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1383 * Get and encode new output from any of the filtergraphs, without causing
1386 * @return 0 for success, <0 for severe errors
/*
 * reap_filters(): drain every output stream's buffersink without requesting
 * new frames, rescale timestamps into the encoder time base, and hand each
 * frame to do_video_out()/do_audio_out(). With flush set, an EOF from a
 * video sink triggers a final do_video_out(NULL) to flush duplication logic.
 * NOTE(review): loop braces and several statements are elided in this chunk.
 */
1388 static int reap_filters(int flush)
1390 AVFrame *filtered_frame = NULL;
1393 /* Reap all buffers present in the buffer sinks */
1394 for (i = 0; i < nb_output_streams; i++) {
1395 OutputStream *ost = output_streams[i];
1396 OutputFile *of = output_files[ost->file_index];
1397 AVFilterContext *filter;
1398 AVCodecContext *enc = ost->enc_ctx;
1403 filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to receive filter output */
1405 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1406 return AVERROR(ENOMEM);
1408 filtered_frame = ost->filtered_frame;
1411 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already buffered, never pull upstream */
1412 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1413 AV_BUFFERSINK_FLAG_NO_REQUEST);
1415 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1416 av_log(NULL, AV_LOG_WARNING,
1417 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1418 } else if (flush && ret == AVERROR_EOF) {
1419 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1420 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1424 if (ost->finished) {
1425 av_frame_unref(filtered_frame);
1428 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1429 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1430 AVRational tb = enc->time_base;
/* widen the time base so float_pts keeps extra fractional precision */
1431 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1433 tb.den <<= extra_bits;
1435 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1436 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1437 float_pts /= 1 << extra_bits;
1438 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1439 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* the frame itself gets the integer pts in the encoder time base */
1441 filtered_frame->pts =
1442 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1443 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1445 //if (ost->source_index >= 0)
1446 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1448 switch (filter->inputs[0]->type) {
1449 case AVMEDIA_TYPE_VIDEO:
1450 if (!ost->frame_aspect_ratio.num)
1451 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1454 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1455 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1457 enc->time_base.num, enc->time_base.den);
1460 do_video_out(of, ost, filtered_frame, float_pts);
1462 case AVMEDIA_TYPE_AUDIO:
/* reject channel-count changes the encoder cannot adapt to */
1463 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1464 enc->channels != av_frame_get_channels(filtered_frame)) {
1465 av_log(NULL, AV_LOG_ERROR,
1466 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1469 do_audio_out(of, ost, filtered_frame);
1472 // TODO support subtitle filters
1476 av_frame_unref(filtered_frame);
/*
 * print_final_stats(): after transcoding ends, log size totals per media
 * type, the muxing overhead percentage, and verbose per-stream demux/decode
 * and encode/mux counters for every input and output file. Warns when
 * nothing at all was encoded.
 * NOTE(review): loop braces and some statements are elided in this chunk.
 */
1483 static void print_final_stats(int64_t total_size)
1485 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1486 uint64_t subtitle_size = 0;
1487 uint64_t data_size = 0;
/* -1 marks "overhead unknown" until proven computable below */
1488 float percent = -1.0;
1492 for (i = 0; i < nb_output_streams; i++) {
1493 OutputStream *ost = output_streams[i];
1494 switch (ost->enc_ctx->codec_type) {
1495 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1496 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1497 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1498 default: other_size += ost->data_size; break;
1500 extra_size += ost->enc_ctx->extradata_size;
1501 data_size += ost->data_size;
/* NOTE(review): mixes AV_CODEC_FLAG_PASS1 with deprecated CODEC_FLAG_PASS2;
 * presumably detects first-pass-only streams — confirm against flag values */
1502 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1503 != AV_CODEC_FLAG_PASS1)
1507 if (data_size && total_size>0 && total_size >= data_size)
1508 percent = 100.0 * (total_size - data_size) / data_size;
1510 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1511 video_size / 1024.0,
1512 audio_size / 1024.0,
1513 subtitle_size / 1024.0,
1514 other_size / 1024.0,
1515 extra_size / 1024.0);
1517 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1519 av_log(NULL, AV_LOG_INFO, "unknown");
1520 av_log(NULL, AV_LOG_INFO, "\n");
1522 /* print verbose per-stream stats */
1523 for (i = 0; i < nb_input_files; i++) {
1524 InputFile *f = input_files[i];
/* shadows the total_size parameter: per-file demux totals */
1525 uint64_t total_packets = 0, total_size = 0;
1527 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1528 i, f->ctx->filename);
1530 for (j = 0; j < f->nb_streams; j++) {
1531 InputStream *ist = input_streams[f->ist_index + j];
1532 enum AVMediaType type = ist->dec_ctx->codec_type;
1534 total_size += ist->data_size;
1535 total_packets += ist->nb_packets;
1537 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1538 i, j, media_type_string(type));
1539 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1540 ist->nb_packets, ist->data_size);
1542 if (ist->decoding_needed) {
1543 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1544 ist->frames_decoded);
1545 if (type == AVMEDIA_TYPE_AUDIO)
1546 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1547 av_log(NULL, AV_LOG_VERBOSE, "; ");
1550 av_log(NULL, AV_LOG_VERBOSE, "\n");
1553 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1554 total_packets, total_size);
/* same pattern for the output side: per-file mux totals */
1557 for (i = 0; i < nb_output_files; i++) {
1558 OutputFile *of = output_files[i];
1559 uint64_t total_packets = 0, total_size = 0;
1561 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1562 i, of->ctx->filename);
1564 for (j = 0; j < of->ctx->nb_streams; j++) {
1565 OutputStream *ost = output_streams[of->ost_index + j];
1566 enum AVMediaType type = ost->enc_ctx->codec_type;
1568 total_size += ost->data_size;
1569 total_packets += ost->packets_written;
1571 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1572 i, j, media_type_string(type));
1573 if (ost->encoding_needed) {
1574 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1575 ost->frames_encoded);
1576 if (type == AVMEDIA_TYPE_AUDIO)
1577 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1578 av_log(NULL, AV_LOG_VERBOSE, "; ");
1581 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1582 ost->packets_written, ost->data_size);
1584 av_log(NULL, AV_LOG_VERBOSE, "\n");
1587 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1588 total_packets, total_size);
1590 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1591 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1593 av_log(NULL, AV_LOG_WARNING, "\n");
1595 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * print_report(): periodic (throttled to ~0.5 s) and final progress report.
 * Builds a human-readable status line in 'buf' (frame count, fps, q, size,
 * time, bitrate, dup/drop, speed) and a machine-readable key=value report
 * in 'buf_script' for -progress consumers; also accumulates a QP histogram
 * and optional PSNR figures for video streams.
 * NOTE(review): many braces and some statements are elided in this chunk.
 */
1600 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1603 AVBPrint buf_script;
1605 AVFormatContext *oc;
1607 AVCodecContext *enc;
1608 int frame_number, vid, i;
/* start just above INT64_MIN so FFMAX can raise it */
1611 int64_t pts = INT64_MIN + 1;
/* static state: persists across calls for rate limiting */
1612 static int64_t last_time = -1;
1613 static int qp_histogram[52];
1614 int hours, mins, secs, us;
/* nothing to do if no consumer wants intermediate reports */
1618 if (!print_stats && !is_last_report && !progress_avio)
1621 if (!is_last_report) {
1622 if (last_time == -1) {
1623 last_time = cur_time;
/* throttle: at most one report every 500 ms */
1626 if ((cur_time - last_time) < 500000)
1628 last_time = cur_time;
/* wall-clock seconds since transcoding started */
1631 t = (cur_time-timer_start) / 1000000.0;
1634 oc = output_files[0]->ctx;
1636 total_size = avio_size(oc->pb);
1637 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1638 total_size = avio_tell(oc->pb);
1642 av_bprint_init(&buf_script, 0, 1);
1643 for (i = 0; i < nb_output_streams; i++) {
1645 ost = output_streams[i];
1647 if (!ost->stream_copy)
1648 q = ost->quality / (float) FF_QP2LAMBDA;
/* a video stream was already reported: only append its q value */
1650 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1651 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1652 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1653 ost->file_index, ost->index, q);
/* first video stream: full frame/fps/q report */
1655 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1658 frame_number = ost->frame_number;
1659 fps = t > 1 ? frame_number / t : 0;
1660 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1661 frame_number, fps < 9.95, fps, q);
1662 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1663 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1664 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1665 ost->file_index, ost->index, q);
1667 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* accumulate QP histogram; printed as one hex digit per bucket */
1671 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1673 for (j = 0; j < 32; j++)
1674 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1677 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1679 double error, error_sum = 0;
1680 double scale, scale_sum = 0;
1682 char type[3] = { 'Y','U','V' };
1683 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1684 for (j = 0; j < 3; j++) {
/* last report: cumulative error over all frames; else per-frame */
1685 if (is_last_report) {
1686 error = enc->error[j];
1687 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1689 error = ost->error[j];
1690 scale = enc->width * enc->height * 255.0 * 255.0;
1696 p = psnr(error / scale);
1697 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* 'type[j] | 32' lower-cases the plane letter for the script key */
1698 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1699 ost->file_index, ost->index, type[j] | 32, p);
1701 p = psnr(error_sum / scale_sum);
1702 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1703 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1704 ost->file_index, ost->index, p);
1708 /* compute min output value */
1709 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1710 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1711 ost->st->time_base, AV_TIME_BASE_Q));
1713 nb_frames_drop += ost->last_dropped;
/* split |pts| into h:m:s.us for display */
1716 secs = FFABS(pts) / AV_TIME_BASE;
1717 us = FFABS(pts) % AV_TIME_BASE;
1723 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1724 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1726 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1728 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1729 "size=%8.0fkB time=", total_size / 1024.0);
1731 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1732 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1733 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1734 (100 * us) / AV_TIME_BASE);
1737 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1738 av_bprintf(&buf_script, "bitrate=N/A\n");
1740 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1741 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1744 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1745 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1746 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1747 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1748 hours, mins, secs, us);
1750 if (nb_frames_dup || nb_frames_drop)
1751 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1752 nb_frames_dup, nb_frames_drop);
1753 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1754 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1757 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1758 av_bprintf(&buf_script, "speed=N/A\n");
1760 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1761 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1764 if (print_stats || is_last_report) {
/* '\r' keeps intermediate reports on one console line; '\n' at the end */
1765 const char end = is_last_report ? '\n' : '\r';
1766 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1767 fprintf(stderr, "%s %c", buf, end);
1769 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1774 if (progress_avio) {
1775 av_bprintf(&buf_script, "progress=%s\n",
1776 is_last_report ? "end" : "continue");
1777 avio_write(progress_avio, buf_script.str,
1778 FFMIN(buf_script.len, buf_script.size - 1));
1779 avio_flush(progress_avio);
1780 av_bprint_finalize(&buf_script, NULL);
1781 if (is_last_report) {
1782 if ((ret = avio_closep(&progress_avio)) < 0)
1783 av_log(NULL, AV_LOG_ERROR,
1784 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1789 print_final_stats(total_size);
/*
 * flush_encoders(): at end of input, send a NULL (drain) frame to every
 * encoder that was actually used, then loop avcodec_receive_packet() until
 * EOF, muxing each delayed packet. Skips raw/PCM-ish cases that buffer
 * nothing and streams whose muxer already finished.
 * NOTE(review): several statements (continue/break, pkt declaration, loop
 * braces) are elided in this chunk.
 */
1792 static void flush_encoders(void)
1796 for (i = 0; i < nb_output_streams; i++) {
1797 OutputStream *ost = output_streams[i];
1798 AVCodecContext *enc = ost->enc_ctx;
1799 OutputFile *of = output_files[ost->file_index];
1800 int stop_encoding = 0;
1802 if (!ost->encoding_needed)
/* raw PCM-style audio (frame_size <= 1) buffers nothing to flush */
1805 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1807 #if FF_API_LAVF_FMT_RAWPICTURE
1808 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1812 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame puts the encoder into draining mode */
1815 avcodec_send_frame(enc, NULL);
1818 const char *desc = NULL;
1820 switch (enc->codec_type) {
1821 case AVMEDIA_TYPE_AUDIO:
1824 case AVMEDIA_TYPE_VIDEO:
1834 av_init_packet(&pkt);
1838 update_benchmark(NULL);
1839 ret = avcodec_receive_packet(enc, &pkt);
1840 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1841 if (ret < 0 && ret != AVERROR_EOF) {
1842 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass logging continues during the flush as well */
1847 if (ost->logfile && enc->stats_out) {
1848 fprintf(ost->logfile, "%s", enc->stats_out);
1850 if (ret == AVERROR_EOF) {
1854 if (ost->finished & MUXER_FINISHED) {
1855 av_packet_unref(&pkt);
1858 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1859 pkt_size = pkt.size;
1860 output_packet(of, &pkt, ost);
1861 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1862 do_video_stats(ost, pkt_size);
1873 * Check whether a packet from ist should be written into ost at this time
/*
 * Returns nonzero when the packet may be written: the output stream must be
 * fed by exactly this input stream, and the packet must not precede the
 * output file's requested start time.
 * NOTE(review): the return statements are elided in this chunk.
 */
1875 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1877 OutputFile *of = output_files[ost->file_index];
1878 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* reject packets from any stream other than this ost's source */
1880 if (ost->source_index != ist_index)
/* drop packets before the requested output start time (-ss on output) */
1886 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * do_streamcopy(): pass a demuxed packet straight to the muxer for a -c copy
 * stream: rescale pts/dts/duration into the output stream time base, apply
 * start-time/recording-time constraints, optionally run the bitstream
 * parser for a few codec-specific cases, then call output_packet().
 * NOTE(review): several statements (returns, some declarations, else
 * branches) are elided in this chunk.
 */
1892 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1894 OutputFile *of = output_files[ost->file_index];
1895 InputFile *f = input_files [ist->file_index];
1896 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1897 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1901 av_init_packet(&opkt);
/* by default, skip non-keyframes until the first keyframe is seen */
1903 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1904 !ost->copy_initial_nonkeyframes)
/* likewise skip packets that precede the copy start point */
1907 if (!ost->frame_number && !ost->copy_prior_start) {
1908 int64_t comp_start = start_time;
1909 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1910 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1911 if (pkt->pts == AV_NOPTS_VALUE ?
1912 ist->pts < comp_start :
1913 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop the stream once the output recording time is exceeded */
1917 if (of->recording_time != INT64_MAX &&
1918 ist->pts >= of->recording_time + start_time) {
1919 close_output_stream(ost);
/* the input file may carry its own -t limit as well */
1923 if (f->recording_time != INT64_MAX) {
1924 start_time = f->ctx->start_time;
1925 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1926 start_time += f->start_time;
1927 if (ist->pts >= f->recording_time + start_time) {
1928 close_output_stream(ost);
1933 /* force the input stream PTS */
1934 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1937 if (pkt->pts != AV_NOPTS_VALUE)
1938 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1940 opkt.pts = AV_NOPTS_VALUE;
/* missing dts: fall back to the input stream's tracked dts */
1942 if (pkt->dts == AV_NOPTS_VALUE)
1943 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1945 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1946 opkt.dts -= ost_tb_start_time;
/* audio: derive duration-accurate timestamps via av_rescale_delta() */
1948 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1949 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1951 duration = ist->dec_ctx->frame_size;
1952 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1953 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1954 ost->st->time_base) - ost_tb_start_time;
1957 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1958 opkt.flags = pkt->flags;
1959 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1960 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1961 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1962 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1963 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1965 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1966 &opkt.data, &opkt.size,
1967 pkt->data, pkt->size,
1968 pkt->flags & AV_PKT_FLAG_KEY);
1970 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap parser-produced data so the packet owns its buffer */
1975 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1980 opkt.data = pkt->data;
1981 opkt.size = pkt->size;
1983 av_copy_packet_side_data(&opkt, pkt);
1985 #if FF_API_LAVF_FMT_RAWPICTURE
1986 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1987 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1988 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1989 /* store AVPicture in AVPacket, as expected by the output format */
1990 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1992 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1996 opkt.data = (uint8_t *)&pict;
1997 opkt.size = sizeof(AVPicture);
1998 opkt.flags |= AV_PKT_FLAG_KEY;
2002 output_packet(of, &opkt, ost);
/*
 * guess_input_channel_layout(): if the decoder reported no channel layout,
 * pick the default layout for its channel count (bounded by
 * guess_layout_max) and warn about the guess. Presumably returns nonzero on
 * success — the return statements are elided in this chunk; confirm against
 * the full source.
 */
2005 int guess_input_channel_layout(InputStream *ist)
2007 AVCodecContext *dec = ist->dec_ctx;
2009 if (!dec->channel_layout) {
2010 char layout_name[256];
/* refuse to guess above the user-configured channel-count limit */
2012 if (dec->channels > ist->guess_layout_max)
2014 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2015 if (!dec->channel_layout)
2017 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2018 dec->channels, dec->channel_layout);
2019 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2020 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * check_decode_result(): update the global decode success/failure counters
 * and, with -xerror, abort on decode errors or on frames the decoder
 * flagged as corrupt.
 * NOTE(review): the exit/fail statements are elided in this chunk.
 */
2025 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
2027 if (*got_output || ret<0)
2028 decode_error_stat[ret<0] ++;
2030 if (ret < 0 && exit_on_error)
2033 if (exit_on_error && *got_output && ist) {
2034 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2035 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2041 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2042 // There is the following difference: if you got a frame, you must call
2043 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2044 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/*
 * decode(): adapter from the old got_frame-style decode API to the send/
 * receive API: sends the packet (if any), then receives one frame, setting
 * *got_frame accordingly.
 * NOTE(review): the *got_frame assignments and returns are elided in this
 * chunk.
 */
2045 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2052 ret = avcodec_send_packet(avctx, pkt);
2053 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2054 // decoded frames with avcodec_receive_frame() until done.
/* AVERROR_EOF from send merely means the decoder is already draining */
2055 if (ret < 0 && ret != AVERROR_EOF)
2059 ret = avcodec_receive_frame(avctx, frame);
2060 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * send_frame_to_filters(): push one decoded frame into every filtergraph
 * input fed by this stream. All but the last filter receive a reference
 * (av_frame_ref into ist->filter_frame); the last one consumes the frame
 * itself, avoiding one extra ref.
 * NOTE(review): the else branch and return are elided in this chunk.
 */
2068 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2073 for (i = 0; i < ist->nb_filters; i++) {
2074 if (i < ist->nb_filters - 1) {
2075 f = ist->filter_frame;
2076 ret = av_frame_ref(f, decoded_frame);
2081 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2082 AV_BUFFERSRC_FLAG_PUSH);
/* EOF just means this graph input was already closed — not an error */
2083 if (ret == AVERROR_EOF)
2084 ret = 0; /* ignore */
2086 av_log(NULL, AV_LOG_ERROR,
2087 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * decode_audio(): decode one audio packet, validate the result, advance the
 * stream's predicted timestamps, reconfigure filtergraphs when the sample
 * format/rate/channel layout changes, pick a pts for the decoded frame
 * (frame pts > packet pts > tracked dts, in that order of preference), and
 * forward it to the filters.
 * NOTE(review): numerous lines (braces, returns, divisors of the next_pts
 * increments) are elided in this chunk.
 */
2094 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2096 AVFrame *decoded_frame;
2097 AVCodecContext *avctx = ist->dec_ctx;
2098 int i, ret, err = 0, resample_changed;
2099 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
2101 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2102 return AVERROR(ENOMEM);
2103 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2104 return AVERROR(ENOMEM);
2105 decoded_frame = ist->decoded_frame;
2107 update_benchmark(NULL);
2108 ret = decode(avctx, decoded_frame, got_output, pkt);
2109 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2111 if (ret >= 0 && avctx->sample_rate <= 0) {
2112 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2113 ret = AVERROR_INVALIDDATA;
2116 if (ret != AVERROR_EOF)
2117 check_decode_result(ist, got_output, ret);
2119 if (!*got_output || ret < 0)
2122 ist->samples_decoded += decoded_frame->nb_samples;
2123 ist->frames_decoded++;
2126 /* increment next_dts to use for the case where the input stream does not
2127 have timestamps or there are multiple frames in the packet */
2128 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2130 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect any change in audio parameters that requires filter reinit */
2134 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2135 ist->resample_channels != avctx->channels ||
2136 ist->resample_channel_layout != decoded_frame->channel_layout ||
2137 ist->resample_sample_rate != decoded_frame->sample_rate;
2138 if (resample_changed) {
2139 char layout1[64], layout2[64];
2141 if (!guess_input_channel_layout(ist)) {
2142 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2143 "layout for Input Stream #%d.%d\n", ist->file_index,
2147 decoded_frame->channel_layout = avctx->channel_layout;
2149 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2150 ist->resample_channel_layout);
2151 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2152 decoded_frame->channel_layout);
2154 av_log(NULL, AV_LOG_INFO,
2155 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2156 ist->file_index, ist->st->index,
2157 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2158 ist->resample_channels, layout1,
2159 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2160 avctx->channels, layout2);
/* remember the new parameters for the next change detection */
2162 ist->resample_sample_fmt = decoded_frame->format;
2163 ist->resample_sample_rate = decoded_frame->sample_rate;
2164 ist->resample_channel_layout = decoded_frame->channel_layout;
2165 ist->resample_channels = avctx->channels;
2167 for (i = 0; i < ist->nb_filters; i++) {
2168 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2170 av_log(NULL, AV_LOG_ERROR,
2171 "Error reconfiguring input stream %d:%d filter %d\n",
2172 ist->file_index, ist->st->index, i);
/* rebuild every filtergraph this stream feeds */
2177 for (i = 0; i < nb_filtergraphs; i++)
2178 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2179 FilterGraph *fg = filtergraphs[i];
2180 if (configure_filtergraph(fg) < 0) {
2181 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* choose the best available pts source and its time base */
2187 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2188 decoded_frame_tb = ist->st->time_base;
2189 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2190 decoded_frame->pts = pkt->pts;
2191 decoded_frame_tb = ist->st->time_base;
2193 decoded_frame->pts = ist->dts;
2194 decoded_frame_tb = AV_TIME_BASE_Q;
2196 if (decoded_frame->pts != AV_NOPTS_VALUE)
2197 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2198 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2199 (AVRational){1, avctx->sample_rate});
2200 ist->nb_samples = decoded_frame->nb_samples;
2201 err = send_frame_to_filters(ist, decoded_frame);
2202 decoded_frame->pts = AV_NOPTS_VALUE;
2205 av_frame_unref(ist->filter_frame);
2206 av_frame_unref(decoded_frame);
2207 return err < 0 ? err : ret;
/*
 * decode_video(): decode one video packet (or drain at EOF), work around
 * decoder/demuxer quirks (0-sized pre-EOF packets, has_b_frames mismatch,
 * missing best-effort timestamps at EOF via a saved dts queue), update the
 * stream's clock from the best-effort timestamp, reconfigure filtergraphs
 * on size/pixel-format changes, and forward the frame to the filters.
 * NOTE(review): many lines (braces, returns, the avpkt setup) are elided in
 * this chunk.
 */
2210 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2212 AVFrame *decoded_frame;
2213 int i, ret = 0, err = 0, resample_changed;
2214 int64_t best_effort_timestamp;
2215 int64_t dts = AV_NOPTS_VALUE;
2218 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2219 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2221 if (!eof && pkt && pkt->size == 0)
2224 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2225 return AVERROR(ENOMEM);
2226 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2227 return AVERROR(ENOMEM);
2228 decoded_frame = ist->decoded_frame;
2229 if (ist->dts != AV_NOPTS_VALUE)
2230 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2233 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2236 // The old code used to set dts on the drain packet, which does not work
2237 // with the new API anymore.
/* queue this dts so it can stand in for missing timestamps at EOF */
2239 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2241 return AVERROR(ENOMEM);
2242 ist->dts_buffer = new;
2243 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2246 update_benchmark(NULL);
2247 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2248 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2250 // The following line may be required in some cases where there is no parser
2251 // or the parser does not has_b_frames correctly
2252 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2253 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2254 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2256 av_log(ist->dec_ctx, AV_LOG_WARNING,
2257 "video_delay is larger in decoder than demuxer %d > %d.\n"
2258 "If you want to help, upload a sample "
2259 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2260 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2261 ist->dec_ctx->has_b_frames,
2262 ist->st->codecpar->video_delay);
2265 if (ret != AVERROR_EOF)
2266 check_decode_result(ist, got_output, ret);
/* sanity-log a decoder-context vs frame parameter mismatch */
2268 if (*got_output && ret >= 0) {
2269 if (ist->dec_ctx->width != decoded_frame->width ||
2270 ist->dec_ctx->height != decoded_frame->height ||
2271 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2272 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2273 decoded_frame->width,
2274 decoded_frame->height,
2275 decoded_frame->format,
2276 ist->dec_ctx->width,
2277 ist->dec_ctx->height,
2278 ist->dec_ctx->pix_fmt);
2282 if (!*got_output || ret < 0)
/* honor an explicit -top field order override */
2285 if(ist->top_field_first>=0)
2286 decoded_frame->top_field_first = ist->top_field_first;
2288 ist->frames_decoded++;
/* download the frame from a hwaccel surface when needed */
2290 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2291 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2295 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2297 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* at EOF with no timestamp, consume the oldest queued dts instead */
2299 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2300 best_effort_timestamp = ist->dts_buffer[0];
2302 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2303 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2304 ist->nb_dts_buffer--;
2307 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2308 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2310 if (ts != AV_NOPTS_VALUE)
2311 ist->next_pts = ist->pts = ts;
2315 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2316 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2317 ist->st->index, av_ts2str(decoded_frame->pts),
2318 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2319 best_effort_timestamp,
2320 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2321 decoded_frame->key_frame, decoded_frame->pict_type,
2322 ist->st->time_base.num, ist->st->time_base.den);
2325 if (ist->st->sample_aspect_ratio.num)
2326 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect geometry/pixel-format changes requiring filter reinit */
2328 resample_changed = ist->resample_width != decoded_frame->width ||
2329 ist->resample_height != decoded_frame->height ||
2330 ist->resample_pix_fmt != decoded_frame->format;
2331 if (resample_changed) {
2332 av_log(NULL, AV_LOG_INFO,
2333 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2334 ist->file_index, ist->st->index,
2335 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2336 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2338 ist->resample_width = decoded_frame->width;
2339 ist->resample_height = decoded_frame->height;
2340 ist->resample_pix_fmt = decoded_frame->format;
2342 for (i = 0; i < ist->nb_filters; i++) {
2343 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2345 av_log(NULL, AV_LOG_ERROR,
2346 "Error reconfiguring input stream %d:%d filter %d\n",
2347 ist->file_index, ist->st->index, i);
2352 for (i = 0; i < nb_filtergraphs; i++) {
2353 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2354 configure_filtergraph(filtergraphs[i]) < 0) {
2355 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2361 err = send_frame_to_filters(ist, decoded_frame);
2364 av_frame_unref(ist->filter_frame);
2365 av_frame_unref(decoded_frame);
2366 return err < 0 ? err : ret;
/* Decode one subtitle packet from 'ist', optionally shorten the previous
 * subtitle's display time (-fix_sub_duration), update the sub2video state,
 * and send the decoded AVSubtitle to every matching subtitle encoder.
 * NOTE(review): interior lines are elided in this excerpt; comments below
 * describe only the visible code. */
2369 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2371 AVSubtitle subtitle;
2372 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2373 &subtitle, got_output, pkt);
2375 check_decode_result(NULL, got_output, ret);
/* Decoder failure or no output: the visible path flushes sub2video
 * (presumably only on EOF — elided condition, confirm against full source). */
2377 if (ret < 0 || !*got_output) {
2379 sub2video_flush(ist);
/* -fix_sub_duration: clamp the PREVIOUS subtitle's end_display_time so it
 * never overlaps the current subtitle's start (times rescaled to ms). */
2383 if (ist->fix_sub_duration) {
2385 if (ist->prev_sub.got_output) {
2386 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2387 1000, AV_TIME_BASE);
2388 if (end < ist->prev_sub.subtitle.end_display_time) {
2389 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2390 "Subtitle duration reduced from %d to %d%s\n",
2391 ist->prev_sub.subtitle.end_display_time, end,
2392 end <= 0 ? ", dropping it" : "");
2393 ist->prev_sub.subtitle.end_display_time = end;
/* Delay output by one subtitle: swap current decode results with the stored
 * previous ones, so the (now fixed-up) previous subtitle is emitted instead. */
2396 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2397 FFSWAP(int, ret, ist->prev_sub.ret);
2398 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2406 sub2video_update(ist, &subtitle);
/* Empty subtitles (no rectangles) are not forwarded to encoders. */
2408 if (!subtitle.num_rects)
2411 ist->frames_decoded++;
/* Fan out to every output stream that copies/encodes subtitles from this input. */
2413 for (i = 0; i < nb_output_streams; i++) {
2414 OutputStream *ost = output_streams[i];
2416 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2417 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2420 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
/* The AVSubtitle owns allocated rects; always release it before returning. */
2424 avsubtitle_free(&subtitle);
/* Signal end-of-stream to every filtergraph input fed by this input stream
 * by pushing a NULL frame into each buffersrc.
 * NOTE(review): declarations of 'i'/'ret' and the error-check after the call
 * are elided in this excerpt. */
2428 static int send_filter_eof(InputStream *ist)
2431 for (i = 0; i < ist->nb_filters; i++) {
2432 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2439 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: maintains the input stream's dts/pts
 * bookkeeping (in AV_TIME_BASE units), runs the appropriate decoder when
 * transcoding, sends filter EOF on drain, and performs streamcopy otherwise.
 * Returns !eof_reached. NOTE(review): many interior lines (declarations,
 * returns, braces) are elided in this excerpt. */
2440 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2444 int eof_reached = 0;
/* First packet for this stream: seed ist->dts. With B-frames the first dts is
 * shifted back by has_b_frames frame durations derived from avg_frame_rate. */
2447 if (!ist->saw_first_ts) {
2448 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2450 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2451 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2452 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2454 ist->saw_first_ts = 1;
2457 if (ist->next_dts == AV_NOPTS_VALUE)
2458 ist->next_dts = ist->dts;
2459 if (ist->next_pts == AV_NOPTS_VALUE)
2460 ist->next_pts = ist->pts;
2464 av_init_packet(&avpkt);
/* A packet dts overrides our prediction; for non-video or pure streamcopy the
 * pts tracks the dts directly. */
2471 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2472 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2473 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2474 ist->next_pts = ist->pts = ist->dts;
2477 // while we have more to decode or while the decoder did output something on EOF
2478 while (ist->decoding_needed) {
2482 ist->pts = ist->next_pts;
2483 ist->dts = ist->next_dts;
2485 switch (ist->dec_ctx->codec_type) {
2486 case AVMEDIA_TYPE_AUDIO:
/* 'repeating' (declared in elided lines) marks drain iterations after the
 * first, where the packet must not be fed to the decoder again. */
2487 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2489 case AVMEDIA_TYPE_VIDEO:
2490 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2491 if (!repeating || !pkt || got_output) {
/* Predict the next dts/pts: prefer the packet duration, else derive one frame
 * duration from the decoder framerate and repeat_pict/ticks_per_frame. */
2492 if (pkt && pkt->duration) {
2493 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2494 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2495 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2496 duration = ((int64_t)AV_TIME_BASE *
2497 ist->dec_ctx->framerate.den * ticks) /
2498 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2501 if(ist->dts != AV_NOPTS_VALUE && duration) {
2502 ist->next_dts += duration;
2504 ist->next_dts = AV_NOPTS_VALUE;
2508 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2510 case AVMEDIA_TYPE_SUBTITLE:
2513 ret = transcode_subtitles(ist, &avpkt, &got_output);
2514 if (!pkt && ret >= 0)
2521 if (ret == AVERROR_EOF) {
2527 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2528 ist->file_index, ist->st->index, av_err2str(ret));
2531 // Decoding might not terminate if we're draining the decoder, and
2532 // the decoder keeps returning an error.
2533 // This should probably be considered a libavcodec issue.
2534 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2543 // During draining, we might get multiple output frames in this loop.
2544 // ffmpeg.c does not drain the filter chain on configuration changes,
2545 // which means if we send multiple frames at once to the filters, and
2546 // one of those frames changes configuration, the buffered frames will
2547 // be lost. This can upset certain FATE tests.
2548 // Decode only 1 frame per call on EOF to appease these FATE tests.
2549 // The ideal solution would be to rewrite decoding to use the new
2550 // decoding API in a better way.
2557 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2558 /* except when looping we need to flush but not to send an EOF */
2559 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2560 int ret = send_filter_eof(ist);
2562 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2567 /* handle stream copy */
/* Streamcopy path: advance dts by one audio frame / one video frame duration
 * so the next packet's timestamps can be validated and repaired. */
2568 if (!ist->decoding_needed) {
2569 ist->dts = ist->next_dts;
2570 switch (ist->dec_ctx->codec_type) {
2571 case AVMEDIA_TYPE_AUDIO:
2572 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2573 ist->dec_ctx->sample_rate;
2575 case AVMEDIA_TYPE_VIDEO:
2576 if (ist->framerate.num) {
2577 // TODO: Remove work-around for c99-to-c89 issue 7
2578 AVRational time_base_q = AV_TIME_BASE_Q;
/* Snap next_dts onto the forced framerate grid, then step one frame. */
2579 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2580 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2581 } else if (pkt->duration) {
2582 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2583 } else if(ist->dec_ctx->framerate.num != 0) {
2584 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2585 ist->next_dts += ((int64_t)AV_TIME_BASE *
2586 ist->dec_ctx->framerate.den * ticks) /
2587 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2591 ist->pts = ist->dts;
2592 ist->next_pts = ist->next_dts;
/* Copy the packet to every streamcopy output fed by this input. */
2594 for (i = 0; pkt && i < nb_output_streams; i++) {
2595 OutputStream *ost = output_streams[i];
2597 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2600 do_streamcopy(ist, ost, pkt);
2603 return !eof_reached;
/* Generate an SDP description covering all RTP output muxers and either print
 * it to stdout or write it to -sdp_file. Waits until every output file has
 * written its header. NOTE(review): declarations of i/j/sdp and error/cleanup
 * paths are elided in this excerpt. */
2606 static void print_sdp(void)
2611 AVIOContext *sdp_pb;
2612 AVFormatContext **avc;
/* Bail out (elided return) until all output headers are written. */
2614 for (i = 0; i < nb_output_files; i++) {
2615 if (!output_files[i]->header_written)
2619 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP outputs; j counts how many were found. */
2622 for (i = 0, j = 0; i < nb_output_files; i++) {
2623 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2624 avc[j] = output_files[i]->ctx;
2632 av_sdp_create(avc, j, sdp, sizeof(sdp));
2634 if (!sdp_filename) {
2635 printf("SDP:\n%s\n", sdp);
2638 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2639 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2641 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2642 avio_closep(&sdp_pb);
/* Free the filename so the SDP is only written once. */
2643 av_freep(&sdp_filename);
/* Look up the hwaccel entry matching a hardware pixel format in the global
 * 'hwaccels' table (terminated by a NULL name).
 * NOTE(review): the final 'return NULL;' for no match is elided here. */
2651 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2654 for (i = 0; hwaccels[i].name; i++)
2655 if (hwaccels[i].pix_fmt == pix_fmt)
2656 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first hardware format whose hwaccel matches the user's
 * request (or HWACCEL_AUTO) and initializes successfully; software formats
 * fall through (elided). NOTE(review): several lines, including the final
 * return of *p and loop exits, are elided in this excerpt. */
2660 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2662 InputStream *ist = s->opaque;
2663 const enum AVPixelFormat *p;
/* pix_fmts is terminated by AV_PIX_FMT_NONE (-1). */
2666 for (p = pix_fmts; *p != -1; p++) {
2667 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2668 const HWAccel *hwaccel;
2670 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2673 hwaccel = get_hwaccel(*p);
/* Skip hwaccels that do not match an already-active or user-requested id. */
2675 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2676 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2679 ret = hwaccel->init(s);
/* Init failure is fatal only when this hwaccel was explicitly requested. */
2681 if (ist->hwaccel_id == hwaccel->id) {
2682 av_log(NULL, AV_LOG_FATAL,
2683 "%s hwaccel requested for input stream #%d:%d, "
2684 "but cannot be initialized.\n", hwaccel->name,
2685 ist->file_index, ist->st->index);
2686 return AV_PIX_FMT_NONE;
/* Propagate a pre-existing hw frames context to the decoder. */
2691 if (ist->hw_frames_ctx) {
2692 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2693 if (!s->hw_frames_ctx)
2694 return AV_PIX_FMT_NONE;
2697 ist->active_hwaccel_id = hwaccel->id;
2698 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate to the active hwaccel's
 * buffer allocator for hardware frames, otherwise use the default one. */
2705 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2707 InputStream *ist = s->opaque;
2709 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2710 return ist->hwaccel_get_buffer(s, frame, flags);
2712 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for input stream 'ist_index' (when decoding is needed),
 * wire up the hwaccel callbacks, and propagate decoder parameters to the
 * attached filtergraph inputs. On failure, a human-readable message is
 * written into 'error'. NOTE(review): interior lines (returns, braces,
 * the codec NULL check) are elided in this excerpt. */
2715 static int init_input_stream(int ist_index, char *error, int error_len)
2718 InputStream *ist = input_streams[ist_index];
/* Give each filter input the decoder's parameters before opening it. */
2720 for (i = 0; i < ist->nb_filters; i++) {
2721 ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
2723 av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
2728 if (ist->decoding_needed) {
2729 AVCodec *codec = ist->dec;
/* Elided guard: 'codec' may be NULL, in which case this error is reported. */
2731 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2732 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2733 return AVERROR(EINVAL);
/* Install hwaccel-aware callbacks; opaque carries the InputStream. */
2736 ist->dec_ctx->opaque = ist;
2737 ist->dec_ctx->get_format = get_format;
2738 ist->dec_ctx->get_buffer2 = get_buffer;
2739 ist->dec_ctx->thread_safe_callbacks = 1;
2741 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2742 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2743 (ist->decoding_needed & DECODING_FOR_OST)) {
2744 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2745 if (ist->decoding_needed & DECODING_FOR_FILTER)
2746 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2749 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2751 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2752 * audio, and video decoders such as cuvid or mediacodec */
2753 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic thread count unless the user chose one. */
2755 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2756 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2757 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2758 if (ret == AVERROR_EXPERIMENTAL)
2759 abort_codec_experimental(codec, 0);
2761 snprintf(error, error_len,
2762 "Error while opening decoder for input stream "
2764 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left in the dict was not consumed -> abort. */
2767 assert_avoptions(ist->decoder_opts);
2770 ist->next_pts = AV_NOPTS_VALUE;
2771 ist->next_dts = AV_NOPTS_VALUE;
/* Return the InputStream an output stream is mapped from, if any.
 * NOTE(review): the fall-through 'return NULL;' for filter-fed outputs
 * (source_index < 0) is elided in this excerpt. */
2776 static InputStream *get_input_stream(OutputStream *ost)
2778 if (ost->source_index >= 0)
2779 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values: returns a negative, zero, or
 * positive value when *a is less than, equal to, or greater than *b. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    /* Branchless sign comparison (same result as FFDIFFSIGN); avoids the
     * overflow that a plain subtraction of int64_t values could cause. */
    return (lhs > rhs) - (lhs < rhs);
}
2788 /* open the muxer when all the streams are initialized */
/* If every stream of 'of' is initialized, write the muxer header, emit the
 * SDP if requested, and flush packets queued while the muxer was not yet
 * open. NOTE(review): early returns and error paths are elided here. */
2789 static int check_init_output_file(OutputFile *of, int file_index)
2793 for (i = 0; i < of->ctx->nb_streams; i++) {
2794 OutputStream *ost = output_streams[of->ost_index + i];
/* Any uninitialized stream -> not ready yet (elided early return). */
2795 if (!ost->initialized)
2799 of->ctx->interrupt_callback = int_cb;
2801 ret = avformat_write_header(of->ctx, &of->opts);
2803 av_log(NULL, AV_LOG_ERROR,
2804 "Could not write header for output file #%d "
2805 "(incorrect codec parameters ?): %s\n",
2806 file_index, av_err2str(ret));
2809 //assert_avoptions(of->opts);
2810 of->header_written = 1;
2812 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2814 if (sdp_filename || want_sdp)
2817 /* flush the muxing queues */
2818 for (i = 0; i < of->ctx->nb_streams; i++) {
2819 OutputStream *ost = output_streams[of->ost_index + i];
/* Drain packets buffered before the header could be written. */
2821 while (av_fifo_size(ost->muxing_queue)) {
2823 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2824 write_packet(of, &pkt, ost);
/* Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and timebase link-by-link, then copy the final
 * parameters/timebase back onto the output stream.
 * NOTE(review): returns on error and declarations are elided here. */
2831 static int init_output_bsfs(OutputStream *ost)
2836 if (!ost->nb_bitstream_filters)
2839 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2840 ctx = ost->bsf_ctx[i];
/* Filter i takes its input from filter i-1's output (or from the stream). */
2842 ret = avcodec_parameters_copy(ctx->par_in,
2843 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2847 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2849 ret = av_bsf_init(ctx);
2851 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2852 ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines what the muxer will see. */
2857 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2858 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2862 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream for streamcopy: copy codec parameters from the
 * mapped input stream, resolve the codec tag for the output container,
 * transfer timing info, duplicate side data, and apply per-codec-type
 * fixups. NOTE(review): interior lines (error checks, braces, some
 * declarations) are elided in this excerpt. */
2867 static int init_output_stream_streamcopy(OutputStream *ost)
2869 OutputFile *of = output_files[ost->file_index];
2870 InputStream *ist = get_input_stream(ost);
2871 AVCodecParameters *par_dst = ost->st->codecpar;
2872 AVCodecParameters *par_src = ost->ref_par;
2875 uint32_t codec_tag = par_dst->codec_tag;
/* Streamcopy always has a mapped input and never a filtergraph. */
2877 av_assert0(ist && !ost->filter);
/* Round-trip input params through enc_ctx so user codec options apply. */
2879 avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2880 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2882 av_log(NULL, AV_LOG_FATAL,
2883 "Error setting up codec context options.\n");
2886 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec tag only if the output container accepts it. */
2889 unsigned int codec_tag_tmp;
2890 if (!of->ctx->oformat->codec_tag ||
2891 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2892 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
2893 codec_tag = par_src->codec_tag;
2896 ret = avcodec_parameters_copy(par_dst, par_src);
2900 par_dst->codec_tag = codec_tag;
2902 if (!ost->frame_rate.num)
2903 ost->frame_rate = ist->framerate;
2904 ost->st->avg_frame_rate = ost->frame_rate;
2906 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2910 // copy timebase while removing common factors
2911 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
2914 ost->st->disposition = ist->st->disposition;
/* Deep-copy stream-level side data, skipping the display matrix when the
 * user overrode rotation. */
2916 if (ist->st->nb_side_data) {
2917 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2918 sizeof(*ist->st->side_data));
2919 if (!ost->st->side_data)
2920 return AVERROR(ENOMEM);
2922 ost->st->nb_side_data = 0;
2923 for (i = 0; i < ist->st->nb_side_data; i++) {
2924 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2925 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2927 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2930 sd_dst->data = av_malloc(sd_src->size);
2932 return AVERROR(ENOMEM);
2933 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2934 sd_dst->size = sd_src->size;
2935 sd_dst->type = sd_src->type;
2936 ost->st->nb_side_data++;
2940 ost->parser = av_parser_init(par_dst->codec_id);
2941 ost->parser_avctx = avcodec_alloc_context3(NULL);
2942 if (!ost->parser_avctx)
2943 return AVERROR(ENOMEM);
2945 switch (par_dst->codec_type) {
2946 case AVMEDIA_TYPE_AUDIO:
2947 if (audio_volume != 256) {
2948 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Historical workaround: some muxers misbehave with these block_align
 * values for MP3/AC3, so they are cleared. */
2951 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2952 par_dst->block_align= 0;
2953 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2954 par_dst->block_align= 0;
2956 case AVMEDIA_TYPE_VIDEO:
/* SAR priority: -aspect override > input stream SAR > codec-level SAR. */
2957 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2959 av_mul_q(ost->frame_aspect_ratio,
2960 (AVRational){ par_dst->height, par_dst->width });
2961 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2962 "with stream copy may produce invalid files\n");
2964 else if (ist->st->sample_aspect_ratio.num)
2965 sar = ist->st->sample_aspect_ratio;
2967 sar = par_src->sample_aspect_ratio;
2968 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2969 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2970 ost->st->r_frame_rate = ist->st->r_frame_rate;
/* Set the "encoder" metadata tag on an output stream to the lavc version
 * string plus the encoder name, unless the user already set one or bitexact
 * mode is requested (then only "Lavc <name>" without version details). */
2977 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2979 AVDictionaryEntry *e;
2981 uint8_t *encoder_string;
2982 int encoder_string_len;
2983 int format_flags = 0;
2984 int codec_flags = 0;
/* Respect an explicit user-provided encoder tag (elided early return). */
2986 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate the not-yet-applied fflags/flags option strings to detect
 * AVFMT_FLAG_BITEXACT / AV_CODEC_FLAG_BITEXACT. */
2989 e = av_dict_get(of->opts, "fflags", NULL, 0);
2991 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2994 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2996 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2998 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3001 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3004 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3005 encoder_string = av_mallocz(encoder_string_len);
3006 if (!encoder_string)
3009 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3010 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3012 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3013 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Ownership of encoder_string passes to the dictionary (DONT_STRDUP_VAL). */
3014 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3015 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames specification 'kf' (a comma-separated list of
 * timestamps, where an entry "chapters[+offset]" expands to all chapter start
 * times) into a sorted array of pts values in the encoder timebase, stored in
 * ost->forced_kf_pts/forced_kf_count. NOTE(review): interior lines (comma
 * counting, pointer advancing, braces) are elided in this excerpt. */
3018 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3019 AVCodecContext *avctx)
3022 int n = 1, i, size, index = 0;
/* n counts entries (elided: incremented per ',' found in kf). */
3025 for (p = kf; *p; p++)
3029 pts = av_malloc_array(size, sizeof(*pts));
3031 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3036 for (i = 0; i < n; i++) {
3037 char *next = strchr(p, ',');
/* "chapters[+offset]": grow the array and add one entry per chapter. */
3042 if (!memcmp(p, "chapters", 8)) {
3044 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3047 if (avf->nb_chapters > INT_MAX - size ||
3048 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3050 av_log(NULL, AV_LOG_FATAL,
3051 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters", applied to every chapter start. */
3054 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3055 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3057 for (j = 0; j < avf->nb_chapters; j++) {
3058 AVChapter *c = avf->chapters[j];
3059 av_assert1(index < size);
3060 pts[index++] = av_rescale_q(c->start, c->time_base,
3061 avctx->time_base) + t;
/* Plain timestamp entry. */
3066 t = parse_time_or_die("force_key_frames", p, 1);
3067 av_assert1(index < size);
3068 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keyframe forcing logic expects the pts array sorted ascending. */
3075 av_assert0(index == size);
3076 qsort(pts, size, sizeof(*pts), compare_int64);
3077 ost->forced_kf_count = size;
3078 ost->forced_kf_pts = pts;
/* Configure the encoder context of an output stream that will be encoded:
 * disposition, filtergraph configuration, frame rate selection, and
 * per-media-type parameters (sample format/rate/layout for audio; timebase,
 * dimensions, SAR, pixel format and forced keyframes for video; defaults for
 * subtitles/data). NOTE(review): many interior lines are elided here. */
3081 static int init_output_stream_encode(OutputStream *ost)
3083 InputStream *ist = get_input_stream(ost);
3084 AVCodecContext *enc_ctx = ost->enc_ctx;
3085 AVCodecContext *dec_ctx = NULL;
3086 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3089 set_encoder_id(output_files[ost->file_index], ost);
3092 ost->st->disposition = ist->st->disposition;
3094 dec_ctx = ist->dec_ctx;
3096 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only audio/video stream of its type in the output,
 * mark it as the default disposition. */
3098 for (j = 0; j < oc->nb_streams; j++) {
3099 AVStream *st = oc->streams[j];
3100 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3103 if (j == oc->nb_streams)
3104 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3105 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3106 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Simple filtergraphs are configured lazily, here. */
3109 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3110 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3111 filtergraph_is_simple(ost->filter->graph)) {
3112 FilterGraph *fg = ost->filter->graph;
3114 if (configure_filtergraph(fg)) {
3115 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Frame rate priority: user -r > buffersink > -framerate > r_frame_rate >
 * hardcoded 25 fps fallback, then clamp to the encoder's supported list. */
3120 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3121 if (!ost->frame_rate.num)
3122 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3123 if (ist && !ost->frame_rate.num)
3124 ost->frame_rate = ist->framerate;
3125 if (ist && !ost->frame_rate.num)
3126 ost->frame_rate = ist->st->r_frame_rate;
3127 if (ist && !ost->frame_rate.num) {
3128 ost->frame_rate = (AVRational){25, 1};
3129 av_log(NULL, AV_LOG_WARNING,
3131 "about the input framerate is available. Falling "
3132 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3133 "if you want a different framerate.\n",
3134 ost->file_index, ost->index);
3136 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3137 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3138 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3139 ost->frame_rate = ost->enc->supported_framerates[idx];
3141 // reduce frame rate for mpeg4 to be within the spec limits
3142 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3143 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3144 ost->frame_rate.num, ost->frame_rate.den, 65535);
3148 switch (enc_ctx->codec_type) {
3149 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters are taken from the filtergraph's output link. */
3150 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3152 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3153 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3154 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3155 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3156 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3157 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3159 case AVMEDIA_TYPE_VIDEO:
3160 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3161 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3162 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3163 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3164 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3165 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3166 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe timestamps were parsed in an earlier timebase; rescale
 * them to the final encoder timebase. */
3168 for (j = 0; j < ost->forced_kf_count; j++)
3169 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3171 enc_ctx->time_base);
3173 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3174 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3175 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3176 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3177 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3178 ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* Warn about non-yuv420p auto-selected pixel formats for H.264/MPEG-2,
 * which break playback on some players. */
3179 if (!strncmp(ost->enc->name, "libx264", 7) &&
3180 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3181 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3182 av_log(NULL, AV_LOG_WARNING,
3183 "No pixel format specified, %s for H.264 encoding chosen.\n"
3184 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3185 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3186 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3187 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3188 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3189 av_log(NULL, AV_LOG_WARNING,
3190 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3191 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3192 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3193 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3195 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3196 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3198 ost->st->avg_frame_rate = ost->frame_rate;
/* When the output geometry/format differs from the decoder, the decoder's
 * bits_per_raw_sample no longer applies; use the user-specified value. */
3201 enc_ctx->width != dec_ctx->width ||
3202 enc_ctx->height != dec_ctx->height ||
3203 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3204 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" is evaluated per frame, "source" keeps input
 * keyframes, anything else is a static timestamp list. */
3207 if (ost->forced_keyframes) {
3208 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3209 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3210 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3212 av_log(NULL, AV_LOG_ERROR,
3213 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3216 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3217 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3218 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3219 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3221 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3222 // parse it only for static kf timings
3223 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3224 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3228 case AVMEDIA_TYPE_SUBTITLE:
/* Subtitles use a millisecond timebase; inherit dimensions from the input
 * when the encoder has none. */
3229 enc_ctx->time_base = (AVRational){1, 1000};
3230 if (!enc_ctx->width) {
3231 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3232 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3235 case AVMEDIA_TYPE_DATA:
/* Fully initialize one output stream: run the encode or streamcopy setup,
 * open the encoder, mirror codec parameters and side data onto the stream,
 * apply a user-specified -disposition, init bitstream filters, and finally
 * try to open the muxer once all streams are ready.
 * NOTE(review): interior lines (error returns, braces) are elided here. */
3245 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3249 if (ost->encoding_needed) {
3250 AVCodec *codec = ost->enc;
3251 AVCodecContext *dec = NULL;
3254 ret = init_output_stream_encode(ost);
3258 if ((ist = get_input_stream(ost)))
/* Carry over the decoder's ASS subtitle header so the encoder can reuse it. */
3260 if (dec && dec->subtitle_header) {
3261 /* ASS code assumes this buffer is null terminated so add extra byte. */
3262 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3263 if (!ost->enc_ctx->subtitle_header)
3264 return AVERROR(ENOMEM);
3265 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3266 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3268 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3269 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128 kb/s when none was requested. */
3270 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3272 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3273 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3274 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3276 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
3277 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
3278 if (!ost->enc_ctx->hw_frames_ctx)
3279 return AVERROR(ENOMEM);
3282 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3283 if (ret == AVERROR_EXPERIMENTAL)
3284 abort_codec_experimental(codec, 1);
3285 snprintf(error, error_len,
3286 "Error while opening encoder for output stream #%d:%d - "
3287 "maybe incorrect parameters such as bit_rate, rate, width or height",
3288 ost->file_index, ost->index);
/* Fixed-frame-size encoders need the buffersink to emit exact frames. */
3291 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3292 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3293 av_buffersink_set_frame_size(ost->filter->filter,
3294 ost->enc_ctx->frame_size);
3295 assert_avoptions(ost->encoder_opts);
3296 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3297 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3298 " It takes bits/s as argument, not kbits/s\n");
3300 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3302 av_log(NULL, AV_LOG_FATAL,
3303 "Error initializing the output stream codec context.\n");
3307 * FIXME: ost->st->codec should't be needed here anymore.
3309 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* Mirror encoder-produced side data onto the stream for the muxer. */
3313 if (ost->enc_ctx->nb_coded_side_data) {
3316 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3317 sizeof(*ost->st->side_data));
3318 if (!ost->st->side_data)
3319 return AVERROR(ENOMEM);
3321 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3322 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3323 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3325 sd_dst->data = av_malloc(sd_src->size);
3327 return AVERROR(ENOMEM);
3328 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3329 sd_dst->size = sd_src->size;
3330 sd_dst->type = sd_src->type;
3331 ost->st->nb_side_data++;
3335 // copy timebase while removing common factors
3336 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3337 ost->st->codec->codec= ost->enc_ctx->codec;
3338 } else if (ost->stream_copy) {
3339 ret = init_output_stream_streamcopy(ost);
3344 * FIXME: will the codec context used by the parser during streamcopy
3345 * This should go away with the new parser API.
3347 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3352 // parse user provided disposition, and update stream values
/* A private options table lets av_opt_eval_flags parse the -disposition
 * string ("default+forced", ...) into AV_DISPOSITION_* bits. */
3353 if (ost->disposition) {
3354 static const AVOption opts[] = {
3355 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3356 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3357 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3358 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3359 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3360 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3361 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3362 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3363 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3364 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3365 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3366 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3367 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3368 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3371 static const AVClass class = {
3373 .item_name = av_default_item_name,
3375 .version = LIBAVUTIL_VERSION_INT,
3377 const AVClass *pclass = &class;
3379 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3384 /* initialize bitstream filters for the output stream
3385 * needs to be done here, because the codec id for streamcopy is not
3386 * known until now */
3387 ret = init_output_bsfs(ost);
3391 ost->initialized = 1;
/* Last stream in: this may trigger writing the muxer header. */
3393 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn once per stream index when a packet arrives for a stream that appeared
 * after the initial probe; nb_streams_warn tracks the highest index already
 * reported so each new stream is only warned about once. */
3400 static void report_new_stream(int input_index, AVPacket *pkt)
3402 InputFile *file = input_files[input_index];
3403 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream (elided early return). */
3405 if (pkt->stream_index < file->nb_streams_warn)
3407 av_log(file->ctx, AV_LOG_WARNING,
3408 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3409 av_get_media_type_string(st->codecpar->codec_type),
3410 input_index, pkt->stream_index,
3411 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3412 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcoding loop:
 *  - attach complex-filtergraph outputs to a source input stream index,
 *  - start framerate-emulation clocks, set up hwaccel (QSV/CUVID) paths,
 *  - open all decoders and encoders,
 *  - discard programs with no used streams,
 *  - write headers for stream-less outputs,
 *  - dump the final stream mapping to the log.
 * Returns 0 on success, a negative AVERROR code on failure.
 * NOTE(review): interior lines of this excerpt are elided. */
3415 static int transcode_init(void)
3417 int ret = 0, i, j, k;
3418 AVFormatContext *oc;
3421 char error[1024] = {0};
/* Give each complex-graph output stream a source_index when its graph has
 * exactly one input, so later reporting can point at a concrete source. */
3423 for (i = 0; i < nb_filtergraphs; i++) {
3424 FilterGraph *fg = filtergraphs[i];
3425 for (j = 0; j < fg->nb_outputs; j++) {
3426 OutputFilter *ofilter = fg->outputs[j];
3427 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3429 if (fg->nb_inputs != 1)
3431 for (k = nb_input_streams-1; k >= 0 ; k--)
3432 if (fg->inputs[0]->ist == input_streams[k])
3434 ofilter->ost->source_index = k;
3438 /* init framerate emulation */
3439 for (i = 0; i < nb_input_files; i++) {
3440 InputFile *ifile = input_files[i];
3441 if (ifile->rate_emu)
/* Record a per-stream start wall-clock so -re can pace packet delivery. */
3442 for (j = 0; j < ifile->nb_streams; j++)
3443 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3446 /* hwaccel transcoding */
3447 for (i = 0; i < nb_output_streams; i++) {
3448 ost = output_streams[i];
/* Hardware transcode setup only applies when actually re-encoding. */
3450 if (!ost->stream_copy) {
3452 if (qsv_transcode_init(ost))
3457 if (cuvid_transcode_init(ost))
3463 /* init input streams */
3464 for (i = 0; i < nb_input_streams; i++)
3465 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On decoder-open failure, close any encoder contexts before bailing out. */
3466 for (i = 0; i < nb_output_streams; i++) {
3467 ost = output_streams[i];
3468 avcodec_close(ost->enc_ctx);
3473 /* open each encoder */
3474 for (i = 0; i < nb_output_streams; i++) {
3475 ret = init_output_stream(output_streams[i], error, sizeof(error));
3480 /* discard unused programs */
3481 for (i = 0; i < nb_input_files; i++) {
3482 InputFile *ifile = input_files[i];
3483 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3484 AVProgram *p = ifile->ctx->programs[j];
3485 int discard = AVDISCARD_ALL;
/* A program is kept if at least one of its member streams is in use. */
3487 for (k = 0; k < p->nb_stream_indexes; k++)
3488 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3489 discard = AVDISCARD_DEFAULT;
3492 p->discard = discard;
3496 /* write headers for files with no streams */
3497 for (i = 0; i < nb_output_files; i++) {
3498 oc = output_files[i]->ctx;
3499 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3500 ret = check_init_output_file(output_files[i], i);
3507 /* dump the stream mapping */
3508 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3509 for (i = 0; i < nb_input_streams; i++) {
3510 ist = input_streams[i];
/* Inputs feeding complex (non-simple) filtergraphs are listed first. */
3512 for (j = 0; j < ist->nb_filters; j++) {
3513 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3514 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3515 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3516 ist->filters[j]->name);
3517 if (nb_filtergraphs > 1)
3518 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3519 av_log(NULL, AV_LOG_INFO, "\n");
3524 for (i = 0; i < nb_output_streams; i++) {
3525 ost = output_streams[i];
3527 if (ost->attachment_filename) {
3528 /* an attached file */
3529 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3530 ost->attachment_filename, ost->file_index, ost->index);
3534 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3535 /* output from a complex graph */
3536 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3537 if (nb_filtergraphs > 1)
3538 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3540 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3541 ost->index, ost->enc ? ost->enc->name : "?");
/* Plain input->output mapping (simple graph or streamcopy). */
3545 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3546 input_streams[ost->source_index]->file_index,
3547 input_streams[ost->source_index]->st->index,
3550 if (ost->sync_ist != input_streams[ost->source_index])
3551 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3552 ost->sync_ist->file_index,
3553 ost->sync_ist->st->index);
3554 if (ost->stream_copy)
3555 av_log(NULL, AV_LOG_INFO, " (copy)");
/* For transcodes, report codec vs. implementation; the implementation is
 * printed as "native" when it shares the codec's canonical name. */
3557 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3558 const AVCodec *out_codec = ost->enc;
3559 const char *decoder_name = "?";
3560 const char *in_codec_name = "?";
3561 const char *encoder_name = "?";
3562 const char *out_codec_name = "?";
3563 const AVCodecDescriptor *desc;
3566 decoder_name = in_codec->name;
3567 desc = avcodec_descriptor_get(in_codec->id);
3569 in_codec_name = desc->name;
3570 if (!strcmp(decoder_name, in_codec_name))
3571 decoder_name = "native";
3575 encoder_name = out_codec->name;
3576 desc = avcodec_descriptor_get(out_codec->id);
3578 out_codec_name = desc->name;
3579 if (!strcmp(encoder_name, out_codec_name))
3580 encoder_name = "native";
3583 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3584 in_codec_name, decoder_name,
3585 out_codec_name, encoder_name);
3587 av_log(NULL, AV_LOG_INFO, "\n");
/* Error path: surface the message accumulated in error[] above. */
3591 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3595 transcode_init_done = 1;
3600 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3601 static int need_output(void)
3605 for (i = 0; i < nb_output_streams; i++) {
3606 OutputStream *ost = output_streams[i];
3607 OutputFile *of = output_files[ost->file_index];
3608 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* A stream needs no more output if it is finished or its file has already
 * reached the user-set size limit (-fs). */
3610 if (ost->finished ||
3611 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Frame-count limit (-frames) reached: close every stream of this file. */
3613 if (ost->frame_number >= ost->max_frames) {
3615 for (j = 0; j < of->ctx->nb_streams; j++)
3616 close_output_stream(output_streams[of->ost_index + j]);
3627 * Select the output stream to process.
3629 * @return selected output stream, or NULL if none available
3631 static OutputStream *choose_output(void)
/* Pick the unfinished output stream with the smallest current DTS
 * (rescaled to a common time base) so muxing stays interleaved. */
3634 int64_t opts_min = INT64_MAX;
3635 OutputStream *ost_min = NULL;
3637 for (i = 0; i < nb_output_streams; i++) {
3638 OutputStream *ost = output_streams[i];
/* Streams with no DTS yet sort first (INT64_MIN) so they get primed. */
3639 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3640 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3642 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3643 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Temporarily-unavailable streams yield NULL so the caller can wait. */
3645 if (!ost->finished && opts < opts_min) {
3647 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin; used around
 * interactive command entry. Silently does nothing if stdin is not a tty. */
3653 static void set_tty_echo(int on)
3657 if (tcgetattr(0, &tty) == 0) {
3658 if (on) tty.c_lflag |= ECHO;
3659 else tty.c_lflag &= ~ECHO;
3660 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin for interactive single-key commands (quit, verbosity, filter
 * commands, debug toggles, help). Returns AVERROR_EXIT when the user asked
 * to stop (or a signal was received), 0 otherwise.
 * NOTE(review): interior lines of this excerpt are elided. */
3665 static int check_keyboard_interaction(int64_t cur_time)
3668 static int64_t last_time;
3669 if (received_nb_signals)
3670 return AVERROR_EXIT;
3671 /* read_key() returns 0 on EOF */
/* Rate-limit keyboard polling to once per 100ms; never poll as a daemon. */
3672 if(cur_time - last_time >= 100000 && !run_as_daemon){
3674 last_time = cur_time;
3678 return AVERROR_EXIT;
3679 if (key == '+') av_log_set_level(av_log_get_level()+10);
3680 if (key == '-') av_log_set_level(av_log_get_level()-10);
3681 if (key == 's') qp_hist ^= 1;
3684 do_hex_dump = do_pkt_dump = 0;
3685 } else if(do_pkt_dump){
3689 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a full "<target> <time> <command> [<arg>]" line and send or
 * queue it to matching filters ('c' = first match only). */
3691 if (key == 'c' || key == 'C'){
3692 char buf[4096], target[64], command[256], arg[256] = {0};
3695 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3698 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3703 fprintf(stderr, "\n");
3705 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3706 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3707 target, time, command, arg);
3708 for (i = 0; i < nb_filtergraphs; i++) {
3709 FilterGraph *fg = filtergraphs[i];
3712 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3713 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3714 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3715 } else if (key == 'c') {
3716 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3717 ret = AVERROR_PATCHWELCOME;
3719 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3721 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3726 av_log(NULL, AV_LOG_ERROR,
3727 "Parse error, at least 3 arguments were expected, "
3728 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle through codec debug modes; 'D' prompts for an explicit value. */
3731 if (key == 'd' || key == 'D'){
3734 debug = input_streams[0]->st->codec->debug<<1;
3735 if(!debug) debug = 1;
3736 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3743 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3748 fprintf(stderr, "\n");
3749 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3750 fprintf(stderr,"error parsing debug value\n");
/* Propagate the chosen debug flags to all decoder and encoder contexts. */
3752 for(i=0;i<nb_input_streams;i++) {
3753 input_streams[i]->st->codec->debug = debug;
3755 for(i=0;i<nb_output_streams;i++) {
3756 OutputStream *ost = output_streams[i];
3757 ost->enc_ctx->debug = debug;
3759 if(debug) av_log_set_level(AV_LOG_DEBUG);
3760 fprintf(stderr,"debug=%d\n", debug);
3763 fprintf(stderr, "key function\n"
3764 "? show this help\n"
3765 "+ increase verbosity\n"
3766 "- decrease verbosity\n"
3767 "c Send command to first matching filter supporting it\n"
3768 "C Send/Queue command to all matching filters\n"
3769 "D cycle through available debug modes\n"
3770 "h dump packets/hex press to cycle through the 3 states\n"
3772 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through a thread message queue.
 * On read error/EOF the error is propagated via the queue's receive side.
 * NOTE(review): interior lines of this excerpt are elided. */
3779 static void *input_thread(void *arg)
/* Non-blocking sends are used for non-seekable inputs (see init). */
3782 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3787 ret = av_read_frame(f->ctx, &pkt);
3789 if (ret == AVERROR(EAGAIN)) {
3794 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3797 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full on a non-blocking send: warn once and retry blocking so the
 * packet is not dropped. */
3798 if (flags && ret == AVERROR(EAGAIN)) {
3800 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3801 av_log(f->ctx, AV_LOG_WARNING,
3802 "Thread message queue blocking; consider raising the "
3803 "thread_queue_size option (current value: %d)\n",
3804 f->thread_queue_size);
3807 if (ret != AVERROR_EOF)
3808 av_log(f->ctx, AV_LOG_ERROR,
3809 "Unable to send packet to main thread: %s\n",
/* Send failed: release the packet and report the error to the receiver. */
3811 av_packet_unref(&pkt);
3812 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down and join all input demuxer threads, draining and freeing any
 * packets still queued, then free each thread message queue. */
3820 static void free_input_threads(void)
3824 for (i = 0; i < nb_input_files; i++) {
3825 InputFile *f = input_files[i];
3828 if (!f || !f->in_thread_queue)
/* Signal EOF to the sender so the thread stops producing, then drain. */
3830 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3831 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3832 av_packet_unref(&pkt);
3834 pthread_join(f->thread, NULL);
3836 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread + message queue per input file. Skipped when
 * there is only a single input (the main thread reads directly then).
 * Returns 0 on success or a negative AVERROR code. */
3840 static int init_input_threads(void)
3844 if (nb_input_files == 1)
3847 for (i = 0; i < nb_input_files; i++) {
3848 InputFile *f = input_files[i];
/* Use non-blocking queue sends for non-seekable (live) inputs, except the
 * lavfi pseudo-demuxer, so a slow consumer does not stall the source. */
3850 if (f->ctx->pb ? !f->ctx->pb->seekable :
3851 strcmp(f->ctx->iformat->name, "lavfi"))
3852 f->non_blocking = 1;
3853 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3854 f->thread_queue_size, sizeof(AVPacket));
3858 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3859 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3860 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, not -1/errno. */
3861 return AVERROR(ret);
/* Receive one packet from the input file's demuxer thread queue;
 * non-blocking when the file is in non-blocking mode. */
3867 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3869 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3871 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file: enforces -re rate emulation,
 * then reads either from the per-file thread queue (multiple inputs) or
 * directly from the demuxer. */
3875 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Rate emulation: refuse to deliver (EAGAIN) until wall-clock time has
 * caught up with the stream's DTS. */
3879 for (i = 0; i < f->nb_streams; i++) {
3880 InputStream *ist = input_streams[f->ist_index + i];
3881 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3882 int64_t now = av_gettime_relative() - ist->start;
3884 return AVERROR(EAGAIN);
3889 if (nb_input_files > 1)
3890 return get_input_packet_mt(f, pkt);
3892 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. some input reported EAGAIN this iteration). */
3895 static int got_eagain(void)
3898 for (i = 0; i < nb_output_streams; i++)
3899 if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-stream unavailable flags so the next
 * transcode iteration retries every input/output. */
3904 static void reset_eagain(void)
3907 for (i = 0; i < nb_input_files; i++)
3908 input_files[i]->eagain = 0;
3909 for (i = 0; i < nb_output_streams; i++)
3910 output_streams[i]->unavailable = 0;
3913 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compare tmp (in tmp_time_base) against *duration (in time_base) with
 * av_compare_ts(); keep whichever is larger, returning the time base that
 * now applies to *duration. */
3914 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3915 AVRational time_base)
3921 return tmp_time_base;
3924 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3927 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to its start time,
 * flush decoders, and extend ifile->duration by the length of the pass
 * just completed so output timestamps keep increasing monotonically.
 * NOTE(review): interior lines of this excerpt are elided. */
3933 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3936 AVCodecContext *avctx;
3937 int i, ret, has_audio = 0;
3938 int64_t duration = 0;
3940 ret = av_seek_frame(is, -1, is->start_time, 0);
3944 for (i = 0; i < ifile->nb_streams; i++) {
3945 ist = input_streams[ifile->ist_index + i];
3946 avctx = ist->dec_ctx;
/* Flush each active decoder so no stale frames leak across the loop. */
3949 if (ist->decoding_needed) {
3950 process_input_packet(ist, NULL, 1);
3951 avcodec_flush_buffers(avctx);
3954 /* duration is the length of the last frame in a stream
3955 * when audio stream is present we don't care about
3956 * last video frame length because it's not defined exactly */
3957 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3961 for (i = 0; i < ifile->nb_streams; i++) {
3962 ist = input_streams[ifile->ist_index + i];
3963 avctx = ist->dec_ctx;
/* Last-frame length: exact for audio (nb_samples / sample_rate)... */
3966 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3967 AVRational sample_rate = {1, avctx->sample_rate};
3969 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* ...estimated from the frame rate for video, else 1 tick as fallback. */
3973 if (ist->framerate.num) {
3974 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3975 } else if (ist->st->avg_frame_rate.num) {
3976 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3977 } else duration = 1;
3979 if (!ifile->duration)
3980 ifile->time_base = ist->st->time_base;
3981 /* the total duration of the stream, max_pts - min_pts is
3982 * the duration of the stream without the last frame */
3983 duration += ist->max_pts - ist->min_pts;
3984 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Positive loop counts are decremented; -1 means loop forever. */
3988 if (ifile->loop > 0)
3996 * - 0 -- one packet was read and processed
3997 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3998 * this function should be called again
3999 * - AVERROR_EOF -- this function should not be called again
/* Read one packet from the given input file, fix up its timestamps
 * (start-time offset, wrap correction, ts_scale, discontinuity handling,
 * loop-duration offset), attach stream-global side data to the first
 * packet, and hand it to process_input_packet().
 * NOTE(review): interior lines of this excerpt are elided. */
4001 static int process_input(int file_index)
4003 InputFile *ifile = input_files[file_index];
4004 AVFormatContext *is;
4012 ret = get_input_packet(ifile, &pkt);
4014 if (ret == AVERROR(EAGAIN)) {
/* EOF with -stream_loop active: rewind and try reading again. */
4018 if (ret < 0 && ifile->loop) {
4019 if ((ret = seek_to_start(ifile, is)) < 0)
4021 ret = get_input_packet(ifile, &pkt);
4022 if (ret == AVERROR(EAGAIN)) {
4028 if (ret != AVERROR_EOF) {
4029 print_error(is->filename, ret);
/* Real EOF: flush every decoder of this file and finish outputs that are
 * fed directly (streamcopy / subtitles) rather than through lavfi. */
4034 for (i = 0; i < ifile->nb_streams; i++) {
4035 ist = input_streams[ifile->ist_index + i];
4036 if (ist->decoding_needed) {
4037 ret = process_input_packet(ist, NULL, 0);
4042 /* mark all outputs that don't go through lavfi as finished */
4043 for (j = 0; j < nb_output_streams; j++) {
4044 OutputStream *ost = output_streams[j];
4046 if (ost->source_index == ifile->ist_index + i &&
4047 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4048 finish_output_stream(ost);
/* EAGAIN (not EOF) so the caller keeps polling the other inputs. */
4052 ifile->eof_reached = 1;
4053 return AVERROR(EAGAIN);
4059 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4060 is->streams[pkt.stream_index]);
4062 /* the following test is needed in case new streams appear
4063 dynamically in stream : we ignore them */
4064 if (pkt.stream_index >= ifile->nb_streams) {
4065 report_new_stream(file_index, &pkt);
4066 goto discard_packet;
4069 ist = input_streams[ifile->ist_index + pkt.stream_index];
4071 ist->data_size += pkt.size;
4075 goto discard_packet;
4077 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4078 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4083 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4084 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4085 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4086 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4087 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4088 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4089 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4090 av_ts2str(input_files[ist->file_index]->ts_offset),
4091 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for containers with limited pts_wrap_bits. */
4094 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4095 int64_t stime, stime2;
4096 // Correcting starttime based on the enabled streams
4097 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4098 // so we instead do it here as part of discontinuity handling
4099 if ( ist->next_dts == AV_NOPTS_VALUE
4100 && ifile->ts_offset == -is->start_time
4101 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4102 int64_t new_start_time = INT64_MAX;
4103 for (i=0; i<is->nb_streams; i++) {
4104 AVStream *st = is->streams[i];
4105 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4107 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4109 if (new_start_time > is->start_time) {
4110 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4111 ifile->ts_offset = -new_start_time;
4115 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4116 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4117 ist->wrap_correction_done = 1;
/* If the timestamp is more than half a wrap period past start, assume it
 * wrapped and subtract one full period; keep checking further packets. */
4119 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4120 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4121 ist->wrap_correction_done = 0;
4123 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4124 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4125 ist->wrap_correction_done = 0;
4129 /* add the stream-global side data to the first packet */
4130 if (ist->nb_packets == 1) {
4131 if (ist->st->nb_side_data)
4132 av_packet_split_side_data(&pkt);
4133 for (i = 0; i < ist->st->nb_side_data; i++) {
4134 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Do not clobber side data already present on the packet itself. */
4137 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Display matrix is handled by autorotate — skip copying it here. */
4139 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4142 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4146 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset, then the per-stream ts_scale. */
4150 if (pkt.dts != AV_NOPTS_VALUE)
4151 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4152 if (pkt.pts != AV_NOPTS_VALUE)
4153 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4155 if (pkt.pts != AV_NOPTS_VALUE)
4156 pkt.pts *= ist->ts_scale;
4157 if (pkt.dts != AV_NOPTS_VALUE)
4158 pkt.dts *= ist->ts_scale;
/* First-packet inter-stream discontinuity check against the file's last
 * delivered timestamp (only for AVFMT_TS_DISCONT formats, without -copyts). */
4160 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4161 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4162 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4163 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4164 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4165 int64_t delta = pkt_dts - ifile->last_ts;
4166 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4167 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4168 ifile->ts_offset -= delta;
4169 av_log(NULL, AV_LOG_DEBUG,
4170 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4171 delta, ifile->ts_offset);
4172 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4173 if (pkt.pts != AV_NOPTS_VALUE)
4174 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* When looping, shift timestamps forward by the accumulated file duration. */
4178 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4179 if (pkt.pts != AV_NOPTS_VALUE) {
4180 pkt.pts += duration;
4181 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4182 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4185 if (pkt.dts != AV_NOPTS_VALUE)
4186 pkt.dts += duration;
/* Intra-stream discontinuity handling against the predicted next DTS:
 * AVFMT_TS_DISCONT formats adjust ts_offset; others drop wild DTS/PTS
 * beyond dts_error_threshold. */
4188 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4189 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4190 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4191 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4193 int64_t delta = pkt_dts - ist->next_dts;
4194 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4195 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4196 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4197 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4198 ifile->ts_offset -= delta;
4199 av_log(NULL, AV_LOG_DEBUG,
4200 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4201 delta, ifile->ts_offset);
4202 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4203 if (pkt.pts != AV_NOPTS_VALUE)
4204 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4207 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4208 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4209 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4210 pkt.dts = AV_NOPTS_VALUE;
4212 if (pkt.pts != AV_NOPTS_VALUE){
4213 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4214 delta = pkt_pts - ist->next_dts;
4215 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4216 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4217 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4218 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last delivered DTS for the inter-stream check above. */
4224 if (pkt.dts != AV_NOPTS_VALUE)
4225 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4228 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4229 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4230 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4231 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4232 av_ts2str(input_files[ist->file_index]->ts_offset),
4233 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4236 sub2video_heartbeat(ist, pkt.pts);
4238 process_input_packet(ist, &pkt, 0);
4241 av_packet_unref(&pkt);
4247 * Perform a step of transcoding for the specified filter graph.
4249 * @param[in] graph filter graph to consider
4250 * @param[out] best_ist input stream where a frame would allow to continue
4251 * @return 0 for success, <0 for error
/* NOTE(review): interior lines of this excerpt are elided. */
4253 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4256 int nb_requests, nb_requests_max = 0;
4257 InputFilter *ifilter;
/* Ask the graph for output; success means frames are ready to reap. */
4261 ret = avfilter_graph_request_oldest(graph->graph);
4263 return reap_filters(0);
/* Graph EOF: flush remaining frames and close all its output streams. */
4265 if (ret == AVERROR_EOF) {
4266 ret = reap_filters(1);
4267 for (i = 0; i < graph->nb_outputs; i++)
4268 close_output_stream(graph->outputs[i]->ost);
4271 if (ret != AVERROR(EAGAIN))
/* Graph is starved (EAGAIN): pick the input whose buffersrc has failed the
 * most requests as the best stream to feed next, skipping files that are
 * themselves stalled or at EOF. */
4274 for (i = 0; i < graph->nb_inputs; i++) {
4275 ifilter = graph->inputs[i];
4277 if (input_files[ist->file_index]->eagain ||
4278 input_files[ist->file_index]->eof_reached)
4280 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4281 if (nb_requests > nb_requests_max) {
4282 nb_requests_max = nb_requests;
/* No feedable input found: mark the graph's outputs unavailable for now. */
4288 for (i = 0; i < graph->nb_outputs; i++)
4289 graph->outputs[i]->ost->unavailable = 1;
4295 * Run a single step of transcoding.
4297 * @return 0 for success, <0 for error
/* Chooses the neediest output stream, determines which input stream must be
 * read to make progress (directly or via its filtergraph), reads/processes
 * one packet, then reaps any filtered frames.
 * NOTE(review): interior lines of this excerpt are elided. */
4299 static int transcode_step(void)
4305 ost = choose_output();
4312 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Output fed by a filtergraph: let the graph pick the best input stream. */
4317 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Otherwise the output maps straight onto an input stream. */
4322 av_assert0(ost->source_index >= 0);
4323 ist = input_streams[ost->source_index];
4326 ret = process_input(ist->file_index);
4327 if (ret == AVERROR(EAGAIN)) {
4328 if (input_files[ist->file_index]->eagain)
4329 ost->unavailable = 1;
/* EOF of one input is not an error for the loop as a whole. */
4334 return ret == AVERROR_EOF ? 0 : ret;
4336 return reap_filters(0);
4340 * The following code is the main loop of the file converter
/* Drives the whole conversion: initialize, loop transcode_step() until no
 * output is needed or the user/signals stop it, flush decoders, write
 * trailers, close codecs and free per-stream resources.
 * Returns 0 on success, a negative AVERROR code on failure.
 * NOTE(review): interior lines of this excerpt are elided. */
4342 static int transcode(void)
4345 AVFormatContext *os;
4348 int64_t timer_start;
4349 int64_t total_packets_written = 0;
4351 ret = transcode_init();
4355 if (stdin_interaction) {
4356 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4359 timer_start = av_gettime_relative();
4362 if ((ret = init_input_threads()) < 0)
/* Main loop: runs until SIGTERM/SIGINT, 'q', or no stream needs output. */
4366 while (!received_sigterm) {
4367 int64_t cur_time= av_gettime_relative();
4369 /* if 'q' pressed, exits */
4370 if (stdin_interaction)
4371 if (check_keyboard_interaction(cur_time) < 0)
4374 /* check if there's any stream where output is still needed */
4375 if (!need_output()) {
4376 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4380 ret = transcode_step();
4381 if (ret < 0 && ret != AVERROR_EOF) {
4383 av_strerror(ret, errbuf, sizeof(errbuf));
4385 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4389 /* dump report by using the output first video and audio streams */
4390 print_report(0, timer_start, cur_time);
4393 free_input_threads();
4396 /* at the end of stream, we must flush the decoder buffers */
4397 for (i = 0; i < nb_input_streams; i++) {
4398 ist = input_streams[i];
4399 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4400 process_input_packet(ist, NULL, 0);
4407 /* write the trailer if needed and close file */
4408 for (i = 0; i < nb_output_files; i++) {
4409 os = output_files[i]->ctx;
/* A file whose header was never written received no packets at all. */
4410 if (!output_files[i]->header_written) {
4411 av_log(NULL, AV_LOG_ERROR,
4412 "Nothing was written into output file %d (%s), because "
4413 "at least one of its streams received no packets.\n",
4417 if ((ret = av_write_trailer(os)) < 0) {
4418 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4424 /* dump report by using the first video and audio streams */
4425 print_report(1, timer_start, av_gettime_relative());
4427 /* close each encoder */
4428 for (i = 0; i < nb_output_streams; i++) {
4429 ost = output_streams[i];
4430 if (ost->encoding_needed) {
4431 av_freep(&ost->enc_ctx->stats_in);
4433 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing at all was muxed. */
4436 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4437 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4441 /* close each decoder */
4442 for (i = 0; i < nb_input_streams; i++) {
4443 ist = input_streams[i];
4444 if (ist->decoding_needed) {
4445 avcodec_close(ist->dec_ctx);
4446 if (ist->hwaccel_uninit)
4447 ist->hwaccel_uninit(ist->dec_ctx);
4451 av_buffer_unref(&hw_device_ctx);
/* Cleanup path (also reached on error): join threads, free per-output
 * stream resources (2-pass logs, options dictionaries, etc.). */
4458 free_input_threads();
4461 if (output_streams) {
4462 for (i = 0; i < nb_output_streams; i++) {
4463 ost = output_streams[i];
4466 if (fclose(ost->logfile))
4467 av_log(NULL, AV_LOG_ERROR,
4468 "Error closing logfile, loss of information possible: %s\n",
4469 av_err2str(AVERROR(errno)));
4470 ost->logfile = NULL;
4472 av_freep(&ost->forced_kf_pts);
4473 av_freep(&ost->apad);
4474 av_freep(&ost->disposition);
4475 av_dict_free(&ost->encoder_opts);
4476 av_dict_free(&ost->sws_dict);
4477 av_dict_free(&ost->swr_opts);
4478 av_dict_free(&ost->resample_opts);
/* Return the process's consumed user CPU time in microseconds, using
 * getrusage (POSIX), GetProcessTimes (Windows), or wall clock as fallback. */
4486 static int64_t getutime(void)
4489 struct rusage rusage;
4491 getrusage(RUSAGE_SELF, &rusage);
4492 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4493 #elif HAVE_GETPROCESSTIMES
4495 FILETIME c, e, k, u;
4496 proc = GetCurrentProcess();
4497 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; divide by 10 to get microseconds. */
4498 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4500 return av_gettime_relative();
/* Return the process's peak memory usage in bytes (for -benchmark),
 * via getrusage's ru_maxrss (POSIX) or GetProcessMemoryInfo (Windows). */
4504 static int64_t getmaxrss(void)
4506 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4507 struct rusage rusage;
4508 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes — scale to bytes. */
4509 return (int64_t)rusage.ru_maxrss * 1024;
4510 #elif HAVE_GETPROCESSMEMORYINFO
4512 PROCESS_MEMORY_COUNTERS memcounters;
4513 proc = GetCurrentProcess();
4514 memcounters.cb = sizeof(memcounters);
4515 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4516 return memcounters.PeakPagefileUsage;
/* No-op log callback installed in quiet/daemon mode to suppress all
 * libav* log output. */
4522 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register components, parse the command line, open
 * all inputs/outputs, run transcode(), then report benchmarks and exit via
 * exit_program(). NOTE(review): interior lines of this excerpt are elided. */
4526 int main(int argc, char **argv)
4533 register_exit(ffmpeg_cleanup);
4535 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4537 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4538 parse_loglevel(argc, argv, options);
/* Undocumented "-d": run as daemon — silence all logging. */
4540 if(argc>1 && !strcmp(argv[1], "-d")){
4542 av_log_set_callback(log_callback_null);
4547 avcodec_register_all();
4549 avdevice_register_all();
4551 avfilter_register_all();
4553 avformat_network_init();
4555 show_banner(argc, argv, options);
4557 /* parse options and open all input/output files */
4558 ret = ffmpeg_parse_options(argc, argv);
4562 if (nb_output_files <= 0 && nb_input_files == 0) {
4564 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4568 /* file converter / grab */
4569 if (nb_output_files <= 0) {
4570 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4574 // if (nb_input_files == 0) {
4575 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4579 for (i = 0; i < nb_output_files; i++) {
4580 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* -benchmark: sample user CPU time around the transcode run. */
4584 current_time = ti = getutime();
4585 if (transcode() < 0)
4587 ti = getutime() - ti;
4589 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4591 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4592 decode_error_stat[0], decode_error_stat[1]);
/* Fail when the decode error ratio exceeds -max_error_rate. */
4593 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4596 exit_program(received_nb_signals ? 255 : main_return_code);
4597 return main_return_code;