2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static unsigned dup_warning = 1000;
130 static int nb_frames_drop = 0;
131 static int64_t decode_error_stat[2];
133 static int want_sdp = 1;
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
138 static uint8_t *subtitle_out;
140 InputStream **input_streams = NULL;
141 int nb_input_streams = 0;
142 InputFile **input_files = NULL;
143 int nb_input_files = 0;
145 OutputStream **output_streams = NULL;
146 int nb_output_streams = 0;
147 OutputFile **output_files = NULL;
148 int nb_output_files = 0;
150 FilterGraph **filtergraphs;
155 /* init terminal so that we can grab keys */
156 static struct termios oldtty;
157 static int restore_tty;
161 static void free_input_threads(void);
165 Convert subtitles to video frames with alpha so they can be inserted into filter graphs.
166 This is a temporary solution until libavfilter gets real subtitle support.
169 static int sub2video_get_blank_frame(InputStream *ist)
172 AVFrame *frame = ist->sub2video.frame;
174 av_frame_unref(frame);
175 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
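/* The canvas uses AV_PIX_FMT_RGB32 so each pixel keeps an alpha value for
 * overlaying; av_frame_get_buffer() below allocates it with 32-byte alignment. */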
178 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
180 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
187 uint32_t *pal, *dst2;
191 if (r->type != SUBTITLE_BITMAP) {
192 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
195 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197 r->x, r->y, r->w, r->h, w, h
202 dst += r->y * dst_linesize + r->x * 4;
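/* The subtitle rectangle is a paletted bitmap: r->data[0] holds palette indices
 * and r->data[1] the 32-bit palette, so the loop below expands it to RGB32 pixels. */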
204 pal = (uint32_t *)r->data[1];
205 for (y = 0; y < r->h; y++) {
206 dst2 = (uint32_t *)dst;
208 for (x = 0; x < r->w; x++)
209 *(dst2++) = pal[*(src2++)];
211 src += r->linesize[0];
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 AVFrame *frame = ist->sub2video.frame;
220 av_assert1(frame->data[0]);
221 ist->sub2video.last_pts = frame->pts = pts;
222 for (i = 0; i < ist->nb_filters; i++)
223 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224 AV_BUFFERSRC_FLAG_KEEP_REF |
225 AV_BUFFERSRC_FLAG_PUSH);
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
230 AVFrame *frame = ist->sub2video.frame;
234 int64_t pts, end_pts;
239 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240 AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
245 pts = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250 av_log(ist->dec_ctx, AV_LOG_ERROR,
251 "Impossible to get a blank canvas.\n");
254 dst = frame->data [0];
255 dst_linesize = frame->linesize[0];
256 for (i = 0; i < num_rects; i++)
257 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
259 ist->sub2video.end_pts = end_pts;
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 InputFile *infile = input_files[ist->file_index];
268 /* When a frame is read from a file, examine all sub2video streams in
269 the same file and send the sub2video frame again. Otherwise, decoded
270 video frames could be accumulating in the filter graph while a filter
271 (possibly overlay) is desperately waiting for a subtitle frame. */
272 for (i = 0; i < infile->nb_streams; i++) {
273 InputStream *ist2 = input_streams[infile->ist_index + i];
274 if (!ist2->sub2video.frame)
276 /* subtitles seem to be usually muxed ahead of other streams;
277 if not, subtracting a larger time here is necessary */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283 sub2video_update(ist2, NULL);
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
291 static void sub2video_flush(InputStream *ist)
295 if (ist->sub2video.end_pts < INT64_MAX)
296 sub2video_update(ist, NULL);
297 for (i = 0; i < ist->nb_filters; i++)
298 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
303 static void term_exit_sigsafe(void)
307 tcsetattr (0, TCSANOW, &oldtty);
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
324 sigterm_handler(int sig)
326 received_sigterm = sig;
327 received_nb_signals++;
329 if(received_nb_signals > 3) {
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331 strlen("Received > 3 system signals, hard exiting\n"));
337 #if HAVE_SETCONSOLECTRLHANDLER
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
345 case CTRL_BREAK_EVENT:
346 sigterm_handler(SIGINT);
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352 sigterm_handler(SIGTERM);
353 /* Basically, with these 3 events, when we return from this method the
354 process is hard terminated, so stall for as long as we need to in order
355 to let the main thread(s) clean up and terminate gracefully
356 (we have at most 5 seconds, but should be done far before that). */
357 while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
372 if (!run_as_daemon && stdin_interaction) {
374 if (tcgetattr (0, &tty) == 0) {
378 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
379 |INLCR|IGNCR|ICRNL|IXON);
380 tty.c_oflag |= OPOST;
381 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
382 tty.c_cflag &= ~(CSIZE|PARENB);
387 tcsetattr (0, TCSANOW, &tty);
389 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
393 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
394 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
396 signal(SIGXCPU, sigterm_handler);
398 #if HAVE_SETCONSOLECTRLHANDLER
399 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
403 /* read a key without blocking */
404 static int read_key(void)
416 n = select(1, &rfds, NULL, NULL, &tv);
425 # if HAVE_PEEKNAMEDPIPE
427 static HANDLE input_handle;
430 input_handle = GetStdHandle(STD_INPUT_HANDLE);
431 is_pipe = !GetConsoleMode(input_handle, &dw);
435 /* When running under a GUI, you will end up here. */
436 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
437 // input pipe may have been closed by the program that ran ffmpeg
455 static int decode_interrupt_cb(void *ctx)
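/* Abort I/O on the first signal while initialization is still running
 * (transcode_init_done == 0); once transcoding has started, require more
 * than one signal so a single Ctrl-C can still shut down cleanly. */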
457 return received_nb_signals > transcode_init_done;
460 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
462 static void ffmpeg_cleanup(int ret)
467 int maxrss = getmaxrss() / 1024;
468 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
471 for (i = 0; i < nb_filtergraphs; i++) {
472 FilterGraph *fg = filtergraphs[i];
473 avfilter_graph_free(&fg->graph);
474 for (j = 0; j < fg->nb_inputs; j++) {
475 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
476 av_freep(&fg->inputs[j]->name);
477 av_freep(&fg->inputs[j]);
479 av_freep(&fg->inputs);
480 for (j = 0; j < fg->nb_outputs; j++) {
481 av_freep(&fg->outputs[j]->name);
482 av_freep(&fg->outputs[j]->formats);
483 av_freep(&fg->outputs[j]->channel_layouts);
484 av_freep(&fg->outputs[j]->sample_rates);
485 av_freep(&fg->outputs[j]);
487 av_freep(&fg->outputs);
488 av_freep(&fg->graph_desc);
490 av_freep(&filtergraphs[i]);
492 av_freep(&filtergraphs);
494 av_freep(&subtitle_out);
497 for (i = 0; i < nb_output_files; i++) {
498 OutputFile *of = output_files[i];
503 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
505 avformat_free_context(s);
506 av_dict_free(&of->opts);
508 av_freep(&output_files[i]);
510 for (i = 0; i < nb_output_streams; i++) {
511 OutputStream *ost = output_streams[i];
516 for (j = 0; j < ost->nb_bitstream_filters; j++)
517 av_bsf_free(&ost->bsf_ctx[j]);
518 av_freep(&ost->bsf_ctx);
519 av_freep(&ost->bsf_extradata_updated);
521 av_frame_free(&ost->filtered_frame);
522 av_frame_free(&ost->last_frame);
523 av_dict_free(&ost->encoder_opts);
525 av_parser_close(ost->parser);
526 avcodec_free_context(&ost->parser_avctx);
528 av_freep(&ost->forced_keyframes);
529 av_expr_free(ost->forced_keyframes_pexpr);
530 av_freep(&ost->avfilter);
531 av_freep(&ost->logfile_prefix);
533 av_freep(&ost->audio_channels_map);
534 ost->audio_channels_mapped = 0;
536 av_dict_free(&ost->sws_dict);
538 avcodec_free_context(&ost->enc_ctx);
539 avcodec_parameters_free(&ost->ref_par);
541 while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
543 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
544 av_packet_unref(&pkt);
546 av_fifo_freep(&ost->muxing_queue);
548 av_freep(&output_streams[i]);
551 free_input_threads();
553 for (i = 0; i < nb_input_files; i++) {
554 avformat_close_input(&input_files[i]->ctx);
555 av_freep(&input_files[i]);
557 for (i = 0; i < nb_input_streams; i++) {
558 InputStream *ist = input_streams[i];
560 av_frame_free(&ist->decoded_frame);
561 av_frame_free(&ist->filter_frame);
562 av_dict_free(&ist->decoder_opts);
563 avsubtitle_free(&ist->prev_sub.subtitle);
564 av_frame_free(&ist->sub2video.frame);
565 av_freep(&ist->filters);
566 av_freep(&ist->hwaccel_device);
567 av_freep(&ist->dts_buffer);
569 avcodec_free_context(&ist->dec_ctx);
571 av_freep(&input_streams[i]);
575 if (fclose(vstats_file))
576 av_log(NULL, AV_LOG_ERROR,
577 "Error closing vstats file, loss of information possible: %s\n",
578 av_err2str(AVERROR(errno)));
580 av_freep(&vstats_filename);
582 av_freep(&input_streams);
583 av_freep(&input_files);
584 av_freep(&output_streams);
585 av_freep(&output_files);
589 avformat_network_deinit();
591 if (received_sigterm) {
592 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
593 (int) received_sigterm);
594 } else if (ret && transcode_init_done) {
595 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
601 void remove_avoptions(AVDictionary **a, AVDictionary *b)
603 AVDictionaryEntry *t = NULL;
605 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
606 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
610 void assert_avoptions(AVDictionary *m)
612 AVDictionaryEntry *t;
613 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
614 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
619 static void abort_codec_experimental(AVCodec *c, int encoder)
624 static void update_benchmark(const char *fmt, ...)
626 if (do_benchmark_all) {
627 int64_t t = getutime();
633 vsnprintf(buf, sizeof(buf), fmt, va);
635 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
641 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
644 for (i = 0; i < nb_output_streams; i++) {
645 OutputStream *ost2 = output_streams[i];
646 ost2->finished |= ost == ost2 ? this_stream : others;
650 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
652 AVFormatContext *s = of->ctx;
653 AVStream *st = ost->st;
656 if (!of->header_written) {
658 /* the muxer is not initialized yet, buffer the packet */
659 if (!av_fifo_space(ost->muxing_queue)) {
660 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
661 ost->max_muxing_queue_size);
662 if (new_size <= av_fifo_size(ost->muxing_queue)) {
663 av_log(NULL, AV_LOG_ERROR,
664 "Too many packets buffered for output stream %d:%d.\n",
665 ost->file_index, ost->st->index);
668 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
672 av_packet_move_ref(&tmp_pkt, pkt);
673 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
677 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
678 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
679 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
682 * Audio encoders may split the packets -- #frames in != #packets out.
683 * But there is no reordering, so we can limit the number of output packets
684 * by simply dropping them here.
685 * Counting encoded video frames needs to be done separately because of
686 * reordering; see do_video_out().
688 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
689 if (ost->frame_number >= ost->max_frames) {
690 av_packet_unref(pkt);
695 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
697 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
699 ost->quality = sd ? AV_RL32(sd) : -1;
700 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
702 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
704 ost->error[i] = AV_RL64(sd + 8 + 8*i);
709 if (ost->frame_rate.num && ost->is_cfr) {
710 if (pkt->duration > 0)
711 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
712 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
717 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
718 if (pkt->dts != AV_NOPTS_VALUE &&
719 pkt->pts != AV_NOPTS_VALUE &&
720 pkt->dts > pkt->pts) {
721 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
723 ost->file_index, ost->st->index);
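/* Pick the median of {pts, dts, last_mux_dts + 1}: the sum minus its minimum
 * and maximum leaves the middle value, which becomes the corrected dts. */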
725 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
726 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
727 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
729 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
730 pkt->dts != AV_NOPTS_VALUE &&
731 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
732 ost->last_mux_dts != AV_NOPTS_VALUE) {
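/* Strict muxers require dts to increase monotonically, so the minimum
 * acceptable value is last_mux_dts + 1; formats flagged AVFMT_TS_NONSTRICT
 * also accept a dts equal to the previous one. */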
733 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
734 if (pkt->dts < max) {
735 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
736 av_log(s, loglevel, "Non-monotonous DTS in output stream "
737 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
738 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
740 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
743 av_log(s, loglevel, "changing to %"PRId64". This may result "
744 "in incorrect timestamps in the output file.\n",
746 if (pkt->pts >= pkt->dts)
747 pkt->pts = FFMAX(pkt->pts, max);
752 ost->last_mux_dts = pkt->dts;
754 ost->data_size += pkt->size;
755 ost->packets_written++;
757 pkt->stream_index = ost->index;
760 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
761 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
762 av_get_media_type_string(ost->enc_ctx->codec_type),
763 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
764 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
769 ret = av_interleaved_write_frame(s, pkt);
771 print_error("av_interleaved_write_frame()", ret);
772 main_return_code = 1;
773 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
775 av_packet_unref(pkt);
778 static void close_output_stream(OutputStream *ost)
780 OutputFile *of = output_files[ost->file_index];
782 ost->finished |= ENCODER_FINISHED;
784 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
785 of->recording_time = FFMIN(of->recording_time, end);
789 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
793 /* apply the output bitstream filters, if any */
794 if (ost->nb_bitstream_filters) {
797 av_packet_split_side_data(pkt);
798 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
804 /* get a packet from the previous filter up the chain */
805 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
806 if (ret == AVERROR(EAGAIN)) {
812 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
813 * the API states this shouldn't happen after init(). Propagate it here to the
814 * muxer and to the next filters in the chain to work around this.
815 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
816 * par_out->extradata and adapt muxers accordingly to get rid of this. */
817 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
818 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
821 ost->bsf_extradata_updated[idx - 1] |= 1;
824 /* send it to the next filter down the chain or to the muxer */
825 if (idx < ost->nb_bitstream_filters) {
826 /* HACK/FIXME! - See above */
827 if (!(ost->bsf_extradata_updated[idx] & 2)) {
828 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
831 ost->bsf_extradata_updated[idx] |= 2;
833 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
838 write_packet(of, pkt, ost);
841 write_packet(of, pkt, ost);
844 if (ret < 0 && ret != AVERROR_EOF) {
845 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
846 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
852 static int check_recording_time(OutputStream *ost)
854 OutputFile *of = output_files[ost->file_index];
856 if (of->recording_time != INT64_MAX &&
857 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
858 AV_TIME_BASE_Q) >= 0) {
859 close_output_stream(ost);
865 static void do_audio_out(OutputFile *of, OutputStream *ost,
868 AVCodecContext *enc = ost->enc_ctx;
872 av_init_packet(&pkt);
876 if (!check_recording_time(ost))
879 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
880 frame->pts = ost->sync_opts;
881 ost->sync_opts = frame->pts + frame->nb_samples;
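/* For audio the encoder time base is 1/sample_rate, so sync_opts (the next
 * expected pts) advances by the number of samples in this frame. */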
882 ost->samples_encoded += frame->nb_samples;
883 ost->frames_encoded++;
885 av_assert0(pkt.size || !pkt.data);
886 update_benchmark(NULL);
888 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
889 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
890 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
891 enc->time_base.num, enc->time_base.den);
894 ret = avcodec_send_frame(enc, frame);
899 ret = avcodec_receive_packet(enc, &pkt);
900 if (ret == AVERROR(EAGAIN))
905 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
907 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
910 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
911 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
912 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
913 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
916 output_packet(of, &pkt, ost);
921 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
925 static void do_subtitle_out(OutputFile *of,
929 int subtitle_out_max_size = 1024 * 1024;
930 int subtitle_out_size, nb, i;
935 if (sub->pts == AV_NOPTS_VALUE) {
936 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
945 subtitle_out = av_malloc(subtitle_out_max_size);
947 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
952 /* Note: DVB subtitles need one packet to draw them and another
953 packet to clear them */
954 /* XXX: signal it in the codec context? */
955 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
960 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
962 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
963 pts -= output_files[ost->file_index]->start_time;
964 for (i = 0; i < nb; i++) {
965 unsigned save_num_rects = sub->num_rects;
967 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
968 if (!check_recording_time(ost))
972 // start_display_time is required to be 0
973 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
974 sub->end_display_time -= sub->start_display_time;
975 sub->start_display_time = 0;
979 ost->frames_encoded++;
981 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
982 subtitle_out_max_size, sub);
984 sub->num_rects = save_num_rects;
985 if (subtitle_out_size < 0) {
986 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
990 av_init_packet(&pkt);
991 pkt.data = subtitle_out;
992 pkt.size = subtitle_out_size;
993 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
994 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
995 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
996 /* XXX: the pts correction is handled here. Maybe handling
997 it in the codec would be better */
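/* start/end_display_time are in milliseconds; the factor of 90 below converts
 * them to the 90 kHz ticks that DVB subtitle streams use. */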
999 pkt.pts += 90 * sub->start_display_time;
1001 pkt.pts += 90 * sub->end_display_time;
1004 output_packet(of, &pkt, ost);
1008 static void do_video_out(OutputFile *of,
1010 AVFrame *next_picture,
1013 int ret, format_video_sync;
1015 AVCodecContext *enc = ost->enc_ctx;
1016 AVCodecParameters *mux_par = ost->st->codecpar;
1017 AVRational frame_rate;
1018 int nb_frames, nb0_frames, i;
1019 double delta, delta0;
1020 double duration = 0;
1022 InputStream *ist = NULL;
1023 AVFilterContext *filter = ost->filter->filter;
1025 if (ost->source_index >= 0)
1026 ist = input_streams[ost->source_index];
1028 frame_rate = av_buffersink_get_frame_rate(filter);
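/* duration below is the nominal length of one output frame, expressed in
 * units of the encoder time base. */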
1029 if (frame_rate.num > 0 && frame_rate.den > 0)
1030 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1032 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1033 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1035 if (!ost->filters_script &&
1039 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1040 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1043 if (!next_picture) {
1045 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1046 ost->last_nb0_frames[1],
1047 ost->last_nb0_frames[2]);
1049 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1050 delta = delta0 + duration;
1052 /* by default, we output a single frame */
1053 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1056 format_video_sync = video_sync_method;
1057 if (format_video_sync == VSYNC_AUTO) {
1058 if(!strcmp(of->ctx->oformat->name, "avi")) {
1059 format_video_sync = VSYNC_VFR;
1061 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1063 && format_video_sync == VSYNC_CFR
1064 && input_files[ist->file_index]->ctx->nb_streams == 1
1065 && input_files[ist->file_index]->input_ts_offset == 0) {
1066 format_video_sync = VSYNC_VSCFR;
1068 if (format_video_sync == VSYNC_CFR && copy_ts) {
1069 format_video_sync = VSYNC_VSCFR;
1072 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1076 format_video_sync != VSYNC_PASSTHROUGH &&
1077 format_video_sync != VSYNC_DROP) {
1078 if (delta0 < -0.6) {
1079 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1081 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1082 sync_ipts = ost->sync_opts;
1087 switch (format_video_sync) {
1089 if (ost->frame_number == 0 && delta0 >= 0.5) {
1090 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1093 ost->sync_opts = lrint(sync_ipts);
1096 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1097 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1099 } else if (delta < -1.1)
1101 else if (delta > 1.1) {
1102 nb_frames = lrintf(delta);
1104 nb0_frames = lrintf(delta0 - 0.6);
1110 else if (delta > 0.6)
1111 ost->sync_opts = lrint(sync_ipts);
1114 case VSYNC_PASSTHROUGH:
1115 ost->sync_opts = lrint(sync_ipts);
1122 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1123 nb0_frames = FFMIN(nb0_frames, nb_frames);
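/* Shift the history of previous-frame duplication counts; last_nb0_frames[0]
 * always holds the most recent value and feeds the mid_pred() guess above. */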
1125 memmove(ost->last_nb0_frames + 1,
1126 ost->last_nb0_frames,
1127 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1128 ost->last_nb0_frames[0] = nb0_frames;
1130 if (nb0_frames == 0 && ost->last_dropped) {
1132 av_log(NULL, AV_LOG_VERBOSE,
1133 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1134 ost->frame_number, ost->st->index, ost->last_frame->pts);
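/* Anything beyond re-sending a previously dropped frame plus emitting the new
 * picture itself counts as duplication and is reported below. */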
1136 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1137 if (nb_frames > dts_error_threshold * 30) {
1138 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1142 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1143 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1144 if (nb_frames_dup > dup_warning) {
1145 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1149 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1151 /* duplicate the frame if needed */
1152 for (i = 0; i < nb_frames; i++) {
1153 AVFrame *in_picture;
1154 av_init_packet(&pkt);
1158 if (i < nb0_frames && ost->last_frame) {
1159 in_picture = ost->last_frame;
1161 in_picture = next_picture;
1166 in_picture->pts = ost->sync_opts;
1169 if (!check_recording_time(ost))
1171 if (ost->frame_number >= ost->max_frames)
1175 #if FF_API_LAVF_FMT_RAWPICTURE
1176 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1177 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1178 /* raw pictures are written as an AVPicture structure to
1179 avoid any copies. We temporarily support the older
1181 if (in_picture->interlaced_frame)
1182 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1184 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1185 pkt.data = (uint8_t *)in_picture;
1186 pkt.size = sizeof(AVPicture);
1187 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1188 pkt.flags |= AV_PKT_FLAG_KEY;
1190 output_packet(of, &pkt, ost);
1194 int forced_keyframe = 0;
1197 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1198 ost->top_field_first >= 0)
1199 in_picture->top_field_first = !!ost->top_field_first;
1201 if (in_picture->interlaced_frame) {
1202 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1203 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1205 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1207 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1209 in_picture->quality = enc->global_quality;
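/* Let the encoder choose the picture type unless a keyframe is forced below. */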
1210 in_picture->pict_type = 0;
1212 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1213 in_picture->pts * av_q2d(enc->time_base) : NAN;
1214 if (ost->forced_kf_index < ost->forced_kf_count &&
1215 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1216 ost->forced_kf_index++;
1217 forced_keyframe = 1;
1218 } else if (ost->forced_keyframes_pexpr) {
1220 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1221 res = av_expr_eval(ost->forced_keyframes_pexpr,
1222 ost->forced_keyframes_expr_const_values, NULL);
1223 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1224 ost->forced_keyframes_expr_const_values[FKF_N],
1225 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1226 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1227 ost->forced_keyframes_expr_const_values[FKF_T],
1228 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1231 forced_keyframe = 1;
1232 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1233 ost->forced_keyframes_expr_const_values[FKF_N];
1234 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1235 ost->forced_keyframes_expr_const_values[FKF_T];
1236 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1239 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1240 } else if ( ost->forced_keyframes
1241 && !strncmp(ost->forced_keyframes, "source", 6)
1242 && in_picture->key_frame==1) {
1243 forced_keyframe = 1;
1246 if (forced_keyframe) {
1247 in_picture->pict_type = AV_PICTURE_TYPE_I;
1248 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1251 update_benchmark(NULL);
1253 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1254 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1255 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1256 enc->time_base.num, enc->time_base.den);
1259 ost->frames_encoded++;
1261 ret = avcodec_send_frame(enc, in_picture);
1266 ret = avcodec_receive_packet(enc, &pkt);
1267 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1268 if (ret == AVERROR(EAGAIN))
1274 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1275 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1276 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1277 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1280 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1281 pkt.pts = ost->sync_opts;
1283 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1286 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1287 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1288 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1289 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1292 frame_size = pkt.size;
1293 output_packet(of, &pkt, ost);
1295 /* if two-pass encoding, write the encoder stats to the log file */
1296 if (ost->logfile && enc->stats_out) {
1297 fprintf(ost->logfile, "%s", enc->stats_out);
1303 * For video, number of frames in == number of packets out.
1304 * But there may be reordering, so we can't throw away frames on encoder
1305 * flush; we need to limit them here, before they go into the encoder.
1307 ost->frame_number++;
1309 if (vstats_filename && frame_size)
1310 do_video_stats(ost, frame_size);
1313 if (!ost->last_frame)
1314 ost->last_frame = av_frame_alloc();
1315 av_frame_unref(ost->last_frame);
1316 if (next_picture && ost->last_frame)
1317 av_frame_ref(ost->last_frame, next_picture);
1319 av_frame_free(&ost->last_frame);
1323 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1327 static double psnr(double d)
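/* d is the mean squared error already normalized by 255^2 (see the callers),
 * so -10*log10(d) equals 10*log10(255^2 / MSE), i.e. PSNR in dB. */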
1329 return -10.0 * log10(d);
1332 static void do_video_stats(OutputStream *ost, int frame_size)
1334 AVCodecContext *enc;
1336 double ti1, bitrate, avg_bitrate;
1338 /* this is executed just the first time do_video_stats is called */
1340 vstats_file = fopen(vstats_filename, "w");
1348 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1349 frame_number = ost->st->nb_frames;
1350 if (vstats_version <= 1) {
1351 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1352 ost->quality / (float)FF_QP2LAMBDA);
1354 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1355 ost->quality / (float)FF_QP2LAMBDA);
1358 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1359 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1361 fprintf(vstats_file,"f_size= %6d ", frame_size);
1362 /* compute pts value */
1363 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1367 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1368 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1369 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1370 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1371 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1375 static void finish_output_stream(OutputStream *ost)
1377 OutputFile *of = output_files[ost->file_index];
1380 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1383 for (i = 0; i < of->ctx->nb_streams; i++)
1384 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1389 * Get and encode new output from any of the filtergraphs, without causing
1392 * @return 0 for success, <0 for severe errors
1394 static int reap_filters(int flush)
1396 AVFrame *filtered_frame = NULL;
1399 /* Reap all buffers present in the buffer sinks */
1400 for (i = 0; i < nb_output_streams; i++) {
1401 OutputStream *ost = output_streams[i];
1402 OutputFile *of = output_files[ost->file_index];
1403 AVFilterContext *filter;
1404 AVCodecContext *enc = ost->enc_ctx;
1409 filter = ost->filter->filter;
1411 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1412 return AVERROR(ENOMEM);
1414 filtered_frame = ost->filtered_frame;
1417 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1418 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1419 AV_BUFFERSINK_FLAG_NO_REQUEST);
1421 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1422 av_log(NULL, AV_LOG_WARNING,
1423 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1424 } else if (flush && ret == AVERROR_EOF) {
1425 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1426 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1430 if (ost->finished) {
1431 av_frame_unref(filtered_frame);
1434 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1435 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1436 AVRational filter_tb = av_buffersink_get_time_base(filter);
1437 AVRational tb = enc->time_base;
1438 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
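/* Temporarily scale the encoder time base up by up to 16 extra bits so the
 * integer rescale below keeps fractional precision, then divide it back out
 * to obtain a floating-point pts. */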
1440 tb.den <<= extra_bits;
1442 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1443 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1444 float_pts /= 1 << extra_bits;
1445 // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
1446 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1448 filtered_frame->pts =
1449 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1450 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1452 //if (ost->source_index >= 0)
1453 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1455 switch (av_buffersink_get_type(filter)) {
1456 case AVMEDIA_TYPE_VIDEO:
1457 if (!ost->frame_aspect_ratio.num)
1458 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1461 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1462 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1464 enc->time_base.num, enc->time_base.den);
1467 do_video_out(of, ost, filtered_frame, float_pts);
1469 case AVMEDIA_TYPE_AUDIO:
1470 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1471 enc->channels != av_frame_get_channels(filtered_frame)) {
1472 av_log(NULL, AV_LOG_ERROR,
1473 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1476 do_audio_out(of, ost, filtered_frame);
1479 // TODO support subtitle filters
1483 av_frame_unref(filtered_frame);
1490 static void print_final_stats(int64_t total_size)
1492 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1493 uint64_t subtitle_size = 0;
1494 uint64_t data_size = 0;
1495 float percent = -1.0;
1499 for (i = 0; i < nb_output_streams; i++) {
1500 OutputStream *ost = output_streams[i];
1501 switch (ost->enc_ctx->codec_type) {
1502 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1503 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1504 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1505 default: other_size += ost->data_size; break;
1507 extra_size += ost->enc_ctx->extradata_size;
1508 data_size += ost->data_size;
1509 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1510 != AV_CODEC_FLAG_PASS1)
1514 if (data_size && total_size>0 && total_size >= data_size)
1515 percent = 100.0 * (total_size - data_size) / data_size;
1517 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1518 video_size / 1024.0,
1519 audio_size / 1024.0,
1520 subtitle_size / 1024.0,
1521 other_size / 1024.0,
1522 extra_size / 1024.0);
1524 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1526 av_log(NULL, AV_LOG_INFO, "unknown");
1527 av_log(NULL, AV_LOG_INFO, "\n");
1529 /* print verbose per-stream stats */
1530 for (i = 0; i < nb_input_files; i++) {
1531 InputFile *f = input_files[i];
1532 uint64_t total_packets = 0, total_size = 0;
1534 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1535 i, f->ctx->filename);
1537 for (j = 0; j < f->nb_streams; j++) {
1538 InputStream *ist = input_streams[f->ist_index + j];
1539 enum AVMediaType type = ist->dec_ctx->codec_type;
1541 total_size += ist->data_size;
1542 total_packets += ist->nb_packets;
1544 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1545 i, j, media_type_string(type));
1546 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1547 ist->nb_packets, ist->data_size);
1549 if (ist->decoding_needed) {
1550 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1551 ist->frames_decoded);
1552 if (type == AVMEDIA_TYPE_AUDIO)
1553 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1554 av_log(NULL, AV_LOG_VERBOSE, "; ");
1557 av_log(NULL, AV_LOG_VERBOSE, "\n");
1560 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1561 total_packets, total_size);
1564 for (i = 0; i < nb_output_files; i++) {
1565 OutputFile *of = output_files[i];
1566 uint64_t total_packets = 0, total_size = 0;
1568 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1569 i, of->ctx->filename);
1571 for (j = 0; j < of->ctx->nb_streams; j++) {
1572 OutputStream *ost = output_streams[of->ost_index + j];
1573 enum AVMediaType type = ost->enc_ctx->codec_type;
1575 total_size += ost->data_size;
1576 total_packets += ost->packets_written;
1578 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1579 i, j, media_type_string(type));
1580 if (ost->encoding_needed) {
1581 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1582 ost->frames_encoded);
1583 if (type == AVMEDIA_TYPE_AUDIO)
1584 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1585 av_log(NULL, AV_LOG_VERBOSE, "; ");
1588 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1589 ost->packets_written, ost->data_size);
1591 av_log(NULL, AV_LOG_VERBOSE, "\n");
1594 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1595 total_packets, total_size);
1597 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1598 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1600 av_log(NULL, AV_LOG_WARNING, "\n");
1602 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1607 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1610 AVBPrint buf_script;
1612 AVFormatContext *oc;
1614 AVCodecContext *enc;
1615 int frame_number, vid, i;
1618 int64_t pts = INT64_MIN + 1;
1619 static int64_t last_time = -1;
1620 static int qp_histogram[52];
1621 int hours, mins, secs, us;
1625 if (!print_stats && !is_last_report && !progress_avio)
1628 if (!is_last_report) {
1629 if (last_time == -1) {
1630 last_time = cur_time;
1633 if ((cur_time - last_time) < 500000)
1635 last_time = cur_time;
1638 t = (cur_time-timer_start) / 1000000.0;
1641 oc = output_files[0]->ctx;
1643 total_size = avio_size(oc->pb);
1644 if (total_size <= 0) // FIXME improve avio_size() so it works with non-seekable output too
1645 total_size = avio_tell(oc->pb);
1649 av_bprint_init(&buf_script, 0, 1);
1650 for (i = 0; i < nb_output_streams; i++) {
1652 ost = output_streams[i];
1654 if (!ost->stream_copy)
1655 q = ost->quality / (float) FF_QP2LAMBDA;
1657 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1658 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1659 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1660 ost->file_index, ost->index, q);
1662 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1665 frame_number = ost->frame_number;
1666 fps = t > 1 ? frame_number / t : 0;
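/* "fps < 9.95" evaluates to 1 or 0 and is used as the printf precision, so a
 * decimal place is shown only while the rate is still in single digits. */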
1667 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1668 frame_number, fps < 9.95, fps, q);
1669 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1670 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1671 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1672 ost->file_index, ost->index, q);
1674 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1678 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1680 for (j = 0; j < 32; j++)
1681 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1684 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1686 double error, error_sum = 0;
1687 double scale, scale_sum = 0;
1689 char type[3] = { 'Y','U','V' };
1690 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
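/* enc->error[] accumulates squared error over the whole run, so the final
 * report also divides by frame_number; per-frame reports use ost->error[]. */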
1691 for (j = 0; j < 3; j++) {
1692 if (is_last_report) {
1693 error = enc->error[j];
1694 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1696 error = ost->error[j];
1697 scale = enc->width * enc->height * 255.0 * 255.0;
1703 p = psnr(error / scale);
1704 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1705 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1706 ost->file_index, ost->index, type[j] | 32, p);
1708 p = psnr(error_sum / scale_sum);
1709 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1710 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1711 ost->file_index, ost->index, p);
1715 /* track the latest output timestamp (max end pts across output streams) */
1716 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1717 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1718 ost->st->time_base, AV_TIME_BASE_Q));
1720 nb_frames_drop += ost->last_dropped;
1723 secs = FFABS(pts) / AV_TIME_BASE;
1724 us = FFABS(pts) % AV_TIME_BASE;
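/* pts is in microseconds; dividing by 1000.0 gives milliseconds, so
 * total_size * 8 / (pts / 1000.0) below comes out in kbit/s. */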
1730 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1731 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1733 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1735 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1736 "size=%8.0fkB time=", total_size / 1024.0);
1738 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1739 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1740 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1741 (100 * us) / AV_TIME_BASE);
1744 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1745 av_bprintf(&buf_script, "bitrate=N/A\n");
1747 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1748 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1751 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1752 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1753 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1754 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1755 hours, mins, secs, us);
1757 if (nb_frames_dup || nb_frames_drop)
1758 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1759 nb_frames_dup, nb_frames_drop);
1760 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1761 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1764 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1765 av_bprintf(&buf_script, "speed=N/A\n");
1767 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1768 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1771 if (print_stats || is_last_report) {
1772 const char end = is_last_report ? '\n' : '\r';
1773 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1774 fprintf(stderr, "%s %c", buf, end);
1776 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1781 if (progress_avio) {
1782 av_bprintf(&buf_script, "progress=%s\n",
1783 is_last_report ? "end" : "continue");
1784 avio_write(progress_avio, buf_script.str,
1785 FFMIN(buf_script.len, buf_script.size - 1));
1786 avio_flush(progress_avio);
1787 av_bprint_finalize(&buf_script, NULL);
1788 if (is_last_report) {
1789 if ((ret = avio_closep(&progress_avio)) < 0)
1790 av_log(NULL, AV_LOG_ERROR,
1791 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1796 print_final_stats(total_size);
1799 static void flush_encoders(void)
1803 for (i = 0; i < nb_output_streams; i++) {
1804 OutputStream *ost = output_streams[i];
1805 AVCodecContext *enc = ost->enc_ctx;
1806 OutputFile *of = output_files[ost->file_index];
1808 if (!ost->encoding_needed)
1811 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1813 #if FF_API_LAVF_FMT_RAWPICTURE
1814 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1818 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1821 avcodec_send_frame(enc, NULL);
1824 const char *desc = NULL;
1828 switch (enc->codec_type) {
1829 case AVMEDIA_TYPE_AUDIO:
1832 case AVMEDIA_TYPE_VIDEO:
1839 av_init_packet(&pkt);
1843 update_benchmark(NULL);
1844 ret = avcodec_receive_packet(enc, &pkt);
1845 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1846 if (ret < 0 && ret != AVERROR_EOF) {
1847 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1852 if (ost->logfile && enc->stats_out) {
1853 fprintf(ost->logfile, "%s", enc->stats_out);
1855 if (ret == AVERROR_EOF) {
1858 if (ost->finished & MUXER_FINISHED) {
1859 av_packet_unref(&pkt);
1862 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1863 pkt_size = pkt.size;
1864 output_packet(of, &pkt, ost);
1865 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1866 do_video_stats(ost, pkt_size);
1873 * Check whether a packet from ist should be written into ost at this time
1875 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1877 OutputFile *of = output_files[ost->file_index];
1878 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1880 if (ost->source_index != ist_index)
1886 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1892 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1894 OutputFile *of = output_files[ost->file_index];
1895 InputFile *f = input_files [ist->file_index];
1896 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1897 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1901 av_init_packet(&opkt);
1903 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1904 !ost->copy_initial_nonkeyframes)
1907 if (!ost->frame_number && !ost->copy_prior_start) {
1908 int64_t comp_start = start_time;
1909 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1910 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1911 if (pkt->pts == AV_NOPTS_VALUE ?
1912 ist->pts < comp_start :
1913 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1917 if (of->recording_time != INT64_MAX &&
1918 ist->pts >= of->recording_time + start_time) {
1919 close_output_stream(ost);
1923 if (f->recording_time != INT64_MAX) {
1924 start_time = f->ctx->start_time;
1925 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1926 start_time += f->start_time;
1927 if (ist->pts >= f->recording_time + start_time) {
1928 close_output_stream(ost);
1933 /* force the input stream PTS */
1934 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1937 if (pkt->pts != AV_NOPTS_VALUE)
1938 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1940 opkt.pts = AV_NOPTS_VALUE;
1942 if (pkt->dts == AV_NOPTS_VALUE)
1943 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1945 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1946 opkt.dts -= ost_tb_start_time;
1948 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1949 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1951 duration = ist->dec_ctx->frame_size;
1952 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1953 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1954 ost->st->time_base) - ost_tb_start_time;
1957 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1958 opkt.flags = pkt->flags;
1959 // FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
1960 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1961 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1962 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1963 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1965 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1966 &opkt.data, &opkt.size,
1967 pkt->data, pkt->size,
1968 pkt->flags & AV_PKT_FLAG_KEY);
1970 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1975 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1980 opkt.data = pkt->data;
1981 opkt.size = pkt->size;
1983 av_copy_packet_side_data(&opkt, pkt);
1985 #if FF_API_LAVF_FMT_RAWPICTURE
1986 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1987 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1988 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1989 /* store AVPicture in AVPacket, as expected by the output format */
1990 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1992 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1996 opkt.data = (uint8_t *)&pict;
1997 opkt.size = sizeof(AVPicture);
1998 opkt.flags |= AV_PKT_FLAG_KEY;
2002 output_packet(of, &opkt, ost);
2005 int guess_input_channel_layout(InputStream *ist)
2007 AVCodecContext *dec = ist->dec_ctx;
2009 if (!dec->channel_layout) {
2010 char layout_name[256];
2012 if (dec->channels > ist->guess_layout_max)
2014 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2015 if (!dec->channel_layout)
2017 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2018 dec->channels, dec->channel_layout);
2019 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2020 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2025 static void check_decode_result(InputStream *ist, int *got_output, int ret)
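/* decode_error_stat[0] counts frames decoded successfully, [1] counts decode
 * errors; the ratio is later checked against the -max_error_rate option. */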
2027 if (*got_output || ret<0)
2028 decode_error_stat[ret<0] ++;
2030 if (ret < 0 && exit_on_error)
2033 if (exit_on_error && *got_output && ist) {
2034 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2035 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2041 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2042 // There is the following difference: if you got a frame, you must call
2043 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2044 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2045 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2052 ret = avcodec_send_packet(avctx, pkt);
2053 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2054 // decoded frames with avcodec_receive_frame() until done.
2055 if (ret < 0 && ret != AVERROR_EOF)
2059 ret = avcodec_receive_frame(avctx, frame);
2060 if (ret < 0 && ret != AVERROR(EAGAIN))
2068 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2073 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2074 for (i = 0; i < ist->nb_filters; i++) {
2075 if (i < ist->nb_filters - 1) {
2076 f = ist->filter_frame;
2077 ret = av_frame_ref(f, decoded_frame);
2082 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2083 AV_BUFFERSRC_FLAG_PUSH);
2084 if (ret == AVERROR_EOF)
2085 ret = 0; /* ignore */
2087 av_log(NULL, AV_LOG_ERROR,
2088 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2095 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2097 AVFrame *decoded_frame;
2098 AVCodecContext *avctx = ist->dec_ctx;
2099 int i, ret, err = 0, resample_changed;
2100 AVRational decoded_frame_tb;
2102 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2103 return AVERROR(ENOMEM);
2104 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2105 return AVERROR(ENOMEM);
2106 decoded_frame = ist->decoded_frame;
2108 update_benchmark(NULL);
2109 ret = decode(avctx, decoded_frame, got_output, pkt);
2110 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2112 if (ret >= 0 && avctx->sample_rate <= 0) {
2113 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2114 ret = AVERROR_INVALIDDATA;
2117 if (ret != AVERROR_EOF)
2118 check_decode_result(ist, got_output, ret);
2120 if (!*got_output || ret < 0)
2123 ist->samples_decoded += decoded_frame->nb_samples;
2124 ist->frames_decoded++;
2127 /* advance next_pts/next_dts for the case where the input stream does not
2128 have timestamps or there are multiple frames in the packet */
2129 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2131 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2135 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2136 ist->resample_channels != avctx->channels ||
2137 ist->resample_channel_layout != decoded_frame->channel_layout ||
2138 ist->resample_sample_rate != decoded_frame->sample_rate;
2139 if (resample_changed) {
2140 char layout1[64], layout2[64];
2142 if (!guess_input_channel_layout(ist)) {
2143 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2144 "layout for Input Stream #%d.%d\n", ist->file_index,
2148 decoded_frame->channel_layout = avctx->channel_layout;
2150 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2151 ist->resample_channel_layout);
2152 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2153 decoded_frame->channel_layout);
2155 av_log(NULL, AV_LOG_INFO,
2156 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2157 ist->file_index, ist->st->index,
2158 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2159 ist->resample_channels, layout1,
2160 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2161 avctx->channels, layout2);
2163 ist->resample_sample_fmt = decoded_frame->format;
2164 ist->resample_sample_rate = decoded_frame->sample_rate;
2165 ist->resample_channel_layout = decoded_frame->channel_layout;
2166 ist->resample_channels = avctx->channels;
2168 for (i = 0; i < ist->nb_filters; i++) {
2169 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2171 av_log(NULL, AV_LOG_ERROR,
2172 "Error reconfiguring input stream %d:%d filter %d\n",
2173 ist->file_index, ist->st->index, i);
2178 for (i = 0; i < nb_filtergraphs; i++)
2179 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2180 FilterGraph *fg = filtergraphs[i];
2181 if (configure_filtergraph(fg) < 0) {
2182 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2188 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2189 decoded_frame_tb = ist->st->time_base;
2190 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2191 decoded_frame->pts = pkt->pts;
2192 decoded_frame_tb = ist->st->time_base;
2194 decoded_frame->pts = ist->dts;
2195 decoded_frame_tb = AV_TIME_BASE_Q;
2197 if (decoded_frame->pts != AV_NOPTS_VALUE)
2198 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2199 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2200 (AVRational){1, avctx->sample_rate});
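/* av_rescale_delta() converts the pts from decoded_frame_tb (the stream time
 * base, or AV_TIME_BASE_Q for the ist->dts fallback) into a 1/sample_rate
 * time base while carrying the rounding remainder across calls in
 * filter_in_rescale_delta_last, so consecutive audio frames stay
 * sample-accurate instead of accumulating rounding drift. */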
2201 ist->nb_samples = decoded_frame->nb_samples;
2202 err = send_frame_to_filters(ist, decoded_frame);
2203 decoded_frame->pts = AV_NOPTS_VALUE;
2206 av_frame_unref(ist->filter_frame);
2207 av_frame_unref(decoded_frame);
2208 return err < 0 ? err : ret;
2211 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2213 AVFrame *decoded_frame;
2214 int i, ret = 0, err = 0, resample_changed;
2215 int64_t best_effort_timestamp;
2216 int64_t dts = AV_NOPTS_VALUE;
2219 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2220 // reason. This seems like a semi-critical bug. Don't trigger EOF, and skip the packet.
2222 if (!eof && pkt && pkt->size == 0)
2225 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2226 return AVERROR(ENOMEM);
2227 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2228 return AVERROR(ENOMEM);
2229 decoded_frame = ist->decoded_frame;
2230 if (ist->dts != AV_NOPTS_VALUE)
2231 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2234 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2237 // The old code used to set dts on the drain packet, which does not work
2238 // with the new API anymore.
2240 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2242 return AVERROR(ENOMEM);
2243 ist->dts_buffer = new;
2244 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
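/* The queued dts values serve as fallback timestamps: while draining
 * (pkt == NULL) the decoder can still return buffered frames that carry no
 * usable best-effort timestamp, and those frames are stamped with the oldest
 * dts_buffer entry further below. */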
2247 update_benchmark(NULL);
2248 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2249 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2251 // The following line may be required in some cases where there is no parser
2252 // or the parser does not set has_b_frames correctly.
2253 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2254 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2255 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2257 av_log(ist->dec_ctx, AV_LOG_WARNING,
2258 "video_delay is larger in decoder than demuxer %d > %d.\n"
2259 "If you want to help, upload a sample "
2260 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2261 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2262 ist->dec_ctx->has_b_frames,
2263 ist->st->codecpar->video_delay);
2266 if (ret != AVERROR_EOF)
2267 check_decode_result(ist, got_output, ret);
2269 if (*got_output && ret >= 0) {
2270 if (ist->dec_ctx->width != decoded_frame->width ||
2271 ist->dec_ctx->height != decoded_frame->height ||
2272 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2273 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2274 decoded_frame->width,
2275 decoded_frame->height,
2276 decoded_frame->format,
2277 ist->dec_ctx->width,
2278 ist->dec_ctx->height,
2279 ist->dec_ctx->pix_fmt);
2283 if (!*got_output || ret < 0)
2286 if (ist->top_field_first >= 0)
2287 decoded_frame->top_field_first = ist->top_field_first;
2289 ist->frames_decoded++;
2291 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2292 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2296 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2298 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2300 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2301 best_effort_timestamp = ist->dts_buffer[0];
2303 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2304 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2305 ist->nb_dts_buffer--;
2308 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2309 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2311 if (ts != AV_NOPTS_VALUE)
2312 ist->next_pts = ist->pts = ts;
2316 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2317 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2318 ist->st->index, av_ts2str(decoded_frame->pts),
2319 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2320 best_effort_timestamp,
2321 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2322 decoded_frame->key_frame, decoded_frame->pict_type,
2323 ist->st->time_base.num, ist->st->time_base.den);
2326 if (ist->st->sample_aspect_ratio.num)
2327 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2329 resample_changed = ist->resample_width != decoded_frame->width ||
2330 ist->resample_height != decoded_frame->height ||
2331 ist->resample_pix_fmt != decoded_frame->format;
2332 if (resample_changed) {
2333 av_log(NULL, AV_LOG_INFO,
2334 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2335 ist->file_index, ist->st->index,
2336 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2337 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2339 ist->resample_width = decoded_frame->width;
2340 ist->resample_height = decoded_frame->height;
2341 ist->resample_pix_fmt = decoded_frame->format;
2343 for (i = 0; i < ist->nb_filters; i++) {
2344 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2346 av_log(NULL, AV_LOG_ERROR,
2347 "Error reconfiguring input stream %d:%d filter %d\n",
2348 ist->file_index, ist->st->index, i);
2353 for (i = 0; i < nb_filtergraphs; i++) {
2354 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2355 configure_filtergraph(filtergraphs[i]) < 0) {
2356 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2362 err = send_frame_to_filters(ist, decoded_frame);
2365 av_frame_unref(ist->filter_frame);
2366 av_frame_unref(decoded_frame);
2367 return err < 0 ? err : ret;
2370 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2372 AVSubtitle subtitle;
2373 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2374 &subtitle, got_output, pkt);
2376 check_decode_result(NULL, got_output, ret);
2378 if (ret < 0 || !*got_output) {
2380 sub2video_flush(ist);
2384 if (ist->fix_sub_duration) {
2386 if (ist->prev_sub.got_output) {
2387 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2388 1000, AV_TIME_BASE);
2389 if (end < ist->prev_sub.subtitle.end_display_time) {
2390 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2391 "Subtitle duration reduced from %d to %d%s\n",
2392 ist->prev_sub.subtitle.end_display_time, end,
2393 end <= 0 ? ", dropping it" : "");
2394 ist->prev_sub.subtitle.end_display_time = end;
2397 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2398 FFSWAP(int, ret, ist->prev_sub.ret);
2399 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
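/* fix_sub_duration works with a one-subtitle delay: the three FFSWAPs park
 * the freshly decoded subtitle in ist->prev_sub and emit the previous one
 * instead, after clamping its end_display_time (in milliseconds, hence the
 * av_rescale(..., 1000, AV_TIME_BASE)) so it cannot overlap the subtitle
 * that just arrived. */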
2407 sub2video_update(ist, &subtitle);
2409 if (!subtitle.num_rects)
2412 ist->frames_decoded++;
2414 for (i = 0; i < nb_output_streams; i++) {
2415 OutputStream *ost = output_streams[i];
2417 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2418 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2421 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2425 avsubtitle_free(&subtitle);
2429 static int send_filter_eof(InputStream *ist)
2432 for (i = 0; i < ist->nb_filters; i++) {
2433 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2440 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2441 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2445 int eof_reached = 0;
2448 if (!ist->saw_first_ts) {
2449 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2451 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2452 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2453 ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
2455 ist->saw_first_ts = 1;
2458 if (ist->next_dts == AV_NOPTS_VALUE)
2459 ist->next_dts = ist->dts;
2460 if (ist->next_pts == AV_NOPTS_VALUE)
2461 ist->next_pts = ist->pts;
2465 av_init_packet(&avpkt);
2472 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2473 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2474 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2475 ist->next_pts = ist->pts = ist->dts;
2478 // while we have more to decode or while the decoder did output something on EOF
2479 while (ist->decoding_needed) {
2483 ist->pts = ist->next_pts;
2484 ist->dts = ist->next_dts;
2486 switch (ist->dec_ctx->codec_type) {
2487 case AVMEDIA_TYPE_AUDIO:
2488 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2490 case AVMEDIA_TYPE_VIDEO:
2491 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2492 if (!repeating || !pkt || got_output) {
2493 if (pkt && pkt->duration) {
2494 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2495 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2496 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2497 duration = ((int64_t)AV_TIME_BASE *
2498 ist->dec_ctx->framerate.den * ticks) /
2499 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2502 if(ist->dts != AV_NOPTS_VALUE && duration) {
2503 ist->next_dts += duration;
2505 ist->next_dts = AV_NOPTS_VALUE;
2509 ist->next_pts += duration; //FIXME the duration is not correct in some cases
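/* When the packet carries no duration, the frame duration used above is
 * reconstructed from the codec-level frame rate: AV_TIME_BASE * framerate.den
 * * ticks / (framerate.num * ticks_per_frame), with ticks taken from the
 * parser's repeat_pict when a parser is present. For example, 25 fps with
 * ticks == ticks_per_frame gives 1000000 / 25 = 40000 us per frame. */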
2511 case AVMEDIA_TYPE_SUBTITLE:
2514 ret = transcode_subtitles(ist, &avpkt, &got_output);
2515 if (!pkt && ret >= 0)
2522 if (ret == AVERROR_EOF) {
2528 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2529 ist->file_index, ist->st->index, av_err2str(ret));
2532 // Decoding might not terminate if we're draining the decoder, and
2533 // the decoder keeps returning an error.
2534 // This should probably be considered a libavcodec issue.
2535 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2544 // During draining, we might get multiple output frames in this loop.
2545 // ffmpeg.c does not drain the filter chain on configuration changes,
2546 // which means if we send multiple frames at once to the filters, and
2547 // one of those frames changes configuration, the buffered frames will
2548 // be lost. This can upset certain FATE tests.
2549 // Decode only 1 frame per call on EOF to appease these FATE tests.
2550 // The ideal solution would be to rewrite decoding to use the new
2551 // decoding API in a better way.
2558 /* after flushing, send an EOF on all the filter inputs attached to the stream, */
2559 /* except when looping, where we need to flush but not send an EOF */
2560 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2561 int ret = send_filter_eof(ist);
2563 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2568 /* handle stream copy */
2569 if (!ist->decoding_needed) {
2570 ist->dts = ist->next_dts;
2571 switch (ist->dec_ctx->codec_type) {
2572 case AVMEDIA_TYPE_AUDIO:
2573 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2574 ist->dec_ctx->sample_rate;
2576 case AVMEDIA_TYPE_VIDEO:
2577 if (ist->framerate.num) {
2578 // TODO: Remove work-around for c99-to-c89 issue 7
2579 AVRational time_base_q = AV_TIME_BASE_Q;
2580 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2581 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2582 } else if (pkt->duration) {
2583 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2584 } else if(ist->dec_ctx->framerate.num != 0) {
2585 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2586 ist->next_dts += ((int64_t)AV_TIME_BASE *
2587 ist->dec_ctx->framerate.den * ticks) /
2588 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
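/* For stream copy the decoder never runs, so next_dts is advanced
 * synthetically: by the forced input frame rate if one was given, otherwise
 * by the packet duration, and as a last resort from the codec-level frame
 * rate, mirroring the formula used in the decoding path above. */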
2592 ist->pts = ist->dts;
2593 ist->next_pts = ist->next_dts;
2595 for (i = 0; pkt && i < nb_output_streams; i++) {
2596 OutputStream *ost = output_streams[i];
2598 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2601 do_streamcopy(ist, ost, pkt);
2604 return !eof_reached;
2607 static void print_sdp(void)
2612 AVIOContext *sdp_pb;
2613 AVFormatContext **avc;
2615 for (i = 0; i < nb_output_files; i++) {
2616 if (!output_files[i]->header_written)
2620 avc = av_malloc_array(nb_output_files, sizeof(*avc));
2623 for (i = 0, j = 0; i < nb_output_files; i++) {
2624 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2625 avc[j] = output_files[i]->ctx;
2633 av_sdp_create(avc, j, sdp, sizeof(sdp));
2635 if (!sdp_filename) {
2636 printf("SDP:\n%s\n", sdp);
2639 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2640 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2642 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2643 avio_closep(&sdp_pb);
2644 av_freep(&sdp_filename);
2652 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2655 for (i = 0; hwaccels[i].name; i++)
2656 if (hwaccels[i].pix_fmt == pix_fmt)
2657 return &hwaccels[i];
2661 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2663 InputStream *ist = s->opaque;
2664 const enum AVPixelFormat *p;
2667 for (p = pix_fmts; *p != -1; p++) {
2668 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2669 const HWAccel *hwaccel;
2671 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2674 hwaccel = get_hwaccel(*p);
2676 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2677 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2680 ret = hwaccel->init(s);
2682 if (ist->hwaccel_id == hwaccel->id) {
2683 av_log(NULL, AV_LOG_FATAL,
2684 "%s hwaccel requested for input stream #%d:%d, "
2685 "but cannot be initialized.\n", hwaccel->name,
2686 ist->file_index, ist->st->index);
2687 return AV_PIX_FMT_NONE;
2692 if (ist->hw_frames_ctx) {
2693 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2694 if (!s->hw_frames_ctx)
2695 return AV_PIX_FMT_NONE;
2698 ist->active_hwaccel_id = hwaccel->id;
2699 ist->hwaccel_pix_fmt = *p;
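/* In short: the decoder offers its candidate pixel formats in order of
 * preference, and this callback picks the first hardware format whose
 * hwaccel matches what the user requested (or any hwaccel with -hwaccel auto)
 * and whose init() succeeds; if none qualifies, the loop falls through to the
 * default software selection in the elided tail of the function. */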
2706 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2708 InputStream *ist = s->opaque;
2710 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2711 return ist->hwaccel_get_buffer(s, frame, flags);
2713 return avcodec_default_get_buffer2(s, frame, flags);
2716 static int init_input_stream(int ist_index, char *error, int error_len)
2719 InputStream *ist = input_streams[ist_index];
2721 for (i = 0; i < ist->nb_filters; i++) {
2722 ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
2724 av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
2729 if (ist->decoding_needed) {
2730 AVCodec *codec = ist->dec;
2732 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2733 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2734 return AVERROR(EINVAL);
2737 ist->dec_ctx->opaque = ist;
2738 ist->dec_ctx->get_format = get_format;
2739 ist->dec_ctx->get_buffer2 = get_buffer;
2740 ist->dec_ctx->thread_safe_callbacks = 1;
2742 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2743 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2744 (ist->decoding_needed & DECODING_FOR_OST)) {
2745 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2746 if (ist->decoding_needed & DECODING_FOR_FILTER)
2747 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2750 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2752 /* Useful for subtitle retiming by lavf (FIXME), for skipping samples in
2753 * audio, and for video decoders such as cuvid or mediacodec */
2754 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
2756 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2757 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2758 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2759 if (ret == AVERROR_EXPERIMENTAL)
2760 abort_codec_experimental(codec, 0);
2762 snprintf(error, error_len,
2763 "Error while opening decoder for input stream "
2765 ist->file_index, ist->st->index, av_err2str(ret));
2768 assert_avoptions(ist->decoder_opts);
2771 ist->next_pts = AV_NOPTS_VALUE;
2772 ist->next_dts = AV_NOPTS_VALUE;
2777 static InputStream *get_input_stream(OutputStream *ost)
2779 if (ost->source_index >= 0)
2780 return input_streams[ost->source_index];
2784 static int compare_int64(const void *a, const void *b)
2786 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2789 /* open the muxer when all the streams are initialized */
2790 static int check_init_output_file(OutputFile *of, int file_index)
2794 for (i = 0; i < of->ctx->nb_streams; i++) {
2795 OutputStream *ost = output_streams[of->ost_index + i];
2796 if (!ost->initialized)
2800 of->ctx->interrupt_callback = int_cb;
2802 ret = avformat_write_header(of->ctx, &of->opts);
2804 av_log(NULL, AV_LOG_ERROR,
2805 "Could not write header for output file #%d "
2806 "(incorrect codec parameters ?): %s\n",
2807 file_index, av_err2str(ret));
2810 //assert_avoptions(of->opts);
2811 of->header_written = 1;
2813 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2815 if (sdp_filename || want_sdp)
2818 /* flush the muxing queues */
2819 for (i = 0; i < of->ctx->nb_streams; i++) {
2820 OutputStream *ost = output_streams[of->ost_index + i];
2822 while (av_fifo_size(ost->muxing_queue)) {
2824 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2825 write_packet(of, &pkt, ost);
2832 static int init_output_bsfs(OutputStream *ost)
2837 if (!ost->nb_bitstream_filters)
2840 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2841 ctx = ost->bsf_ctx[i];
2843 ret = avcodec_parameters_copy(ctx->par_in,
2844 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2848 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2850 ret = av_bsf_init(ctx);
2852 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2853 ost->bsf_ctx[i]->filter->name);
2858 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2859 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2863 ost->st->time_base = ctx->time_base_out;
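/* The bitstream filters form a simple pipeline: filter i takes its input
 * parameters and time base from filter i-1 (or from the output stream for
 * the first one), and once every av_bsf_init() has succeeded the parameters
 * and time base of the last filter become those of the muxed stream.
 * Conceptually:
 *
 *     st->codecpar -> bsf_ctx[0] -> bsf_ctx[1] -> ... -> bsf_ctx[n-1] -> st->codecpar
 */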
2868 static int init_output_stream_streamcopy(OutputStream *ost)
2870 OutputFile *of = output_files[ost->file_index];
2871 InputStream *ist = get_input_stream(ost);
2872 AVCodecParameters *par_dst = ost->st->codecpar;
2873 AVCodecParameters *par_src = ost->ref_par;
2876 uint32_t codec_tag = par_dst->codec_tag;
2878 av_assert0(ist && !ost->filter);
2880 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2882 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2884 av_log(NULL, AV_LOG_FATAL,
2885 "Error setting up codec context options.\n");
2888 avcodec_parameters_from_context(par_src, ost->enc_ctx);
2891 unsigned int codec_tag_tmp;
2892 if (!of->ctx->oformat->codec_tag ||
2893 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2894 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
2895 codec_tag = par_src->codec_tag;
2898 ret = avcodec_parameters_copy(par_dst, par_src);
2902 par_dst->codec_tag = codec_tag;
2904 if (!ost->frame_rate.num)
2905 ost->frame_rate = ist->framerate;
2906 ost->st->avg_frame_rate = ost->frame_rate;
2908 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2912 // copy timebase while removing common factors
2913 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
2914 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
2916 // copy estimated duration as a hint to the muxer
2917 if (ost->st->duration <= 0 && ist->st->duration > 0)
2918 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
2921 ost->st->disposition = ist->st->disposition;
2923 if (ist->st->nb_side_data) {
2924 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2925 sizeof(*ist->st->side_data));
2926 if (!ost->st->side_data)
2927 return AVERROR(ENOMEM);
2929 ost->st->nb_side_data = 0;
2930 for (i = 0; i < ist->st->nb_side_data; i++) {
2931 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2932 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2934 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2937 sd_dst->data = av_malloc(sd_src->size);
2939 return AVERROR(ENOMEM);
2940 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2941 sd_dst->size = sd_src->size;
2942 sd_dst->type = sd_src->type;
2943 ost->st->nb_side_data++;
2947 ost->parser = av_parser_init(par_dst->codec_id);
2948 ost->parser_avctx = avcodec_alloc_context3(NULL);
2949 if (!ost->parser_avctx)
2950 return AVERROR(ENOMEM);
2952 switch (par_dst->codec_type) {
2953 case AVMEDIA_TYPE_AUDIO:
2954 if (audio_volume != 256) {
2955 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2958 if ((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2959 par_dst->block_align = 0;
2960 if (par_dst->codec_id == AV_CODEC_ID_AC3)
2961 par_dst->block_align = 0;
2963 case AVMEDIA_TYPE_VIDEO:
2964 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2966 av_mul_q(ost->frame_aspect_ratio,
2967 (AVRational){ par_dst->height, par_dst->width });
2968 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2969 "with stream copy may produce invalid files\n");
2971 else if (ist->st->sample_aspect_ratio.num)
2972 sar = ist->st->sample_aspect_ratio;
2974 sar = par_src->sample_aspect_ratio;
2975 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2976 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2977 ost->st->r_frame_rate = ist->st->r_frame_rate;
2984 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2986 AVDictionaryEntry *e;
2988 uint8_t *encoder_string;
2989 int encoder_string_len;
2990 int format_flags = 0;
2991 int codec_flags = 0;
2993 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2996 e = av_dict_get(of->opts, "fflags", NULL, 0);
2998 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3001 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3003 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3005 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3008 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3011 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3012 encoder_string = av_mallocz(encoder_string_len);
3013 if (!encoder_string)
3016 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3017 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3019 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3020 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3021 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3022 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3025 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3026 AVCodecContext *avctx)
3029 int n = 1, i, size, index = 0;
3032 for (p = kf; *p; p++)
3036 pts = av_malloc_array(size, sizeof(*pts));
3038 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3043 for (i = 0; i < n; i++) {
3044 char *next = strchr(p, ',');
3049 if (!memcmp(p, "chapters", 8)) {
3051 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3054 if (avf->nb_chapters > INT_MAX - size ||
3055 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3057 av_log(NULL, AV_LOG_FATAL,
3058 "Could not allocate forced key frames array.\n");
3061 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3062 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3064 for (j = 0; j < avf->nb_chapters; j++) {
3065 AVChapter *c = avf->chapters[j];
3066 av_assert1(index < size);
3067 pts[index++] = av_rescale_q(c->start, c->time_base,
3068 avctx->time_base) + t;
3073 t = parse_time_or_die("force_key_frames", p, 1);
3074 av_assert1(index < size);
3075 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3082 av_assert0(index == size);
3083 qsort(pts, size, sizeof(*pts), compare_int64);
3084 ost->forced_kf_count = size;
3085 ost->forced_kf_pts = pts;
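/* Summary of the parsing above: the comma count determines the initial array
 * size, each "chapters[+offset]" entry is expanded into one timestamp per
 * chapter (rescaled from the chapter time base into the encoder time base),
 * plain entries go through parse_time_or_die(), and the final qsort() keeps
 * forced_kf_pts in ascending order for the encoder-side keyframe check. */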
3088 static int init_output_stream_encode(OutputStream *ost)
3090 InputStream *ist = get_input_stream(ost);
3091 AVCodecContext *enc_ctx = ost->enc_ctx;
3092 AVCodecContext *dec_ctx = NULL;
3093 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3096 set_encoder_id(output_files[ost->file_index], ost);
3099 ost->st->disposition = ist->st->disposition;
3101 dec_ctx = ist->dec_ctx;
3103 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3105 for (j = 0; j < oc->nb_streams; j++) {
3106 AVStream *st = oc->streams[j];
3107 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3110 if (j == oc->nb_streams)
3111 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3112 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3113 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3116 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3117 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3118 filtergraph_is_simple(ost->filter->graph)) {
3119 FilterGraph *fg = ost->filter->graph;
3121 if (configure_filtergraph(fg)) {
3122 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3127 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3128 if (!ost->frame_rate.num)
3129 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3130 if (ist && !ost->frame_rate.num)
3131 ost->frame_rate = ist->framerate;
3132 if (ist && !ost->frame_rate.num)
3133 ost->frame_rate = ist->st->r_frame_rate;
3134 if (ist && !ost->frame_rate.num) {
3135 ost->frame_rate = (AVRational){25, 1};
3136 av_log(NULL, AV_LOG_WARNING,
3138 "about the input framerate is available. Falling "
3139 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3140 "if you want a different framerate.\n",
3141 ost->file_index, ost->index);
3143 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3144 if (ost->enc->supported_framerates && !ost->force_fps) {
3145 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3146 ost->frame_rate = ost->enc->supported_framerates[idx];
3148 // reduce frame rate for mpeg4 to be within the spec limits
3149 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3150 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3151 ost->frame_rate.num, ost->frame_rate.den, 65535);
3155 switch (enc_ctx->codec_type) {
3156 case AVMEDIA_TYPE_AUDIO:
3157 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3159 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3160 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3161 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3162 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3163 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3164 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3166 case AVMEDIA_TYPE_VIDEO:
3167 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3168 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3169 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3170 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3171 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3172 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3173 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3175 for (j = 0; j < ost->forced_kf_count; j++)
3176 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3178 enc_ctx->time_base);
3180 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3181 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3182 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3183 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3184 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3185 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3186 if (!strncmp(ost->enc->name, "libx264", 7) &&
3187 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3188 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3189 av_log(NULL, AV_LOG_WARNING,
3190 "No pixel format specified, %s for H.264 encoding chosen.\n"
3191 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3192 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3193 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3194 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3195 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3196 av_log(NULL, AV_LOG_WARNING,
3197 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3198 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3199 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3200 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3202 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3203 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3205 ost->st->avg_frame_rate = ost->frame_rate;
3208 enc_ctx->width != dec_ctx->width ||
3209 enc_ctx->height != dec_ctx->height ||
3210 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3211 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3214 if (ost->forced_keyframes) {
3215 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3216 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3217 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3219 av_log(NULL, AV_LOG_ERROR,
3220 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3223 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3224 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3225 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3226 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3228 // Don't parse 'forced_keyframes' in the 'keep-source-keyframes' case;
3229 // parse it only for static keyframe timings.
3230 } else if (strncmp(ost->forced_keyframes, "source", 6)) {
3231 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3235 case AVMEDIA_TYPE_SUBTITLE:
3236 enc_ctx->time_base = (AVRational){1, 1000};
3237 if (!enc_ctx->width) {
3238 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3239 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3242 case AVMEDIA_TYPE_DATA:
3252 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3256 if (ost->encoding_needed) {
3257 AVCodec *codec = ost->enc;
3258 AVCodecContext *dec = NULL;
3261 ret = init_output_stream_encode(ost);
3265 if ((ist = get_input_stream(ost)))
3267 if (dec && dec->subtitle_header) {
3268 /* ASS code assumes this buffer is null terminated so add extra byte. */
3269 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3270 if (!ost->enc_ctx->subtitle_header)
3271 return AVERROR(ENOMEM);
3272 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3273 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3275 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3276 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3277 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3279 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3280 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3281 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3283 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3284 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3285 if (!ost->enc_ctx->hw_frames_ctx)
3286 return AVERROR(ENOMEM);
3289 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3290 if (ret == AVERROR_EXPERIMENTAL)
3291 abort_codec_experimental(codec, 1);
3292 snprintf(error, error_len,
3293 "Error while opening encoder for output stream #%d:%d - "
3294 "maybe incorrect parameters such as bit_rate, rate, width or height",
3295 ost->file_index, ost->index);
3298 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3299 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3300 av_buffersink_set_frame_size(ost->filter->filter,
3301 ost->enc_ctx->frame_size);
3302 assert_avoptions(ost->encoder_opts);
3303 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3304 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3305 " It takes bits/s as argument, not kbits/s\n");
3307 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3309 av_log(NULL, AV_LOG_FATAL,
3310 "Error initializing the output stream codec context.\n");
3314 * FIXME: ost->st->codec shouldn't be needed here anymore.
3316 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3320 if (ost->enc_ctx->nb_coded_side_data) {
3323 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3324 sizeof(*ost->st->side_data));
3325 if (!ost->st->side_data)
3326 return AVERROR(ENOMEM);
3328 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3329 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3330 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3332 sd_dst->data = av_malloc(sd_src->size);
3334 return AVERROR(ENOMEM);
3335 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3336 sd_dst->size = sd_src->size;
3337 sd_dst->type = sd_src->type;
3338 ost->st->nb_side_data++;
3342 // copy timebase while removing common factors
3343 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3344 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3346 // copy estimated duration as a hint to the muxer
3347 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3348 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3350 ost->st->codec->codec= ost->enc_ctx->codec;
3351 } else if (ost->stream_copy) {
3352 ret = init_output_stream_streamcopy(ost);
3357 * FIXME: the codec context used by the parser during streamcopy is set up here.
3358 * This should go away with the new parser API.
3360 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3365 // parse the user-provided disposition and update the stream values
3366 if (ost->disposition) {
3367 static const AVOption opts[] = {
3368 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3369 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3370 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3371 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3372 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3373 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3374 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3375 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3376 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3377 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3378 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3379 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3380 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3381 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3384 static const AVClass class = {
3386 .item_name = av_default_item_name,
3388 .version = LIBAVUTIL_VERSION_INT,
3390 const AVClass *pclass = &class;
3392 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3397 /* initialize bitstream filters for the output stream
3398 * needs to be done here, because the codec id for streamcopy is not
3399 * known until now */
3400 ret = init_output_bsfs(ost);
3404 ost->initialized = 1;
3406 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3413 static void report_new_stream(int input_index, AVPacket *pkt)
3415 InputFile *file = input_files[input_index];
3416 AVStream *st = file->ctx->streams[pkt->stream_index];
3418 if (pkt->stream_index < file->nb_streams_warn)
3420 av_log(file->ctx, AV_LOG_WARNING,
3421 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3422 av_get_media_type_string(st->codecpar->codec_type),
3423 input_index, pkt->stream_index,
3424 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3425 file->nb_streams_warn = pkt->stream_index + 1;
3428 static int transcode_init(void)
3430 int ret = 0, i, j, k;
3431 AVFormatContext *oc;
3434 char error[1024] = {0};
3436 for (i = 0; i < nb_filtergraphs; i++) {
3437 FilterGraph *fg = filtergraphs[i];
3438 for (j = 0; j < fg->nb_outputs; j++) {
3439 OutputFilter *ofilter = fg->outputs[j];
3440 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3442 if (fg->nb_inputs != 1)
3444 for (k = nb_input_streams-1; k >= 0 ; k--)
3445 if (fg->inputs[0]->ist == input_streams[k])
3447 ofilter->ost->source_index = k;
3451 /* init framerate emulation */
3452 for (i = 0; i < nb_input_files; i++) {
3453 InputFile *ifile = input_files[i];
3454 if (ifile->rate_emu)
3455 for (j = 0; j < ifile->nb_streams; j++)
3456 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3459 /* hwaccel transcoding */
3460 for (i = 0; i < nb_output_streams; i++) {
3461 ost = output_streams[i];
3463 if (!ost->stream_copy) {
3465 if (qsv_transcode_init(ost))
3470 if (cuvid_transcode_init(ost))
3476 /* init input streams */
3477 for (i = 0; i < nb_input_streams; i++)
3478 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3479 for (i = 0; i < nb_output_streams; i++) {
3480 ost = output_streams[i];
3481 avcodec_close(ost->enc_ctx);
3486 /* open each encoder */
3487 for (i = 0; i < nb_output_streams; i++) {
3488 ret = init_output_stream(output_streams[i], error, sizeof(error));
3493 /* discard unused programs */
3494 for (i = 0; i < nb_input_files; i++) {
3495 InputFile *ifile = input_files[i];
3496 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3497 AVProgram *p = ifile->ctx->programs[j];
3498 int discard = AVDISCARD_ALL;
3500 for (k = 0; k < p->nb_stream_indexes; k++)
3501 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3502 discard = AVDISCARD_DEFAULT;
3505 p->discard = discard;
3509 /* write headers for files with no streams */
3510 for (i = 0; i < nb_output_files; i++) {
3511 oc = output_files[i]->ctx;
3512 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3513 ret = check_init_output_file(output_files[i], i);
3520 /* dump the stream mapping */
3521 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3522 for (i = 0; i < nb_input_streams; i++) {
3523 ist = input_streams[i];
3525 for (j = 0; j < ist->nb_filters; j++) {
3526 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3527 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3528 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3529 ist->filters[j]->name);
3530 if (nb_filtergraphs > 1)
3531 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3532 av_log(NULL, AV_LOG_INFO, "\n");
3537 for (i = 0; i < nb_output_streams; i++) {
3538 ost = output_streams[i];
3540 if (ost->attachment_filename) {
3541 /* an attached file */
3542 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3543 ost->attachment_filename, ost->file_index, ost->index);
3547 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3548 /* output from a complex graph */
3549 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3550 if (nb_filtergraphs > 1)
3551 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3553 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3554 ost->index, ost->enc ? ost->enc->name : "?");
3558 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3559 input_streams[ost->source_index]->file_index,
3560 input_streams[ost->source_index]->st->index,
3563 if (ost->sync_ist != input_streams[ost->source_index])
3564 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3565 ost->sync_ist->file_index,
3566 ost->sync_ist->st->index);
3567 if (ost->stream_copy)
3568 av_log(NULL, AV_LOG_INFO, " (copy)");
3570 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3571 const AVCodec *out_codec = ost->enc;
3572 const char *decoder_name = "?";
3573 const char *in_codec_name = "?";
3574 const char *encoder_name = "?";
3575 const char *out_codec_name = "?";
3576 const AVCodecDescriptor *desc;
3579 decoder_name = in_codec->name;
3580 desc = avcodec_descriptor_get(in_codec->id);
3582 in_codec_name = desc->name;
3583 if (!strcmp(decoder_name, in_codec_name))
3584 decoder_name = "native";
3588 encoder_name = out_codec->name;
3589 desc = avcodec_descriptor_get(out_codec->id);
3591 out_codec_name = desc->name;
3592 if (!strcmp(encoder_name, out_codec_name))
3593 encoder_name = "native";
3596 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3597 in_codec_name, decoder_name,
3598 out_codec_name, encoder_name);
3600 av_log(NULL, AV_LOG_INFO, "\n");
3604 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3608 transcode_init_done = 1;
3613 /* Return 1 if there are streams for which more output is wanted, 0 otherwise. */
3614 static int need_output(void)
3618 for (i = 0; i < nb_output_streams; i++) {
3619 OutputStream *ost = output_streams[i];
3620 OutputFile *of = output_files[ost->file_index];
3621 AVFormatContext *os = output_files[ost->file_index]->ctx;
3623 if (ost->finished ||
3624 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3626 if (ost->frame_number >= ost->max_frames) {
3628 for (j = 0; j < of->ctx->nb_streams; j++)
3629 close_output_stream(output_streams[of->ost_index + j]);
3640 * Select the output stream to process.
3642 * @return selected output stream, or NULL if none available
3644 static OutputStream *choose_output(void)
3647 int64_t opts_min = INT64_MAX;
3648 OutputStream *ost_min = NULL;
3650 for (i = 0; i < nb_output_streams; i++) {
3651 OutputStream *ost = output_streams[i];
3652 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3653 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3655 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3656 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3658 if (!ost->finished && opts < opts_min) {
3660 ost_min = ost->unavailable ? NULL : ost;
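/* The selection criterion is the smallest dts muxed so far: each stream's
 * cur_dts is rescaled into a common time base (the elided target of the
 * av_rescale_q() above, presumably AV_TIME_BASE_Q), and the least-advanced
 * stream that is still available is processed next, which keeps the outputs
 * roughly interleaved. */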
3666 static void set_tty_echo(int on)
3670 if (tcgetattr(0, &tty) == 0) {
3671 if (on) tty.c_lflag |= ECHO;
3672 else tty.c_lflag &= ~ECHO;
3673 tcsetattr(0, TCSANOW, &tty);
3678 static int check_keyboard_interaction(int64_t cur_time)
3681 static int64_t last_time;
3682 if (received_nb_signals)
3683 return AVERROR_EXIT;
3684 /* read_key() returns 0 on EOF */
3685 if(cur_time - last_time >= 100000 && !run_as_daemon){
3687 last_time = cur_time;
3691 return AVERROR_EXIT;
3692 if (key == '+') av_log_set_level(av_log_get_level()+10);
3693 if (key == '-') av_log_set_level(av_log_get_level()-10);
3694 if (key == 's') qp_hist ^= 1;
3697 do_hex_dump = do_pkt_dump = 0;
3698 } else if(do_pkt_dump){
3702 av_log_set_level(AV_LOG_DEBUG);
3704 if (key == 'c' || key == 'C'){
3705 char buf[4096], target[64], command[256], arg[256] = {0};
3708 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3711 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3716 fprintf(stderr, "\n");
3718 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3719 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3720 target, time, command, arg);
3721 for (i = 0; i < nb_filtergraphs; i++) {
3722 FilterGraph *fg = filtergraphs[i];
3725 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3726 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3727 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3728 } else if (key == 'c') {
3729 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3730 ret = AVERROR_PATCHWELCOME;
3732 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3734 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3739 av_log(NULL, AV_LOG_ERROR,
3740 "Parse error, at least 3 arguments were expected, "
3741 "only %d given in string '%s'\n", n, buf);
3744 if (key == 'd' || key == 'D'){
3747 debug = input_streams[0]->st->codec->debug<<1;
3748 if(!debug) debug = 1;
3749 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3756 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3761 fprintf(stderr, "\n");
3762 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3763 fprintf(stderr,"error parsing debug value\n");
3765 for(i=0;i<nb_input_streams;i++) {
3766 input_streams[i]->st->codec->debug = debug;
3768 for(i=0;i<nb_output_streams;i++) {
3769 OutputStream *ost = output_streams[i];
3770 ost->enc_ctx->debug = debug;
3772 if(debug) av_log_set_level(AV_LOG_DEBUG);
3773 fprintf(stderr,"debug=%d\n", debug);
3776 fprintf(stderr, "key function\n"
3777 "? show this help\n"
3778 "+ increase verbosity\n"
3779 "- decrease verbosity\n"
3780 "c Send command to first matching filter supporting it\n"
3781 "C Send/Queue command to all matching filters\n"
3782 "D cycle through available debug modes\n"
3783 "h dump packets/hex press to cycle through the 3 states\n"
3785 "s Show QP histogram\n"
3792 static void *input_thread(void *arg)
3795 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3800 ret = av_read_frame(f->ctx, &pkt);
3802 if (ret == AVERROR(EAGAIN)) {
3807 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3810 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3811 if (flags && ret == AVERROR(EAGAIN)) {
3813 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3814 av_log(f->ctx, AV_LOG_WARNING,
3815 "Thread message queue blocking; consider raising the "
3816 "thread_queue_size option (current value: %d)\n",
3817 f->thread_queue_size);
3820 if (ret != AVERROR_EOF)
3821 av_log(f->ctx, AV_LOG_ERROR,
3822 "Unable to send packet to main thread: %s\n",
3824 av_packet_unref(&pkt);
3825 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3833 static void free_input_threads(void)
3837 for (i = 0; i < nb_input_files; i++) {
3838 InputFile *f = input_files[i];
3841 if (!f || !f->in_thread_queue)
3843 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3844 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3845 av_packet_unref(&pkt);
3847 pthread_join(f->thread, NULL);
3849 av_thread_message_queue_free(&f->in_thread_queue);
3853 static int init_input_threads(void)
3857 if (nb_input_files == 1)
3860 for (i = 0; i < nb_input_files; i++) {
3861 InputFile *f = input_files[i];
3863 if (f->ctx->pb ? !f->ctx->pb->seekable :
3864 strcmp(f->ctx->iformat->name, "lavfi"))
3865 f->non_blocking = 1;
3866 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3867 f->thread_queue_size, sizeof(AVPacket));
3871 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3872 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3873 av_thread_message_queue_free(&f->in_thread_queue);
3874 return AVERROR(ret);
3880 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3882 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3884 AV_THREAD_MESSAGE_NONBLOCK : 0);
3888 static int get_input_packet(InputFile *f, AVPacket *pkt)
3892 for (i = 0; i < f->nb_streams; i++) {
3893 InputStream *ist = input_streams[f->ist_index + i];
3894 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3895 int64_t now = av_gettime_relative() - ist->start;
3897 return AVERROR(EAGAIN);
3902 if (nb_input_files > 1)
3903 return get_input_packet_mt(f, pkt);
3905 return av_read_frame(f->ctx, pkt);
3908 static int got_eagain(void)
3911 for (i = 0; i < nb_output_streams; i++)
3912 if (output_streams[i]->unavailable)
3917 static void reset_eagain(void)
3920 for (i = 0; i < nb_input_files; i++)
3921 input_files[i]->eagain = 0;
3922 for (i = 0; i < nb_output_streams; i++)
3923 output_streams[i]->unavailable = 0;
3926 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3927 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3928 AVRational time_base)
3934 return tmp_time_base;
3937 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3940 return tmp_time_base;
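/* A small worked example of the comparison: with *duration = 1500 in a
 * 1/1000 time base and tmp = 90000 in 1/90000 (i.e. 1.5 s vs 1.0 s),
 * av_compare_ts() reports that the existing duration is larger, so it is
 * kept along with its time base; only when tmp represents the longer
 * interval is it adopted and tmp_time_base returned, per the function's
 * header comment. */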
3946 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3949 AVCodecContext *avctx;
3950 int i, ret, has_audio = 0;
3951 int64_t duration = 0;
3953 ret = av_seek_frame(is, -1, is->start_time, 0);
3957 for (i = 0; i < ifile->nb_streams; i++) {
3958 ist = input_streams[ifile->ist_index + i];
3959 avctx = ist->dec_ctx;
3962 if (ist->decoding_needed) {
3963 process_input_packet(ist, NULL, 1);
3964 avcodec_flush_buffers(avctx);
3967 /* duration is the length of the last frame in a stream;
3968 * when an audio stream is present we don't care about the
3969 * last video frame's length because it is not defined exactly */
3970 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3974 for (i = 0; i < ifile->nb_streams; i++) {
3975 ist = input_streams[ifile->ist_index + i];
3976 avctx = ist->dec_ctx;
3979 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3980 AVRational sample_rate = {1, avctx->sample_rate};
3982 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3986 if (ist->framerate.num) {
3987 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3988 } else if (ist->st->avg_frame_rate.num) {
3989 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3990 } else duration = 1;
3992 if (!ifile->duration)
3993 ifile->time_base = ist->st->time_base;
3994 /* max_pts - min_pts is the duration of the stream without the last frame;
3995 * adding the last frame's duration gives the total duration of the stream */
3996 duration += ist->max_pts - ist->min_pts;
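/* The ifile->duration updated just below is what keeps -stream_loop
 * timestamps monotonically increasing: process_input() later adds
 * av_rescale_q(ifile->duration, ifile->time_base, st->time_base) to every
 * packet's pts/dts, and because max_pts already includes the previous
 * shifts, the offset grows by roughly one input duration per pass. */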
3997 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4001 if (ifile->loop > 0)
4009 * - 0 -- one packet was read and processed
4010 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4011 * this function should be called again
4012 * - AVERROR_EOF -- this function should not be called again
4014 static int process_input(int file_index)
4016 InputFile *ifile = input_files[file_index];
4017 AVFormatContext *is;
4025 ret = get_input_packet(ifile, &pkt);
4027 if (ret == AVERROR(EAGAIN)) {
4031 if (ret < 0 && ifile->loop) {
4032 if ((ret = seek_to_start(ifile, is)) < 0)
4034 ret = get_input_packet(ifile, &pkt);
4035 if (ret == AVERROR(EAGAIN)) {
4041 if (ret != AVERROR_EOF) {
4042 print_error(is->filename, ret);
4047 for (i = 0; i < ifile->nb_streams; i++) {
4048 ist = input_streams[ifile->ist_index + i];
4049 if (ist->decoding_needed) {
4050 ret = process_input_packet(ist, NULL, 0);
4055 /* mark all outputs that don't go through lavfi as finished */
4056 for (j = 0; j < nb_output_streams; j++) {
4057 OutputStream *ost = output_streams[j];
4059 if (ost->source_index == ifile->ist_index + i &&
4060 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4061 finish_output_stream(ost);
4065 ifile->eof_reached = 1;
4066 return AVERROR(EAGAIN);
4072 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4073 is->streams[pkt.stream_index]);
4075 /* the following test is needed in case new streams appear
4076 dynamically in the stream; we ignore them */
4077 if (pkt.stream_index >= ifile->nb_streams) {
4078 report_new_stream(file_index, &pkt);
4079 goto discard_packet;
4082 ist = input_streams[ifile->ist_index + pkt.stream_index];
4084 ist->data_size += pkt.size;
4088 goto discard_packet;
4090 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4091 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4096 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4097 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4098 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4099 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4100 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4101 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4102 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4103 av_ts2str(input_files[ist->file_index]->ts_offset),
4104 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4107 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4108 int64_t stime, stime2;
4109 // Correct the start time based on the enabled streams.
4110 // FIXME: this ideally should be done before the first use of starttime, but we do not know
4111 // which streams are enabled at that point, so we instead do it here as part of discontinuity handling.
4112 if ( ist->next_dts == AV_NOPTS_VALUE
4113 && ifile->ts_offset == -is->start_time
4114 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4115 int64_t new_start_time = INT64_MAX;
4116 for (i=0; i<is->nb_streams; i++) {
4117 AVStream *st = is->streams[i];
4118 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4120 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4122 if (new_start_time > is->start_time) {
4123 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4124 ifile->ts_offset = -new_start_time;
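/* timestamps wrap around after 1<<pts_wrap_bits ticks; a dts/pts lying more
 * than half a wrap period above the stream start time is assumed to predate
 * the wrap, so one full period is subtracted to unwrap it */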
4128 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4129 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4130 ist->wrap_correction_done = 1;
4132 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4133 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4134 ist->wrap_correction_done = 0;
4136 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4137 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4138 ist->wrap_correction_done = 0;
4142 /* add the stream-global side data to the first packet */
4143 if (ist->nb_packets == 1) {
4144 if (ist->st->nb_side_data)
4145 av_packet_split_side_data(&pkt);
4146 for (i = 0; i < ist->st->nb_side_data; i++) {
4147 AVPacketSideData *src_sd = &ist->st->side_data[i];
4150 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4152 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4155 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4159 memcpy(dst_data, src_sd->data, src_sd->size);
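/* apply the input file's accumulated timestamp offset (set from -itsoffset and
 * start-time handling, and adjusted on discontinuities below) and then the
 * per-stream -itsscale factor */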
4163 if (pkt.dts != AV_NOPTS_VALUE)
4164 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4165 if (pkt.pts != AV_NOPTS_VALUE)
4166 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4168 if (pkt.pts != AV_NOPTS_VALUE)
4169 pkt.pts *= ist->ts_scale;
4170 if (pkt.dts != AV_NOPTS_VALUE)
4171 pkt.dts *= ist->ts_scale;
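/* first timestamp seen for this stream: for discontinuous formats (AVFMT_TS_DISCONT,
 * e.g. MPEG-TS), compare it against the last timestamp of the whole file and fold a
 * jump larger than -dts_delta_threshold seconds into ts_offset */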
4173 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4174 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4175 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4176 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4177 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4178 int64_t delta = pkt_dts - ifile->last_ts;
4179 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4180 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4181 ifile->ts_offset -= delta;
4182 av_log(NULL, AV_LOG_DEBUG,
4183 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4184 delta, ifile->ts_offset);
4185 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4186 if (pkt.pts != AV_NOPTS_VALUE)
4187 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
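/* when the input is looped with -stream_loop, ifile->duration holds the combined
 * length of the previous iterations; shift the timestamps by it so they keep
 * increasing monotonically across loops */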
4191 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4192 if (pkt.pts != AV_NOPTS_VALUE) {
4193 pkt.pts += duration;
4194 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4195 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4198 if (pkt.dts != AV_NOPTS_VALUE)
4199 pkt.dts += duration;
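/* second check: compare the packet DTS against the value predicted from the previous
 * packet (ist->next_dts); discontinuous formats get the jump absorbed into ts_offset,
 * otherwise timestamps that are off by more than -dts_error_threshold are dropped */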
4201 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4202 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4203 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4204 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4206 int64_t delta = pkt_dts - ist->next_dts;
4207 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4208 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4209 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4210 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4211 ifile->ts_offset -= delta;
4212 av_log(NULL, AV_LOG_DEBUG,
4213 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4214 delta, ifile->ts_offset);
4215 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4216 if (pkt.pts != AV_NOPTS_VALUE)
4217 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4220 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4221 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4222 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4223 pkt.dts = AV_NOPTS_VALUE;
4225 if (pkt.pts != AV_NOPTS_VALUE){
4226 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4227 delta = pkt_pts - ist->next_dts;
4228 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4229 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4230 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4231 pkt.pts = AV_NOPTS_VALUE;
4237 if (pkt.dts != AV_NOPTS_VALUE)
4238 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4241 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4242 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4243 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4244 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4245 av_ts2str(input_files[ist->file_index]->ts_offset),
4246 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
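/* keep any sub2video inputs ticking: this pushes rendered/blank subtitle frames up to
 * this packet's pts so the filtergraph does not stall waiting for subtitle data */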
4249 sub2video_heartbeat(ist, pkt.pts);
4251 process_input_packet(ist, &pkt, 0);
4254 av_packet_unref(&pkt);
4260 * Perform a step of transcoding for the specified filter graph.
4262 * @param[in] graph filter graph to consider
4263 * @param[out] best_ist input stream from which a frame would allow transcoding to continue
4264 * @return 0 for success, <0 for error
4266 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4269 int nb_requests, nb_requests_max = 0;
4270 InputFilter *ifilter;
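/* ask the filtergraph to produce a frame on its oldest output link: on success the
 * filtered frames are reaped below, on EOF the graph's outputs are flushed and closed */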
4274 ret = avfilter_graph_request_oldest(graph->graph);
4276 return reap_filters(0);
4278 if (ret == AVERROR_EOF) {
4279 ret = reap_filters(1);
4280 for (i = 0; i < graph->nb_outputs; i++)
4281 close_output_stream(graph->outputs[i]->ost);
4284 if (ret != AVERROR(EAGAIN))
4287 for (i = 0; i < graph->nb_inputs; i++) {
4288 ifilter = graph->inputs[i];
4290 if (input_files[ist->file_index]->eagain ||
4291 input_files[ist->file_index]->eof_reached)
4293 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4294 if (nb_requests > nb_requests_max) {
4295 nb_requests_max = nb_requests;
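/* if no input stream could be selected (all of this graph's inputs are at EOF or
 * temporarily unavailable), mark its outputs as unavailable for this step so
 * choose_output() skips them */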
4301 for (i = 0; i < graph->nb_outputs; i++)
4302 graph->outputs[i]->ost->unavailable = 1;
4308 * Run a single step of transcoding.
4310 * @return 0 for success, <0 for error
4312 static int transcode_step(void)
4318 ost = choose_output();
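/* choose_output() returns the non-finished, available output stream that is furthest
 * behind (smallest output timestamp), or NULL when there is nothing left to write */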
4325 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4330 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4335 av_assert0(ost->source_index >= 0);
4336 ist = input_streams[ost->source_index];
4339 ret = process_input(ist->file_index);
4340 if (ret == AVERROR(EAGAIN)) {
4341 if (input_files[ist->file_index]->eagain)
4342 ost->unavailable = 1;
4347 return ret == AVERROR_EOF ? 0 : ret;
4349 return reap_filters(0);
4353 * The following code is the main loop of the file converter
4355 static int transcode(void)
4358 AVFormatContext *os;
4361 int64_t timer_start;
4362 int64_t total_packets_written = 0;
4364 ret = transcode_init();
4368 if (stdin_interaction) {
4369 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4372 timer_start = av_gettime_relative();
4375 if ((ret = init_input_threads()) < 0)
4379 while (!received_sigterm) {
4380 int64_t cur_time= av_gettime_relative();
4382 /* if 'q' was pressed, exit */
4383 if (stdin_interaction)
4384 if (check_keyboard_interaction(cur_time) < 0)
4387 /* check if there's any stream where output is still needed */
4388 if (!need_output()) {
4389 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4393 ret = transcode_step();
4394 if (ret < 0 && ret != AVERROR_EOF) {
4396 av_strerror(ret, errbuf, sizeof(errbuf));
4398 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4402 /* dump report using the first video and audio output streams */
4403 print_report(0, timer_start, cur_time);
4406 free_input_threads();
4409 /* at the end of the stream, we must flush the decoder buffers */
4410 for (i = 0; i < nb_input_streams; i++) {
4411 ist = input_streams[i];
4412 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4413 process_input_packet(ist, NULL, 0);
4420 /* write the trailer if needed and close the file */
4421 for (i = 0; i < nb_output_files; i++) {
4422 os = output_files[i]->ctx;
4423 if (!output_files[i]->header_written) {
4424 av_log(NULL, AV_LOG_ERROR,
4425 "Nothing was written into output file %d (%s), because "
4426 "at least one of its streams received no packets.\n",
4430 if ((ret = av_write_trailer(os)) < 0) {
4431 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4437 /* dump report using the first video and audio streams */
4438 print_report(1, timer_start, av_gettime_relative());
4440 /* close each encoder */
4441 for (i = 0; i < nb_output_streams; i++) {
4442 ost = output_streams[i];
4443 if (ost->encoding_needed) {
4444 av_freep(&ost->enc_ctx->stats_in);
4446 total_packets_written += ost->packets_written;
4449 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4450 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4454 /* close each decoder */
4455 for (i = 0; i < nb_input_streams; i++) {
4456 ist = input_streams[i];
4457 if (ist->decoding_needed) {
4458 avcodec_close(ist->dec_ctx);
4459 if (ist->hwaccel_uninit)
4460 ist->hwaccel_uninit(ist->dec_ctx);
4464 av_buffer_unref(&hw_device_ctx);
4471 free_input_threads();
4474 if (output_streams) {
4475 for (i = 0; i < nb_output_streams; i++) {
4476 ost = output_streams[i];
4479 if (fclose(ost->logfile))
4480 av_log(NULL, AV_LOG_ERROR,
4481 "Error closing logfile, loss of information possible: %s\n",
4482 av_err2str(AVERROR(errno)));
4483 ost->logfile = NULL;
4485 av_freep(&ost->forced_kf_pts);
4486 av_freep(&ost->apad);
4487 av_freep(&ost->disposition);
4488 av_dict_free(&ost->encoder_opts);
4489 av_dict_free(&ost->sws_dict);
4490 av_dict_free(&ost->swr_opts);
4491 av_dict_free(&ost->resample_opts);
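/* user CPU time consumed by the process, in microseconds, using whichever API the
 * platform provides (getrusage, GetProcessTimes, or a wall-clock fallback) */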
4499 static int64_t getutime(void)
4502 struct rusage rusage;
4504 getrusage(RUSAGE_SELF, &rusage);
4505 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4506 #elif HAVE_GETPROCESSTIMES
4508 FILETIME c, e, k, u;
4509 proc = GetCurrentProcess();
4510 GetProcessTimes(proc, &c, &e, &k, &u);
4511 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4513 return av_gettime_relative();
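/* peak memory usage of the process in bytes: ru_maxrss from getrusage() where
 * available, PeakPagefileUsage on Windows, 0 when neither API exists */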
4517 static int64_t getmaxrss(void)
4519 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4520 struct rusage rusage;
4521 getrusage(RUSAGE_SELF, &rusage);
4522 return (int64_t)rusage.ru_maxrss * 1024;
4523 #elif HAVE_GETPROCESSMEMORYINFO
4525 PROCESS_MEMORY_COUNTERS memcounters;
4526 proc = GetCurrentProcess();
4527 memcounters.cb = sizeof(memcounters);
4528 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4529 return memcounters.PeakPagefileUsage;
4535 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4539 int main(int argc, char **argv)
4546 register_exit(ffmpeg_cleanup);
4548 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4550 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4551 parse_loglevel(argc, argv, options);
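/* a leading "-d" argument switches ffmpeg into daemon-like operation: terminal
 * interaction is disabled and the log callback is replaced with a no-op */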
4553 if(argc>1 && !strcmp(argv[1], "-d")){
4555 av_log_set_callback(log_callback_null);
4560 avcodec_register_all();
4562 avdevice_register_all();
4564 avfilter_register_all();
4566 avformat_network_init();
4568 show_banner(argc, argv, options);
4570 /* parse options and open all input/output files */
4571 ret = ffmpeg_parse_options(argc, argv);
4575 if (nb_output_files <= 0 && nb_input_files == 0) {
4577 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4581 /* file converter / grab */
4582 if (nb_output_files <= 0) {
4583 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4587 // if (nb_input_files == 0) {
4588 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
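/* an SDP description is printed later only if every output file uses the rtp muxer;
 * any other muxer clears want_sdp */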
4592 for (i = 0; i < nb_output_files; i++) {
4593 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4597 current_time = ti = getutime();
4598 if (transcode() < 0)
4600 ti = getutime() - ti;
4602 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4604 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4605 decode_error_stat[0], decode_error_stat[1]);
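/* fail hard when the fraction of frames that could not be decoded exceeds -max_error_rate */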
4606 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4609 exit_program(received_nb_signals ? 255 : main_return_code);
4610 return main_return_code;