2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banners and version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats); opened lazily. */
112 static FILE *vstats_file;
/* Constant names available in -force_key_frames expressions. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Runtime state: frame duplication/drop counters and error statistics. */
128 static int run_as_daemon = 0;
129 static int nb_frames_dup = 0;
130 static unsigned dup_warning = 1000;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
134 static int want_sdp = 1;
136 static int current_time;
137 AVIOContext *progress_avio = NULL;
/* Shared scratch buffer for encoded subtitles; freed in ffmpeg_cleanup(). */
139 static uint8_t *subtitle_out;
/* Global tables of all input/output streams and files, owned here. */
141 InputStream **input_streams = NULL;
142 int nb_input_streams = 0;
143 InputFile **input_files = NULL;
144 int nb_input_files = 0;
146 OutputStream **output_streams = NULL;
147 int nb_output_streams = 0;
148 OutputFile **output_files = NULL;
149 int nb_output_files = 0;
151 FilterGraph **filtergraphs;
156 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored on exit when restore_tty is set. */
157 static struct termios oldtty;
158 static int restore_tty;
162 static void free_input_threads(void);
166 Convert subtitles to video with alpha to insert them in filter graphs.
167 This is a temporary solution until libavfilter gets real subtitles support.
/**
 * Reset ist->sub2video.frame to a fully transparent RGB32 canvas sized to
 * the decoder dimensions (falling back to the configured sub2video size).
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
 */
170 static int sub2video_get_blank_frame(InputStream *ist)
173 AVFrame *frame = ist->sub2video.frame;
175 av_frame_unref(frame);
176 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
177 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
178 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
179 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero the buffer: RGB32 with all-zero bytes is fully transparent black. */
181 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/**
 * Blit one paletted bitmap subtitle rectangle into the RGB32 canvas.
 * Rejects non-bitmap rectangles and rectangles that do not fit inside
 * the w x h destination with a warning instead of writing out of bounds.
 */
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
188 uint32_t *pal, *dst2;
192 if (r->type != SUBTITLE_BITMAP) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
196 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
198 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel. */
203 dst += r->y * dst_linesize + r->x * 4;
/* data[1] holds the AVSubtitleRect palette (one uint32 per index). */
205 pal = (uint32_t *)r->data[1];
206 for (y = 0; y < r->h; y++) {
207 dst2 = (uint32_t *)dst;
/* Expand each palette index to its RGB32 value. */
209 for (x = 0; x < r->w; x++)
210 *(dst2++) = pal[*(src2++)];
212 src += r->linesize[0];
/**
 * Push the current sub2video frame (stamped with pts) into every filter
 * the input stream feeds.  KEEP_REF lets the same frame be re-sent later
 * (heartbeat); PUSH requests immediate processing by the filtergraph.
 */
216 static void sub2video_push_ref(InputStream *ist, int64_t pts)
218 AVFrame *frame = ist->sub2video.frame;
221 av_assert1(frame->data[0]);
222 ist->sub2video.last_pts = frame->pts = pts;
223 for (i = 0; i < ist->nb_filters; i++)
224 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
225 AV_BUFFERSRC_FLAG_KEEP_REF |
226 AV_BUFFERSRC_FLAG_PUSH);
/**
 * Render a decoded AVSubtitle onto a fresh blank canvas and push it to the
 * filters.  A NULL sub clears the canvas (used for heartbeat/flush).
 * Display times are converted from the AV_TIME_BASE domain into the input
 * stream's time base; end_pts is remembered so heartbeats know when the
 * subtitle expires.
 */
229 void sub2video_update(InputStream *ist, AVSubtitle *sub)
231 AVFrame *frame = ist->sub2video.frame;
235 int64_t pts, end_pts;
/* start/end_display_time are in ms relative to sub->pts (AV_TIME_BASE_Q). */
240 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
241 AV_TIME_BASE_Q, ist->st->time_base);
242 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
243 AV_TIME_BASE_Q, ist->st->time_base);
244 num_rects = sub->num_rects;
/* No subtitle: reuse the previous end as the pts of the blank frame. */
246 pts = ist->sub2video.end_pts;
250 if (sub2video_get_blank_frame(ist) < 0) {
251 av_log(ist->dec_ctx, AV_LOG_ERROR,
252 "Impossible to get a blank canvas.\n");
255 dst = frame->data [0];
256 dst_linesize = frame->linesize[0];
257 for (i = 0; i < num_rects; i++)
258 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
259 sub2video_push_ref(ist, pts);
260 ist->sub2video.end_pts = end_pts;
/**
 * Keep sub2video filter inputs fed while reading frames from the same file,
 * so filters (e.g. overlay) waiting on a subtitle input do not stall the
 * graph.  Re-sends the current subtitle frame at (approximately) the new
 * timestamp for every sub2video stream that is behind.
 */
263 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
265 InputFile *infile = input_files[ist->file_index];
269 /* When a frame is read from a file, examine all sub2video streams in
270 the same file and send the sub2video frame again. Otherwise, decoded
271 video frames could be accumulating in the filter graph while a filter
272 (possibly overlay) is desperately waiting for a subtitle frame. */
273 for (i = 0; i < infile->nb_streams; i++) {
274 InputStream *ist2 = input_streams[infile->ist_index + i];
275 if (!ist2->sub2video.frame)
277 /* subtitles seem to be usually muxed ahead of other streams;
278 if not, subtracting a larger time here is necessary */
279 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
280 /* do not send the heartbeat frame if the subtitle is already ahead */
281 if (pts2 <= ist2->sub2video.last_pts)
/* Subtitle expired (or no canvas yet): refresh with a blank/updated frame. */
283 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
284 sub2video_update(ist2, NULL);
285 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
286 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
288 sub2video_push_ref(ist2, pts2);
/**
 * Flush a sub2video input at EOF: clear any still-displayed subtitle, then
 * send NULL frames to signal EOF to every connected buffer source.
 */
292 static void sub2video_flush(InputStream *ist)
296 if (ist->sub2video.end_pts < INT64_MAX)
297 sub2video_update(ist, NULL);
298 for (i = 0; i < ist->nb_filters; i++)
299 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
302 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the saved tty attributes.
 * Must only call functions that are safe inside a signal handler. */
304 static void term_exit_sigsafe(void)
308 tcsetattr (0, TCSANOW, &oldtty);
314 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared between the signal handlers and the main
 * loop; volatile because they are written from signal context.
 * NOTE(review): plain volatile int, not sig_atomic_t/atomics — matches the
 * upstream code but is not formally race-free. */
318 static volatile int received_sigterm = 0;
319 static volatile int received_nb_signals = 0;
320 static volatile int transcode_init_done = 0;
321 static volatile int ffmpeg_exited = 0;
322 static int main_return_code = 0;
/* Termination-signal handler: record the signal and count occurrences.
 * After more than 3 signals, write() (async-signal-safe) a notice and
 * hard-exit rather than waiting for graceful cleanup. */
325 sigterm_handler(int sig)
327 received_sigterm = sig;
328 received_nb_signals++;
330 if(received_nb_signals > 3) {
331 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
332 strlen("Received > 3 system signals, hard exiting\n"));
338 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the code understands. */
339 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
341 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
346 case CTRL_BREAK_EVENT:
347 sigterm_handler(SIGINT);
350 case CTRL_CLOSE_EVENT:
351 case CTRL_LOGOFF_EVENT:
352 case CTRL_SHUTDOWN_EVENT:
353 sigterm_handler(SIGTERM);
354 /* Basically, with these 3 events, when we return from this method the
355 process is hard terminated, so stall as long as we need to
356 to try and let the main thread(s) clean up and gracefully terminate
357 (we have at most 5 seconds, but should be done far before that). */
358 while (!ffmpeg_exited) {
364 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* (interior of term_init) Put the controlling terminal into a raw-ish mode
 * so single keypresses can be read without echo, then install signal
 * handlers for graceful termination. */
373 if (!run_as_daemon && stdin_interaction) {
375 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so keys arrive unmodified. */
379 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
380 |INLCR|IGNCR|ICRNL|IXON);
381 tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
382 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
383 tty.c_cflag &= ~(CSIZE|PARENB);
388 tcsetattr (0, TCSANOW, &tty);
390 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
394 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
395 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
397 signal(SIGXCPU, sigterm_handler);
399 #if HAVE_SETCONSOLECTRLHANDLER
400 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404 /* read a key without blocking */
/* POSIX path uses select() with (presumably) a zero timeout; Windows path
 * uses PeekNamedPipe so reading from a pipe does not block either. */
405 static int read_key(void)
417 n = select(1, &rfds, NULL, NULL, &tv);
426 # if HAVE_PEEKNAMEDPIPE
428 static HANDLE input_handle;
431 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails on pipes — that is how we detect a non-console stdin. */
432 is_pipe = !GetConsoleMode(input_handle, &dw);
436 /* When running under a GUI, you will end here. */
437 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
438 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has arrived.
 * During init a single signal is enough; after init (transcode_init_done
 * set) a second signal is required, letting the first one trigger a
 * graceful shutdown instead. */
456 static int decode_interrupt_cb(void *ctx)
458 return received_nb_signals > transcode_init_done;
461 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/**
 * Global teardown: free every filtergraph (draining queued frames and
 * subtitles first), all output files/streams, all input files/streams,
 * close the vstats file, and log the exit reason.  Called once at exit;
 * `ret` is the pending process exit status.
 */
463 static void ffmpeg_cleanup(int ret)
468 int maxrss = getmaxrss() / 1024;
469 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: drop queued frames/subtitles, then the graphs --- */
472 for (i = 0; i < nb_filtergraphs; i++) {
473 FilterGraph *fg = filtergraphs[i];
474 avfilter_graph_free(&fg->graph);
475 for (j = 0; j < fg->nb_inputs; j++) {
476 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
478 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
479 sizeof(frame), NULL);
480 av_frame_free(&frame);
482 av_fifo_free(fg->inputs[j]->frame_queue);
483 if (fg->inputs[j]->ist->sub2video.sub_queue) {
484 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
486 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
487 &sub, sizeof(sub), NULL);
488 avsubtitle_free(&sub);
490 av_fifo_free(fg->inputs[j]->ist->sub2video.sub_queue);
492 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
493 av_freep(&fg->inputs[j]->name);
494 av_freep(&fg->inputs[j]);
496 av_freep(&fg->inputs);
497 for (j = 0; j < fg->nb_outputs; j++) {
498 av_freep(&fg->outputs[j]->name);
499 av_freep(&fg->outputs[j]->formats);
500 av_freep(&fg->outputs[j]->channel_layouts);
501 av_freep(&fg->outputs[j]->sample_rates);
502 av_freep(&fg->outputs[j]);
504 av_freep(&fg->outputs);
505 av_freep(&fg->graph_desc);
507 av_freep(&filtergraphs[i]);
509 av_freep(&filtergraphs);
511 av_freep(&subtitle_out);
/* --- output files: close muxer contexts and their option dicts --- */
514 for (i = 0; i < nb_output_files; i++) {
515 OutputFile *of = output_files[i];
520 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
522 avformat_free_context(s);
523 av_dict_free(&of->opts);
525 av_freep(&output_files[i]);
/* --- output streams: bitstream filters, frames, encoder contexts --- */
527 for (i = 0; i < nb_output_streams; i++) {
528 OutputStream *ost = output_streams[i];
533 for (j = 0; j < ost->nb_bitstream_filters; j++)
534 av_bsf_free(&ost->bsf_ctx[j]);
535 av_freep(&ost->bsf_ctx);
536 av_freep(&ost->bsf_extradata_updated);
538 av_frame_free(&ost->filtered_frame);
539 av_frame_free(&ost->last_frame);
540 av_dict_free(&ost->encoder_opts);
542 av_parser_close(ost->parser);
543 avcodec_free_context(&ost->parser_avctx);
545 av_freep(&ost->forced_keyframes);
546 av_expr_free(ost->forced_keyframes_pexpr);
547 av_freep(&ost->avfilter);
548 av_freep(&ost->logfile_prefix);
550 av_freep(&ost->audio_channels_map);
551 ost->audio_channels_mapped = 0;
553 av_dict_free(&ost->sws_dict);
555 avcodec_free_context(&ost->enc_ctx);
556 avcodec_parameters_free(&ost->ref_par);
/* Drain packets still waiting for the muxer header to be written. */
558 if (ost->muxing_queue) {
559 while (av_fifo_size(ost->muxing_queue)) {
561 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
562 av_packet_unref(&pkt);
564 av_fifo_freep(&ost->muxing_queue);
567 av_freep(&output_streams[i]);
570 free_input_threads();
572 for (i = 0; i < nb_input_files; i++) {
573 avformat_close_input(&input_files[i]->ctx);
574 av_freep(&input_files[i]);
576 for (i = 0; i < nb_input_streams; i++) {
577 InputStream *ist = input_streams[i];
579 av_frame_free(&ist->decoded_frame);
580 av_frame_free(&ist->filter_frame);
581 av_dict_free(&ist->decoder_opts);
582 avsubtitle_free(&ist->prev_sub.subtitle);
583 av_frame_free(&ist->sub2video.frame);
584 av_freep(&ist->filters);
585 av_freep(&ist->hwaccel_device);
586 av_freep(&ist->dts_buffer);
588 avcodec_free_context(&ist->dec_ctx);
590 av_freep(&input_streams[i]);
/* fclose() flushes; a failure here can mean lost vstats data. */
594 if (fclose(vstats_file))
595 av_log(NULL, AV_LOG_ERROR,
596 "Error closing vstats file, loss of information possible: %s\n",
597 av_err2str(AVERROR(errno)));
599 av_freep(&vstats_filename);
601 av_freep(&input_streams);
602 av_freep(&input_files);
603 av_freep(&output_streams);
604 av_freep(&output_files);
608 avformat_network_deinit();
610 if (received_sigterm) {
611 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
612 (int) received_sigterm);
613 } else if (ret && transcode_init_done) {
614 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/**
 * Remove from dictionary *a every key that also appears in b (exact-case
 * match).  Used to strip options already consumed by one component before
 * the leftover-option check runs on another.
 */
620 void remove_avoptions(AVDictionary **a, AVDictionary *b)
622 AVDictionaryEntry *t = NULL;
624 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
625 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/**
 * Fatal-log if the dictionary still contains any entry — i.e. an option the
 * user supplied was not recognized/consumed by any component.
 */
629 void assert_avoptions(AVDictionary *m)
631 AVDictionaryEntry *t;
632 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
633 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort with a hint when a codec is experimental and -strict was not relaxed. */
638 static void abort_codec_experimental(AVCodec *c, int encoder)
/**
 * With -benchmark_all, log the CPU time spent since the previous call,
 * labelled by the printf-style message; also refreshes current_time.
 * Pass fmt == NULL to only reset the reference timestamp.
 */
643 static void update_benchmark(const char *fmt, ...)
645 if (do_benchmark_all) {
646 int64_t t = getutime();
652 vsnprintf(buf, sizeof(buf), fmt, va);
654 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `this_stream` flags for ost itself,
 * `others` flags for all remaining streams (flags are OR-ed in). */
660 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
663 for (i = 0; i < nb_output_streams; i++) {
664 OutputStream *ost2 = output_streams[i];
665 ost2->finished |= ost == ost2 ? this_stream : others;
/**
 * Hand one encoded/copied packet to the muxer.
 * Before the header is written, packets are buffered in ost->muxing_queue
 * (growing it up to max_muxing_queue_size).  Otherwise: enforce -frames
 * limits, extract video quality side data, rescale timestamps into the
 * stream time base, repair invalid or non-monotonic DTS, then interleave-
 * write.  Takes ownership of pkt's payload (always unreferenced on return).
 */
669 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
671 AVFormatContext *s = of->ctx;
672 AVStream *st = ost->st;
675 if (!of->header_written) {
676 AVPacket tmp_pkt = {0};
677 /* the muxer is not initialized yet, buffer the packet */
678 if (!av_fifo_space(ost->muxing_queue)) {
/* Double the queue, clamped to the user-configurable maximum. */
679 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
680 ost->max_muxing_queue_size);
681 if (new_size <= av_fifo_size(ost->muxing_queue)) {
682 av_log(NULL, AV_LOG_ERROR,
683 "Too many packets buffered for output stream %d:%d.\n",
684 ost->file_index, ost->st->index);
687 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
691 ret = av_packet_ref(&tmp_pkt, pkt);
694 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
695 av_packet_unref(pkt);
/* -vsync drop / negative -async: discard timestamps entirely. */
699 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
700 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
701 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
704 * Audio encoders may split the packets -- #frames in != #packets out.
705 * But there is no reordering, so we can limit the number of output packets
706 * by simply dropping them here.
707 * Counting encoded video frames needs to be done separately because of
708 * reordering, see do_video_out()
710 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
711 if (ost->frame_number >= ost->max_frames) {
712 av_packet_unref(pkt);
/* Pull encoder quality/error stats out of the packet side data. */
717 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
719 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
721 ost->quality = sd ? AV_RL32(sd) : -1;
722 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
724 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
726 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: duration is dictated by the frame rate, not the packet. */
731 if (ost->frame_rate.num && ost->is_cfr) {
732 if (pkt->duration > 0)
733 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n")
;
734 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
739 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
741 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
742 if (pkt->dts != AV_NOPTS_VALUE &&
743 pkt->pts != AV_NOPTS_VALUE &&
744 pkt->dts > pkt->pts) {
745 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
747 ost->file_index, ost->st->index);
/* Replace dts by the median of {pts, dts, last_mux_dts+1}:
 * sum minus min minus max leaves the middle value. */
749 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
750 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
751 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
753 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
754 pkt->dts != AV_NOPTS_VALUE &&
755 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
756 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict muxers require strictly increasing DTS; non-strict allow equal. */
757 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
758 if (pkt->dts < max) {
759 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
760 av_log(s, loglevel, "Non-monotonous DTS in output stream "
761 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
762 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
764 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
767 av_log(s, loglevel, "changing to %"PRId64". This may result "
768 "in incorrect timestamps in the output file.\n",
770 if (pkt->pts >= pkt->dts)
771 pkt->pts = FFMAX(pkt->pts, max);
776 ost->last_mux_dts = pkt->dts;
778 ost->data_size += pkt->size;
779 ost->packets_written++;
781 pkt->stream_index = ost->index;
784 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
785 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
786 av_get_media_type_string(ost->enc_ctx->codec_type),
787 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
788 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
793 ret = av_interleaved_write_frame(s, pkt);
795 print_error("av_interleaved_write_frame()", ret);
796 main_return_code = 1;
797 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
799 av_packet_unref(pkt);
/**
 * Mark one output stream's encoder as finished and, where applicable,
 * clamp the owning file's recording time to the stream's current end so
 * the other streams stop at the same point.
 */
802 static void close_output_stream(OutputStream *ost)
804 OutputFile *of = output_files[ost->file_index];
806 ost->finished |= ENCODER_FINISHED;
808 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
809 of->recording_time = FFMIN(of->recording_time, end);
/**
 * Route a packet through the stream's bitstream-filter chain (if any) and
 * on to write_packet().  Filters may buffer (EAGAIN) or emit multiple
 * packets per input; EOF from a filter is not an error.
 */
813 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
817 /* apply the output bitstream filters, if any */
818 if (ost->nb_bitstream_filters) {
821 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
827 /* get a packet from the previous filter up the chain */
828 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
829 if (ret == AVERROR(EAGAIN)) {
835 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
836 * the api states this shouldn't happen after init(). Propagate it here to the
837 * muxer and to the next filters in the chain to workaround this.
838 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
839 * par_out->extradata and adapt muxers accordingly to get rid of this. */
840 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
841 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
844 ost->bsf_extradata_updated[idx - 1] |= 1;
847 /* send it to the next filter down the chain or to the muxer */
848 if (idx < ost->nb_bitstream_filters) {
849 /* HACK/FIXME! - See above */
850 if (!(ost->bsf_extradata_updated[idx] & 2)) {
851 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
854 ost->bsf_extradata_updated[idx] |= 2;
856 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
861 write_packet(of, pkt, ost);
/* No bitstream filters: pass the packet straight to the muxer. */
864 write_packet(of, pkt, ost);
867 if (ret < 0 && ret != AVERROR_EOF) {
868 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
869 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/**
 * Return whether the stream is still within the output file's -t limit.
 * Closes the stream (and clamps siblings via close_output_stream) once the
 * recording time has been reached.
 */
875 static int check_recording_time(OutputStream *ost)
877 OutputFile *of = output_files[ost->file_index];
879 if (of->recording_time != INT64_MAX &&
880 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
881 AV_TIME_BASE_Q) >= 0) {
882 close_output_stream(ost);
/**
 * Encode one filtered audio frame and send every resulting packet to the
 * muxer.  Uses the send-frame/receive-packet encoder API; sync_opts tracks
 * the running sample position used for pts generation.
 */
888 static void do_audio_out(OutputFile *of, OutputStream *ost,
891 AVCodecContext *enc = ost->enc_ctx;
895 av_init_packet(&pkt);
899 if (!check_recording_time(ost))
/* Without usable pts (or with negative -async), stamp from sample count. */
902 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
903 frame->pts = ost->sync_opts;
904 ost->sync_opts = frame->pts + frame->nb_samples;
905 ost->samples_encoded += frame->nb_samples;
906 ost->frames_encoded++;
908 av_assert0(pkt.size || !pkt.data);
909 update_benchmark(NULL);
911 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
912 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
913 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
914 enc->time_base.num, enc->time_base.den);
917 ret = avcodec_send_frame(enc, frame);
/* Drain all packets the encoder has ready; EAGAIN means "send more input". */
922 ret = avcodec_receive_packet(enc, &pkt);
923 if (ret == AVERROR(EAGAIN))
928 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
930 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
933 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
934 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
935 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
936 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
939 output_packet(of, &pkt, ost);
944 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/**
 * Encode one AVSubtitle and mux the result.  DVB subtitles are encoded
 * twice (one packet to draw, one to clear); timestamps are shifted to
 * honor -ss/-t and display times are normalized so start_display_time
 * is 0, as required by the subtitle encoders.
 */
948 static void do_subtitle_out(OutputFile *of,
952 int subtitle_out_max_size = 1024 * 1024;
953 int subtitle_out_size, nb, i;
958 if (sub->pts == AV_NOPTS_VALUE) {
959 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB output buffer (freed in cleanup). */
968 subtitle_out = av_malloc(subtitle_out_max_size);
970 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
975 /* Note: DVB subtitle need one packet to draw them and one other
976 packet to clear them */
977 /* XXX: signal it in the codec context ? */
978 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
983 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
985 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
986 pts -= output_files[ost->file_index]->start_time;
987 for (i = 0; i < nb; i++) {
988 unsigned save_num_rects = sub->num_rects;
990 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
991 if (!check_recording_time(ost))
995 // start_display_time is required to be 0
996 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
997 sub->end_display_time -= sub->start_display_time;
998 sub->start_display_time = 0;
1002 ost->frames_encoded++;
1004 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1005 subtitle_out_max_size, sub);
/* Restore num_rects (the DVB "clear" pass temporarily changes it). */
1007 sub->num_rects = save_num_rects;
1008 if (subtitle_out_size < 0) {
1009 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1013 av_init_packet(&pkt);
1014 pkt.data = subtitle_out;
1015 pkt.size = subtitle_out_size;
1016 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1017 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1018 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1019 /* XXX: the pts correction is handled here. Maybe handling
1020 it in the codec would be better */
1022 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1024 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1027 output_packet(of, &pkt, ost);
/**
 * Emit video frames for one filtered picture (or encoder flush when
 * next_picture is NULL).  Core of -vsync handling: computes how many times
 * the current/previous frame must be duplicated (nb_frames/nb0_frames) or
 * dropped to match the selected sync mode (CFR/VFR/VSCFR/passthrough/drop),
 * applies forced keyframes, encodes, and muxes the packets.
 */
1031 static void do_video_out(OutputFile *of,
1033 AVFrame *next_picture,
1036 int ret, format_video_sync;
1038 AVCodecContext *enc = ost->enc_ctx;
1039 AVCodecParameters *mux_par = ost->st->codecpar;
1040 AVRational frame_rate;
1041 int nb_frames, nb0_frames, i;
1042 double delta, delta0;
1043 double duration = 0;
1045 InputStream *ist = NULL;
1046 AVFilterContext *filter = ost->filter->filter;
1048 if (ost->source_index >= 0)
1049 ist = input_streams[ost->source_index];
/* Derive the nominal frame duration (in encoder time_base units). */
1051 frame_rate = av_buffersink_get_frame_rate(filter);
1052 if (frame_rate.num > 0 && frame_rate.den > 0)
1053 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1055 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1056 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1058 if (!ost->filters_script &&
1062 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1063 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1066 if (!next_picture) {
/* Flush: estimate duplication need from the recent history (median of 3). */
1068 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1069 ost->last_nb0_frames[1],
1070 ost->last_nb0_frames[2]);
1072 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1073 delta = delta0 + duration;
1075 /* by default, we output a single frame */
1076 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1079 format_video_sync = video_sync_method;
1080 if (format_video_sync == VSYNC_AUTO) {
1081 if(!strcmp(of->ctx->oformat->name, "avi")) {
1082 format_video_sync = VSYNC_VFR;
1084 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1086 && format_video_sync == VSYNC_CFR
1087 && input_files[ist->file_index]->ctx->nb_streams == 1
1088 && input_files[ist->file_index]->input_ts_offset == 0) {
1089 format_video_sync = VSYNC_VSCFR;
1091 if (format_video_sync == VSYNC_CFR && copy_ts) {
1092 format_video_sync = VSYNC_VSCFR;
1095 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1099 format_video_sync != VSYNC_PASSTHROUGH &&
1100 format_video_sync != VSYNC_DROP) {
1101 if (delta0 < -0.6) {
1102 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1104 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1105 sync_ipts = ost->sync_opts;
1110 switch (format_video_sync) {
1112 if (ost->frame_number == 0 && delta0 >= 0.5) {
1113 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1116 ost->sync_opts = lrint(sync_ipts);
1119 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1120 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1122 } else if (delta < -1.1)
1124 else if (delta > 1.1) {
1125 nb_frames = lrintf(delta);
1127 nb0_frames = lrintf(delta0 - 0.6);
1133 else if (delta > 0.6)
1134 ost->sync_opts = lrint(sync_ipts);
1137 case VSYNC_PASSTHROUGH:
1138 ost->sync_opts = lrint(sync_ipts);
/* Clamp to the -frames limit and record duplication history. */
1145 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1146 nb0_frames = FFMIN(nb0_frames, nb_frames);
1148 memmove(ost->last_nb0_frames + 1,
1149 ost->last_nb0_frames,
1150 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1151 ost->last_nb0_frames[0] = nb0_frames;
1153 if (nb0_frames == 0 && ost->last_dropped) {
1155 av_log(NULL, AV_LOG_VERBOSE,
1156 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1157 ost->frame_number, ost->st->index, ost->last_frame->pts);
1159 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1160 if (nb_frames > dts_error_threshold * 30) {
1161 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1165 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1166 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1167 if (nb_frames_dup > dup_warning) {
1168 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1172 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1174 /* duplicates frame if needed */
1175 for (i = 0; i < nb_frames; i++) {
1176 AVFrame *in_picture;
1177 av_init_packet(&pkt);
/* The first nb0_frames iterations re-encode the previous frame. */
1181 if (i < nb0_frames && ost->last_frame) {
1182 in_picture = ost->last_frame;
1184 in_picture = next_picture;
1189 in_picture->pts = ost->sync_opts;
1192 if (!check_recording_time(ost))
1194 if (ost->frame_number >= ost->max_frames)
1198 #if FF_API_LAVF_FMT_RAWPICTURE
1199 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1200 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1201 /* raw pictures are written as AVPicture structure to
1202 avoid any copies. We support temporarily the older
1204 if (in_picture->interlaced_frame)
1205 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1207 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1208 pkt.data = (uint8_t *)in_picture;
1209 pkt.size = sizeof(AVPicture);
1210 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1211 pkt.flags |= AV_PKT_FLAG_KEY;
1213 output_packet(of, &pkt, ost);
1217 int forced_keyframe = 0;
1220 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1221 ost->top_field_first >= 0)
1222 in_picture->top_field_first = !!ost->top_field_first;
1224 if (in_picture->interlaced_frame) {
1225 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1226 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1228 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1230 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1232 in_picture->quality = enc->global_quality;
1233 in_picture->pict_type = 0;
/* Decide whether this frame must be forced to a keyframe:
 * explicit -force_key_frames timestamps, an expression, or "source". */
1235 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1236 in_picture->pts * av_q2d(enc->time_base) : NAN;
1237 if (ost->forced_kf_index < ost->forced_kf_count &&
1238 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1239 ost->forced_kf_index++;
1240 forced_keyframe = 1;
1241 } else if (ost->forced_keyframes_pexpr) {
1243 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1244 res = av_expr_eval(ost->forced_keyframes_pexpr,
1245 ost->forced_keyframes_expr_const_values, NULL);
1246 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1247 ost->forced_keyframes_expr_const_values[FKF_N],
1248 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1249 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1250 ost->forced_keyframes_expr_const_values[FKF_T],
1251 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1254 forced_keyframe = 1;
1255 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1256 ost->forced_keyframes_expr_const_values[FKF_N];
1257 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1258 ost->forced_keyframes_expr_const_values[FKF_T];
1259 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1262 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1263 } else if ( ost->forced_keyframes
1264 && !strncmp(ost->forced_keyframes, "source", 6)
1265 && in_picture->key_frame==1) {
1266 forced_keyframe = 1;
1269 if (forced_keyframe) {
1270 in_picture->pict_type = AV_PICTURE_TYPE_I;
1271 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1274 update_benchmark(NULL);
1276 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1277 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1278 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1279 enc->time_base.num, enc->time_base.den);
1282 ost->frames_encoded++;
1284 ret = avcodec_send_frame(enc, in_picture);
/* Drain all packets the encoder can produce for this frame. */
1289 ret = avcodec_receive_packet(enc, &pkt);
1290 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1291 if (ret == AVERROR(EAGAIN))
1297 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1298 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1299 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1300 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1303 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1304 pkt.pts = ost->sync_opts;
1306 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1309 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1310 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1311 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1312 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1315 frame_size = pkt.size;
1316 output_packet(of, &pkt, ost);
1318 /* if two pass, output log */
1319 if (ost->logfile && enc->stats_out) {
1320 fprintf(ost->logfile, "%s", enc->stats_out);
1326 * For video, number of frames in == number of packets out.
1327 * But there may be reordering, so we can't throw away frames on encoder
1328 * flush, we need to limit them here, before they go into encoder.
1330 ost->frame_number++;
1332 if (vstats_filename && frame_size)
1333 do_video_stats(ost, frame_size);
/* Remember this frame so it can be re-encoded for duplication next call. */
1336 if (!ost->last_frame)
1337 ost->last_frame = av_frame_alloc();
1338 av_frame_unref(ost->last_frame);
1339 if (next_picture && ost->last_frame)
1340 av_frame_ref(ost->last_frame, next_picture);
1342 av_frame_free(&ost->last_frame);
1346 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1350 static double psnr(double d)
1352 return -10.0 * log10(d);
/* Append one line of per-frame video statistics to the -vstats log file.
 * The file is opened lazily on the first call; the output format depends
 * on vstats_version (v1 omits the out/st columns). */
1355 static void do_video_stats(OutputStream *ost, int frame_size)
1357 AVCodecContext *enc;
1359 double ti1, bitrate, avg_bitrate;
1361 /* this is executed just the first time do_video_stats is called */
1363 vstats_file = fopen(vstats_filename, "w");
1371 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
/* frame index comes from the muxer's frame counter, not the encoder's */
1372 frame_number = ost->st->nb_frames;
1373 if (vstats_version <= 1) {
1374 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1375 ost->quality / (float)FF_QP2LAMBDA);
/* version >= 2 additionally records output file and stream indices */
1377 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1378 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only meaningful when the encoder collected error stats */
1381 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1382 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1384 fprintf(vstats_file,"f_size= %6d ", frame_size);
1385 /* compute pts value */
1386 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate for this frame vs. average over the whole stream */
1390 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1391 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1392 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1393 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1394 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1398 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/* Mark an output stream — and every other stream in the same output
 * file — as fully finished for both the encoder and the muxer. */
1400 static void finish_output_stream(OutputStream *ost)
1402 OutputFile *of = output_files[ost->file_index];
1405 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* propagate the finished state to all sibling streams of this file */
1408 for (i = 0; i < of->ctx->nb_streams; i++)
1409 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1414 * Get and encode new output from any of the filtergraphs, without causing
 * activity in the demuxer/decoder direction (only already-buffered frames
 * are pulled from the buffer sinks; see AV_BUFFERSINK_FLAG_NO_REQUEST).
 *
 * @param flush if nonzero, treat EOF from a video sink as a cue to flush
 *              the video encoder with a NULL frame
1417 * @return 0 for success, <0 for severe errors
1419 static int reap_filters(int flush)
1421 AVFrame *filtered_frame = NULL;
1424 /* Reap all buffers present in the buffer sinks */
1425 for (i = 0; i < nb_output_streams; i++) {
1426 OutputStream *ost = output_streams[i];
1427 OutputFile *of = output_files[ost->file_index];
1428 AVFilterContext *filter;
1429 AVCodecContext *enc = ost->enc_ctx;
/* skip streams whose filtergraph is not (yet) configured */
1432 if (!ost->filter || !ost->filter->graph->graph)
1434 filter = ost->filter->filter;
/* lazily initialize the output stream the first time data arrives */
1436 if (!ost->initialized) {
1437 char error[1024] = "";
1438 ret = init_output_stream(ost, error, sizeof(error));
1440 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1441 ost->file_index, ost->index, error);
/* reusable frame for sink output, allocated on first use */
1446 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1447 return AVERROR(ENOMEM);
1449 filtered_frame = ost->filtered_frame;
1452 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1453 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1454 AV_BUFFERSINK_FLAG_NO_REQUEST);
1456 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1457 av_log(NULL, AV_LOG_WARNING,
1458 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1459 } else if (flush && ret == AVERROR_EOF) {
/* on flush, push a NULL frame so the video encoder drains */
1460 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1461 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1465 if (ost->finished) {
1466 av_frame_unref(filtered_frame);
/* rescale the frame pts from the filter timebase to the encoder
 * timebase, keeping a higher-precision double copy (float_pts) by
 * temporarily widening the timebase denominator by extra_bits */
1469 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1470 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1471 AVRational filter_tb = av_buffersink_get_time_base(filter);
1472 AVRational tb = enc->time_base;
1473 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1475 tb.den <<= extra_bits;
1477 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1478 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1479 float_pts /= 1 << extra_bits;
1480 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1481 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1483 filtered_frame->pts =
1484 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1485 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1487 //if (ost->source_index >= 0)
1488 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1490 switch (av_buffersink_get_type(filter)) {
1491 case AVMEDIA_TYPE_VIDEO:
/* unless the user forced an aspect ratio, take it from the filter */
1492 if (!ost->frame_aspect_ratio.num)
1493 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1496 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1497 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1499 enc->time_base.num, enc->time_base.den);
1502 do_video_out(of, ost, filtered_frame, float_pts);
1504 case AVMEDIA_TYPE_AUDIO:
/* reject channel-count changes the encoder cannot follow */
1505 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1506 enc->channels != av_frame_get_channels(filtered_frame)) {
1507 av_log(NULL, AV_LOG_ERROR,
1508 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1511 do_audio_out(of, ost, filtered_frame);
1514 // TODO support subtitle filters
1518 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type output byte counts and muxing
 * overhead at INFO level, then detailed per-stream demux/decode and
 * encode/mux statistics at VERBOSE level.  Warns when nothing at all
 * was written to the output.
 *
 * @param total_size total bytes written to the (first) output file, or
 *                   a negative/zero value when unknown */
1525 static void print_final_stats(int64_t total_size)
1527 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1528 uint64_t subtitle_size = 0;
1529 uint64_t data_size = 0;
1530 float percent = -1.0;
1534 for (i = 0; i < nb_output_streams; i++) {
1535 OutputStream *ost = output_streams[i];
1536 switch (ost->enc_ctx->codec_type) {
1537 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1538 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1539 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1540 default: other_size += ost->data_size; break;
1542 extra_size += ost->enc_ctx->extradata_size;
1543 data_size += ost->data_size;
/* use the modern AV_CODEC_FLAG_PASS2 name; the deprecated CODEC_FLAG_PASS2
 * alias was inconsistent with AV_CODEC_FLAG_PASS1 used on the same line */
1544 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1545 != AV_CODEC_FLAG_PASS1)
/* muxing overhead as a percentage of the raw stream payload */
1549 if (data_size && total_size>0 && total_size >= data_size)
1550 percent = 100.0 * (total_size - data_size) / data_size;
1552 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1553 video_size / 1024.0,
1554 audio_size / 1024.0,
1555 subtitle_size / 1024.0,
1556 other_size / 1024.0,
1557 extra_size / 1024.0);
1559 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1561 av_log(NULL, AV_LOG_INFO, "unknown");
1562 av_log(NULL, AV_LOG_INFO, "\n");
1564 /* print verbose per-stream stats */
1565 for (i = 0; i < nb_input_files; i++) {
1566 InputFile *f = input_files[i];
1567 uint64_t total_packets = 0, total_size = 0;
1569 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1570 i, f->ctx->filename);
1572 for (j = 0; j < f->nb_streams; j++) {
1573 InputStream *ist = input_streams[f->ist_index + j];
1574 enum AVMediaType type = ist->dec_ctx->codec_type;
1576 total_size += ist->data_size;
1577 total_packets += ist->nb_packets;
1579 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1580 i, j, media_type_string(type));
1581 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1582 ist->nb_packets, ist->data_size);
1584 if (ist->decoding_needed) {
1585 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1586 ist->frames_decoded);
1587 if (type == AVMEDIA_TYPE_AUDIO)
1588 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1589 av_log(NULL, AV_LOG_VERBOSE, "; ");
1592 av_log(NULL, AV_LOG_VERBOSE, "\n");
1595 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1596 total_packets, total_size);
1599 for (i = 0; i < nb_output_files; i++) {
1600 OutputFile *of = output_files[i];
1601 uint64_t total_packets = 0, total_size = 0;
1603 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1604 i, of->ctx->filename);
1606 for (j = 0; j < of->ctx->nb_streams; j++) {
1607 OutputStream *ost = output_streams[of->ost_index + j];
1608 enum AVMediaType type = ost->enc_ctx->codec_type;
1610 total_size += ost->data_size;
1611 total_packets += ost->packets_written;
1613 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1614 i, j, media_type_string(type));
1615 if (ost->encoding_needed) {
1616 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1617 ost->frames_encoded);
1618 if (type == AVMEDIA_TYPE_AUDIO)
1619 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1620 av_log(NULL, AV_LOG_VERBOSE, "; ");
1623 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1624 ost->packets_written, ost->data_size);
1626 av_log(NULL, AV_LOG_VERBOSE, "\n");
1629 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1630 total_packets, total_size);
1632 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1633 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1635 av_log(NULL, AV_LOG_WARNING, "\n");
1637 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic (and final) progress line — frame count, fps, q,
 * size, time, bitrate, speed — to stderr and/or the -progress AVIO
 * output.  Rate-limited to one update per 500 ms unless this is the
 * last report.  On the last report also prints the final stats. */
1642 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1645 AVBPrint buf_script;
1647 AVFormatContext *oc;
1649 AVCodecContext *enc;
1650 int frame_number, vid, i;
1653 int64_t pts = INT64_MIN + 1;
/* static state: last report wall-clock time and a QP histogram,
 * preserved across calls */
1654 static int64_t last_time = -1;
1655 static int qp_histogram[52];
1656 int hours, mins, secs, us;
1660 if (!print_stats && !is_last_report && !progress_avio)
1663 if (!is_last_report) {
1664 if (last_time == -1) {
1665 last_time = cur_time;
/* throttle: at most one report every 500 ms */
1668 if ((cur_time - last_time) < 500000)
1670 last_time = cur_time;
1673 t = (cur_time-timer_start) / 1000000.0;
1676 oc = output_files[0]->ctx;
1678 total_size = avio_size(oc->pb);
1679 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1680 total_size = avio_tell(oc->pb);
1684 av_bprint_init(&buf_script, 0, 1);
1685 for (i = 0; i < nb_output_streams; i++) {
1687 ost = output_streams[i];
1689 if (!ost->stream_copy)
1690 q = ost->quality / (float) FF_QP2LAMBDA;
1692 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1693 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1694 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1695 ost->file_index, ost->index, q);
/* only the first video stream drives the frame/fps columns */
1697 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1700 frame_number = ost->frame_number;
1701 fps = t > 1 ? frame_number / t : 0;
1702 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1703 frame_number, fps < 9.95, fps, q);
1704 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1705 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1706 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1707 ost->file_index, ost->index, q);
1709 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* accumulate the QP histogram and print it as one hex digit per bin */
1713 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1715 for (j = 0; j < 32; j++)
1716 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1719 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1721 double error, error_sum = 0;
1722 double scale, scale_sum = 0;
1724 char type[3] = { 'Y','U','V' };
1725 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
/* per-plane PSNR: cumulative error on the last report, per-frame otherwise */
1726 for (j = 0; j < 3; j++) {
1727 if (is_last_report) {
1728 error = enc->error[j];
1729 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1731 error = ost->error[j];
1732 scale = enc->width * enc->height * 255.0 * 255.0;
1738 p = psnr(error / scale);
1739 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1740 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1741 ost->file_index, ost->index, type[j] | 32, p);
1743 p = psnr(error_sum / scale_sum);
1744 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1745 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1746 ost->file_index, ost->index, p);
1750 /* compute min output value */
1751 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1752 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1753 ost->st->time_base, AV_TIME_BASE_Q));
1755 nb_frames_drop += ost->last_dropped;
/* split pts into hours/mins/secs/us for display */
1758 secs = FFABS(pts) / AV_TIME_BASE;
1759 us = FFABS(pts) % AV_TIME_BASE;
1765 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1766 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1768 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1770 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1771 "size=%8.0fkB time=", total_size / 1024.0);
1773 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1774 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1775 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1776 (100 * us) / AV_TIME_BASE);
1779 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1780 av_bprintf(&buf_script, "bitrate=N/A\n");
1782 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1783 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1786 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1787 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1788 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1789 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1790 hours, mins, secs, us);
1792 if (nb_frames_dup || nb_frames_drop)
1793 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1794 nb_frames_dup, nb_frames_drop);
1795 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1796 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1799 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1800 av_bprintf(&buf_script, "speed=N/A\n");
1802 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1803 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1806 if (print_stats || is_last_report) {
/* '\r' keeps updating one console line; '\n' finalizes it */
1807 const char end = is_last_report ? '\n' : '\r';
1808 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1809 fprintf(stderr, "%s %c", buf, end);
1811 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1816 if (progress_avio) {
1817 av_bprintf(&buf_script, "progress=%s\n",
1818 is_last_report ? "end" : "continue");
1819 avio_write(progress_avio, buf_script.str,
1820 FFMIN(buf_script.len, buf_script.size - 1));
1821 avio_flush(progress_avio);
1822 av_bprint_finalize(&buf_script, NULL);
1823 if (is_last_report) {
1824 if ((ret = avio_closep(&progress_avio)) < 0)
1825 av_log(NULL, AV_LOG_ERROR,
1826 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1831 print_final_stats(total_size);
/* Drain every encoder at end of input: send a NULL frame to each
 * encoding output stream, then receive and mux the remaining packets
 * until the encoder signals AVERROR_EOF.  Streams that never received
 * any data are first initialized with fake parameters so the output
 * file still gets a valid (empty) stream. */
1834 static void flush_encoders(void)
1838 for (i = 0; i < nb_output_streams; i++) {
1839 OutputStream *ost = output_streams[i];
1840 AVCodecContext *enc = ost->enc_ctx;
1841 OutputFile *of = output_files[ost->file_index];
1843 if (!ost->encoding_needed)
1846 // Try to enable encoding with no input frames.
1847 // Maybe we should just let encoding fail instead.
1848 if (!ost->initialized) {
1849 FilterGraph *fg = ost->filter->graph;
1850 char error[1024] = "";
1852 av_log(NULL, AV_LOG_WARNING,
1853 "Finishing stream %d:%d without any data written to it.\n",
1854 ost->file_index, ost->st->index);
1856 if (ost->filter && !fg->graph) {
/* fill in formats for inputs that never saw a frame so the
 * filtergraph can be configured */
1858 for (x = 0; x < fg->nb_inputs; x++) {
1859 InputFilter *ifilter = fg->inputs[x];
1860 if (ifilter->format < 0) {
1861 AVCodecParameters *par = ifilter->ist->st->codecpar;
1862 // We never got any input. Set a fake format, which will
1863 // come from libavformat.
1864 ifilter->format = par->format;
1865 ifilter->sample_rate = par->sample_rate;
1866 ifilter->channels = par->channels;
1867 ifilter->channel_layout = par->channel_layout;
1868 ifilter->width = par->width;
1869 ifilter->height = par->height;
1870 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1874 if (!ifilter_has_all_input_formats(fg))
1877 ret = configure_filtergraph(fg);
1879 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1883 finish_output_stream(ost);
1886 ret = init_output_stream(ost, error, sizeof(error));
1888 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1889 ost->file_index, ost->index, error);
/* encoders with frame_size <= 1 (e.g. raw-ish audio) need no draining */
1894 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1896 #if FF_API_LAVF_FMT_RAWPICTURE
1897 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1901 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame puts the encoder into draining mode */
1904 avcodec_send_frame(enc, NULL);
1907 const char *desc = NULL;
1911 switch (enc->codec_type) {
1912 case AVMEDIA_TYPE_AUDIO:
1915 case AVMEDIA_TYPE_VIDEO:
1922 av_init_packet(&pkt);
1926 update_benchmark(NULL);
/* pull buffered packets until the encoder is fully drained */
1927 ret = avcodec_receive_packet(enc, &pkt);
1928 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1929 if (ret < 0 && ret != AVERROR_EOF) {
1930 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1935 if (ost->logfile && enc->stats_out) {
1936 fprintf(ost->logfile, "%s", enc->stats_out);
1938 if (ret == AVERROR_EOF) {
1941 if (ost->finished & MUXER_FINISHED) {
1942 av_packet_unref(&pkt);
1945 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1946 pkt_size = pkt.size;
1947 output_packet(of, &pkt, ost);
1948 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1949 do_video_stats(ost, pkt_size);
1956 * Check whether a packet from ist should be written into ost at this time
 * (stream mapping matches and the packet is not before the output file's
 * requested start time).
1958 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1960 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files, for comparison with
 * the output stream's source mapping */
1961 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1963 if (ost->source_index != ist_index)
1969 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding:
 * apply start-time/recording-time trimming, rescale timestamps into
 * the muxer timebase, optionally run the stream's parser to fix up
 * the bitstream, and hand the packet to output_packet(). */
1975 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1977 OutputFile *of = output_files[ost->file_index];
1978 InputFile *f = input_files [ist->file_index];
1979 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1980 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1984 av_init_packet(&opkt);
/* by default, do not start a copied stream on a non-keyframe */
1986 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1987 !ost->copy_initial_nonkeyframes)
/* drop packets dated before the requested output start */
1990 if (!ost->frame_number && !ost->copy_prior_start) {
1991 int64_t comp_start = start_time;
1992 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1993 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1994 if (pkt->pts == AV_NOPTS_VALUE ?
1995 ist->pts < comp_start :
1996 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop the stream once the output recording time is exceeded */
2000 if (of->recording_time != INT64_MAX &&
2001 ist->pts >= of->recording_time + start_time) {
2002 close_output_stream(ost);
2006 if (f->recording_time != INT64_MAX) {
2007 start_time = f->ctx->start_time;
2008 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2009 start_time += f->start_time;
2010 if (ist->pts >= f->recording_time + start_time) {
2011 close_output_stream(ost);
2016 /* force the input stream PTS */
2017 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale pts/dts from the input stream timebase to the muxer timebase,
 * shifted by the output start time */
2020 if (pkt->pts != AV_NOPTS_VALUE)
2021 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2023 opkt.pts = AV_NOPTS_VALUE;
2025 if (pkt->dts == AV_NOPTS_VALUE)
2026 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2028 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2029 opkt.dts -= ost_tb_start_time;
/* audio: derive pts from dts via av_rescale_delta to keep sample
 * accuracy across timebase rounding */
2031 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2032 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2034 duration = ist->dec_ctx->frame_size;
2035 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2036 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2037 ost->mux_timebase) - ost_tb_start_time;
2040 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2042 opkt.flags = pkt->flags;
2043 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2044 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2045 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2046 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2047 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2049 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2050 &opkt.data, &opkt.size,
2051 pkt->data, pkt->size,
2052 pkt->flags & AV_PKT_FLAG_KEY);
2054 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap the parser-produced buffer so it is freed with the packet */
2059 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2064 opkt.data = pkt->data;
2065 opkt.size = pkt->size;
2067 av_copy_packet_side_data(&opkt, pkt);
2069 #if FF_API_LAVF_FMT_RAWPICTURE
2070 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2071 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2072 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2073 /* store AVPicture in AVPacket, as expected by the output format */
2074 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2076 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2080 opkt.data = (uint8_t *)&pict;
2081 opkt.size = sizeof(AVPicture);
2082 opkt.flags |= AV_PKT_FLAG_KEY;
2086 output_packet(of, &opkt, ost);
/* If the decoder reports no channel layout, pick the default layout
 * for its channel count (bounded by -guess_layout_max) and warn the
 * user about the guess. */
2089 int guess_input_channel_layout(InputStream *ist)
2091 AVCodecContext *dec = ist->dec_ctx;
2093 if (!dec->channel_layout) {
2094 char layout_name[256];
/* refuse to guess for channel counts above the user-set limit */
2096 if (dec->channels > ist->guess_layout_max)
2098 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2099 if (!dec->channel_layout)
2101 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2102 dec->channels, dec->channel_layout);
2103 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2104 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Record decode success/failure statistics and, with -xerror, abort on
 * a decode error or on a frame flagged as corrupt by the decoder. */
2109 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
2111 if (*got_output || ret<0)
2112 decode_error_stat[ret<0] ++;
2114 if (ret < 0 && exit_on_error)
2117 if (exit_on_error && *got_output && ist) {
2118 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2119 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2125 // Filters can be configured only if the formats of all inputs are known.
// Returns whether every audio/video input of the graph has a known format
// (format >= 0); other media types are not required to be known.
2126 static int ifilter_has_all_input_formats(FilterGraph *fg)
2129 for (i = 0; i < fg->nb_inputs; i++) {
2130 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2131 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Feed one decoded frame into a filtergraph input.  If the frame's
 * parameters differ from what the input was configured with (or the
 * graph is not configured yet), reconfigure the graph first; frames
 * arriving before all inputs have known formats are cloned and queued. */
2137 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2139 FilterGraph *fg = ifilter->graph;
2140 int need_reinit, ret, i;
2142 /* determine if the parameters for this input changed */
2143 need_reinit = ifilter->format != frame->format;
/* a change in hw frames context also forces a reinit */
2144 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2145 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2148 switch (ifilter->ist->st->codecpar->codec_type) {
2149 case AVMEDIA_TYPE_AUDIO:
2150 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2151 ifilter->channels != frame->channels ||
2152 ifilter->channel_layout != frame->channel_layout;
2154 case AVMEDIA_TYPE_VIDEO:
2155 need_reinit |= ifilter->width != frame->width ||
2156 ifilter->height != frame->height;
/* remember the new parameters on the input filter */
2161 ret = ifilter_parameters_from_frame(ifilter, frame);
2166 /* (re)init the graph if possible, otherwise buffer the frame and return */
2167 if (need_reinit || !fg->graph) {
2168 for (i = 0; i < fg->nb_inputs; i++) {
2169 if (!ifilter_has_all_input_formats(fg)) {
/* not all inputs known yet: clone the frame and park it in the
 * input's FIFO (grown on demand) until the graph can be built */
2170 AVFrame *tmp = av_frame_clone(frame);
2172 return AVERROR(ENOMEM);
2173 av_frame_unref(frame);
2175 if (!av_fifo_space(ifilter->frame_queue)) {
2176 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2180 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the old graph before tearing it down and reconfiguring */
2185 ret = reap_filters(1);
2186 if (ret < 0 && ret != AVERROR_EOF) {
2188 av_strerror(ret, errbuf, sizeof(errbuf));
2190 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2194 ret = configure_filtergraph(fg);
2196 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2201 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2203 av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
/* Signal EOF on one filtergraph input.  If the graph was configured,
 * push a NULL frame into the buffersrc; otherwise, once every input of
 * the never-configured graph has hit EOF, mark all its output streams
 * finished. */
2210 static int ifilter_send_eof(InputFilter *ifilter)
2216 if (ifilter->filter) {
/* NULL frame == EOF marker for the buffer source */
2217 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2221 // the filtergraph was never configured
2222 FilterGraph *fg = ifilter->graph;
2223 for (i = 0; i < fg->nb_inputs; i++)
2224 if (!fg->inputs[i]->eof)
2226 if (i == fg->nb_inputs) {
2227 // All the input streams have finished without the filtergraph
2228 // ever being configured.
2229 // Mark the output streams as finished.
2230 for (j = 0; j < fg->nb_outputs; j++)
2231 finish_output_stream(fg->outputs[j]->ost);
2238 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2239 // There is the following difference: if you got a frame, you must call
2240 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2241 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
// Wrapper mapping the send/receive decode API onto the old
// one-packet-in/one-frame-out calling convention used by the callers.
2242 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2249 ret = avcodec_send_packet(avctx, pkt);
2250 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2251 // decoded frames with avcodec_receive_frame() until done.
2252 if (ret < 0 && ret != AVERROR_EOF)
2256 ret = avcodec_receive_frame(avctx, frame);
2257 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Distribute one decoded frame to every filtergraph input fed by this
 * input stream.  All but the last filter get a reference (copy) of the
 * frame; the last one consumes the frame itself. */
2265 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2270 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2271 for (i = 0; i < ist->nb_filters; i++) {
2272 if (i < ist->nb_filters - 1) {
/* not the last consumer: hand out a ref, keep the original intact */
2273 f = ist->filter_frame;
2274 ret = av_frame_ref(f, decoded_frame);
2279 ret = ifilter_send_frame(ist->filters[i], f);
2280 if (ret == AVERROR_EOF)
2281 ret = 0; /* ignore */
2283 av_log(NULL, AV_LOG_ERROR,
2284 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet, validate the result, update decode
 * statistics and next pts/dts estimates, pick a timebase for the
 * frame's pts (stream tb, packet tb, or AV_TIME_BASE_Q as fallback),
 * rescale it to sample accuracy, and send the frame to the filters.
 * Returns the decode() result, or the filter-send error if that fails. */
2291 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2293 AVFrame *decoded_frame;
2294 AVCodecContext *avctx = ist->dec_ctx;
2296 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
2298 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2299 return AVERROR(ENOMEM);
2300 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2301 return AVERROR(ENOMEM);
2302 decoded_frame = ist->decoded_frame;
2304 update_benchmark(NULL);
2305 ret = decode(avctx, decoded_frame, got_output, pkt);
2306 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* guard against a decoder reporting a nonsensical sample rate */
2308 if (ret >= 0 && avctx->sample_rate <= 0) {
2309 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2310 ret = AVERROR_INVALIDDATA;
2313 if (ret != AVERROR_EOF)
2314 check_decode_result(ist, got_output, ret);
2316 if (!*got_output || ret < 0)
2319 ist->samples_decoded += decoded_frame->nb_samples;
2320 ist->frames_decoded++;
2323 /* increment next_dts to use for the case where the input stream does not
2324 have timestamps or there are multiple frames in the packet */
2325 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2327 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* choose the timebase that matches where the pts value came from */
2331 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2332 decoded_frame_tb = ist->st->time_base;
2333 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2334 decoded_frame->pts = pkt->pts;
2335 decoded_frame_tb = ist->st->time_base;
2337 decoded_frame->pts = ist->dts;
2338 decoded_frame_tb = AV_TIME_BASE_Q;
2340 if (decoded_frame->pts != AV_NOPTS_VALUE)
2341 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2342 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2343 (AVRational){1, avctx->sample_rate});
2344 ist->nb_samples = decoded_frame->nb_samples;
2345 err = send_frame_to_filters(ist, decoded_frame);
/* always release both frames before returning */
2347 av_frame_unref(ist->filter_frame);
2348 av_frame_unref(decoded_frame);
2349 return err < 0 ? err : ret;
/*
 * Decode one video packet for input stream 'ist' and, if a frame comes out,
 * forward it to the stream's attached filtergraph inputs.
 *
 * pkt == NULL means a drain call (flush the decoder); 'eof' flags demuxer EOF.
 * *got_output is set when a decoded frame was produced. Returns the decoder
 * return value, or a negative AVERROR from allocation / filter delivery.
 *
 * NOTE(review): this extract elides some original source lines (braces, early
 * exits); the comments below describe only the code that is visible here.
 */
2352 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2354 AVFrame *decoded_frame;
2355 int i, ret = 0, err = 0;
2356 int64_t best_effort_timestamp;
2357 int64_t dts = AV_NOPTS_VALUE;
2360 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2361 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2363 if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the per-stream reusable frames; ENOMEM aborts early. */
2366 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2367 return AVERROR(ENOMEM);
2368 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2369 return AVERROR(ENOMEM);
2370 decoded_frame = ist->decoded_frame;
/* Convert the stream-global dts (AV_TIME_BASE units) back into the
 * input stream's time base for the packet handed to the decoder. */
2371 if (ist->dts != AV_NOPTS_VALUE)
2372 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2375 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2378 // The old code used to set dts on the drain packet, which does not work
2379 // with the new API anymore.
/* Queue each packet dts so a timestamp can be synthesized for frames
 * that only surface while draining at EOF (consumed further below). */
2381 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2383 return AVERROR(ENOMEM);
2384 ist->dts_buffer = new;
2385 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2388 update_benchmark(NULL);
2389 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2390 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2392 // The following line may be required in some cases where there is no parser
2393 // or the parser does not has_b_frames correctly
2394 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2395 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2396 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2398 av_log(ist->dec_ctx, AV_LOG_WARNING,
2399 "video_delay is larger in decoder than demuxer %d > %d.\n"
2400 "If you want to help, upload a sample "
2401 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2402 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2403 ist->dec_ctx->has_b_frames,
2404 ist->st->codecpar->video_delay);
2407 if (ret != AVERROR_EOF)
2408 check_decode_result(ist, got_output, ret);
/* Debug aid: the decoder context and the produced frame disagreeing on
 * geometry/pixel format is logged but not treated as an error here. */
2410 if (*got_output && ret >= 0) {
2411 if (ist->dec_ctx->width != decoded_frame->width ||
2412 ist->dec_ctx->height != decoded_frame->height ||
2413 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2414 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2415 decoded_frame->width,
2416 decoded_frame->height,
2417 decoded_frame->format,
2418 ist->dec_ctx->width,
2419 ist->dec_ctx->height,
2420 ist->dec_ctx->pix_fmt);
2424 if (!*got_output || ret < 0)
/* Apply user-forced field order (-top option), if any. */
2427 if(ist->top_field_first>=0)
2428 decoded_frame->top_field_first = ist->top_field_first;
2430 ist->frames_decoded++;
/* Download the frame from hardware memory when a hwaccel produced it. */
2432 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2433 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2437 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2439 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* With a user-forced input framerate (-r on input), generate CFR pts
 * instead of trusting the container timestamps. */
2441 if (ist->framerate.num)
2442 best_effort_timestamp = ist->cfr_next_pts++;
/* While draining at EOF, fall back to the oldest queued packet dts
 * (see dts_buffer fill above) and shift the queue down by one. */
2444 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2445 best_effort_timestamp = ist->dts_buffer[0];
2447 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2448 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2449 ist->nb_dts_buffer--;
2452 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2453 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2455 if (ts != AV_NOPTS_VALUE)
2456 ist->next_pts = ist->pts = ts;
2460 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2461 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2462 ist->st->index, av_ts2str(decoded_frame->pts),
2463 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2464 best_effort_timestamp,
2465 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2466 decoded_frame->key_frame, decoded_frame->pict_type,
2467 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level SAR overrides the codec-level one. */
2470 if (ist->st->sample_aspect_ratio.num)
2471 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2473 err = send_frame_to_filters(ist, decoded_frame);
2476 av_frame_unref(ist->filter_frame);
2477 av_frame_unref(decoded_frame);
2478 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet and dispatch the result: optionally tighten the
 * previous subtitle's display duration (-fix_sub_duration), feed the sub2video
 * path (rendering subtitles into video filtergraphs), and hand the subtitle to
 * every output stream that encodes subtitles from this input.
 *
 * NOTE(review): some original lines are elided in this extract; comments
 * describe visible code only.
 */
2481 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2483 AVSubtitle subtitle;
2485 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2486 &subtitle, got_output, pkt);
2488 check_decode_result(NULL, got_output, ret);
/* On error or no output: flush sub2video state (visible branch). */
2490 if (ret < 0 || !*got_output) {
2492 sub2video_flush(ist);
/* -fix_sub_duration: delay output by one subtitle so the previous one's
 * end_display_time can be clipped to the start of the current one. */
2496 if (ist->fix_sub_duration) {
2498 if (ist->prev_sub.got_output) {
/* Milliseconds between the two subtitle pts values. */
2499 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2500 1000, AV_TIME_BASE);
2501 if (end < ist->prev_sub.subtitle.end_display_time) {
2502 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2503 "Subtitle duration reduced from %d to %d%s\n",
2504 ist->prev_sub.subtitle.end_display_time, end,
2505 end <= 0 ? ", dropping it" : "");
2506 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current and buffered subtitle: we now emit the previous one. */
2509 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2510 FFSWAP(int, ret, ist->prev_sub.ret);
2511 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video: render immediately if the frame exists, otherwise queue the
 * subtitle in a growable FIFO until the filtergraph is configured. */
2519 if (ist->sub2video.frame) {
2520 sub2video_update(ist, &subtitle);
2521 } else if (ist->nb_filters) {
2522 if (!ist->sub2video.sub_queue)
2523 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2524 if (!ist->sub2video.sub_queue)
2526 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2527 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2531 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2535 if (!subtitle.num_rects)
2538 ist->frames_decoded++;
/* Send the subtitle to every matching subtitle-encoding output stream. */
2540 for (i = 0; i < nb_output_streams; i++) {
2541 OutputStream *ost = output_streams[i];
2543 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2544 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2547 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2552 avsubtitle_free(&subtitle);
/*
 * Signal EOF to every filtergraph input attached to this input stream by
 * calling ifilter_send_eof() on each filter. The error-propagation lines are
 * elided in this extract; presumably a failing ifilter_send_eof() aborts the
 * loop and its code is returned — TODO confirm against the full source.
 */
2556 static int send_filter_eof(InputStream *ist)
2559 for (i = 0; i < ist->nb_filters; i++) {
2560 ret = ifilter_send_eof(ist->filters[i]);
/*
 * Central per-packet dispatcher: decode (audio/video/subtitle) or stream-copy
 * one input packet, maintaining the stream's running pts/dts estimates in
 * AV_TIME_BASE units. pkt == NULL flushes the decoders; 'no_eof' suppresses
 * sending EOF to the filters after flushing (used when looping input).
 * Returns !eof_reached, i.e. nonzero while the stream can still produce data.
 *
 * NOTE(review): this extract elides lines (loop control such as 'repeating',
 * got_output declarations, brace lines); comments cover visible code only.
 */
2567 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2568 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2572 int eof_reached = 0;
/* First packet for this stream: seed ist->dts. When an average frame rate
 * is known, pre-roll backwards by the decoder delay (has_b_frames). */
2575 if (!ist->saw_first_ts) {
2576 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2578 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2579 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2580 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2582 ist->saw_first_ts = 1;
2585 if (ist->next_dts == AV_NOPTS_VALUE)
2586 ist->next_dts = ist->dts;
2587 if (ist->next_pts == AV_NOPTS_VALUE)
2588 ist->next_pts = ist->pts;
2592 av_init_packet(&avpkt);
/* Resync the stream clock from the packet dts (rescaled to AV_TIME_BASE).
 * For decoded video the pts is driven by decoder output instead. */
2599 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2600 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2601 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2602 ist->next_pts = ist->pts = ist->dts;
2605 // while we have more to decode or while the decoder did output something on EOF
2606 while (ist->decoding_needed) {
2610 ist->pts = ist->next_pts;
2611 ist->dts = ist->next_dts;
2613 switch (ist->dec_ctx->codec_type) {
2614 case AVMEDIA_TYPE_AUDIO:
2615 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2617 case AVMEDIA_TYPE_VIDEO:
2618 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
/* Advance next_dts/next_pts by the frame duration: prefer the packet
 * duration, else derive it from the decoder framerate and the parser's
 * repeat_pict (or ticks_per_frame when no parser is present). */
2619 if (!repeating || !pkt || got_output) {
2620 if (pkt && pkt->duration) {
2621 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2622 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2623 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2624 duration = ((int64_t)AV_TIME_BASE *
2625 ist->dec_ctx->framerate.den * ticks) /
2626 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2629 if(ist->dts != AV_NOPTS_VALUE && duration) {
2630 ist->next_dts += duration;
2632 ist->next_dts = AV_NOPTS_VALUE;
2636 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2638 case AVMEDIA_TYPE_SUBTITLE:
2641 ret = transcode_subtitles(ist, &avpkt, &got_output);
2642 if (!pkt && ret >= 0)
2649 if (ret == AVERROR_EOF) {
2655 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2656 ist->file_index, ist->st->index, av_err2str(ret));
2663 ist->got_output = 1;
2668 // During draining, we might get multiple output frames in this loop.
2669 // ffmpeg.c does not drain the filter chain on configuration changes,
2670 // which means if we send multiple frames at once to the filters, and
2671 // one of those frames changes configuration, the buffered frames will
2672 // be lost. This can upset certain FATE tests.
2673 // Decode only 1 frame per call on EOF to appease these FATE tests.
2674 // The ideal solution would be to rewrite decoding to use the new
2675 // decoding API in a better way.
2682 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2683 /* except when looping we need to flush but not to send an EOF */
2684 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2685 int ret = send_filter_eof(ist);
2687 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2692 /* handle stream copy */
/* Stream-copy path: no decoding, so advance timestamps from codec
 * parameters (audio frame_size/sample_rate, or the video framerate). */
2693 if (!ist->decoding_needed) {
2694 ist->dts = ist->next_dts;
2695 switch (ist->dec_ctx->codec_type) {
2696 case AVMEDIA_TYPE_AUDIO:
2697 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2698 ist->dec_ctx->sample_rate;
2700 case AVMEDIA_TYPE_VIDEO:
2701 if (ist->framerate.num) {
2702 // TODO: Remove work-around for c99-to-c89 issue 7
2703 AVRational time_base_q = AV_TIME_BASE_Q;
2704 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2705 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2706 } else if (pkt->duration) {
2707 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2708 } else if(ist->dec_ctx->framerate.num != 0) {
2709 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2710 ist->next_dts += ((int64_t)AV_TIME_BASE *
2711 ist->dec_ctx->framerate.den * ticks) /
2712 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2716 ist->pts = ist->dts;
2717 ist->next_pts = ist->next_dts;
/* Copy this packet to every output stream that stream-copies from us. */
2719 for (i = 0; pkt && i < nb_output_streams; i++) {
2720 OutputStream *ost = output_streams[i];
2722 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2725 do_streamcopy(ist, ost, pkt);
2728 return !eof_reached;
/*
 * Emit an SDP description covering all RTP output files. Bails out (visible
 * early-return condition) until every output file has written its header,
 * then collects the AVFormatContexts whose muxer is "rtp", builds the SDP
 * with av_sdp_create(), and writes it either to stdout or to -sdp_file.
 */
2731 static void print_sdp(void)
2736 AVIOContext *sdp_pb;
2737 AVFormatContext **avc;
/* All headers must be written before a complete SDP can be produced. */
2739 for (i = 0; i < nb_output_files; i++) {
2740 if (!output_files[i]->header_written)
2744 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Gather only RTP outputs; j counts how many were found. */
2747 for (i = 0, j = 0; i < nb_output_files; i++) {
2748 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2749 avc[j] = output_files[i]->ctx;
2757 av_sdp_create(avc, j, sdp, sizeof(sdp));
2759 if (!sdp_filename) {
2760 printf("SDP:\n%s\n", sdp);
2763 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2764 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2766 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2767 avio_closep(&sdp_pb);
/* sdp_filename is consumed: freed so the SDP is only written once. */
2768 av_freep(&sdp_filename);
/*
 * Look up the hwaccel table entry matching a hardware pixel format.
 * Linear scan of the global hwaccels[] array, terminated by a NULL name.
 * The no-match return (presumably NULL) is on a line elided from this
 * extract — TODO confirm against the full source.
 */
2776 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2779 for (i = 0; hwaccels[i].name; i++)
2780 if (hwaccels[i].pix_fmt == pix_fmt)
2781 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * format list and pick one, initializing a matching hwaccel when the user
 * requested (or auto-selected) it. On hwaccel init failure with an explicitly
 * requested hwaccel, decoding is aborted by returning AV_PIX_FMT_NONE.
 *
 * NOTE(review): the final fallback return (software format selection) is on
 * lines elided from this extract.
 */
2785 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2787 InputStream *ist = s->opaque;
2788 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2791 for (p = pix_fmts; *p != -1; p++) {
2792 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2793 const HWAccel *hwaccel;
/* Only hardware-accelerated formats are considered in this loop. */
2795 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2798 hwaccel = get_hwaccel(*p);
/* Skip formats that do not match an already-active or user-chosen hwaccel. */
2800 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2801 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2804 ret = hwaccel->init(s);
/* Explicitly requested hwaccel that fails to init is a fatal condition. */
2806 if (ist->hwaccel_id == hwaccel->id) {
2807 av_log(NULL, AV_LOG_FATAL,
2808 "%s hwaccel requested for input stream #%d:%d, "
2809 "but cannot be initialized.\n", hwaccel->name,
2810 ist->file_index, ist->st->index);
2811 return AV_PIX_FMT_NONE;
/* Propagate the input's hardware frames context to the decoder. */
2816 if (ist->hw_frames_ctx) {
2817 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2818 if (!s->hw_frames_ctx)
2819 return AV_PIX_FMT_NONE;
2822 ist->active_hwaccel_id = hwaccel->id;
2823 ist->hwaccel_pix_fmt = *p;
/*
 * AVCodecContext.get_buffer2 callback: route frame allocation to the active
 * hwaccel's allocator when the frame uses the hwaccel pixel format, otherwise
 * fall back to libavcodec's default buffer allocation.
 */
2830 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2832 InputStream *ist = s->opaque;
2834 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2835 return ist->hwaccel_get_buffer(s, frame, flags);
2837 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer2 callbacks for hwaccel support, set decoder
 * options, and call avcodec_open2(). On failure a human-readable message is
 * written into 'error' and a negative AVERROR is returned.
 *
 * NOTE(review): the "decoder not found" guard condition and the final return
 * are on lines elided from this extract.
 */
2840 static int init_input_stream(int ist_index, char *error, int error_len)
2843 InputStream *ist = input_streams[ist_index];
2845 if (ist->decoding_needed) {
2846 AVCodec *codec = ist->dec;
2848 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2849 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2850 return AVERROR(EINVAL);
/* Hook up hwaccel-aware callbacks; opaque carries the InputStream. */
2853 ist->dec_ctx->opaque = ist;
2854 ist->dec_ctx->get_format = get_format;
2855 ist->dec_ctx->get_buffer2 = get_buffer;
2856 ist->dec_ctx->thread_safe_callbacks = 1;
2858 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2859 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2860 (ist->decoding_needed & DECODING_FOR_OST)) {
2861 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2862 if (ist->decoding_needed & DECODING_FOR_FILTER)
2863 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2866 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2868 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2869 * audio, and video decoders such as cuvid or mediacodec */
2870 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic thread-count selection unless user overrode it. */
2872 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2873 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2874 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2875 if (ret == AVERROR_EXPERIMENTAL)
2876 abort_codec_experimental(codec, 0);
2878 snprintf(error, error_len,
2879 "Error while opening decoder for input stream "
2881 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left unconsumed are reported as errors. */
2884 assert_avoptions(ist->decoder_opts);
2887 ist->next_pts = AV_NOPTS_VALUE;
2888 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the input stream that directly feeds this output stream, keyed by
 * ost->source_index. The no-source return (presumably NULL, for outputs fed
 * only by complex filtergraphs) is on a line elided from this extract.
 */
2893 static InputStream *get_input_stream(OutputStream *ost)
2895 if (ost->source_index >= 0)
2896 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values: yields the sign of (a - b) via the
 * project's FFDIFFSIGN helper macro rather than plain subtraction (whose
 * result could not be narrowed safely to int).
 */
2900 static int compare_int64(const void *a, const void *b)
2902 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/*
 * Write the muxer header for an output file once ALL of its streams have been
 * initialized, then drain any packets that were queued in per-stream muxing
 * FIFOs while waiting. Also triggers SDP printing when requested.
 *
 * NOTE(review): some lines (early returns, AVPacket declaration for the drain
 * loop) are elided from this extract.
 */
2905 /* open the muxer when all the streams are initialized */
2906 static int check_init_output_file(OutputFile *of, int file_index)
/* Bail out while any stream of this file is still uninitialized. */
2910 for (i = 0; i < of->ctx->nb_streams; i++) {
2911 OutputStream *ost = output_streams[of->ost_index + i];
2912 if (!ost->initialized)
2916 of->ctx->interrupt_callback = int_cb;
2918 ret = avformat_write_header(of->ctx, &of->opts);
2920 av_log(NULL, AV_LOG_ERROR,
2921 "Could not write header for output file #%d "
2922 "(incorrect codec parameters ?): %s\n",
2923 file_index, av_err2str(ret));
2926 //assert_avoptions(of->opts);
2927 of->header_written = 1;
2929 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2931 if (sdp_filename || want_sdp)
2934 /* flush the muxing queues */
2935 for (i = 0; i < of->ctx->nb_streams; i++) {
2936 OutputStream *ost = output_streams[of->ost_index + i];
2938 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2939 if (!av_fifo_size(ost->muxing_queue))
2940 ost->mux_timebase = ost->st->time_base;
/* Drain every packet buffered before the header could be written. */
2942 while (av_fifo_size(ost->muxing_queue)) {
2944 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2945 write_packet(of, &pkt, ost);
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * chain codec parameters and time bases filter-to-filter (first filter takes
 * them from the stream), av_bsf_init() each one, then copy the final filter's
 * output parameters and time base back onto the output stream.
 */
2952 static int init_output_bsfs(OutputStream *ost)
2957 if (!ost->nb_bitstream_filters)
2960 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2961 ctx = ost->bsf_ctx[i];
/* Filter i consumes the previous filter's output (or the stream params). */
2963 ret = avcodec_parameters_copy(ctx->par_in,
2964 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2968 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2970 ret = av_bsf_init(ctx);
2972 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2973 ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines the stream's final parameters. */
2978 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2979 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2983 ost->st->time_base = ctx->time_base_out;
/*
 * Initialize an output stream in stream-copy mode: transfer codec parameters,
 * codec tag, timing info, disposition and packet side data from the source
 * input stream, then apply per-codec-type fixups (block_align quirks for MP3
 * and AC-3, aspect ratio / frame rates for video).
 *
 * NOTE(review): several lines (error checks, declarations such as 'sar') are
 * elided from this extract; comments describe the visible code only.
 */
2988 static int init_output_stream_streamcopy(OutputStream *ost)
2990 OutputFile *of = output_files[ost->file_index];
2991 InputStream *ist = get_input_stream(ost);
2992 AVCodecParameters *par_dst = ost->st->codecpar;
2993 AVCodecParameters *par_src = ost->ref_par;
2996 uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy requires a real source stream and no filtergraph. */
2998 av_assert0(ist && !ost->filter);
3000 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3002 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3004 av_log(NULL, AV_LOG_FATAL,
3005 "Error setting up codec context options.\n");
3008 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec tag only if the output container can represent
 * this codec id with it (or defines no tags at all). */
3011 unsigned int codec_tag_tmp;
3012 if (!of->ctx->oformat->codec_tag ||
3013 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3014 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3015 codec_tag = par_src->codec_tag;
3018 ret = avcodec_parameters_copy(par_dst, par_src);
3022 par_dst->codec_tag = codec_tag;
3024 if (!ost->frame_rate.num)
3025 ost->frame_rate = ist->framerate;
3026 ost->st->avg_frame_rate = ost->frame_rate;
3028 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3032 // copy timebase while removing common factors
3033 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3034 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3036 // copy estimated duration as a hint to the muxer
3037 if (ost->st->duration <= 0 && ist->st->duration > 0)
3038 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3041 ost->st->disposition = ist->st->disposition;
/* Deep-copy packet side data, skipping the display matrix when the user
 * overrode rotation on the command line. */
3043 if (ist->st->nb_side_data) {
3044 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
3045 sizeof(*ist->st->side_data));
3046 if (!ost->st->side_data)
3047 return AVERROR(ENOMEM);
3049 ost->st->nb_side_data = 0;
3050 for (i = 0; i < ist->st->nb_side_data; i++) {
3051 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3052 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
3054 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
3057 sd_dst->data = av_malloc(sd_src->size);
3059 return AVERROR(ENOMEM);
3060 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3061 sd_dst->size = sd_src->size;
3062 sd_dst->type = sd_src->type;
3063 ost->st->nb_side_data++;
3067 ost->parser = av_parser_init(par_dst->codec_id);
3068 ost->parser_avctx = avcodec_alloc_context3(NULL);
3069 if (!ost->parser_avctx)
3070 return AVERROR(ENOMEM);
3072 switch (par_dst->codec_type) {
3073 case AVMEDIA_TYPE_AUDIO:
3074 if (audio_volume != 256) {
3075 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Historical quirk: these block_align values are meaningless for MP3
 * and AC-3 in copy mode, so they are cleared. */
3078 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3079 par_dst->block_align= 0;
3080 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3081 par_dst->block_align= 0;
3083 case AVMEDIA_TYPE_VIDEO:
/* SAR priority: -aspect override > container SAR > codec SAR. */
3084 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3086 av_mul_q(ost->frame_aspect_ratio,
3087 (AVRational){ par_dst->height, par_dst->width });
3088 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3089 "with stream copy may produce invalid files\n");
3091 else if (ist->st->sample_aspect_ratio.num)
3092 sar = ist->st->sample_aspect_ratio;
3094 sar = par_src->sample_aspect_ratio;
3095 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3096 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3097 ost->st->r_frame_rate = ist->st->r_frame_rate;
3101 ost->mux_timebase = ist->st->time_base;
/*
 * Set the "encoder" metadata tag on an output stream to identify libavcodec
 * and the encoder used (e.g. "Lavc... libx264"), unless the user already set
 * one. In bitexact mode (fflags/codec flags) the version number is omitted so
 * output remains reproducible.
 */
3106 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3108 AVDictionaryEntry *e;
3110 uint8_t *encoder_string;
3111 int encoder_string_len;
3112 int format_flags = 0;
3113 int codec_flags = 0;
/* Respect an explicit user-provided encoder tag. */
3115 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate the textual fflags/flags option strings into flag bitmasks
 * so AVFMT_FLAG_BITEXACT / AV_CODEC_FLAG_BITEXACT can be detected. */
3118 e = av_dict_get(of->opts, "fflags", NULL, 0);
3120 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3123 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3125 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3127 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3130 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3133 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3134 encoder_string = av_mallocz(encoder_string_len);
3135 if (!encoder_string)
3138 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3139 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3141 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3142 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL: the dictionary takes ownership of encoder_string. */
3143 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3144 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse the -force_key_frames specification 'kf' (comma-separated times, plus
 * the "chapters[+offset]" shorthand that forces a keyframe at each chapter
 * start) into a sorted array of pts values in the encoder time base, stored
 * in ost->forced_kf_pts / ost->forced_kf_count.
 *
 * NOTE(review): some lines (counting commas into n/size, advancing p past
 * each comma) are elided from this extract.
 */
3147 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3148 AVCodecContext *avctx)
3151 int n = 1, i, size, index = 0;
/* Count entries by scanning for separators (count lines elided here). */
3154 for (p = kf; *p; p++)
3158 pts = av_malloc_array(size, sizeof(*pts));
3160 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3165 for (i = 0; i < n; i++) {
3166 char *next = strchr(p, ',');
/* "chapters[+-offset]": expand to one forced keyframe per chapter. */
3171 if (!memcmp(p, "chapters", 8)) {
3173 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by (nb_chapters - 1), guarding against int overflow. */
3176 if (avf->nb_chapters > INT_MAX - size ||
3177 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3179 av_log(NULL, AV_LOG_FATAL,
3180 "Could not allocate forced key frames array.\n");
3183 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3184 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3186 for (j = 0; j < avf->nb_chapters; j++) {
3187 AVChapter *c = avf->chapters[j];
3188 av_assert1(index < size);
3189 pts[index++] = av_rescale_q(c->start, c->time_base,
3190 avctx->time_base) + t;
/* Plain timestamp entry, converted to the encoder time base. */
3195 t = parse_time_or_die("force_key_frames", p, 1);
3196 av_assert1(index < size);
3197 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3204 av_assert0(index == size);
/* Sort so do_video_out() can walk the list monotonically. */
3205 qsort(pts, size, sizeof(*pts), compare_int64);
3206 ost->forced_kf_count = size;
3207 ost->forced_kf_pts = pts;
/*
 * Configure the encoder context for an output stream that will be encoded:
 * derive frame rate, sample/pixel format, dimensions, channel layout and time
 * base from the filtergraph sink (and the source input stream where one
 * exists), apply per-codec-type settings, and parse -force_key_frames.
 * Also sets the stream disposition and the initial mux time base.
 *
 * NOTE(review): this extract elides lines (brace lines, some guards such as
 * the 'ist' null checks around dec_ctx use); comments cover visible code only.
 */
3210 static int init_output_stream_encode(OutputStream *ost)
3212 InputStream *ist = get_input_stream(ost);
3213 AVCodecContext *enc_ctx = ost->enc_ctx;
3214 AVCodecContext *dec_ctx = NULL;
3215 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3218 set_encoder_id(output_files[ost->file_index], ost);
3221 ost->st->disposition = ist->st->disposition;
3223 dec_ctx = ist->dec_ctx;
3225 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the output file, mark
 * it as the default audio/video stream. */
3227 for (j = 0; j < oc->nb_streams; j++) {
3228 AVStream *st = oc->streams[j];
3229 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3232 if (j == oc->nb_streams)
3233 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3234 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3235 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame-rate selection: filter sink > input -r > input r_frame_rate >
 * hard-coded 25 fps fallback, then clamp to the encoder's supported list. */
3238 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3239 if (!ost->frame_rate.num)
3240 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3241 if (ist && !ost->frame_rate.num)
3242 ost->frame_rate = ist->framerate;
3243 if (ist && !ost->frame_rate.num)
3244 ost->frame_rate = ist->st->r_frame_rate;
3245 if (ist && !ost->frame_rate.num) {
3246 ost->frame_rate = (AVRational){25, 1};
3247 av_log(NULL, AV_LOG_WARNING,
3249 "about the input framerate is available. Falling "
3250 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3251 "if you want a different framerate.\n",
3252 ost->file_index, ost->index);
3254 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3255 if (ost->enc->supported_framerates && !ost->force_fps) {
3256 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3257 ost->frame_rate = ost->enc->supported_framerates[idx];
3259 // reduce frame rate for mpeg4 to be within the spec limits
3260 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3261 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3262 ost->frame_rate.num, ost->frame_rate.den, 65535);
3266 switch (enc_ctx->codec_type) {
3267 case AVMEDIA_TYPE_AUDIO:
/* Audio properties come from the buffersink at the end of the graph. */
3268 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3270 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3271 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3272 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3273 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3274 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3275 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3277 case AVMEDIA_TYPE_VIDEO:
3278 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3279 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3280 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3281 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3282 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3283 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3284 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe pts were parsed in a different time base earlier;
 * rescale them into the final encoder time base. */
3286 for (j = 0; j < ost->forced_kf_count; j++)
3287 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3289 enc_ctx->time_base);
3291 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3292 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
/* SAR: -aspect option (converted from DAR) wins over the filter output. */
3293 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3294 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3295 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3296 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3297 if (!strncmp(ost->enc->name, "libx264", 7) &&
3298 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3299 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3300 av_log(NULL, AV_LOG_WARNING,
3301 "No pixel format specified, %s for H.264 encoding chosen.\n"
3302 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3303 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3304 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3305 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3306 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3307 av_log(NULL, AV_LOG_WARNING,
3308 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3309 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3310 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3311 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3313 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3314 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3316 enc_ctx->framerate = ost->frame_rate;
3318 ost->st->avg_frame_rate = ost->frame_rate;
/* When transcoding changes geometry or pixel format, the source's raw
 * sample depth no longer applies; use the user-specified one. */
3321 enc_ctx->width != dec_ctx->width ||
3322 enc_ctx->height != dec_ctx->height ||
3323 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3324 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" prefix compiles an expression; "source"
 * keeps input keyframes; anything else is a static timestamp list. */
3327 if (ost->forced_keyframes) {
3328 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3329 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3330 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3332 av_log(NULL, AV_LOG_ERROR,
3333 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3336 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3337 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3338 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3339 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3341 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3342 // parse it only for static kf timings
3343 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3344 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3348 case AVMEDIA_TYPE_SUBTITLE:
3349 enc_ctx->time_base = AV_TIME_BASE_Q;
/* Subtitle canvas defaults to the source video dimensions. */
3350 if (!enc_ctx->width) {
3351 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3352 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3355 case AVMEDIA_TYPE_DATA:
3362 ost->mux_timebase = enc_ctx->time_base;
3367 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3371 if (ost->encoding_needed) {
3372 AVCodec *codec = ost->enc;
3373 AVCodecContext *dec = NULL;
3376 ret = init_output_stream_encode(ost);
3380 if ((ist = get_input_stream(ost)))
3382 if (dec && dec->subtitle_header) {
3383 /* ASS code assumes this buffer is null terminated so add extra byte. */
3384 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3385 if (!ost->enc_ctx->subtitle_header)
3386 return AVERROR(ENOMEM);
3387 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3388 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3390 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3391 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3392 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3394 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3395 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3396 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3398 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3399 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3400 if (!ost->enc_ctx->hw_frames_ctx)
3401 return AVERROR(ENOMEM);
3404 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3405 if (ret == AVERROR_EXPERIMENTAL)
3406 abort_codec_experimental(codec, 1);
3407 snprintf(error, error_len,
3408 "Error while opening encoder for output stream #%d:%d - "
3409 "maybe incorrect parameters such as bit_rate, rate, width or height",
3410 ost->file_index, ost->index);
3413 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3414 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3415 av_buffersink_set_frame_size(ost->filter->filter,
3416 ost->enc_ctx->frame_size);
3417 assert_avoptions(ost->encoder_opts);
3418 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3419 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3420 " It takes bits/s as argument, not kbits/s\n");
3422 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3424 av_log(NULL, AV_LOG_FATAL,
3425 "Error initializing the output stream codec context.\n");
3429 * FIXME: ost->st->codec should't be needed here anymore.
3431 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3435 if (ost->enc_ctx->nb_coded_side_data) {
3438 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3439 sizeof(*ost->st->side_data));
3440 if (!ost->st->side_data)
3441 return AVERROR(ENOMEM);
3443 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3444 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3445 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3447 sd_dst->data = av_malloc(sd_src->size);
3449 return AVERROR(ENOMEM);
3450 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3451 sd_dst->size = sd_src->size;
3452 sd_dst->type = sd_src->type;
3453 ost->st->nb_side_data++;
3457 // copy timebase while removing common factors
3458 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3459 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3461 // copy estimated duration as a hint to the muxer
3462 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3463 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3465 ost->st->codec->codec= ost->enc_ctx->codec;
3466 } else if (ost->stream_copy) {
3467 ret = init_output_stream_streamcopy(ost);
3472 * FIXME: will the codec context used by the parser during streamcopy
3473 * This should go away with the new parser API.
3475 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3480 // parse user provided disposition, and update stream values
3481 if (ost->disposition) {
3482 static const AVOption opts[] = {
3483 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3484 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3485 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3486 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3487 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3488 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3489 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3490 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3491 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3492 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3493 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3494 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3495 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3496 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3499 static const AVClass class = {
3501 .item_name = av_default_item_name,
3503 .version = LIBAVUTIL_VERSION_INT,
3505 const AVClass *pclass = &class;
3507 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3512 /* initialize bitstream filters for the output stream
3513 * needs to be done here, because the codec id for streamcopy is not
3514 * known until now */
3515 ret = init_output_bsfs(ost);
3519 ost->initialized = 1;
3521 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/*
 * Warn, once per stream index, when a packet arrives for an input stream
 * that was not present when the input file was opened (streams can appear
 * dynamically in some containers, e.g. MPEG-TS).
 * NOTE(review): this excerpt omits interleaved source lines (the early
 * return after the nb_streams_warn check, braces); comments describe only
 * the visible code.
 */
3528 static void report_new_stream(int input_index, AVPacket *pkt)
3530 InputFile *file = input_files[input_index];
3531 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Stream indices below nb_streams_warn were already reported. */
3533 if (pkt->stream_index < file->nb_streams_warn)
3535 av_log(file->ctx, AV_LOG_WARNING,
3536 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3537 av_get_media_type_string(st->codecpar->codec_type),
3538 input_index, pkt->stream_index,
3539 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember we have warned up to and including this stream index. */
3540 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * One-time setup before the main transcoding loop:
 *   - bind each complex-filtergraph output back to a source input stream
 *     (so options like -metadata map sensibly),
 *   - arm framerate emulation (-re) start timestamps,
 *   - initialize all input streams and all output streams not fed by a
 *     filtergraph (those are initialized lazily, once a frame exists),
 *   - discard input programs whose streams are all unused,
 *   - write headers for outputs whose muxer needs no streams,
 *   - print the "Stream mapping:" report.
 * Returns 0 on success, a negative AVERROR on failure.
 */
3543 static int transcode_init(void)
3545 int ret = 0, i, j, k;
3546 AVFormatContext *oc;
3549 char error[1024] = {0};
/* Attach a source_index to outputs of single-input complex filtergraphs. */
3551 for (i = 0; i < nb_filtergraphs; i++) {
3552 FilterGraph *fg = filtergraphs[i];
3553 for (j = 0; j < fg->nb_outputs; j++) {
3554 OutputFilter *ofilter = fg->outputs[j];
3555 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3557 if (fg->nb_inputs != 1)
3559 for (k = nb_input_streams-1; k >= 0 ; k--)
3560 if (fg->inputs[0]->ist == input_streams[k])
3562 ofilter->ost->source_index = k;
3566 /* init framerate emulation */
3567 for (i = 0; i < nb_input_files; i++) {
3568 InputFile *ifile = input_files[i];
/* -re: record a wallclock start per stream to pace reads in real time. */
3569 if (ifile->rate_emu)
3570 for (j = 0; j < ifile->nb_streams; j++)
3571 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3574 /* init input streams */
3575 for (i = 0; i < nb_input_streams; i++)
3576 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoder contexts opened so far before bailing. */
3577 for (i = 0; i < nb_output_streams; i++) {
3578 ost = output_streams[i];
3579 avcodec_close(ost->enc_ctx);
3584 /* open each encoder */
3585 for (i = 0; i < nb_output_streams; i++) {
3586 // skip streams fed from filtergraphs until we have a frame for them
3587 if (output_streams[i]->filter)
3590 ret = init_output_stream(output_streams[i], error, sizeof(error));
3595 /* discard unused programs */
3596 for (i = 0; i < nb_input_files; i++) {
3597 InputFile *ifile = input_files[i];
3598 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3599 AVProgram *p = ifile->ctx->programs[j];
3600 int discard = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is actually used. */
3602 for (k = 0; k < p->nb_stream_indexes; k++)
3603 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3604 discard = AVDISCARD_DEFAULT;
3607 p->discard = discard;
3611 /* write headers for files with no streams */
3612 for (i = 0; i < nb_output_files; i++) {
3613 oc = output_files[i]->ctx;
3614 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3615 ret = check_init_output_file(output_files[i], i);
3622 /* dump the stream mapping */
3623 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3624 for (i = 0; i < nb_input_streams; i++) {
3625 ist = input_streams[i];
/* Report input streams that feed complex (non-simple) filtergraphs. */
3627 for (j = 0; j < ist->nb_filters; j++) {
3628 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3629 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3630 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3631 ist->filters[j]->name);
3632 if (nb_filtergraphs > 1)
3633 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3634 av_log(NULL, AV_LOG_INFO, "\n");
3639 for (i = 0; i < nb_output_streams; i++) {
3640 ost = output_streams[i];
3642 if (ost->attachment_filename) {
3643 /* an attached file */
3644 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3645 ost->attachment_filename, ost->file_index, ost->index);
3649 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3650 /* output from a complex graph */
3651 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3652 if (nb_filtergraphs > 1)
3653 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3655 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3656 ost->index, ost->enc ? ost->enc->name : "?");
/* Ordinary mapping: input stream -> output stream. */
3660 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3661 input_streams[ost->source_index]->file_index,
3662 input_streams[ost->source_index]->st->index,
3665 if (ost->sync_ist != input_streams[ost->source_index])
3666 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3667 ost->sync_ist->file_index,
3668 ost->sync_ist->st->index);
3669 if (ost->stream_copy)
3670 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Transcoding: show codec (decoder impl) -> codec (encoder impl);
 * an implementation whose name matches the codec is shown as "native". */
3672 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3673 const AVCodec *out_codec = ost->enc;
3674 const char *decoder_name = "?";
3675 const char *in_codec_name = "?";
3676 const char *encoder_name = "?";
3677 const char *out_codec_name = "?";
3678 const AVCodecDescriptor *desc;
3681 decoder_name = in_codec->name;
3682 desc = avcodec_descriptor_get(in_codec->id);
3684 in_codec_name = desc->name;
3685 if (!strcmp(decoder_name, in_codec_name))
3686 decoder_name = "native";
3690 encoder_name = out_codec->name;
3691 desc = avcodec_descriptor_get(out_codec->id);
3693 out_codec_name = desc->name;
3694 if (!strcmp(encoder_name, out_codec_name))
3695 encoder_name = "native";
3698 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3699 in_codec_name, decoder_name,
3700 out_codec_name, encoder_name);
3702 av_log(NULL, AV_LOG_INFO, "\n");
/* Error path: the message accumulated in error[] is printed here. */
3706 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3710 transcode_init_done = 1;
3715 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3716 static int need_output(void)
3720 for (i = 0; i < nb_output_streams; i++) {
3721 OutputStream *ost = output_streams[i];
3722 OutputFile *of = output_files[ost->file_index];
3723 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are finished or whose file hit -fs (limit_filesize). */
3725 if (ost->finished ||
3726 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames reached: close every stream of this output file. */
3728 if (ost->frame_number >= ost->max_frames) {
3730 for (j = 0; j < of->ctx->nb_streams; j++)
3731 close_output_stream(output_streams[of->ost_index + j]);
3742 * Select the output stream to process.
3744 * @return selected output stream, or NULL if none available
/*
 * Picks the unfinished output stream with the smallest muxed DTS
 * (rescaled to a common time base), to keep outputs interleaved evenly.
 * Streams with no DTS yet compare as INT64_MIN so they are served first.
 */
3746 static OutputStream *choose_output(void)
3749 int64_t opts_min = INT64_MAX;
3750 OutputStream *ost_min = NULL;
3752 for (i = 0; i < nb_output_streams; i++) {
3753 OutputStream *ost = output_streams[i];
3754 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3755 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3757 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3758 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3760 if (!ost->initialized && !ost->inputs_done)
3763 if (!ost->finished && opts < opts_min) {
/* An "unavailable" stream still records its opts but yields NULL. */
3765 ost_min = ost->unavailable ? NULL : ost;
/*
 * Enable (on != 0) or disable terminal echo on stdin via termios;
 * used around interactive command entry so typed characters are visible.
 * Silently does nothing if stdin is not a tty (tcgetattr fails).
 */
3771 static void set_tty_echo(int on)
3775 if (tcgetattr(0, &tty) == 0) {
3776 if (on) tty.c_lflag |= ECHO;
3777 else tty.c_lflag &= ~ECHO;
3778 tcsetattr(0, TCSANOW, &tty);
/*
 * Poll stdin (at most every 100ms, never in daemon mode) for interactive
 * single-key commands: q(uit), +/- verbosity, s QP histogram, h hex dump
 * cycling, c/C filtergraph commands, d/D decoder debug flags, ? help.
 * Returns AVERROR_EXIT when the user asked to stop, 0 otherwise.
 */
3783 static int check_keyboard_interaction(int64_t cur_time)
3786 static int64_t last_time;
3787 if (received_nb_signals)
3788 return AVERROR_EXIT;
3789 /* read_key() returns 0 on EOF */
3790 if(cur_time - last_time >= 100000 && !run_as_daemon){
3792 last_time = cur_time;
3796 return AVERROR_EXIT;
3797 if (key == '+') av_log_set_level(av_log_get_level()+10);
3798 if (key == '-') av_log_set_level(av_log_get_level()-10);
3799 if (key == 's') qp_hist ^= 1;
3802 do_hex_dump = do_pkt_dump = 0;
3803 } else if(do_pkt_dump){
3807 av_log_set_level(AV_LOG_DEBUG);
/* 'c': send to first matching filter only; 'C': send/queue to all. */
3809 if (key == 'c' || key == 'C'){
3810 char buf[4096], target[64], command[256], arg[256] = {0};
3813 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read one command line, bounded by buf size. */
3816 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3821 fprintf(stderr, "\n");
3823 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3824 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3825 target, time, command, arg);
3826 for (i = 0; i < nb_filtergraphs; i++) {
3827 FilterGraph *fg = filtergraphs[i];
3830 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3831 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3832 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3833 } else if (key == 'c') {
3834 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3835 ret = AVERROR_PATCHWELCOME;
/* time >= 0: queue the command for later execution at that timestamp. */
3837 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3839 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3844 av_log(NULL, AV_LOG_ERROR,
3845 "Parse error, at least 3 arguments were expected, "
3846 "only %d given in string '%s'\n", n, buf);
3849 if (key == 'd' || key == 'D'){
/* 'D': cycle the debug bitmask (shift left), skipping unsupported modes. */
3852 debug = input_streams[0]->st->codec->debug<<1;
3853 if(!debug) debug = 1;
3854 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
/* 'd': read an explicit numeric debug value from the user. */
3861 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3866 fprintf(stderr, "\n");
3867 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3868 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3870 for(i=0;i<nb_input_streams;i++) {
3871 input_streams[i]->st->codec->debug = debug;
3873 for(i=0;i<nb_output_streams;i++) {
3874 OutputStream *ost = output_streams[i];
3875 ost->enc_ctx->debug = debug;
3877 if(debug) av_log_set_level(AV_LOG_DEBUG);
3878 fprintf(stderr,"debug=%d\n", debug);
/* '?' (or unknown key): print the interactive help text. */
3881 fprintf(stderr, "key function\n"
3882 "? show this help\n"
3883 "+ increase verbosity\n"
3884 "- decrease verbosity\n"
3885 "c Send command to first matching filter supporting it\n"
3886 "C Send/Queue command to all matching filters\n"
3887 "D cycle through available debug modes\n"
3888 "h dump packets/hex press to cycle through the 3 states\n"
3890 "s Show QP histogram\n"
/*
 * Per-input-file demuxer thread (used when there are multiple inputs):
 * reads packets with av_read_frame() and forwards them to the main thread
 * through f->in_thread_queue. Non-blocking sends are used for non-seekable
 * inputs; if a non-blocking send would block, it warns once and retries
 * blocking so no packet is lost. On read error/EOF the error is published
 * to the receiving side and the thread exits.
 */
3897 static void *input_thread(void *arg)
3900 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3905 ret = av_read_frame(f->ctx, &pkt);
3907 if (ret == AVERROR(EAGAIN)) {
/* Read failed: let the main thread see the error on its next recv. */
3912 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3915 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3916 if (flags && ret == AVERROR(EAGAIN)) {
/* Queue full in non-blocking mode: fall back to a blocking send. */
3918 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3919 av_log(f->ctx, AV_LOG_WARNING,
3920 "Thread message queue blocking; consider raising the "
3921 "thread_queue_size option (current value: %d)\n",
3922 f->thread_queue_size);
3925 if (ret != AVERROR_EOF)
3926 av_log(f->ctx, AV_LOG_ERROR,
3927 "Unable to send packet to main thread: %s\n",
/* Send failed: drop the packet and propagate the error to the receiver. */
3929 av_packet_unref(&pkt);
3930 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Shut down all demuxer threads: signal EOF to the sending side, drain and
 * unref any packets still queued, join each thread, then free the queue.
 */
3938 static void free_input_threads(void)
3942 for (i = 0; i < nb_input_files; i++) {
3943 InputFile *f = input_files[i];
/* Skip files that never had a thread/queue (e.g. single-input runs). */
3946 if (!f || !f->in_thread_queue)
3948 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3949 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3950 av_packet_unref(&pkt);
3952 pthread_join(f->thread, NULL);
3954 av_thread_message_queue_free(&f->in_thread_queue);
/*
 * Start one demuxer thread per input file, each with a message queue of
 * f->thread_queue_size AVPackets. Skipped entirely for a single input
 * (packets are then read directly on the main thread).
 * Returns 0 on success, negative AVERROR on allocation/thread failure.
 */
3958 static int init_input_threads(void)
3962 if (nb_input_files == 1)
3965 for (i = 0; i < nb_input_files; i++) {
3966 InputFile *f = input_files[i];
/* Non-seekable real inputs get non-blocking queue sends to avoid stalls;
 * "lavfi" is exempted even though it has no pb. */
3968 if (f->ctx->pb ? !f->ctx->pb->seekable :
3969 strcmp(f->ctx->iformat->name, "lavfi"))
3970 f->non_blocking = 1;
3971 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3972 f->thread_queue_size, sizeof(AVPacket));
3976 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3977 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3978 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno, hence AVERROR(ret). */
3979 return AVERROR(ret);
/*
 * Receive one packet from this input file's demuxer-thread queue.
 * NOTE(review): the condition selecting AV_THREAD_MESSAGE_NONBLOCK is on
 * an omitted line; presumably it keys off f->non_blocking — confirm in
 * the full source.
 */
3985 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3987 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3989 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Fetch the next packet for input file f. With framerate emulation (-re)
 * it first paces reads against wallclock: if any stream's DTS (converted
 * to microseconds) is still ahead of elapsed real time, returns
 * AVERROR(EAGAIN). With multiple inputs the packet comes from the demuxer
 * thread's queue, otherwise directly from av_read_frame().
 */
3993 static int get_input_packet(InputFile *f, AVPacket *pkt)
3997 for (i = 0; i < f->nb_streams; i++) {
3998 InputStream *ist = input_streams[f->ist_index + i];
3999 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4000 int64_t now = av_gettime_relative() - ist->start;
4002 return AVERROR(EAGAIN);
4007 if (nb_input_files > 1)
4008 return get_input_packet_mt(f, pkt);
4010 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input reported EAGAIN during this iteration). */
4013 static int got_eagain(void)
4016 for (i = 0; i < nb_output_streams; i++)
4017 if (output_streams[i]->unavailable)
/* Clear all per-file eagain and per-output-stream unavailable flags so
 * the next transcode iteration retries every input/output. */
4022 static void reset_eagain(void)
4025 for (i = 0; i < nb_input_files; i++)
4026 input_files[i]->eagain = 0;
4027 for (i = 0; i < nb_output_streams; i++)
4028 output_streams[i]->unavailable = 0;
4031 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/*
 * Compares tmp (in tmp_time_base) against *duration (in time_base) via
 * av_compare_ts(); stores the larger into *duration and returns the time
 * base that now applies to it.
 */
4032 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4033 AVRational time_base)
4039 return tmp_time_base;
4042 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4045 return tmp_time_base;
/*
 * Rewind input file `ifile` to its start for -stream_loop: seek, flush all
 * active decoders, then compute the file's total duration (max_pts -
 * min_pts plus the last frame's length) so later packets can be offset by
 * whole-file multiples. Audio last-frame length comes from nb_samples;
 * video falls back to 1/framerate, 1/avg_frame_rate, or a single tick.
 * Returns the av_seek_frame() result on failure.
 */
4051 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4054 AVCodecContext *avctx;
4055 int i, ret, has_audio = 0;
4056 int64_t duration = 0;
4058 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush decoders and detect decoded audio (nb_samples > 0). */
4062 for (i = 0; i < ifile->nb_streams; i++) {
4063 ist = input_streams[ifile->ist_index + i];
4064 avctx = ist->dec_ctx;
4067 if (ist->decoding_needed) {
4068 process_input_packet(ist, NULL, 1);
4069 avcodec_flush_buffers(avctx);
4072 /* duration is the length of the last frame in a stream
4073 * when audio stream is present we don't care about
4074 * last video frame length because it's not defined exactly */
4075 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: derive each stream's duration and keep the maximum. */
4079 for (i = 0; i < ifile->nb_streams; i++) {
4080 ist = input_streams[ifile->ist_index + i];
4081 avctx = ist->dec_ctx;
4084 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4085 AVRational sample_rate = {1, avctx->sample_rate};
4087 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4091 if (ist->framerate.num) {
4092 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4093 } else if (ist->st->avg_frame_rate.num) {
4094 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4095 } else duration = 1;
4097 if (!ifile->duration)
4098 ifile->time_base = ist->st->time_base;
4099 /* the total duration of the stream, max_pts - min_pts is
4100 * the duration of the stream without the last frame */
4101 duration += ist->max_pts - ist->min_pts;
4102 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Positive loop counts are decremented each rewind (0/negative = infinite,
 * presumably — the decrement itself is on an omitted line). */
4106 if (ifile->loop > 0)
/*
 * Read and process one packet from input file `file_index`:
 * handle EOF/looping, timestamp wrap correction, stream-global side data
 * injection, ts_offset/ts_scale application, discontinuity detection, and
 * finally hand the packet to process_input_packet(). Return values:
 */
4114 * - 0 -- one packet was read and processed
4115 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4116 * this function should be called again
4117 * - AVERROR_EOF -- this function should not be called again
4119 static int process_input(int file_index)
4121 InputFile *ifile = input_files[file_index];
4122 AVFormatContext *is;
4130 ret = get_input_packet(ifile, &pkt);
4132 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on read failure, seek back to start and retry once. */
4136 if (ret < 0 && ifile->loop) {
4137 if ((ret = seek_to_start(ifile, is)) < 0)
4139 ret = get_input_packet(ifile, &pkt);
4140 if (ret == AVERROR(EAGAIN)) {
4146 if (ret != AVERROR_EOF) {
4147 print_error(is->filename, ret);
/* EOF handling: flush every decoder of this file once. */
4152 for (i = 0; i < ifile->nb_streams; i++) {
4153 ist = input_streams[ifile->ist_index + i];
4154 if (ist->decoding_needed) {
4155 ret = process_input_packet(ist, NULL, 0);
4160 /* mark all outputs that don't go through lavfi as finished */
4161 for (j = 0; j < nb_output_streams; j++) {
4162 OutputStream *ost = output_streams[j];
4164 if (ost->source_index == ifile->ist_index + i &&
4165 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4166 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the caller keeps draining other inputs. */
4170 ifile->eof_reached = 1;
4171 return AVERROR(EAGAIN);
4177 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4178 is->streams[pkt.stream_index]);
4180 /* the following test is needed in case new streams appear
4181 dynamically in stream : we ignore them */
4182 if (pkt.stream_index >= ifile->nb_streams) {
4183 report_new_stream(file_index, &pkt);
4184 goto discard_packet;
4187 ist = input_streams[ifile->ist_index + pkt.stream_index];
4189 ist->data_size += pkt.size;
4193 goto discard_packet;
4195 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4196 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4201 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4202 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4203 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4204 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4205 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4206 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4207 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4208 av_ts2str(input_files[ist->file_index]->ts_offset),
4209 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* One-time timestamp wrap correction for formats with < 64 pts bits. */
4212 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4213 int64_t stime, stime2;
4214 // Correcting starttime based on the enabled streams
4215 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4216 // so we instead do it here as part of discontinuity handling
4217 if ( ist->next_dts == AV_NOPTS_VALUE
4218 && ifile->ts_offset == -is->start_time
4219 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4220 int64_t new_start_time = INT64_MAX;
4221 for (i=0; i<is->nb_streams; i++) {
4222 AVStream *st = is->streams[i];
4223 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4225 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4227 if (new_start_time > is->start_time) {
4228 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4229 ifile->ts_offset = -new_start_time;
4233 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4234 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4235 ist->wrap_correction_done = 1;
/* Timestamps in the upper half of the wrap range are unwrapped; wrap
 * correction stays pending until both dts and pts are past the window. */
4237 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4238 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4239 ist->wrap_correction_done = 0;
4241 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4242 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4243 ist->wrap_correction_done = 0;
4247 /* add the stream-global side data to the first packet */
4248 if (ist->nb_packets == 1) {
4249 for (i = 0; i < ist->st->nb_side_data; i++) {
4250 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't duplicate side data the packet already carries. */
4253 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Autorotation consumes the display matrix; don't forward it. */
4255 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4258 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4262 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset, then the -itsscale factor. */
4266 if (pkt.dts != AV_NOPTS_VALUE)
4267 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4268 if (pkt.pts != AV_NOPTS_VALUE)
4269 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4271 if (pkt.pts != AV_NOPTS_VALUE)
4272 pkt.pts *= ist->ts_scale;
4273 if (pkt.dts != AV_NOPTS_VALUE)
4274 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check: first dts of this stream vs. the
 * file-wide last seen timestamp (only for AVFMT_TS_DISCONT formats). */
4276 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4277 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4278 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4279 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4280 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4281 int64_t delta = pkt_dts - ifile->last_ts;
4282 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4283 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4284 ifile->ts_offset -= delta;
4285 av_log(NULL, AV_LOG_DEBUG,
4286 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4287 delta, ifile->ts_offset);
4288 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4289 if (pkt.pts != AV_NOPTS_VALUE)
4290 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration. */
4294 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4295 if (pkt.pts != AV_NOPTS_VALUE) {
4296 pkt.pts += duration;
4297 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4298 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4301 if (pkt.dts != AV_NOPTS_VALUE)
4302 pkt.dts += duration;
/* Intra-stream discontinuity check against the predicted next_dts. */
4304 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4305 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4306 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4307 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4309 int64_t delta = pkt_dts - ist->next_dts;
4310 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4311 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4312 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4313 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4314 ifile->ts_offset -= delta;
4315 av_log(NULL, AV_LOG_DEBUG,
4316 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4317 delta, ifile->ts_offset);
4318 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4319 if (pkt.pts != AV_NOPTS_VALUE)
4320 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-DISCONT formats: drop timestamps that exceed the error threshold
 * instead of re-offsetting the whole file. */
4323 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4324 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4325 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4326 pkt.dts = AV_NOPTS_VALUE;
4328 if (pkt.pts != AV_NOPTS_VALUE){
4329 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4330 delta = pkt_pts - ist->next_dts;
4331 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4332 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4333 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4334 pkt.pts = AV_NOPTS_VALUE;
4340 if (pkt.dts != AV_NOPTS_VALUE)
4341 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4344 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4345 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4346 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4347 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4348 av_ts2str(input_files[ist->file_index]->ts_offset),
4349 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video subtitle overlays advancing even without new subtitles. */
4352 sub2video_heartbeat(ist, pkt.pts);
4354 process_input_packet(ist, &pkt, 0);
4357 av_packet_unref(&pkt);
4363 * Perform a step of transcoding for the specified filter graph.
4365 * @param[in] graph filter graph to consider
4366 * @param[out] best_ist input stream where a frame would allow to continue
4367 * @return 0 for success, <0 for error
4369 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4372 int nb_requests, nb_requests_max = 0;
4373 InputFilter *ifilter;
/* Ask the graph to produce output on its oldest sink. */
4377 ret = avfilter_graph_request_oldest(graph->graph);
4379 return reap_filters(0);
/* EOF from the graph: drain remaining frames and close its outputs. */
4381 if (ret == AVERROR_EOF) {
4382 ret = reap_filters(1);
4383 for (i = 0; i < graph->nb_outputs; i++)
4384 close_output_stream(graph->outputs[i]->ost);
4387 if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the input whose buffersrc starved the most requests;
 * feeding it is most likely to unblock the graph. */
4390 for (i = 0; i < graph->nb_inputs; i++) {
4391 ifilter = graph->inputs[i];
/* Skip inputs whose file can't currently deliver packets. */
4393 if (input_files[ist->file_index]->eagain ||
4394 input_files[ist->file_index]->eof_reached)
4396 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4397 if (nb_requests > nb_requests_max) {
4398 nb_requests_max = nb_requests;
/* No feedable input found: mark this graph's outputs unavailable. */
4404 for (i = 0; i < graph->nb_outputs; i++)
4405 graph->outputs[i]->ost->unavailable = 1;
4411 * Run a single step of transcoding.
4413 * @return 0 for success, <0 for error
/*
 * One iteration of the main loop: choose the neediest output stream,
 * (re)configure its filtergraph if all input formats are now known, decide
 * which input stream to read from, read/process one packet, then reap any
 * filtered frames.
 */
4415 static int transcode_step(void)
4418 InputStream *ist = NULL;
4421 ost = choose_output();
4428 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtergraph exists but is not yet configured: configure it once every
 * input filter has seen a frame (formats known). */
4432 if (ost->filter && !ost->filter->graph->graph) {
4433 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4434 ret = configure_filtergraph(ost->filter->graph);
4436 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* Configured graph: let the graph pick the best input stream to feed. */
4442 if (ost->filter && ost->filter->graph->graph) {
4443 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4447 } else if (ost->filter) {
/* Unconfigured graph: feed any input that hasn't produced output yet. */
4449 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4450 InputFilter *ifilter = ost->filter->graph->inputs[i];
4451 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* All inputs exhausted for this graph. */
4457 ost->inputs_done = 1;
/* No filter: read straight from the mapped source stream. */
4461 av_assert0(ost->source_index >= 0);
4462 ist = input_streams[ost->source_index];
4465 ret = process_input(ist->file_index);
4466 if (ret == AVERROR(EAGAIN)) {
4467 if (input_files[ist->file_index]->eagain)
4468 ost->unavailable = 1;
4473 return ret == AVERROR_EOF ? 0 : ret;
4475 return reap_filters(0);
4479 * The following code is the main loop of the file converter
/*
 * Driver: initialize, loop transcode_step() until interrupted or no output
 * is needed, flush decoders, write trailers, close codecs, and free
 * per-stream resources. Returns 0 on success, negative AVERROR otherwise.
 */
4481 static int transcode(void)
4484 AVFormatContext *os;
4487 int64_t timer_start;
4488 int64_t total_packets_written = 0;
4490 ret = transcode_init();
4494 if (stdin_interaction) {
4495 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4498 timer_start = av_gettime_relative();
4501 if ((ret = init_input_threads()) < 0)
/* Main loop: one packet / one filtergraph step per iteration. */
4505 while (!received_sigterm) {
4506 int64_t cur_time= av_gettime_relative();
4508 /* if 'q' pressed, exits */
4509 if (stdin_interaction)
4510 if (check_keyboard_interaction(cur_time) < 0)
4513 /* check if there's any stream where output is still needed */
4514 if (!need_output()) {
4515 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4519 ret = transcode_step();
4520 if (ret < 0 && ret != AVERROR_EOF) {
4522 av_strerror(ret, errbuf, sizeof(errbuf));
4524 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4528 /* dump report by using the output first video and audio streams */
4529 print_report(0, timer_start, cur_time);
4532 free_input_threads();
4535 /* at the end of stream, we must flush the decoder buffers */
4536 for (i = 0; i < nb_input_streams; i++) {
4537 ist = input_streams[i];
4538 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4539 process_input_packet(ist, NULL, 0);
4546 /* write the trailer if needed and close file */
4547 for (i = 0; i < nb_output_files; i++) {
4548 os = output_files[i]->ctx;
/* Never wrote a header => no packets arrived; don't write a trailer. */
4549 if (!output_files[i]->header_written) {
4550 av_log(NULL, AV_LOG_ERROR,
4551 "Nothing was written into output file %d (%s), because "
4552 "at least one of its streams received no packets.\n",
4556 if ((ret = av_write_trailer(os)) < 0) {
4557 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4563 /* dump report by using the first video and audio streams */
4564 print_report(1, timer_start, av_gettime_relative());
4566 /* close each encoder */
4567 for (i = 0; i < nb_output_streams; i++) {
4568 ost = output_streams[i];
4569 if (ost->encoding_needed) {
4570 av_freep(&ost->enc_ctx->stats_in);
4572 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was muxed at all. */
4575 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4576 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4580 /* close each decoder */
4581 for (i = 0; i < nb_input_streams; i++) {
4582 ist = input_streams[i];
4583 if (ist->decoding_needed) {
4584 avcodec_close(ist->dec_ctx);
4585 if (ist->hwaccel_uninit)
4586 ist->hwaccel_uninit(ist->dec_ctx);
4590 av_buffer_unref(&hw_device_ctx);
/* Cleanup path (also reached on error): stop threads, free per-stream
 * state; remaining resources are released by ffmpeg_cleanup at exit. */
4597 free_input_threads();
4600 if (output_streams) {
4601 for (i = 0; i < nb_output_streams; i++) {
4602 ost = output_streams[i];
4605 if (fclose(ost->logfile))
4606 av_log(NULL, AV_LOG_ERROR,
4607 "Error closing logfile, loss of information possible: %s\n",
4608 av_err2str(AVERROR(errno)));
4609 ost->logfile = NULL;
4611 av_freep(&ost->forced_kf_pts);
4612 av_freep(&ost->apad);
4613 av_freep(&ost->disposition);
4614 av_dict_free(&ost->encoder_opts);
4615 av_dict_free(&ost->sws_dict);
4616 av_dict_free(&ost->swr_opts);
4617 av_dict_free(&ost->resample_opts);
/*
 * Return the process's consumed user CPU time in microseconds, via
 * getrusage() on POSIX, GetProcessTimes() on Windows (FILETIME is in
 * 100ns units, hence /10), or wallclock as a last resort.
 */
4625 static int64_t getutime(void)
4628 struct rusage rusage;
4630 getrusage(RUSAGE_SELF, &rusage);
4631 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4632 #elif HAVE_GETPROCESSTIMES
4634 FILETIME c, e, k, u;
4635 proc = GetCurrentProcess();
4636 GetProcessTimes(proc, &c, &e, &k, &u);
4637 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4639 return av_gettime_relative();
/*
 * Return the process's peak memory usage in bytes: ru_maxrss (reported in
 * KiB, hence *1024) on POSIX, PeakPagefileUsage on Windows; platforms with
 * neither fall through to the (omitted) default branch.
 */
4643 static int64_t getmaxrss(void)
4645 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4646 struct rusage rusage;
4647 getrusage(RUSAGE_SELF, &rusage);
4648 return (int64_t)rusage.ru_maxrss * 1024;
4649 #elif HAVE_GETPROCESSMEMORYINFO
4651 PROCESS_MEMORY_COUNTERS memcounters;
4652 proc = GetCurrentProcess();
4653 memcounters.cb = sizeof(memcounters);
4654 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4655 return memcounters.PeakPagefileUsage;
/* No-op log callback installed by "-d" (daemon mode) to silence output. */
4661 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Entry point: register cleanup, configure logging, register all codecs /
 * devices / filters, initialize networking, parse the command line and
 * open inputs/outputs, run transcode(), then report benchmarks and exit
 * with 255 on signal, 69 on excessive decode-error rate (per the
 * max_error_rate check), or main_return_code.
 */
4665 int main(int argc, char **argv)
4672 register_exit(ffmpeg_cleanup);
4674 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4676 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4677 parse_loglevel(argc, argv, options);
/* "-d" as first argument: run detached/quiet (daemon mode). */
4679 if(argc>1 && !strcmp(argv[1], "-d")){
4681 av_log_set_callback(log_callback_null);
4686 avcodec_register_all();
4688 avdevice_register_all();
4690 avfilter_register_all();
4692 avformat_network_init();
4694 show_banner(argc, argv, options);
4696 /* parse options and open all input/output files */
4697 ret = ffmpeg_parse_options(argc, argv);
4701 if (nb_output_files <= 0 && nb_input_files == 0) {
4703 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4707 /* file converter / grab */
4708 if (nb_output_files <= 0) {
4709 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4713 // if (nb_input_files == 0) {
4714 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* RTP outputs are excluded from the wanted-stream accounting below
 * (presumably sets want_sdp / stream counters on omitted lines). */
4718 for (i = 0; i < nb_output_files; i++) {
4719 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4723 current_time = ti = getutime();
4724 if (transcode() < 0)
4726 ti = getutime() - ti;
/* -benchmark: report user CPU time consumed by the whole run. */
4728 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4730 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4731 decode_error_stat[0], decode_error_stat[1]);
/* Fail if the fraction of decode errors exceeds -max_error_rate. */
4732 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4735 exit_program(received_nb_signals ? 255 : main_return_code);
4736 return main_return_code;