/*
 * Copyright (c) 2000-2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * multimedia converter based on the FFmpeg libraries
 */
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Identification used by cmdutils (banner, version output). */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats); opened lazily. */
112 static FILE *vstats_file;
/* Constant names usable in -force_key_frames expressions (initializer
 * elided in this chunk). */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Runtime state / statistics counters. */
128 static int run_as_daemon = 0;
129 static int nb_frames_dup = 0;
/* Warn once each time the dup count crosses this (doubled) threshold. */
130 static unsigned dup_warning = 1000;
131 static int nb_frames_drop = 0;
/* decode_error_stat[0]: ok frames, [1]: errored frames (indexing inferred
 * from upstream ffmpeg — TODO confirm, increment sites not visible here). */
132 static int64_t decode_error_stat[2];
134 static int want_sdp = 1;
136 static int current_time;
/* Destination for -progress reports. */
137 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, allocated in do_subtitle_out(). */
139 static uint8_t *subtitle_out;
/* Global arrays of all input/output files and streams, shared with
 * ffmpeg_opt.c / ffmpeg_filter.c. */
141 InputStream **input_streams = NULL;
142 int nb_input_streams = 0;
143 InputFile **input_files = NULL;
144 int nb_input_files = 0;
146 OutputStream **output_streams = NULL;
147 int nb_output_streams = 0;
148 OutputFile **output_files = NULL;
149 int nb_output_files = 0;
151 FilterGraph **filtergraphs;
156 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored by term_exit_sigsafe(). */
157 static struct termios oldtty;
158 static int restore_tty;
162 static void free_input_threads(void);
/* sub2video: */
166 Convert subtitles to video with alpha to insert them in filter graphs.
167 This is a temporary solution until libavfilter gets real subtitles support.
/**
 * Prepare ist->sub2video.frame as a fully transparent RGB32 canvas sized to
 * the decoder dimensions (falling back to the configured sub2video size).
 * Returns the av_frame_get_buffer() error code on failure (error path lines
 * are elided in this chunk).
 */
170 static int sub2video_get_blank_frame(InputStream *ist)
173 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before re-sizing/re-allocating. */
175 av_frame_unref(frame);
176 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
177 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
178 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the frame buffer. */
179 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* All-zero RGB32 == fully transparent black. */
181 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/**
 * Blit one paletted bitmap subtitle rectangle into the RGB32 canvas.
 * Rectangles that are not bitmaps or that fall outside the w x h canvas are
 * warned about and skipped (the early-return lines are elided here).
 */
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
188 uint32_t *pal, *dst2;
192 if (r->type != SUBTITLE_BITMAP) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
196 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
198 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
203 dst += r->y * dst_linesize + r->x * 4;
/* data[1] holds the palette; each source byte indexes into it. */
205 pal = (uint32_t *)r->data[1];
206 for (y = 0; y < r->h; y++) {
207 dst2 = (uint32_t *)dst;
209 for (x = 0; x < r->w; x++)
210 *(dst2++) = pal[*(src2++)];
212 src += r->linesize[0];
/**
 * Timestamp the current sub2video canvas and push it into every filter
 * this input stream feeds. KEEP_REF keeps ownership of the frame with the
 * stream so the same canvas can be re-sent by the heartbeat.
 */
216 static void sub2video_push_ref(InputStream *ist, int64_t pts)
218 AVFrame *frame = ist->sub2video.frame;
/* Canvas must already have been allocated by sub2video_get_blank_frame(). */
221 av_assert1(frame->data[0]);
222 ist->sub2video.last_pts = frame->pts = pts;
223 for (i = 0; i < ist->nb_filters; i++)
224 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
225 AV_BUFFERSRC_FLAG_KEEP_REF |
226 AV_BUFFERSRC_FLAG_PUSH);
/**
 * Render an AVSubtitle onto a fresh blank canvas and push it to the filters.
 * With sub == NULL (heartbeat/flush path) an empty canvas is sent; the
 * NULL-handling branch is elided in this chunk.
 */
229 void sub2video_update(InputStream *ist, AVSubtitle *sub)
231 AVFrame *frame = ist->sub2video.frame;
235 int64_t pts, end_pts;
/* Display times are in ms relative to sub->pts (AV_TIME_BASE units);
 * convert both ends into the input stream's time base. */
240 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
241 AV_TIME_BASE_Q, ist->st->time_base);
242 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
243 AV_TIME_BASE_Q, ist->st->time_base);
244 num_rects = sub->num_rects;
246 pts = ist->sub2video.end_pts;
250 if (sub2video_get_blank_frame(ist) < 0) {
251 av_log(ist->dec_ctx, AV_LOG_ERROR,
252 "Impossible to get a blank canvas.\n");
255 dst = frame->data [0];
256 dst_linesize = frame->linesize[0];
/* Composite every rectangle, then hand the canvas to the filtergraph. */
257 for (i = 0; i < num_rects; i++)
258 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
259 sub2video_push_ref(ist, pts);
260 ist->sub2video.end_pts = end_pts;
/**
 * Keep sub2video streams of the same input file "alive": re-send the current
 * subtitle canvas so video filters (e.g. overlay) waiting on the subtitle
 * input do not stall while video frames accumulate.
 */
263 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
265 InputFile *infile = input_files[ist->file_index];
269 /* When a frame is read from a file, examine all sub2video streams in
270 the same file and send the sub2video frame again. Otherwise, decoded
271 video frames could be accumulating in the filter graph while a filter
272 (possibly overlay) is desperately waiting for a subtitle frame. */
273 for (i = 0; i < infile->nb_streams; i++) {
274 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Only streams that have a sub2video canvas participate. */
275 if (!ist2->sub2video.frame)
277 /* subtitles seem to be usually muxed ahead of other streams;
278 if not, subtracting a larger time here is necessary */
279 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
280 /* do not send the heartbeat frame if the subtitle is already ahead */
281 if (pts2 <= ist2->sub2video.last_pts)
/* Current subtitle expired (or never drawn): send a blank canvas. */
283 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
284 sub2video_update(ist2, NULL);
/* Count outstanding frame requests from downstream filters; the push
 * below is presumably gated on nb_reqs (guard line elided here). */
285 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
286 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
288 sub2video_push_ref(ist2, pts2);
/**
 * End-of-stream handling for a sub2video input: clear the canvas if a
 * subtitle is still displayed, then send EOF (NULL frame) to every filter.
 */
292 static void sub2video_flush(InputStream *ist)
296 if (ist->sub2video.end_pts < INT64_MAX)
297 sub2video_update(ist, NULL);
298 for (i = 0; i < ist->nb_filters; i++)
/* NULL frame == EOF marker for a buffersrc. */
299 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
302 /* end of sub2video hack */
/**
 * Restore the terminal to its saved state. Async-signal-safe by design
 * (only tcsetattr plus an av_log that flushes); called from signal/exit
 * paths.
 */
304 static void term_exit_sigsafe(void)
/* Put back the attributes saved before raw-mode was enabled. */
308 tcsetattr (0, TCSANOW, &oldtty);
/* Empty QUIET log — upstream uses this to flush/reset the log state. */
314 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/termination state shared between signal handlers and the main
 * loop; volatile because handlers write them asynchronously. */
318 static volatile int received_sigterm = 0;
319 static volatile int received_nb_signals = 0;
/* Set once transcode initialization completed; see decode_interrupt_cb(). */
320 static volatile int transcode_init_done = 0;
/* Lets the Windows CtrlHandler know the main thread finished cleanup. */
321 static volatile int ffmpeg_exited = 0;
322 static int main_return_code = 0;
/* Signal handler: record the signal and request shutdown; after more than
 * three signals, bail out hard. Only async-signal-safe calls (write). */
325 sigterm_handler(int sig)
327 received_sigterm = sig;
328 received_nb_signals++;
330 if(received_nb_signals > 3) {
/* write(2) is async-signal-safe, fprintf is not. */
331 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
332 strlen("Received > 3 system signals, hard exiting\n"));
/* Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the code understands. */
338 #if HAVE_SETCONSOLECTRLHANDLER
339 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
341 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT (case CTRL_C_EVENT elided). */
346 case CTRL_BREAK_EVENT:
347 sigterm_handler(SIGINT);
350 case CTRL_CLOSE_EVENT:
351 case CTRL_LOGOFF_EVENT:
352 case CTRL_SHUTDOWN_EVENT:
353 sigterm_handler(SIGTERM);
354 /* Basically, with these 3 events, when we return from this method the
355 process is hard terminated, so stall as long as we need to
356 to try and let the main thread(s) clean up and gracefully terminate
357 (we have at most 5 seconds, but should be done far before that). */
358 while (!ffmpeg_exited) {
364 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): fragment of the terminal/signal setup routine (term_init in
 * upstream ffmpeg — the function header line is not visible in this chunk).
 * Puts stdin into raw-ish mode for interactive key handling and installs
 * the termination signal handlers. */
373 if (!run_as_daemon && stdin_interaction) {
375 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so single keys arrive raw. */
379 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
380 |INLCR|IGNCR|ICRNL|IXON);
381 tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
382 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
383 tty.c_cflag &= ~(CSIZE|PARENB);
388 tcsetattr (0, TCSANOW, &tty);
390 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
394 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
395 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* CPU-time limit exceeded also triggers an orderly shutdown. */
397 signal(SIGXCPU, sigterm_handler);
399 #if HAVE_SETCONSOLECTRLHANDLER
400 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404 /* read a key without blocking */
/* POSIX path uses select() with (presumably) a zero timeout for a
 * non-blocking poll of stdin; Windows path peeks the console/pipe. */
405 static int read_key(void)
417 n = select(1, &rfds, NULL, NULL, &tv);
426 # if HAVE_PEEKNAMEDPIPE
428 static HANDLE input_handle;
431 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode failing means stdin is a pipe, not a console. */
432 is_pipe = !GetConsoleMode(input_handle, &dw);
436 /* When running under a GUI, you will end here. */
437 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
438 // input pipe may have been closed by the program that ran ffmpeg
456 static int decode_interrupt_cb(void *ctx)
458 return received_nb_signals > transcode_init_done;
461 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/**
 * Global teardown, registered as the exit handler: frees filtergraphs,
 * output files/streams, input files/streams and all per-stream resources,
 * then reports how the run ended. `ret` is the process exit status.
 * (Several guard/brace lines are elided in this chunk.)
 */
463 static void ffmpeg_cleanup(int ret)
468 int maxrss = getmaxrss() / 1024;
469 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: drain queued frames/subtitles, then free --- */
472 for (i = 0; i < nb_filtergraphs; i++) {
473 FilterGraph *fg = filtergraphs[i];
474 avfilter_graph_free(&fg->graph);
475 for (j = 0; j < fg->nb_inputs; j++) {
/* Frames still queued for this filter input must be freed one by one. */
476 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
478 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
479 sizeof(frame), NULL);
480 av_frame_free(&frame);
482 av_fifo_free(fg->inputs[j]->frame_queue);
483 if (fg->inputs[j]->ist->sub2video.sub_queue) {
484 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
486 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
487 &sub, sizeof(sub), NULL);
488 avsubtitle_free(&sub);
490 av_fifo_free(fg->inputs[j]->ist->sub2video.sub_queue);
492 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
493 av_freep(&fg->inputs[j]->name);
494 av_freep(&fg->inputs[j]);
496 av_freep(&fg->inputs);
497 for (j = 0; j < fg->nb_outputs; j++) {
498 av_freep(&fg->outputs[j]->name);
499 av_freep(&fg->outputs[j]->formats);
500 av_freep(&fg->outputs[j]->channel_layouts);
501 av_freep(&fg->outputs[j]->sample_rates);
502 av_freep(&fg->outputs[j]);
504 av_freep(&fg->outputs);
505 av_freep(&fg->graph_desc);
507 av_freep(&filtergraphs[i]);
509 av_freep(&filtergraphs);
511 av_freep(&subtitle_out);
/* --- output files: close muxer contexts and option dictionaries --- */
514 for (i = 0; i < nb_output_files; i++) {
515 OutputFile *of = output_files[i];
/* Only formats that own a file need their pb closed (AVFMT_NOFILE). */
520 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
522 avformat_free_context(s);
523 av_dict_free(&of->opts);
525 av_freep(&output_files[i]);
/* --- output streams: free per-stream encoder/bsf/queue state --- */
527 for (i = 0; i < nb_output_streams; i++) {
528 OutputStream *ost = output_streams[i];
533 for (j = 0; j < ost->nb_bitstream_filters; j++)
534 av_bsf_free(&ost->bsf_ctx[j]);
535 av_freep(&ost->bsf_ctx);
536 av_freep(&ost->bsf_extradata_updated);
538 av_frame_free(&ost->filtered_frame);
539 av_frame_free(&ost->last_frame);
540 av_dict_free(&ost->encoder_opts);
542 av_parser_close(ost->parser);
543 avcodec_free_context(&ost->parser_avctx);
545 av_freep(&ost->forced_keyframes);
546 av_expr_free(ost->forced_keyframes_pexpr);
547 av_freep(&ost->avfilter);
548 av_freep(&ost->logfile_prefix);
550 av_freep(&ost->audio_channels_map);
551 ost->audio_channels_mapped = 0;
553 av_dict_free(&ost->sws_dict);
555 avcodec_free_context(&ost->enc_ctx);
556 avcodec_parameters_free(&ost->ref_par);
558 if (ost->muxing_queue) {
/* Unreference packets that never reached the muxer. */
559 while (av_fifo_size(ost->muxing_queue)) {
561 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
562 av_packet_unref(&pkt);
564 av_fifo_freep(&ost->muxing_queue);
567 av_freep(&output_streams[i]);
/* --- input side: stop reader threads before closing their files --- */
570 free_input_threads();
572 for (i = 0; i < nb_input_files; i++) {
573 avformat_close_input(&input_files[i]->ctx);
574 av_freep(&input_files[i]);
576 for (i = 0; i < nb_input_streams; i++) {
577 InputStream *ist = input_streams[i];
579 av_frame_free(&ist->decoded_frame);
580 av_frame_free(&ist->filter_frame);
581 av_dict_free(&ist->decoder_opts);
582 avsubtitle_free(&ist->prev_sub.subtitle);
583 av_frame_free(&ist->sub2video.frame);
584 av_freep(&ist->filters);
585 av_freep(&ist->hwaccel_device);
586 av_freep(&ist->dts_buffer);
588 avcodec_free_context(&ist->dec_ctx);
590 av_freep(&input_streams[i]);
/* fclose can fail on flush; report it so the user knows stats may be cut. */
594 if (fclose(vstats_file))
595 av_log(NULL, AV_LOG_ERROR,
596 "Error closing vstats file, loss of information possible: %s\n",
597 av_err2str(AVERROR(errno)));
599 av_freep(&vstats_filename);
601 av_freep(&input_streams);
602 av_freep(&input_files);
603 av_freep(&output_streams);
604 av_freep(&output_files);
608 avformat_network_deinit();
/* Final status line: distinguish signal exit from a conversion failure. */
610 if (received_sigterm) {
611 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
612 (int) received_sigterm);
613 } else if (ret && transcode_init_done) {
614 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
620 void remove_avoptions(AVDictionary **a, AVDictionary *b)
622 AVDictionaryEntry *t = NULL;
624 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
625 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/**
 * Abort with a fatal log if any unconsumed option remains in m — i.e. the
 * user passed an option no component recognized. (The termination call
 * after the log is elided in this chunk.)
 */
629 void assert_avoptions(AVDictionary *m)
631 AVDictionaryEntry *t;
/* Any remaining entry means an unrecognized option. */
632 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
633 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Report that codec c (encoder or decoder per `encoder`) is experimental
 * and terminate; the body is elided in this chunk. */
638 static void abort_codec_experimental(AVCodec *c, int encoder)
/**
 * With -benchmark_all: log the CPU time spent since the previous call,
 * labeled by the printf-style fmt/args. A NULL fmt only resets the
 * reference time (reset line elided in this chunk).
 */
643 static void update_benchmark(const char *fmt, ...)
645 if (do_benchmark_all) {
646 int64_t t = getutime();
/* Render the caller's label before logging the delta. */
652 vsnprintf(buf, sizeof(buf), fmt, va);
654 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
660 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
663 for (i = 0; i < nb_output_streams; i++) {
664 OutputStream *ost2 = output_streams[i];
665 ost2->finished |= ost == ost2 ? this_stream : others;
/**
 * Hand one encoded packet to the muxer of output file `of`, or buffer it in
 * ost->muxing_queue while the muxer header has not been written yet.
 * Performs timestamp rescaling to the stream time base, DTS monotonicity
 * repair and per-stream statistics. Consumes (unrefs) pkt on every path.
 * (Several guard/brace lines are elided in this chunk.)
 */
669 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
671 AVFormatContext *s = of->ctx;
672 AVStream *st = ost->st;
675 if (!of->header_written) {
676 AVPacket tmp_pkt = {0};
677 /* the muxer is not initialized yet, buffer the packet */
/* Grow the queue geometrically, capped by -max_muxing_queue_size. */
678 if (!av_fifo_space(ost->muxing_queue)) {
679 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
680 ost->max_muxing_queue_size);
681 if (new_size <= av_fifo_size(ost->muxing_queue)) {
682 av_log(NULL, AV_LOG_ERROR,
683 "Too many packets buffered for output stream %d:%d.\n",
684 ost->file_index, ost->st->index);
687 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Store a reference in the fifo; original pkt is released below. */
691 ret = av_packet_ref(&tmp_pkt, pkt);
694 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
695 av_packet_unref(pkt);
/* VSYNC_DROP / negative audio sync discard timestamps entirely. */
699 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
700 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
701 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
704 * Audio encoders may split the packets -- #frames in != #packets out.
705 * But there is no reordering, so we can limit the number of output packets
706 * by simply dropping them here.
707 * Counting encoded video frames needs to be done separately because of
708 * reordering, see do_video_out()
710 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
711 if (ost->frame_number >= ost->max_frames) {
712 av_packet_unref(pkt);
/* Extract encoder quality/PSNR side data for the status line/vstats. */
717 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
719 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
721 ost->quality = sd ? AV_RL32(sd) : -1;
722 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
724 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
726 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force the duration implied by the frame rate. */
731 if (ost->frame_rate.num && ost->is_cfr) {
732 if (pkt->duration > 0)
733 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
734 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
/* From the internal mux timebase to the muxer's stream timebase. */
739 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
741 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
742 if (pkt->dts != AV_NOPTS_VALUE &&
743 pkt->pts != AV_NOPTS_VALUE &&
744 pkt->dts > pkt->pts) {
745 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
747 ost->file_index, ost->st->index);
/* Replace dts by the median of pts, dts and last_mux_dts+1
 * (sum minus min minus max == middle value). */
749 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
750 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
751 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing DTS; VP9 in
 * stream-copy mode is exempt (may legitimately repeat dts). */
753 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
754 pkt->dts != AV_NOPTS_VALUE &&
755 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
756 ost->last_mux_dts != AV_NOPTS_VALUE) {
757 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
758 if (pkt->dts < max) {
759 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
760 av_log(s, loglevel, "Non-monotonous DTS in output stream "
761 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
762 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
764 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
767 av_log(s, loglevel, "changing to %"PRId64". This may result "
768 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
770 if (pkt->pts >= pkt->dts)
771 pkt->pts = FFMAX(pkt->pts, max);
776 ost->last_mux_dts = pkt->dts;
778 ost->data_size += pkt->size;
779 ost->packets_written++;
781 pkt->stream_index = ost->index;
/* -debug_ts style trace of what goes into the muxer. */
784 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
785 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
786 av_get_media_type_string(ost->enc_ctx->codec_type),
787 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
788 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
793 ret = av_interleaved_write_frame(s, pkt);
/* A mux failure finishes all streams of this output. */
795 print_error("av_interleaved_write_frame()", ret);
796 main_return_code = 1;
797 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
799 av_packet_unref(pkt);
/**
 * Mark an output stream's encoder as finished and, for -shortest handling,
 * shrink the file's recording_time to this stream's end time (the guard
 * condition around the recording_time update is elided in this chunk).
 */
802 static void close_output_stream(OutputStream *ost)
804 OutputFile *of = output_files[ost->file_index];
806 ost->finished |= ENCODER_FINISHED;
/* End of this stream in AV_TIME_BASE units, relative to its first pts. */
808 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
809 of->recording_time = FFMIN(of->recording_time, end);
/**
 * Run a packet through the output stream's bitstream-filter chain (if any),
 * then pass each produced packet to write_packet(). With no filters the
 * packet goes straight to the muxer. (Loop/brace lines elided here.)
 */
813 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
817 /* apply the output bitstream filters, if any */
818 if (ost->nb_bitstream_filters) {
/* Feed the first filter in the chain. */
821 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
827 /* get a packet from the previous filter up the chain */
828 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
/* EAGAIN: that filter needs more input before producing output. */
829 if (ret == AVERROR(EAGAIN)) {
835 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
836 * the api states this shouldn't happen after init(). Propagate it here to the
837 * muxer and to the next filters in the chain to workaround this.
838 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
839 * par_out->extradata and adapt muxers accordingly to get rid of this. */
840 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
841 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
/* Bit 0: extradata propagated to the muxer stream. */
844 ost->bsf_extradata_updated[idx - 1] |= 1;
847 /* send it to the next filter down the chain or to the muxer */
848 if (idx < ost->nb_bitstream_filters) {
849 /* HACK/FIXME! - See above */
850 if (!(ost->bsf_extradata_updated[idx] & 2)) {
851 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
/* Bit 1: extradata propagated to the next filter. */
854 ost->bsf_extradata_updated[idx] |= 2;
856 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
861 write_packet(of, pkt, ost);
/* No bitstream filters configured: direct to the muxer. */
864 write_packet(of, pkt, ost);
867 if (ret < 0 && ret != AVERROR_EOF) {
868 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
869 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/**
 * Check whether this stream already reached the output file's -t limit.
 * If so, close the stream. The return statements (0 = stop, 1 = keep
 * encoding, per the callers' usage) are elided in this chunk.
 */
875 static int check_recording_time(OutputStream *ost)
877 OutputFile *of = output_files[ost->file_index];
/* Compare elapsed stream time (encoder time base) against recording_time
 * (AV_TIME_BASE units) without converting either side lossily. */
879 if (of->recording_time != INT64_MAX &&
880 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
881 AV_TIME_BASE_Q) >= 0) {
882 close_output_stream(ost);
/**
 * Encode one filtered audio frame and send every resulting packet to the
 * muxer via output_packet(). Uses the send_frame/receive_packet encode API.
 * (Loop/brace and error-path lines are elided in this chunk.)
 */
888 static void do_audio_out(OutputFile *of, OutputStream *ost,
891 AVCodecContext *enc = ost->enc_ctx;
895 av_init_packet(&pkt);
/* Stop if the output file's -t limit was reached. */
899 if (!check_recording_time(ost))
/* Without usable pts (or with async disabled) stamp frames by sample count. */
902 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
903 frame->pts = ost->sync_opts;
904 ost->sync_opts = frame->pts + frame->nb_samples;
905 ost->samples_encoded += frame->nb_samples;
906 ost->frames_encoded++;
908 av_assert0(pkt.size || !pkt.data);
909 update_benchmark(NULL);
/* -debug_ts trace of what enters the encoder. */
911 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
912 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
913 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
914 enc->time_base.num, enc->time_base.den);
917 ret = avcodec_send_frame(enc, frame);
/* Drain all packets the encoder has ready. */
922 ret = avcodec_receive_packet(enc, &pkt);
923 if (ret == AVERROR(EAGAIN))
928 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
930 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
933 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
934 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
935 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
936 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
939 output_packet(of, &pkt, ost);
944 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/**
 * Encode an AVSubtitle with avcodec_encode_subtitle() into a shared 1 MiB
 * scratch buffer and mux the result. DVB subtitles are emitted twice: one
 * packet draws, a second clears. (Several guard/brace lines are elided.)
 */
948 static void do_subtitle_out(OutputFile *of,
952 int subtitle_out_max_size = 1024 * 1024;
953 int subtitle_out_size, nb, i;
958 if (sub->pts == AV_NOPTS_VALUE) {
959 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared scratch buffer (freed in ffmpeg_cleanup). */
968 subtitle_out = av_malloc(subtitle_out_max_size);
970 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
975 /* Note: DVB subtitle need one packet to draw them and one other
976 packet to clear them */
977 /* XXX: signal it in the codec context ? */
978 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
983 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
985 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
986 pts -= output_files[ost->file_index]->start_time;
/* nb is 2 for the DVB draw+clear case, otherwise presumably 1
 * (assignment lines elided here). */
987 for (i = 0; i < nb; i++) {
988 unsigned save_num_rects = sub->num_rects;
990 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
991 if (!check_recording_time(ost))
995 // start_display_time is required to be 0
/* Fold start_display_time into pts so the encoder sees 0. */
996 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
997 sub->end_display_time -= sub->start_display_time;
998 sub->start_display_time = 0;
1002 ost->frames_encoded++;
1004 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1005 subtitle_out_max_size, sub);
/* num_rects may have been zeroed for the "clear" pass; restore it. */
1007 sub->num_rects = save_num_rects;
1008 if (subtitle_out_size < 0) {
1009 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1013 av_init_packet(&pkt);
1014 pkt.data = subtitle_out;
1015 pkt.size = subtitle_out_size;
1016 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1017 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1018 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1019 /* XXX: the pts correction is handled here. Maybe handling
1020 it in the codec would be better */
/* First pass stamps the draw time, second pass the clear time. */
1022 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1024 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1027 output_packet(of, &pkt, ost);
/**
 * Encode one filtered video frame: decide — according to the active vsync
 * mode — how many times to emit it (duplicating or dropping to hit the
 * target rate), handle forced keyframes, run the encoder and mux every
 * produced packet. next_picture == NULL means the filter chain is being
 * flushed. (Many guard/brace lines are elided in this chunk.)
 */
1031 static void do_video_out(OutputFile *of,
1033 AVFrame *next_picture,
1036 int ret, format_video_sync;
1038 AVCodecContext *enc = ost->enc_ctx;
1039 AVCodecParameters *mux_par = ost->st->codecpar;
1040 AVRational frame_rate;
1041 int nb_frames, nb0_frames, i;
1042 double delta, delta0;
1043 double duration = 0;
1045 InputStream *ist = NULL;
1046 AVFilterContext *filter = ost->filter->filter;
1048 if (ost->source_index >= 0)
1049 ist = input_streams[ost->source_index];
/* --- estimate this frame's duration in encoder time-base ticks --- */
1051 frame_rate = av_buffersink_get_frame_rate(filter);
1052 if (frame_rate.num > 0 && frame_rate.den > 0)
1053 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1055 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1056 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Prefer the incoming packet duration when no filter script changes timing. */
1058 if (!ost->filters_script &&
1062 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1063 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush: guess repetitions from the recent history (median of last 3). */
1066 if (!next_picture) {
1068 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1069 ost->last_nb0_frames[1],
1070 ost->last_nb0_frames[2]);
1072 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1073 delta = delta0 + duration;
1075 /* by default, we output a single frame */
1076 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* --- resolve VSYNC_AUTO into a concrete sync mode --- */
1079 format_video_sync = video_sync_method;
1080 if (format_video_sync == VSYNC_AUTO) {
1081 if(!strcmp(of->ctx->oformat->name, "avi")) {
1082 format_video_sync = VSYNC_VFR;
1084 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1086 && format_video_sync == VSYNC_CFR
1087 && input_files[ist->file_index]->ctx->nb_streams == 1
1088 && input_files[ist->file_index]->input_ts_offset == 0) {
1089 format_video_sync = VSYNC_VSCFR;
1091 if (format_video_sync == VSYNC_CFR && copy_ts) {
1092 format_video_sync = VSYNC_VSCFR;
1095 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Clip a frame that starts in the past (unless pass-through/drop). */
1099 format_video_sync != VSYNC_PASSTHROUGH &&
1100 format_video_sync != VSYNC_DROP) {
1101 if (delta0 < -0.6) {
1102 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1104 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1105 sync_ipts = ost->sync_opts;
/* --- per-mode dup/drop decision (case labels partly elided) --- */
1110 switch (format_video_sync) {
1112 if (ost->frame_number == 0 && delta0 >= 0.5) {
1113 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1116 ost->sync_opts = lrint(sync_ipts);
1119 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1120 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1122 } else if (delta < -1.1)
1124 else if (delta > 1.1) {
/* More than one output slot elapsed: duplicate to fill the gap. */
1125 nb_frames = lrintf(delta);
1127 nb0_frames = lrintf(delta0 - 0.6);
1133 else if (delta > 0.6)
1134 ost->sync_opts = lrint(sync_ipts);
1137 case VSYNC_PASSTHROUGH:
1138 ost->sync_opts = lrint(sync_ipts);
/* Respect -frames/-vframes and keep nb0 within nb. */
1145 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1146 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the dup-history ring used on flush. */
1148 memmove(ost->last_nb0_frames + 1,
1149 ost->last_nb0_frames,
1150 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1151 ost->last_nb0_frames[0] = nb0_frames;
1153 if (nb0_frames == 0 && ost->last_dropped) {
1155 av_log(NULL, AV_LOG_VERBOSE,
1156 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1157 ost->frame_number, ost->st->index, ost->last_frame->pts);
1159 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
/* Excessive duplication usually means broken timestamps; bail out. */
1160 if (nb_frames > dts_error_threshold * 30) {
1161 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1165 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1166 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1167 if (nb_frames_dup > dup_warning) {
1168 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1172 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1174 /* duplicates frame if needed */
1175 for (i = 0; i < nb_frames; i++) {
1176 AVFrame *in_picture;
1177 av_init_packet(&pkt);
/* First nb0_frames iterations re-encode the previous frame. */
1181 if (i < nb0_frames && ost->last_frame) {
1182 in_picture = ost->last_frame;
1184 in_picture = next_picture;
1189 in_picture->pts = ost->sync_opts;
1192 if (!check_recording_time(ost))
1194 if (ost->frame_number >= ost->max_frames)
/* Legacy raw-video shortcut: pass the picture struct straight through. */
1198 #if FF_API_LAVF_FMT_RAWPICTURE
1199 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1200 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1201 /* raw pictures are written as AVPicture structure to
1202 avoid any copies. We support temporarily the older
1204 if (in_picture->interlaced_frame)
1205 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1207 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1208 pkt.data = (uint8_t *)in_picture;
1209 pkt.size = sizeof(AVPicture);
1210 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1211 pkt.flags |= AV_PKT_FLAG_KEY;
1213 output_packet(of, &pkt, ost);
1217 int forced_keyframe = 0;
/* Interlaced encoding: honor an explicit -top override. */
1220 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1221 ost->top_field_first >= 0)
1222 in_picture->top_field_first = !!ost->top_field_first;
1224 if (in_picture->interlaced_frame) {
1225 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1226 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1228 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1230 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1232 in_picture->quality = enc->global_quality;
/* Let the encoder choose the picture type unless forced below. */
1233 in_picture->pict_type = 0;
1235 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1236 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* --- forced keyframes: -force_key_frames list, expression, or source --- */
1237 if (ost->forced_kf_index < ost->forced_kf_count &&
1238 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1239 ost->forced_kf_index++;
1240 forced_keyframe = 1;
1241 } else if (ost->forced_keyframes_pexpr) {
1243 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1244 res = av_expr_eval(ost->forced_keyframes_pexpr,
1245 ost->forced_keyframes_expr_const_values, NULL);
1246 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1247 ost->forced_keyframes_expr_const_values[FKF_N],
1248 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1249 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1250 ost->forced_keyframes_expr_const_values[FKF_T],
1251 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1254 forced_keyframe = 1;
1255 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1256 ost->forced_keyframes_expr_const_values[FKF_N];
1257 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1258 ost->forced_keyframes_expr_const_values[FKF_T];
1259 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1262 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1263 } else if ( ost->forced_keyframes
1264 && !strncmp(ost->forced_keyframes, "source", 6)
1265 && in_picture->key_frame==1) {
1266 forced_keyframe = 1;
1269 if (forced_keyframe) {
1270 in_picture->pict_type = AV_PICTURE_TYPE_I;
1271 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1274 update_benchmark(NULL);
1276 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1277 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1278 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1279 enc->time_base.num, enc->time_base.den);
1282 ost->frames_encoded++;
/* --- encode and drain --- */
1284 ret = avcodec_send_frame(enc, in_picture);
1289 ret = avcodec_receive_packet(enc, &pkt);
1290 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1291 if (ret == AVERROR(EAGAIN))
1297 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1298 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1299 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1300 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Encoders without delay may legitimately omit pts; use our counter. */
1303 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1304 pkt.pts = ost->sync_opts;
1306 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1309 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1310 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1311 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1312 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1315 frame_size = pkt.size;
1316 output_packet(of, &pkt, ost);
1318 /* if two pass, output log */
1319 if (ost->logfile && enc->stats_out) {
1320 fprintf(ost->logfile, "%s", enc->stats_out);
1326 * For video, number of frames in == number of packets out.
1327 * But there may be reordering, so we can't throw away frames on encoder
1328 * flush, we need to limit them here, before they go into encoder.
1330 ost->frame_number++;
1332 if (vstats_filename && frame_size)
1333 do_video_stats(ost, frame_size);
/* Remember the frame so VFR duplication can re-encode it next call. */
1336 if (!ost->last_frame)
1337 ost->last_frame = av_frame_alloc();
1338 av_frame_unref(ost->last_frame);
1339 if (next_picture && ost->last_frame)
1340 av_frame_ref(ost->last_frame, next_picture);
1342 av_frame_free(&ost->last_frame);
1346 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1350 static double psnr(double d)
1352 return -10.0 * log10(d);
/*
 * Append one line of per-frame statistics for a video stream to the file
 * named by -vstats_file: frame number, quantizer, optional PSNR, encoded
 * frame size, end timestamp and instantaneous/average bitrates.
 * NOTE(review): the extraction dropped intermediate source lines here
 * (declarations, braces, error handling), so the fragment below is not
 * the complete function body.
 */
1355 static void do_video_stats(OutputStream *ost, int frame_size)
1357 AVCodecContext *enc;
1359 double ti1, bitrate, avg_bitrate;
/* The stats file is opened lazily, on the first call only. */
1361 /* this is executed just the first time do_video_stats is called */
1363 vstats_file = fopen(vstats_filename, "w");
1371 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1372 frame_number = ost->st->nb_frames;
/* vstats format v1 omits the output-file/stream indices; v2 adds them. */
1373 if (vstats_version <= 1) {
1374 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1375 ost->quality / (float)FF_QP2LAMBDA);
1377 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1378 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR of the luma plane, normalized by peak energy (255^2 per pixel). */
1381 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1382 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1384 fprintf(vstats_file,"f_size= %6d ", frame_size);
1385 /* compute pts value */
1386 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate treats one frame as one enc->time_base tick. */
1390 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1391 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1392 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1393 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1394 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1398 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark an output stream as completely finished (both its encoder and its
 * muxer side), so no further packets are produced or accepted for it.
 * NOTE(review): extraction dropped lines here; upstream, the loop below
 * that finishes every sibling stream of the same output file is guarded
 * (it applies when the whole file must stop together, e.g. -shortest) —
 * confirm against the full source.
 */
1400 static void finish_output_stream(OutputStream *ost)
1402 OutputFile *of = output_files[ost->file_index];
1405 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1408 for (i = 0; i < of->ctx->nb_streams; i++)
1409 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1414 * Get and encode new output from any of the filtergraphs, without causing
1417 * @return 0 for success, <0 for severe errors
/*
 * Drains every configured buffersink: for each output stream with a
 * configured filtergraph, pulls filtered frames (without requesting new
 * input) and hands them to the video/audio encoding paths.
 * NOTE(review): extraction dropped intermediate lines (loop braces,
 * `continue`s, error paths); the fragment below is incomplete.
 */
1419 static int reap_filters(int flush)
1421 AVFrame *filtered_frame = NULL;
1424 /* Reap all buffers present in the buffer sinks */
1425 for (i = 0; i < nb_output_streams; i++) {
1426 OutputStream *ost = output_streams[i];
1427 OutputFile *of = output_files[ost->file_index];
1428 AVFilterContext *filter;
1429 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph is absent or not yet configured. */
1432 if (!ost->filter || !ost->filter->graph->graph)
1434 filter = ost->filter->filter;
/* Lazily initialize the output stream once its filter output exists. */
1436 if (!ost->initialized) {
1437 char error[1024] = "";
1438 ret = init_output_stream(ost, error, sizeof(error));
1440 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1441 ost->file_index, ost->index, error);
1446 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1447 return AVERROR(ENOMEM);
1449 filtered_frame = ost->filtered_frame;
1452 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already buffered, don't pull upstream. */
1453 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1454 AV_BUFFERSINK_FLAG_NO_REQUEST);
1456 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1457 av_log(NULL, AV_LOG_WARNING,
1458 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1459 } else if (flush && ret == AVERROR_EOF) {
/* On flush, send a NULL frame so the video path can emit trailing frames. */
1460 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1461 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1465 if (ost->finished) {
1466 av_frame_unref(filtered_frame);
1469 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1470 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1471 AVRational filter_tb = av_buffersink_get_time_base(filter);
1472 AVRational tb = enc->time_base;
/* Scale the timebase up by up to 16 bits to keep sub-tick precision
 * in float_pts when rescaling from the filter timebase. */
1473 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1475 tb.den <<= extra_bits;
1477 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1478 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1479 float_pts /= 1 << extra_bits;
1480 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1481 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1483 filtered_frame->pts =
1484 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1485 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1487 //if (ost->source_index >= 0)
1488 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1490 switch (av_buffersink_get_type(filter)) {
1491 case AVMEDIA_TYPE_VIDEO:
1492 if (!ost->frame_aspect_ratio.num)
1493 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1496 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1497 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1499 enc->time_base.num, enc->time_base.den);
1502 do_video_out(of, ost, filtered_frame, float_pts);
1504 case AVMEDIA_TYPE_AUDIO:
/* The audio encoder cannot follow mid-stream channel changes unless it
 * advertises AV_CODEC_CAP_PARAM_CHANGE. */
1505 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1506 enc->channels != av_frame_get_channels(filtered_frame)) {
1507 av_log(NULL, AV_LOG_ERROR,
1508 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1511 do_audio_out(of, ost, filtered_frame);
1514 // TODO support subtitle filters
1518 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: total bytes per media type, muxing
 * overhead percentage, and (at verbose level) per-stream packet/frame
 * counts for every input and output file.
 * NOTE(review): extraction dropped intermediate lines (braces, some
 * conditionals); the fragment below is incomplete.
 */
1525 static void print_final_stats(int64_t total_size)
1527 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1528 uint64_t subtitle_size = 0;
1529 uint64_t data_size = 0;
/* percent stays negative when overhead cannot be computed -> "unknown". */
1530 float percent = -1.0;
1534 for (i = 0; i < nb_output_streams; i++) {
1535 OutputStream *ost = output_streams[i];
1536 switch (ost->enc_ctx->codec_type) {
1537 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1538 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1539 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1540 default: other_size += ost->data_size; break;
1542 extra_size += ost->enc_ctx->extradata_size;
1543 data_size += ost->data_size;
/* NOTE(review): mixes AV_CODEC_FLAG_PASS1 with the deprecated
 * CODEC_FLAG_PASS2 spelling — presumably they are numerically equal;
 * confirm against the libavcodec headers in use. */
1544 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1545 != AV_CODEC_FLAG_PASS1)
/* Overhead = container bytes beyond the raw encoded payload. */
1549 if (data_size && total_size>0 && total_size >= data_size)
1550 percent = 100.0 * (total_size - data_size) / data_size;
1552 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1553 video_size / 1024.0,
1554 audio_size / 1024.0,
1555 subtitle_size / 1024.0,
1556 other_size / 1024.0,
1557 extra_size / 1024.0);
1559 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1561 av_log(NULL, AV_LOG_INFO, "unknown");
1562 av_log(NULL, AV_LOG_INFO, "\n");
1564 /* print verbose per-stream stats */
1565 for (i = 0; i < nb_input_files; i++) {
1566 InputFile *f = input_files[i];
/* Shadows the parameter intentionally: per-file running totals. */
1567 uint64_t total_packets = 0, total_size = 0;
1569 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1570 i, f->ctx->filename);
1572 for (j = 0; j < f->nb_streams; j++) {
1573 InputStream *ist = input_streams[f->ist_index + j];
1574 enum AVMediaType type = ist->dec_ctx->codec_type;
1576 total_size += ist->data_size;
1577 total_packets += ist->nb_packets;
1579 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1580 i, j, media_type_string(type));
1581 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1582 ist->nb_packets, ist->data_size);
1584 if (ist->decoding_needed) {
1585 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1586 ist->frames_decoded);
1587 if (type == AVMEDIA_TYPE_AUDIO)
1588 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1589 av_log(NULL, AV_LOG_VERBOSE, "; ");
1592 av_log(NULL, AV_LOG_VERBOSE, "\n");
1595 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1596 total_packets, total_size);
1599 for (i = 0; i < nb_output_files; i++) {
1600 OutputFile *of = output_files[i];
1601 uint64_t total_packets = 0, total_size = 0;
1603 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1604 i, of->ctx->filename);
1606 for (j = 0; j < of->ctx->nb_streams; j++) {
1607 OutputStream *ost = output_streams[of->ost_index + j];
1608 enum AVMediaType type = ost->enc_ctx->codec_type;
1610 total_size += ost->data_size;
1611 total_packets += ost->packets_written;
1613 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1614 i, j, media_type_string(type));
1615 if (ost->encoding_needed) {
1616 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1617 ost->frames_encoded);
1618 if (type == AVMEDIA_TYPE_AUDIO)
1619 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1620 av_log(NULL, AV_LOG_VERBOSE, "; ");
1623 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1624 ost->packets_written, ost->data_size);
1626 av_log(NULL, AV_LOG_VERBOSE, "\n");
1629 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1630 total_packets, total_size);
/* Warn loudly when nothing at all was encoded. */
1632 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1633 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1635 av_log(NULL, AV_LOG_WARNING, "\n");
1637 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic (and final) progress line: frame count, fps, q,
 * size, time, bitrate, dup/drop counts and speed. Also feeds the same
 * data in key=value form to -progress via buf_script/progress_avio.
 * Rate-limited to one update per 500ms unless this is the last report.
 * NOTE(review): extraction dropped intermediate lines (buf declaration,
 * several braces/else branches); the fragment below is incomplete.
 */
1642 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1645 AVBPrint buf_script;
1647 AVFormatContext *oc;
1649 AVCodecContext *enc;
1650 int frame_number, vid, i;
/* pts tracks the maximum end timestamp over all output streams. */
1653 int64_t pts = INT64_MIN + 1;
/* static state: remembers the wall-clock time of the previous report. */
1654 static int64_t last_time = -1;
1655 static int qp_histogram[52];
1656 int hours, mins, secs, us;
1660 if (!print_stats && !is_last_report && !progress_avio)
/* Throttle intermediate reports to at most one every 500ms. */
1663 if (!is_last_report) {
1664 if (last_time == -1) {
1665 last_time = cur_time;
1668 if ((cur_time - last_time) < 500000)
1670 last_time = cur_time;
1673 t = (cur_time-timer_start) / 1000000.0;
1676 oc = output_files[0]->ctx;
1678 total_size = avio_size(oc->pb);
1679 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1680 total_size = avio_tell(oc->pb);
1684 av_bprint_init(&buf_script, 0, 1);
1685 for (i = 0; i < nb_output_streams; i++) {
1687 ost = output_streams[i];
1689 if (!ost->stream_copy)
1690 q = ost->quality / (float) FF_QP2LAMBDA;
/* "vid" flags that a video stream was already reported this round. */
1692 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1693 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1694 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1695 ost->file_index, ost->index, q);
1697 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1700 frame_number = ost->frame_number;
1701 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" selects one decimal digit only for small rates. */
1702 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1703 frame_number, fps < 9.95, fps, q);
1704 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1705 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1706 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1707 ost->file_index, ost->index, q);
1709 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram printed as one hex digit per bucket (log2 of count). */
1713 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1715 for (j = 0; j < 32; j++)
1716 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1719 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1721 double error, error_sum = 0;
1722 double scale, scale_sum = 0;
1724 char type[3] = { 'Y','U','V' };
1725 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1726 for (j = 0; j < 3; j++) {
/* Final report uses the encoder's accumulated error; intermediate
 * reports use the per-frame error of the last packet. */
1727 if (is_last_report) {
1728 error = enc->error[j];
1729 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1731 error = ost->error[j];
1732 scale = enc->width * enc->height * 255.0 * 255.0;
1738 p = psnr(error / scale);
1739 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1740 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1741 ost->file_index, ost->index, type[j] | 32, p);
1743 p = psnr(error_sum / scale_sum);
1744 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1745 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1746 ost->file_index, ost->index, p);
1750 /* compute min output value */
1751 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1752 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1753 ost->st->time_base, AV_TIME_BASE_Q));
1755 nb_frames_drop += ost->last_dropped;
1758 secs = FFABS(pts) / AV_TIME_BASE;
1759 us = FFABS(pts) % AV_TIME_BASE;
/* Negative sentinels mean "N/A" in the output below. */
1765 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1766 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1768 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1770 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1771 "size=%8.0fkB time=", total_size / 1024.0);
1773 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1774 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1775 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1776 (100 * us) / AV_TIME_BASE);
1779 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1780 av_bprintf(&buf_script, "bitrate=N/A\n");
1782 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1783 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1786 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1787 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1788 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1789 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1790 hours, mins, secs, us);
1792 if (nb_frames_dup || nb_frames_drop)
1793 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1794 nb_frames_dup, nb_frames_drop);
1795 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1796 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1799 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1800 av_bprintf(&buf_script, "speed=N/A\n");
1802 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1803 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* Console output: '\r' keeps rewriting one line, '\n' finishes it. */
1806 if (print_stats || is_last_report) {
1807 const char end = is_last_report ? '\n' : '\r';
1808 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1809 fprintf(stderr, "%s %c", buf, end);
1811 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1816 if (progress_avio) {
1817 av_bprintf(&buf_script, "progress=%s\n",
1818 is_last_report ? "end" : "continue");
1819 avio_write(progress_avio, buf_script.str,
1820 FFMIN(buf_script.len, buf_script.size - 1));
1821 avio_flush(progress_avio);
1822 av_bprint_finalize(&buf_script, NULL);
1823 if (is_last_report) {
1824 if ((ret = avio_closep(&progress_avio)) < 0)
1825 av_log(NULL, AV_LOG_ERROR,
1826 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1831 print_final_stats(total_size);
/*
 * At end of input, drain every encoder: send a NULL frame to each one
 * still encoding, then loop on avcodec_receive_packet() until EOF and
 * mux the remaining packets. Streams never initialized (no data reached
 * them) get a best-effort fake-format initialization first.
 * NOTE(review): extraction dropped intermediate lines (continues, exit
 * paths, the receive loop's braces); the fragment below is incomplete.
 */
1834 static void flush_encoders(void)
1838 for (i = 0; i < nb_output_streams; i++) {
1839 OutputStream *ost = output_streams[i];
1840 AVCodecContext *enc = ost->enc_ctx;
1841 OutputFile *of = output_files[ost->file_index];
1843 if (!ost->encoding_needed)
1846 // Try to enable encoding with no input frames.
1847 // Maybe we should just let encoding fail instead.
1848 if (!ost->initialized) {
1849 FilterGraph *fg = ost->filter->graph;
1850 char error[1024] = "";
1852 av_log(NULL, AV_LOG_WARNING,
1853 "Finishing stream %d:%d without any data written to it.\n",
1854 ost->file_index, ost->st->index);
1856 if (ost->filter && !fg->graph) {
1858 for (x = 0; x < fg->nb_inputs; x++) {
1859 InputFilter *ifilter = fg->inputs[x];
1860 if (ifilter->format < 0) {
1861 AVCodecParameters *par = ifilter->ist->st->codecpar;
1862 // We never got any input. Set a fake format, which will
1863 // come from libavformat.
1864 ifilter->format = par->format;
1865 ifilter->sample_rate = par->sample_rate;
1866 ifilter->channels = par->channels;
1867 ifilter->channel_layout = par->channel_layout;
1868 ifilter->width = par->width;
1869 ifilter->height = par->height;
1870 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1874 if (!ifilter_has_all_input_formats(fg))
1877 ret = configure_filtergraph(fg);
1879 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1883 finish_output_stream(ost);
1886 ret = init_output_stream(ost, error, sizeof(error));
1888 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1889 ost->file_index, ost->index, error);
/* PCM-style audio encoders (frame_size <= 1) buffer nothing to flush. */
1894 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1896 #if FF_API_LAVF_FMT_RAWPICTURE
1897 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1901 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame puts the encoder into draining mode. */
1904 avcodec_send_frame(enc, NULL);
1907 const char *desc = NULL;
1911 switch (enc->codec_type) {
1912 case AVMEDIA_TYPE_AUDIO:
1915 case AVMEDIA_TYPE_VIDEO:
1922 av_init_packet(&pkt);
1926 update_benchmark(NULL);
1927 ret = avcodec_receive_packet(enc, &pkt);
1928 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1929 if (ret < 0 && ret != AVERROR_EOF) {
1930 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass log data is still produced while draining. */
1935 if (ost->logfile && enc->stats_out) {
1936 fprintf(ost->logfile, "%s", enc->stats_out);
1938 if (ret == AVERROR_EOF) {
1941 if (ost->finished & MUXER_FINISHED) {
1942 av_packet_unref(&pkt);
1945 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1946 pkt_size = pkt.size;
1947 output_packet(of, &pkt, ost);
1948 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1949 do_video_stats(ost, pkt_size);
1956 * Check whether a packet from ist should be written into ost at this time
/*
 * Predicate used on the streamcopy path: the packet is accepted only if
 * this output stream is actually fed by ist and the packet is not before
 * the output file's start time.
 * NOTE(review): the return statements were dropped by extraction; the
 * visible conditions presumably reject (return 0) and the fall-through
 * accepts — confirm against the full source.
 */
1958 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1960 OutputFile *of = output_files[ost->file_index];
/* Global index of ist within the flat input_streams[] array. */
1961 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1963 if (ost->source_index != ist_index)
1969 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding:
 * applies start-time/recording-time windowing, rescales timestamps into
 * the muxer timebase, optionally runs the bitstream parser for formats
 * that need it, and hands the packet to output_packet().
 * NOTE(review): extraction dropped intermediate lines (opkt/pict
 * declarations, returns, else branches); the fragment is incomplete.
 */
1975 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1977 OutputFile *of = output_files[ost->file_index];
1978 InputFile *f = input_files [ist->file_index];
1979 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1980 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1984 av_init_packet(&opkt);
/* Drop leading non-keyframes unless the user asked to keep them. */
1986 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1987 !ost->copy_initial_nonkeyframes)
1990 if (!ost->frame_number && !ost->copy_prior_start) {
1991 int64_t comp_start = start_time;
1992 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1993 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1994 if (pkt->pts == AV_NOPTS_VALUE ?
1995 ist->pts < comp_start :
1996 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop the stream once the output file's -t window is exhausted. */
2000 if (of->recording_time != INT64_MAX &&
2001 ist->pts >= of->recording_time + start_time) {
2002 close_output_stream(ost);
2006 if (f->recording_time != INT64_MAX) {
2007 start_time = f->ctx->start_time;
2008 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2009 start_time += f->start_time;
2010 if (ist->pts >= f->recording_time + start_time) {
2011 close_output_stream(ost);
2016 /* force the input stream PTS */
2017 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2020 if (pkt->pts != AV_NOPTS_VALUE)
2021 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2023 opkt.pts = AV_NOPTS_VALUE;
/* Missing DTS: fall back to the demuxer-tracked ist->dts estimate. */
2025 if (pkt->dts == AV_NOPTS_VALUE)
2026 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2028 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2029 opkt.dts -= ost_tb_start_time;
/* Audio: rescale via sample counts to avoid timestamp drift. */
2031 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2032 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2034 duration = ist->dec_ctx->frame_size;
2035 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2036 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2037 ost->mux_timebase) - ost_tb_start_time;
2040 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2042 opkt.flags = pkt->flags;
2043 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
/* Codecs listed here bypass the parser-based header manipulation. */
2044 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2045 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2046 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2047 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2049 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2050 &opkt.data, &opkt.size,
2051 pkt->data, pkt->size,
2052 pkt->flags & AV_PKT_FLAG_KEY);
2054 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Wrap parser-allocated data so the packet owns (and frees) it. */
2059 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2064 opkt.data = pkt->data;
2065 opkt.size = pkt->size;
2067 av_copy_packet_side_data(&opkt, pkt);
2069 #if FF_API_LAVF_FMT_RAWPICTURE
2070 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2071 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2072 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2073 /* store AVPicture in AVPacket, as expected by the output format */
2074 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2076 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2080 opkt.data = (uint8_t *)&pict;
2081 opkt.size = sizeof(AVPicture);
2082 opkt.flags |= AV_PKT_FLAG_KEY;
2086 output_packet(of, &opkt, ost);
/*
 * If the decoder reported no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn the user.
 * NOTE(review): the return statements were dropped by extraction —
 * presumably returns nonzero on success, 0 when no layout could be
 * guessed; confirm against the full source.
 */
2089 int guess_input_channel_layout(InputStream *ist)
2091 AVCodecContext *dec = ist->dec_ctx;
2093 if (!dec->channel_layout) {
2094 char layout_name[256];
/* Respect the user's cap on how many channels we may guess for. */
2096 if (dec->channels > ist->guess_layout_max)
2098 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2099 if (!dec->channel_layout)
2101 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2102 dec->channels, dec->channel_layout);
2103 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2104 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Book-keeping after a decode attempt: count successes/failures in
 * decode_error_stat[], and with -xerror abort on decode errors or on
 * frames the decoder flagged as corrupt.
 * NOTE(review): the exit_program() calls and braces were dropped by
 * extraction; the fragment below is incomplete.
 */
2109 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* Index 0 counts successful decodes, index 1 counts failures. */
2111 if (*got_output || ret<0)
2112 decode_error_stat[ret<0] ++;
2114 if (ret < 0 && exit_on_error)
2117 if (exit_on_error && *got_output && ist) {
2118 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2119 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2125 // Filters can be configured only if the formats of all inputs are known.
/*
 * Returns whether every audio/video input of the filtergraph has a known
 * format (format >= 0); other media types are ignored.
 * NOTE(review): the return statements were dropped by extraction —
 * presumably 0 when any format is unknown, 1 otherwise.
 */
2126 static int ifilter_has_all_input_formats(FilterGraph *fg)
2129 for (i = 0; i < fg->nb_inputs; i++) {
2130 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2131 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Push one decoded frame into a filtergraph input. If the frame's
 * parameters differ from what the input was configured with (or the
 * graph was never configured), the graph is drained, reconfigured, and
 * frames are queued until all inputs have known formats.
 * NOTE(review): extraction dropped intermediate lines (returns, braces,
 * the fifo error path); the fragment below is incomplete.
 */
2137 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2139 FilterGraph *fg = ifilter->graph;
2140 int need_reinit, ret, i;
2142 /* determine if the parameters for this input changed */
2143 need_reinit = ifilter->format != frame->format;
/* A change of hw frames context also forces a reinit. */
2144 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2145 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2148 switch (ifilter->ist->st->codecpar->codec_type) {
2149 case AVMEDIA_TYPE_AUDIO:
2150 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2151 ifilter->channels != frame->channels ||
2152 ifilter->channel_layout != frame->channel_layout;
2154 case AVMEDIA_TYPE_VIDEO:
2155 need_reinit |= ifilter->width != frame->width ||
2156 ifilter->height != frame->height;
2161 ret = ifilter_parameters_from_frame(ifilter, frame);
2166 /* (re)init the graph if possible, otherwise buffer the frame and return */
2167 if (need_reinit || !fg->graph) {
2168 for (i = 0; i < fg->nb_inputs; i++) {
/* Not all inputs known yet: clone the frame and park it in the
 * per-input FIFO until the graph can be configured. */
2169 if (!ifilter_has_all_input_formats(fg)) {
2170 AVFrame *tmp = av_frame_clone(frame);
2172 return AVERROR(ENOMEM);
2173 av_frame_unref(frame);
2175 if (!av_fifo_space(ifilter->frame_queue)) {
2176 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2180 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain the old graph before tearing it down for reconfiguration. */
2185 ret = reap_filters(1);
2186 if (ret < 0 && ret != AVERROR_EOF) {
2188 av_strerror(ret, errbuf, sizeof(errbuf));
2190 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2194 ret = configure_filtergraph(fg);
2196 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2201 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2203 av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
/*
 * Signal EOF on one filtergraph input. If the graph exists, a NULL frame
 * is pushed into the buffersrc; if the graph was never configured and
 * every input has now reached EOF, the graph's output streams are
 * finished directly.
 * NOTE(review): extraction dropped intermediate lines (eof flag set,
 * returns, braces); the fragment below is incomplete.
 */
2210 static int ifilter_send_eof(InputFilter *ifilter)
2216 if (ifilter->filter) {
/* NULL frame == EOF marker for av_buffersrc. */
2217 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2221 // the filtergraph was never configured
2222 FilterGraph *fg = ifilter->graph;
2223 for (i = 0; i < fg->nb_inputs; i++)
2224 if (!fg->inputs[i]->eof)
2226 if (i == fg->nb_inputs) {
2227 // All the input streams have finished without the filtergraph
2228 // ever being configured.
2229 // Mark the output streams as finished.
2230 for (j = 0; j < fg->nb_outputs; j++)
2231 finish_output_stream(fg->outputs[j]->ost);
2238 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2239 // There is the following difference: if you got a frame, you must call
2240 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2241 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/*
 * Thin adapter from the old got_frame-style decode API to the
 * send_packet/receive_frame API.
 * NOTE(review): extraction dropped lines (got_frame init, the pkt NULL
 * check and final return); the fragment below is incomplete.
 */
2242 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2249 ret = avcodec_send_packet(avctx, pkt);
2250 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2251 // decoded frames with avcodec_receive_frame() until done.
2252 if (ret < 0 && ret != AVERROR_EOF)
2256 ret = avcodec_receive_frame(avctx, frame);
2257 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Fan a decoded frame out to every filtergraph input fed by this input
 * stream. All but the last consumer receive a reference copy
 * (ist->filter_frame); the last one takes the decoded frame itself.
 * NOTE(review): extraction dropped lines (the else branch passing the
 * original frame, break/return paths); the fragment is incomplete.
 */
2265 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2270 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2271 for (i = 0; i < ist->nb_filters; i++) {
2272 if (i < ist->nb_filters - 1) {
2273 f = ist->filter_frame;
2274 ret = av_frame_ref(f, decoded_frame);
2279 ret = ifilter_send_frame(ist->filters[i], f);
/* EOF from a filter input is not an error for the caller. */
2280 if (ret == AVERROR_EOF)
2281 ret = 0; /* ignore */
2283 av_log(NULL, AV_LOG_ERROR,
2284 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, fix up the frame's timestamp (preferring the
 * frame pts, then the packet pts, then the tracked stream dts), advance
 * the stream's next_pts/next_dts estimates by the decoded duration, and
 * forward the frame to the filtergraphs.
 * NOTE(review): extraction dropped intermediate lines (returns, braces,
 * the sample-rate divisor of next_pts/next_dts); the fragment below is
 * incomplete.
 */
2291 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2294 AVFrame *decoded_frame;
2295 AVCodecContext *avctx = ist->dec_ctx;
2297 AVRational decoded_frame_tb;
/* Persistent scratch frames, allocated once per input stream. */
2299 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2300 return AVERROR(ENOMEM);
2301 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2302 return AVERROR(ENOMEM);
2303 decoded_frame = ist->decoded_frame;
2305 update_benchmark(NULL);
2306 ret = decode(avctx, decoded_frame, got_output, pkt);
2307 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2311 if (ret >= 0 && avctx->sample_rate <= 0) {
2312 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2313 ret = AVERROR_INVALIDDATA;
2316 if (ret != AVERROR_EOF)
2317 check_decode_result(ist, got_output, ret);
2319 if (!*got_output || ret < 0)
2322 ist->samples_decoded += decoded_frame->nb_samples;
2323 ist->frames_decoded++;
2326 /* increment next_dts to use for the case where the input stream does not
2327 have timestamps or there are multiple frames in the packet */
2328 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2330 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Timestamp source priority: frame pts > packet pts > tracked dts. */
2334 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2335 decoded_frame_tb = ist->st->time_base;
2336 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2337 decoded_frame->pts = pkt->pts;
2338 decoded_frame_tb = ist->st->time_base;
2340 decoded_frame->pts = ist->dts;
2341 decoded_frame_tb = AV_TIME_BASE_Q;
/* Rescale via sample counts to keep audio timestamps drift-free. */
2343 if (decoded_frame->pts != AV_NOPTS_VALUE)
2344 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2345 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2346 (AVRational){1, avctx->sample_rate});
2347 ist->nb_samples = decoded_frame->nb_samples;
2348 err = send_frame_to_filters(ist, decoded_frame);
2350 av_frame_unref(ist->filter_frame);
2351 av_frame_unref(decoded_frame);
2352 return err < 0 ? err : ret;
/*
 * Decode one video packet (or flush the decoder when pkt is NULL/eof) for
 * input stream 'ist', derive a usable timestamp for the decoded frame, and
 * forward the frame to the stream's filtergraphs.
 * Returns the decoder/filter error (negative) or the decode return value.
 * NOTE(review): several lines are elided in this view (e.g. the 'avpkt'
 * declaration and some break/error paths); comments below describe only the
 * visible code.
 */
2355 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2358 AVFrame *decoded_frame;
2359 int i, ret = 0, err = 0;
2360 int64_t best_effort_timestamp;
2361 int64_t dts = AV_NOPTS_VALUE;
2364 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2365 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2367 if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the per-stream scratch frames; kept on ist for reuse. */
2370 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2371 return AVERROR(ENOMEM);
2372 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2373 return AVERROR(ENOMEM);
2374 decoded_frame = ist->decoded_frame;
/* Convert the stream-global dts (AV_TIME_BASE units) into the stream
 * time base so it can be stamped onto the packet below. */
2375 if (ist->dts != AV_NOPTS_VALUE)
2376 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2379 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2382 // The old code used to set dts on the drain packet, which does not work
2383 // with the new API anymore.
/* Instead, remember the dts in a FIFO so it can be used as a timestamp
 * fallback for frames drained at EOF (see the eof branch below). */
2385 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2387 return AVERROR(ENOMEM);
2388 ist->dts_buffer = new;
2389 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2392 update_benchmark(NULL);
2393 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2394 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2398 // The following line may be required in some cases where there is no parser
2399 // or the parser does not has_b_frames correctly
2400 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2401 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2402 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
/* For non-H.264 codecs this mismatch is only reported, not fixed. */
2404 av_log(ist->dec_ctx, AV_LOG_WARNING,
2405 "video_delay is larger in decoder than demuxer %d > %d.\n"
2406 "If you want to help, upload a sample "
2407 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2408 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2409 ist->dec_ctx->has_b_frames,
2410 ist->st->codecpar->video_delay);
/* AVERROR_EOF is the normal end of draining, not a decode failure. */
2413 if (ret != AVERROR_EOF)
2414 check_decode_result(ist, got_output, ret);
/* Debug aid: report frames whose parameters differ from the context. */
2416 if (*got_output && ret >= 0) {
2417 if (ist->dec_ctx->width != decoded_frame->width ||
2418 ist->dec_ctx->height != decoded_frame->height ||
2419 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2420 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2421 decoded_frame->width,
2422 decoded_frame->height,
2423 decoded_frame->format,
2424 ist->dec_ctx->width,
2425 ist->dec_ctx->height,
2426 ist->dec_ctx->pix_fmt);
2430 if (!*got_output || ret < 0)
/* Honour a user-forced field order (-top option), if any. */
2433 if(ist->top_field_first>=0)
2434 decoded_frame->top_field_first = ist->top_field_first;
2436 ist->frames_decoded++;
/* Download the frame from a hwaccel surface to system memory if needed. */
2438 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2439 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2443 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2445 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* With a forced input framerate (-r on input), synthesize CFR timestamps. */
2447 if (ist->framerate.num)
2448 best_effort_timestamp = ist->cfr_next_pts++;
/* While draining at EOF, fall back to the buffered packet dts values
 * (stored above) when the decoder gives no timestamp; consume FIFO head. */
2450 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2451 best_effort_timestamp = ist->dts_buffer[0];
2453 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2454 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2455 ist->nb_dts_buffer--;
/* Stamp the frame pts and update the stream's pts trackers (in
 * AV_TIME_BASE units). Note the embedded assignment to decoded_frame->pts. */
2458 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2459 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2461 if (ts != AV_NOPTS_VALUE)
2462 ist->next_pts = ist->pts = ts;
2466 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2467 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2468 ist->st->index, av_ts2str(decoded_frame->pts),
2469 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2470 best_effort_timestamp,
2471 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2472 decoded_frame->key_frame, decoded_frame->pict_type,
2473 ist->st->time_base.num, ist->st->time_base.den);
/* Prefer the container-level sample aspect ratio over the codec one. */
2476 if (ist->st->sample_aspect_ratio.num)
2477 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2479 err = send_frame_to_filters(ist, decoded_frame);
/* Always release frame references, success or failure. */
2482 av_frame_unref(ist->filter_frame);
2483 av_frame_unref(decoded_frame);
2484 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally fix its duration against the
 * previous subtitle, feed it to sub2video (for filtering) and to every
 * subtitle encoder that consumes this input stream.
 * NOTE(review): some lines (e.g. the 'end' declaration and early returns)
 * are elided in this view.
 */
2487 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2490 AVSubtitle subtitle;
2492 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2493 &subtitle, got_output, pkt);
2495 check_decode_result(NULL, got_output, ret);
/* No output: on failure/EOF make sure queued sub2video frames are flushed. */
2497 if (ret < 0 || !*got_output) {
2500 sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle's display time so it ends
 * when the current one starts (values are in milliseconds). */
2504 if (ist->fix_sub_duration) {
2506 if (ist->prev_sub.got_output) {
2507 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2508 1000, AV_TIME_BASE);
2509 if (end < ist->prev_sub.subtitle.end_display_time) {
2510 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2511 "Subtitle duration reduced from %d to %d%s\n",
2512 ist->prev_sub.subtitle.end_display_time, end,
2513 end <= 0 ? ", dropping it" : "");
2514 ist->prev_sub.subtitle.end_display_time = end;
/* Delay output by one subtitle: emit the previous one, keep the current. */
2517 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2518 FFSWAP(int, ret, ist->prev_sub.ret);
2519 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video path: render now if the frame exists, otherwise queue the
 * subtitle until the filtergraph is configured. */
2527 if (ist->sub2video.frame) {
2528 sub2video_update(ist, &subtitle);
2529 } else if (ist->nb_filters) {
2530 if (!ist->sub2video.sub_queue)
2531 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2532 if (!ist->sub2video.sub_queue)
/* Grow the FIFO (doubling) when full before queuing the subtitle. */
2534 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2535 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2539 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
/* Empty subtitles carry no rectangles; nothing to encode. */
2543 if (!subtitle.num_rects)
2546 ist->frames_decoded++;
/* Hand the subtitle to every matching subtitle encoder. */
2548 for (i = 0; i < nb_output_streams; i++) {
2549 OutputStream *ost = output_streams[i];
2551 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2552 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2555 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2560 avsubtitle_free(&subtitle);
/*
 * Signal EOF to every filtergraph input attached to this input stream,
 * so the filters can flush their buffered data.
 * NOTE(review): declarations/error handling and the return are elided here.
 */
2564 static int send_filter_eof(InputStream *ist)
2567 for (i = 0; i < ist->nb_filters; i++) {
2568 ret = ifilter_send_eof(ist->filters[i]);
2575 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central per-packet driver: maintains the input stream's dts/pts clocks,
 * runs the decode loop for streams that need decoding (audio/video/subtitle),
 * sends filter EOF when flushing, and performs the stream-copy path for
 * non-decoded streams. Returns !eof_reached (0 once the stream hit EOF).
 * NOTE(review): many lines are elided in this view (loop exits, 'repeating'
 * handling, avpkt setup from pkt, etc.).
 */
2576 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2580 int eof_reached = 0;
/* Initialize the stream clock from the first packet. The negative initial
 * dts accounts for decoder delay (has_b_frames) at the declared framerate. */
2583 if (!ist->saw_first_ts) {
2584 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2586 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2587 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2588 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2590 ist->saw_first_ts = 1;
2593 if (ist->next_dts == AV_NOPTS_VALUE)
2594 ist->next_dts = ist->dts;
2595 if (ist->next_pts == AV_NOPTS_VALUE)
2596 ist->next_pts = ist->pts;
2600 av_init_packet(&avpkt);
/* A packet dts resynchronizes the stream clock (AV_TIME_BASE units).
 * For decoded video the pts is derived from decoder output instead. */
2607 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2608 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2609 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2610 ist->next_pts = ist->pts = ist->dts;
2613 // while we have more to decode or while the decoder did output something on EOF
2614 while (ist->decoding_needed) {
2617 int decode_failed = 0;
2619 ist->pts = ist->next_pts;
2620 ist->dts = ist->next_dts;
/* On repeat iterations (draining) pass NULL instead of the packet. */
2622 switch (ist->dec_ctx->codec_type) {
2623 case AVMEDIA_TYPE_AUDIO:
2624 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2627 case AVMEDIA_TYPE_VIDEO:
2628 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
/* Estimate the frame duration: packet duration if present, otherwise
 * from the codec framerate and repeat_pict/ticks_per_frame. */
2630 if (!repeating || !pkt || got_output) {
2631 if (pkt && pkt->duration) {
2632 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2633 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2634 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2635 duration = ((int64_t)AV_TIME_BASE *
2636 ist->dec_ctx->framerate.den * ticks) /
2637 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2640 if(ist->dts != AV_NOPTS_VALUE && duration) {
2641 ist->next_dts += duration;
2643 ist->next_dts = AV_NOPTS_VALUE;
2647 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2649 case AVMEDIA_TYPE_SUBTITLE:
2652 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2653 if (!pkt && ret >= 0)
2660 if (ret == AVERROR_EOF) {
/* Distinguish a decoder error from a downstream processing error. */
2666 if (decode_failed) {
2667 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2668 ist->file_index, ist->st->index, av_err2str(ret));
2670 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2671 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2673 if (!decode_failed || exit_on_error)
2679 ist->got_output = 1;
2684 // During draining, we might get multiple output frames in this loop.
2685 // ffmpeg.c does not drain the filter chain on configuration changes,
2686 // which means if we send multiple frames at once to the filters, and
2687 // one of those frames changes configuration, the buffered frames will
2688 // be lost. This can upset certain FATE tests.
2689 // Decode only 1 frame per call on EOF to appease these FATE tests.
2690 // The ideal solution would be to rewrite decoding to use the new
2691 // decoding API in a better way.
2698 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2699 /* except when looping we need to flush but not to send an EOF */
2700 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2701 int ret = send_filter_eof(ist);
2703 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2708 /* handle stream copy */
/* Without decoding, advance the clock by the nominal packet duration. */
2709 if (!ist->decoding_needed) {
2710 ist->dts = ist->next_dts;
2711 switch (ist->dec_ctx->codec_type) {
2712 case AVMEDIA_TYPE_AUDIO:
2713 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2714 ist->dec_ctx->sample_rate;
2716 case AVMEDIA_TYPE_VIDEO:
2717 if (ist->framerate.num) {
2718 // TODO: Remove work-around for c99-to-c89 issue 7
2719 AVRational time_base_q = AV_TIME_BASE_Q;
2720 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2721 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2722 } else if (pkt->duration) {
2723 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2724 } else if(ist->dec_ctx->framerate.num != 0) {
2725 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2726 ist->next_dts += ((int64_t)AV_TIME_BASE *
2727 ist->dec_ctx->framerate.den * ticks) /
2728 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2732 ist->pts = ist->dts;
2733 ist->next_pts = ist->next_dts;
/* Copy the packet to every output stream doing streamcopy from this input. */
2735 for (i = 0; pkt && i < nb_output_streams; i++) {
2736 OutputStream *ost = output_streams[i];
2738 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2741 do_streamcopy(ist, ost, pkt);
2744 return !eof_reached;
/*
 * Generate an SDP description covering all RTP output files and either print
 * it to stdout or write it to -sdp_file. Only runs once every output header
 * has been written.
 * NOTE(review): declarations (sdp buffer, i/j) and some error paths are
 * elided in this view.
 */
2747 static void print_sdp(void)
2752 AVIOContext *sdp_pb;
2753 AVFormatContext **avc;
/* Wait until all output headers are written before emitting the SDP. */
2755 for (i = 0; i < nb_output_files; i++) {
2756 if (!output_files[i]->header_written)
2760 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only RTP muxer contexts; j counts them. */
2763 for (i = 0, j = 0; i < nb_output_files; i++) {
2764 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2765 avc[j] = output_files[i]->ctx;
2773 av_sdp_create(avc, j, sdp, sizeof(sdp));
2775 if (!sdp_filename) {
2776 printf("SDP:\n%s\n", sdp);
2779 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2780 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2782 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2783 avio_closep(&sdp_pb);
2784 av_freep(&sdp_filename);
/*
 * Look up the hwaccel descriptor matching a pixel format in the global
 * 'hwaccels' table (terminated by a NULL name entry).
 * NOTE(review): the 'i' declaration and the not-found return are elided here.
 */
2792 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2795 for (i = 0; hwaccels[i].name; i++)
2796 if (hwaccels[i].pix_fmt == pix_fmt)
2797 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick the first hwaccel format that matches the user's
 * -hwaccel selection and initializes successfully; record the choice on
 * the InputStream. Non-hwaccel formats are skipped here (selection of the
 * software format is elided from this view).
 */
2801 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2803 InputStream *ist = s->opaque;
2804 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2807 for (p = pix_fmts; *p != -1; p++) {
2808 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2809 const HWAccel *hwaccel;
2811 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Skip hwaccels that don't match the active or requested one. */
2814 hwaccel = get_hwaccel(*p);
2816 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2817 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2820 ret = hwaccel->init(s);
/* Init failure is fatal only if this hwaccel was explicitly requested. */
2822 if (ist->hwaccel_id == hwaccel->id) {
2823 av_log(NULL, AV_LOG_FATAL,
2824 "%s hwaccel requested for input stream #%d:%d, "
2825 "but cannot be initialized.\n", hwaccel->name,
2826 ist->file_index, ist->st->index);
2827 return AV_PIX_FMT_NONE;
/* Propagate an existing hardware frames context to the decoder. */
2832 if (ist->hw_frames_ctx) {
2833 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2834 if (!s->hw_frames_ctx)
2835 return AV_PIX_FMT_NONE;
2838 ist->active_hwaccel_id = hwaccel->id;
2839 ist->hwaccel_pix_fmt = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * hwaccel when the frame uses the hwaccel pixel format, otherwise fall back
 * to the default allocator.
 */
2846 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2848 InputStream *ist = s->opaque;
2850 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2851 return ist->hwaccel_get_buffer(s, frame, flags);
2853 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer callbacks, apply decoder options, and call
 * avcodec_open2. On failure a message is written into 'error'.
 * NOTE(review): some error paths/returns are elided in this view.
 */
2856 static int init_input_stream(int ist_index, char *error, int error_len)
2859 InputStream *ist = input_streams[ist_index];
2861 if (ist->decoding_needed) {
2862 AVCodec *codec = ist->dec;
2864 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2865 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2866 return AVERROR(EINVAL);
/* Route codec callbacks back to this InputStream via 'opaque'. */
2869 ist->dec_ctx->opaque = ist;
2870 ist->dec_ctx->get_format = get_format;
2871 ist->dec_ctx->get_buffer2 = get_buffer;
2872 ist->dec_ctx->thread_safe_callbacks = 1;
2874 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles need compute_edt when output timing matters. */
2875 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2876 (ist->decoding_needed & DECODING_FOR_OST)) {
2877 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2878 if (ist->decoding_needed & DECODING_FOR_FILTER)
2879 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2882 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2884 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2885 * audio, and video decoders such as cuvid or mediacodec */
2886 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic threading unless the user chose a thread count. */
2888 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2889 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2890 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2891 if (ret == AVERROR_EXPERIMENTAL)
2892 abort_codec_experimental(codec, 0);
2894 snprintf(error, error_len,
2895 "Error while opening decoder for input stream "
2897 ist->file_index, ist->st->index, av_err2str(ret));
/* Any options left unconsumed by the decoder are reported as errors. */
2900 assert_avoptions(ist->decoder_opts);
2903 ist->next_pts = AV_NOPTS_VALUE;
2904 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream feeding this output stream, or (presumably) NULL
 * when the output has no direct source — the fallback return is elided in
 * this view.
 */
2909 static InputStream *get_input_stream(OutputStream *ost)
2911 if (ost->source_index >= 0)
2912 return input_streams[ost->source_index];
/* qsort comparator for int64_t values (ascending); used for the forced
 * keyframe timestamp array. */
2916 static int compare_int64(const void *a, const void *b)
2918 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2921 /* open the muxer when all the streams are initialized */
/*
 * If every stream of this output file is initialized, write the container
 * header, optionally print the SDP, and flush packets queued while the
 * muxer was not yet open.
 * NOTE(review): some returns and the AVPacket declaration in the flush loop
 * are elided in this view.
 */
2922 static int check_init_output_file(OutputFile *of, int file_index)
2926 for (i = 0; i < of->ctx->nb_streams; i++) {
2927 OutputStream *ost = output_streams[of->ost_index + i];
2928 if (!ost->initialized)
2932 of->ctx->interrupt_callback = int_cb;
2934 ret = avformat_write_header(of->ctx, &of->opts);
2936 av_log(NULL, AV_LOG_ERROR,
2937 "Could not write header for output file #%d "
2938 "(incorrect codec parameters ?): %s\n",
2939 file_index, av_err2str(ret));
2942 //assert_avoptions(of->opts);
2943 of->header_written = 1;
2945 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2947 if (sdp_filename || want_sdp)
2950 /* flush the muxing queues */
2951 for (i = 0; i < of->ctx->nb_streams; i++) {
2952 OutputStream *ost = output_streams[of->ost_index + i];
2954 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2955 if (!av_fifo_size(ost->muxing_queue))
2956 ost->mux_timebase = ost->st->time_base;
/* Drain packets buffered before the header existed. */
2958 while (av_fifo_size(ost->muxing_queue)) {
2960 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2961 write_packet(of, &pkt, ost);
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * wire each filter's input parameters/time base to the previous filter's
 * output (or the stream itself for the first one), then propagate the last
 * filter's output parameters back to the stream.
 */
2968 static int init_output_bsfs(OutputStream *ost)
2973 if (!ost->nb_bitstream_filters)
2976 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2977 ctx = ost->bsf_ctx[i];
/* Input of filter i comes from filter i-1, or from the stream for i==0. */
2979 ret = avcodec_parameters_copy(ctx->par_in,
2980 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2984 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2986 ret = av_bsf_init(ctx);
2988 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2989 ost->bsf_ctx[i]->filter->name);
/* The stream adopts the final filter's parameters and time base. */
2994 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2995 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2999 ost->st->time_base = ctx->time_base_out;
/*
 * Set up an output stream in stream-copy mode: copy codec parameters from
 * the input, resolve the codec tag for the output container, transfer
 * timing info, duplicate stream side data, and apply per-media-type fixups.
 * NOTE(review): some error checks/returns between the visible calls are
 * elided in this view.
 */
3004 static int init_output_stream_streamcopy(OutputStream *ost)
3006 OutputFile *of = output_files[ost->file_index];
3007 InputStream *ist = get_input_stream(ost);
3008 AVCodecParameters *par_dst = ost->st->codecpar;
3009 AVCodecParameters *par_src = ost->ref_par;
3012 uint32_t codec_tag = par_dst->codec_tag;
3014 av_assert0(ist && !ost->filter);
/* Route input params through enc_ctx so user -c:copy options apply. */
3016 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3018 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3020 av_log(NULL, AV_LOG_FATAL,
3021 "Error setting up codec context options.\n");
3024 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec tag only when the output container accepts it
 * (or has no tag table / no alternative tag for this codec id). */
3027 unsigned int codec_tag_tmp;
3028 if (!of->ctx->oformat->codec_tag ||
3029 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3030 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3031 codec_tag = par_src->codec_tag;
3034 ret = avcodec_parameters_copy(par_dst, par_src);
3038 par_dst->codec_tag = codec_tag;
3040 if (!ost->frame_rate.num)
3041 ost->frame_rate = ist->framerate;
3042 ost->st->avg_frame_rate = ost->frame_rate;
3044 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3048 // copy timebase while removing common factors
3049 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3050 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3052 // copy estimated duration as a hint to the muxer
3053 if (ost->st->duration <= 0 && ist->st->duration > 0)
3054 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3057 ost->st->disposition = ist->st->disposition;
/* Deep-copy the input stream's packet side data. */
3059 if (ist->st->nb_side_data) {
3060 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
3061 sizeof(*ist->st->side_data));
3062 if (!ost->st->side_data)
3063 return AVERROR(ENOMEM);
3065 ost->st->nb_side_data = 0;
3066 for (i = 0; i < ist->st->nb_side_data; i++) {
3067 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3068 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
/* A user-specified rotation overrides the input display matrix. */
3070 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
3073 sd_dst->data = av_malloc(sd_src->size);
3075 return AVERROR(ENOMEM);
3076 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3077 sd_dst->size = sd_src->size;
3078 sd_dst->type = sd_src->type;
3079 ost->st->nb_side_data++;
3083 ost->parser = av_parser_init(par_dst->codec_id);
3084 ost->parser_avctx = avcodec_alloc_context3(NULL);
3085 if (!ost->parser_avctx)
3086 return AVERROR(ENOMEM);
3088 switch (par_dst->codec_type) {
3089 case AVMEDIA_TYPE_AUDIO:
3090 if (audio_volume != 256) {
3091 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* MP3/AC-3 block_align is unreliable when copied; let the muxer decide. */
3094 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3095 par_dst->block_align= 0;
3096 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3097 par_dst->block_align= 0;
3099 case AVMEDIA_TYPE_VIDEO:
/* Sample aspect ratio priority: -aspect option > container SAR > codec SAR. */
3100 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3102 av_mul_q(ost->frame_aspect_ratio,
3103 (AVRational){ par_dst->height, par_dst->width });
3104 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3105 "with stream copy may produce invalid files\n");
3107 else if (ist->st->sample_aspect_ratio.num)
3108 sar = ist->st->sample_aspect_ratio;
3110 sar = par_src->sample_aspect_ratio;
3111 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3112 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3113 ost->st->r_frame_rate = ist->st->r_frame_rate;
3117 ost->mux_timebase = ist->st->time_base;
/*
 * Set the "encoder" metadata tag on an output stream to the libavcodec
 * identification string plus the encoder name, unless the user already set
 * one or bitexact output was requested (then a neutral "Lavc" prefix is
 * used).
 */
3122 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3124 AVDictionaryEntry *e;
3126 uint8_t *encoder_string;
3127 int encoder_string_len;
3128 int format_flags = 0;
3129 int codec_flags = 0;
/* Respect an encoder tag explicitly provided via -metadata. */
3131 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / -flags option strings to detect +bitexact. */
3134 e = av_dict_get(of->opts, "fflags", NULL, 0);
3136 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3139 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3141 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3143 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3146 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2: one for the separating space, one for the NUL terminator. */
3149 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3150 encoder_string = av_mallocz(encoder_string_len);
3151 if (!encoder_string)
3154 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3155 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3157 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3158 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* The dictionary takes ownership of encoder_string (DONT_STRDUP_VAL). */
3159 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3160 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse the -force_key_frames option string: a comma-separated list of
 * timestamps, where an entry starting with "chapters" expands to the output
 * file's chapter start times plus an optional delta. The resulting sorted
 * array (in encoder time base units) is stored on the OutputStream.
 * NOTE(review): some loop bookkeeping lines (counting commas, advancing p)
 * are elided in this view.
 */
3163 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3164 AVCodecContext *avctx)
3167 int n = 1, i, size, index = 0;
/* First pass: count entries (commas) to size the array. */
3170 for (p = kf; *p; p++)
3174 pts = av_malloc_array(size, sizeof(*pts));
3176 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3181 for (i = 0; i < n; i++) {
3182 char *next = strchr(p, ',');
/* "chapters[delta]": insert one keyframe per chapter start. */
3187 if (!memcmp(p, "chapters", 8)) {
3189 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array to fit the chapter entries (one slot is reused). */
3192 if (avf->nb_chapters > INT_MAX - size ||
3193 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3195 av_log(NULL, AV_LOG_FATAL,
3196 "Could not allocate forced key frames array.\n");
3199 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3200 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3202 for (j = 0; j < avf->nb_chapters; j++) {
3203 AVChapter *c = avf->chapters[j];
3204 av_assert1(index < size);
3205 pts[index++] = av_rescale_q(c->start, c->time_base,
3206 avctx->time_base) + t;
/* Plain timestamp entry. */
3211 t = parse_time_or_die("force_key_frames", p, 1);
3212 av_assert1(index < size);
3213 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3220 av_assert0(index == size);
3221 qsort(pts, size, sizeof(*pts), compare_int64);
3222 ost->forced_kf_count = size;
3223 ost->forced_kf_pts = pts;
/*
 * Configure the encoder context for an output stream that will be encoded:
 * choose the frame rate, pull format/rate/size parameters from the
 * buffersink feeding the encoder, set the encoder time base, and parse
 * forced-keyframe specifications.
 * NOTE(review): various guards, 'break' statements and returns are elided
 * in this view.
 */
3226 static int init_output_stream_encode(OutputStream *ost)
3228 InputStream *ist = get_input_stream(ost);
3229 AVCodecContext *enc_ctx = ost->enc_ctx;
3230 AVCodecContext *dec_ctx = NULL;
3231 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3234 set_encoder_id(output_files[ost->file_index], ost);
3237 ost->st->disposition = ist->st->disposition;
3239 dec_ctx = ist->dec_ctx;
3241 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type, mark it as default
 * (audio/video only). */
3243 for (j = 0; j < oc->nb_streams; j++) {
3244 AVStream *st = oc->streams[j];
3245 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3248 if (j == oc->nb_streams)
3249 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3250 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3251 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate priority: -r option > filtergraph > input -r > input
 * r_frame_rate > hard 25fps fallback. */
3254 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3255 if (!ost->frame_rate.num)
3256 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3257 if (ist && !ost->frame_rate.num)
3258 ost->frame_rate = ist->framerate;
3259 if (ist && !ost->frame_rate.num)
3260 ost->frame_rate = ist->st->r_frame_rate;
3261 if (ist && !ost->frame_rate.num) {
3262 ost->frame_rate = (AVRational){25, 1};
3263 av_log(NULL, AV_LOG_WARNING,
3265 "about the input framerate is available. Falling "
3266 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3267 "if you want a different framerate.\n",
3268 ost->file_index, ost->index);
3270 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest rate the encoder supports, unless -force_fps. */
3271 if (ost->enc->supported_framerates && !ost->force_fps) {
3272 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3273 ost->frame_rate = ost->enc->supported_framerates[idx];
3275 // reduce frame rate for mpeg4 to be within the spec limits
3276 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3277 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3278 ost->frame_rate.num, ost->frame_rate.den, 65535);
3282 switch (enc_ctx->codec_type) {
3283 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the buffersink at the end of the graph. */
3284 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3286 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3287 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3288 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3289 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3290 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3291 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3293 case AVMEDIA_TYPE_VIDEO:
3294 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3295 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3296 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3297 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3298 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3299 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3300 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe timestamps were parsed earlier in AV_TIME_BASE units;
 * rescale them into the encoder time base. */
3302 for (j = 0; j < ost->forced_kf_count; j++)
3303 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3305 enc_ctx->time_base);
3307 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3308 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3309 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3310 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3311 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3312 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
/* Warn about auto-selected non-yuv420p formats for common encoders. */
3313 if (!strncmp(ost->enc->name, "libx264", 7) &&
3314 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3315 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3316 av_log(NULL, AV_LOG_WARNING,
3317 "No pixel format specified, %s for H.264 encoding chosen.\n"
3318 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3319 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3320 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3321 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3322 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3323 av_log(NULL, AV_LOG_WARNING,
3324 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3325 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3326 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3327 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3329 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3330 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3332 enc_ctx->framerate = ost->frame_rate;
3334 ost->st->avg_frame_rate = ost->frame_rate;
/* bits_per_raw_sample from the decoder is valid only when the picture
 * geometry/format was not changed by filtering. */
3337 enc_ctx->width != dec_ctx->width ||
3338 enc_ctx->height != dec_ctx->height ||
3339 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3340 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" → runtime expression, "source" → keep input
 * keyframes, otherwise a static timestamp list. */
3343 if (ost->forced_keyframes) {
3344 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3345 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3346 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3348 av_log(NULL, AV_LOG_ERROR,
3349 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3352 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3353 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3354 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3355 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3357 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3358 // parse it only for static kf timings
3359 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3360 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3364 case AVMEDIA_TYPE_SUBTITLE:
3365 enc_ctx->time_base = AV_TIME_BASE_Q;
/* Default subtitle canvas size to the input stream's dimensions. */
3366 if (!enc_ctx->width) {
3367 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3368 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3371 case AVMEDIA_TYPE_DATA:
3378 ost->mux_timebase = enc_ctx->time_base;
3383 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3387 if (ost->encoding_needed) {
3388 AVCodec *codec = ost->enc;
3389 AVCodecContext *dec = NULL;
3392 ret = init_output_stream_encode(ost);
3396 if ((ist = get_input_stream(ost)))
3398 if (dec && dec->subtitle_header) {
3399 /* ASS code assumes this buffer is null terminated so add extra byte. */
3400 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3401 if (!ost->enc_ctx->subtitle_header)
3402 return AVERROR(ENOMEM);
3403 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3404 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3406 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3407 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3408 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3410 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3411 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3412 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3414 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3415 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3416 if (!ost->enc_ctx->hw_frames_ctx)
3417 return AVERROR(ENOMEM);
3420 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3421 if (ret == AVERROR_EXPERIMENTAL)
3422 abort_codec_experimental(codec, 1);
3423 snprintf(error, error_len,
3424 "Error while opening encoder for output stream #%d:%d - "
3425 "maybe incorrect parameters such as bit_rate, rate, width or height",
3426 ost->file_index, ost->index);
3429 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3430 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3431 av_buffersink_set_frame_size(ost->filter->filter,
3432 ost->enc_ctx->frame_size);
3433 assert_avoptions(ost->encoder_opts);
3434 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3435 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3436 " It takes bits/s as argument, not kbits/s\n");
3438 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3440 av_log(NULL, AV_LOG_FATAL,
3441 "Error initializing the output stream codec context.\n");
3445 * FIXME: ost->st->codec should't be needed here anymore.
3447 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3451 if (ost->enc_ctx->nb_coded_side_data) {
3454 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3455 sizeof(*ost->st->side_data));
3456 if (!ost->st->side_data)
3457 return AVERROR(ENOMEM);
3459 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3460 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3461 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3463 sd_dst->data = av_malloc(sd_src->size);
3465 return AVERROR(ENOMEM);
3466 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3467 sd_dst->size = sd_src->size;
3468 sd_dst->type = sd_src->type;
3469 ost->st->nb_side_data++;
3473 // copy timebase while removing common factors
3474 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3475 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3477 // copy estimated duration as a hint to the muxer
3478 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3479 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3481 ost->st->codec->codec= ost->enc_ctx->codec;
3482 } else if (ost->stream_copy) {
3483 ret = init_output_stream_streamcopy(ost);
3488 * FIXME: will the codec context used by the parser during streamcopy
3489 * This should go away with the new parser API.
3491 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3496 // parse user provided disposition, and update stream values
3497 if (ost->disposition) {
3498 static const AVOption opts[] = {
3499 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3500 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3501 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3502 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3503 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3504 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3505 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3506 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3507 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3508 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3509 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3510 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3511 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3512 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3515 static const AVClass class = {
3517 .item_name = av_default_item_name,
3519 .version = LIBAVUTIL_VERSION_INT,
3521 const AVClass *pclass = &class;
3523 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3528 /* initialize bitstream filters for the output stream
3529 * needs to be done here, because the codec id for streamcopy is not
3530 * known until now */
3531 ret = init_output_bsfs(ost);
3535 ost->initialized = 1;
3537 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/*
 * Warn (once per stream index) about a packet that references an input
 * stream which appeared after demuxing started. The caller ignores such
 * streams; this only reports them.
 * NOTE(review): interior lines are elided in this extract.
 */
3544 static void report_new_stream(int input_index, AVPacket *pkt)
3546 InputFile *file = input_files[input_index];
3547 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a higher) stream index — nothing to do. */
3549 if (pkt->stream_index < file->nb_streams_warn)
3551 av_log(file->ctx, AV_LOG_WARNING,
3552 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3553 av_get_media_type_string(st->codecpar->codec_type),
3554 input_index, pkt->stream_index,
3555 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the highest stream index reported so far to avoid repeats. */
3556 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * One-time initialization performed before the main transcode loop:
 *  - bind each complex-filtergraph output to a source input stream index,
 *  - initialize framerate-emulation start times (-re),
 *  - initialize all input streams (decoders),
 *  - open encoders for outputs not fed from filtergraphs,
 *  - mark programs with no used streams as AVDISCARD_ALL,
 *  - write headers for output files that have no streams,
 *  - dump the stream mapping to the log.
 * Returns 0 on success, a negative error code otherwise.
 * NOTE(review): interior lines (error paths, some closing braces) are
 * elided in this extract.
 */
3559 static int transcode_init(void)
3561 int ret = 0, i, j, k;
3562 AVFormatContext *oc;
3565 char error[1024] = {0};
/* Attach each complex-graph output to the input stream feeding the graph,
 * so that downstream code has a valid source_index. Only done for graphs
 * with exactly one input. */
3567 for (i = 0; i < nb_filtergraphs; i++) {
3568 FilterGraph *fg = filtergraphs[i];
3569 for (j = 0; j < fg->nb_outputs; j++) {
3570 OutputFilter *ofilter = fg->outputs[j];
3571 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3573 if (fg->nb_inputs != 1)
3575 for (k = nb_input_streams-1; k >= 0 ; k--)
3576 if (fg->inputs[0]->ist == input_streams[k])
3578 ofilter->ost->source_index = k;
3582 /* init framerate emulation */
3583 for (i = 0; i < nb_input_files; i++) {
3584 InputFile *ifile = input_files[i];
3585 if (ifile->rate_emu)
3586 for (j = 0; j < ifile->nb_streams; j++)
3587 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3590 /* init input streams */
3591 for (i = 0; i < nb_input_streams; i++)
3592 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoder contexts already set up before bailing out. */
3593 for (i = 0; i < nb_output_streams; i++) {
3594 ost = output_streams[i];
3595 avcodec_close(ost->enc_ctx);
3600 /* open each encoder */
3601 for (i = 0; i < nb_output_streams; i++) {
3602 // skip streams fed from filtergraphs until we have a frame for them
3603 if (output_streams[i]->filter)
3606 ret = init_output_stream(output_streams[i], error, sizeof(error));
3611 /* discard unused programs */
3612 for (i = 0; i < nb_input_files; i++) {
3613 InputFile *ifile = input_files[i];
3614 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3615 AVProgram *p = ifile->ctx->programs[j];
3616 int discard = AVDISCARD_ALL;
3618 for (k = 0; k < p->nb_stream_indexes; k++)
3619 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3620 discard = AVDISCARD_DEFAULT;
3623 p->discard = discard;
3627 /* write headers for files with no streams */
3628 for (i = 0; i < nb_output_files; i++) {
3629 oc = output_files[i]->ctx;
3630 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3631 ret = check_init_output_file(output_files[i], i);
3638 /* dump the stream mapping */
3639 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3640 for (i = 0; i < nb_input_streams; i++) {
3641 ist = input_streams[i];
3643 for (j = 0; j < ist->nb_filters; j++) {
3644 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3645 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3646 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3647 ist->filters[j]->name);
3648 if (nb_filtergraphs > 1)
3649 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3650 av_log(NULL, AV_LOG_INFO, "\n");
3655 for (i = 0; i < nb_output_streams; i++) {
3656 ost = output_streams[i];
3658 if (ost->attachment_filename) {
3659 /* an attached file */
3660 av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3661 ost->attachment_filename, ost->file_index, ost->index);
3665 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3666 /* output from a complex graph */
3667 av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3668 if (nb_filtergraphs > 1)
3669 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3671 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3672 ost->index, ost->enc ? ost->enc->name : "?");
3676 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3677 input_streams[ost->source_index]->file_index,
3678 input_streams[ost->source_index]->st->index,
3681 if (ost->sync_ist != input_streams[ost->source_index])
3682 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3683 ost->sync_ist->file_index,
3684 ost->sync_ist->st->index);
3685 if (ost->stream_copy)
3686 av_log(NULL, AV_LOG_INFO, " (copy)");
/* For transcoding, report "codec (implementation)" on both sides; the
 * implementation name collapses to "native" when it equals the codec name. */
3688 const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3689 const AVCodec *out_codec   = ost->enc;
3690 const char *decoder_name   = "?";
3691 const char *in_codec_name  = "?";
3692 const char *encoder_name   = "?";
3693 const char *out_codec_name = "?";
3694 const AVCodecDescriptor *desc;
3697 decoder_name  = in_codec->name;
3698 desc = avcodec_descriptor_get(in_codec->id);
3700 in_codec_name = desc->name;
3701 if (!strcmp(decoder_name, in_codec_name))
3702 decoder_name  = "native";
3706 encoder_name   = out_codec->name;
3707 desc = avcodec_descriptor_get(out_codec->id);
3709 out_codec_name = desc->name;
3710 if (!strcmp(encoder_name, out_codec_name))
3711 encoder_name = "native";
3714 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3715 in_codec_name, decoder_name,
3716 out_codec_name, encoder_name);
3718 av_log(NULL, AV_LOG_INFO, "\n");
3722 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3726 transcode_init_done = 1;
3731 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/*
 * Return 1 if at least one output stream still wants more output.
 * A stream no longer needs output when it is finished, its file hit the
 * -fs size limit, or it reached -frames (in which case every stream of
 * that file is closed).
 * NOTE(review): interior lines are elided in this extract.
 */
3732 static int need_output(void)
3736 for (i = 0; i < nb_output_streams; i++) {
3737 OutputStream *ost    = output_streams[i];
3738 OutputFile *of       = output_files[ost->file_index];
3739 AVFormatContext *os  = output_files[ost->file_index]->ctx;
3741 if (ost->finished ||
3742 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Frame-count limit reached: close the whole output file's streams. */
3744 if (ost->frame_number >= ost->max_frames) {
3746 for (j = 0; j < of->ctx->nb_streams; j++)
3747 close_output_stream(output_streams[of->ost_index + j]);
3758 * Select the output stream to process.
3760 * @return selected output stream, or NULL if none available
/*
 * Pick the output stream to feed next: the unfinished stream with the
 * smallest current DTS (rescaled; AV_NOPTS_VALUE sorts first as INT64_MIN).
 * Streams that are neither initialized nor done with their inputs are
 * skipped; an "unavailable" winner yields NULL so the caller retries.
 * NOTE(review): interior lines are elided in this extract.
 */
3762 static OutputStream *choose_output(void)
3765 int64_t opts_min = INT64_MAX;
3766 OutputStream *ost_min = NULL;
3768 for (i = 0; i < nb_output_streams; i++) {
3769 OutputStream *ost = output_streams[i];
3770 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3771 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3773 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3774 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3776 if (!ost->initialized && !ost->inputs_done)
3779 if (!ost->finished && opts < opts_min) {
3781 ost_min = ost->unavailable ? NULL : ost;
/*
 * Enable (on != 0) or disable terminal echo on stdin via termios.
 * Failures of tcgetattr are silently ignored (e.g. stdin not a tty).
 */
3787 static void set_tty_echo(int on)
3791 if (tcgetattr(0, &tty) == 0) {
3792 if (on) tty.c_lflag |= ECHO;
3793 else tty.c_lflag &= ~ECHO;
3794 tcsetattr(0, TCSANOW, &tty);
/*
 * Poll (at most every 100ms) for an interactive key press and act on it:
 *   +/-   raise/lower log verbosity          s   toggle QP histogram
 *   c/C   send/queue a filter command        d/D cycle/set debug flags
 *   h     cycle packet/hex dumping           ?   print this key help
 * Returns AVERROR_EXIT when the user requested termination (or a signal
 * was received), 0 otherwise.
 * NOTE(review): interior lines ('q' handling, some branches/braces) are
 * elided in this extract.
 */
3799 static int check_keyboard_interaction(int64_t cur_time)
3802 static int64_t last_time;
3803 if (received_nb_signals)
3804 return AVERROR_EXIT;
3805 /* read_key() returns 0 on EOF */
3806 if(cur_time - last_time >= 100000 && !run_as_daemon){
3808 last_time = cur_time;
3812 return AVERROR_EXIT;
3813 if (key == '+') av_log_set_level(av_log_get_level()+10);
3814 if (key == '-') av_log_set_level(av_log_get_level()-10);
3815 if (key == 's') qp_hist     ^= 1;
3818 do_hex_dump = do_pkt_dump = 0;
3819 } else if(do_pkt_dump){
3823 av_log_set_level(AV_LOG_DEBUG);
3825 if (key == 'c' || key == 'C'){
3826 char buf[4096], target[64], command[256], arg[256] = {0};
3829 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read a whole command line, echoing enabled while typing. */
3832 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3837 fprintf(stderr, "\n");
3839 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3840 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3841 target, time, command, arg);
3842 for (i = 0; i < nb_filtergraphs; i++) {
3843 FilterGraph *fg = filtergraphs[i];
/* 'c' sends to the first matching filter only; 'C' queues for all. */
3846 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3847 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3848 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3849 } else if (key == 'c') {
3850 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3851 ret = AVERROR_PATCHWELCOME;
3853 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3855 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3860 av_log(NULL, AV_LOG_ERROR,
3861 "Parse error, at least 3 arguments were expected, "
3862 "only %d given in string '%s'\n", n, buf);
3865 if (key == 'd' || key == 'D'){
/* 'd' cycles to the next debug mode; the shifted value is re-checked below. */
3868 debug = input_streams[0]->st->codec->debug<<1;
3869 if(!debug) debug = 1;
3870 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3877 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3882 fprintf(stderr, "\n");
3883 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3884 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3886 for(i=0;i<nb_input_streams;i++) {
3887 input_streams[i]->st->codec->debug = debug;
3889 for(i=0;i<nb_output_streams;i++) {
3890 OutputStream *ost = output_streams[i];
3891 ost->enc_ctx->debug = debug;
3893 if(debug) av_log_set_level(AV_LOG_DEBUG);
3894 fprintf(stderr,"debug=%d\n", debug);
3897 fprintf(stderr, "key    function\n"
3898 "?      show this help\n"
3899 "+      increase verbosity\n"
3900 "-      decrease verbosity\n"
3901 "c      Send command to first matching filter supporting it\n"
3902 "C      Send/Queue command to all matching filters\n"
3903 "D      cycle through available debug modes\n"
3904 "h      dump packets/hex press to cycle through the 3 states\n"
3906 "s      Show QP histogram\n"
/*
 * Demuxer thread body (one per input file when several inputs are used):
 * reads packets with av_read_frame() and forwards them to the main thread
 * through f->in_thread_queue. On read error/EOF the error is propagated to
 * the receiving side via av_thread_message_queue_set_err_recv().
 * NOTE(review): interior lines (loop construct, EAGAIN sleep path) are
 * elided in this extract.
 */
3913 static void *input_thread(void *arg)
3916 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3921 ret = av_read_frame(f->ctx, &pkt);
3923 if (ret == AVERROR(EAGAIN)) {
3928 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3931 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send failed because the queue is full: warn, then retry
 * blocking so the packet is not lost. */
3932 if (flags && ret == AVERROR(EAGAIN)) {
3934 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3935 av_log(f->ctx, AV_LOG_WARNING,
3936 "Thread message queue blocking; consider raising the "
3937 "thread_queue_size option (current value: %d)\n",
3938 f->thread_queue_size);
3941 if (ret != AVERROR_EOF)
3942 av_log(f->ctx, AV_LOG_ERROR,
3943 "Unable to send packet to main thread: %s\n",
3945 av_packet_unref(&pkt);
3946 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Shut down all demuxer threads: signal EOF to the sending side, drain and
 * unref any queued packets, join the thread, then free the message queue.
 * Safe to call when no threads were started (queues are NULL-checked).
 */
3954 static void free_input_threads(void)
3958 for (i = 0; i < nb_input_files; i++) {
3959 InputFile *f = input_files[i];
3962 if (!f || !f->in_thread_queue)
3964 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3965 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3966 av_packet_unref(&pkt);
3968 pthread_join(f->thread, NULL);
3970 av_thread_message_queue_free(&f->in_thread_queue);
/*
 * Start one demuxer thread per input file (skipped entirely when there is
 * only a single input). Inputs that are non-seekable (and not lavfi) are
 * read in non-blocking mode. Returns 0 on success or a negative AVERROR.
 * NOTE(review): interior lines are elided in this extract.
 */
3974 static int init_input_threads(void)
3978 if (nb_input_files == 1)
3981 for (i = 0; i < nb_input_files; i++) {
3982 InputFile *f = input_files[i];
3984 if (f->ctx->pb ? !f->ctx->pb->seekable :
3985 strcmp(f->ctx->iformat->name, "lavfi"))
3986 f->non_blocking = 1;
3987 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3988 f->thread_queue_size, sizeof(AVPacket));
3992 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3993 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3994 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, not -1/errno. */
3995 return AVERROR(ret);
/*
 * Receive the next packet for this input file from its demuxer thread's
 * message queue (non-blocking when the file is in non-blocking mode —
 * the selecting condition is elided in this extract).
 */
4001 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4003 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4005 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Fetch the next packet from an input file. With rate emulation (-re),
 * return EAGAIN while any stream of the file is ahead of wall-clock time.
 * With multiple inputs the packet comes from the demuxer thread's queue,
 * otherwise directly from av_read_frame().
 * NOTE(review): the rate-emulation guard/comparison lines are partially
 * elided in this extract.
 */
4009 static int get_input_packet(InputFile *f, AVPacket *pkt)
4013 for (i = 0; i < f->nb_streams; i++) {
4014 InputStream *ist = input_streams[f->ist_index + i];
/* Compare the stream's DTS (in microseconds) against elapsed real time. */
4015 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4016 int64_t now = av_gettime_relative() - ist->start;
4018 return AVERROR(EAGAIN);
4023 if (nb_input_files > 1)
4024 return get_input_packet_mt(f, pkt);
4026 return av_read_frame(f->ctx, pkt);
/*
 * Report whether any output stream is currently marked unavailable
 * (i.e. its input returned EAGAIN). Return lines are elided in this
 * extract; only the scan over output_streams is visible.
 */
4029 static int got_eagain(void)
4032 for (i = 0; i < nb_output_streams; i++)
4033 if (output_streams[i]->unavailable)
/*
 * Clear all EAGAIN bookkeeping: the per-input-file 'eagain' flag and the
 * per-output-stream 'unavailable' flag, so the next loop iteration retries
 * every input/output.
 */
4038 static void reset_eagain(void)
4041 for (i = 0; i < nb_input_files; i++)
4042 input_files[i]->eagain = 0;
4043 for (i = 0; i < nb_output_streams; i++)
4044 output_streams[i]->unavailable = 0;
4047 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/*
 * Keep *duration as max(tmp, *duration), comparing across the two time
 * bases with av_compare_ts(); return the time base that the stored
 * duration is expressed in. Interior lines (the guard for an unset
 * duration and the assignment branches) are elided in this extract.
 */
4048 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4049 AVRational time_base)
4055 return tmp_time_base;
4058 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4061 return tmp_time_base;
/*
 * Rewind an input file for -stream_loop: seek back to start_time, flush
 * each active decoder, then estimate the duration of the loop iteration
 * (stream span plus the last frame's length) and fold it into
 * ifile->duration via duration_max(). For audio the last-frame length
 * comes from nb_samples/sample_rate; video prefers ist->framerate, then
 * avg_frame_rate, else 1 tick.
 * NOTE(review): interior lines (error checks, loop-counter decrement body)
 * are elided in this extract.
 */
4067 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4070 AVCodecContext *avctx;
4071 int i, ret, has_audio = 0;
4072 int64_t duration = 0;
4074 ret = av_seek_frame(is, -1, is->start_time, 0);
4078 for (i = 0; i < ifile->nb_streams; i++) {
4079 ist = input_streams[ifile->ist_index + i];
4080 avctx = ist->dec_ctx;
/* Drain and reset active decoders so the next loop starts clean. */
4083 if (ist->decoding_needed) {
4084 process_input_packet(ist, NULL, 1);
4085 avcodec_flush_buffers(avctx);
4088 /* duration is the length of the last frame in a stream
4089 * when audio stream is present we don't care about
4090 * last video frame length because it's not defined exactly */
4091 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4095 for (i = 0; i < ifile->nb_streams; i++) {
4096 ist = input_streams[ifile->ist_index + i];
4097 avctx = ist->dec_ctx;
4100 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4101 AVRational sample_rate = {1, avctx->sample_rate};
4103 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4107 if (ist->framerate.num) {
4108 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4109 } else if (ist->st->avg_frame_rate.num) {
4110 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4111 } else duration = 1;
4113 if (!ifile->duration)
4114 ifile->time_base = ist->st->time_base;
4115 /* the total duration of the stream, max_pts - min_pts is
4116 * the duration of the stream without the last frame */
4117 duration += ist->max_pts - ist->min_pts;
4118 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* A positive loop count is decremented once per completed loop
 * (decrement body elided in this extract). */
4122 if (ifile->loop > 0)
4130 * - 0 -- one packet was read and processed
4131 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4132 * this function should be called again
4133 * - AVERROR_EOF -- this function should not be called again
/*
 * Read and process one packet from input file 'file_index'.
 * Steps, in order:
 *  1. fetch a packet (get_input_packet); on EOF optionally loop the file
 *     (seek_to_start) or flush decoders and finish dependent outputs;
 *  2. ignore packets from streams that appeared after startup;
 *  3. timestamp wrap correction based on pts_wrap_bits and start_time;
 *  4. copy stream-global side data onto the first packet of each stream;
 *  5. apply ts_offset, ts_scale and loop-duration offsets to pts/dts;
 *  6. detect and compensate DTS/PTS discontinuities (AVFMT_TS_DISCONT
 *     formats adjust ts_offset; others drop out-of-range timestamps);
 *  7. hand the packet to process_input_packet() and unref it.
 * Returns 0, AVERROR(EAGAIN) (retry later) or AVERROR_EOF.
 * NOTE(review): interior lines (declarations, some braces and guards) are
 * elided in this extract; comments describe only what is visible.
 */
4135 static int process_input(int file_index)
4137 InputFile *ifile = input_files[file_index];
4138 AVFormatContext *is;
4146 ret = get_input_packet(ifile, &pkt);
4148 if (ret == AVERROR(EAGAIN)) {
/* EOF with -stream_loop still pending: rewind and try reading again. */
4152 if (ret < 0 && ifile->loop) {
4153 if ((ret = seek_to_start(ifile, is)) < 0)
4155 ret = get_input_packet(ifile, &pkt);
4156 if (ret == AVERROR(EAGAIN)) {
4162 if (ret != AVERROR_EOF) {
4163 print_error(is->filename, ret);
/* Real EOF: flush each active decoder and finish the output streams that
 * are fed directly (streamcopy/subtitles) from this file. */
4168 for (i = 0; i < ifile->nb_streams; i++) {
4169 ist = input_streams[ifile->ist_index + i];
4170 if (ist->decoding_needed) {
4171 ret = process_input_packet(ist, NULL, 0);
4176 /* mark all outputs that don't go through lavfi as finished */
4177 for (j = 0; j < nb_output_streams; j++) {
4178 OutputStream *ost = output_streams[j];
4180 if (ost->source_index == ifile->ist_index + i &&
4181 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4182 finish_output_stream(ost);
4186 ifile->eof_reached = 1;
4187 return AVERROR(EAGAIN);
4193 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4194 is->streams[pkt.stream_index]);
4196 /* the following test is needed in case new streams appear
4197 dynamically in stream : we ignore them */
4198 if (pkt.stream_index >= ifile->nb_streams) {
4199 report_new_stream(file_index, &pkt);
4200 goto discard_packet;
4203 ist = input_streams[ifile->ist_index + pkt.stream_index];
4205 ist->data_size += pkt.size;
4209 goto discard_packet;
4211 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4212 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4217 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4218 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4219 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4220 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4221 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4222 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4223 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4224 av_ts2str(input_files[ist->file_index]->ts_offset),
4225 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4228 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4229 int64_t stime, stime2;
4230 // Correcting starttime based on the enabled streams
4231 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4232 //       so we instead do it here as part of discontinuity handling
4233 if (   ist->next_dts == AV_NOPTS_VALUE
4234 && ifile->ts_offset == -is->start_time
4235 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4236 int64_t new_start_time = INT64_MAX;
4237 for (i=0; i<is->nb_streams; i++) {
4238 AVStream *st = is->streams[i];
4239 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4241 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4243 if (new_start_time > is->start_time) {
4244 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4245 ifile->ts_offset = -new_start_time;
/* Timestamps more than half a wrap period past start_time are assumed to
 * have wrapped; subtract one full wrap and re-check on later packets. */
4249 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4250 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4251 ist->wrap_correction_done = 1;
4253 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4254 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4255 ist->wrap_correction_done = 0;
4257 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4258 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4259 ist->wrap_correction_done = 0;
4263 /* add the stream-global side data to the first packet */
4264 if (ist->nb_packets == 1) {
4265 for (i = 0; i < ist->st->nb_side_data; i++) {
4266 AVPacketSideData *src_sd = &ist->st->side_data[i];
4269 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4271 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4274 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4278 memcpy(dst_data, src_sd->data, src_sd->size);
4282 if (pkt.dts != AV_NOPTS_VALUE)
4283 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4284 if (pkt.pts != AV_NOPTS_VALUE)
4285 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4287 if (pkt.pts != AV_NOPTS_VALUE)
4288 pkt.pts *= ist->ts_scale;
4289 if (pkt.dts != AV_NOPTS_VALUE)
4290 pkt.dts *= ist->ts_scale;
/* First inter-stream discontinuity check: against the file's last seen
 * timestamp, before this stream has any next_dts of its own. */
4292 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4293 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4294 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4295 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4296 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4297 int64_t delta   = pkt_dts - ifile->last_ts;
4298 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4299 delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
4300 ifile->ts_offset -= delta;
4301 av_log(NULL, AV_LOG_DEBUG,
4302 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4303 delta, ifile->ts_offset);
4304 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4305 if (pkt.pts != AV_NOPTS_VALUE)
4306 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Shift timestamps by the accumulated loop duration and track the pts
 * range used to measure the stream's span for the next loop. */
4310 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4311 if (pkt.pts != AV_NOPTS_VALUE) {
4312 pkt.pts += duration;
4313 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4314 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4317 if (pkt.dts != AV_NOPTS_VALUE)
4318 pkt.dts += duration;
/* Second discontinuity check: against this stream's predicted next_dts. */
4320 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4321 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4322 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4323 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4325 int64_t delta   = pkt_dts - ist->next_dts;
4326 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4327 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4328 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
4329 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4330 ifile->ts_offset -= delta;
4331 av_log(NULL, AV_LOG_DEBUG,
4332 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4333 delta, ifile->ts_offset);
4334 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4335 if (pkt.pts != AV_NOPTS_VALUE)
4336 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Formats without TS_DISCONT: drop out-of-range timestamps instead of
 * shifting the file offset. */
4339 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4340 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4341 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4342 pkt.dts = AV_NOPTS_VALUE;
4344 if (pkt.pts != AV_NOPTS_VALUE){
4345 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4346 delta   = pkt_pts - ist->next_dts;
4347 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4348 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4349 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4350 pkt.pts = AV_NOPTS_VALUE;
4356 if (pkt.dts != AV_NOPTS_VALUE)
4357 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4360 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4361 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4362 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4363 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4364 av_ts2str(input_files[ist->file_index]->ts_offset),
4365 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4368 sub2video_heartbeat(ist, pkt.pts);
4370 process_input_packet(ist, &pkt, 0);
4373 av_packet_unref(&pkt);
4379 * Perform a step of transcoding for the specified filter graph.
4381 * @param[in] graph filter graph to consider
4382 * @param[out] best_ist input stream where a frame would allow to continue
4383 * @return 0 for success, <0 for error
/*
 * Run one step of a configured filtergraph: ask the graph for output via
 * avfilter_graph_request_oldest(). On success reap filtered frames; on EOF
 * reap the last frames and close all the graph's output streams; on EAGAIN
 * select, in *best_ist, the graph input whose buffersrc has failed the most
 * requests (skipping inputs whose file is in EAGAIN or at EOF) so the
 * caller knows which input stream to read next.
 * NOTE(review): interior lines are elided in this extract.
 */
4385 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4388 int nb_requests, nb_requests_max = 0;
4389 InputFilter *ifilter;
4393 ret = avfilter_graph_request_oldest(graph->graph);
4395 return reap_filters(0);
4397 if (ret == AVERROR_EOF) {
4398 ret = reap_filters(1);
4399 for (i = 0; i < graph->nb_outputs; i++)
4400 close_output_stream(graph->outputs[i]->ost);
4403 if (ret != AVERROR(EAGAIN))
4406 for (i = 0; i < graph->nb_inputs; i++) {
4407 ifilter = graph->inputs[i];
4409 if (input_files[ist->file_index]->eagain ||
4410 input_files[ist->file_index]->eof_reached)
4412 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4413 if (nb_requests > nb_requests_max) {
4414 nb_requests_max = nb_requests;
/* No input could be chosen: mark all graph outputs unavailable for now. */
4420 for (i = 0; i < graph->nb_outputs; i++)
4421 graph->outputs[i]->ost->unavailable = 1;
4427 * Run a single step of transcoding.
4429 * @return 0 for success, <0 for error
/*
 * Run a single step of transcoding: pick an output stream, make sure its
 * filtergraph is configured (configuring it once all input formats are
 * known), decide which input stream to read from — either via the graph
 * (transcode_from_filter) or directly via source_index — then read and
 * process one input packet and reap any filtered output.
 * Returns 0 on success, <0 on error; EOF from process_input maps to 0.
 * NOTE(review): interior lines are elided in this extract.
 */
4431 static int transcode_step(void)
4434 InputStream *ist = NULL;
4437 ost = choose_output();
4444 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Graph exists but is not configured yet: configure it once every input
 * filter has seen its format. */
4448 if (ost->filter && !ost->filter->graph->graph) {
4449 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4450 ret = configure_filtergraph(ost->filter->graph);
4452 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4458 if (ost->filter && ost->filter->graph->graph) {
4459 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4463 } else if (ost->filter) {
/* Unconfigured graph: find an input that has not produced output and
 * whose file is not yet at EOF; if none remains, mark inputs done. */
4465 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4466 InputFilter *ifilter = ost->filter->graph->inputs[i];
4467 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4473 ost->inputs_done = 1;
4477 av_assert0(ost->source_index >= 0);
4478 ist = input_streams[ost->source_index];
4481 ret = process_input(ist->file_index);
4482 if (ret == AVERROR(EAGAIN)) {
4483 if (input_files[ist->file_index]->eagain)
4484 ost->unavailable = 1;
4489 return ret == AVERROR_EOF ? 0 : ret;
4491 return reap_filters(0);
4495 * The following code is the main loop of the file converter
/*
 * Main conversion loop. After transcode_init() and optional input-thread
 * startup, repeatedly: handle keyboard interaction, stop when no output is
 * needed or a termination signal arrived, run transcode_step(), and print
 * a progress report. On exit: flush decoders, write trailers, close
 * encoders/decoders, enforce -abort_on empty_output, and free per-stream
 * resources.
 * NOTE(review): interior lines (declarations, some error paths/braces) are
 * elided in this extract.
 */
4497 static int transcode(void)
4500 AVFormatContext *os;
4503 int64_t timer_start;
4504 int64_t total_packets_written = 0;
4506 ret = transcode_init();
4510 if (stdin_interaction) {
4511 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4514 timer_start = av_gettime_relative();
4517 if ((ret = init_input_threads()) < 0)
4521 while (!received_sigterm) {
4522 int64_t cur_time= av_gettime_relative();
4524 /* if 'q' pressed, exits */
4525 if (stdin_interaction)
4526 if (check_keyboard_interaction(cur_time) < 0)
4529 /* check if there's any stream where output is still needed */
4530 if (!need_output()) {
4531 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4535 ret = transcode_step();
4536 if (ret < 0 && ret != AVERROR_EOF) {
4538 av_strerror(ret, errbuf, sizeof(errbuf));
4540 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4544 /* dump report by using the output first video and audio streams */
4545 print_report(0, timer_start, cur_time);
4548 free_input_threads();
4551 /* at the end of stream, we must flush the decoder buffers */
4552 for (i = 0; i < nb_input_streams; i++) {
4553 ist = input_streams[i];
4554 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4555 process_input_packet(ist, NULL, 0);
4562 /* write the trailer if needed and close file */
4563 for (i = 0; i < nb_output_files; i++) {
4564 os = output_files[i]->ctx;
4565 if (!output_files[i]->header_written) {
4566 av_log(NULL, AV_LOG_ERROR,
4567 "Nothing was written into output file %d (%s), because "
4568 "at least one of its streams received no packets.\n",
4572 if ((ret = av_write_trailer(os)) < 0) {
4573 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4579 /* dump report by using the first video and audio streams */
4580 print_report(1, timer_start, av_gettime_relative());
4582 /* close each encoder */
4583 for (i = 0; i < nb_output_streams; i++) {
4584 ost = output_streams[i];
4585 if (ost->encoding_needed) {
4586 av_freep(&ost->enc_ctx->stats_in);
4588 total_packets_written += ost->packets_written;
4591 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4592 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4596 /* close each decoder */
4597 for (i = 0; i < nb_input_streams; i++) {
4598 ist = input_streams[i];
4599 if (ist->decoding_needed) {
4600 avcodec_close(ist->dec_ctx);
4601 if (ist->hwaccel_uninit)
4602 ist->hwaccel_uninit(ist->dec_ctx);
4606 av_buffer_unref(&hw_device_ctx);
4613 free_input_threads();
/* Final per-stream cleanup: close two-pass logfiles and free option
 * dictionaries and auxiliary buffers. */
4616 if (output_streams) {
4617 for (i = 0; i < nb_output_streams; i++) {
4618 ost = output_streams[i];
4621 if (fclose(ost->logfile))
4622 av_log(NULL, AV_LOG_ERROR,
4623 "Error closing logfile, loss of information possible: %s\n",
4624 av_err2str(AVERROR(errno)));
4625 ost->logfile = NULL;
4627 av_freep(&ost->forced_kf_pts);
4628 av_freep(&ost->apad);
4629 av_freep(&ost->disposition);
4630 av_dict_free(&ost->encoder_opts);
4631 av_dict_free(&ost->sws_dict);
4632 av_dict_free(&ost->swr_opts);
4633 av_dict_free(&ost->resample_opts);
/*
 * Return user CPU time in microseconds: getrusage() on POSIX,
 * GetProcessTimes() on Windows (100ns FILETIME units divided by 10),
 * falling back to a relative wall-clock timestamp elsewhere.
 */
4641 static int64_t getutime(void)
4644 struct rusage rusage;
4646 getrusage(RUSAGE_SELF, &rusage);
4647 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4648 #elif HAVE_GETPROCESSTIMES
4650 FILETIME c, e, k, u;
4651 proc = GetCurrentProcess();
4652 GetProcessTimes(proc, &c, &e, &k, &u);
4653 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4655 return av_gettime_relative();
/*
 * Return the process's peak memory usage in bytes: ru_maxrss (KiB -> bytes)
 * via getrusage() where available, PeakPagefileUsage via
 * GetProcessMemoryInfo() on Windows. The fallback branch is elided in this
 * extract.
 */
4659 static int64_t getmaxrss(void)
4661 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4662 struct rusage rusage;
4663 getrusage(RUSAGE_SELF, &rusage);
4664 return (int64_t)rusage.ru_maxrss * 1024;
4665 #elif HAVE_GETPROCESSMEMORYINFO
4667 PROCESS_MEMORY_COUNTERS memcounters;
4668 proc = GetCurrentProcess();
4669 memcounters.cb = sizeof(memcounters);
4670 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4671 return memcounters.PeakPagefileUsage;
/* No-op log callback used to silence libav* logging (installed in main()
 * when running as a daemon); body elided/empty in this extract. */
4677 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: register cleanup and all codecs/devices/filters,
 * initialize networking, parse the command line (ffmpeg_parse_options),
 * validate that at least one output file exists, run transcode(), report
 * benchmark timing and decode-error statistics, then exit with 255 if a
 * termination signal was received, else main_return_code.
 * NOTE(review): interior lines (error exits, #if guards) are elided in
 * this extract.
 */
4681 int main(int argc, char **argv)
4688 register_exit(ffmpeg_cleanup);
4690 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4692 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4693 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: run detached with logging suppressed. */
4695 if(argc>1 && !strcmp(argv[1], "-d")){
4697 av_log_set_callback(log_callback_null);
4702 avcodec_register_all();
4704 avdevice_register_all();
4706 avfilter_register_all();
4708 avformat_network_init();
4710 show_banner(argc, argv, options);
4712 /* parse options and open all input/output files */
4713 ret = ffmpeg_parse_options(argc, argv);
4717 if (nb_output_files <= 0 && nb_input_files == 0) {
4719 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4723 /* file converter / grab */
4724 if (nb_output_files <= 0) {
4725 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4729 //     if (nb_input_files == 0) {
4730 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4734 for (i = 0; i < nb_output_files; i++) {
4735 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4739 current_time = ti = getutime();
4740 if (transcode() < 0)
4742 ti = getutime() - ti;
4744 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4746 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4747 decode_error_stat[0], decode_error_stat[1]);
/* -max_error_rate: fail the run if the decode error ratio exceeds it. */
4748 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4751 exit_program(received_nb_signals ? 255 : main_return_code);
4752 return main_return_code;