2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/display.h"
54 #include "libavutil/mathematics.h"
55 #include "libavutil/pixdesc.h"
56 #include "libavutil/avstring.h"
57 #include "libavutil/libm.h"
58 #include "libavutil/imgutils.h"
59 #include "libavutil/timestamp.h"
60 #include "libavutil/bprint.h"
61 #include "libavutil/time.h"
62 #include "libavutil/threadmessage.h"
63 #include "libavcodec/mathops.h"
64 #include "libavformat/os_support.h"
66 # include "libavfilter/avfilter.h"
67 # include "libavfilter/buffersrc.h"
68 # include "libavfilter/buffersink.h"
70 #if HAVE_SYS_RESOURCE_H
72 #include <sys/types.h>
73 #include <sys/resource.h>
74 #elif HAVE_GETPROCESSTIMES
77 #if HAVE_GETPROCESSMEMORYINFO
81 #if HAVE_SETCONSOLECTRLHANDLER
87 #include <sys/select.h>
92 #include <sys/ioctl.h>
106 #include "cmdutils.h"
108 #include "libavutil/avassert.h"
// Program identity used by cmdutils for the banner / version output.
110 const char program_name[] = "ffmpeg";
111 const int program_birth_year = 2000;
// Log file for per-frame video statistics (-vstats); opened lazily.
113 static FILE *vstats_file;
// Constant names usable inside -force_key_frames expressions.
115 const char *const forced_keyframes_const_names[] = {
// Forward declarations for helpers defined later in this file.
124 static void do_video_stats(OutputStream *ost, int frame_size);
125 static int64_t getutime(void);
126 static int64_t getmaxrss(void);
127 static int ifilter_has_all_input_formats(FilterGraph *fg);
// Run-time state and statistics counters.
129 static int run_as_daemon = 0;
130 static int nb_frames_dup = 0;
131 static unsigned dup_warning = 1000;
132 static int nb_frames_drop = 0;
133 static int64_t decode_error_stat[2];
135 static int want_sdp = 1;
// NOTE(review): current_time takes part in int64_t arithmetic in
// update_benchmark(); confirm upstream declares it int64_t.
137 static int current_time;
138 AVIOContext *progress_avio = NULL;
// Shared scratch buffer for encoded subtitles (see do_subtitle_out).
140 static uint8_t *subtitle_out;
// Global stream/file tables; non-static, so presumably referenced by the
// other ffmpeg tool translation units -- confirm against the full tree.
142 InputStream **input_streams = NULL;
143 int nb_input_streams = 0;
144 InputFile **input_files = NULL;
145 int nb_input_files = 0;
147 OutputStream **output_streams = NULL;
148 int nb_output_streams = 0;
149 OutputFile **output_files = NULL;
150 int nb_output_files = 0;
152 FilterGraph **filtergraphs;
157 /* init terminal so that we can grab keys */
158 static struct termios oldtty;
159 static int restore_tty;
163 static void free_input_threads(void);
// NOTE(review): the next two lines are the interior of a /* ... */ comment
// whose delimiters fall outside this sampled view.
167 Convert subtitles to video with alpha to insert them in filter graphs.
168 This is a temporary solution until libavfilter gets real subtitles support.
// Allocate a blank (all-zero, i.e. fully transparent) RGB32 canvas in
// ist->sub2video.frame, sized from the decoder when known, otherwise from
// the pre-computed sub2video dimensions.
171 static int sub2video_get_blank_frame(InputStream *ist)
174 AVFrame *frame = ist->sub2video.frame;
176 av_frame_unref(frame);
// Prefer decoder dimensions; fall back to the configured sub2video w/h.
177 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
178 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
179 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
// 32-byte buffer alignment.
180 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
// Zero the pixel plane: RGB32 with alpha 0 == transparent.
182 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
// Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas `dst`,
// expanding 8-bit palette indices through the rect's palette (r->data[1]).
186 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
189 uint32_t *pal, *dst2;
// Only bitmap subtitles can be rendered; other types are skipped with a warning.
193 if (r->type != SUBTITLE_BITMAP) {
194 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
// Reject rectangles that do not fit inside the w x h canvas.
197 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
198 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
199 r->x, r->y, r->w, r->h, w, h
// Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel).
204 dst += r->y * dst_linesize + r->x * 4;
206 pal = (uint32_t *)r->data[1];
207 for (y = 0; y < r->h; y++) {
208 dst2 = (uint32_t *)dst;
210 for (x = 0; x < r->w; x++)
// Palette lookup: 8-bit index -> 32-bit pixel value.
211 *(dst2++) = pal[*(src2++)];
213 src += r->linesize[0];
// Send the current sub2video frame, stamped with `pts`, to every filtergraph
// input fed by this stream.  KEEP_REF keeps our copy so the same frame can be
// re-sent later (heartbeats).
217 static void sub2video_push_ref(InputStream *ist, int64_t pts)
219 AVFrame *frame = ist->sub2video.frame;
222 av_assert1(frame->data[0]);
// Record the last pts pushed so heartbeats can avoid going backwards.
223 ist->sub2video.last_pts = frame->pts = pts;
224 for (i = 0; i < ist->nb_filters; i++)
225 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
226 AV_BUFFERSRC_FLAG_KEEP_REF |
227 AV_BUFFERSRC_FLAG_PUSH);
// Render a decoded AVSubtitle onto a fresh blank canvas and push it into the
// filtergraphs.  With sub == NULL an empty (clearing) frame is pushed instead,
// reusing the previous subtitle's end time as its timestamp.
230 void sub2video_update(InputStream *ist, AVSubtitle *sub)
232 AVFrame *frame = ist->sub2video.frame;
236 int64_t pts, end_pts;
// Convert the display window (AV_TIME_BASE units; display times are in ms)
// into the stream's time base.
241 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 num_rects = sub->num_rects;
// sub == NULL path: timestamp the blank frame with the stored end time.
247 pts = ist->sub2video.end_pts;
251 if (sub2video_get_blank_frame(ist) < 0) {
252 av_log(ist->dec_ctx, AV_LOG_ERROR,
253 "Impossible to get a blank canvas.\n");
256 dst = frame->data [0];
257 dst_linesize = frame->linesize[0];
// Blit every rectangle of the subtitle onto the canvas.
258 for (i = 0; i < num_rects; i++)
259 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
260 sub2video_push_ref(ist, pts);
261 ist->sub2video.end_pts = end_pts;
// Called whenever a packet from `ist`'s file is read: re-push the current (or
// a blank) sub2video frame on sibling subtitle streams of the same file so
// that video filters waiting on a subtitle input (e.g. overlay) do not stall.
264 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
266 InputFile *infile = input_files[ist->file_index];
270 /* When a frame is read from a file, examine all sub2video streams in
271 the same file and send the sub2video frame again. Otherwise, decoded
272 video frames could be accumulating in the filter graph while a filter
273 (possibly overlay) is desperately waiting for a subtitle frame. */
274 for (i = 0; i < infile->nb_streams; i++) {
275 InputStream *ist2 = input_streams[infile->ist_index + i];
// Skip streams not feeding sub2video.
276 if (!ist2->sub2video.frame)
278 /* subtitles seem to be usually muxed ahead of other streams;
279 if not, subtracting a larger time here is necessary */
280 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
281 /* do not send the heartbeat frame if the subtitle is already ahead */
282 if (pts2 <= ist2->sub2video.last_pts)
// Past the current subtitle's end, or nothing rendered yet: push a clear frame.
284 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
285 sub2video_update(ist2, NULL);
// Count failed downstream frame requests; only re-push when someone is starving.
286 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
287 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
289 sub2video_push_ref(ist2, pts2);
// End of stream: emit a final clearing frame if a subtitle is still being
// displayed, then send EOF (NULL frame) to every connected buffer source.
293 static void sub2video_flush(InputStream *ist)
297 if (ist->sub2video.end_pts < INT64_MAX)
298 sub2video_update(ist, NULL);
299 for (i = 0; i < ist->nb_filters; i++)
300 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
303 /* end of sub2video hack */
// Restore the saved tty state; async-signal-safe part of terminal teardown.
// NOTE(review): lines are elided here by the sampled view -- the av_log()
// below belongs to the public term_exit() wrapper in the full source; confirm.
305 static void term_exit_sigsafe(void)
309 tcsetattr (0, TCSANOW, &oldtty);
315 av_log(NULL, AV_LOG_QUIET, "%s", "");
// Signal/exit state shared between the signal handlers and the main loop.
// NOTE(review): plain `volatile int` is not the portable async-signal-safe
// type; `volatile sig_atomic_t` would be -- confirm against project policy.
319 static volatile int received_sigterm = 0;
320 static volatile int received_nb_signals = 0;
321 static volatile int transcode_init_done = 0;
322 static volatile int ffmpeg_exited = 0;
323 static int main_return_code = 0;
// Signal handler: record the signal number and count arrivals; after more
// than three signals take the hard-exit path.  Only async-signal-safe calls
// (write, strlen) are used -- stdio would not be safe here.
326 sigterm_handler(int sig)
328 received_sigterm = sig;
329 received_nb_signals++;
331 if(received_nb_signals > 3) {
332 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
333 strlen("Received > 3 system signals, hard exiting\n"));
339 #if HAVE_SETCONSOLECTRLHANDLER
// Windows console control handler: translate console events into the same
// sigterm_handler() path used for POSIX signals.
340 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
342 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
// Ctrl+Break behaves like an interactive interrupt.
347 case CTRL_BREAK_EVENT:
348 sigterm_handler(SIGINT);
351 case CTRL_CLOSE_EVENT:
352 case CTRL_LOGOFF_EVENT:
353 case CTRL_SHUTDOWN_EVENT:
354 sigterm_handler(SIGTERM);
355 /* Basically, with these 3 events, when we return from this method the
356 process is hard terminated, so stall as long as we need to
357 to try and let the main thread(s) clean up and gracefully terminate
358 (we have at most 5 seconds, but should be done far before that). */
// Wait until the main thread flags that cleanup has finished.
359 while (!ffmpeg_exited) {
365 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
// Body of term_init() (its opening lines are elided from this sampled view):
// put the controlling tty into raw-ish mode for interactive key handling and
// install the signal / console-control handlers.
374 if (!run_as_daemon && stdin_interaction) {
376 if (tcgetattr (0, &tty) == 0) {
// Disable input translation and XON/XOFF flow control...
380 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
381 |INLCR|IGNCR|ICRNL|IXON);
// ...but keep output post-processing enabled.
382 tty.c_oflag |= OPOST;
// No echo, no canonical (line-buffered) input.
383 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
384 tty.c_cflag &= ~(CSIZE|PARENB);
389 tcsetattr (0, TCSANOW, &tty);
391 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
395 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
396 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
// SIGXCPU: CPU-time resource limit exceeded -- treat like a termination request.
398 signal(SIGXCPU, sigterm_handler);
400 #if HAVE_SETCONSOLECTRLHANDLER
401 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
405 /* read a key without blocking */
// Return one pending key press from stdin, without blocking (POSIX select()
// path, or Windows console/pipe peek path below).
406 static int read_key(void)
// POSIX: poll fd 0 with select() so the subsequent read cannot block.
418 n = select(1, &rfds, NULL, NULL, &tv);
427 # if HAVE_PEEKNAMEDPIPE
429 static HANDLE input_handle;
432 input_handle = GetStdHandle(STD_INPUT_HANDLE);
// GetConsoleMode failing means stdin is a pipe, not a console.
433 is_pipe = !GetConsoleMode(input_handle, &dw);
437 /* When running under a GUI, you will end here. */
438 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
439 // input pipe may have been closed by the program that ran ffmpeg
457 static int decode_interrupt_cb(void *ctx)
459 return received_nb_signals > transcode_init_done;
462 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
// Global teardown, called once at exit: drain and free every filtergraph,
// output file/stream, input file/stream and auxiliary buffer, then report the
// final status.  `ret` is the pending process exit status.
// NOTE(review): this sampled view elides braces and some statements; compare
// against the full source before editing.
464 static void ffmpeg_cleanup(int ret)
469 int maxrss = getmaxrss() / 1024;
470 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
// --- filtergraphs: free queued frames/subtitles, then the graph objects ---
473 for (i = 0; i < nb_filtergraphs; i++) {
474 FilterGraph *fg = filtergraphs[i];
475 avfilter_graph_free(&fg->graph);
476 for (j = 0; j < fg->nb_inputs; j++) {
// Drain and free every frame still queued on this filter input.
477 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
479 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
480 sizeof(frame), NULL);
481 av_frame_free(&frame);
483 av_fifo_free(fg->inputs[j]->frame_queue);
// Same for any queued sub2video subtitles.
484 if (fg->inputs[j]->ist->sub2video.sub_queue) {
485 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
487 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
488 &sub, sizeof(sub), NULL);
489 avsubtitle_free(&sub);
491 av_fifo_free(fg->inputs[j]->ist->sub2video.sub_queue);
493 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
494 av_freep(&fg->inputs[j]->name);
495 av_freep(&fg->inputs[j]);
497 av_freep(&fg->inputs);
498 for (j = 0; j < fg->nb_outputs; j++) {
499 av_freep(&fg->outputs[j]->name);
500 av_freep(&fg->outputs[j]->formats);
501 av_freep(&fg->outputs[j]->channel_layouts);
502 av_freep(&fg->outputs[j]->sample_rates);
503 av_freep(&fg->outputs[j]);
505 av_freep(&fg->outputs);
506 av_freep(&fg->graph_desc);
508 av_freep(&filtergraphs[i]);
510 av_freep(&filtergraphs);
512 av_freep(&subtitle_out);
// --- output files: close the muxer contexts ---
515 for (i = 0; i < nb_output_files; i++) {
516 OutputFile *of = output_files[i];
// Close the underlying avio handle unless the format does its own I/O.
521 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
523 avformat_free_context(s);
524 av_dict_free(&of->opts);
526 av_freep(&output_files[i]);
// --- output streams: free per-stream encoder state ---
528 for (i = 0; i < nb_output_streams; i++) {
529 OutputStream *ost = output_streams[i];
534 for (j = 0; j < ost->nb_bitstream_filters; j++)
535 av_bsf_free(&ost->bsf_ctx[j]);
536 av_freep(&ost->bsf_ctx);
537 av_freep(&ost->bsf_extradata_updated);
539 av_frame_free(&ost->filtered_frame);
540 av_frame_free(&ost->last_frame);
541 av_dict_free(&ost->encoder_opts);
543 av_parser_close(ost->parser);
544 avcodec_free_context(&ost->parser_avctx);
546 av_freep(&ost->forced_keyframes);
547 av_expr_free(ost->forced_keyframes_pexpr);
548 av_freep(&ost->avfilter);
549 av_freep(&ost->logfile_prefix);
551 av_freep(&ost->audio_channels_map);
552 ost->audio_channels_mapped = 0;
554 av_dict_free(&ost->sws_dict);
556 avcodec_free_context(&ost->enc_ctx);
557 avcodec_parameters_free(&ost->ref_par);
// Drop packets still waiting in the pre-header muxing queue.
559 if (ost->muxing_queue) {
560 while (av_fifo_size(ost->muxing_queue)) {
562 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
563 av_packet_unref(&pkt);
565 av_fifo_freep(&ost->muxing_queue);
568 av_freep(&output_streams[i]);
// --- input side ---
571 free_input_threads();
573 for (i = 0; i < nb_input_files; i++) {
574 avformat_close_input(&input_files[i]->ctx);
575 av_freep(&input_files[i]);
577 for (i = 0; i < nb_input_streams; i++) {
578 InputStream *ist = input_streams[i];
580 av_frame_free(&ist->decoded_frame);
581 av_frame_free(&ist->filter_frame);
582 av_dict_free(&ist->decoder_opts);
583 avsubtitle_free(&ist->prev_sub.subtitle);
584 av_frame_free(&ist->sub2video.frame);
585 av_freep(&ist->filters);
586 av_freep(&ist->hwaccel_device);
587 av_freep(&ist->dts_buffer);
589 avcodec_free_context(&ist->dec_ctx);
591 av_freep(&input_streams[i]);
// fclose() flushes; a failure here may mean stats data was lost.
595 if (fclose(vstats_file))
596 av_log(NULL, AV_LOG_ERROR,
597 "Error closing vstats file, loss of information possible: %s\n",
598 av_err2str(AVERROR(errno)));
600 av_freep(&vstats_filename);
602 av_freep(&input_streams);
603 av_freep(&input_files);
604 av_freep(&output_streams);
605 av_freep(&output_files);
609 avformat_network_deinit();
// --- final status report ---
611 if (received_sigterm) {
612 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
613 (int) received_sigterm);
614 } else if (ret && transcode_init_done) {
615 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
621 void remove_avoptions(AVDictionary **a, AVDictionary *b)
623 AVDictionaryEntry *t = NULL;
625 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
626 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
// Abort with a fatal log if any user-supplied option in `m` was not consumed.
// NOTE(review): the exit statement that follows the log in the full source is
// elided from this sampled view.
630 void assert_avoptions(AVDictionary *m)
632 AVDictionaryEntry *t;
// Empty key + AV_DICT_IGNORE_SUFFIX matches any remaining entry.
633 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
634 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
// Report that codec `c` is experimental (as encoder when `encoder` is set)
// and abort; the body is elided from this sampled view.
639 static void abort_codec_experimental(AVCodec *c, int encoder)
// With -benchmark_all enabled, log the CPU time consumed since the previous
// call, labelled with the printf-style message; presumably fmt == NULL only
// resets the reference clock (reset path elided from this view -- confirm).
644 static void update_benchmark(const char *fmt, ...)
646 if (do_benchmark_all) {
647 int64_t t = getutime();
653 vsnprintf(buf, sizeof(buf), fmt, va);
655 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
661 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
664 for (i = 0; i < nb_output_streams; i++) {
665 OutputStream *ost2 = output_streams[i];
666 ost2->finished |= ost == ost2 ? this_stream : others;
// Hand one encoded packet to the muxer for stream `ost`: queue it while the
// muxer header is not yet written, otherwise fix up timestamps and write it
// interleaved.  The packet is unreferenced on every path.
670 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
672 AVFormatContext *s = of->ctx;
673 AVStream *st = ost->st;
676 if (!of->header_written) {
677 AVPacket tmp_pkt = {0};
678 /* the muxer is not initialized yet, buffer the packet */
// Grow the muxing FIFO by doubling, capped at max_muxing_queue_size.
679 if (!av_fifo_space(ost->muxing_queue)) {
680 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
681 ost->max_muxing_queue_size);
682 if (new_size <= av_fifo_size(ost->muxing_queue)) {
683 av_log(NULL, AV_LOG_ERROR,
684 "Too many packets buffered for output stream %d:%d.\n",
685 ost->file_index, ost->st->index);
688 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
// Packet data ownership moves into the FIFO via tmp_pkt.
692 ret = av_packet_ref(&tmp_pkt, pkt);
695 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
696 av_packet_unref(pkt);
// VSYNC_DROP video / negative audio sync: strip timestamps entirely.
700 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
701 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
702 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
705 * Audio encoders may split the packets -- #frames in != #packets out.
706 * But there is no reordering, so we can limit the number of output packets
707 * by simply dropping them here.
708 * Counting encoded video frames needs to be done separately because of
709 * reordering, see do_video_out()
711 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
712 if (ost->frame_number >= ost->max_frames) {
713 av_packet_unref(pkt);
// Harvest encoder quality / error stats attached as packet side data.
718 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
720 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
722 ost->quality = sd ? AV_RL32(sd) : -1;
723 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
725 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
727 ost->error[i] = AV_RL64(sd + 8 + 8*i);
// CFR output: force the packet duration to exactly one frame interval.
732 if (ost->frame_rate.num && ost->is_cfr) {
733 if (pkt->duration > 0)
734 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
735 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
// Internal mux time base -> actual stream time base.
740 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
742 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
// dts > pts is invalid; replace both with the median of
// {pts, dts, last_mux_dts+1} (sum - min - max == median).
743 if (pkt->dts != AV_NOPTS_VALUE &&
744 pkt->pts != AV_NOPTS_VALUE &&
745 pkt->dts > pkt->pts) {
746 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
748 ost->file_index, ost->st->index);
750 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
751 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
752 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
// Enforce increasing dts (strictly, unless the muxer sets TS_NONSTRICT);
// VP9 stream copy is exempted.
754 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
755 pkt->dts != AV_NOPTS_VALUE &&
756 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
757 ost->last_mux_dts != AV_NOPTS_VALUE) {
758 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
759 if (pkt->dts < max) {
760 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
761 av_log(s, loglevel, "Non-monotonous DTS in output stream "
762 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
763 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
765 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
768 av_log(s, loglevel, "changing to %"PRId64". This may result "
769 "in incorrect timestamps in the output file.\n",
// Keep pts >= dts after the clamp.
771 if (pkt->pts >= pkt->dts)
772 pkt->pts = FFMAX(pkt->pts, max);
777 ost->last_mux_dts = pkt->dts;
// Byte/packet accounting for the final report.
779 ost->data_size += pkt->size;
780 ost->packets_written++;
782 pkt->stream_index = ost->index;
785 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
786 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
787 av_get_media_type_string(ost->enc_ctx->codec_type),
788 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
789 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
// A write failure finishes all output streams (muxer + encoders).
794 ret = av_interleaved_write_frame(s, pkt);
796 print_error("av_interleaved_write_frame()", ret);
797 main_return_code = 1;
798 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
800 av_packet_unref(pkt);
// Mark the encoder side of `ost` finished and clamp the output file's
// recording time to this stream's end time.
// NOTE(review): the guard around the clamp (in the full source:
// `if (of->shortest)`) is elided from this sampled view -- confirm.
803 static void close_output_stream(OutputStream *ost)
805 OutputFile *of = output_files[ost->file_index];
807 ost->finished |= ENCODER_FINISHED;
// Stream end position converted to AV_TIME_BASE units for comparison.
809 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
810 of->recording_time = FFMIN(of->recording_time, end);
// Run `pkt` through the stream's bitstream-filter chain (if any) and pass
// every resulting packet on to write_packet(); without filters, write the
// packet straight through.
814 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
818 /* apply the output bitstream filters, if any */
819 if (ost->nb_bitstream_filters) {
822 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
828 /* get a packet from the previous filter up the chain */
829 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
830 if (ret == AVERROR(EAGAIN)) {
836 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
837 * the api states this shouldn't happen after init(). Propagate it here to the
838 * muxer and to the next filters in the chain to workaround this.
839 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
840 * par_out->extradata and adapt muxers accordingly to get rid of this. */
// Bit 0 of bsf_extradata_updated: parameters already copied to the muxer.
841 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
842 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
845 ost->bsf_extradata_updated[idx - 1] |= 1;
848 /* send it to the next filter down the chain or to the muxer */
849 if (idx < ost->nb_bitstream_filters) {
850 /* HACK/FIXME! - See above */
// Bit 1: parameters already propagated to the next filter in the chain.
851 if (!(ost->bsf_extradata_updated[idx] & 2)) {
852 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
855 ost->bsf_extradata_updated[idx] |= 2;
857 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
862 write_packet(of, pkt, ost);
// Unfiltered path.
865 write_packet(of, pkt, ost);
868 if (ret < 0 && ret != AVERROR_EOF) {
869 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
870 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
// Return whether `ost` may still accept frames under the output file's
// -t/recording_time limit; once the limit is reached the stream is closed
// (the return statements themselves are elided from this sampled view).
876 static int check_recording_time(OutputStream *ost)
878 OutputFile *of = output_files[ost->file_index];
// Compare the stream position (encoder time base) against the limit
// expressed in AV_TIME_BASE units.
880 if (of->recording_time != INT64_MAX &&
881 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
882 AV_TIME_BASE_Q) >= 0) {
883 close_output_stream(ost);
// Encode one audio frame via the send/receive API and mux every packet the
// encoder produces.  ost->sync_opts tracks the stream position in samples.
889 static void do_audio_out(OutputFile *of, OutputStream *ost,
892 AVCodecContext *enc = ost->enc_ctx;
896 av_init_packet(&pkt);
// Stop if this stream hit its -t recording limit.
900 if (!check_recording_time(ost))
// Without a usable pts (or with forced audio sync) stamp frames from the
// running sample counter.
903 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
904 frame->pts = ost->sync_opts;
905 ost->sync_opts = frame->pts + frame->nb_samples;
906 ost->samples_encoded += frame->nb_samples;
907 ost->frames_encoded++;
909 av_assert0(pkt.size || !pkt.data);
910 update_benchmark(NULL);
912 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
913 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
914 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
915 enc->time_base.num, enc->time_base.den);
918 ret = avcodec_send_frame(enc, frame);
// Drain every packet the encoder has ready.
923 ret = avcodec_receive_packet(enc, &pkt);
924 if (ret == AVERROR(EAGAIN))
929 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
// Encoder time base -> internal mux time base before handing the packet on.
931 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
934 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
935 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
936 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
937 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
940 output_packet(of, &pkt, ost);
945 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
// Encode one AVSubtitle and mux the result.  DVB subtitles are encoded twice:
// one packet draws the subtitle, a second one clears it.
949 static void do_subtitle_out(OutputFile *of,
953 int subtitle_out_max_size = 1024 * 1024;
954 int subtitle_out_size, nb, i;
// Subtitles without a pts cannot be placed on the timeline.
959 if (sub->pts == AV_NOPTS_VALUE) {
960 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
// Lazily allocate the shared 1 MiB encode buffer.
969 subtitle_out = av_malloc(subtitle_out_max_size);
971 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
976 /* Note: DVB subtitle need one packet to draw them and one other
977 packet to clear them */
978 /* XXX: signal it in the codec context ? */
979 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
984 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
986 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
987 pts -= output_files[ost->file_index]->start_time;
// One iteration per encoded packet (nb == 2 for DVB, else 1).
988 for (i = 0; i < nb; i++) {
989 unsigned save_num_rects = sub->num_rects;
991 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
992 if (!check_recording_time(ost))
996 // start_display_time is required to be 0
997 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
998 sub->end_display_time -= sub->start_display_time;
999 sub->start_display_time = 0;
1003 ost->frames_encoded++;
1005 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1006 subtitle_out_max_size, sub);
// Restore num_rects (presumably zeroed for the DVB clear pass in lines
// elided from this view -- confirm against the full source).
1008 sub->num_rects = save_num_rects;
1009 if (subtitle_out_size < 0) {
1010 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1014 av_init_packet(&pkt);
1015 pkt.data = subtitle_out;
1016 pkt.size = subtitle_out_size;
// sub->pts is in AV_TIME_BASE units; display times are in milliseconds.
1017 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1018 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1019 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1020 /* XXX: the pts correction is handled here. Maybe handling
1021 it in the codec would be better */
// First packet starts the display window, second one ends it.
1023 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1025 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1028 output_packet(of, &pkt, ost);
// Encode one filtered video frame (next_picture; NULL on flush), applying the
// selected video-sync method: depending on the drift between input and output
// timelines a frame may be dropped, emitted once, or duplicated.
// NOTE(review): this sampled view elides many lines (braces, case labels,
// error paths); compare against the full source before editing.
1032 static void do_video_out(OutputFile *of,
1034 AVFrame *next_picture,
1037 int ret, format_video_sync;
1039 AVCodecContext *enc = ost->enc_ctx;
1040 AVCodecParameters *mux_par = ost->st->codecpar;
1041 AVRational frame_rate;
1042 int nb_frames, nb0_frames, i;
1043 double delta, delta0;
1044 double duration = 0;
1046 InputStream *ist = NULL;
1047 AVFilterContext *filter = ost->filter->filter;
1049 if (ost->source_index >= 0)
1050 ist = input_streams[ost->source_index];
// Nominal frame duration, in encoder time-base units, from the sink rate.
1052 frame_rate = av_buffersink_get_frame_rate(filter);
1053 if (frame_rate.num > 0 && frame_rate.den > 0)
1054 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1056 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1057 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
// Prefer the frame's own packet duration when there is no filter script.
1059 if (!ost->filters_script &&
1063 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1064 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
// Flush: estimate the repeat count from the recent nb0 history (median of 3).
1067 if (!next_picture) {
1069 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1070 ost->last_nb0_frames[1],
1071 ost->last_nb0_frames[2]);
1073 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1074 delta = delta0 + duration;
1076 /* by default, we output a single frame */
1077 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
// Resolve VSYNC_AUTO from the output format's capabilities.
1080 format_video_sync = video_sync_method;
1081 if (format_video_sync == VSYNC_AUTO) {
1082 if(!strcmp(of->ctx->oformat->name, "avi")) {
1083 format_video_sync = VSYNC_VFR;
1085 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1087 && format_video_sync == VSYNC_CFR
1088 && input_files[ist->file_index]->ctx->nb_streams == 1
1089 && input_files[ist->file_index]->input_ts_offset == 0) {
1090 format_video_sync = VSYNC_VSCFR;
1092 if (format_video_sync == VSYNC_CFR && copy_ts) {
1093 format_video_sync = VSYNC_VSCFR;
1096 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
// Clip frames that would land before the current output position.
1100 format_video_sync != VSYNC_PASSTHROUGH &&
1101 format_video_sync != VSYNC_DROP) {
1102 if (delta0 < -0.6) {
1103 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1105 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1106 sync_ipts = ost->sync_opts;
// Decide drop/dup counts per sync mode (case labels partly elided).
1111 switch (format_video_sync) {
1113 if (ost->frame_number == 0 && delta0 >= 0.5) {
1114 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1117 ost->sync_opts = lrint(sync_ipts);
1120 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1121 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1123 } else if (delta < -1.1)
1125 else if (delta > 1.1) {
1126 nb_frames = lrintf(delta);
1128 nb0_frames = lrintf(delta0 - 0.6);
1134 else if (delta > 0.6)
1135 ost->sync_opts = lrint(sync_ipts);
1138 case VSYNC_PASSTHROUGH:
1139 ost->sync_opts = lrint(sync_ipts);
// Respect -frames and keep the dup count within the emit count.
1146 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1147 nb0_frames = FFMIN(nb0_frames, nb_frames);
// Shift the nb0 history window and record this frame's value.
1149 memmove(ost->last_nb0_frames + 1,
1150 ost->last_nb0_frames,
1151 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1152 ost->last_nb0_frames[0] = nb0_frames;
1154 if (nb0_frames == 0 && ost->last_dropped) {
1156 av_log(NULL, AV_LOG_VERBOSE,
1157 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1158 ost->frame_number, ost->st->index, ost->last_frame->pts);
1160 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
// Sanity cap on duplication bursts.
1161 if (nb_frames > dts_error_threshold * 30) {
1162 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1166 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1167 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1168 if (nb_frames_dup > dup_warning) {
// NOTE(review): dup_warning is unsigned; "%d" should be "%u" here.
1169 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1173 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1175 /* duplicates frame if needed */
1176 for (i = 0; i < nb_frames; i++) {
1177 AVFrame *in_picture;
1178 av_init_packet(&pkt);
// Re-emit the previous frame for the first nb0_frames iterations.
1182 if (i < nb0_frames && ost->last_frame) {
1183 in_picture = ost->last_frame;
1185 in_picture = next_picture;
1190 in_picture->pts = ost->sync_opts;
1193 if (!check_recording_time(ost))
1195 if (ost->frame_number >= ost->max_frames)
// Deprecated rawpicture path: pass the picture struct through unencoded.
1199 #if FF_API_LAVF_FMT_RAWPICTURE
1200 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1201 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1202 /* raw pictures are written as AVPicture structure to
1203 avoid any copies. We support temporarily the older
1205 if (in_picture->interlaced_frame)
1206 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1208 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1209 pkt.data = (uint8_t *)in_picture;
1210 pkt.size = sizeof(AVPicture);
1211 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1212 pkt.flags |= AV_PKT_FLAG_KEY;
1214 output_packet(of, &pkt, ost);
// Normal encode path.
1218 int forced_keyframe = 0;
1221 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1222 ost->top_field_first >= 0)
1223 in_picture->top_field_first = !!ost->top_field_first;
// Propagate field order to the muxer (MJPEG uses TT/BB, others TB/BT).
1225 if (in_picture->interlaced_frame) {
1226 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1227 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1229 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1231 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1233 in_picture->quality = enc->global_quality;
1234 in_picture->pict_type = 0;
1236 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1237 in_picture->pts * av_q2d(enc->time_base) : NAN;
// Forced keyframes: explicit pts list, expression, or "source" key frames.
1238 if (ost->forced_kf_index < ost->forced_kf_count &&
1239 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1240 ost->forced_kf_index++;
1241 forced_keyframe = 1;
1242 } else if (ost->forced_keyframes_pexpr) {
1244 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1245 res = av_expr_eval(ost->forced_keyframes_pexpr,
1246 ost->forced_keyframes_expr_const_values, NULL);
1247 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1248 ost->forced_keyframes_expr_const_values[FKF_N],
1249 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1250 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1251 ost->forced_keyframes_expr_const_values[FKF_T],
1252 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1255 forced_keyframe = 1;
1256 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1257 ost->forced_keyframes_expr_const_values[FKF_N];
1258 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1259 ost->forced_keyframes_expr_const_values[FKF_T];
1260 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1263 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1264 } else if ( ost->forced_keyframes
1265 && !strncmp(ost->forced_keyframes, "source", 6)
1266 && in_picture->key_frame==1) {
1267 forced_keyframe = 1;
1270 if (forced_keyframe) {
1271 in_picture->pict_type = AV_PICTURE_TYPE_I;
1272 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1275 update_benchmark(NULL);
1277 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1278 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1279 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1280 enc->time_base.num, enc->time_base.den);
1283 ost->frames_encoded++;
// Send/receive encode loop; drain all packets the encoder produces.
1285 ret = avcodec_send_frame(enc, in_picture);
1290 ret = avcodec_receive_packet(enc, &pkt);
1291 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1292 if (ret == AVERROR(EAGAIN))
1298 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1299 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1300 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1301 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
// Encoders without delay may omit pts; fall back to the sync counter.
1304 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1305 pkt.pts = ost->sync_opts;
1307 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1310 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1311 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1312 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1313 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1316 frame_size = pkt.size;
1317 output_packet(of, &pkt, ost);
1319 /* if two pass, output log */
1320 if (ost->logfile && enc->stats_out) {
1321 fprintf(ost->logfile, "%s", enc->stats_out);
1327 * For video, number of frames in == number of packets out.
1328 * But there may be reordering, so we can't throw away frames on encoder
1329 * flush, we need to limit them here, before they go into encoder.
1331 ost->frame_number++;
1333 if (vstats_filename && frame_size)
1334 do_video_stats(ost, frame_size);
// Keep a reference to this frame so it can be re-emitted as a duplicate.
1337 if (!ost->last_frame)
1338 ost->last_frame = av_frame_alloc();
1339 av_frame_unref(ost->last_frame);
1340 if (next_picture && ost->last_frame)
1341 av_frame_ref(ost->last_frame, next_picture);
1343 av_frame_free(&ost->last_frame);
1347 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1351 static double psnr(double d)
1353 return -10.0 * log10(d);
/*
 * Append one line of per-frame video statistics (quality, PSNR, frame and
 * stream sizes, bitrates, picture type) to the file named by the global
 * vstats_filename, opened lazily on first call into vstats_file.
 * NOTE(review): this extract drops lines (the embedded original line numbers
 * jump), so error handling, braces and some statements (e.g. the assignment
 * of 'enc') are missing from this view — confirm against upstream ffmpeg.c.
 */
1356 static void do_video_stats(OutputStream *ost, int frame_size)
1358 AVCodecContext *enc;
1360 double ti1, bitrate, avg_bitrate;
1362 /* this is executed just the first time do_video_stats is called */
1364 vstats_file = fopen(vstats_filename, "w");
1372 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1373 frame_number = ost->st->nb_frames;
// vstats_version selects between the legacy and the extended line format
// (the extended one also records output file and stream indices).
1374 if (vstats_version <= 1) {
1375 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1376 ost->quality / (float)FF_QP2LAMBDA);
1378 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1379 ost->quality / (float)FF_QP2LAMBDA);
// PSNR is emitted only when the encoder was asked to compute error
// (AV_CODEC_FLAG_PSNR) and the luma error value is usable.
1382 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1383 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1385 fprintf(vstats_file,"f_size= %6d ", frame_size);
1386 /* compute pts value */
1387 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
// Instantaneous bitrate assumes one frame per encoder time_base tick;
// avg_bitrate is total bytes written so far over elapsed stream time.
1391 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1392 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1393 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1394 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1395 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1399 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark a stream — and every other stream of the same output file — as
 * finished for both the encoder and the muxer, so no further packets are
 * produced or accepted for that file.
 * NOTE(review): lines are missing from this extract (original line numbers
 * jump); the declaration of 'i' and enclosing braces are not visible here.
 */
1401 static void finish_output_stream(OutputStream *ost)
1403 OutputFile *of = output_files[ost->file_index];
1406 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
// Finishing one stream finishes all sibling streams of the same output file.
1409 for (i = 0; i < of->ctx->nb_streams; i++)
1410 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
 * Drain every configured filtergraph sink and feed the resulting frames to
 * the appropriate encoder (do_video_out / do_audio_out).
 * @param flush  when non-zero, EOF from a video sink triggers a final
 *               do_video_out(NULL) call to flush the encoder.
 * NOTE(review): this extract is missing lines (original numbering jumps);
 * 'continue' statements, some braces and the inner drain loop's structure
 * are not visible — confirm against upstream ffmpeg.c before editing.
 */
1415 * Get and encode new output from any of the filtergraphs, without causing
1418 * @return 0 for success, <0 for severe errors
1420 static int reap_filters(int flush)
1422 AVFrame *filtered_frame = NULL;
1425 /* Reap all buffers present in the buffer sinks */
1426 for (i = 0; i < nb_output_streams; i++) {
1427 OutputStream *ost = output_streams[i];
1428 OutputFile *of = output_files[ost->file_index];
1429 AVFilterContext *filter;
1430 AVCodecContext *enc = ost->enc_ctx;
// Streams without a configured filtergraph are skipped.
1433 if (!ost->filter || !ost->filter->graph->graph)
1435 filter = ost->filter->filter;
// Lazily initialize the output stream the first time a frame arrives.
1437 if (!ost->initialized) {
1438 char error[1024] = "";
1439 ret = init_output_stream(ost, error, sizeof(error));
1441 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1442 ost->file_index, ost->index, error);
// The per-stream frame buffer is allocated once and reused for each pull.
1447 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1448 return AVERROR(ENOMEM);
1450 filtered_frame = ost->filtered_frame;
1453 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
// NO_REQUEST: only take frames already buffered; do not run the graph.
1454 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1455 AV_BUFFERSINK_FLAG_NO_REQUEST);
1457 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1458 av_log(NULL, AV_LOG_WARNING,
1459 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1460 } else if (flush && ret == AVERROR_EOF) {
// On flush + EOF, push a NULL frame so the video encoder drains.
1461 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1462 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1466 if (ost->finished) {
1467 av_frame_unref(filtered_frame);
// Rescale the frame pts from the filter timebase to the encoder timebase,
// keeping a higher-precision double (float_pts) for the fps decision code.
1470 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1471 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1472 AVRational filter_tb = av_buffersink_get_time_base(filter);
1473 AVRational tb = enc->time_base;
// Temporarily widen the timebase denominator for extra fractional bits.
1474 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1476 tb.den <<= extra_bits;
1478 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1479 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1480 float_pts /= 1 << extra_bits;
1481 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1482 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1484 filtered_frame->pts =
1485 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1486 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1488 //if (ost->source_index >= 0)
1489 //     *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
// Dispatch by media type: video and audio have dedicated encode paths.
1491 switch (av_buffersink_get_type(filter)) {
1492 case AVMEDIA_TYPE_VIDEO:
1493 if (!ost->frame_aspect_ratio.num)
1494 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1497 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1498 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1500 enc->time_base.num, enc->time_base.den);
1503 do_video_out(of, ost, filtered_frame, float_pts);
1505 case AVMEDIA_TYPE_AUDIO:
// Reject channel-count changes the encoder cannot absorb mid-stream.
1506 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1507 enc->channels != av_frame_get_channels(filtered_frame)) {
1508 av_log(NULL, AV_LOG_ERROR,
1509 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1512 do_audio_out(of, ost, filtered_frame);
1515 // TODO support subtitle filters
1519 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type output byte totals with muxing
 * overhead at AV_LOG_INFO, then verbose per-file / per-stream packet and
 * frame statistics at AV_LOG_VERBOSE, and a warning if nothing was encoded.
 * @param total_size  total bytes written to the (first) output, or <=0 if
 *                    unknown.
 * NOTE(review): lines are missing from this extract (original numbering
 * jumps), including braces and some else-branches.
 */
1526 static void print_final_stats(int64_t total_size)
1528 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1529 uint64_t subtitle_size = 0;
1530 uint64_t data_size = 0;
1531 float percent = -1.0;
// Accumulate per-media-type payload sizes across all output streams.
1535 for (i = 0; i < nb_output_streams; i++) {
1536 OutputStream *ost = output_streams[i];
1537 switch (ost->enc_ctx->codec_type) {
1538 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1539 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1540 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1541 default: other_size += ost->data_size; break;
1543 extra_size += ost->enc_ctx->extradata_size;
1544 data_size += ost->data_size;
// Streams in first-pass-only mode write a log file, not real output.
1545 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1546 != AV_CODEC_FLAG_PASS1)
// Muxing overhead = container bytes beyond the raw stream payloads.
1550 if (data_size && total_size>0 && total_size >= data_size)
1551 percent = 100.0 * (total_size - data_size) / data_size;
1553 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1554 video_size / 1024.0,
1555 audio_size / 1024.0,
1556 subtitle_size / 1024.0,
1557 other_size / 1024.0,
1558 extra_size / 1024.0);
1560 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1562 av_log(NULL, AV_LOG_INFO, "unknown");
1563 av_log(NULL, AV_LOG_INFO, "\n");
1565 /* print verbose per-stream stats */
1566 for (i = 0; i < nb_input_files; i++) {
1567 InputFile *f = input_files[i];
1568 uint64_t total_packets = 0, total_size = 0;
1570 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1571 i, f->ctx->filename);
1573 for (j = 0; j < f->nb_streams; j++) {
1574 InputStream *ist = input_streams[f->ist_index + j];
1575 enum AVMediaType type = ist->dec_ctx->codec_type;
1577 total_size += ist->data_size;
1578 total_packets += ist->nb_packets;
1580 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1581 i, j, media_type_string(type));
1582 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1583 ist->nb_packets, ist->data_size);
1585 if (ist->decoding_needed) {
1586 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1587 ist->frames_decoded);
1588 if (type == AVMEDIA_TYPE_AUDIO)
1589 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1590 av_log(NULL, AV_LOG_VERBOSE, "; ");
1593 av_log(NULL, AV_LOG_VERBOSE, "\n");
1596 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1597 total_packets, total_size);
// Mirror of the input loop above, for output files/streams.
1600 for (i = 0; i < nb_output_files; i++) {
1601 OutputFile *of = output_files[i];
1602 uint64_t total_packets = 0, total_size = 0;
1604 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1605 i, of->ctx->filename);
1607 for (j = 0; j < of->ctx->nb_streams; j++) {
1608 OutputStream *ost = output_streams[of->ost_index + j];
1609 enum AVMediaType type = ost->enc_ctx->codec_type;
1611 total_size += ost->data_size;
1612 total_packets += ost->packets_written;
1614 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1615 i, j, media_type_string(type));
1616 if (ost->encoding_needed) {
1617 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1618 ost->frames_encoded);
1619 if (type == AVMEDIA_TYPE_AUDIO)
1620 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1621 av_log(NULL, AV_LOG_VERBOSE, "; ");
1624 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1625 ost->packets_written, ost->data_size);
1627 av_log(NULL, AV_LOG_VERBOSE, "\n");
1630 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1631 total_packets, total_size);
// Warn the user when absolutely nothing was written (common -ss/-t mistake).
1633 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1634 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1636 av_log(NULL, AV_LOG_WARNING, "\n");
1638 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic status line ("frame= ... fps= ... bitrate= ...") and the
 * machine-readable -progress key=value report.  Called from the main loop;
 * when not the last report it rate-limits itself to one update per 500ms
 * using static state.  On the last report it also prints the final stats.
 * NOTE(review): this extract drops lines (original numbering jumps): buffer
 * declarations ('buf', 'ost', 'q', 'qp', 'j', 'fps', 't', 'total_size',
 * 'bitrate', 'speed', 'ret'), several braces and else-branches are missing
 * from view — confirm against upstream ffmpeg.c before editing.
 */
1643 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1646 AVBPrint buf_script;
1648 AVFormatContext *oc;
1650 AVCodecContext *enc;
1651 int frame_number, vid, i;
1654 int64_t pts = INT64_MIN + 1;
// Static state makes this function non-reentrant; it is only called from
// the single main transcoding loop.
1655 static int64_t last_time = -1;
1656 static int qp_histogram[52];
1657 int hours, mins, secs, us;
// Nothing to do if neither -stats nor -progress output is active.
1661 if (!print_stats && !is_last_report && !progress_avio)
// Throttle intermediate reports to at most one every 500ms of wall clock.
1664 if (!is_last_report) {
1665 if (last_time == -1) {
1666 last_time = cur_time;
1669 if ((cur_time - last_time) < 500000)
1671 last_time = cur_time;
1674 t = (cur_time-timer_start) / 1000000.0;
1677 oc = output_files[0]->ctx;
1679 total_size = avio_size(oc->pb);
1680 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1681 total_size = avio_tell(oc->pb);
1685 av_bprint_init(&buf_script, 0, 1);
1686 for (i = 0; i < nb_output_streams; i++) {
1688 ost = output_streams[i];
1690 if (!ost->stream_copy)
1691 q = ost->quality / (float) FF_QP2LAMBDA;
// 'vid' tracks whether a video stream was already reported; only the first
// video stream drives frame/fps, later ones just append their quantizer.
1693 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1694 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1695 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1696 ost->file_index, ost->index, q);
1698 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1701 frame_number = ost->frame_number;
1702 fps = t > 1 ? frame_number / t : 0;
// "%3.*f" with precision (fps < 9.95): one decimal only for small fps.
1703 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1704 frame_number, fps < 9.95, fps, q);
1705 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1706 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1707 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1708 ost->file_index, ost->index, q);
1710 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
// qp_histogram: log-scaled per-quantizer counts, printed as hex digits.
1714 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1716 for (j = 0; j < 32; j++)
1717 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1720 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1722 double error, error_sum = 0;
1723 double scale, scale_sum = 0;
1725 char type[3] = { 'Y','U','V' };
1726 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
// Per-plane PSNR: cumulative (enc->error) for the final report, last-frame
// (ost->error) for intermediate ones.
1727 for (j = 0; j < 3; j++) {
1728 if (is_last_report) {
1729 error = enc->error[j];
1730 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1732 error = ost->error[j];
1733 scale = enc->width * enc->height * 255.0 * 255.0;
1739 p = psnr(error / scale);
1740 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1741 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1742 ost->file_index, ost->index, type[j] | 32, p);
1744 p = psnr(error_sum / scale_sum);
1745 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1746 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1747 ost->file_index, ost->index, p);
1751 /* compute min output value */
// 'pts' becomes the furthest end timestamp across all output streams.
1752 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1753 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1754 ost->st->time_base, AV_TIME_BASE_Q));
1756 nb_frames_drop += ost->last_dropped;
1759 secs = FFABS(pts) / AV_TIME_BASE;
1760 us = FFABS(pts) % AV_TIME_BASE;
1766 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1767 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1769 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1771 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1772 "size=%8.0fkB time=", total_size / 1024.0);
1774 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1775 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1776 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1777 (100 * us) / AV_TIME_BASE);
1780 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1781 av_bprintf(&buf_script, "bitrate=N/A\n");
1783 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1784 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1787 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1788 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1789 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1790 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1791 hours, mins, secs, us);
1793 if (nb_frames_dup || nb_frames_drop)
1794 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1795 nb_frames_dup, nb_frames_drop);
1796 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1797 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1800 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1801 av_bprintf(&buf_script, "speed=N/A\n");
1803 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1804 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
// '\r' overwrites the line for intermediate reports; '\n' finalizes it.
1807 if (print_stats || is_last_report) {
1808 const char end = is_last_report ? '\n' : '\r';
1809 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1810 fprintf(stderr, "%s    %c", buf, end);
1812 av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);
// Flush the key=value progress report to the -progress destination.
1817 if (progress_avio) {
1818 av_bprintf(&buf_script, "progress=%s\n",
1819 is_last_report ? "end" : "continue");
1820 avio_write(progress_avio, buf_script.str,
1821 FFMIN(buf_script.len, buf_script.size - 1));
1822 avio_flush(progress_avio);
1823 av_bprint_finalize(&buf_script, NULL);
1824 if (is_last_report) {
1825 if ((ret = avio_closep(&progress_avio)) < 0)
1826 av_log(NULL, AV_LOG_ERROR,
1827 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1832 print_final_stats(total_size);
/*
 * Drain every active encoder at end of input: send a NULL frame to each,
 * then receive and mux the remaining delayed packets.  Streams that never
 * received data are given fake input parameters so their filtergraph and
 * encoder can still be initialized and finished cleanly.
 * NOTE(review): this extract drops lines (original numbering jumps):
 * declarations of 'i', 'ret', 'pkt', 'pkt_size', several 'continue'/'break'
 * statements and braces are missing from view.
 */
1835 static void flush_encoders(void)
1839 for (i = 0; i < nb_output_streams; i++) {
1840 OutputStream *ost = output_streams[i];
1841 AVCodecContext *enc = ost->enc_ctx;
1842 OutputFile *of = output_files[ost->file_index];
1844 if (!ost->encoding_needed)
1847 // Try to enable encoding with no input frames.
1848 // Maybe we should just let encoding fail instead.
1849 if (!ost->initialized) {
1850 FilterGraph *fg = ost->filter->graph;
1851 char error[1024] = "";
1853 av_log(NULL, AV_LOG_WARNING,
1854 "Finishing stream %d:%d without any data written to it.\n",
1855 ost->file_index, ost->st->index);
1857 if (ost->filter && !fg->graph) {
1859 for (x = 0; x < fg->nb_inputs; x++) {
1860 InputFilter *ifilter = fg->inputs[x];
1861 if (ifilter->format < 0) {
1862 AVCodecParameters *par = ifilter->ist->st->codecpar;
1863 // We never got any input. Set a fake format, which will
1864 // come from libavformat.
1865 ifilter->format = par->format;
1866 ifilter->sample_rate = par->sample_rate;
1867 ifilter->channels = par->channels;
1868 ifilter->channel_layout = par->channel_layout;
1869 ifilter->width = par->width;
1870 ifilter->height = par->height;
1871 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1875 if (!ifilter_has_all_input_formats(fg))
1878 ret = configure_filtergraph(fg);
1880 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1884 finish_output_stream(ost);
1887 ret = init_output_stream(ost, error, sizeof(error));
1889 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1890 ost->file_index, ost->index, error);
// Audio encoders with frame_size <= 1 (PCM-like) buffer nothing to flush.
1895 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1897 #if FF_API_LAVF_FMT_RAWPICTURE
1898 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1902 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
// NULL frame puts the encoder into draining mode (send/receive API).
1905 avcodec_send_frame(enc, NULL);
1908 const char *desc = NULL;
1912 switch (enc->codec_type) {
1913 case AVMEDIA_TYPE_AUDIO:
1916 case AVMEDIA_TYPE_VIDEO:
1923 av_init_packet(&pkt);
1927 update_benchmark(NULL);
// Pull delayed packets until the encoder reports EOF.
1928 ret = avcodec_receive_packet(enc, &pkt);
1929 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1930 if (ret < 0 && ret != AVERROR_EOF) {
1931 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1936 if (ost->logfile && enc->stats_out) {
1937 fprintf(ost->logfile, "%s", enc->stats_out);
1939 if (ret == AVERROR_EOF) {
1942 if (ost->finished & MUXER_FINISHED) {
1943 av_packet_unref(&pkt);
1946 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1947 pkt_size = pkt.size;
1948 output_packet(of, &pkt, ost);
1949 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1950 do_video_stats(ost, pkt_size);
/*
 * Decide whether a packet from input stream 'ist' belongs to output stream
 * 'ost' right now: the source indices must match and the packet must not
 * precede the output file's -ss start time.
 * NOTE(review): the return statements are missing from this extract
 * (original numbering jumps) — upstream returns 0/1; confirm there.
 */
1957 * Check whether a packet from ist should be written into ost at this time
1959 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1961 OutputFile *of = output_files[ost->file_index];
// Global index of ist within the flat input_streams[] array.
1962 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1964 if (ost->source_index != ist_index)
1970 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one packet from input to output without re-encoding ("-c copy"):
 * applies -ss/-t trimming, rescales pts/dts/duration into the muxer
 * timebase, optionally runs the legacy parser/rawpicture conversions, and
 * hands the packet to output_packet().
 * NOTE(review): this extract drops lines (original numbering jumps): the
 * declarations of 'opkt' and 'pict', several 'return'/'else' statements and
 * braces are missing from view — confirm against upstream ffmpeg.c.
 */
1976 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1978 OutputFile *of = output_files[ost->file_index];
1979 InputFile *f = input_files [ist->file_index];
1980 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1981 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1985 av_init_packet(&opkt);
// By default the stream must start on a keyframe; -copyinkf overrides.
1987 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1988 !ost->copy_initial_nonkeyframes)
// Drop packets that precede the requested output start time.
1991 if (!ost->frame_number && !ost->copy_prior_start) {
1992 int64_t comp_start = start_time;
1993 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1994 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1995 if (pkt->pts == AV_NOPTS_VALUE ?
1996 ist->pts < comp_start :
1997 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
// Stop the stream once the output -t recording time is exceeded.
2001 if (of->recording_time != INT64_MAX &&
2002 ist->pts >= of->recording_time + start_time) {
2003 close_output_stream(ost);
// Same check against the per-input-file recording limit.
2007 if (f->recording_time != INT64_MAX) {
2008 start_time = f->ctx->start_time;
2009 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2010 start_time += f->start_time;
2011 if (ist->pts >= f->recording_time + start_time) {
2012 close_output_stream(ost);
2017 /* force the input stream PTS */
2018 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
// Rescale timestamps from the input stream timebase to the muxer timebase,
// shifting by the output start time.
2021 if (pkt->pts != AV_NOPTS_VALUE)
2022 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2024 opkt.pts = AV_NOPTS_VALUE;
2026 if (pkt->dts == AV_NOPTS_VALUE)
2027 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2029 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2030 opkt.dts -= ost_tb_start_time;
// Audio: derive sample-accurate timestamps from the packet duration.
2032 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2033 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2035 duration = ist->dec_ctx->frame_size;
2036 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2037 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2038 ost->mux_timebase) - ost_tb_start_time;
2041 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2043 opkt.flags = pkt->flags;
2044 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2045 if (  ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2046 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2047 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2048 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2050 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2051 &opkt.data, &opkt.size,
2052 pkt->data, pkt->size,
2053 pkt->flags & AV_PKT_FLAG_KEY);
2055 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
// When the parser allocated new data, wrap it in a refcounted buffer so
// the muxer owns and eventually frees it.
2060 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2065 opkt.data = pkt->data;
2066 opkt.size = pkt->size;
2068 av_copy_packet_side_data(&opkt, pkt);
2070 #if FF_API_LAVF_FMT_RAWPICTURE
2071 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2072 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2073 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2074 /* store AVPicture in AVPacket, as expected by the output format */
2075 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2077 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2081 opkt.data = (uint8_t *)&pict;
2082 opkt.size = sizeof(AVPicture);
2083 opkt.flags |= AV_PKT_FLAG_KEY;
2087 output_packet(of, &opkt, ost);
/*
 * If the decoder reported no channel layout, pick the default layout for
 * its channel count (unless the count exceeds -guess_layout_max) and warn
 * the user about the guess.
 * NOTE(review): return statements and closing braces are missing from this
 * extract (original numbering jumps) — upstream returns 1 on success, 0
 * when no layout could be guessed; confirm there.
 */
2090 int guess_input_channel_layout(InputStream *ist)
2092 AVCodecContext *dec = ist->dec_ctx;
2094 if (!dec->channel_layout) {
2096 char layout_name[256];
// Respect the user-set cap on how many channels we are willing to guess for.
2097 if (dec->channels > ist->guess_layout_max)
2099 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2100 if (!dec->channel_layout)
2102 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2103 dec->channels, dec->channel_layout);
2104 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2105 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Record decode success/failure statistics and, when -xerror is set, abort
 * on a decode error or on a corrupt decoded frame.
 * NOTE(review): the exit/abort calls implied by exit_on_error are missing
 * from this extract (original numbering jumps).
 */
2110 static void check_decode_result(InputStream *ist, int *got_output, int ret)
// decode_error_stat[0] counts successes, [1] counts failures.
2112 if (*got_output || ret<0)
2113 decode_error_stat[ret<0] ++;
2115 if (ret < 0 && exit_on_error)
2118 if (exit_on_error && *got_output && ist) {
2119 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2120 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/*
 * Return whether every audio/video input of a filtergraph has a known
 * format (>= 0), i.e. whether the graph can be configured.
 * NOTE(review): the return statements are missing from this extract
 * (original numbering jumps) — upstream returns 0 inside the loop and 1
 * after it; confirm there.
 */
2126 // Filters can be configured only if the formats of all inputs are known.
2127 static int ifilter_has_all_input_formats(FilterGraph *fg)
2130 for (i = 0; i < fg->nb_inputs; i++) {
// Only audio and video inputs carry a negotiable format.
2131 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2132 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Push one decoded frame into a filtergraph input.  If the frame's
 * parameters differ from what the input was configured with (format, size,
 * sample rate, channels, hw frames context), the graph is reconfigured; if
 * sibling inputs are not yet known, the frame is cloned and queued instead.
 * NOTE(review): lines are missing from this extract (original numbering
 * jumps): several braces, 'break' statements and error paths are not
 * visible — confirm against upstream ffmpeg.c before editing.
 */
2138 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2140 FilterGraph *fg = ifilter->graph;
2141 int need_reinit, ret, i;
2143 /* determine if the parameters for this input changed */
2144 need_reinit = ifilter->format != frame->format;
// A change of hardware frames context also forces a reinit.
2145 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2146 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2149 switch (ifilter->ist->st->codecpar->codec_type) {
2150 case AVMEDIA_TYPE_AUDIO:
2151 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2152 ifilter->channels != frame->channels ||
2153 ifilter->channel_layout != frame->channel_layout;
2155 case AVMEDIA_TYPE_VIDEO:
2156 need_reinit |= ifilter->width != frame->width ||
2157 ifilter->height != frame->height;
2162 ret = ifilter_parameters_from_frame(ifilter, frame);
2167 /* (re)init the graph if possible, otherwise buffer the frame and return */
2168 if (need_reinit || !fg->graph) {
2169 for (i = 0; i < fg->nb_inputs; i++) {
// Cannot configure yet: clone the frame and queue it on this input.
2170 if (!ifilter_has_all_input_formats(fg)) {
2171 AVFrame *tmp = av_frame_clone(frame);
2173 return AVERROR(ENOMEM);
2174 av_frame_unref(frame);
// Grow the FIFO when full before appending the cloned frame pointer.
2176 if (!av_fifo_space(ifilter->frame_queue)) {
2177 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2181 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
// Drain pending output under the old configuration before rebuilding.
2186 ret = reap_filters(1);
2187 if (ret < 0 && ret != AVERROR_EOF) {
2189 av_strerror(ret, errbuf, sizeof(errbuf));
2191 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2195 ret = configure_filtergraph(fg);
2197 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
// PUSH: immediately propagate the frame through the graph.
2202 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2204 av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
/*
 * Signal end-of-stream on one filtergraph input.  If the graph is already
 * configured, push a NULL frame; otherwise, once every input of the
 * never-configured graph has hit EOF, mark all its output streams finished.
 * NOTE(review): declarations of 'ret', 'i', 'j', the eof-flag assignment
 * and the return statements are missing from this extract (original
 * numbering jumps).
 */
2211 static int ifilter_send_eof(InputFilter *ifilter)
2217 if (ifilter->filter) {
// NULL frame + PUSH flags EOF to the buffersrc of a configured graph.
2218 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2222 // the filtergraph was never configured
2223 FilterGraph *fg = ifilter->graph;
2224 for (i = 0; i < fg->nb_inputs; i++)
2225 if (!fg->inputs[i]->eof)
2227 if (i == fg->nb_inputs) {
2228 // All the input streams have finished without the filtergraph
2229 // ever being configured.
2230 // Mark the output streams as finished.
2231 for (j = 0; j < fg->nb_outputs; j++)
2232 finish_output_stream(fg->outputs[j]->ost);
/*
 * Thin wrapper over the send/receive decode API that emulates the legacy
 * one-packet-in / maybe-one-frame-out calling convention used by
 * decode_audio()/decode_video().
 * NOTE(review): the *got_frame assignments and return statements are
 * missing from this extract (original numbering jumps) — confirm against
 * upstream ffmpeg.c.
 */
2239 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2240 // There is the following difference: if you got a frame, you must call
2241 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2242 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2243 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2250 ret = avcodec_send_packet(avctx, pkt);
2251 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2252 // decoded frames with avcodec_receive_frame() until done.
2253 if (ret < 0 && ret != AVERROR_EOF)
2257 ret = avcodec_receive_frame(avctx, frame);
2258 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Fan a decoded frame out to every filtergraph input fed by this input
 * stream.  All but the last consumer get a reference copy (via the shared
 * ist->filter_frame); the last one receives the frame itself.  AVERROR_EOF
 * from a filter is ignored.
 * NOTE(review): declarations of 'f' and 'ret', the else-branch giving the
 * last filter the original frame, and the final return are missing from
 * this extract (original numbering jumps).
 */
2266 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2271 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2272 for (i = 0; i < ist->nb_filters; i++) {
// Every filter except the last gets a new reference to the frame.
2273 if (i < ist->nb_filters - 1) {
2274 f = ist->filter_frame;
2275 ret = av_frame_ref(f, decoded_frame);
2280 ret = ifilter_send_frame(ist->filters[i], f);
2281 if (ret == AVERROR_EOF)
2282 ret = 0; /* ignore */
2284 av_log(NULL, AV_LOG_ERROR,
2285 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, validate the result, advance the stream's
 * predicted timestamps, choose/rescale the frame pts and hand the frame to
 * the filtergraphs.  Returns the decode result, or the filter error if one
 * occurred.
 * NOTE(review): lines are missing from this extract (original numbering
 * jumps): declarations of 'ret' and 'err', early-return/goto paths, the
 * sample_rate divisors of the next_pts/next_dts increments, and several
 * braces are not visible — confirm against upstream ffmpeg.c.
 */
2292 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2295 AVFrame *decoded_frame;
2296 AVCodecContext *avctx = ist->dec_ctx;
2298 AVRational decoded_frame_tb;
// Persistent frames on the InputStream are allocated lazily and reused.
2300 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2301 return AVERROR(ENOMEM);
2302 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2303 return AVERROR(ENOMEM);
2304 decoded_frame = ist->decoded_frame;
2306 update_benchmark(NULL);
2307 ret = decode(avctx, decoded_frame, got_output, pkt);
2308 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
// A successful decode with a non-positive sample rate is treated as bad data.
2312 if (ret >= 0 && avctx->sample_rate <= 0) {
2313 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2314 ret = AVERROR_INVALIDDATA;
2317 if (ret != AVERROR_EOF)
2318 check_decode_result(ist, got_output, ret);
2320 if (!*got_output || ret < 0)
2323 ist->samples_decoded += decoded_frame->nb_samples;
2324 ist->frames_decoded++;
2327 /* increment next_dts to use for the case where the input stream does not
2328 have timestamps or there are multiple frames in the packet */
2329 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2331 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
// Timestamp source priority: frame pts, then packet pts, then stream dts.
2335 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2336 decoded_frame_tb = ist->st->time_base;
2337 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2338 decoded_frame->pts = pkt->pts;
2339 decoded_frame_tb = ist->st->time_base;
2341 decoded_frame->pts = ist->dts;
2342 decoded_frame_tb = AV_TIME_BASE_Q;
// Rescale to a 1/sample_rate timebase with drift compensation.
2344 if (decoded_frame->pts != AV_NOPTS_VALUE)
2345 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2346 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2347 (AVRational){1, avctx->sample_rate});
2348 ist->nb_samples = decoded_frame->nb_samples;
2349 err = send_frame_to_filters(ist, decoded_frame);
2351 av_frame_unref(ist->filter_frame);
2352 av_frame_unref(decoded_frame);
// Filter errors take precedence over the decode return value.
2353 return err < 0 ? err : ret;
// Decode one video packet (or flush the decoder when pkt == NULL) and send
// any produced frame on to the input stream's filtergraphs.
// Returns err if sending to the filters failed (err < 0), else the decoder's
// return code. NOTE(review): several original lines are elided in this
// excerpt; comments describe only what the visible code shows.
2356 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2359 AVFrame *decoded_frame;
2360 int i, ret = 0, err = 0;
2361 int64_t best_effort_timestamp;
2362 int64_t dts = AV_NOPTS_VALUE;
2365 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2366 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2368 if (!eof && pkt && pkt->size == 0)
// Lazily allocate the per-stream reusable frames.
2371 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2372 return AVERROR(ENOMEM);
2373 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2374 return AVERROR(ENOMEM);
2375 decoded_frame = ist->decoded_frame;
// Rescale the stream's current dts (AV_TIME_BASE units) to the stream
// time base so it can be stamped onto the packet below.
2376 if (ist->dts != AV_NOPTS_VALUE)
2377 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2380 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2383 // The old code used to set dts on the drain packet, which does not work
2384 // with the new API anymore.
// Buffer the dts instead so it can be recovered as a best-effort timestamp
// while draining at EOF (see below).
2386 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2388 return AVERROR(ENOMEM);
2389 ist->dts_buffer = new;
2390 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2393 update_benchmark(NULL);
2394 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2395 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2399 // The following line may be required in some cases where there is no parser
2400 // or the parser does not has_b_frames correctly
2401 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2402 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2403 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2405 av_log(ist->dec_ctx, AV_LOG_WARNING,
2406 "video_delay is larger in decoder than demuxer %d > %d.\n"
2407 "If you want to help, upload a sample "
2408 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2409 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2410 ist->dec_ctx->has_b_frames,
2411 ist->st->codecpar->video_delay);
2414 if (ret != AVERROR_EOF)
2415 check_decode_result(ist, got_output, ret);
// Debug aid: report when the decoded frame's geometry/format disagrees
// with what the decoder context advertises.
2417 if (*got_output && ret >= 0) {
2418 if (ist->dec_ctx->width != decoded_frame->width ||
2419 ist->dec_ctx->height != decoded_frame->height ||
2420 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2421 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2422 decoded_frame->width,
2423 decoded_frame->height,
2424 decoded_frame->format,
2425 ist->dec_ctx->width,
2426 ist->dec_ctx->height,
2427 ist->dec_ctx->pix_fmt);
2431 if (!*got_output || ret < 0)
// Honor a user-forced field order (-top option), if any.
2434 if(ist->top_field_first>=0)
2435 decoded_frame->top_field_first = ist->top_field_first;
2437 ist->frames_decoded++;
// Download the frame from the hwaccel surface to system memory when a
// retrieval callback is installed for this pixel format.
2439 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2440 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2444 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2446 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
// Forced CFR input (-r on input): synthesize monotonically increasing pts.
2448 if (ist->framerate.num)
2449 best_effort_timestamp = ist->cfr_next_pts++;
// While draining at EOF, fall back to the oldest buffered packet dts when
// the decoder could not produce a timestamp; pop it from the FIFO.
2451 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2452 best_effort_timestamp = ist->dts_buffer[0];
2454 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2455 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2456 ist->nb_dts_buffer--;
2459 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2460 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2462 if (ts != AV_NOPTS_VALUE)
2463 ist->next_pts = ist->pts = ts;
2467 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2468 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2469 ist->st->index, av_ts2str(decoded_frame->pts),
2470 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2471 best_effort_timestamp,
2472 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2473 decoded_frame->key_frame, decoded_frame->pict_type,
2474 ist->st->time_base.num, ist->st->time_base.den);
// Prefer the container-level sample aspect ratio when present.
2477 if (ist->st->sample_aspect_ratio.num)
2478 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2480 err = send_frame_to_filters(ist, decoded_frame);
2483 av_frame_unref(ist->filter_frame);
2484 av_frame_unref(decoded_frame);
2485 return err < 0 ? err : ret;
// Decode one subtitle packet and dispatch the result: either render it into
// the sub2video filter input, queue it for a not-yet-configured filtergraph,
// or encode it directly to every matching subtitle output stream.
// NOTE(review): interior lines are elided in this excerpt; comments describe
// only the visible code.
2488 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2491 AVSubtitle subtitle;
2493 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2494 &subtitle, got_output, pkt);
2496 check_decode_result(NULL, got_output, ret);
2498 if (ret < 0 || !*got_output) {
// No decoded subtitle: flush the sub2video state (visible on this path).
2501 sub2video_flush(ist);
// -fix_sub_duration: clip the previous subtitle's display time so it ends
// when the current one starts, then emit the *previous* subtitle by swapping
// it with the current one (one-subtitle delay).
2505 if (ist->fix_sub_duration) {
2507 if (ist->prev_sub.got_output) {
2508 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2509 1000, AV_TIME_BASE);
2510 if (end < ist->prev_sub.subtitle.end_display_time) {
2511 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2512 "Subtitle duration reduced from %d to %d%s\n",
2513 ist->prev_sub.subtitle.end_display_time, end,
2514 end <= 0 ? ", dropping it" : "");
2515 ist->prev_sub.subtitle.end_display_time = end;
2518 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2519 FFSWAP(int, ret, ist->prev_sub.ret);
2520 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
// Route to sub2video if configured; otherwise, if filters exist but are not
// ready, queue the subtitle in a growable FIFO for later delivery.
2528 if (ist->sub2video.frame) {
2529 sub2video_update(ist, &subtitle);
2530 } else if (ist->nb_filters) {
2531 if (!ist->sub2video.sub_queue)
2532 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2533 if (!ist->sub2video.sub_queue)
2535 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2536 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2540 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2544 if (!subtitle.num_rects)
2547 ist->frames_decoded++;
// Encode the subtitle for every output stream fed by this input.
2549 for (i = 0; i < nb_output_streams; i++) {
2550 OutputStream *ost = output_streams[i];
2552 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2553 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2556 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2561 avsubtitle_free(&subtitle);
// Signal EOF to every filtergraph input attached to this input stream.
// NOTE(review): the error-handling and return lines are elided in this
// excerpt; only the loop over ifilter_send_eof() is visible.
2565 static int send_filter_eof(InputStream *ist)
2568 for (i = 0; i < ist->nb_filters; i++) {
2569 ret = ifilter_send_eof(ist->filters[i]);
2576 /* pkt = NULL means EOF (needed to flush decoder buffers) */
// Central per-packet driver: maintains the input stream's pts/dts clocks,
// runs the appropriate decoder (audio/video/subtitle) when decoding is
// needed, sends filter EOFs after flushing, and advances timestamps /
// forwards packets for the stream-copy case.
// Returns !eof_reached (visible at the end); 0 therefore means EOF.
// NOTE(review): interior lines are elided in this excerpt; comments describe
// only the visible code.
2577 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2581 int eof_reached = 0;
// First packet on this stream: seed ist->dts. With a known avg_frame_rate
// it is pre-compensated backwards by the decoder's B-frame delay.
2584 if (!ist->saw_first_ts) {
2585 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2587 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2588 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2589 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2591 ist->saw_first_ts = 1;
2594 if (ist->next_dts == AV_NOPTS_VALUE)
2595 ist->next_dts = ist->dts;
2596 if (ist->next_pts == AV_NOPTS_VALUE)
2597 ist->next_pts = ist->pts;
2601 av_init_packet(&avpkt);
// Resync the stream clocks to the packet dts (rescaled to AV_TIME_BASE).
2608 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2609 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2610 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2611 ist->next_pts = ist->pts = ist->dts;
2614 // while we have more to decode or while the decoder did output something on EOF
2615 while (ist->decoding_needed) {
2618 int decode_failed = 0;
2620 ist->pts = ist->next_pts;
2621 ist->dts = ist->next_dts;
// Dispatch to the media-type-specific decoder. "repeating" iterations
// (draining buffered output) pass NULL instead of the packet.
2623 switch (ist->dec_ctx->codec_type) {
2624 case AVMEDIA_TYPE_AUDIO:
2625 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2628 case AVMEDIA_TYPE_VIDEO:
2629 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
// Estimate the frame duration: packet duration if present, otherwise
// derived from the decoder's framerate and repeat_pict/ticks_per_frame.
2631 if (!repeating || !pkt || got_output) {
2632 if (pkt && pkt->duration) {
2633 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2634 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2635 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2636 duration = ((int64_t)AV_TIME_BASE *
2637 ist->dec_ctx->framerate.den * ticks) /
2638 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2641 if(ist->dts != AV_NOPTS_VALUE && duration) {
2642 ist->next_dts += duration;
2644 ist->next_dts = AV_NOPTS_VALUE;
2648 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2650 case AVMEDIA_TYPE_SUBTITLE:
2653 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2654 if (!pkt && ret >= 0)
2661 if (ret == AVERROR_EOF) {
// Distinguish decoder failures from downstream (filter/encode) failures
// in the log; downstream failures are fatal here.
2667 if (decode_failed) {
2668 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2669 ist->file_index, ist->st->index, av_err2str(ret));
2671 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2672 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2674 if (!decode_failed || exit_on_error)
2680 ist->got_output = 1;
2685 // During draining, we might get multiple output frames in this loop.
2686 // ffmpeg.c does not drain the filter chain on configuration changes,
2687 // which means if we send multiple frames at once to the filters, and
2688 // one of those frames changes configuration, the buffered frames will
2689 // be lost. This can upset certain FATE tests.
2690 // Decode only 1 frame per call on EOF to appease these FATE tests.
2691 // The ideal solution would be to rewrite decoding to use the new
2692 // decoding API in a better way.
2699 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2700 /* except when looping we need to flush but not to send an EOF */
2701 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2702 int ret = send_filter_eof(ist);
2704 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2709 /* handle stream copy */
2710 if (!ist->decoding_needed) {
2711 ist->dts = ist->next_dts;
// Advance next_dts by the copied packet's duration, estimated per media
// type (audio frame size, forced framerate, packet duration, or decoder
// framerate with repeat_pict compensation).
2712 switch (ist->dec_ctx->codec_type) {
2713 case AVMEDIA_TYPE_AUDIO:
2714 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2715 ist->dec_ctx->sample_rate;
2717 case AVMEDIA_TYPE_VIDEO:
2718 if (ist->framerate.num) {
2719 // TODO: Remove work-around for c99-to-c89 issue 7
2720 AVRational time_base_q = AV_TIME_BASE_Q;
2721 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2722 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2723 } else if (pkt->duration) {
2724 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2725 } else if(ist->dec_ctx->framerate.num != 0) {
2726 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2727 ist->next_dts += ((int64_t)AV_TIME_BASE *
2728 ist->dec_ctx->framerate.den * ticks) /
2729 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2733 ist->pts = ist->dts;
2734 ist->next_pts = ist->next_dts;
// Forward the packet unchanged to every stream-copy output it feeds.
2736 for (i = 0; pkt && i < nb_output_streams; i++) {
2737 OutputStream *ost = output_streams[i];
2739 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2742 do_streamcopy(ist, ost, pkt);
2745 return !eof_reached;
// Build an SDP description covering all RTP output files and either print it
// to stdout or write it to the file given by -sdp_file. Only runs once every
// output file's header has been written (first loop bails out otherwise).
// NOTE(review): interior lines (declarations, error paths) are elided in
// this excerpt.
2748 static void print_sdp(void)
2753 AVIOContext *sdp_pb;
2754 AVFormatContext **avc;
2756 for (i = 0; i < nb_output_files; i++) {
2757 if (!output_files[i]->header_written)
2761 avc = av_malloc_array(nb_output_files, sizeof(*avc));
// Collect only the "rtp" muxer contexts; j counts how many were found.
2764 for (i = 0, j = 0; i < nb_output_files; i++) {
2765 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2766 avc[j] = output_files[i]->ctx;
2774 av_sdp_create(avc, j, sdp, sizeof(sdp));
2776 if (!sdp_filename) {
2777 printf("SDP:\n%s\n", sdp);
2780 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2781 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2783 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2784 avio_closep(&sdp_pb);
// Free the filename so the SDP is only emitted once.
2785 av_freep(&sdp_filename);
// Look up the HWAccel table entry whose pixel format matches pix_fmt.
// The hwaccels[] table is terminated by an entry with a NULL name.
// NOTE(review): the not-found return path is elided in this excerpt.
2793 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2796 for (i = 0; hwaccels[i].name; i++)
2797 if (hwaccels[i].pix_fmt == pix_fmt)
2798 return &hwaccels[i];
// AVCodecContext.get_format callback: walk the decoder's proposed pixel
// formats and pick a hwaccel one that matches the user's request, trying to
// initialize the corresponding hwaccel. A failed init of an explicitly
// requested hwaccel is fatal (returns AV_PIX_FMT_NONE).
// NOTE(review): interior lines (continue statements, software-format
// fallback) are elided in this excerpt.
2802 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2804 InputStream *ist = s->opaque;
2805 const enum AVPixelFormat *p;
// pix_fmts is ordered by decoder preference and terminated by -1.
2808 for (p = pix_fmts; *p != -1; p++) {
2809 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2810 const HWAccel *hwaccel;
// Skip software formats; only hwaccel-flagged formats are considered here.
2812 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2815 hwaccel = get_hwaccel(*p);
// Reject hwaccels that conflict with an already-active one or with the
// user's explicit -hwaccel choice.
2817 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2818 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2821 ret = hwaccel->init(s);
2823 if (ist->hwaccel_id == hwaccel->id) {
2824 av_log(NULL, AV_LOG_FATAL,
2825 "%s hwaccel requested for input stream #%d:%d, "
2826 "but cannot be initialized.\n", hwaccel->name,
2827 ist->file_index, ist->st->index);
2828 return AV_PIX_FMT_NONE;
// Propagate the input stream's hw frames context to the decoder; a failed
// ref is treated as fatal for format negotiation.
2833 if (ist->hw_frames_ctx) {
2834 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2835 if (!s->hw_frames_ctx)
2836 return AV_PIX_FMT_NONE;
2839 ist->active_hwaccel_id = hwaccel->id;
2840 ist->hwaccel_pix_fmt = *p;
// AVCodecContext.get_buffer2 callback: delegate frame allocation to the
// active hwaccel when the frame uses its pixel format, otherwise fall back
// to libavcodec's default buffer allocator.
2847 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2849 InputStream *ist = s->opaque;
2851 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2852 return ist->hwaccel_get_buffer(s, frame, flags);
2854 return avcodec_default_get_buffer2(s, frame, flags);
// Prepare one input stream for decoding: install the hwaccel callbacks,
// apply subtitle/timebase-related decoder options, and open the decoder.
// On failure a human-readable message is written into error[error_len] and
// a negative AVERROR code is returned.
// NOTE(review): interior lines (error returns, closing braces) are elided
// in this excerpt; comments describe only the visible code.
2857 static int init_input_stream(int ist_index, char *error, int error_len)
2860 InputStream *ist = input_streams[ist_index];
2862 if (ist->decoding_needed) {
2863 AVCodec *codec = ist->dec;
// No decoder available for this codec: report and fail with EINVAL.
2865 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2866 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2867 return AVERROR(EINVAL);
// Hook up per-stream callbacks; opaque lets the callbacks recover ist.
2870 ist->dec_ctx->opaque = ist;
2871 ist->dec_ctx->get_format = get_format;
2872 ist->dec_ctx->get_buffer2 = get_buffer;
2873 ist->dec_ctx->thread_safe_callbacks = 1;
2875 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2876 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2877 (ist->decoding_needed & DECODING_FOR_OST)) {
2878 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2879 if (ist->decoding_needed & DECODING_FOR_FILTER)
2880 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2883 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2885 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2886 * audio, and video decoders such as cuvid or mediacodec */
2887 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
// Default to automatic threading unless the user chose a thread count.
2889 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2890 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2891 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2892 if (ret == AVERROR_EXPERIMENTAL)
2893 abort_codec_experimental(codec, 0);
2895 snprintf(error, error_len,
2896 "Error while opening decoder for input stream "
2898 ist->file_index, ist->st->index, av_err2str(ret));
// Any decoder options left unconsumed are reported as errors.
2901 assert_avoptions(ist->decoder_opts);
2904 ist->next_pts = AV_NOPTS_VALUE;
2905 ist->next_dts = AV_NOPTS_VALUE;
// Return the InputStream feeding this output stream, keyed by source_index.
// NOTE(review): the no-source return path (source_index < 0) is elided in
// this excerpt.
2910 static InputStream *get_input_stream(OutputStream *ost)
2912 if (ost->source_index >= 0)
2913 return input_streams[ost->source_index];
// qsort() comparator for int64_t values; FFDIFFSIGN avoids the overflow a
// plain subtraction of two int64_t could cause.
2917 static int compare_int64(const void *a, const void *b)
2919 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2922 /* open the muxer when all the streams are initialized */
// Once every stream of this output file is initialized: write the header,
// optionally emit the SDP, and flush packets buffered in the per-stream
// muxing queues while the muxer was not yet open.
// NOTE(review): interior lines (early returns, packet declaration) are
// elided in this excerpt.
2923 static int check_init_output_file(OutputFile *of, int file_index)
2927 for (i = 0; i < of->ctx->nb_streams; i++) {
2928 OutputStream *ost = output_streams[of->ost_index + i];
2929 if (!ost->initialized)
2933 of->ctx->interrupt_callback = int_cb;
2935 ret = avformat_write_header(of->ctx, &of->opts);
2937 av_log(NULL, AV_LOG_ERROR,
2938 "Could not write header for output file #%d "
2939 "(incorrect codec parameters ?): %s\n",
2940 file_index, av_err2str(ret));
2943 //assert_avoptions(of->opts);
2944 of->header_written = 1;
2946 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2948 if (sdp_filename || want_sdp)
2951 /* flush the muxing queues */
2952 for (i = 0; i < of->ctx->nb_streams; i++) {
2953 OutputStream *ost = output_streams[of->ost_index + i];
2955 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2956 if (!av_fifo_size(ost->muxing_queue))
2957 ost->mux_timebase = ost->st->time_base;
2959 while (av_fifo_size(ost->muxing_queue)) {
2961 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2962 write_packet(of, &pkt, ost);
// Initialize the chain of bitstream filters for an output stream: each
// filter's input parameters/time base come from the previous filter's
// output (or the stream itself for the first), and the final filter's
// output parameters/time base are copied back to the stream.
// NOTE(review): error-return lines are elided in this excerpt.
2969 static int init_output_bsfs(OutputStream *ost)
2974 if (!ost->nb_bitstream_filters)
2977 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2978 ctx = ost->bsf_ctx[i];
// Chain: filter i consumes filter i-1's output parameters.
2980 ret = avcodec_parameters_copy(ctx->par_in,
2981 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2985 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2987 ret = av_bsf_init(ctx);
2989 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2990 ost->bsf_ctx[i]->filter->name);
// Propagate the last filter's output back to the stream.
2995 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2996 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3000 ost->st->time_base = ctx->time_base_out;
// Set up an output stream in stream-copy mode: copy codec parameters from
// the input, resolve the codec tag for the target container, transfer
// timing info, duplicate stream side data, and apply per-codec-type fixups.
// Returns 0 on success or a negative AVERROR code.
// NOTE(review): interior lines (error checks, closing braces) are elided in
// this excerpt; comments describe only the visible code.
3005 static int init_output_stream_streamcopy(OutputStream *ost)
3007 OutputFile *of = output_files[ost->file_index];
3008 InputStream *ist = get_input_stream(ost);
3009 AVCodecParameters *par_dst = ost->st->codecpar;
3010 AVCodecParameters *par_src = ost->ref_par;
3013 uint32_t codec_tag = par_dst->codec_tag;
// Stream copy requires a source stream and must not go through a filter.
3015 av_assert0(ist && !ost->filter);
3017 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3019 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3021 av_log(NULL, AV_LOG_FATAL,
3022 "Error setting up codec context options.\n");
3025 avcodec_parameters_from_context(par_src, ost->enc_ctx);
// Keep the source codec tag only when the target container either has no
// tag table, maps the tag back to the same codec id, or has no tag of its
// own for this codec.
3028 unsigned int codec_tag_tmp;
3029 if (!of->ctx->oformat->codec_tag ||
3030 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3031 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3032 codec_tag = par_src->codec_tag;
3035 ret = avcodec_parameters_copy(par_dst, par_src);
3039 par_dst->codec_tag = codec_tag;
3041 if (!ost->frame_rate.num)
3042 ost->frame_rate = ist->framerate;
3043 ost->st->avg_frame_rate = ost->frame_rate;
3045 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3049 // copy timebase while removing common factors
3050 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3051 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3053 // copy estimated duration as a hint to the muxer
3054 if (ost->st->duration <= 0 && ist->st->duration > 0)
3055 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3058 ost->st->disposition = ist->st->disposition;
// Deep-copy all stream-level side data from input to output.
3060 if (ist->st->nb_side_data) {
3061 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
3062 sizeof(*ist->st->side_data));
3063 if (!ost->st->side_data)
3064 return AVERROR(ENOMEM);
3066 ost->st->nb_side_data = 0;
3067 for (i = 0; i < ist->st->nb_side_data; i++) {
3068 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3069 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
3071 sd_dst->data = av_malloc(sd_src->size);
3073 return AVERROR(ENOMEM);
3074 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3075 sd_dst->size = sd_src->size;
3076 sd_dst->type = sd_src->type;
3077 ost->st->nb_side_data++;
// User overrode rotation: emit a display-matrix side data with the negated
// override angle.
3081 if (ost->rotate_overridden) {
3082 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3083 sizeof(int32_t) * 9);
3085 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3088 ost->parser = av_parser_init(par_dst->codec_id);
3089 ost->parser_avctx = avcodec_alloc_context3(NULL);
3090 if (!ost->parser_avctx)
3091 return AVERROR(ENOMEM);
3093 switch (par_dst->codec_type) {
3094 case AVMEDIA_TYPE_AUDIO:
3095 if (audio_volume != 256) {
3096 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
// Clear bogus block_align values known to occur for MP3 and AC-3.
3099 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3100 par_dst->block_align= 0;
3101 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3102 par_dst->block_align= 0;
3104 case AVMEDIA_TYPE_VIDEO:
// Sample aspect ratio priority: -aspect override, then container SAR,
// then the codec-level SAR.
3105 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3107 av_mul_q(ost->frame_aspect_ratio,
3108 (AVRational){ par_dst->height, par_dst->width });
3109 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3110 "with stream copy may produce invalid files\n");
3112 else if (ist->st->sample_aspect_ratio.num)
3113 sar = ist->st->sample_aspect_ratio;
3115 sar = par_src->sample_aspect_ratio;
3116 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3117 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3118 ost->st->r_frame_rate = ist->st->r_frame_rate;
3122 ost->mux_timebase = ist->st->time_base;
// Write an "encoder" metadata tag ("Lavc... <codec name>") on the output
// stream, unless one is already present. The library identification string
// is omitted when either the muxer or the encoder runs in bitexact mode.
// NOTE(review): interior lines (early return, some braces) are elided in
// this excerpt.
3127 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3129 AVDictionaryEntry *e;
3131 uint8_t *encoder_string;
3132 int encoder_string_len;
3133 int format_flags = 0;
3134 int codec_flags = 0;
// Respect an existing user/stream-provided encoder tag.
3136 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
// Evaluate the "fflags" option string to detect AVFMT_FLAG_BITEXACT.
3139 e = av_dict_get(of->opts, "fflags", NULL, 0);
3141 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3144 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
// Likewise evaluate the encoder "flags" for AV_CODEC_FLAG_BITEXACT.
3146 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3148 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3151 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3154 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3155 encoder_string = av_mallocz(encoder_string_len);
3156 if (!encoder_string)
3159 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3160 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3162 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3163 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
// DONT_STRDUP_VAL transfers ownership of encoder_string to the dict.
3164 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3165 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
// Parse a -force_key_frames specification (comma-separated times, with the
// special "chapters[+offset]" form expanding to all chapter start times)
// into a sorted array of timestamps in avctx->time_base units, stored in
// ost->forced_kf_pts / ost->forced_kf_count.
// NOTE(review): interior lines (declarations, exits, braces) are elided in
// this excerpt.
3168 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3169 AVCodecContext *avctx)
3172 int n = 1, i, size, index = 0;
// First pass (visible start): scan the spec, presumably counting commas to
// size the array — the loop body is elided here.
3175 for (p = kf; *p; p++)
3179 pts = av_malloc_array(size, sizeof(*pts));
3181 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3186 for (i = 0; i < n; i++) {
3187 char *next = strchr(p, ',');
// "chapters[+offset]": grow the array and add one entry per chapter.
3192 if (!memcmp(p, "chapters", 8)) {
3194 AVFormatContext *avf = output_files[ost->file_index]->ctx;
// Guard size += nb_chapters - 1 against int overflow before reallocating.
3197 if (avf->nb_chapters > INT_MAX - size ||
3198 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3200 av_log(NULL, AV_LOG_FATAL,
3201 "Could not allocate forced key frames array.\n");
3204 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3205 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3207 for (j = 0; j < avf->nb_chapters; j++) {
3208 AVChapter *c = avf->chapters[j];
3209 av_assert1(index < size);
3210 pts[index++] = av_rescale_q(c->start, c->time_base,
3211 avctx->time_base) + t;
// Plain timestamp entry.
3216 t = parse_time_or_die("force_key_frames", p, 1);
3217 av_assert1(index < size);
3218 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3225 av_assert0(index == size);
// do_video_out() expects the forced keyframe times in ascending order.
3226 qsort(pts, size, sizeof(*pts), compare_int64);
3227 ost->forced_kf_count = size;
3228 ost->forced_kf_pts = pts;
// Configure the encoder context of an output stream that will be encoded:
// derive frame rate, sample/pixel formats, dimensions, time bases and
// related fields from the filtergraph sink and the source stream, and parse
// the forced-keyframes specification for video.
// NOTE(review): interior lines (braces, some conditions) are elided in this
// excerpt; comments describe only the visible code.
3231 static int init_output_stream_encode(OutputStream *ost)
3233 InputStream *ist = get_input_stream(ost);
3234 AVCodecContext *enc_ctx = ost->enc_ctx;
3235 AVCodecContext *dec_ctx = NULL;
3236 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3239 set_encoder_id(output_files[ost->file_index], ost);
3241 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3242 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3243 // which have to be filtered out to prevent leaking them to output files.
3244 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3247 ost->st->disposition = ist->st->disposition;
3249 dec_ctx = ist->dec_ctx;
3251 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
// If this is the only stream of its media type in the output file, mark
// audio/video streams as the default disposition.
3253 for (j = 0; j < oc->nb_streams; j++) {
3254 AVStream *st = oc->streams[j];
3255 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3258 if (j == oc->nb_streams)
3259 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3260 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3261 ost->st->disposition = AV_DISPOSITION_DEFAULT;
// Frame rate resolution order: filter sink, input -r, input r_frame_rate,
// hard-coded 25 fps fallback (with a warning).
3264 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3265 if (!ost->frame_rate.num)
3266 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3267 if (ist && !ost->frame_rate.num)
3268 ost->frame_rate = ist->framerate;
3269 if (ist && !ost->frame_rate.num)
3270 ost->frame_rate = ist->st->r_frame_rate;
3271 if (ist && !ost->frame_rate.num) {
3272 ost->frame_rate = (AVRational){25, 1};
3273 av_log(NULL, AV_LOG_WARNING,
3275 "about the input framerate is available. Falling "
3276 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3277 "if you want a different framerate.\n",
3278 ost->file_index, ost->index);
3280 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
// Snap to the nearest rate the encoder supports, unless -force_fps.
3281 if (ost->enc->supported_framerates && !ost->force_fps) {
3282 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3283 ost->frame_rate = ost->enc->supported_framerates[idx];
3285 // reduce frame rate for mpeg4 to be within the spec limits
3286 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3287 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3288 ost->frame_rate.num, ost->frame_rate.den, 65535);
3292 switch (enc_ctx->codec_type) {
3293 case AVMEDIA_TYPE_AUDIO:
// Audio parameters come straight from the buffersink.
3294 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3296 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3297 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3298 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3299 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3300 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3301 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3303 case AVMEDIA_TYPE_VIDEO:
// Video time base: inverse frame rate, falling back to the sink's base.
3304 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3305 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3306 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3307 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3308 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3309 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3310 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
// Rescale pre-parsed forced keyframe times into the encoder time base.
3312 for (j = 0; j < ost->forced_kf_count; j++)
3313 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3315 enc_ctx->time_base);
3317 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3318 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3319 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3320 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3321 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3322 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
// Warn when libx264/mpeg2video will auto-pick a pixel format that is not
// yuv420p, which older players may not handle.
3323 if (!strncmp(ost->enc->name, "libx264", 7) &&
3324 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3325 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3326 av_log(NULL, AV_LOG_WARNING,
3327 "No pixel format specified, %s for H.264 encoding chosen.\n"
3328 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3329 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3330 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3331 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3332 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3333 av_log(NULL, AV_LOG_WARNING,
3334 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3335 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3336 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3337 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3339 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3340 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3342 enc_ctx->framerate = ost->frame_rate;
3344 ost->st->avg_frame_rate = ost->frame_rate;
// When geometry/format changed relative to the decoder, apply the global
// frame_bits_per_raw_sample setting instead.
3347 enc_ctx->width != dec_ctx->width ||
3348 enc_ctx->height != dec_ctx->height ||
3349 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3350 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
// -force_key_frames: either an expression ("expr:..."), the special
// "source" keyword (handled elsewhere), or a static list of times.
3353 if (ost->forced_keyframes) {
3354 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3355 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3356 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3358 av_log(NULL, AV_LOG_ERROR,
3359 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3362 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3363 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3364 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3365 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3367 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3368 // parse it only for static kf timings
3369 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3370 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3374 case AVMEDIA_TYPE_SUBTITLE:
3375 enc_ctx->time_base = AV_TIME_BASE_Q;
// Default subtitle canvas size to the source video dimensions.
3376 if (!enc_ctx->width) {
3377 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3378 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3381 case AVMEDIA_TYPE_DATA:
3388 ost->mux_timebase = enc_ctx->time_base;
3393 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3397 if (ost->encoding_needed) {
3398 AVCodec *codec = ost->enc;
3399 AVCodecContext *dec = NULL;
3402 ret = init_output_stream_encode(ost);
3406 if ((ist = get_input_stream(ost)))
3408 if (dec && dec->subtitle_header) {
3409 /* ASS code assumes this buffer is null terminated so add extra byte. */
3410 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3411 if (!ost->enc_ctx->subtitle_header)
3412 return AVERROR(ENOMEM);
3413 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3414 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3416 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3417 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3418 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3420 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3421 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3422 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3424 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3425 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3426 if (!ost->enc_ctx->hw_frames_ctx)
3427 return AVERROR(ENOMEM);
3430 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3431 if (ret == AVERROR_EXPERIMENTAL)
3432 abort_codec_experimental(codec, 1);
3433 snprintf(error, error_len,
3434 "Error while opening encoder for output stream #%d:%d - "
3435 "maybe incorrect parameters such as bit_rate, rate, width or height",
3436 ost->file_index, ost->index);
3439 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3440 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3441 av_buffersink_set_frame_size(ost->filter->filter,
3442 ost->enc_ctx->frame_size);
3443 assert_avoptions(ost->encoder_opts);
3444 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3445 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3446 " It takes bits/s as argument, not kbits/s\n");
3448 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3450 av_log(NULL, AV_LOG_FATAL,
3451 "Error initializing the output stream codec context.\n");
3455 * FIXME: ost->st->codec should't be needed here anymore.
3457 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3461 if (ost->enc_ctx->nb_coded_side_data) {
3464 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3465 sizeof(*ost->st->side_data));
3466 if (!ost->st->side_data)
3467 return AVERROR(ENOMEM);
3469 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3470 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3471 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3473 sd_dst->data = av_malloc(sd_src->size);
3475 return AVERROR(ENOMEM);
3476 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3477 sd_dst->size = sd_src->size;
3478 sd_dst->type = sd_src->type;
3479 ost->st->nb_side_data++;
3484 * Add global input side data. For now this is naive, and copies it
3485 * from the input stream's global side data. All side data should
3486 * really be funneled over AVFrame and libavfilter, then added back to
3487 * packet side data, and then potentially using the first packet for
3492 for (i = 0; i < ist->st->nb_side_data; i++) {
3493 AVPacketSideData *sd = &ist->st->side_data[i];
3494 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3496 return AVERROR(ENOMEM);
3497 memcpy(dst, sd->data, sd->size);
3498 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3499 av_display_rotation_set((uint32_t *)dst, 0);
3503 // copy timebase while removing common factors
3504 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3505 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3507 // copy estimated duration as a hint to the muxer
3508 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3509 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3511 ost->st->codec->codec= ost->enc_ctx->codec;
3512 } else if (ost->stream_copy) {
3513 ret = init_output_stream_streamcopy(ost);
3518 * FIXME: will the codec context used by the parser during streamcopy
3519 * This should go away with the new parser API.
3521 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3526 // parse user provided disposition, and update stream values
3527 if (ost->disposition) {
3528 static const AVOption opts[] = {
3529 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3530 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3531 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3532 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3533 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3534 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3535 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3536 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3537 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3538 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3539 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3540 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3541 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3542 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3545 static const AVClass class = {
3547 .item_name = av_default_item_name,
3549 .version = LIBAVUTIL_VERSION_INT,
3551 const AVClass *pclass = &class;
3553 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3558 /* initialize bitstream filters for the output stream
3559 * needs to be done here, because the codec id for streamcopy is not
3560 * known until now */
3561 ret = init_output_bsfs(ost);
3565 ost->initialized = 1;
3567 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3574 static void report_new_stream(int input_index, AVPacket *pkt)
/* Log a one-time warning when a packet arrives for a stream index that was
 * not present when the input file was opened (streams appearing mid-stream).
 * nb_streams_warn tracks the highest index already reported so each new
 * stream is only warned about once. */
3576     InputFile *file = input_files[input_index];
3577     AVStream *st = file->ctx->streams[pkt->stream_index];
3579     if (pkt->stream_index < file->nb_streams_warn)
3581     av_log(file->ctx, AV_LOG_WARNING,
3582            "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3583            av_get_media_type_string(st->codecpar->codec_type),
3584            input_index, pkt->stream_index,
3585            pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
     /* remember we warned up to and including this stream index */
3586     file->nb_streams_warn = pkt->stream_index + 1;
3589 static int transcode_init(void)
/* One-time setup before the transcode main loop: wire filtergraph outputs to
 * source streams, initialize input streams and encoders, discard unused
 * programs, write headers for stream-less outputs, and dump the final
 * stream mapping to the log. Returns 0 on success or a negative AVERROR. */
3591     int ret = 0, i, j, k;
3592     AVFormatContext *oc;
3595     char error[1024] = {0};
     /* For complex-filtergraph outputs with no explicit source, point the
      * output stream at the input stream feeding the graph's single input. */
3597     for (i = 0; i < nb_filtergraphs; i++) {
3598         FilterGraph *fg = filtergraphs[i];
3599         for (j = 0; j < fg->nb_outputs; j++) {
3600             OutputFilter *ofilter = fg->outputs[j];
3601             if (!ofilter->ost || ofilter->ost->source_index >= 0)
3603             if (fg->nb_inputs != 1)
3605             for (k = nb_input_streams-1; k >= 0 ; k--)
3606                 if (fg->inputs[0]->ist == input_streams[k])
3608             ofilter->ost->source_index = k;
3612     /* init framerate emulation */
3613     for (i = 0; i < nb_input_files; i++) {
3614         InputFile *ifile = input_files[i];
3615         if (ifile->rate_emu)
3616             for (j = 0; j < ifile->nb_streams; j++)
3617                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3620     /* init input streams */
3621     for (i = 0; i < nb_input_streams; i++)
3622         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
             /* on failure, close any encoder contexts before bailing out */
3623             for (i = 0; i < nb_output_streams; i++) {
3624                 ost = output_streams[i];
3625                 avcodec_close(ost->enc_ctx);
3630     /* open each encoder */
3631     for (i = 0; i < nb_output_streams; i++) {
3632         // skip streams fed from filtergraphs until we have a frame for them
3633         if (output_streams[i]->filter)
3636         ret = init_output_stream(output_streams[i], error, sizeof(error));
3641     /* discard unused programs */
3642     for (i = 0; i < nb_input_files; i++) {
3643         InputFile *ifile = input_files[i];
3644         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3645             AVProgram *p = ifile->ctx->programs[j];
3646             int discard  = AVDISCARD_ALL;
             /* keep the program if any of its streams is actually used */
3648             for (k = 0; k < p->nb_stream_indexes; k++)
3649                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3650                     discard = AVDISCARD_DEFAULT;
3653             p->discard = discard;
3657     /* write headers for files with no streams */
3658     for (i = 0; i < nb_output_files; i++) {
3659         oc = output_files[i]->ctx;
3660         if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3661             ret = check_init_output_file(output_files[i], i);
3668     /* dump the stream mapping */
3669     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3670     for (i = 0; i < nb_input_streams; i++) {
3671         ist = input_streams[i];
3673         for (j = 0; j < ist->nb_filters; j++) {
3674             if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3675                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3676                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3677                        ist->filters[j]->name);
3678                 if (nb_filtergraphs > 1)
3679                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3680                 av_log(NULL, AV_LOG_INFO, "\n");
3685     for (i = 0; i < nb_output_streams; i++) {
3686         ost = output_streams[i];
3688         if (ost->attachment_filename) {
3689             /* an attached file */
3690             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3691                    ost->attachment_filename, ost->file_index, ost->index);
3695         if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3696             /* output from a complex graph */
3697             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3698             if (nb_filtergraphs > 1)
3699                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3701             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3702                    ost->index, ost->enc ? ost->enc->name : "?");
3706         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3707                input_streams[ost->source_index]->file_index,
3708                input_streams[ost->source_index]->st->index,
3711         if (ost->sync_ist != input_streams[ost->source_index])
3712             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3713                    ost->sync_ist->file_index,
3714                    ost->sync_ist->st->index);
3715         if (ost->stream_copy)
3716             av_log(NULL, AV_LOG_INFO, " (copy)");
         /* Transcoding: print "codec (decoder) -> codec (encoder)"; the
          * decoder/encoder name collapses to "native" when it matches the
          * codec descriptor name. */
3718             const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3719             const AVCodec *out_codec   = ost->enc;
3720             const char *decoder_name   = "?";
3721             const char *in_codec_name  = "?";
3722             const char *encoder_name   = "?";
3723             const char *out_codec_name = "?";
3724             const AVCodecDescriptor *desc;
3727                 decoder_name  = in_codec->name;
3728                 desc = avcodec_descriptor_get(in_codec->id);
3730                     in_codec_name = desc->name;
3731                 if (!strcmp(decoder_name, in_codec_name))
3732                     decoder_name = "native";
3736                 encoder_name   = out_codec->name;
3737                 desc = avcodec_descriptor_get(out_codec->id);
3739                     out_codec_name = desc->name;
3740                 if (!strcmp(encoder_name, out_codec_name))
3741                     encoder_name = "native";
3744             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3745                    in_codec_name, decoder_name,
3746                    out_codec_name, encoder_name);
3748         av_log(NULL, AV_LOG_INFO, "\n");
3752         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3756     transcode_init_done = 1;
3761 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3762 static int need_output(void)
/* A stream no longer needs output once it is finished, its file hit the
 * size limit, or it reached max_frames (in which case every stream of the
 * same output file is closed as well). */
3766     for (i = 0; i < nb_output_streams; i++) {
3767         OutputStream *ost    = output_streams[i];
3768         OutputFile *of       = output_files[ost->file_index];
3769         AVFormatContext *os  = output_files[ost->file_index]->ctx;
3771         if (ost->finished ||
3772             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3774         if (ost->frame_number >= ost->max_frames) {
             /* max_frames reached: close all streams of this output file */
3776             for (j = 0; j < of->ctx->nb_streams; j++)
3777                 close_output_stream(output_streams[of->ost_index + j]);
3788 * Select the output stream to process.
3790 * @return selected output stream, or NULL if none available
3792 static OutputStream *choose_output(void)
/* Select the unfinished output stream with the smallest current DTS
 * (rescaled to a common time base) so output stays interleaved; streams
 * with no DTS yet sort first (INT64_MIN). Returns NULL if the best
 * candidate is currently unavailable. */
3795     int64_t opts_min = INT64_MAX;
3796     OutputStream *ost_min = NULL;
3798     for (i = 0; i < nb_output_streams; i++) {
3799         OutputStream *ost = output_streams[i];
3800         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3801                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3803         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3804             av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
         /* skip streams that are neither initialized nor done with input */
3806         if (!ost->initialized && !ost->inputs_done)
3809         if (!ost->finished && opts < opts_min) {
3811             ost_min  = ost->unavailable ? NULL : ost;
3817 static void set_tty_echo(int on)
/* Enable (on != 0) or disable terminal echo on stdin via termios;
 * silently does nothing if stdin is not a tty (tcgetattr fails). */
3821     if (tcgetattr(0, &tty) == 0) {
3822         if (on) tty.c_lflag |= ECHO;
3823         else    tty.c_lflag &= ~ECHO;
3824         tcsetattr(0, TCSANOW, &tty);
3829 static int check_keyboard_interaction(int64_t cur_time)
/* Poll for interactive keypresses (at most every 100ms, never in daemon
 * mode) and act on them: verbosity, QP histogram, packet dumping, sending
 * commands to filters, debug flags, or help. Returns AVERROR_EXIT when the
 * user (or a signal) requested termination, 0 otherwise. */
3832     static int64_t last_time;
3833     if (received_nb_signals)
3834         return AVERROR_EXIT;
3835     /* read_key() returns 0 on EOF */
3836     if(cur_time - last_time >= 100000 && !run_as_daemon){
3838         last_time = cur_time;
3842         return AVERROR_EXIT;
3843     if (key == '+') av_log_set_level(av_log_get_level()+10);
3844     if (key == '-') av_log_set_level(av_log_get_level()-10);
3845     if (key == 's') qp_hist     ^= 1;
3848             do_hex_dump = do_pkt_dump = 0;
3849         } else if(do_pkt_dump){
3853             av_log_set_level(AV_LOG_DEBUG);
     /* 'c'/'C': read a command line from the tty and send ('c': first
      * matching filter only) or queue ('C': all filters) it to the graphs. */
3855     if (key == 'c' || key == 'C'){
3856         char buf[4096], target[64], command[256], arg[256] = {0};
3859         fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3862         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3867         fprintf(stderr, "\n");
3869             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3870             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3871                    target, time, command, arg);
3872             for (i = 0; i < nb_filtergraphs; i++) {
3873                 FilterGraph *fg = filtergraphs[i];
3876                         ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3877                                                           key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3878                         fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3879                     } else if (key == 'c') {
3880                         fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3881                         ret = AVERROR_PATCHWELCOME;
3883                         ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3885                             fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3890             av_log(NULL, AV_LOG_ERROR,
3891                    "Parse error, at least 3 arguments were expected, "
3892                    "only %d given in string '%s'\n", n, buf);
     /* 'd'/'D': cycle or prompt for codec debug flags and apply them to all
      * input and output codec contexts. */
3895     if (key == 'd' || key == 'D'){
3898             debug = input_streams[0]->st->codec->debug<<1;
3899             if(!debug) debug = 1;
3900             while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3907             while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3912             fprintf(stderr, "\n");
3913             if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3914                 fprintf(stderr,"error parsing debug value\n");
3916         for(i=0;i<nb_input_streams;i++) {
3917             input_streams[i]->st->codec->debug = debug;
3919         for(i=0;i<nb_output_streams;i++) {
3920             OutputStream *ost = output_streams[i];
3921             ost->enc_ctx->debug = debug;
3923         if(debug) av_log_set_level(AV_LOG_DEBUG);
3924         fprintf(stderr,"debug=%d\n", debug);
3927         fprintf(stderr, "key    function\n"
3928                         "?      show this help\n"
3929                         "+      increase verbosity\n"
3930                         "-      decrease verbosity\n"
3931                         "c      Send command to first matching filter supporting it\n"
3932                         "C      Send/Queue command to all matching filters\n"
3933                         "D      cycle through available debug modes\n"
3934                         "h      dump packets/hex press to cycle through the 3 states\n"
3936                         "s      Show QP histogram\n"
3943 static void *input_thread(void *arg)
/* Per-input-file reader thread: read packets with av_read_frame() and push
 * them to the main thread through the file's thread message queue. On a
 * fatal read error the error is propagated to the receiver side of the
 * queue. Non-blocking sends fall back to a blocking retry with a warning
 * suggesting a larger -thread_queue_size. */
3946     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3951         ret = av_read_frame(f->ctx, &pkt);
3953         if (ret == AVERROR(EAGAIN)) {
         /* read failed (or EOF): tell the consumer and stop reading */
3958             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3961         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3962         if (flags && ret == AVERROR(EAGAIN)) {
             /* queue full in non-blocking mode: retry blocking and warn */
3964             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3965             av_log(f->ctx, AV_LOG_WARNING,
3966                    "Thread message queue blocking; consider raising the "
3967                    "thread_queue_size option (current value: %d)\n",
3968                    f->thread_queue_size);
3971             if (ret != AVERROR_EOF)
3972                 av_log(f->ctx, AV_LOG_ERROR,
3973                        "Unable to send packet to main thread: %s\n",
             /* send failed: drop the packet and propagate the error */
3975             av_packet_unref(&pkt);
3976             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3984 static void free_input_threads(void)
/* Shut down all input reader threads: signal EOF to the sender side, drain
 * and unref any queued packets, join each thread, then free its queue. */
3988     for (i = 0; i < nb_input_files; i++) {
3989         InputFile *f = input_files[i];
3992         if (!f || !f->in_thread_queue)
3994         av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3995         while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3996             av_packet_unref(&pkt);
3998         pthread_join(f->thread, NULL);
4000         av_thread_message_queue_free(&f->in_thread_queue);
4004 static int init_input_threads(void)
/* Create one reader thread (and message queue) per input file. Skipped
 * entirely when there is a single input file. Files whose I/O context is
 * non-seekable (and not the lavfi pseudo-demuxer) use non-blocking sends.
 * Returns 0 on success or a negative AVERROR. */
4008     if (nb_input_files == 1)
4011     for (i = 0; i < nb_input_files; i++) {
4012         InputFile *f = input_files[i];
4014         if (f->ctx->pb ? !f->ctx->pb->seekable :
4015             strcmp(f->ctx->iformat->name, "lavfi"))
4016             f->non_blocking = 1;
4017         ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4018                                             f->thread_queue_size, sizeof(AVPacket));
4022         if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4023             av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4024             av_thread_message_queue_free(&f->in_thread_queue);
             /* pthread_create returns a positive errno value */
4025             return AVERROR(ret);
4031 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
/* Receive one packet from this file's reader thread queue (multi-input
 * case); non-blocking when the file is flagged non_blocking. */
4033     return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4035                                         AV_THREAD_MESSAGE_NONBLOCK : 0);
4039 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Fetch the next packet for an input file. Under -re style rate emulation,
 * return EAGAIN while any stream's DTS is ahead of wall-clock time since
 * the stream started. With multiple inputs the packet comes from the
 * reader-thread queue, otherwise directly from av_read_frame(). */
4043         for (i = 0; i < f->nb_streams; i++) {
4044             InputStream *ist = input_streams[f->ist_index + i];
             /* compare stream DTS (in microseconds) against elapsed time */
4045             int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4046             int64_t now = av_gettime_relative() - ist->start;
4048                 return AVERROR(EAGAIN);
4053     if (nb_input_files > 1)
4054         return get_input_packet_mt(f, pkt);
4056     return av_read_frame(f->ctx, pkt);
4059 static int got_eagain(void)
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input hit EAGAIN this iteration). */
4062     for (i = 0; i < nb_output_streams; i++)
4063         if (output_streams[i]->unavailable)
4068 static void reset_eagain(void)
/* Clear the per-file eagain flags and per-stream unavailable flags so the
 * next main-loop iteration retries all inputs/outputs. */
4071     for (i = 0; i < nb_input_files; i++)
4072         input_files[i]->eagain = 0;
4073     for (i = 0; i < nb_output_streams; i++)
4074         output_streams[i]->unavailable = 0;
4077 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4078 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4079                                AVRational time_base)
/* Keep *duration as the maximum of itself and tmp, comparing across their
 * respective time bases with av_compare_ts(); returns the time base that
 * the (possibly updated) *duration is expressed in. */
4085         return tmp_time_base;
4088     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4091         return tmp_time_base;
4097 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
/* Rewind an input file for -stream_loop: seek back to start_time, flush all
 * decoders, and update ifile->duration/time_base with the length of the
 * just-finished pass so looped timestamps can be offset correctly.
 * For the last-frame duration, audio sample counts are trusted over video
 * frame durations when audio is present. */
4100     AVCodecContext *avctx;
4101     int i, ret, has_audio = 0;
4102     int64_t duration = 0;
4104     ret = av_seek_frame(is, -1, is->start_time, 0);
4108     for (i = 0; i < ifile->nb_streams; i++) {
4109         ist   = input_streams[ifile->ist_index + i];
4110         avctx = ist->dec_ctx;
         /* flush the decoder so no state leaks into the next loop pass */
4113         if (ist->decoding_needed) {
4114             process_input_packet(ist, NULL, 1);
4115             avcodec_flush_buffers(avctx);
4118         /* duration is the length of the last frame in a stream
4119          * when audio stream is present we don't care about
4120          * last video frame length because it's not defined exactly */
4121         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4125     for (i = 0; i < ifile->nb_streams; i++) {
4126         ist   = input_streams[ifile->ist_index + i];
4127         avctx = ist->dec_ctx;
4130             if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4131                 AVRational sample_rate = {1, avctx->sample_rate};
4133                 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
         /* video: estimate last-frame duration from the configured or
          * average frame rate, falling back to 1 time-base unit */
4137             if (ist->framerate.num) {
4138                 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4139             } else if (ist->st->avg_frame_rate.num) {
4140                 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4141             } else duration = 1;
4143         if (!ifile->duration)
4144             ifile->time_base = ist->st->time_base;
4145         /* the total duration of the stream, max_pts - min_pts is
4146          * the duration of the stream without the last frame */
4147         duration += ist->max_pts - ist->min_pts;
4148         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
     /* a positive loop count is decremented once per completed pass */
4152     if (ifile->loop > 0)
4160 * - 0 -- one packet was read and processed
4161 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4162 * this function should be called again
4163 * - AVERROR_EOF -- this function should not be called again
4165 static int process_input(int file_index)
/* Read and process one packet from the given input file: handle EOF and
 * looping, discard packets for unknown/ignored streams, fix up timestamps
 * (wrap correction, ts_offset, ts_scale, loop-duration offset, DTS/PTS
 * discontinuity handling), attach stream-global side data to the first
 * packet, then hand the packet to process_input_packet(). */
4167     InputFile *ifile = input_files[file_index];
4168     AVFormatContext *is;
4176     ret = get_input_packet(ifile, &pkt);
4178     if (ret == AVERROR(EAGAIN)) {
     /* EOF (or error) with -stream_loop active: rewind and try again */
4182     if (ret < 0 && ifile->loop) {
4183         if ((ret = seek_to_start(ifile, is)) < 0)
4185             ret = get_input_packet(ifile, &pkt);
4186         if (ret == AVERROR(EAGAIN)) {
4192         if (ret != AVERROR_EOF) {
4193             print_error(is->filename, ret);
         /* real EOF: flush decoders and finish non-lavfi outputs */
4198         for (i = 0; i < ifile->nb_streams; i++) {
4199             ist = input_streams[ifile->ist_index + i];
4200             if (ist->decoding_needed) {
4201                 ret = process_input_packet(ist, NULL, 0);
4206             /* mark all outputs that don't go through lavfi as finished */
4207             for (j = 0; j < nb_output_streams; j++) {
4208                 OutputStream *ost = output_streams[j];
4210                 if (ost->source_index == ifile->ist_index + i &&
4211                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4212                     finish_output_stream(ost);
4216         ifile->eof_reached = 1;
4217         return AVERROR(EAGAIN);
4223         av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4224                          is->streams[pkt.stream_index]);
4226     /* the following test is needed in case new streams appear
4227        dynamically in stream : we ignore them */
4228     if (pkt.stream_index >= ifile->nb_streams) {
4229         report_new_stream(file_index, &pkt);
4230         goto discard_packet;
4233     ist = input_streams[ifile->ist_index + pkt.stream_index];
4235     ist->data_size += pkt.size;
4239         goto discard_packet;
4241     if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4242         av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4247         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4248                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4249                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4250                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4251                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4252                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4253                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4254                av_ts2str(input_files[ist->file_index]->ts_offset),
4255                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
     /* one-shot timestamp wrap correction for streams with <64 pts bits */
4258     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4259         int64_t stime, stime2;
4260         // Correcting starttime based on the enabled streams
4261         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4262         //       so we instead do it here as part of discontinuity handling
4263         if (   ist->next_dts == AV_NOPTS_VALUE
4264             && ifile->ts_offset == -is->start_time
4265             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4266             int64_t new_start_time = INT64_MAX;
4267             for (i=0; i<is->nb_streams; i++) {
4268                 AVStream *st = is->streams[i];
4269                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4271                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4273             if (new_start_time > is->start_time) {
4274                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4275                 ifile->ts_offset = -new_start_time;
4279         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4280         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4281         ist->wrap_correction_done = 1;
         /* unwrap DTS/PTS that already wrapped past the bit width; keep the
          * correction pending if a wrapped value was actually seen */
4283         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4284             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4285             ist->wrap_correction_done = 0;
4287         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4288             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4289             ist->wrap_correction_done = 0;
4293     /* add the stream-global side data to the first packet */
4294     if (ist->nb_packets == 1) {
4295         for (i = 0; i < ist->st->nb_side_data; i++) {
4296             AVPacketSideData *src_sd = &ist->st->side_data[i];
             /* display matrix is handled via autorotate, not copied here */
4299             if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4302             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4305             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4309             memcpy(dst_data, src_sd->data, src_sd->size);
     /* apply the accumulated file timestamp offset, then -itsscale */
4313     if (pkt.dts != AV_NOPTS_VALUE)
4314         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4315     if (pkt.pts != AV_NOPTS_VALUE)
4316         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4318     if (pkt.pts != AV_NOPTS_VALUE)
4319         pkt.pts *= ist->ts_scale;
4320     if (pkt.dts != AV_NOPTS_VALUE)
4321         pkt.dts *= ist->ts_scale;
     /* inter-stream discontinuity check: first DTS of a stream vs. the
      * file's last seen timestamp (TS_DISCONT formats only, not -copyts) */
4323     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4324     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4325          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4326         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4327         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4328         int64_t delta   = pkt_dts - ifile->last_ts;
4329         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4330             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
4331             ifile->ts_offset -= delta;
4332             av_log(NULL, AV_LOG_DEBUG,
4333                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4334                    delta, ifile->ts_offset);
4335             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4336             if (pkt.pts != AV_NOPTS_VALUE)
4337                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
     /* offset timestamps by the accumulated duration of previous loop
      * passes and track the stream's min/max PTS for the next pass */
4341     duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4342     if (pkt.pts != AV_NOPTS_VALUE) {
4343         pkt.pts += duration;
4344         ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4345         ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4348     if (pkt.dts != AV_NOPTS_VALUE)
4349         pkt.dts += duration;
     /* intra-stream discontinuity check against the predicted next DTS */
4351     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4352     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4353          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4354          pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4356         int64_t delta   = pkt_dts - ist->next_dts;
4357         if (is->iformat->flags & AVFMT_TS_DISCONT) {
4358             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4359                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
4360                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4361                 ifile->ts_offset -= delta;
4362                 av_log(NULL, AV_LOG_DEBUG,
4363                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4364                        delta, ifile->ts_offset);
4365                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4366                 if (pkt.pts != AV_NOPTS_VALUE)
4367                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
             /* non-DISCONT formats: drop wildly out-of-range DTS/PTS */
4370             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4371                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4372                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4373                 pkt.dts = AV_NOPTS_VALUE;
4375             if (pkt.pts != AV_NOPTS_VALUE){
4376                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4377                 delta   = pkt_pts - ist->next_dts;
4378                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4379                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4380                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4381                     pkt.pts = AV_NOPTS_VALUE;
4387     if (pkt.dts != AV_NOPTS_VALUE)
4388         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4391         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4392                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4393                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4394                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4395                av_ts2str(input_files[ist->file_index]->ts_offset),
4396                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4399     sub2video_heartbeat(ist, pkt.pts);
4401     process_input_packet(ist, &pkt, 0);
4404     av_packet_unref(&pkt);
4410 * Perform a step of transcoding for the specified filter graph.
4412 * @param[in] graph filter graph to consider
4413 * @param[out] best_ist input stream where a frame would allow to continue
4414 * @return 0 for success, <0 for error
4416 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
/* Pull from the filtergraph: request the oldest frame; on success reap the
 * output, on EOF reap and close the graph's output streams, on EAGAIN find
 * the input filter with the most failed buffer requests (whose file is
 * neither stalled nor at EOF) and report its input stream via *best_ist.
 * If no input can make progress, mark all outputs unavailable. */
4419     int nb_requests, nb_requests_max = 0;
4420     InputFilter *ifilter;
4424     ret = avfilter_graph_request_oldest(graph->graph);
4426         return reap_filters(0);
4428     if (ret == AVERROR_EOF) {
4429         ret = reap_filters(1);
4430         for (i = 0; i < graph->nb_outputs; i++)
4431             close_output_stream(graph->outputs[i]->ost);
4434     if (ret != AVERROR(EAGAIN))
     /* EAGAIN: pick the most-starved input that can still deliver frames */
4437     for (i = 0; i < graph->nb_inputs; i++) {
4438         ifilter = graph->inputs[i];
4440         if (input_files[ist->file_index]->eagain ||
4441             input_files[ist->file_index]->eof_reached)
4443         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4444         if (nb_requests > nb_requests_max) {
4445             nb_requests_max = nb_requests;
4451         for (i = 0; i < graph->nb_outputs; i++)
4452             graph->outputs[i]->ost->unavailable = 1;
4458 * Run a single step of transcoding.
4460 * @return 0 for success, <0 for error
4462 static int transcode_step(void)
/* One iteration of the transcode loop: pick the neediest output stream,
 * (re)configure its filtergraph if all input formats are now known, pull
 * from the graph or read directly from the source input, then reap any
 * filtered frames. Returns 0 to continue, <0 on error. */
4465     InputStream  *ist = NULL;
4468     ost = choose_output();
4475             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
     /* graph exists but is not configured yet: configure it once every
      * input filter knows its format */
4479     if (ost->filter && !ost->filter->graph->graph) {
4480         if (ifilter_has_all_input_formats(ost->filter->graph)) {
4481             ret = configure_filtergraph(ost->filter->graph);
4483                 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4489     if (ost->filter && ost->filter->graph->graph) {
4490         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4494     } else if (ost->filter) {
         /* unconfigured graph: wait for any input that can still produce */
4496         for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4497             InputFilter *ifilter = ost->filter->graph->inputs[i];
4498             if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
         /* all graph inputs are exhausted */
4504             ost->inputs_done = 1;
4508         av_assert0(ost->source_index >= 0);
4509         ist = input_streams[ost->source_index];
4512     ret = process_input(ist->file_index);
4513     if (ret == AVERROR(EAGAIN)) {
4514         if (input_files[ist->file_index]->eagain)
4515             ost->unavailable = 1;
4520         return ret == AVERROR_EOF ? 0 : ret;
4522     return reap_filters(0);
4526 * The following code is the main loop of the file converter
4528 static int transcode(void)
/* Main conversion driver: initialize everything, run transcode_step() in a
 * loop until no output is needed or a termination signal arrives, then
 * flush decoders, write trailers, print the final report, and release
 * per-stream resources. Returns 0 on success or a negative AVERROR. */
4531     AVFormatContext *os;
4534     int64_t timer_start;
4535     int64_t total_packets_written = 0;
4537     ret = transcode_init();
4541     if (stdin_interaction) {
4542         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4545     timer_start = av_gettime_relative();
4548     if ((ret = init_input_threads()) < 0)
4552     while (!received_sigterm) {
4553         int64_t cur_time= av_gettime_relative();
4555         /* if 'q' pressed, exits */
4556         if (stdin_interaction)
4557             if (check_keyboard_interaction(cur_time) < 0)
4560         /* check if there's any stream where output is still needed */
4561         if (!need_output()) {
4562             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4566         ret = transcode_step();
4567         if (ret < 0 && ret != AVERROR_EOF) {
4569             av_strerror(ret, errbuf, sizeof(errbuf));
4571             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4575         /* dump report by using the output first video and audio streams */
4576         print_report(0, timer_start, cur_time);
4579     free_input_threads();
4582     /* at the end of stream, we must flush the decoder buffers */
4583     for (i = 0; i < nb_input_streams; i++) {
4584         ist = input_streams[i];
4585         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4586             process_input_packet(ist, NULL, 0);
4593     /* write the trailer if needed and close file */
4594     for (i = 0; i < nb_output_files; i++) {
4595         os = output_files[i]->ctx;
4596         if (!output_files[i]->header_written) {
4597             av_log(NULL, AV_LOG_ERROR,
4598                    "Nothing was written into output file %d (%s), because "
4599                    "at least one of its streams received no packets.\n",
4603         if ((ret = av_write_trailer(os)) < 0) {
4604             av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4610     /* dump report by using the first video and audio streams */
4611     print_report(1, timer_start, av_gettime_relative());
4613     /* close each encoder */
4614     for (i = 0; i < nb_output_streams; i++) {
4615         ost = output_streams[i];
4616         if (ost->encoding_needed) {
4617             av_freep(&ost->enc_ctx->stats_in);
4619         total_packets_written += ost->packets_written;
     /* -abort_on empty_output: fail hard if nothing was ever muxed */
4622     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4623         av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4627     /* close each decoder */
4628     for (i = 0; i < nb_input_streams; i++) {
4629         ist = input_streams[i];
4630         if (ist->decoding_needed) {
4631             avcodec_close(ist->dec_ctx);
4632             if (ist->hwaccel_uninit)
4633                 ist->hwaccel_uninit(ist->dec_ctx);
4637     av_buffer_unref(&hw_device_ctx);
4644     free_input_threads();
     /* release per-output-stream auxiliary resources (pass logs, options) */
4647     if (output_streams) {
4648         for (i = 0; i < nb_output_streams; i++) {
4649             ost = output_streams[i];
4652                     if (fclose(ost->logfile))
4653                         av_log(NULL, AV_LOG_ERROR,
4654                                "Error closing logfile, loss of information possible: %s\n",
4655                                av_err2str(AVERROR(errno)));
4656                     ost->logfile = NULL;
4658                 av_freep(&ost->forced_kf_pts);
4659                 av_freep(&ost->apad);
4660                 av_freep(&ost->disposition);
4661                 av_dict_free(&ost->encoder_opts);
4662                 av_dict_free(&ost->sws_dict);
4663                 av_dict_free(&ost->swr_opts);
4664                 av_dict_free(&ost->resample_opts);
4672 static int64_t getutime(void)
/* Return this process's consumed user CPU time in microseconds, using
 * getrusage() on POSIX or GetProcessTimes() on Windows; falls back to
 * wall-clock time when neither is available. */
4675     struct rusage rusage;
4677     getrusage(RUSAGE_SELF, &rusage);
4678     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4679 #elif HAVE_GETPROCESSTIMES
4681     FILETIME c, e, k, u;
4682     proc = GetCurrentProcess();
4683     GetProcessTimes(proc, &c, &e, &k, &u);
     /* FILETIME is in 100ns units; divide by 10 to get microseconds */
4684     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4686     return av_gettime_relative();
/* getmaxrss(): peak resident memory usage of this process, in bytes.
 * Uses getrusage() where ru_maxrss exists, GetProcessMemoryInfo() on
 * Windows.  (The #else fallback and #endif are elided from this view.) */
4690 static int64_t getmaxrss(void)
4692 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4693 struct rusage rusage;
4694 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is in kilobytes; * 1024 converts to bytes. */
4695 return (int64_t)rusage.ru_maxrss * 1024;
4696 #elif HAVE_GETPROCESSMEMORYINFO
/* (`HANDLE proc;` declaration elided from this view.) */
4698 PROCESS_MEMORY_COUNTERS memcounters;
4699 proc = GetCurrentProcess();
/* cb must be set to the structure size before the call. */
4700 memcounters.cb = sizeof(memcounters);
4701 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* NOTE(review): PeakPagefileUsage is peak commit charge, not strictly RSS —
 * kept as the closest available Windows counterpart. */
4702 return memcounters.PeakPagefileUsage;
/* No-op av_log callback: discards all log output.  Installed by main() when
 * invoked with "-d" to silence logging.  (Empty body elided from this view.) */
4708 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: set up logging and cleanup hooks, register all
 * components, parse the command line, run the transcode, and report.
 * NOTE(review): several interior lines are elided from this view; comments
 * cover only the statements shown. */
4712 int main(int argc, char **argv)
/* Ensure ffmpeg_cleanup() runs on every exit_program() path. */
4719 register_exit(ffmpeg_cleanup);
4721 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4723 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Pre-scan for -loglevel/-report before anything else logs — presumably so
 * messages emitted during registration honor it (TODO confirm). */
4724 parse_loglevel(argc, argv, options);
/* "-d" as the very first argument: run detached/quiet — all logging is
 * routed to the null callback. */
4726 if(argc>1 && !strcmp(argv[1], "-d")){
4728 av_log_set_callback(log_callback_null);
/* Register every codec, device, filter and (de)muxer, and init networking. */
4733 avcodec_register_all();
4735 avdevice_register_all();
4737 avfilter_register_all();
4739 avformat_network_init();
4741 show_banner(argc, argv, options);
4743 /* parse options and open all input/output files */
4744 ret = ffmpeg_parse_options(argc, argv);
/* Bare invocation (no inputs, no outputs): point the user at the docs. */
4748 if (nb_output_files <= 0 && nb_input_files == 0) {
4750 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4754 /* file converter / grab */
4755 if (nb_output_files <= 0) {
4756 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Inputs are deliberately optional (e.g. lavfi-only graphs) — check kept
 * commented out upstream. */
4760 // if (nb_input_files == 0) {
4761 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Scan outputs for a non-"rtp" muxer — loop body elided in this view
 * (presumably gates benchmark/stats behavior; TODO confirm). */
4765 for (i = 0; i < nb_output_files; i++) {
4766 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Time the whole transcode in user CPU microseconds (see getutime()). */
4770 current_time = ti = getutime();
4771 if (transcode() < 0)
4773 ti = getutime() - ti;
4775 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4777 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4778 decode_error_stat[0], decode_error_stat[1]);
/* Fail when the decode-error fraction exceeds -max_error_rate; the error
 * branch body is elided in this view. */
4779 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit code 255 when interrupted by a signal, else the accumulated status. */
4782 exit_program(received_nb_signals ? 255 : main_return_code);
4783 return main_return_code;