2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banners/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* File handle for -vstats output; opened lazily, closed in cleanup. */
112 static FILE *vstats_file;
/* Constant names available inside -force_key_frames expressions.
 * NOTE(review): initializer entries are elided in this listing. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 static int ifilter_has_all_input_formats(FilterGraph *fg);
128 static int run_as_daemon = 0;
/* Frame duplication/drop accounting for video sync; dup_warning is the
 * next threshold at which a "More than N frames duplicated" warning fires. */
129 static int nb_frames_dup = 0;
130 static unsigned dup_warning = 1000;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
134 static int want_sdp = 1;
136 static int current_time;
137 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets (see do_subtitle_out). */
139 static uint8_t *subtitle_out;
/* Global stream/file tables shared with ffmpeg_opt.c / ffmpeg_filter.c. */
141 InputStream **input_streams = NULL;
142 int nb_input_streams = 0;
143 InputFile **input_files = NULL;
144 int nb_input_files = 0;
146 OutputStream **output_streams = NULL;
147 int nb_output_streams = 0;
148 OutputFile **output_files = NULL;
149 int nb_output_files = 0;
151 FilterGraph **filtergraphs;
156 /* init terminal so that we can grab keys */
157 static struct termios oldtty;
158 static int restore_tty;
162 static void free_input_threads(void);
166 Convert subtitles to video with alpha to insert them in filter graphs.
167 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank RGB32 canvas in ist->sub2video.frame, sized from the
 * decoder dimensions when available, else the preconfigured w/h.
 * NOTE(review): this listing is elided — declarations/returns/braces of
 * the original body are not shown. */
170 static int sub2video_get_blank_frame(InputStream *ist)
173 AVFrame *frame = ist->sub2video.frame;
175 av_frame_unref(frame);
176 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
177 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
178 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte buffer alignment requested from av_frame_get_buffer() */
179 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* zero fill => fully transparent canvas */
181 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas at
 * (r->x, r->y), expanding palette indices through r->data[1].
 * Rejects non-bitmap rects and rects overflowing the w x h canvas.
 * NOTE(review): elided listing; some body lines are not shown. */
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
188 uint32_t *pal, *dst2;
192 if (r->type != SUBTITLE_BITMAP) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
196 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
198 r->x, r->y, r->w, r->h, w, h
/* advance to top-left corner of the rect; 4 bytes per RGB32 pixel */
203 dst += r->y * dst_linesize + r->x * 4;
205 pal = (uint32_t *)r->data[1];
206 for (y = 0; y < r->h; y++) {
207 dst2 = (uint32_t *)dst;
/* palette lookup per source byte */
209 for (x = 0; x < r->w; x++)
210 *(dst2++) = pal[*(src2++)];
212 src += r->linesize[0];
/* Push the current sub2video canvas (stamped with pts) into every filter
 * fed by this input stream; KEEP_REF so the frame can be re-sent later.
 * NOTE(review): elided listing; some body lines are not shown. */
216 static void sub2video_push_ref(InputStream *ist, int64_t pts)
218 AVFrame *frame = ist->sub2video.frame;
221 av_assert1(frame->data[0]);
222 ist->sub2video.last_pts = frame->pts = pts;
223 for (i = 0; i < ist->nb_filters; i++)
224 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
225 AV_BUFFERSRC_FLAG_KEEP_REF |
226 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto the sub2video canvas and push it downstream.
 * Computes start/end pts in the stream timebase from the subtitle's
 * display times; with a NULL sub (heartbeat/flush) reuses end_pts.
 * NOTE(review): elided listing; some body lines are not shown. */
229 void sub2video_update(InputStream *ist, AVSubtitle *sub)
231 AVFrame *frame = ist->sub2video.frame;
235 int64_t pts, end_pts;
/* display times are in ms; convert via AV_TIME_BASE to stream timebase */
240 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
241 AV_TIME_BASE_Q, ist->st->time_base);
242 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
243 AV_TIME_BASE_Q, ist->st->time_base);
244 num_rects = sub->num_rects;
246 pts = ist->sub2video.end_pts;
250 if (sub2video_get_blank_frame(ist) < 0) {
251 av_log(ist->dec_ctx, AV_LOG_ERROR,
252 "Impossible to get a blank canvas.\n");
255 dst = frame->data [0];
256 dst_linesize = frame->linesize[0];
/* draw every rectangle of the subtitle onto the canvas */
257 for (i = 0; i < num_rects; i++)
258 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
259 sub2video_push_ref(ist, pts);
260 ist->sub2video.end_pts = end_pts;
/* Called when any packet of this input file is read: re-send the current
 * sub2video frame to sibling subtitle streams so filters (e.g. overlay)
 * waiting on a subtitle input do not stall video frames in the graph.
 * NOTE(review): elided listing; some body lines are not shown. */
263 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
265 InputFile *infile = input_files[ist->file_index];
269 /* When a frame is read from a file, examine all sub2video streams in
270 the same file and send the sub2video frame again. Otherwise, decoded
271 video frames could be accumulating in the filter graph while a filter
272 (possibly overlay) is desperately waiting for a subtitle frame. */
273 for (i = 0; i < infile->nb_streams; i++) {
274 InputStream *ist2 = input_streams[infile->ist_index + i];
275 if (!ist2->sub2video.frame)
277 /* subtitles seem to be usually muxed ahead of other streams;
278 if not, subtracting a larger time here is necessary */
279 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
280 /* do not send the heartbeat frame if the subtitle is already ahead */
281 if (pts2 <= ist2->sub2video.last_pts)
/* refresh the canvas (blank it) if the subtitle display window ended */
283 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
284 sub2video_update(ist2, NULL);
285 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
286 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
288 sub2video_push_ref(ist2, pts2);
/* End-of-stream for a sub2video input: send a final (blank) update if a
 * subtitle is still displayed, then send NULL (EOF) to each buffersrc.
 * NOTE(review): elided listing; some body lines are not shown. */
292 static void sub2video_flush(InputStream *ist)
296 if (ist->sub2video.end_pts < INT64_MAX)
297 sub2video_update(ist, NULL);
298 for (i = 0; i < ist->nb_filters; i++)
299 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
302 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the tty attributes saved
 * in oldtty. NOTE(review): elided listing; guards around these calls
 * (e.g. the restore_tty check) are not shown. */
304 static void term_exit_sigsafe(void)
308 tcsetattr (0, TCSANOW, &oldtty);
314 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/termination state shared between the handler and main loop. */
318 static volatile int received_sigterm = 0;
319 static volatile int received_nb_signals = 0;
320 static volatile int transcode_init_done = 0;
321 static volatile int ffmpeg_exited = 0;
322 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many; after more
 * than 3 signals, write() a message (async-signal-safe) and hard-exit.
 * NOTE(review): elided listing; the function's return-type line and the
 * hard-exit call are not shown. */
325 sigterm_handler(int sig)
327 received_sigterm = sig;
328 received_nb_signals++;
330 if(received_nb_signals > 3) {
331 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
332 strlen("Received > 3 system signals, hard exiting\n"));
338 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events to the POSIX-style
 * sigterm_handler (SIGINT for Ctrl-C/Break, SIGTERM for close/logoff/
 * shutdown), then stall so the main thread can clean up before the OS
 * hard-terminates the process.
 * NOTE(review): elided listing; switch cases/returns are not all shown. */
339 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
341 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
346 case CTRL_BREAK_EVENT:
347 sigterm_handler(SIGINT);
350 case CTRL_CLOSE_EVENT:
351 case CTRL_LOGOFF_EVENT:
352 case CTRL_SHUTDOWN_EVENT:
353 sigterm_handler(SIGTERM);
354 /* Basically, with these 3 events, when we return from this method the
355 process is hard terminated, so stall as long as we need to
356 to try and let the main thread(s) clean up and gracefully terminate
357 (we have at most 5 seconds, but should be done far before that). */
358 while (!ffmpeg_exited) {
364 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): presumably the body of term_init() — the function header
 * line is elided from this listing; confirm against the full source.
 * Puts the tty into raw-ish mode for interactive key handling (only when
 * not daemonized and stdin interaction is enabled), then installs the
 * termination signal handlers. */
373 if (!run_as_daemon && stdin_interaction) {
375 if (tcgetattr (0, &tty) == 0) {
/* disable input translation/flow control; keep output post-processing */
379 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
380 |INLCR|IGNCR|ICRNL|IXON);
381 tty.c_oflag |= OPOST;
/* no echo, no canonical (line-buffered) mode */
382 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
383 tty.c_cflag &= ~(CSIZE|PARENB);
388 tcsetattr (0, TCSANOW, &tty);
390 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
394 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
395 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
397 signal(SIGXCPU, sigterm_handler);
399 #if HAVE_SETCONSOLECTRLHANDLER
400 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404 /* read a key without blocking */
/* Non-blocking single-key read. POSIX path polls stdin via select();
 * Windows path peeks the console/pipe handle instead.
 * NOTE(review): heavily elided listing — most of the body (timeout
 * setup, actual read, return paths) is not shown. */
405 static int read_key(void)
417 n = select(1, &rfds, NULL, NULL, &tv);
426 # if HAVE_PEEKNAMEDPIPE
428 static HANDLE input_handle;
431 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode failing => stdin is a pipe, not a console */
432 is_pipe = !GetConsoleMode(input_handle, &dw);
436 /* When running under a GUI, you will end here. */
437 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
438 // input pipe may have been closed by the program that ran ffmpeg
456 static int decode_interrupt_cb(void *ctx)
458 return received_nb_signals > transcode_init_done;
461 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown on exit: drain and free every filtergraph, output
 * file/stream, input file/stream, and miscellaneous buffers; close the
 * vstats file; deinit networking; report how the run ended.
 * NOTE(review): elided listing — many braces, declarations and guard
 * conditions of the original body are not shown. */
463 static void ffmpeg_cleanup(int ret)
468 int maxrss = getmaxrss() / 1024;
469 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: drain queued frames/subtitles, then free structs --- */
472 for (i = 0; i < nb_filtergraphs; i++) {
473 FilterGraph *fg = filtergraphs[i];
474 avfilter_graph_free(&fg->graph);
475 for (j = 0; j < fg->nb_inputs; j++) {
476 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
478 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
479 sizeof(frame), NULL);
480 av_frame_free(&frame);
482 av_fifo_free(fg->inputs[j]->frame_queue);
483 if (fg->inputs[j]->ist->sub2video.sub_queue) {
484 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
486 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
487 &sub, sizeof(sub), NULL);
488 avsubtitle_free(&sub);
490 av_fifo_free(fg->inputs[j]->ist->sub2video.sub_queue);
492 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
493 av_freep(&fg->inputs[j]->name);
494 av_freep(&fg->inputs[j]);
496 av_freep(&fg->inputs);
497 for (j = 0; j < fg->nb_outputs; j++) {
498 av_freep(&fg->outputs[j]->name);
499 av_freep(&fg->outputs[j]->formats);
500 av_freep(&fg->outputs[j]->channel_layouts);
501 av_freep(&fg->outputs[j]->sample_rates);
502 av_freep(&fg->outputs[j]);
504 av_freep(&fg->outputs);
505 av_freep(&fg->graph_desc);
507 av_freep(&filtergraphs[i]);
509 av_freep(&filtergraphs);
511 av_freep(&subtitle_out);
/* --- output files: close pb for file-backed muxers, free contexts --- */
514 for (i = 0; i < nb_output_files; i++) {
515 OutputFile *of = output_files[i];
520 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
522 avformat_free_context(s);
523 av_dict_free(&of->opts);
525 av_freep(&output_files[i]);
/* --- output streams: free bsf chain, frames, parsers, queues --- */
527 for (i = 0; i < nb_output_streams; i++) {
528 OutputStream *ost = output_streams[i];
533 for (j = 0; j < ost->nb_bitstream_filters; j++)
534 av_bsf_free(&ost->bsf_ctx[j]);
535 av_freep(&ost->bsf_ctx);
536 av_freep(&ost->bsf_extradata_updated);
538 av_frame_free(&ost->filtered_frame);
539 av_frame_free(&ost->last_frame);
540 av_dict_free(&ost->encoder_opts);
542 av_parser_close(ost->parser);
543 avcodec_free_context(&ost->parser_avctx);
545 av_freep(&ost->forced_keyframes);
546 av_expr_free(ost->forced_keyframes_pexpr);
547 av_freep(&ost->avfilter);
548 av_freep(&ost->logfile_prefix);
550 av_freep(&ost->audio_channels_map);
551 ost->audio_channels_mapped = 0;
553 av_dict_free(&ost->sws_dict);
555 avcodec_free_context(&ost->enc_ctx);
556 avcodec_parameters_free(&ost->ref_par);
/* drain packets still buffered for the not-yet-initialized muxer */
558 if (ost->muxing_queue) {
559 while (av_fifo_size(ost->muxing_queue)) {
561 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
562 av_packet_unref(&pkt);
564 av_fifo_freep(&ost->muxing_queue);
567 av_freep(&output_streams[i]);
570 free_input_threads();
/* --- input files and streams --- */
572 for (i = 0; i < nb_input_files; i++) {
573 avformat_close_input(&input_files[i]->ctx);
574 av_freep(&input_files[i]);
576 for (i = 0; i < nb_input_streams; i++) {
577 InputStream *ist = input_streams[i];
579 av_frame_free(&ist->decoded_frame);
580 av_frame_free(&ist->filter_frame);
581 av_dict_free(&ist->decoder_opts);
582 avsubtitle_free(&ist->prev_sub.subtitle);
583 av_frame_free(&ist->sub2video.frame);
584 av_freep(&ist->filters);
585 av_freep(&ist->hwaccel_device);
586 av_freep(&ist->dts_buffer);
588 avcodec_free_context(&ist->dec_ctx);
590 av_freep(&input_streams[i]);
/* fclose() flushes; a failure here can mean lost vstats data */
594 if (fclose(vstats_file))
595 av_log(NULL, AV_LOG_ERROR,
596 "Error closing vstats file, loss of information possible: %s\n",
597 av_err2str(AVERROR(errno)));
599 av_freep(&vstats_filename);
601 av_freep(&input_streams);
602 av_freep(&input_files);
603 av_freep(&output_streams);
604 av_freep(&output_files);
608 avformat_network_deinit();
610 if (received_sigterm) {
611 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
612 (int) received_sigterm);
613 } else if (ret && transcode_init_done) {
614 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
620 void remove_avoptions(AVDictionary **a, AVDictionary *b)
622 AVDictionaryEntry *t = NULL;
624 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
625 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed by the
 * library (i.e. the user passed an unknown option).
 * NOTE(review): elided listing — the termination call after the fatal
 * log is not shown. */
629 void assert_avoptions(AVDictionary *m)
631 AVDictionaryEntry *t;
632 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
633 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* NOTE(review): body elided from this listing — only the signature is
 * visible. Presumably aborts with a diagnostic about experimental
 * codecs; confirm against the full source. */
638 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log CPU time elapsed since the previous call,
 * labeled by the printf-style fmt, then reset the reference time.
 * NOTE(review): elided listing — va_start/va_end, the NULL-fmt branch
 * and the current_time update are not shown. */
643 static void update_benchmark(const char *fmt, ...)
645 if (do_benchmark_all) {
646 int64_t t = getutime();
652 vsnprintf(buf, sizeof(buf), fmt, va);
654 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
660 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
663 for (i = 0; i < nb_output_streams; i++) {
664 OutputStream *ost2 = output_streams[i];
665 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer. If the muxer header is not yet written,
 * buffer the packet in ost->muxing_queue (growing it up to
 * max_muxing_queue_size). Otherwise: enforce -frames limits, extract
 * quality side data for video, fix CFR durations, rescale timestamps to
 * the stream timebase, repair invalid/non-monotonic DTS, update stats
 * and finally av_interleaved_write_frame(). The packet is always
 * unreferenced before returning.
 * NOTE(review): elided listing — many braces, returns and some argument
 * lines of the original body are not shown. */
669 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
671 AVFormatContext *s = of->ctx;
672 AVStream *st = ost->st;
675 if (!of->header_written) {
676 AVPacket tmp_pkt = {0};
677 /* the muxer is not initialized yet, buffer the packet */
678 if (!av_fifo_space(ost->muxing_queue)) {
/* grow geometrically, capped by -max_muxing_queue_size */
679 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
680 ost->max_muxing_queue_size);
681 if (new_size <= av_fifo_size(ost->muxing_queue)) {
682 av_log(NULL, AV_LOG_ERROR,
683 "Too many packets buffered for output stream %d:%d.\n",
684 ost->file_index, ost->st->index);
687 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
691 ret = av_packet_ref(&tmp_pkt, pkt);
694 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
695 av_packet_unref(pkt);
/* vsync drop / async < 0: let the muxer regenerate timestamps */
699 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
700 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
701 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
704 * Audio encoders may split the packets -- #frames in != #packets out.
705 * But there is no reordering, so we can limit the number of output packets
706 * by simply dropping them here.
707 * Counting encoded video frames needs to be done separately because of
708 * reordering, see do_video_out()
710 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
711 if (ost->frame_number >= ost->max_frames) {
712 av_packet_unref(pkt);
/* quality/error stats side data attached by the encoder */
717 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
719 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
721 ost->quality = sd ? AV_RL32(sd) : -1;
722 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
724 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
726 ost->error[i] = AV_RL64(sd + 8 + 8*i);
731 if (ost->frame_rate.num && ost->is_cfr) {
732 if (pkt->duration > 0)
733 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
734 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
739 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
741 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
742 if (pkt->dts != AV_NOPTS_VALUE &&
743 pkt->pts != AV_NOPTS_VALUE &&
744 pkt->dts > pkt->pts) {
745 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
747 ost->file_index, ost->st->index);
/* replace dts by the median of the three candidate timestamps */
749 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
750 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
751 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
753 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
754 pkt->dts != AV_NOPTS_VALUE &&
755 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
756 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* strict formats require strictly increasing dts */
757 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
758 if (pkt->dts < max) {
759 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
760 av_log(s, loglevel, "Non-monotonous DTS in output stream "
761 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
762 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
764 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
767 av_log(s, loglevel, "changing to %"PRId64". This may result "
768 "in incorrect timestamps in the output file.\n",
770 if (pkt->pts >= pkt->dts)
771 pkt->pts = FFMAX(pkt->pts, max);
776 ost->last_mux_dts = pkt->dts;
778 ost->data_size += pkt->size;
779 ost->packets_written++;
781 pkt->stream_index = ost->index;
784 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
785 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
786 av_get_media_type_string(ost->enc_ctx->codec_type),
787 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
788 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
793 ret = av_interleaved_write_frame(s, pkt);
/* write failure: mark every output stream finished so we stop cleanly */
795 print_error("av_interleaved_write_frame()", ret);
796 main_return_code = 1;
797 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
799 av_packet_unref(pkt);
/* Mark this encoder finished; clamp the owning file's recording_time to
 * the stream's current end position (rescaled to AV_TIME_BASE).
 * NOTE(review): elided listing — the condition guarding the
 * recording_time update is not shown. */
802 static void close_output_stream(OutputStream *ost)
804 OutputFile *of = output_files[ost->file_index];
806 ost->finished |= ENCODER_FINISHED;
808 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
809 of->recording_time = FFMIN(of->recording_time, end);
/* Run a packet through the output bitstream-filter chain (if any) and
 * deliver each resulting packet via write_packet(); without filters the
 * packet goes straight to the muxer. Contains the documented hack that
 * propagates bsf-updated extradata (aac_adtstoasc) down the chain and
 * into the stream codecpar.
 * NOTE(review): elided listing — loop structure, error gotos and some
 * braces of the original body are not shown. */
813 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
817 /* apply the output bitstream filters, if any */
818 if (ost->nb_bitstream_filters) {
821 av_packet_split_side_data(pkt);
822 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
828 /* get a packet from the previous filter up the chain */
829 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
830 if (ret == AVERROR(EAGAIN)) {
836 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
837 * the api states this shouldn't happen after init(). Propagate it here to the
838 * muxer and to the next filters in the chain to workaround this.
839 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
840 * par_out->extradata and adapt muxers accordingly to get rid of this. */
841 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
842 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
845 ost->bsf_extradata_updated[idx - 1] |= 1;
848 /* send it to the next filter down the chain or to the muxer */
849 if (idx < ost->nb_bitstream_filters) {
850 /* HACK/FIXME! - See above */
851 if (!(ost->bsf_extradata_updated[idx] & 2)) {
852 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
855 ost->bsf_extradata_updated[idx] |= 2;
857 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
862 write_packet(of, pkt, ost);
/* no bitstream filters: pass the packet through unchanged */
865 write_packet(of, pkt, ost);
868 if (ret < 0 && ret != AVERROR_EOF) {
869 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
870 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Enforce -t: if the stream's current position has reached the file's
 * recording_time, close the stream. NOTE(review): elided listing — the
 * return statements (presumably 0 when closed, 1 otherwise) are not
 * shown; confirm against the full source. */
876 static int check_recording_time(OutputStream *ost)
878 OutputFile *of = output_files[ost->file_index];
880 if (of->recording_time != INT64_MAX &&
881 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
882 AV_TIME_BASE_Q) >= 0) {
883 close_output_stream(ost);
/* Encode one audio frame: fix up its pts from sync_opts when missing,
 * feed it via avcodec_send_frame(), drain all produced packets with
 * avcodec_receive_packet(), rescale them to mux timebase and pass them
 * to output_packet().
 * NOTE(review): elided listing — error labels, loop braces and the
 * debug-guard conditions are not shown. */
889 static void do_audio_out(OutputFile *of, OutputStream *ost,
892 AVCodecContext *enc = ost->enc_ctx;
896 av_init_packet(&pkt);
900 if (!check_recording_time(ost))
903 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
904 frame->pts = ost->sync_opts;
/* next frame starts where this one ends */
905 ost->sync_opts = frame->pts + frame->nb_samples;
906 ost->samples_encoded += frame->nb_samples;
907 ost->frames_encoded++;
909 av_assert0(pkt.size || !pkt.data);
910 update_benchmark(NULL);
912 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
913 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
914 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
915 enc->time_base.num, enc->time_base.den);
918 ret = avcodec_send_frame(enc, frame);
/* drain every packet the encoder produced for this frame */
923 ret = avcodec_receive_packet(enc, &pkt);
924 if (ret == AVERROR(EAGAIN))
929 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
931 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
934 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
935 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
936 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
937 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
940 output_packet(of, &pkt, ost);
945 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle. Lazily allocates the shared subtitle_out
 * buffer; DVB subtitles are encoded twice (one packet draws, one
 * clears). Shifts display times so start_display_time is 0, honors
 * -ss/-t via the start_time offset and check_recording_time(), and
 * emits the encoded packet(s) through output_packet().
 * NOTE(review): elided listing — declarations, braces and the exit
 * paths after fatal logs are not shown. */
949 static void do_subtitle_out(OutputFile *of,
953 int subtitle_out_max_size = 1024 * 1024;
954 int subtitle_out_size, nb, i;
959 if (sub->pts == AV_NOPTS_VALUE) {
960 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
969 subtitle_out = av_malloc(subtitle_out_max_size);
971 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
976 /* Note: DVB subtitle need one packet to draw them and one other
977 packet to clear them */
978 /* XXX: signal it in the codec context ? */
979 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
984 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
986 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
987 pts -= output_files[ost->file_index]->start_time;
988 for (i = 0; i < nb; i++) {
989 unsigned save_num_rects = sub->num_rects;
991 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
992 if (!check_recording_time(ost))
996 // start_display_time is required to be 0
997 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
998 sub->end_display_time -= sub->start_display_time;
999 sub->start_display_time = 0;
1003 ost->frames_encoded++;
1005 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1006 subtitle_out_max_size, sub);
/* restore num_rects, which the encoder pass may have zeroed */
1008 sub->num_rects = save_num_rects;
1009 if (subtitle_out_size < 0) {
1010 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1014 av_init_packet(&pkt);
1015 pkt.data = subtitle_out;
1016 pkt.size = subtitle_out_size;
1017 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1018 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1019 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1020 /* XXX: the pts correction is handled here. Maybe handling
1021 it in the codec would be better */
1023 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1025 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1028 output_packet(of, &pkt, ost);
/* Encode one video frame with frame-rate conversion. Computes how many
 * times to emit the new frame (nb_frames) and how many times to repeat
 * the previous one (nb0_frames) according to the selected vsync mode
 * (CFR/VFR/VSCFR/passthrough/drop), tracks dup/drop statistics, applies
 * interlacing field order, evaluates -force_key_frames, then runs the
 * send_frame/receive_packet encode loop and forwards packets to
 * output_packet(). Keeps a reference to the frame in ost->last_frame
 * for future duplication.
 * NOTE(review): elided listing — many braces, case labels, else
 * branches and error paths of the original body are not shown. */
1032 static void do_video_out(OutputFile *of,
1034 AVFrame *next_picture,
1037 int ret, format_video_sync;
1039 AVCodecContext *enc = ost->enc_ctx;
1040 AVCodecParameters *mux_par = ost->st->codecpar;
1041 AVRational frame_rate;
1042 int nb_frames, nb0_frames, i;
1043 double delta, delta0;
1044 double duration = 0;
1046 InputStream *ist = NULL;
1047 AVFilterContext *filter = ost->filter->filter;
1049 if (ost->source_index >= 0)
1050 ist = input_streams[ost->source_index];
/* estimate one output frame's duration, in encoder timebase units */
1052 frame_rate = av_buffersink_get_frame_rate(filter);
1053 if (frame_rate.num > 0 && frame_rate.den > 0)
1054 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1056 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1057 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1059 if (!ost->filters_script &&
1063 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1064 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* NULL frame (flush): predict repetition count from recent history */
1067 if (!next_picture) {
1069 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1070 ost->last_nb0_frames[1],
1071 ost->last_nb0_frames[2]);
1073 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1074 delta = delta0 + duration;
1076 /* by default, we output a single frame */
1077 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* resolve VSYNC_AUTO into a concrete sync mode for this muxer */
1080 format_video_sync = video_sync_method;
1081 if (format_video_sync == VSYNC_AUTO) {
1082 if(!strcmp(of->ctx->oformat->name, "avi")) {
1083 format_video_sync = VSYNC_VFR;
1085 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1087 && format_video_sync == VSYNC_CFR
1088 && input_files[ist->file_index]->ctx->nb_streams == 1
1089 && input_files[ist->file_index]->input_ts_offset == 0) {
1090 format_video_sync = VSYNC_VSCFR;
1092 if (format_video_sync == VSYNC_CFR && copy_ts) {
1093 format_video_sync = VSYNC_VSCFR;
1096 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1100 format_video_sync != VSYNC_PASSTHROUGH &&
1101 format_video_sync != VSYNC_DROP) {
1102 if (delta0 < -0.6) {
1103 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1105 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1106 sync_ipts = ost->sync_opts;
/* per-mode computation of nb_frames/nb0_frames and sync_opts */
1111 switch (format_video_sync) {
1113 if (ost->frame_number == 0 && delta0 >= 0.5) {
1114 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1117 ost->sync_opts = lrint(sync_ipts);
1120 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1121 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1123 } else if (delta < -1.1)
1125 else if (delta > 1.1) {
1126 nb_frames = lrintf(delta);
1128 nb0_frames = lrintf(delta0 - 0.6);
1134 else if (delta > 0.6)
1135 ost->sync_opts = lrint(sync_ipts);
1138 case VSYNC_PASSTHROUGH:
1139 ost->sync_opts = lrint(sync_ipts);
1146 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1147 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* shift duplication history; newest count goes to slot 0 */
1149 memmove(ost->last_nb0_frames + 1,
1150 ost->last_nb0_frames,
1151 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1152 ost->last_nb0_frames[0] = nb0_frames;
1154 if (nb0_frames == 0 && ost->last_dropped) {
1156 av_log(NULL, AV_LOG_VERBOSE,
1157 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1158 ost->frame_number, ost->st->index, ost->last_frame->pts);
1160 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1161 if (nb_frames > dts_error_threshold * 30) {
1162 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1166 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1167 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1168 if (nb_frames_dup > dup_warning) {
1169 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1173 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1175 /* duplicates frame if needed */
1176 for (i = 0; i < nb_frames; i++) {
1177 AVFrame *in_picture;
1178 av_init_packet(&pkt);
/* first nb0_frames iterations re-encode the previous frame */
1182 if (i < nb0_frames && ost->last_frame) {
1183 in_picture = ost->last_frame;
1185 in_picture = next_picture;
1190 in_picture->pts = ost->sync_opts;
1193 if (!check_recording_time(ost))
1195 if (ost->frame_number >= ost->max_frames)
1199 #if FF_API_LAVF_FMT_RAWPICTURE
1200 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1201 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1202 /* raw pictures are written as AVPicture structure to
1203 avoid any copies. We support temporarily the older
1205 if (in_picture->interlaced_frame)
1206 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1208 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1209 pkt.data = (uint8_t *)in_picture;
1210 pkt.size = sizeof(AVPicture);
1211 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1212 pkt.flags |= AV_PKT_FLAG_KEY;
1214 output_packet(of, &pkt, ost);
1218 int forced_keyframe = 0;
1221 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1222 ost->top_field_first >= 0)
1223 in_picture->top_field_first = !!ost->top_field_first;
1225 if (in_picture->interlaced_frame) {
1226 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1227 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1229 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1231 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1233 in_picture->quality = enc->global_quality;
1234 in_picture->pict_type = 0;
/* -force_key_frames: explicit pts list, expression, or "source" */
1236 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1237 in_picture->pts * av_q2d(enc->time_base) : NAN;
1238 if (ost->forced_kf_index < ost->forced_kf_count &&
1239 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1240 ost->forced_kf_index++;
1241 forced_keyframe = 1;
1242 } else if (ost->forced_keyframes_pexpr) {
1244 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1245 res = av_expr_eval(ost->forced_keyframes_pexpr,
1246 ost->forced_keyframes_expr_const_values, NULL);
1247 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1248 ost->forced_keyframes_expr_const_values[FKF_N],
1249 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1250 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1251 ost->forced_keyframes_expr_const_values[FKF_T],
1252 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1255 forced_keyframe = 1;
1256 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1257 ost->forced_keyframes_expr_const_values[FKF_N];
1258 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1259 ost->forced_keyframes_expr_const_values[FKF_T];
1260 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1263 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1264 } else if ( ost->forced_keyframes
1265 && !strncmp(ost->forced_keyframes, "source", 6)
1266 && in_picture->key_frame==1) {
1267 forced_keyframe = 1;
1270 if (forced_keyframe) {
1271 in_picture->pict_type = AV_PICTURE_TYPE_I;
1272 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1275 update_benchmark(NULL);
1277 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1278 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1279 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1280 enc->time_base.num, enc->time_base.den);
1283 ost->frames_encoded++;
/* send the frame, then drain every packet the encoder produces */
1285 ret = avcodec_send_frame(enc, in_picture);
1290 ret = avcodec_receive_packet(enc, &pkt);
1291 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1292 if (ret == AVERROR(EAGAIN))
1298 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1299 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1300 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1301 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1304 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1305 pkt.pts = ost->sync_opts;
1307 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1310 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1311 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1312 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1313 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1316 frame_size = pkt.size;
1317 output_packet(of, &pkt, ost);
1319 /* if two pass, output log */
1320 if (ost->logfile && enc->stats_out) {
1321 fprintf(ost->logfile, "%s", enc->stats_out);
1327 * For video, number of frames in == number of packets out.
1328 * But there may be reordering, so we can't throw away frames on encoder
1329 * flush, we need to limit them here, before they go into encoder.
1331 ost->frame_number++;
1333 if (vstats_filename && frame_size)
1334 do_video_stats(ost, frame_size);
/* keep a reference to this frame for possible future duplication */
1337 if (!ost->last_frame)
1338 ost->last_frame = av_frame_alloc();
1339 av_frame_unref(ost->last_frame);
1340 if (next_picture && ost->last_frame)
1341 av_frame_ref(ost->last_frame, next_picture);
1343 av_frame_free(&ost->last_frame);
1351 static double psnr(double d)
1353 return -10.0 * log10(d);
/*
 * Append one line of per-frame video statistics (quality, PSNR, sizes,
 * bitrates) to the file named by -vstats_file / -vstats.
 */
static void do_video_stats(OutputStream *ost, int frame_size)
    AVCodecContext *enc;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    vstats_file = fopen(vstats_filename, "w");
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* vstats_version selects between the legacy and the extended
         * (file/stream-index prefixed) log line layout */
        if (vstats_version <= 1) {
            /* ost->quality is in lambda units; convert back to a QP-like value */
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        /* PSNR from the accumulated squared error of the luma plane,
         * normalized by pixel count and 255^2 peak */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value (end of stream so far, in seconds) */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        /* instantaneous bitrate from this frame's size over one frame duration */
        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        /* average bitrate: total bytes written so far over elapsed stream time */
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1399 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark an output stream as fully finished on both the encoder and muxer
 * side; the loop below also finishes every other stream that belongs to
 * the same output file.
 * NOTE(review): finishing all sibling streams unconditionally looks
 * aggressive — confirm whether a guard condition was intended here.
 */
static void finish_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

    for (i = 0; i < of->ctx->nb_streams; i++)
        output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/**
 * Get and encode new output from any of the filtergraphs, without causing
 * activity on any of the inputs (frames are pulled with
 * AV_BUFFERSINK_FLAG_NO_REQUEST, so nothing is requested upstream).
 *
 * @param flush non-zero to drain the sinks at EOF as well
 * @return 0 for success, <0 for severe errors
 */
static int reap_filters(int flush)
    AVFrame *filtered_frame = NULL;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;

        /* skip streams with no (configured) filtergraph */
        if (!ost->filter || !ost->filter->graph->graph)
        filter = ost->filter->filter;

        /* lazily initialize the output stream on first reaped frame */
        if (!ost->initialized) {
            ret = init_output_stream(ost, error, sizeof(error));
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);

        /* reusable per-stream frame, allocated on first use */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        filtered_frame = ost->filtered_frame;

        // NOTE: AV_NOPTS_VALUE (an int64 sentinel) stored in a double —
        // intentional here; float_pts tracks filtered_frame.pts with
        // higher precision than the integer timebase allows
        double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
        ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                            AV_BUFFERSINK_FLAG_NO_REQUEST);
        if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_WARNING,
                   "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
        } else if (flush && ret == AVERROR_EOF) {
            /* at EOF, push a NULL frame so the video path can flush */
            if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
                do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
        if (ost->finished) {
            av_frame_unref(filtered_frame);
        if (filtered_frame->pts != AV_NOPTS_VALUE) {
            int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
            AVRational filter_tb = av_buffersink_get_time_base(filter);
            AVRational tb = enc->time_base;
            /* temporarily scale up the timebase denominator so the
             * rescale below keeps extra fractional precision */
            int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
            tb.den <<= extra_bits;
            av_rescale_q(filtered_frame->pts, filter_tb, tb) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
            float_pts /= 1 << extra_bits;
            // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
            float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
            /* integer pts in the encoder timebase, offset by the output start time */
            filtered_frame->pts =
                av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
        //if (ost->source_index >= 0)
        //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
        switch (av_buffersink_get_type(filter)) {
        case AVMEDIA_TYPE_VIDEO:
            /* without an explicit -aspect, propagate the filter output SAR */
            if (!ost->frame_aspect_ratio.num)
                enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
            av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                   av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
            do_video_out(of, ost, filtered_frame, float_pts);
        case AVMEDIA_TYPE_AUDIO:
            /* refuse channel-count changes the encoder cannot absorb */
            if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                enc->channels != av_frame_get_channels(filtered_frame)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
            do_audio_out(of, ost, filtered_frame);
        // TODO support subtitle filters
        av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: total bytes per media type, muxing
 * overhead percentage, and (at verbose level) per-input / per-output
 * stream packet and frame counts.
 */
static void print_final_stats(int64_t total_size)
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0; /* -1 means "unknown", printed as such below */

    /* accumulate written payload bytes per media type */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
        case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
        case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
        default: other_size += ost->data_size; break;
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* NOTE(review): mixes the deprecated CODEC_FLAG_PASS2 name with
         * AV_CODEC_FLAG_PASS1 — should presumably be AV_CODEC_FLAG_PASS2;
         * confirm against the libavcodec version in use */
        if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
    /* muxing overhead = container bytes beyond the raw payload */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->filename);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            av_log(NULL, AV_LOG_VERBOSE, "\n");
        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->filename);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);
            av_log(NULL, AV_LOG_VERBOSE, "\n");
        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    /* nothing at all was produced: warn, with a hint about trimming options */
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        av_log(NULL, AV_LOG_WARNING, "\n");
        av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Periodically print the one-line progress report (frame/fps/q/size/time/
 * bitrate/speed) to stderr and, when -progress is in use, emit a
 * machine-readable key=value block to progress_avio. Rate-limited to one
 * update per 500ms except for the final report.
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
    AVBPrint buf_script;
    AVFormatContext *oc;
    AVCodecContext *enc;
    int frame_number, vid, i;
    int64_t pts = INT64_MIN + 1; /* max end-pts seen across streams */
    static int64_t last_time = -1; /* wall-clock of the previous report */
    static int qp_histogram[52];
    int hours, mins, secs, us;

    /* nothing to do when neither -stats nor -progress is active */
    if (!print_stats && !is_last_report && !progress_avio)

    /* rate-limit intermediate reports to one per 500ms of wall clock */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
        if ((cur_time - last_time) < 500000)
        last_time = cur_time;

    t = (cur_time-timer_start) / 1000000.0;

    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        /* quality only meaningful for encoded (non-copied) streams */
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        /* first video stream drives frame/fps in the report */
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            /* the boolean (fps < 9.95) selects 1 vs 0 decimal places */
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            /* -qphist: log2-compressed QP histogram as one hex digit per bucket */
            if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    /* final report: cumulative error over all frames;
                     * intermediate: last frame only */
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
            nb_frames_drop += ost->last_dropped;

    /* split |pts| (in AV_TIME_BASE units) into h:m:s.us components */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);

        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);

    if (nb_frames_dup || nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                 nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);

    if (print_stats || is_last_report) {
        /* '\r' overwrites the line in place; '\n' terminates the final one */
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf, end);
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));

        print_final_stats(total_size);
/*
 * Drain every encoder at end of input: send a NULL frame to signal EOF,
 * then loop avcodec_receive_packet() until AVERROR_EOF, muxing every
 * remaining packet. Streams never initialized get a last-chance setup so
 * a valid (if empty) stream is still written.
 */
static void flush_encoders(void)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile *of = output_files[ost->file_index];

        if (!ost->encoding_needed)

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;

            av_log(NULL, AV_LOG_WARNING,
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0) {
                        AVCodecParameters *par = ifilter->ist->st->codecpar;
                        // We never got any input. Set a fake format, which will
                        // come from libavformat.
                        ifilter->format = par->format;
                        ifilter->sample_rate = par->sample_rate;
                        ifilter->channels = par->channels;
                        ifilter->channel_layout = par->channel_layout;
                        ifilter->width = par->width;
                        ifilter->height = par->height;
                        ifilter->sample_aspect_ratio = par->sample_aspect_ratio;

                if (!ifilter_has_all_input_formats(fg))

                ret = configure_filtergraph(fg);
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");

                finish_output_stream(ost);

            ret = init_output_stream(ost, error, sizeof(error));
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);

        /* PCM-style audio encoders (frame_size <= 1) have nothing buffered */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
#if FF_API_LAVF_FMT_RAWPICTURE
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)

        /* only audio and video encoders are drained */
        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)

        /* NULL frame enters the encoder's drain mode */
        avcodec_send_frame(enc, NULL);

            const char *desc = NULL;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
            case AVMEDIA_TYPE_VIDEO:

                av_init_packet(&pkt);

                update_benchmark(NULL);
                ret = avcodec_receive_packet(enc, &pkt);
                update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
                if (ret < 0 && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                /* append two-pass stats even while flushing */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                if (ret == AVERROR_EOF) {
                /* muxer already closed: drop the packet */
                if (ost->finished & MUXER_FINISHED) {
                    av_packet_unref(&pkt);
                av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
                pkt_size = pkt.size;
                output_packet(of, &pkt, ost);
                if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                    do_video_stats(ost, pkt_size);
/*
 * Check whether a packet from ist should be written into ost at this time.
 * Rejects packets from the wrong source stream and packets that precede
 * the output file's start time.
 */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];
    /* global index of ist across all input files */
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    if (ost->source_index != ist_index)

    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy a demuxed packet straight to the output (-c copy path): rescale
 * its timestamps from the input stream timebase to the muxer timebase,
 * apply start-time offsets and recording-time limits, optionally run the
 * bitstream parser, and hand it to output_packet().
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    /* output start time expressed in the muxer timebase */
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);

    av_init_packet(&opkt);

    /* drop leading non-keyframes unless the user opted in */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)

    /* drop packets that precede the (possibly -copyts adjusted) start */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))

    /* stop the stream once -t / recording_time is reached (per output) */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);

    /* same, but for a per-input-file recording limit */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
        opkt.pts = AV_NOPTS_VALUE;

    /* missing dts: fall back to the input stream's tracked dts */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    /* audio: derive duration-aware timestamps to avoid rounding drift */
    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->mux_timebase) - ost_tb_start_time;

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    opkt.flags    = pkt->flags;
    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codecpar->codec_id != AV_CODEC_ID_H264
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
        /* legacy parser-based header munging for everything else */
        int ret = av_parser_change(ost->parser, ost->parser_avctx,
                                   &opkt.data, &opkt.size,
                                   pkt->data, pkt->size,
                                   pkt->flags & AV_PKT_FLAG_KEY);
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
            /* parser allocated a new buffer: give opkt ownership of it */
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    av_copy_packet_side_data(&opkt, pkt);

#if FF_API_LAVF_FMT_RAWPICTURE
    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
        ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;

    output_packet(of, &opkt, ost);
/*
 * If the decoder reported no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn about the
 * guess. Leaves an existing layout untouched.
 */
int guess_input_channel_layout(InputStream *ist)
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* user capped how many channels we may guess a layout for */
        if (dec->channels > ist->guess_layout_max)
        dec->channel_layout = av_get_default_channel_layout(dec->channels);
        if (!dec->channel_layout)
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Track decode success/failure statistics and, with -xerror, abort on
 * decode errors or corrupt decoded frames.
 */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
    /* decode_error_stat[0] counts successes, [1] counts failures */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)

    if (exit_on_error && *got_output && ist) {
        if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
            av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
// Filters can be configured only if the formats of all inputs are known.
// Returns whether every audio/video input of the graph has a negotiated
// format (format >= 0); other media types are not format-gated here.
static int ifilter_has_all_input_formats(FilterGraph *fg)
    for (i = 0; i < fg->nb_inputs; i++) {
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Feed one decoded frame into a filtergraph input. If the frame's
 * parameters differ from what the graph was configured for (or the graph
 * is not configured yet), reconfigure first — buffering frames for other
 * inputs whose formats are still unknown.
 *
 * @return 0 on success, a negative AVERROR otherwise
 */
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;
    /* hw frames context appearing/disappearing or changing also forces reinit */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
                       ifilter->channels       != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width  != frame->width ||
                       ifilter->height != frame->height;

        /* record the new parameters on the input filter */
        ret = ifilter_parameters_from_frame(ifilter, frame);

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* can't configure yet: park a clone of the frame in the
                 * input's FIFO, growing it when full */
                AVFrame *tmp = av_frame_clone(frame);
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                if (!av_fifo_space(ifilter->frame_queue)) {
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);

        /* drain the old graph's pending output before reconfiguring */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_strerror(ret, errbuf, sizeof(errbuf));

            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);

        ret = configure_filtergraph(fg);
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");

    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
        av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
/*
 * Signal EOF on one filtergraph input. If the graph was configured, push
 * a NULL frame; if it never got configured and every input is now at EOF,
 * mark all of the graph's output streams finished.
 */
static int ifilter_send_eof(InputFilter *ifilter)
    if (ifilter->filter) {
        ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
        // the filtergraph was never configured
        FilterGraph *fg = ifilter->graph;
        for (i = 0; i < fg->nb_inputs; i++)
            if (!fg->inputs[i]->eof)
        if (i == fg->nb_inputs) {
            // All the input streams have finished without the filtergraph
            // ever being configured.
            // Mark the output streams as finished.
            for (j = 0; j < fg->nb_outputs; j++)
                finish_output_stream(fg->outputs[j]->ost);
// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
// There is the following difference: if you got a frame, you must call
// it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
// (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
//
// Thin wrapper pairing avcodec_send_packet() with one
// avcodec_receive_frame() call per invocation.
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
        ret = avcodec_send_packet(avctx, pkt);
        // In particular, we don't expect AVERROR(EAGAIN), because we read all
        // decoded frames with avcodec_receive_frame() until done.
        if (ret < 0 && ret != AVERROR_EOF)

    ret = avcodec_receive_frame(avctx, frame);
    /* EAGAIN just means "no frame yet", not an error */
    if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Distribute one decoded frame to every filtergraph input fed by this
 * stream. All but the last filter receive a reference to the frame; the
 * last consumes the frame itself.
 *
 * @return 0 on success, a negative AVERROR otherwise
 */
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
    av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            ret = av_frame_ref(f, decoded_frame);
        ret = ifilter_send_frame(ist->filters[i], f);
        /* a filter input at EOF is not fatal for the other inputs */
        if (ret == AVERROR_EOF)
            ret = 0; /* ignore */
            av_log(NULL, AV_LOG_ERROR,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, fix up the decoded frame's pts (choosing the
 * best available timestamp source and rescaling it to the sample-rate
 * timebase), then forward the frame to the filtergraphs.
 *
 * @param got_output set non-zero when a frame was produced
 * @return 0/positive on success, a negative AVERROR otherwise
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    AVRational decoded_frame_tb;

    /* lazily allocate the per-stream reusable frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);

    /* a decoder reporting a non-positive sample rate is unusable */
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /

    /* pick a pts source: decoder pts, else packet pts, else tracked dts */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    /* rescale into the 1/sample_rate timebase, carrying rounding error
     * across calls via filter_in_rescale_delta_last */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
/* Decode one video packet for `ist` (pkt == NULL drains the decoder) and, on
 * success, forward the decoded frame to the stream's filter graph.
 * *got_output is set by decode() when a frame was produced; `eof` marks the
 * drain phase and enables the DTS-buffer timestamp fallback below.
 * Returns err if sending to filters failed, otherwise the decode result. */
2353 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2355 AVFrame *decoded_frame;
2356 int i, ret = 0, err = 0;
2357 int64_t best_effort_timestamp;
2358 int64_t dts = AV_NOPTS_VALUE;
2361 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2362 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2364 if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the per-stream frames reused across calls. */
2367 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2368 return AVERROR(ENOMEM);
2369 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2370 return AVERROR(ENOMEM);
2371 decoded_frame = ist->decoded_frame;
/* Rescale the stream-global dts (AV_TIME_BASE_Q) into this stream's
 * time base so it can be stashed on the packet / dts buffer below. */
2372 if (ist->dts != AV_NOPTS_VALUE)
2373 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2376 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2379 // The old code used to set dts on the drain packet, which does not work
2380 // with the new API anymore.
/* Remember each packet's dts so a frame produced only at EOF can still
 * get a timestamp (consumed in the eof fallback further down). */
2382 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2384 return AVERROR(ENOMEM);
2385 ist->dts_buffer = new;
2386 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2389 update_benchmark(NULL);
2390 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2391 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2393 // The following line may be required in some cases where there is no parser
2394 // or the parser does not has_b_frames correctly
2395 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2396 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2397 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2399 av_log(ist->dec_ctx, AV_LOG_WARNING,
2400 "video_delay is larger in decoder than demuxer %d > %d.\n"
2401 "If you want to help, upload a sample "
2402 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2403 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2404 ist->dec_ctx->has_b_frames,
2405 ist->st->codecpar->video_delay);
2408 if (ret != AVERROR_EOF)
2409 check_decode_result(ist, got_output, ret);
/* Diagnostic only: a decoded frame whose geometry/format differs from the
 * decoder context is logged, not rejected here. */
2411 if (*got_output && ret >= 0) {
2412 if (ist->dec_ctx->width != decoded_frame->width ||
2413 ist->dec_ctx->height != decoded_frame->height ||
2414 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2415 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2416 decoded_frame->width,
2417 decoded_frame->height,
2418 decoded_frame->format,
2419 ist->dec_ctx->width,
2420 ist->dec_ctx->height,
2421 ist->dec_ctx->pix_fmt);
2425 if (!*got_output || ret < 0)
/* Honor user-forced field order (-top) when set (>= 0). */
2428 if(ist->top_field_first>=0)
2429 decoded_frame->top_field_first = ist->top_field_first;
2431 ist->frames_decoded++;
/* Download the frame from hardware when a hwaccel produced it. */
2433 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2434 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2438 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2440 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* With a forced input framerate (-r on input), synthesize CFR timestamps
 * instead of trusting the decoder's. */
2442 if (ist->framerate.num)
2443 best_effort_timestamp = ist->cfr_next_pts++;
/* Drain-phase fallback: reuse the oldest buffered packet dts when the
 * decoder could not supply a timestamp, then shift the buffer down. */
2445 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2446 best_effort_timestamp = ist->dts_buffer[0];
2448 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2449 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2450 ist->nb_dts_buffer--;
2453 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2454 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2456 if (ts != AV_NOPTS_VALUE)
2457 ist->next_pts = ist->pts = ts;
2461 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2462 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2463 ist->st->index, av_ts2str(decoded_frame->pts),
2464 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2465 best_effort_timestamp,
2466 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2467 decoded_frame->key_frame, decoded_frame->pict_type,
2468 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level SAR overrides the frame's own value. */
2471 if (ist->st->sample_aspect_ratio.num)
2472 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2474 err = send_frame_to_filters(ist, decoded_frame);
2477 av_frame_unref(ist->filter_frame);
2478 av_frame_unref(decoded_frame);
2479 return err < 0 ? err : ret;
/* Decode one subtitle packet for `ist` and dispatch the result: update the
 * sub2video frame, queue it for not-yet-configured filters, and send it to
 * every subtitle-encoding output stream fed by this input. */
2482 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2484 AVSubtitle subtitle;
2486 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2487 &subtitle, got_output, pkt);
2489 check_decode_result(NULL, got_output, ret);
/* On error or no output: flush sub2video state (visible path) and bail. */
2491 if (ret < 0 || !*got_output) {
2493 sub2video_flush(ist);
/* -fix_sub_duration: clamp the PREVIOUS subtitle's display time so it
 * ends when the current one starts, then swap prev/current so the
 * (possibly shortened) previous subtitle is the one emitted this call. */
2497 if (ist->fix_sub_duration) {
2499 if (ist->prev_sub.got_output) {
2500 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2501 1000, AV_TIME_BASE);
2502 if (end < ist->prev_sub.subtitle.end_display_time) {
2503 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2504 "Subtitle duration reduced from %d to %d%s\n",
2505 ist->prev_sub.subtitle.end_display_time, end,
2506 end <= 0 ? ", dropping it" : "");
2507 ist->prev_sub.subtitle.end_display_time = end;
2510 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2511 FFSWAP(int, ret, ist->prev_sub.ret);
2512 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video: render onto the existing frame, or queue the subtitle until
 * the filter graph is configured (FIFO grown by doubling when full). */
2520 if (ist->sub2video.frame) {
2521 sub2video_update(ist, &subtitle);
2522 } else if (ist->nb_filters) {
2523 if (!ist->sub2video.sub_queue)
2524 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2525 if (!ist->sub2video.sub_queue)
2527 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2528 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2532 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2536 if (!subtitle.num_rects)
2539 ist->frames_decoded++;
/* Fan the decoded subtitle out to every matching subtitle encoder. */
2541 for (i = 0; i < nb_output_streams; i++) {
2542 OutputStream *ost = output_streams[i];
2544 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2545 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2548 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2553 avsubtitle_free(&subtitle);
/* Signal EOF to every filter input attached to `ist` so the filter graphs
 * can flush; stops propagating on the first ifilter_send_eof() failure. */
2557 static int send_filter_eof(InputStream *ist)
2560 for (i = 0; i < ist->nb_filters; i++) {
2561 ret = ifilter_send_eof(ist->filters[i]);
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: maintains the input stream's pts/dts
 * bookkeeping, runs the appropriate decoder (audio/video/subtitle) while
 * decoding is needed, sends filter EOF when draining finishes, and handles
 * the streamcopy path when no decoding is needed. `no_eof` suppresses the
 * filter EOF (used when looping input). Returns !eof_reached. */
2569 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2573 int eof_reached = 0;
/* First packet: derive an initial dts from the stream's frame rate and
 * decoder delay (b-frames), or from the packet pts for pure streamcopy. */
2576 if (!ist->saw_first_ts) {
2577 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2579 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2580 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2581 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2583 ist->saw_first_ts = 1;
2586 if (ist->next_dts == AV_NOPTS_VALUE)
2587 ist->next_dts = ist->dts;
2588 if (ist->next_pts == AV_NOPTS_VALUE)
2589 ist->next_pts = ist->pts;
2593 av_init_packet(&avpkt);
/* Sync the stream clocks to the packet's dts (in AV_TIME_BASE_Q). */
2600 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2601 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2602 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2603 ist->next_pts = ist->pts = ist->dts;
2606 // while we have more to decode or while the decoder did output something on EOF
2607 while (ist->decoding_needed) {
2611 ist->pts = ist->next_pts;
2612 ist->dts = ist->next_dts;
/* On repeat iterations (draining one packet's multiple outputs) the
 * decoders are called with NULL instead of the packet. */
2614 switch (ist->dec_ctx->codec_type) {
2615 case AVMEDIA_TYPE_AUDIO:
2616 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2618 case AVMEDIA_TYPE_VIDEO:
2619 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
/* Advance next_dts by the frame duration: packet duration if present,
 * else derived from the decoder framerate / parser repeat_pict. */
2620 if (!repeating || !pkt || got_output) {
2621 if (pkt && pkt->duration) {
2622 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2623 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2624 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2625 duration = ((int64_t)AV_TIME_BASE *
2626 ist->dec_ctx->framerate.den * ticks) /
2627 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2630 if(ist->dts != AV_NOPTS_VALUE && duration) {
2631 ist->next_dts += duration;
2633 ist->next_dts = AV_NOPTS_VALUE;
2637 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2639 case AVMEDIA_TYPE_SUBTITLE:
2642 ret = transcode_subtitles(ist, &avpkt, &got_output);
2643 if (!pkt && ret >= 0)
2650 if (ret == AVERROR_EOF) {
2656 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2657 ist->file_index, ist->st->index, av_err2str(ret));
2664 ist->got_output = 1;
2669 // During draining, we might get multiple output frames in this loop.
2670 // ffmpeg.c does not drain the filter chain on configuration changes,
2671 // which means if we send multiple frames at once to the filters, and
2672 // one of those frames changes configuration, the buffered frames will
2673 // be lost. This can upset certain FATE tests.
2674 // Decode only 1 frame per call on EOF to appease these FATE tests.
2675 // The ideal solution would be to rewrite decoding to use the new
2676 // decoding API in a better way.
2683 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2684 /* except when looping we need to flush but not to send an EOF */
2685 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2686 int ret = send_filter_eof(ist);
2688 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2693 /* handle stream copy */
/* No decoding: advance dts analytically from codec parameters and feed
 * the packet straight to every matching streamcopy output. */
2694 if (!ist->decoding_needed) {
2695 ist->dts = ist->next_dts;
2696 switch (ist->dec_ctx->codec_type) {
2697 case AVMEDIA_TYPE_AUDIO:
2698 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2699 ist->dec_ctx->sample_rate;
2701 case AVMEDIA_TYPE_VIDEO:
2702 if (ist->framerate.num) {
2703 // TODO: Remove work-around for c99-to-c89 issue 7
2704 AVRational time_base_q = AV_TIME_BASE_Q;
2705 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2706 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2707 } else if (pkt->duration) {
2708 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2709 } else if(ist->dec_ctx->framerate.num != 0) {
2710 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2711 ist->next_dts += ((int64_t)AV_TIME_BASE *
2712 ist->dec_ctx->framerate.den * ticks) /
2713 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2717 ist->pts = ist->dts;
2718 ist->next_pts = ist->next_dts;
2720 for (i = 0; pkt && i < nb_output_streams; i++) {
2721 OutputStream *ost = output_streams[i];
2723 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2726 do_streamcopy(ist, ost, pkt);
2729 return !eof_reached;
/* Build an SDP description covering every RTP output context and either
 * print it to stdout or write it to -sdp_file. Bails early (visible check)
 * unless all output file headers have been written. */
2732 static void print_sdp(void)
2737 AVIOContext *sdp_pb;
2738 AVFormatContext **avc;
2740 for (i = 0; i < nb_output_files; i++) {
2741 if (!output_files[i]->header_written)
2745 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the "rtp" muxer contexts; j counts how many were found. */
2748 for (i = 0, j = 0; i < nb_output_files; i++) {
2749 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2750 avc[j] = output_files[i]->ctx;
2758 av_sdp_create(avc, j, sdp, sizeof(sdp));
2760 if (!sdp_filename) {
2761 printf("SDP:\n%s\n", sdp);
2764 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2765 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2767 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2768 avio_closep(&sdp_pb);
2769 av_freep(&sdp_filename);
/* Look up the HWAccel descriptor whose pix_fmt matches `pix_fmt` in the
 * global hwaccels[] table (terminated by a NULL name). */
2777 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2780 for (i = 0; hwaccels[i].name; i++)
2781 if (hwaccels[i].pix_fmt == pix_fmt)
2782 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first hwaccel format that matches the user's
 * -hwaccel selection and initializes successfully; records the chosen
 * hwaccel id/format on the InputStream (stored in s->opaque). */
2786 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2788 InputStream *ist = s->opaque;
2789 const enum AVPixelFormat *p;
2792 for (p = pix_fmts; *p != -1; p++) {
2793 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2794 const HWAccel *hwaccel;
/* Skip software formats; only HWACCEL-flagged formats are considered. */
2796 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2799 hwaccel = get_hwaccel(*p);
/* Reject hwaccels that conflict with an already-active one or with an
 * explicit (non-auto) user selection. */
2801 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2802 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2805 ret = hwaccel->init(s);
/* init failure is fatal only when this hwaccel was explicitly requested. */
2807 if (ist->hwaccel_id == hwaccel->id) {
2808 av_log(NULL, AV_LOG_FATAL,
2809 "%s hwaccel requested for input stream #%d:%d, "
2810 "but cannot be initialized.\n", hwaccel->name,
2811 ist->file_index, ist->st->index);
2812 return AV_PIX_FMT_NONE;
2817 if (ist->hw_frames_ctx) {
2818 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2819 if (!s->hw_frames_ctx)
2820 return AV_PIX_FMT_NONE;
2823 ist->active_hwaccel_id = hwaccel->id;
2824 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: route frame allocation through the
 * active hwaccel's allocator for hwaccel-format frames, otherwise fall back
 * to the default buffer allocator. */
2831 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2833 InputStream *ist = s->opaque;
2835 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2836 return ist->hwaccel_get_buffer(s, frame, flags);
2838 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for input stream `ist_index` (when decoding is needed):
 * installs the get_format/get_buffer2 callbacks, applies decoder options,
 * and calls avcodec_open2(). On failure a human-readable message is written
 * into `error` (at most error_len bytes). */
2841 static int init_input_stream(int ist_index, char *error, int error_len)
2844 InputStream *ist = input_streams[ist_index];
2846 if (ist->decoding_needed) {
2847 AVCodec *codec = ist->dec;
2849 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2850 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2851 return AVERROR(EINVAL);
/* opaque carries the InputStream into the callbacks above. */
2854 ist->dec_ctx->opaque = ist;
2855 ist->dec_ctx->get_format = get_format;
2856 ist->dec_ctx->get_buffer2 = get_buffer;
2857 ist->dec_ctx->thread_safe_callbacks = 1;
2859 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles destined for an output need end-display-time computation;
 * mixing that with filtering is only partially supported. */
2860 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2861 (ist->decoding_needed & DECODING_FOR_OST)) {
2862 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2863 if (ist->decoding_needed & DECODING_FOR_FILTER)
2864 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2867 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2869 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2870 * audio, and video decoders such as cuvid or mediacodec */
2871 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic threading unless the user chose a thread count. */
2873 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2874 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2875 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2876 if (ret == AVERROR_EXPERIMENTAL)
2877 abort_codec_experimental(codec, 0);
2879 snprintf(error, error_len,
2880 "Error while opening decoder for input stream "
2882 ist->file_index, ist->st->index, av_err2str(ret));
2885 assert_avoptions(ist->decoder_opts);
2888 ist->next_pts = AV_NOPTS_VALUE;
2889 ist->next_dts = AV_NOPTS_VALUE;
/* Return the InputStream feeding `ost`, if it has a direct source stream
 * (source_index >= 0); filter-fed outputs have no single source here. */
2894 static InputStream *get_input_stream(OutputStream *ost)
2896 if (ost->source_index >= 0)
2897 return input_streams[ost->source_index];
/* qsort comparator for int64_t values (used for forced keyframe pts). */
2901 static int compare_int64(const void *a, const void *b)
2903 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2906 /* open the muxer when all the streams are initialized */
/* Once every stream of output file `of` is initialized: write the muxer
 * header, optionally emit the SDP, then flush packets that were queued in
 * each stream's muxing FIFO while the muxer was not yet open. */
2907 static int check_init_output_file(OutputFile *of, int file_index)
/* Not ready yet if any stream is still uninitialized. */
2911 for (i = 0; i < of->ctx->nb_streams; i++) {
2912 OutputStream *ost = output_streams[of->ost_index + i];
2913 if (!ost->initialized)
2917 of->ctx->interrupt_callback = int_cb;
2919 ret = avformat_write_header(of->ctx, &of->opts);
2921 av_log(NULL, AV_LOG_ERROR,
2922 "Could not write header for output file #%d "
2923 "(incorrect codec parameters ?): %s\n",
2924 file_index, av_err2str(ret));
2927 //assert_avoptions(of->opts);
2928 of->header_written = 1;
2930 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2932 if (sdp_filename || want_sdp)
2935 /* flush the muxing queues */
2936 for (i = 0; i < of->ctx->nb_streams; i++) {
2937 OutputStream *ost = output_streams[of->ost_index + i];
2939 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2940 if (!av_fifo_size(ost->muxing_queue))
2941 ost->mux_timebase = ost->st->time_base;
2943 while (av_fifo_size(ost->muxing_queue)) {
2945 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2946 write_packet(of, &pkt, ost);
/* Initialize the output stream's bitstream-filter chain: chain each BSF's
 * input parameters/timebase to the previous filter's output (the first one
 * takes the stream's codecpar/time_base), then copy the last filter's
 * output parameters and timebase back onto the stream. No-op when the
 * stream has no bitstream filters. */
2953 static int init_output_bsfs(OutputStream *ost)
2958 if (!ost->nb_bitstream_filters)
2961 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2962 ctx = ost->bsf_ctx[i];
2964 ret = avcodec_parameters_copy(ctx->par_in,
2965 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2969 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2971 ret = av_bsf_init(ctx);
2973 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2974 ost->bsf_ctx[i]->filter->name);
/* Propagate the final filter's output back to the muxed stream. */
2979 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2980 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2984 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream for stream copy (-c copy): transfer the input
 * stream's codec parameters, timing, disposition, and side data to the
 * output, applying per-codec-type fixups. Requires a direct input stream
 * and no filter (asserted below). */
2989 static int init_output_stream_streamcopy(OutputStream *ost)
2991 OutputFile *of = output_files[ost->file_index];
2992 InputStream *ist = get_input_stream(ost);
2993 AVCodecParameters *par_dst = ost->st->codecpar;
2994 AVCodecParameters *par_src = ost->ref_par;
2997 uint32_t codec_tag = par_dst->codec_tag;
2999 av_assert0(ist && !ost->filter);
/* Route input codecpar through enc_ctx so user -c copy options in
 * encoder_opts can still be applied, then snapshot into par_src. */
3001 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3003 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3005 av_log(NULL, AV_LOG_FATAL,
3006 "Error setting up codec context options.\n");
3009 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec_tag only if the output container can represent it
 * (or has no tag table / no alternative tag for this codec id). */
3012 unsigned int codec_tag_tmp;
3013 if (!of->ctx->oformat->codec_tag ||
3014 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3015 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3016 codec_tag = par_src->codec_tag;
3019 ret = avcodec_parameters_copy(par_dst, par_src);
3023 par_dst->codec_tag = codec_tag;
3025 if (!ost->frame_rate.num)
3026 ost->frame_rate = ist->framerate;
3027 ost->st->avg_frame_rate = ost->frame_rate;
3029 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3033 // copy timebase while removing common factors
3034 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3035 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3037 // copy estimated duration as a hint to the muxer
3038 if (ost->st->duration <= 0 && ist->st->duration > 0)
3039 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3042 ost->st->disposition = ist->st->disposition;
/* Deep-copy packet side data, skipping the display matrix when the user
 * overrode rotation (it would contradict the override). */
3044 if (ist->st->nb_side_data) {
3045 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
3046 sizeof(*ist->st->side_data));
3047 if (!ost->st->side_data)
3048 return AVERROR(ENOMEM);
3050 ost->st->nb_side_data = 0;
3051 for (i = 0; i < ist->st->nb_side_data; i++) {
3052 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3053 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
3055 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
3058 sd_dst->data = av_malloc(sd_src->size);
3060 return AVERROR(ENOMEM);
3061 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3062 sd_dst->size = sd_src->size;
3063 sd_dst->type = sd_src->type;
3064 ost->st->nb_side_data++;
3068 ost->parser = av_parser_init(par_dst->codec_id);
3069 ost->parser_avctx = avcodec_alloc_context3(NULL);
3070 if (!ost->parser_avctx)
3071 return AVERROR(ENOMEM);
3073 switch (par_dst->codec_type) {
3074 case AVMEDIA_TYPE_AUDIO:
3075 if (audio_volume != 256) {
3076 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Historical workaround: these block_align values are bogus for MP3/AC3
 * in copy mode, so they are cleared. */
3079 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3080 par_dst->block_align= 0;
3081 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3082 par_dst->block_align= 0;
3084 case AVMEDIA_TYPE_VIDEO:
/* SAR priority: -aspect override > container SAR > codec-level SAR. */
3085 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3087 av_mul_q(ost->frame_aspect_ratio,
3088 (AVRational){ par_dst->height, par_dst->width });
3089 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3090 "with stream copy may produce invalid files\n");
3092 else if (ist->st->sample_aspect_ratio.num)
3093 sar = ist->st->sample_aspect_ratio;
3095 sar = par_src->sample_aspect_ratio;
3096 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3097 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3098 ost->st->r_frame_rate = ist->st->r_frame_rate;
3102 ost->mux_timebase = ist->st->time_base;
/* Write an "encoder" metadata tag (e.g. "Lavc... libx264") on the output
 * stream, unless the user already set one. In bitexact mode (format or
 * codec AVFMT/AV_CODEC FLAG_BITEXACT) the version-free "Lavc " prefix is
 * used so output stays reproducible. */
3107 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3109 AVDictionaryEntry *e;
3111 uint8_t *encoder_string;
3112 int encoder_string_len;
3113 int format_flags = 0;
3114 int codec_flags = 0;
/* Respect an explicit user-provided encoder tag. */
3116 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / codec flags from the option dictionaries, since the
 * contexts may not have been opened with them yet. */
3119 e = av_dict_get(of->opts, "fflags", NULL, 0);
3121 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3124 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3126 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3128 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3131 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3134 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3135 encoder_string = av_mallocz(encoder_string_len);
3136 if (!encoder_string)
3139 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3140 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3142 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3143 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict. */
3144 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3145 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames specification `kf` (comma-separated times,
 * with the special "chapters[+offset]" form expanding to all chapter start
 * times) into a sorted array of pts values in avctx->time_base, stored in
 * ost->forced_kf_pts / ost->forced_kf_count. */
3148 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3149 AVCodecContext *avctx)
3152 int n = 1, i, size, index = 0;
/* First pass (visible loop): count separators to size the pts array. */
3155 for (p = kf; *p; p++)
3159 pts = av_malloc_array(size, sizeof(*pts));
3161 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3166 for (i = 0; i < n; i++) {
3167 char *next = strchr(p, ',');
/* "chapters" entry: grow the array by nb_chapters-1 and append one forced
 * keyframe per chapter start, shifted by the optional "+offset". */
3172 if (!memcmp(p, "chapters", 8)) {
3174 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3177 if (avf->nb_chapters > INT_MAX - size ||
3178 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3180 av_log(NULL, AV_LOG_FATAL,
3181 "Could not allocate forced key frames array.\n");
3184 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3185 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3187 for (j = 0; j < avf->nb_chapters; j++) {
3188 AVChapter *c = avf->chapters[j];
3189 av_assert1(index < size);
3190 pts[index++] = av_rescale_q(c->start, c->time_base,
3191 avctx->time_base) + t;
/* Plain entry: a single timestamp converted to the encoder time base. */
3196 t = parse_time_or_die("force_key_frames", p, 1);
3197 av_assert1(index < size);
3198 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3205 av_assert0(index == size);
3206 qsort(pts, size, sizeof(*pts), compare_int64);
3207 ost->forced_kf_count = size;
3208 ost->forced_kf_pts = pts;
/* Configure ost->enc_ctx for encoding: derive the frame rate, pull format/
 * rate/geometry from the filter graph sink, set the encoder time base, and
 * handle per-media-type specifics (forced keyframes, disposition, subtitle
 * dimensions). Called before the encoder is opened. */
3211 static int init_output_stream_encode(OutputStream *ost)
3213 InputStream *ist = get_input_stream(ost);
3214 AVCodecContext *enc_ctx = ost->enc_ctx;
3215 AVCodecContext *dec_ctx = NULL;
3216 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3219 set_encoder_id(output_files[ost->file_index], ost);
3222 ost->st->disposition = ist->st->disposition;
3224 dec_ctx = ist->dec_ctx;
3226 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only audio/video stream of its type in the output file,
 * mark it as the default track. */
3228 for (j = 0; j < oc->nb_streams; j++) {
3229 AVStream *st = oc->streams[j];
3230 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3233 if (j == oc->nb_streams)
3234 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3235 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3236 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame-rate fallback chain: filter sink -> -r input -> r_frame_rate ->
 * hard-coded 25 fps (with a warning). */
3239 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3240 if (!ost->frame_rate.num)
3241 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3242 if (ist && !ost->frame_rate.num)
3243 ost->frame_rate = ist->framerate;
3244 if (ist && !ost->frame_rate.num)
3245 ost->frame_rate = ist->st->r_frame_rate;
3246 if (ist && !ost->frame_rate.num) {
3247 ost->frame_rate = (AVRational){25, 1};
3248 av_log(NULL, AV_LOG_WARNING,
3250 "about the input framerate is available. Falling "
3251 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3252 "if you want a different framerate.\n",
3253 ost->file_index, ost->index);
3255 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest encoder-supported rate unless -force_fps. */
3256 if (ost->enc->supported_framerates && !ost->force_fps) {
3257 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3258 ost->frame_rate = ost->enc->supported_framerates[idx];
3260 // reduce frame rate for mpeg4 to be within the spec limits
3261 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3262 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3263 ost->frame_rate.num, ost->frame_rate.den, 65535);
3267 switch (enc_ctx->codec_type) {
3268 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters all come from the filter graph sink. */
3269 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3271 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3272 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3273 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3274 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3275 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3276 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3278 case AVMEDIA_TYPE_VIDEO:
3279 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3280 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3281 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3282 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3283 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3284 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3285 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale already-parsed forced keyframe times into the encoder tb. */
3287 for (j = 0; j < ost->forced_kf_count; j++)
3288 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3290 enc_ctx->time_base);
3292 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3293 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3294 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3295 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3296 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3297 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
/* Warn when libx264/mpeg2video auto-pick a non-yuv420p pixel format. */
3298 if (!strncmp(ost->enc->name, "libx264", 7) &&
3299 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3300 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3301 av_log(NULL, AV_LOG_WARNING,
3302 "No pixel format specified, %s for H.264 encoding chosen.\n"
3303 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3304 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3305 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3306 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3307 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3308 av_log(NULL, AV_LOG_WARNING,
3309 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3310 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3311 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3312 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3314 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3315 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3317 ost->st->avg_frame_rate = ost->frame_rate;
/* bits_per_raw_sample from the user only applies when the frame is not
 * resized/reformatted relative to the decoder (visible condition tail). */
3320 enc_ctx->width != dec_ctx->width ||
3321 enc_ctx->height != dec_ctx->height ||
3322 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3323 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:..." is compiled once here; "source" is
 * handled at encode time; anything else is a static timestamp list. */
3326 if (ost->forced_keyframes) {
3327 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3328 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3329 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3331 av_log(NULL, AV_LOG_ERROR,
3332 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3335 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3336 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3337 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3338 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3340 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3341 // parse it only for static kf timings
3342 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3343 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3347 case AVMEDIA_TYPE_SUBTITLE:
3348 enc_ctx->time_base = AV_TIME_BASE_Q;
/* Default subtitle canvas size to the input's dimensions. */
3349 if (!enc_ctx->width) {
3350 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3351 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3354 case AVMEDIA_TYPE_DATA:
3361 ost->mux_timebase = enc_ctx->time_base;
3366 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3370 if (ost->encoding_needed) {
3371 AVCodec *codec = ost->enc;
3372 AVCodecContext *dec = NULL;
3375 ret = init_output_stream_encode(ost);
3379 if ((ist = get_input_stream(ost)))
3381 if (dec && dec->subtitle_header) {
3382 /* ASS code assumes this buffer is null terminated so add extra byte. */
3383 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3384 if (!ost->enc_ctx->subtitle_header)
3385 return AVERROR(ENOMEM);
3386 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3387 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3389 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3390 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3391 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3393 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3394 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3395 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3397 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3398 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3399 if (!ost->enc_ctx->hw_frames_ctx)
3400 return AVERROR(ENOMEM);
3403 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3404 if (ret == AVERROR_EXPERIMENTAL)
3405 abort_codec_experimental(codec, 1);
3406 snprintf(error, error_len,
3407 "Error while opening encoder for output stream #%d:%d - "
3408 "maybe incorrect parameters such as bit_rate, rate, width or height",
3409 ost->file_index, ost->index);
3412 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3413 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3414 av_buffersink_set_frame_size(ost->filter->filter,
3415 ost->enc_ctx->frame_size);
3416 assert_avoptions(ost->encoder_opts);
3417 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3418 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3419 " It takes bits/s as argument, not kbits/s\n");
3421 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3423 av_log(NULL, AV_LOG_FATAL,
3424 "Error initializing the output stream codec context.\n");
3428 * FIXME: ost->st->codec should't be needed here anymore.
3430 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3434 if (ost->enc_ctx->nb_coded_side_data) {
3437 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3438 sizeof(*ost->st->side_data));
3439 if (!ost->st->side_data)
3440 return AVERROR(ENOMEM);
3442 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3443 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3444 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3446 sd_dst->data = av_malloc(sd_src->size);
3448 return AVERROR(ENOMEM);
3449 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3450 sd_dst->size = sd_src->size;
3451 sd_dst->type = sd_src->type;
3452 ost->st->nb_side_data++;
3456 // copy timebase while removing common factors
3457 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3458 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3460 // copy estimated duration as a hint to the muxer
3461 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3462 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3464 ost->st->codec->codec= ost->enc_ctx->codec;
3465 } else if (ost->stream_copy) {
3466 ret = init_output_stream_streamcopy(ost);
3471 * FIXME: will the codec context used by the parser during streamcopy
3472 * This should go away with the new parser API.
3474 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3479 // parse user provided disposition, and update stream values
3480 if (ost->disposition) {
3481 static const AVOption opts[] = {
3482 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3483 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3484 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3485 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3486 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3487 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3488 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3489 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3490 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3491 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3492 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3493 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3494 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3495 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3498 static const AVClass class = {
3500 .item_name = av_default_item_name,
3502 .version = LIBAVUTIL_VERSION_INT,
3504 const AVClass *pclass = &class;
3506 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3511 /* initialize bitstream filters for the output stream
3512 * needs to be done here, because the codec id for streamcopy is not
3513 * known until now */
3514 ret = init_output_bsfs(ost);
3518 ost->initialized = 1;
3520 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3527 static void report_new_stream(int input_index, AVPacket *pkt)
3529 InputFile *file = input_files[input_index];
3530 AVStream *st = file->ctx->streams[pkt->stream_index];
3532 if (pkt->stream_index < file->nb_streams_warn)
3534 av_log(file->ctx, AV_LOG_WARNING,
3535 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3536 av_get_media_type_string(st->codecpar->codec_type),
3537 input_index, pkt->stream_index,
3538 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3539 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * One-time setup performed before the main transcoding loop:
 *   - bind each complex-filtergraph output to a source input stream index
 *     (single-input graphs only) so stream mapping can be reported;
 *   - arm the framerate-emulation (-re) start clocks;
 *   - initialize all input streams (open decoders);
 *   - initialize output streams not fed by filtergraphs (those wait for a
 *     first frame);
 *   - mark fully-unused input programs as discarded;
 *   - write headers for outputs whose muxer needs no streams;
 *   - print the human-readable "Stream mapping:" report.
 * Returns 0 on success, a negative AVERROR on failure.
 *
 * NOTE(review): this listing is elided (embedded original line numbers are
 * non-contiguous); some statements and braces are missing from view.
 */
3542 static int transcode_init(void)
3544 int ret = 0, i, j, k;
3545 AVFormatContext *oc;
3548 char error[1024] = {0};
/* Bind complex-filtergraph outputs to an input stream index. */
3550 for (i = 0; i < nb_filtergraphs; i++) {
3551 FilterGraph *fg = filtergraphs[i];
3552 for (j = 0; j < fg->nb_outputs; j++) {
3553 OutputFilter *ofilter = fg->outputs[j];
3554 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3556 if (fg->nb_inputs != 1)
3558 for (k = nb_input_streams-1; k >= 0 ; k--)
3559 if (fg->inputs[0]->ist == input_streams[k])
3561 ofilter->ost->source_index = k;
3565 /* init framerate emulation */
3566 for (i = 0; i < nb_input_files; i++) {
3567 InputFile *ifile = input_files[i];
3568 if (ifile->rate_emu)
3569 for (j = 0; j < ifile->nb_streams; j++)
/* Per-stream wall-clock start used by -re pacing in get_input_packet(). */
3570 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3573 /* init input streams */
3574 for (i = 0; i < nb_input_streams; i++)
3575 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure close any encoder contexts opened so far before bailing out. */
3576 for (i = 0; i < nb_output_streams; i++) {
3577 ost = output_streams[i];
3578 avcodec_close(ost->enc_ctx);
3583 /* open each encoder */
3584 for (i = 0; i < nb_output_streams; i++) {
3585 // skip streams fed from filtergraphs until we have a frame for them
3586 if (output_streams[i]->filter)
3589 ret = init_output_stream(output_streams[i], error, sizeof(error));
3594 /* discard unused programs */
3595 for (i = 0; i < nb_input_files; i++) {
3596 InputFile *ifile = input_files[i];
3597 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3598 AVProgram *p = ifile->ctx->programs[j];
3599 int discard = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is actually used. */
3601 for (k = 0; k < p->nb_stream_indexes; k++)
3602 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3603 discard = AVDISCARD_DEFAULT;
3606 p->discard = discard;
3610 /* write headers for files with no streams */
3611 for (i = 0; i < nb_output_files; i++) {
3612 oc = output_files[i]->ctx;
3613 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3614 ret = check_init_output_file(output_files[i], i);
3621 /* dump the stream mapping */
3622 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
/* First, inputs feeding complex (non-simple) filtergraphs. */
3623 for (i = 0; i < nb_input_streams; i++) {
3624 ist = input_streams[i];
3626 for (j = 0; j < ist->nb_filters; j++) {
3627 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3628 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3629 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3630 ist->filters[j]->name);
3631 if (nb_filtergraphs > 1)
3632 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3633 av_log(NULL, AV_LOG_INFO, "\n");
/* Then one mapping line per output stream. */
3638 for (i = 0; i < nb_output_streams; i++) {
3639 ost = output_streams[i];
3641 if (ost->attachment_filename) {
3642 /* an attached file */
3643 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3644 ost->attachment_filename, ost->file_index, ost->index);
3648 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3649 /* output from a complex graph */
3650 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3651 if (nb_filtergraphs > 1)
3652 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3654 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3655 ost->index, ost->enc ? ost->enc->name : "?");
/* Plain input->output mapping; also show the sync source when it differs
 * from the data source. */
3659 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3660 input_streams[ost->source_index]->file_index,
3661 input_streams[ost->source_index]->st->index,
3664 if (ost->sync_ist != input_streams[ost->source_index])
3665 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3666 ost->sync_ist->file_index,
3667 ost->sync_ist->st->index);
3668 if (ost->stream_copy)
3669 av_log(NULL, AV_LOG_INFO, " (copy)");
/* For transcodes print "codec (decoder) -> codec (encoder)"; "native"
 * marks an implementation whose name equals the codec descriptor's name. */
3671 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3672 const AVCodec *out_codec = ost->enc;
3673 const char *decoder_name = "?";
3674 const char *in_codec_name = "?";
3675 const char *encoder_name = "?";
3676 const char *out_codec_name = "?";
3677 const AVCodecDescriptor *desc;
3680 decoder_name = in_codec->name;
3681 desc = avcodec_descriptor_get(in_codec->id);
3683 in_codec_name = desc->name;
3684 if (!strcmp(decoder_name, in_codec_name))
3685 decoder_name = "native";
3689 encoder_name = out_codec->name;
3690 desc = avcodec_descriptor_get(out_codec->id);
3692 out_codec_name = desc->name;
3693 if (!strcmp(encoder_name, out_codec_name))
3694 encoder_name = "native";
3697 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3698 in_codec_name, decoder_name,
3699 out_codec_name, encoder_name);
3701 av_log(NULL, AV_LOG_INFO, "\n");
/* Error path: print the message accumulated in 'error' above. */
3705 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3709 transcode_init_done = 1;
3714 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3715 static int need_output(void)
3719 for (i = 0; i < nb_output_streams; i++) {
3720 OutputStream *ost = output_streams[i];
3721 OutputFile *of = output_files[ost->file_index];
3722 AVFormatContext *os = output_files[ost->file_index]->ctx;
3724 if (ost->finished ||
3725 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3727 if (ost->frame_number >= ost->max_frames) {
3729 for (j = 0; j < of->ctx->nb_streams; j++)
3730 close_output_stream(output_streams[of->ost_index + j]);
3741 * Select the output stream to process.
3743 * @return selected output stream, or NULL if none available
3745 static OutputStream *choose_output(void)
3748 int64_t opts_min = INT64_MAX;
3749 OutputStream *ost_min = NULL;
3751 for (i = 0; i < nb_output_streams; i++) {
3752 OutputStream *ost = output_streams[i];
3753 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3754 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3756 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3757 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3759 if (!ost->initialized && !ost->inputs_done)
3762 if (!ost->finished && opts < opts_min) {
3764 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin, used around
 * interactive command entry; a no-op where termios is unavailable. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) == 0) {
        if (on) tty.c_lflag |= ECHO;
        else    tty.c_lflag &= ~ECHO;
        tcsetattr(0, TCSANOW, &tty);
    }
#endif
}
/*
 * Poll stdin for interactive single-key commands (at most every 100ms,
 * never in daemon mode).  Handles: 'q' (quit), '+'/'-' (verbosity),
 * 's' (QP histogram), 'h' (hex/packet dump cycling), 'c'/'C' (send or
 * queue a command to matching filters), 'd'/'D' (debug modes), '?' (help).
 * Returns AVERROR_EXIT to request termination, 0 otherwise.
 *
 * NOTE(review): this listing is elided (embedded line numbers are
 * non-contiguous); several statements/braces are missing from view.
 */
3782 static int check_keyboard_interaction(int64_t cur_time)
3785 static int64_t last_time;
3786 if (received_nb_signals)
3787 return AVERROR_EXIT;
3788 /* read_key() returns 0 on EOF */
3789 if(cur_time - last_time >= 100000 && !run_as_daemon){
3791 last_time = cur_time;
3795 return AVERROR_EXIT;
3796 if (key == '+') av_log_set_level(av_log_get_level()+10);
3797 if (key == '-') av_log_set_level(av_log_get_level()-10);
3798 if (key == 's') qp_hist ^= 1;
3801 do_hex_dump = do_pkt_dump = 0;
3802 } else if(do_pkt_dump){
3806 av_log_set_level(AV_LOG_DEBUG);
/* 'c': send command to first matching filter; 'C': send/queue to all. */
3808 if (key == 'c' || key == 'C'){
3809 char buf[4096], target[64], command[256], arg[256] = {0};
3812 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read a line of input character by character via read_key(). */
3815 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3820 fprintf(stderr, "\n");
3822 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3823 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3824 target, time, command, arg);
3825 for (i = 0; i < nb_filtergraphs; i++) {
3826 FilterGraph *fg = filtergraphs[i];
3829 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3830 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3831 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3832 } else if (key == 'c') {
3833 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3834 ret = AVERROR_PATCHWELCOME;
3836 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3838 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3843 av_log(NULL, AV_LOG_ERROR,
3844 "Parse error, at least 3 arguments were expected, "
3845 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle debug modes; 'D': prompt for an explicit debug value. */
3848 if (key == 'd' || key == 'D'){
3851 debug = input_streams[0]->st->codec->debug<<1;
3852 if(!debug) debug = 1;
3853 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3860 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3865 fprintf(stderr, "\n");
3866 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3867 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3869 for(i=0;i<nb_input_streams;i++) {
3870 input_streams[i]->st->codec->debug = debug;
3872 for(i=0;i<nb_output_streams;i++) {
3873 OutputStream *ost = output_streams[i];
3874 ost->enc_ctx->debug = debug;
3876 if(debug) av_log_set_level(AV_LOG_DEBUG);
3877 fprintf(stderr,"debug=%d\n", debug);
/* '?' (or unknown key): print the interactive help text. */
3880 fprintf(stderr, "key function\n"
3881 "? show this help\n"
3882 "+ increase verbosity\n"
3883 "- decrease verbosity\n"
3884 "c Send command to first matching filter supporting it\n"
3885 "C Send/Queue command to all matching filters\n"
3886 "D cycle through available debug modes\n"
3887 "h dump packets/hex press to cycle through the 3 states\n"
3889 "s Show QP histogram\n"
3896 static void *input_thread(void *arg)
3899 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3904 ret = av_read_frame(f->ctx, &pkt);
3906 if (ret == AVERROR(EAGAIN)) {
3911 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3914 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3915 if (flags && ret == AVERROR(EAGAIN)) {
3917 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3918 av_log(f->ctx, AV_LOG_WARNING,
3919 "Thread message queue blocking; consider raising the "
3920 "thread_queue_size option (current value: %d)\n",
3921 f->thread_queue_size);
3924 if (ret != AVERROR_EOF)
3925 av_log(f->ctx, AV_LOG_ERROR,
3926 "Unable to send packet to main thread: %s\n",
3928 av_packet_unref(&pkt);
3929 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3937 static void free_input_threads(void)
3941 for (i = 0; i < nb_input_files; i++) {
3942 InputFile *f = input_files[i];
3945 if (!f || !f->in_thread_queue)
3947 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3948 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3949 av_packet_unref(&pkt);
3951 pthread_join(f->thread, NULL);
3953 av_thread_message_queue_free(&f->in_thread_queue);
3957 static int init_input_threads(void)
3961 if (nb_input_files == 1)
3964 for (i = 0; i < nb_input_files; i++) {
3965 InputFile *f = input_files[i];
3967 if (f->ctx->pb ? !f->ctx->pb->seekable :
3968 strcmp(f->ctx->iformat->name, "lavfi"))
3969 f->non_blocking = 1;
3970 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3971 f->thread_queue_size, sizeof(AVPacket));
3975 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3976 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3977 av_thread_message_queue_free(&f->in_thread_queue);
3978 return AVERROR(ret);
3984 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3986 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3988 AV_THREAD_MESSAGE_NONBLOCK : 0);
3992 static int get_input_packet(InputFile *f, AVPacket *pkt)
3996 for (i = 0; i < f->nb_streams; i++) {
3997 InputStream *ist = input_streams[f->ist_index + i];
3998 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3999 int64_t now = av_gettime_relative() - ist->start;
4001 return AVERROR(EAGAIN);
4006 if (nb_input_files > 1)
4007 return get_input_packet_mt(f, pkt);
4009 return av_read_frame(f->ctx, pkt);
4012 static int got_eagain(void)
4015 for (i = 0; i < nb_output_streams; i++)
4016 if (output_streams[i]->unavailable)
4021 static void reset_eagain(void)
4024 for (i = 0; i < nb_input_files; i++)
4025 input_files[i]->eagain = 0;
4026 for (i = 0; i < nb_output_streams; i++)
4027 output_streams[i]->unavailable = 0;
4030 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4031 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4032 AVRational time_base)
4038 return tmp_time_base;
4041 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4044 return tmp_time_base;
/*
 * Rewind an input file to its start for -stream_loop: seek back, flush all
 * active decoders, then estimate one full-file duration (stream duration
 * plus the last frame's length) and fold it into ifile->duration via
 * duration_max() so looped timestamps can be offset correctly.
 *
 * NOTE(review): this listing is elided (embedded line numbers are
 * non-contiguous); some statements/braces are missing from view.
 */
4050 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4053 AVCodecContext *avctx;
4054 int i, ret, has_audio = 0;
4055 int64_t duration = 0;
4057 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush decoders and detect whether usable audio exists. */
4061 for (i = 0; i < ifile->nb_streams; i++) {
4062 ist = input_streams[ifile->ist_index + i];
4063 avctx = ist->dec_ctx;
4066 if (ist->decoding_needed) {
4067 process_input_packet(ist, NULL, 1);
4068 avcodec_flush_buffers(avctx);
4071 /* duration is the length of the last frame in a stream
4072 * when audio stream is present we don't care about
4073 * last video frame length because it's not defined exactly */
4074 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's last-frame duration. */
4078 for (i = 0; i < ifile->nb_streams; i++) {
4079 ist = input_streams[ifile->ist_index + i];
4080 avctx = ist->dec_ctx;
/* Audio: last frame length derives from the decoded sample count. */
4083 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4084 AVRational sample_rate = {1, avctx->sample_rate};
4086 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: use the forced framerate, else the average frame rate, else 1. */
4090 if (ist->framerate.num) {
4091 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4092 } else if (ist->st->avg_frame_rate.num) {
4093 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4094 } else duration = 1;
4096 if (!ifile->duration)
4097 ifile->time_base = ist->st->time_base;
4098 /* the total duration of the stream, max_pts - min_pts is
4099 * the duration of the stream without the last frame */
4100 duration += ist->max_pts - ist->min_pts;
4101 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Decrement the remaining loop count when it is finite (> 0). */
4105 if (ifile->loop > 0)
4113 * - 0 -- one packet was read and processed
4114 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4115 * this function should be called again
4116 * - AVERROR_EOF -- this function should not be called again
/*
 * Read and process one packet from the given input file.  Handles, in
 * order: loop-rewind on EOF (-stream_loop), EOF bookkeeping (flushing
 * decoders, finishing non-lavfi outputs), new-stream detection, corrupt
 * packet policy, timestamp wrap correction, stream-global side data
 * injection, ts_offset/ts_scale application, discontinuity handling, and
 * finally hands the packet to process_input_packet().
 * Returns 0 when a packet was processed, AVERROR(EAGAIN) when none was
 * available (or EOF was just reached), AVERROR_EOF when the file is done.
 *
 * NOTE(review): this listing is elided (embedded line numbers are
 * non-contiguous); some statements and braces are missing from view.
 */
4118 static int process_input(int file_index)
4120 InputFile *ifile = input_files[file_index];
4121 AVFormatContext *is;
4129 ret = get_input_packet(ifile, &pkt);
4131 if (ret == AVERROR(EAGAIN)) {
/* On read failure with -stream_loop active, rewind and retry once. */
4135 if (ret < 0 && ifile->loop) {
4136 if ((ret = seek_to_start(ifile, is)) < 0)
4138 ret = get_input_packet(ifile, &pkt);
4139 if (ret == AVERROR(EAGAIN)) {
4145 if (ret != AVERROR_EOF) {
4146 print_error(is->filename, ret);
/* EOF: flush every decoder of this file... */
4151 for (i = 0; i < ifile->nb_streams; i++) {
4152 ist = input_streams[ifile->ist_index + i];
4153 if (ist->decoding_needed) {
4154 ret = process_input_packet(ist, NULL, 0);
4159 /* mark all outputs that don't go through lavfi as finished */
4160 for (j = 0; j < nb_output_streams; j++) {
4161 OutputStream *ost = output_streams[j];
4163 if (ost->source_index == ifile->ist_index + i &&
4164 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4165 finish_output_stream(ost);
/* ...then report EAGAIN so the scheduler moves to other inputs. */
4169 ifile->eof_reached = 1;
4170 return AVERROR(EAGAIN);
4176 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4177 is->streams[pkt.stream_index]);
4179 /* the following test is needed in case new streams appear
4180 dynamically in stream : we ignore them */
4181 if (pkt.stream_index >= ifile->nb_streams) {
4182 report_new_stream(file_index, &pkt);
4183 goto discard_packet;
4186 ist = input_streams[ifile->ist_index + pkt.stream_index];
4188 ist->data_size += pkt.size;
4192 goto discard_packet;
4194 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4195 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* Verbose debug dump of pre-correction demuxer timestamps. */
4200 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4201 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4202 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4203 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4204 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4205 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4206 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4207 av_ts2str(input_files[ist->file_index]->ts_offset),
4208 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for formats with limited pts_wrap_bits. */
4211 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4212 int64_t stime, stime2;
4213 // Correcting starttime based on the enabled streams
4214 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4215 // so we instead do it here as part of discontinuity handling
4216 if ( ist->next_dts == AV_NOPTS_VALUE
4217 && ifile->ts_offset == -is->start_time
4218 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4219 int64_t new_start_time = INT64_MAX;
4220 for (i=0; i<is->nb_streams; i++) {
4221 AVStream *st = is->streams[i];
4222 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4224 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4226 if (new_start_time > is->start_time) {
4227 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4228 ifile->ts_offset = -new_start_time;
4232 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4233 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4234 ist->wrap_correction_done = 1;
/* Undo an apparent wrap on dts/pts; leaving wrap_correction_done at 0
 * lets the check run again on later packets of this stream. */
4236 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4237 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4238 ist->wrap_correction_done = 0;
4240 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4241 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4242 ist->wrap_correction_done = 0;
4246 /* add the stream-global side data to the first packet */
4247 if (ist->nb_packets == 1) {
4248 if (ist->st->nb_side_data)
4249 av_packet_split_side_data(&pkt);
4250 for (i = 0; i < ist->st->nb_side_data; i++) {
4251 AVPacketSideData *src_sd = &ist->st->side_data[i];
4254 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Display matrix is handled by autorotate, skip copying it here. */
4256 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4259 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4263 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the file ts_offset, then the per-stream -itsscale factor. */
4267 if (pkt.dts != AV_NOPTS_VALUE)
4268 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4269 if (pkt.pts != AV_NOPTS_VALUE)
4270 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4272 if (pkt.pts != AV_NOPTS_VALUE)
4273 pkt.pts *= ist->ts_scale;
4274 if (pkt.dts != AV_NOPTS_VALUE)
4275 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check against the file's last seen ts. */
4277 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4278 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4279 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4280 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4281 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4282 int64_t delta = pkt_dts - ifile->last_ts;
4283 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4284 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4285 ifile->ts_offset -= delta;
4286 av_log(NULL, AV_LOG_DEBUG,
4287 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4288 delta, ifile->ts_offset);
4289 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4290 if (pkt.pts != AV_NOPTS_VALUE)
4291 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Shift timestamps by the accumulated loop duration (-stream_loop). */
4295 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4296 if (pkt.pts != AV_NOPTS_VALUE) {
4297 pkt.pts += duration;
4298 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4299 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4302 if (pkt.dts != AV_NOPTS_VALUE)
4303 pkt.dts += duration;
/* Per-stream discontinuity check against the predicted next_dts. */
4305 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4306 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4307 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4308 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4310 int64_t delta = pkt_dts - ist->next_dts;
4311 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4312 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4313 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4314 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4315 ifile->ts_offset -= delta;
4316 av_log(NULL, AV_LOG_DEBUG,
4317 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4318 delta, ifile->ts_offset);
4319 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4320 if (pkt.pts != AV_NOPTS_VALUE)
4321 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-DISCONT formats: drop wildly implausible dts/pts instead. */
4324 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4325 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4326 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4327 pkt.dts = AV_NOPTS_VALUE;
4329 if (pkt.pts != AV_NOPTS_VALUE){
4330 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4331 delta = pkt_pts - ist->next_dts;
4332 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4333 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4334 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4335 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last dts seen for the inter-stream check above. */
4341 if (pkt.dts != AV_NOPTS_VALUE)
4342 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4345 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4346 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4347 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4348 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4349 av_ts2str(input_files[ist->file_index]->ts_offset),
4350 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4353 sub2video_heartbeat(ist, pkt.pts);
4355 process_input_packet(ist, &pkt, 0);
4358 av_packet_unref(&pkt);
4364 * Perform a step of transcoding for the specified filter graph.
4366 * @param[in] graph filter graph to consider
4367 * @param[out] best_ist input stream where a frame would allow to continue
4368 * @return 0 for success, <0 for error
4370 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4373 int nb_requests, nb_requests_max = 0;
4374 InputFilter *ifilter;
4378 ret = avfilter_graph_request_oldest(graph->graph);
4380 return reap_filters(0);
4382 if (ret == AVERROR_EOF) {
4383 ret = reap_filters(1);
4384 for (i = 0; i < graph->nb_outputs; i++)
4385 close_output_stream(graph->outputs[i]->ost);
4388 if (ret != AVERROR(EAGAIN))
4391 for (i = 0; i < graph->nb_inputs; i++) {
4392 ifilter = graph->inputs[i];
4394 if (input_files[ist->file_index]->eagain ||
4395 input_files[ist->file_index]->eof_reached)
4397 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4398 if (nb_requests > nb_requests_max) {
4399 nb_requests_max = nb_requests;
4405 for (i = 0; i < graph->nb_outputs; i++)
4406 graph->outputs[i]->ost->unavailable = 1;
4412 * Run a single step of transcoding.
4414 * @return 0 for success, <0 for error
/*
 * Run a single step of transcoding: pick the neediest output stream via
 * choose_output(), (re)configure its filtergraph once all input formats
 * are known, then either advance its filtergraph or read one packet from
 * its source input file, and finally reap any filtered frames.
 * Returns 0 for success, <0 for error.
 *
 * NOTE(review): this listing is elided (embedded line numbers are
 * non-contiguous); some statements/braces are missing from view.
 */
4416 static int transcode_step(void)
4419 InputStream *ist = NULL;
4422 ost = choose_output();
4429 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Graph exists but is not configured yet: configure it as soon as all of
 * its inputs know their formats. */
4433 if (ost->filter && !ost->filter->graph->graph) {
4434 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4435 ret = configure_filtergraph(ost->filter->graph);
4437 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* Configured graph: let it choose which input stream to feed next. */
4443 if (ost->filter && ost->filter->graph->graph) {
4444 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Unconfigured graph: feed any input that has produced no output yet and
 * whose file is not exhausted; otherwise the stream's inputs are done. */
4448 } else if (ost->filter) {
4450 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4451 InputFilter *ifilter = ost->filter->graph->inputs[i];
4452 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4458 ost->inputs_done = 1;
/* No filtergraph (streamcopy/subtitles): read from the mapped source. */
4462 av_assert0(ost->source_index >= 0);
4463 ist = input_streams[ost->source_index];
4466 ret = process_input(ist->file_index);
4467 if (ret == AVERROR(EAGAIN)) {
4468 if (input_files[ist->file_index]->eagain)
4469 ost->unavailable = 1;
/* EOF from process_input is not an error at this level. */
4474 return ret == AVERROR_EOF ? 0 : ret;
4476 return reap_filters(0);
4480 * The following code is the main loop of the file converter
/*
 * Main loop of the converter: after transcode_init(), repeatedly run
 * transcode_step() until a termination signal, keyboard quit, or no output
 * stream needs more data; then flush decoders, write trailers, close
 * encoders/decoders and free per-stream resources.  Returns 0 on success
 * or a negative AVERROR.
 *
 * NOTE(review): this listing is elided (embedded line numbers are
 * non-contiguous); some statements/braces are missing from view.
 */
4482 static int transcode(void)
4485 AVFormatContext *os;
4488 int64_t timer_start;
4489 int64_t total_packets_written = 0;
4491 ret = transcode_init();
4495 if (stdin_interaction) {
4496 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4499 timer_start = av_gettime_relative();
/* Start per-input reader threads (only used with multiple inputs). */
4502 if ((ret = init_input_threads()) < 0)
4506 while (!received_sigterm) {
4507 int64_t cur_time= av_gettime_relative();
4509 /* if 'q' pressed, exits */
4510 if (stdin_interaction)
4511 if (check_keyboard_interaction(cur_time) < 0)
4514 /* check if there's any stream where output is still needed */
4515 if (!need_output()) {
4516 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4520 ret = transcode_step();
4521 if (ret < 0 && ret != AVERROR_EOF) {
4523 av_strerror(ret, errbuf, sizeof(errbuf));
4525 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4529 /* dump report by using the output first video and audio streams */
4530 print_report(0, timer_start, cur_time);
4533 free_input_threads();
4536 /* at the end of stream, we must flush the decoder buffers */
4537 for (i = 0; i < nb_input_streams; i++) {
4538 ist = input_streams[i];
4539 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4540 process_input_packet(ist, NULL, 0);
4547 /* write the trailer if needed and close file */
4548 for (i = 0; i < nb_output_files; i++) {
4549 os = output_files[i]->ctx;
4550 if (!output_files[i]->header_written) {
4551 av_log(NULL, AV_LOG_ERROR,
4552 "Nothing was written into output file %d (%s), because "
4553 "at least one of its streams received no packets.\n",
4557 if ((ret = av_write_trailer(os)) < 0) {
4558 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4564 /* dump report by using the first video and audio streams */
4565 print_report(1, timer_start, av_gettime_relative());
4567 /* close each encoder */
4568 for (i = 0; i < nb_output_streams; i++) {
4569 ost = output_streams[i];
4570 if (ost->encoding_needed) {
4571 av_freep(&ost->enc_ctx->stats_in);
4573 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was ever written. */
4576 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4577 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4581 /* close each decoder */
4582 for (i = 0; i < nb_input_streams; i++) {
4583 ist = input_streams[i];
4584 if (ist->decoding_needed) {
4585 avcodec_close(ist->dec_ctx);
4586 if (ist->hwaccel_uninit)
4587 ist->hwaccel_uninit(ist->dec_ctx);
4591 av_buffer_unref(&hw_device_ctx);
/* Cleanup path shared by success and failure. */
4598 free_input_threads();
4601 if (output_streams) {
4602 for (i = 0; i < nb_output_streams; i++) {
4603 ost = output_streams[i];
/* Close the 2-pass stats logfile, reporting (not swallowing) errors. */
4606 if (fclose(ost->logfile))
4607 av_log(NULL, AV_LOG_ERROR,
4608 "Error closing logfile, loss of information possible: %s\n",
4609 av_err2str(AVERROR(errno)));
4610 ost->logfile = NULL;
4612 av_freep(&ost->forced_kf_pts);
4613 av_freep(&ost->apad);
4614 av_freep(&ost->disposition);
4615 av_dict_free(&ost->encoder_opts);
4616 av_dict_free(&ost->sws_dict);
4617 av_dict_free(&ost->swr_opts);
4618 av_dict_free(&ost->resample_opts);
/* CPU user time consumed by this process, in microseconds.  Uses
 * getrusage() where available, GetProcessTimes() on Windows, and falls
 * back to wall-clock time otherwise (used only for the -benchmark report). */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME is in 100ns units; divide by 10 for microseconds. */
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
4644 static int64_t getmaxrss(void)
4646 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4647 struct rusage rusage;
4648 getrusage(RUSAGE_SELF, &rusage);
4649 return (int64_t)rusage.ru_maxrss * 1024;
4650 #elif HAVE_GETPROCESSMEMORYINFO
4652 PROCESS_MEMORY_COUNTERS memcounters;
4653 proc = GetCurrentProcess();
4654 memcounters.cb = sizeof(memcounters);
4655 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4656 return memcounters.PeakPagefileUsage;
4662 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: install the cleanup handler, configure logging,
 * handle "-d" daemon mode, register all components, parse the command
 * line (which opens every input and output file), validate that at least
 * one output exists, run transcode(), and exit with the benchmark report
 * and decode-error-rate check applied.
 *
 * NOTE(review): this listing is elided (embedded line numbers are
 * non-contiguous); some statements/braces are missing from view.
 */
4666 int main(int argc, char **argv)
4673 register_exit(ffmpeg_cleanup);
4675 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4677 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4678 parse_loglevel(argc, argv, options);
/* "-d" as first argument: run as daemon with logging suppressed. */
4680 if(argc>1 && !strcmp(argv[1], "-d")){
4682 av_log_set_callback(log_callback_null);
/* Register codecs, devices, filters and network support. */
4687 avcodec_register_all();
4689 avdevice_register_all();
4691 avfilter_register_all();
4693 avformat_network_init();
4695 show_banner(argc, argv, options);
4697 /* parse options and open all input/output files */
4698 ret = ffmpeg_parse_options(argc, argv);
4702 if (nb_output_files <= 0 && nb_input_files == 0) {
4704 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4708 /* file converter / grab */
4709 if (nb_output_files <= 0) {
4710 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4714 // if (nb_input_files == 0) {
4715 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Non-RTP outputs make it safe to print benchmark info to stdout. */
4719 for (i = 0; i < nb_output_files; i++) {
4720 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4724 current_time = ti = getutime();
4725 if (transcode() < 0)
4727 ti = getutime() - ti;
4729 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4731 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4732 decode_error_stat[0], decode_error_stat[1]);
/* Exit with failure if the decode error ratio exceeds -max_error_rate. */
4733 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4736 exit_program(received_nb_signals ? 255 : main_return_code);
4737 return main_return_code;