2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* NOTE(review): this chunk is a garbled extract — every line carries a stray
   leading original-line number and some lines are missing (e.g. the
   forced_keyframes_const_names initializer below is truncated). Code kept
   byte-identical; comments only. */
/* Program identity used by cmdutils for banners/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Two-pass/statistics log file opened on demand (see do_video_stats). */
112 static FILE *vstats_file;
/* Names of the constants usable in -force_key_frames expressions.
   NOTE(review): initializer entries elided by the extraction. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Run-time behaviour flags and frame-duplication/drop accounting. */
128 static int run_as_daemon = 0;
129 static int nb_frames_dup = 0;
130 static unsigned dup_warning = 1000;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
/* Whether an SDP should still be printed for RTP outputs. */
134 static int want_sdp = 1;
/* Timestamp baseline for update_benchmark() deltas. */
136 static int current_time;
/* Destination for -progress reports. */
137 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets (do_subtitle_out). */
139 static uint8_t *subtitle_out;
/* Global registries of inputs/outputs/filtergraphs shared with
   ffmpeg_opt.c / ffmpeg_filter.c (hence non-static). */
141 InputStream **input_streams = NULL;
142 int nb_input_streams = 0;
143 InputFile **input_files = NULL;
144 int nb_input_files = 0;
146 OutputStream **output_streams = NULL;
147 int nb_output_streams = 0;
148 OutputFile **output_files = NULL;
149 int nb_output_files = 0;
151 FilterGraph **filtergraphs;
156 /* init terminal so that we can grab keys */
/* Saved terminal state, restored by term_exit_sigsafe() when restore_tty. */
157 static struct termios oldtty;
158 static int restore_tty;
162 static void free_input_threads(void);
166 Convert subtitles to video with alpha to insert them in filter graphs.
167 This is a temporary solution until libavfilter gets real subtitles support.
170 static int sub2video_get_blank_frame(InputStream *ist)
173 AVFrame *frame = ist->sub2video.frame;
175 av_frame_unref(frame);
176 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
177 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
178 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
179 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
181 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
188 uint32_t *pal, *dst2;
192 if (r->type != SUBTITLE_BITMAP) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
196 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
198 r->x, r->y, r->w, r->h, w, h
203 dst += r->y * dst_linesize + r->x * 4;
205 pal = (uint32_t *)r->data[1];
206 for (y = 0; y < r->h; y++) {
207 dst2 = (uint32_t *)dst;
209 for (x = 0; x < r->w; x++)
210 *(dst2++) = pal[*(src2++)];
212 src += r->linesize[0];
216 static void sub2video_push_ref(InputStream *ist, int64_t pts)
218 AVFrame *frame = ist->sub2video.frame;
221 av_assert1(frame->data[0]);
222 ist->sub2video.last_pts = frame->pts = pts;
223 for (i = 0; i < ist->nb_filters; i++)
224 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
225 AV_BUFFERSRC_FLAG_KEEP_REF |
226 AV_BUFFERSRC_FLAG_PUSH);
229 void sub2video_update(InputStream *ist, AVSubtitle *sub)
231 AVFrame *frame = ist->sub2video.frame;
235 int64_t pts, end_pts;
240 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
241 AV_TIME_BASE_Q, ist->st->time_base);
242 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
243 AV_TIME_BASE_Q, ist->st->time_base);
244 num_rects = sub->num_rects;
246 pts = ist->sub2video.end_pts;
250 if (sub2video_get_blank_frame(ist) < 0) {
251 av_log(ist->dec_ctx, AV_LOG_ERROR,
252 "Impossible to get a blank canvas.\n");
255 dst = frame->data [0];
256 dst_linesize = frame->linesize[0];
257 for (i = 0; i < num_rects; i++)
258 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
259 sub2video_push_ref(ist, pts);
260 ist->sub2video.end_pts = end_pts;
263 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
265 InputFile *infile = input_files[ist->file_index];
269 /* When a frame is read from a file, examine all sub2video streams in
270 the same file and send the sub2video frame again. Otherwise, decoded
271 video frames could be accumulating in the filter graph while a filter
272 (possibly overlay) is desperately waiting for a subtitle frame. */
273 for (i = 0; i < infile->nb_streams; i++) {
274 InputStream *ist2 = input_streams[infile->ist_index + i];
275 if (!ist2->sub2video.frame)
277 /* subtitles seem to be usually muxed ahead of other streams;
278 if not, subtracting a larger time here is necessary */
279 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
280 /* do not send the heartbeat frame if the subtitle is already ahead */
281 if (pts2 <= ist2->sub2video.last_pts)
283 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
284 sub2video_update(ist2, NULL);
285 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
286 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
288 sub2video_push_ref(ist2, pts2);
292 static void sub2video_flush(InputStream *ist)
296 if (ist->sub2video.end_pts < INT64_MAX)
297 sub2video_update(ist, NULL);
298 for (i = 0; i < ist->nb_filters; i++)
299 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
302 /* end of sub2video hack */
/* Async-signal-safe part of terminal restoration: put the tty attributes
   saved in `oldtty` back.
   NOTE(review): garbled extract — stray leading numbers, and the braces plus
   the `if (restore_tty)` guard and platform `#if` guards are elided; code
   kept byte-identical. */
304 static void term_exit_sigsafe(void)
308 tcsetattr (0, TCSANOW, &oldtty);
/* Silent log call — presumably flushes any pending log line; confirm against
   full source. */
314 av_log(NULL, AV_LOG_QUIET, "%s", "")&#59;
/* Signal state shared with the handler below; volatile because they are
   written from signal context and read from the main loop. */
318 static volatile int received_sigterm = 0;
319 static volatile int received_nb_signals = 0;
320 static volatile int transcode_init_done = 0;
321 static volatile int ffmpeg_exited = 0;
322 static int main_return_code = 0;
325 sigterm_handler(int sig)
327 received_sigterm = sig;
328 received_nb_signals++;
330 if(received_nb_signals > 3) {
331 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
332 strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/**
 * Windows console control handler, registered via SetConsoleCtrlHandler().
 *
 * Ctrl-C / Ctrl-Break are mapped to SIGINT; close/logoff/shutdown to SIGTERM,
 * after which we deliberately stall until the main thread has finished
 * cleanup (Windows hard-kills the process when this handler returns).
 *
 * NOTE(review): the extract had dropped the switch header, CTRL_C_EVENT case,
 * Sleep() call, return statements and the closing #endif; restored per
 * upstream.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(100);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
/* Interior of term_init(): puts the controlling tty into raw-ish mode so
   single keypresses can be read, saves the old state for restoration, and
   installs the signal handlers.
   NOTE(review): the enclosing `static void term_init(void)` header and
   several lines (tcgetattr save into oldtty, restore_tty = 1, SIGPIPE/atexit
   handling, #if guards) are elided by the extraction — code kept
   byte-identical; confirm against the full source. */
373 if (!run_as_daemon && stdin_interaction) {
375 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so raw key bytes come through. */
379 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
380 |INLCR|IGNCR|ICRNL|IXON);
381 tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
382 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
383 tty.c_cflag &= ~(CSIZE|PARENB);
388 tcsetattr (0, TCSANOW, &tty);
/* Route termination-style signals through sigterm_handler above. */
390 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
394 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
395 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
397 signal(SIGXCPU, sigterm_handler);
399 #if HAVE_SETCONSOLECTRLHANDLER
400 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404 /* read a key without blocking */
/* NOTE(review): read_key() is only partially present — its POSIX branch
   (select on stdin then read one byte) and Windows branch (PeekNamedPipe /
   _kbhit for console vs pipe input) both have lines elided by the
   extraction. Code kept byte-identical; confirm details against the full
   source. */
405 static int read_key(void)
/* POSIX: poll fd 0 with a zero/short timeout, presumably followed by a
   1-byte read when ready — the surrounding fd_set/timeval setup is elided. */
417 n = select(1, &rfds, NULL, NULL, &tv);
426 # if HAVE_PEEKNAMEDPIPE
428 static HANDLE input_handle;
431 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails on pipes, which is how console vs pipe is detected. */
432 is_pipe = !GetConsoleMode(input_handle, &dw);
436 /* When running under a GUI, you will end here. */
437 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
438 // input pipe may have been closed by the program that ran ffmpeg
456 static int decode_interrupt_cb(void *ctx)
458 return received_nb_signals > transcode_init_done;
/* Interrupt callback handed to every AVFormatContext so blocking network/file
   operations can be aborted by decode_interrupt_cb() above. */
461 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, registered as the exit handler: frees filtergraphs,
   output files/streams, input files/streams and module-level buffers, then
   reports how the run ended.
   NOTE(review): garbled extract — stray leading line numbers, and many lines
   (declarations of i/j/frame/sub/pkt/s, `if (do_benchmark)`, closing braces,
   the final exit bookkeeping) are elided. Code kept byte-identical; comments
   only. */
463 static void ffmpeg_cleanup(int ret)
/* Optional benchmark report of peak resident set size. */
468 int maxrss = getmaxrss() / 1024;
469 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: drain queued frames/subtitles, then free all parts --- */
472 for (i = 0; i < nb_filtergraphs; i++) {
473 FilterGraph *fg = filtergraphs[i];
474 avfilter_graph_free(&fg->graph);
475 for (j = 0; j < fg->nb_inputs; j++) {
476 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
478 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
479 sizeof(frame), NULL);
480 av_frame_free(&frame);
482 av_fifo_free(fg->inputs[j]->frame_queue);
483 if (fg->inputs[j]->ist->sub2video.sub_queue) {
484 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
486 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
487 &sub, sizeof(sub), NULL);
488 avsubtitle_free(&sub);
490 av_fifo_free(fg->inputs[j]->ist->sub2video.sub_queue);
492 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
493 av_freep(&fg->inputs[j]->name);
494 av_freep(&fg->inputs[j]);
496 av_freep(&fg->inputs);
497 for (j = 0; j < fg->nb_outputs; j++) {
498 av_freep(&fg->outputs[j]->name);
499 av_freep(&fg->outputs[j]->formats);
500 av_freep(&fg->outputs[j]->channel_layouts);
501 av_freep(&fg->outputs[j]->sample_rates);
502 av_freep(&fg->outputs[j]);
504 av_freep(&fg->outputs);
505 av_freep(&fg->graph_desc);
507 av_freep(&filtergraphs[i]);
509 av_freep(&filtergraphs);
511 av_freep(&subtitle_out);
/* --- output files: close the muxer contexts and their options --- */
514 for (i = 0; i < nb_output_files; i++) {
515 OutputFile *of = output_files[i];
520 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
522 avformat_free_context(s);
523 av_dict_free(&of->opts);
525 av_freep(&output_files[i]);
/* --- output streams: bitstream filters, cached frames, encoder state --- */
527 for (i = 0; i < nb_output_streams; i++) {
528 OutputStream *ost = output_streams[i];
533 for (j = 0; j < ost->nb_bitstream_filters; j++)
534 av_bsf_free(&ost->bsf_ctx[j]);
535 av_freep(&ost->bsf_ctx);
536 av_freep(&ost->bsf_extradata_updated);
538 av_frame_free(&ost->filtered_frame);
539 av_frame_free(&ost->last_frame);
540 av_dict_free(&ost->encoder_opts);
542 av_parser_close(ost->parser);
543 avcodec_free_context(&ost->parser_avctx);
545 av_freep(&ost->forced_keyframes);
546 av_expr_free(ost->forced_keyframes_pexpr);
547 av_freep(&ost->avfilter);
548 av_freep(&ost->logfile_prefix);
550 av_freep(&ost->audio_channels_map);
551 ost->audio_channels_mapped = 0;
553 av_dict_free(&ost->sws_dict);
555 avcodec_free_context(&ost->enc_ctx);
556 avcodec_parameters_free(&ost->ref_par);
/* Packets still buffered for a muxer that never got initialized. */
558 while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
560 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
561 av_packet_unref(&pkt);
563 av_fifo_freep(&ost->muxing_queue);
565 av_freep(&output_streams[i]);
568 free_input_threads();
/* --- input files and streams --- */
570 for (i = 0; i < nb_input_files; i++) {
571 avformat_close_input(&input_files[i]->ctx);
572 av_freep(&input_files[i]);
574 for (i = 0; i < nb_input_streams; i++) {
575 InputStream *ist = input_streams[i];
577 av_frame_free(&ist->decoded_frame);
578 av_frame_free(&ist->filter_frame);
579 av_dict_free(&ist->decoder_opts);
580 avsubtitle_free(&ist->prev_sub.subtitle);
581 av_frame_free(&ist->sub2video.frame);
582 av_freep(&ist->filters);
583 av_freep(&ist->hwaccel_device);
584 av_freep(&ist->dts_buffer);
586 avcodec_free_context(&ist->dec_ctx);
588 av_freep(&input_streams[i]);
/* Two-pass statistics file: a failed close may mean lost data, so log it. */
592 if (fclose(vstats_file))
593 av_log(NULL, AV_LOG_ERROR,
594 "Error closing vstats file, loss of information possible: %s\n",
595 av_err2str(AVERROR(errno)));
597 av_freep(&vstats_filename);
/* Free the registry arrays themselves. */
599 av_freep(&input_streams);
600 av_freep(&input_files);
601 av_freep(&output_streams);
602 av_freep(&output_files);
606 avformat_network_deinit();
/* Final status: a signal-triggered exit is still a "normal" exit. */
608 if (received_sigterm) {
609 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
610 (int) received_sigterm);
611 } else if (ret && transcode_init_done) {
612 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
618 void remove_avoptions(AVDictionary **a, AVDictionary *b)
620 AVDictionaryEntry *t = NULL;
622 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
623 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
627 void assert_avoptions(AVDictionary *m)
629 AVDictionaryEntry *t;
630 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
631 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
636 static void abort_codec_experimental(AVCodec *c, int encoder)
641 static void update_benchmark(const char *fmt, ...)
643 if (do_benchmark_all) {
644 int64_t t = getutime();
650 vsnprintf(buf, sizeof(buf), fmt, va);
652 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
658 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
661 for (i = 0; i < nb_output_streams; i++) {
662 OutputStream *ost2 = output_streams[i];
663 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final step of the output chain: fix up a packet's timestamps and hand it to
   the muxer (or buffer it if the muxer header isn't written yet).
   NOTE(review): garbled extract — stray leading line numbers, and several
   lines (declarations of ret/i, exit_program calls, closing braces, parts of
   log statements, the debug_ts guard) are elided. Code kept byte-identical;
   comments only. */
667 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
669 AVFormatContext *s = of->ctx;
670 AVStream *st = ost->st;
/* Muxer not initialized yet: queue a ref of the packet, growing the FIFO up
   to max_muxing_queue_size before giving up. */
673 if (!of->header_written) {
674 AVPacket tmp_pkt = {0};
675 /* the muxer is not initialized yet, buffer the packet */
676 if (!av_fifo_space(ost->muxing_queue)) {
677 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
678 ost->max_muxing_queue_size);
679 if (new_size <= av_fifo_size(ost->muxing_queue)) {
680 av_log(NULL, AV_LOG_ERROR,
681 "Too many packets buffered for output stream %d:%d.\n",
682 ost->file_index, ost->st->index);
685 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
689 ret = av_packet_ref(&tmp_pkt, pkt);
692 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
693 av_packet_unref(pkt);
/* -vsync drop / negative -async: discard timestamps entirely. */
697 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
698 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
699 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
702 * Audio encoders may split the packets -- #frames in != #packets out.
703 * But there is no reordering, so we can limit the number of output packets
704 * by simply dropping them here.
705 * Counting encoded video frames needs to be done separately because of
706 * reordering, see do_video_out()
708 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
/* -frames limit reached for non-video: drop the packet. */
709 if (ost->frame_number >= ost->max_frames) {
710 av_packet_unref(pkt);
/* Pull encoder quality/error stats out of packet side data for reporting. */
715 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
717 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
719 ost->quality = sd ? AV_RL32(sd) : -1;
720 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
722 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
724 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force packet duration to exactly one frame interval. */
729 if (ost->frame_rate.num && ost->is_cfr) {
730 if (pkt->duration > 0)
731 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
732 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
/* Convert from the internal mux timebase to the stream's real timebase. */
737 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
739 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts > pts is invalid; replace both by a guess clamped around last_mux_dts. */
740 if (pkt->dts != AV_NOPTS_VALUE &&
741 pkt->pts != AV_NOPTS_VALUE &&
742 pkt->dts > pkt->pts) {
743 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
745 ost->file_index, ost->st->index);
/* pts+dts+prev − min − max picks the middle of the three candidates. */
747 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
748 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
749 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless the muxer allows equal) increasing dts. */
751 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
752 pkt->dts != AV_NOPTS_VALUE &&
753 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
754 ost->last_mux_dts != AV_NOPTS_VALUE) {
755 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
756 if (pkt->dts < max) {
757 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
758 av_log(s, loglevel, "Non-monotonous DTS in output stream "
759 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
760 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
/* With exit_on_error this is fatal (exit_program call elided here). */
762 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
765 av_log(s, loglevel, "changing to %"PRId64". This may result "
766 "in incorrect timestamps in the output file.\n",
768 if (pkt->pts >= pkt->dts)
769 pkt->pts = FFMAX(pkt->pts, max);
774 ost->last_mux_dts = pkt->dts;
/* Byte/packet accounting for the status line. */
776 ost->data_size += pkt->size;
777 ost->packets_written++;
779 pkt->stream_index = ost->index;
/* debug_ts trace of what goes into the muxer (guard line elided). */
782 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
783 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
784 av_get_media_type_string(ost->enc_ctx->codec_type),
785 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
786 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
791 ret = av_interleaved_write_frame(s, pkt);
/* On write failure, stop all encoders and remember the failure code. */
793 print_error("av_interleaved_write_frame()", ret);
794 main_return_code = 1;
795 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
797 av_packet_unref(pkt);
800 static void close_output_stream(OutputStream *ost)
802 OutputFile *of = output_files[ost->file_index];
804 ost->finished |= ENCODER_FINISHED;
806 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
807 of->recording_time = FFMIN(of->recording_time, end);
/* Run a packet through the stream's bitstream-filter chain (if any) and then
   into write_packet().
   NOTE(review): garbled extract — stray leading line numbers, and lines such
   as the `int idx, ret;` declarations, the `while (idx)` loop header, EOF
   handling, `continue`s and closing braces are elided. Code kept
   byte-identical; comments only. */
811 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
815 /* apply the output bitstream filters, if any */
816 if (ost->nb_bitstream_filters) {
/* Split merged side data before feeding the first bsf. */
819 av_packet_split_side_data(pkt);
820 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
826 /* get a packet from the previous filter up the chain */
827 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
/* EAGAIN: this bsf needs more input — move back up the chain. */
828 if (ret == AVERROR(EAGAIN)) {
834 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
835 * the api states this shouldn't happen after init(). Propagate it here to the
836 * muxer and to the next filters in the chain to workaround this.
837 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
838 * par_out->extradata and adapt muxers accordingly to get rid of this. */
/* One-shot (flag bit 1): copy possibly-updated extradata to the muxer. */
839 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
840 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
843 ost->bsf_extradata_updated[idx - 1] |= 1;
846 /* send it to the next filter down the chain or to the muxer */
847 if (idx < ost->nb_bitstream_filters) {
848 /* HACK/FIXME! - See above */
/* One-shot (flag bit 2): propagate extradata to the next bsf's input. */
849 if (!(ost->bsf_extradata_updated[idx] & 2)) {
850 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
853 ost->bsf_extradata_updated[idx] |= 2;
855 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
860 write_packet(of, pkt, ost);
/* No bitstream filters: pass the packet straight to the muxer. */
863 write_packet(of, pkt, ost);
866 if (ret < 0 && ret != AVERROR_EOF) {
867 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
868 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
874 static int check_recording_time(OutputStream *ost)
876 OutputFile *of = output_files[ost->file_index];
878 if (of->recording_time != INT64_MAX &&
879 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
880 AV_TIME_BASE_Q) >= 0) {
881 close_output_stream(ost);
/* Encode one filtered audio frame and send the resulting packet(s) to the
   muxer via output_packet().
   NOTE(review): garbled extract — stray leading line numbers, and lines such
   as the frame parameter, pkt/ret declarations, the receive loop header,
   error gotos and debug_ts guards are elided. Code kept byte-identical;
   comments only. */
887 static void do_audio_out(OutputFile *of, OutputStream *ost,
890 AVCodecContext *enc = ost->enc_ctx;
894 av_init_packet(&pkt);
/* Honour -t before encoding anything further. */
898 if (!check_recording_time(ost))
/* Without usable pts (or with negative -async), continue the running
   sample count in sync_opts. */
901 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
902 frame->pts = ost->sync_opts;
903 ost->sync_opts = frame->pts + frame->nb_samples;
904 ost->samples_encoded += frame->nb_samples;
905 ost->frames_encoded++;
907 av_assert0(pkt.size || !pkt.data);
908 update_benchmark(NULL);
/* debug_ts trace of what goes into the encoder (guard line elided). */
910 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
911 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
912 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
913 enc->time_base.num, enc->time_base.den);
/* send/receive encode loop (loop header and EAGAIN/EOF handling partly
   elided). */
916 ret = avcodec_send_frame(enc, frame);
921 ret = avcodec_receive_packet(enc, &pkt);
922 if (ret == AVERROR(EAGAIN))
927 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
929 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
932 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
933 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
934 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
935 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
938 output_packet(of, &pkt, ost);
/* Fatal-error exit path (label elided). */
943 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode a decoded subtitle and mux the result; DVB subtitles are emitted
   twice (draw packet + clear packet).
   NOTE(review): garbled extract — stray leading line numbers, and lines such
   as the remaining parameters, `enc`/`pts`/`pkt` setup, exit_program calls,
   the `nb` assignment and several braces are elided. Code kept
   byte-identical; comments only. */
947 static void do_subtitle_out(OutputFile *of,
951 int subtitle_out_max_size = 1024 * 1024;
952 int subtitle_out_size, nb, i;
/* Subtitles without pts cannot be scheduled. */
957 if (sub->pts == AV_NOPTS_VALUE) {
958 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer. */
967 subtitle_out = av_malloc(subtitle_out_max_size);
969 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
974 /* Note: DVB subtitle need one packet to draw them and one other
975 packet to clear them */
976 /* XXX: signal it in the codec context ? */
/* nb = 2 for DVB (draw + clear), otherwise 1 — assignment lines elided. */
977 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
982 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
984 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
985 pts -= output_files[ost->file_index]->start_time;
986 for (i = 0; i < nb; i++) {
987 unsigned save_num_rects = sub->num_rects;
989 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
990 if (!check_recording_time(ost))
994 // start_display_time is required to be 0
/* Fold start_display_time into pts so the encoder sees start == 0. */
995 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
996 sub->end_display_time -= sub->start_display_time;
997 sub->start_display_time = 0;
1001 ost->frames_encoded++;
1003 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1004 subtitle_out_max_size, sub);
/* The encoder may have clobbered num_rects (cleared for pass 2); restore. */
1006 sub->num_rects = save_num_rects;
1007 if (subtitle_out_size < 0) {
1008 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1012 av_init_packet(&pkt);
1013 pkt.data = subtitle_out;
1014 pkt.size = subtitle_out_size;
1015 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1016 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1017 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1018 /* XXX: the pts correction is handled here. Maybe handling
1019 it in the codec would be better */
/* First (draw) packet offset by start time, second (clear) by end time —
   the i == 0 / else branch lines are elided. */
1021 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1023 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1026 output_packet(of, &pkt, ost);
/* Core video output path: apply the -vsync policy (dup/drop frames so the
   output matches the target frame rate), optionally force keyframes, encode
   each resulting frame and mux the packets.
   NOTE(review): garbled extract — stray leading line numbers, and many lines
   (remaining parameters, sync_ipts/pkt declarations, several case labels,
   nb_frames_drop accounting, goto labels, closing braces) are elided. Code
   kept byte-identical; comments only. */
1030 static void do_video_out(OutputFile *of,
1032 AVFrame *next_picture,
1035 int ret, format_video_sync;
1037 AVCodecContext *enc = ost->enc_ctx;
1038 AVCodecParameters *mux_par = ost->st->codecpar;
1039 AVRational frame_rate;
1040 int nb_frames, nb0_frames, i;
1041 double delta, delta0;
1042 double duration = 0;
1044 InputStream *ist = NULL;
1045 AVFilterContext *filter = ost->filter->filter;
1047 if (ost->source_index >= 0)
1048 ist = input_streams[ost->source_index];
/* Estimate one output frame's duration in encoder time-base units. */
1050 frame_rate = av_buffersink_get_frame_rate(filter);
1051 if (frame_rate.num > 0 && frame_rate.den > 0)
1052 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1054 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1055 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Prefer the incoming frame's own duration when no filters change timing
   (the other conditions of this && chain are elided). */
1057 if (!ost->filters_script &&
1061 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1062 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* EOF flush: estimate how many trailing duplicates to emit from history. */
1065 if (!next_picture) {
1067 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1068 ost->last_nb0_frames[1],
1069 ost->last_nb0_frames[2]);
1071 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1072 delta = delta0 + duration;
1074 /* by default, we output a single frame */
1075 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve -vsync auto into a concrete policy based on the muxer. */
1078 format_video_sync = video_sync_method;
1079 if (format_video_sync == VSYNC_AUTO) {
1080 if(!strcmp(of->ctx->oformat->name, "avi")) {
1081 format_video_sync = VSYNC_VFR;
1083 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1085 && format_video_sync == VSYNC_CFR
1086 && input_files[ist->file_index]->ctx->nb_streams == 1
1087 && input_files[ist->file_index]->input_ts_offset == 0) {
1088 format_video_sync = VSYNC_VSCFR;
1090 if (format_video_sync == VSYNC_CFR && copy_ts) {
1091 format_video_sync = VSYNC_VSCFR;
1094 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Clip small negative drift so it doesn't trigger a drop (enclosing `if`
   condition partly elided). */
1098 format_video_sync != VSYNC_PASSTHROUGH &&
1099 format_video_sync != VSYNC_DROP) {
1100 if (delta0 < -0.6) {
1101 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1103 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1104 sync_ipts = ost->sync_opts;
/* Per-policy computation of nb_frames/nb0_frames (several case labels and
   drop/dup assignments elided). */
1109 switch (format_video_sync) {
1111 if (ost->frame_number == 0 && delta0 >= 0.5) {
1112 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1115 ost->sync_opts = lrint(sync_ipts);
1118 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1119 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1121 } else if (delta < -1.1)
1123 else if (delta > 1.1) {
1124 nb_frames = lrintf(delta);
1126 nb0_frames = lrintf(delta0 - 0.6);
1132 else if (delta > 0.6)
1133 ost->sync_opts = lrint(sync_ipts);
1136 case VSYNC_PASSTHROUGH:
1137 ost->sync_opts = lrint(sync_ipts);
/* Clamp to the -frames limit and remember dup history for EOF flushing. */
1144 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1145 nb0_frames = FFMIN(nb0_frames, nb_frames);
1147 memmove(ost->last_nb0_frames + 1,
1148 ost->last_nb0_frames,
1149 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1150 ost->last_nb0_frames[0] = nb0_frames;
1152 if (nb0_frames == 0 && ost->last_dropped) {
1154 av_log(NULL, AV_LOG_VERBOSE,
1155 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1156 ost->frame_number, ost->st->index, ost->last_frame->pts);
/* Duplication accounting and sanity limit against runaway dts errors. */
1158 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1159 if (nb_frames > dts_error_threshold * 30) {
1160 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1164 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1165 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1166 if (nb_frames_dup > dup_warning) {
1167 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1171 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1173 /* duplicates frame if needed */
1174 for (i = 0; i < nb_frames; i++) {
1175 AVFrame *in_picture;
1176 av_init_packet(&pkt);
/* First nb0_frames iterations re-encode the previous frame (duplication). */
1180 if (i < nb0_frames && ost->last_frame) {
1181 in_picture = ost->last_frame;
1183 in_picture = next_picture;
1188 in_picture->pts = ost->sync_opts;
1191 if (!check_recording_time(ost))
1193 if (ost->frame_number >= ost->max_frames)
/* Legacy rawvideo shortcut: write the AVPicture struct directly. */
1197 #if FF_API_LAVF_FMT_RAWPICTURE
1198 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1199 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1200 /* raw pictures are written as AVPicture structure to
1201 avoid any copies. We support temporarily the older
1203 if (in_picture->interlaced_frame)
1204 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1206 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1207 pkt.data = (uint8_t *)in_picture;
1208 pkt.size = sizeof(AVPicture);
1209 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1210 pkt.flags |= AV_PKT_FLAG_KEY;
1212 output_packet(of, &pkt, ost);
/* Normal encode path. */
1216 int forced_keyframe = 0;
/* Interlacing setup for the encoder and the container. */
1219 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1220 ost->top_field_first >= 0)
1221 in_picture->top_field_first = !!ost->top_field_first;
1223 if (in_picture->interlaced_frame) {
1224 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1225 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1227 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1229 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1231 in_picture->quality = enc->global_quality;
1232 in_picture->pict_type = 0;
/* Decide whether this frame must be an I-frame (-force_key_frames). */
1234 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1235 in_picture->pts * av_q2d(enc->time_base) : NAN;
1236 if (ost->forced_kf_index < ost->forced_kf_count &&
1237 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1238 ost->forced_kf_index++;
1239 forced_keyframe = 1;
1240 } else if (ost->forced_keyframes_pexpr) {
/* Expression-based forcing: evaluate with current n/t constants. */
1242 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1243 res = av_expr_eval(ost->forced_keyframes_pexpr,
1244 ost->forced_keyframes_expr_const_values, NULL);
1245 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1246 ost->forced_keyframes_expr_const_values[FKF_N],
1247 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1248 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1249 ost->forced_keyframes_expr_const_values[FKF_T],
1250 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1253 forced_keyframe = 1;
1254 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1255 ost->forced_keyframes_expr_const_values[FKF_N];
1256 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1257 ost->forced_keyframes_expr_const_values[FKF_T];
1258 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1261 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1262 } else if ( ost->forced_keyframes
1263 && !strncmp(ost->forced_keyframes, "source", 6)
1264 && in_picture->key_frame==1) {
1265 forced_keyframe = 1;
1268 if (forced_keyframe) {
1269 in_picture->pict_type = AV_PICTURE_TYPE_I;
1270 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1273 update_benchmark(NULL);
/* debug_ts trace of what goes into the encoder (guard line elided). */
1275 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1276 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1277 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1278 enc->time_base.num, enc->time_base.den);
1281 ost->frames_encoded++;
/* send/receive encode loop (loop header and EAGAIN/EOF handling partly
   elided). */
1283 ret = avcodec_send_frame(enc, in_picture);
1288 ret = avcodec_receive_packet(enc, &pkt);
1289 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1290 if (ret == AVERROR(EAGAIN))
1296 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1297 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1298 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1299 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* No-delay encoders may omit pts; fall back to our running counter. */
1302 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1303 pkt.pts = ost->sync_opts;
1305 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1308 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1309 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1310 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1311 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1314 frame_size = pkt.size;
1315 output_packet(of, &pkt, ost);
1317 /* if two pass, output log */
1318 if (ost->logfile && enc->stats_out) {
1319 fprintf(ost->logfile, "%s", enc->stats_out);
1325 * For video, number of frames in == number of packets out.
1326 * But there may be reordering, so we can't throw away frames on encoder
1327 * flush, we need to limit them here, before they go into encoder.
1329 ost->frame_number++;
1331 if (vstats_filename && frame_size)
1332 do_video_stats(ost, frame_size);
/* Keep a reference to this frame for future duplication (nb0_frames). */
1335 if (!ost->last_frame)
1336 ost->last_frame = av_frame_alloc();
1337 av_frame_unref(ost->last_frame);
1338 if (next_picture && ost->last_frame)
1339 av_frame_ref(ost->last_frame, next_picture);
1341 av_frame_free(&ost->last_frame);
/* Fatal-error exit path (label elided). */
1345 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* Convert a normalized mean-squared error in (0, 1] into PSNR in dB.
 * Callers pass error already divided by (width * height * 255^2), see
 * do_video_stats() and print_report(). */
1349 static double psnr(double d)
1351 return -10.0 * log10(d);
/* Append one line of per-frame video statistics for this output stream to
 * the global vstats file (-vstats / -vstats_file option).
 * frame_size is the encoded packet size in bytes for the frame just muxed.
 * NOTE(review): several lines of this function (declarations, error checks,
 * closing braces) are elided in this excerpt. */
1354 static void do_video_stats(OutputStream *ost, int frame_size)
1356 AVCodecContext *enc;
1358 double ti1, bitrate, avg_bitrate;
1360 /* this is executed just the first time do_video_stats is called */
1362 vstats_file = fopen(vstats_filename, "w");
1370 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1371 frame_number = ost->st->nb_frames;
1372 if (vstats_version <= 1) {
/* version 1 format omits the output-file/stream indices */
1373 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1374 ost->quality / (float)FF_QP2LAMBDA);
1376 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1377 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR only when the encoder was asked to compute error (AV_CODEC_FLAG_PSNR) */
1380 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1381 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1383 fprintf(vstats_file,"f_size= %6d ", frame_size);
1384 /* compute pts value */
1385 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate from this frame; average from total bytes / elapsed time */
1389 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1390 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1391 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1392 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1393 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1397 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/* Mark an output stream as fully finished (both encoder and muxer side).
 * NOTE(review): the loop below marks every stream of the owning output file
 * finished; in upstream FFmpeg it is guarded by a condition (elided here) —
 * confirm against the full source before relying on it unconditionally. */
1399 static void finish_output_stream(OutputStream *ost)
1401 OutputFile *of = output_files[ost->file_index];
1404 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1407 for (i = 0; i < of->ctx->nb_streams; i++)
1408 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1413 * Get and encode new output from any of the filtergraphs, without causing
1416 * @return 0 for success, <0 for severe errors
1418 static int reap_filters(int flush)
1420 AVFrame *filtered_frame = NULL;
1423 /* Reap all buffers present in the buffer sinks */
1424 for (i = 0; i < nb_output_streams; i++) {
1425 OutputStream *ost = output_streams[i];
1426 OutputFile *of = output_files[ost->file_index];
1427 AVFilterContext *filter;
1428 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph is absent or not yet configured. */
1431 if (!ost->filter || !ost->filter->graph->graph)
1433 filter = ost->filter->filter;
/* Lazily initialize the output stream the first time a frame arrives. */
1435 if (!ost->initialized) {
1437 ret = init_output_stream(ost, error, sizeof(error));
1439 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1440 ost->file_index, ost->index, error);
1445 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1446 return AVERROR(ENOMEM);
1448 filtered_frame = ost->filtered_frame;
1451 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* Pull without requesting more input; EAGAIN/EOF are expected outcomes. */
1452 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1453 AV_BUFFERSINK_FLAG_NO_REQUEST);
1455 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1456 av_log(NULL, AV_LOG_WARNING,
1457 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1458 } else if (flush && ret == AVERROR_EOF) {
/* On flush, send a NULL frame so do_video_out() can drain the encoder. */
1459 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1460 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1464 if (ost->finished) {
1465 av_frame_unref(filtered_frame);
1468 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1469 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1470 AVRational filter_tb = av_buffersink_get_time_base(filter);
1471 AVRational tb = enc->time_base;
/* Widen the timebase denominator to keep extra fractional precision in
 * float_pts; the shift is undone below by the division. */
1472 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1474 tb.den <<= extra_bits;
1476 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1477 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1478 float_pts /= 1 << extra_bits;
1479 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1480 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1482 filtered_frame->pts =
1483 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1484 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1486 //if (ost->source_index >= 0)
1487 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1489 switch (av_buffersink_get_type(filter)) {
1490 case AVMEDIA_TYPE_VIDEO:
1491 if (!ost->frame_aspect_ratio.num)
1492 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1495 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1496 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1498 enc->time_base.num, enc->time_base.den);
1501 do_video_out(of, ost, filtered_frame, float_pts);
1503 case AVMEDIA_TYPE_AUDIO:
/* Refuse channel-count changes unless the encoder supports them. */
1504 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1505 enc->channels != av_frame_get_channels(filtered_frame)) {
1506 av_log(NULL, AV_LOG_ERROR,
1507 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1510 do_audio_out(of, ost, filtered_frame);
1513 // TODO support subtitle filters
1517 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type output byte counts, muxing overhead
 * percentage, and (at verbose level) per-stream demux/decode and encode/mux
 * statistics for every input and output file.
 * total_size is the muxed output file size in bytes, or <=0 if unknown. */
1524 static void print_final_stats(int64_t total_size)
1526 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1527 uint64_t subtitle_size = 0;
1528 uint64_t data_size = 0;
1529 float percent = -1.0;
1533 for (i = 0; i < nb_output_streams; i++) {
1534 OutputStream *ost = output_streams[i];
1535 switch (ost->enc_ctx->codec_type) {
1536 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1537 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1538 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1539 default: other_size += ost->data_size; break;
1541 extra_size += ost->enc_ctx->extradata_size;
1542 data_size += ost->data_size;
/* Skip pass-1-only streams (they produce stats, not output). Use the
 * current AV_CODEC_FLAG_PASS2 name, not the deprecated CODEC_FLAG_PASS2. */
1543 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1544 != AV_CODEC_FLAG_PASS1)
/* Overhead percentage only when the container size is known and sane. */
1548 if (data_size && total_size>0 && total_size >= data_size)
1549 percent = 100.0 * (total_size - data_size) / data_size;
1551 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1552 video_size / 1024.0,
1553 audio_size / 1024.0,
1554 subtitle_size / 1024.0,
1555 other_size / 1024.0,
1556 extra_size / 1024.0);
1558 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1560 av_log(NULL, AV_LOG_INFO, "unknown");
1561 av_log(NULL, AV_LOG_INFO, "\n");
1563 /* print verbose per-stream stats */
1564 for (i = 0; i < nb_input_files; i++) {
1565 InputFile *f = input_files[i];
1566 uint64_t total_packets = 0, total_size = 0;
1568 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1569 i, f->ctx->filename);
1571 for (j = 0; j < f->nb_streams; j++) {
1572 InputStream *ist = input_streams[f->ist_index + j];
1573 enum AVMediaType type = ist->dec_ctx->codec_type;
1575 total_size += ist->data_size;
1576 total_packets += ist->nb_packets;
1578 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1579 i, j, media_type_string(type));
1580 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1581 ist->nb_packets, ist->data_size);
1583 if (ist->decoding_needed) {
1584 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1585 ist->frames_decoded);
1586 if (type == AVMEDIA_TYPE_AUDIO)
1587 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1588 av_log(NULL, AV_LOG_VERBOSE, "; ");
1591 av_log(NULL, AV_LOG_VERBOSE, "\n");
1594 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1595 total_packets, total_size);
1598 for (i = 0; i < nb_output_files; i++) {
1599 OutputFile *of = output_files[i];
1600 uint64_t total_packets = 0, total_size = 0;
1602 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1603 i, of->ctx->filename);
1605 for (j = 0; j < of->ctx->nb_streams; j++) {
1606 OutputStream *ost = output_streams[of->ost_index + j];
1607 enum AVMediaType type = ost->enc_ctx->codec_type;
1609 total_size += ost->data_size;
1610 total_packets += ost->packets_written;
1612 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1613 i, j, media_type_string(type));
1614 if (ost->encoding_needed) {
1615 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1616 ost->frames_encoded);
1617 if (type == AVMEDIA_TYPE_AUDIO)
1618 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1619 av_log(NULL, AV_LOG_VERBOSE, "; ");
1622 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1623 ost->packets_written, ost->data_size);
1625 av_log(NULL, AV_LOG_VERBOSE, "\n");
1628 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1629 total_packets, total_size);
/* Warn clearly when nothing at all was encoded. */
1631 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1632 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1634 av_log(NULL, AV_LOG_WARNING, "\n");
1636 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Periodic progress reporter: builds the one-line "frame= ... fps= ... time=
 * ... bitrate= ..." status string and, in parallel, the key=value progress
 * script sent to -progress (progress_avio). Rate-limited to one update per
 * 500ms unless is_last_report is set. Uses static state (last_time,
 * qp_histogram), so it is not reentrant. */
1641 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1644 AVBPrint buf_script;
1646 AVFormatContext *oc;
1648 AVCodecContext *enc;
1649 int frame_number, vid, i;
1652 int64_t pts = INT64_MIN + 1;
1653 static int64_t last_time = -1;
1654 static int qp_histogram[52];
1655 int hours, mins, secs, us;
/* Nothing to do if neither console stats nor a progress pipe is active. */
1659 if (!print_stats && !is_last_report && !progress_avio)
1662 if (!is_last_report) {
1663 if (last_time == -1) {
1664 last_time = cur_time;
/* Throttle: at most one report per 500ms. */
1667 if ((cur_time - last_time) < 500000)
1669 last_time = cur_time;
1672 t = (cur_time-timer_start) / 1000000.0;
1675 oc = output_files[0]->ctx;
1677 total_size = avio_size(oc->pb);
1678 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1679 total_size = avio_tell(oc->pb);
1683 av_bprint_init(&buf_script, 0, 1);
1684 for (i = 0; i < nb_output_streams; i++) {
1686 ost = output_streams[i];
1688 if (!ost->stream_copy)
1689 q = ost->quality / (float) FF_QP2LAMBDA;
1691 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1692 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1693 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1694 ost->file_index, ost->index, q);
1696 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1699 frame_number = ost->frame_number;
1700 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" evaluates to 0 or 1 and is used as the printf precision:
 * one decimal place only for low frame rates. */
1701 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1702 frame_number, fps < 9.95, fps, q);
1703 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1704 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1705 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1706 ost->file_index, ost->index, q);
1708 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* Quantizer histogram, printed as one hex digit per bucket. */
1712 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1714 for (j = 0; j < 32; j++)
1715 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1718 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1720 double error, error_sum = 0;
1721 double scale, scale_sum = 0;
1723 char type[3] = { 'Y','U','V' };
1724 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1725 for (j = 0; j < 3; j++) {
/* Last report: cumulative error over all frames; otherwise per-frame. */
1726 if (is_last_report) {
1727 error = enc->error[j];
1728 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1730 error = ost->error[j];
1731 scale = enc->width * enc->height * 255.0 * 255.0;
1737 p = psnr(error / scale);
1738 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1739 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1740 ost->file_index, ost->index, type[j] | 32, p);
1742 p = psnr(error_sum / scale_sum);
1743 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1744 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1745 ost->file_index, ost->index, p);
1749 /* compute min output value */
1750 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1751 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1752 ost->st->time_base, AV_TIME_BASE_Q));
1754 nb_frames_drop += ost->last_dropped;
1757 secs = FFABS(pts) / AV_TIME_BASE;
1758 us = FFABS(pts) % AV_TIME_BASE;
/* -1 means "N/A" for both bitrate and speed. */
1764 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1765 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1767 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1769 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1770 "size=%8.0fkB time=", total_size / 1024.0);
1772 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1773 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1774 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1775 (100 * us) / AV_TIME_BASE);
1778 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1779 av_bprintf(&buf_script, "bitrate=N/A\n");
1781 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1782 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1785 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1786 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1787 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1788 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1789 hours, mins, secs, us);
1791 if (nb_frames_dup || nb_frames_drop)
1792 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1793 nb_frames_dup, nb_frames_drop);
1794 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1795 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1798 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1799 av_bprintf(&buf_script, "speed=N/A\n");
1801 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1802 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1805 if (print_stats || is_last_report) {
/* '\r' keeps rewriting the same console line; '\n' finalizes it. */
1806 const char end = is_last_report ? '\n' : '\r';
1807 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1808 fprintf(stderr, "%s %c", buf, end);
1810 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1815 if (progress_avio) {
1816 av_bprintf(&buf_script, "progress=%s\n",
1817 is_last_report ? "end" : "continue");
1818 avio_write(progress_avio, buf_script.str,
1819 FFMIN(buf_script.len, buf_script.size - 1));
1820 avio_flush(progress_avio);
1821 av_bprint_finalize(&buf_script, NULL);
1822 if (is_last_report) {
1823 if ((ret = avio_closep(&progress_avio)) < 0)
1824 av_log(NULL, AV_LOG_ERROR,
1825 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1830 print_final_stats(total_size);
/* Drain every active encoder at end of input: send a NULL frame to each,
 * then receive and mux the remaining delayed packets until EOF. Streams that
 * were never initialized (no frames reached them) get a best-effort fake
 * filter configuration so the encoder can still be opened. */
1833 static void flush_encoders(void)
1837 for (i = 0; i < nb_output_streams; i++) {
1838 OutputStream *ost = output_streams[i];
1839 AVCodecContext *enc = ost->enc_ctx;
1840 OutputFile *of = output_files[ost->file_index];
1842 if (!ost->encoding_needed)
1845 // Try to enable encoding with no input frames.
1846 // Maybe we should just let encoding fail instead.
1847 if (!ost->initialized) {
1848 FilterGraph *fg = ost->filter->graph;
1851 av_log(NULL, AV_LOG_WARNING,
1852 "Finishing stream %d:%d without any data written to it.\n",
1853 ost->file_index, ost->st->index);
1855 if (ost->filter && !fg->graph) {
1857 for (x = 0; x < fg->nb_inputs; x++) {
1858 InputFilter *ifilter = fg->inputs[x];
1859 if (ifilter->format < 0) {
1860 AVCodecParameters *par = ifilter->ist->st->codecpar;
1861 // We never got any input. Set a fake format, which will
1862 // come from libavformat.
1863 ifilter->format = par->format;
1864 ifilter->sample_rate = par->sample_rate;
1865 ifilter->channels = par->channels;
1866 ifilter->channel_layout = par->channel_layout;
1867 ifilter->width = par->width;
1868 ifilter->height = par->height;
1869 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1873 if (!ifilter_has_all_input_formats(fg))
1876 ret = configure_filtergraph(fg);
1878 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1882 finish_output_stream(ost);
1885 ret = init_output_stream(ost, error, sizeof(error));
1887 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1888 ost->file_index, ost->index, error);
/* Raw (non-frame-based) audio encoders have nothing buffered to flush. */
1893 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1895 #if FF_API_LAVF_FMT_RAWPICTURE
1896 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1900 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame enters drain mode (see avcodec_send_frame docs). */
1903 avcodec_send_frame(enc, NULL);
1906 const char *desc = NULL;
1910 switch (enc->codec_type) {
1911 case AVMEDIA_TYPE_AUDIO:
1914 case AVMEDIA_TYPE_VIDEO:
1921 av_init_packet(&pkt);
1925 update_benchmark(NULL);
1926 ret = avcodec_receive_packet(enc, &pkt);
1927 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1928 if (ret < 0 && ret != AVERROR_EOF) {
1929 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass: append the encoder's pass-1 stats to the log file. */
1934 if (ost->logfile && enc->stats_out) {
1935 fprintf(ost->logfile, "%s", enc->stats_out);
1937 if (ret == AVERROR_EOF) {
1940 if (ost->finished & MUXER_FINISHED) {
1941 av_packet_unref(&pkt);
1944 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1945 pkt_size = pkt.size;
1946 output_packet(of, &pkt, ost);
1947 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1948 do_video_stats(ost, pkt_size);
1955 * Check whether a packet from ist should be written into ost at this time
1957 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1959 OutputFile *of = output_files[ost->file_index];
1960 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* Reject packets from streams other than this output's mapped source. */
1962 if (ost->source_index != ist_index)
/* Reject packets arriving before the output file's -ss start time. */
1968 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding (-c copy):
 * applies start-time/recording-time trimming, rescales timestamps from the
 * input stream timebase into the muxer timebase, optionally runs the packet
 * through a parser for certain codecs, then hands it to output_packet(). */
1974 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1976 OutputFile *of = output_files[ost->file_index];
1977 InputFile *f = input_files [ist->file_index];
1978 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1979 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1983 av_init_packet(&opkt);
/* Drop leading non-keyframes unless the user asked to keep them. */
1985 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1986 !ost->copy_initial_nonkeyframes)
1989 if (!ost->frame_number && !ost->copy_prior_start) {
1990 int64_t comp_start = start_time;
1991 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1992 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1993 if (pkt->pts == AV_NOPTS_VALUE ?
1994 ist->pts < comp_start :
1995 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop copying once the -t recording window of the output file is exceeded. */
1999 if (of->recording_time != INT64_MAX &&
2000 ist->pts >= of->recording_time + start_time) {
2001 close_output_stream(ost);
2005 if (f->recording_time != INT64_MAX) {
2006 start_time = f->ctx->start_time;
2007 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2008 start_time += f->start_time;
2009 if (ist->pts >= f->recording_time + start_time) {
2010 close_output_stream(ost);
2015 /* force the input stream PTS */
2016 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2019 if (pkt->pts != AV_NOPTS_VALUE)
2020 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2022 opkt.pts = AV_NOPTS_VALUE;
/* Missing input DTS: fall back to the decoder-tracked ist->dts. */
2024 if (pkt->dts == AV_NOPTS_VALUE)
2025 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2027 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2028 opkt.dts -= ost_tb_start_time;
/* Audio: derive sample-accurate timestamps from the frame duration. */
2030 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2031 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2033 duration = ist->dec_ctx->frame_size;
2034 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2035 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2036 ost->mux_timebase) - ost_tb_start_time;
2039 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2041 opkt.flags = pkt->flags;
2042 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2043 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2044 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2045 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2046 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2048 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2049 &opkt.data, &opkt.size,
2050 pkt->data, pkt->size,
2051 pkt->flags & AV_PKT_FLAG_KEY);
2053 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Wrap parser-allocated data in a refcounted buffer so it is freed. */
2058 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2063 opkt.data = pkt->data;
2064 opkt.size = pkt->size;
2066 av_copy_packet_side_data(&opkt, pkt);
2068 #if FF_API_LAVF_FMT_RAWPICTURE
2069 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2070 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2071 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2072 /* store AVPicture in AVPacket, as expected by the output format */
2073 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2075 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2079 opkt.data = (uint8_t *)&pict;
2080 opkt.size = sizeof(AVPicture);
2081 opkt.flags |= AV_PKT_FLAG_KEY;
2085 output_packet(of, &opkt, ost);
/* If the decoder reported no channel layout, guess a default one from the
 * channel count (capped by -guess_layout_max) and warn the user.
 * Returns nonzero on success-like paths; the failure return is elided here. */
2088 int guess_input_channel_layout(InputStream *ist)
2090 AVCodecContext *dec = ist->dec_ctx;
2092 if (!dec->channel_layout) {
2093 char layout_name[256];
2095 if (dec->channels > ist->guess_layout_max)
2097 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2098 if (!dec->channel_layout)
2100 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2101 dec->channels, dec->channel_layout);
2102 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2103 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Account a decode attempt in the global success/failure counters and, when
 * -xerror is set, abort on decode errors or on corrupt decoded frames. */
2108 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2110 if (*got_output || ret<0)
2111 decode_error_stat[ret<0] ++;
2113 if (ret < 0 && exit_on_error)
2116 if (exit_on_error && *got_output && ist) {
2117 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2118 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2124 // Filters can be configured only if the formats of all inputs are known.
/* Returns false while any audio/video input of the graph still has an
 * undetermined format (format < 0); other media types are ignored. */
2125 static int ifilter_has_all_input_formats(FilterGraph *fg)
2128 for (i = 0; i < fg->nb_inputs; i++) {
2129 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2130 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Push one decoded frame into a filtergraph input. If the frame's parameters
 * differ from what the graph was configured with (format, size, sample rate,
 * channels, hw frames context), the graph is reconfigured first; frames that
 * arrive before all inputs know their format are queued in a FIFO. */
2136 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2138 FilterGraph *fg = ifilter->graph;
2139 int need_reinit, ret, i;
2141 /* determine if the parameters for this input changed */
2142 need_reinit = ifilter->format != frame->format;
2143 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2144 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2147 switch (ifilter->ist->st->codecpar->codec_type) {
2148 case AVMEDIA_TYPE_AUDIO:
2149 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2150 ifilter->channels != frame->channels ||
2151 ifilter->channel_layout != frame->channel_layout;
2153 case AVMEDIA_TYPE_VIDEO:
2154 need_reinit |= ifilter->width != frame->width ||
2155 ifilter->height != frame->height;
2160 ret = ifilter_parameters_from_frame(ifilter, frame);
2165 /* (re)init the graph if possible, otherwise buffer the frame and return */
2166 if (need_reinit || !fg->graph) {
2167 for (i = 0; i < fg->nb_inputs; i++) {
2168 if (!ifilter_has_all_input_formats(fg)) {
/* Not all inputs known yet: clone and queue this frame for later. */
2169 AVFrame *tmp = av_frame_clone(frame);
2171 return AVERROR(ENOMEM);
2172 av_frame_unref(frame);
2174 if (!av_fifo_space(ifilter->frame_queue)) {
2175 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2179 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain pending output of the old graph before reconfiguring. */
2184 ret = reap_filters(1);
2185 if (ret < 0 && ret != AVERROR_EOF) {
2187 av_strerror(ret, errbuf, sizeof(errbuf));
2189 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2193 ret = configure_filtergraph(fg);
2195 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2200 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2202 av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
/* Signal EOF on one filtergraph input. If the graph was configured, this is
 * a NULL frame into the buffer source; if it never got configured and every
 * input has now hit EOF, mark all of the graph's outputs finished. */
2209 static int ifilter_send_eof(InputFilter *ifilter)
2215 if (ifilter->filter) {
2216 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2220 // the filtergraph was never configured
2221 FilterGraph *fg = ifilter->graph;
2222 for (i = 0; i < fg->nb_inputs; i++)
2223 if (!fg->inputs[i]->eof)
2225 if (i == fg->nb_inputs) {
2226 // All the input streams have finished without the filtergraph
2227 // ever being configured.
2228 // Mark the output streams as finished.
2229 for (j = 0; j < fg->nb_outputs; j++)
2230 finish_output_stream(fg->outputs[j]->ost);
2237 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2238 // There is the following difference: if you got a frame, you must call
2239 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2240 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Thin wrapper mapping the send/receive decode API onto the old
 * got_frame-style calling convention used throughout this file. */
2241 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2248 ret = avcodec_send_packet(avctx, pkt);
2249 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2250 // decoded frames with avcodec_receive_frame() until done.
2251 if (ret < 0 && ret != AVERROR_EOF)
2255 ret = avcodec_receive_frame(avctx, frame);
2256 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Distribute one decoded frame to every filtergraph input fed by this
 * stream. All but the last filter get a reference (av_frame_ref into
 * ist->filter_frame); the last consumes the frame itself. EOF from a filter
 * is tolerated and treated as success. */
2264 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2269 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2270 for (i = 0; i < ist->nb_filters; i++) {
2271 if (i < ist->nb_filters - 1) {
2272 f = ist->filter_frame;
2273 ret = av_frame_ref(f, decoded_frame);
2278 ret = ifilter_send_frame(ist->filters[i], f);
2279 if (ret == AVERROR_EOF)
2280 ret = 0; /* ignore */
2282 av_log(NULL, AV_LOG_ERROR,
2283 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet: run the decoder, validate the sample rate, advance
 * the stream's predicted next_pts/next_dts by the decoded duration, pick a
 * timebase for the frame's pts (frame pts > packet pts > tracked ist->dts),
 * rescale it sample-accurately, and forward the frame to the filters.
 * got_output is set when a frame was produced. */
2290 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2292 AVFrame *decoded_frame;
2293 AVCodecContext *avctx = ist->dec_ctx;
2295 AVRational decoded_frame_tb;
/* Lazily allocate the per-stream scratch frames. */
2297 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2298 return AVERROR(ENOMEM);
2299 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2300 return AVERROR(ENOMEM);
2301 decoded_frame = ist->decoded_frame;
2303 update_benchmark(NULL);
2304 ret = decode(avctx, decoded_frame, got_output, pkt);
2305 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2307 if (ret >= 0 && avctx->sample_rate <= 0) {
2308 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2309 ret = AVERROR_INVALIDDATA;
2312 if (ret != AVERROR_EOF)
2313 check_decode_result(ist, got_output, ret);
2315 if (!*got_output || ret < 0)
2318 ist->samples_decoded += decoded_frame->nb_samples;
2319 ist->frames_decoded++;
2322 /* increment next_dts to use for the case where the input stream does not
2323 have timestamps or there are multiple frames in the packet */
2324 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2326 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Choose the most reliable timestamp source available for this frame. */
2330 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2331 decoded_frame_tb = ist->st->time_base;
2332 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2333 decoded_frame->pts = pkt->pts;
2334 decoded_frame_tb = ist->st->time_base;
2336 decoded_frame->pts = ist->dts;
2337 decoded_frame_tb = AV_TIME_BASE_Q;
2339 if (decoded_frame->pts != AV_NOPTS_VALUE)
2340 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2341 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2342 (AVRational){1, avctx->sample_rate});
2343 ist->nb_samples = decoded_frame->nb_samples;
2344 err = send_frame_to_filters(ist, decoded_frame);
2346 av_frame_unref(ist->filter_frame);
2347 av_frame_unref(decoded_frame);
2348 return err < 0 ? err : ret;
/* Decode one video packet for input stream 'ist'; pkt == NULL or 'eof'
 * requests decoder draining.  On success the decoded frame is given a
 * best-effort timestamp (or a synthesized CFR timestamp) and forwarded
 * to the attached filtergraphs.  *got_output is set when a frame was
 * produced; returns a negative AVERROR on failure, otherwise the
 * decoder's return code.
 * NOTE(review): several original lines (braces, avpkt setup) are not
 * visible in this extract; only comments were added here. */
2351 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2353 AVFrame *decoded_frame;
2354 int i, ret = 0, err = 0;
2355 int64_t best_effort_timestamp;
2356 int64_t dts = AV_NOPTS_VALUE;
2359 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2360 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2362 if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the frames reused across calls for this stream. */
2365 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2366 return AVERROR(ENOMEM);
2367 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2368 return AVERROR(ENOMEM);
2369 decoded_frame = ist->decoded_frame;
/* Rescale the stream-level dts (AV_TIME_BASE units) back into the
 * demuxer time base so it can be attached to the packet below. */
2370 if (ist->dts != AV_NOPTS_VALUE)
2371 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2374 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2377 // The old code used to set dts on the drain packet, which does not work
2378 // with the new API anymore.
/* Queue this dts so it can serve as a fallback timestamp for frames
 * drained at EOF (consumed from dts_buffer further down). */
2380 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2382 return AVERROR(ENOMEM);
2383 ist->dts_buffer = new;
2384 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2387 update_benchmark(NULL);
2388 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2389 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2391 // The following line may be required in some cases where there is no parser
2392 // or the parser does not has_b_frames correctly
2393 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2394 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2395 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2397 av_log(ist->dec_ctx, AV_LOG_WARNING,
2398 "video_delay is larger in decoder than demuxer %d > %d.\n"
2399 "If you want to help, upload a sample "
2400 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2401 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2402 ist->dec_ctx->has_b_frames,
2403 ist->st->codecpar->video_delay);
2406 if (ret != AVERROR_EOF)
2407 check_decode_result(ist, got_output, ret);
/* Debug-log a mismatch between the decoder context and the frame's
 * geometry/pixel format; the frame is still processed. */
2409 if (*got_output && ret >= 0) {
2410 if (ist->dec_ctx->width != decoded_frame->width ||
2411 ist->dec_ctx->height != decoded_frame->height ||
2412 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2413 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2414 decoded_frame->width,
2415 decoded_frame->height,
2416 decoded_frame->format,
2417 ist->dec_ctx->width,
2418 ist->dec_ctx->height,
2419 ist->dec_ctx->pix_fmt);
2423 if (!*got_output || ret < 0)
/* User-forced field order (-top) overrides what the decoder reported. */
2426 if(ist->top_field_first>=0)
2427 decoded_frame->top_field_first = ist->top_field_first;
2429 ist->frames_decoded++;
/* If a hwaccel produced the frame, download it to system memory. */
2431 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2432 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2436 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2438 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* Forced input CFR (-r before -i): synthesize monotonically increasing
 * timestamps instead of trusting the decoder's output. */
2440 if (ist->framerate.num)
2441 best_effort_timestamp = ist->cfr_next_pts++;
/* At EOF with no usable timestamp, fall back to the oldest queued
 * packet dts and pop it from the buffer. */
2443 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2444 best_effort_timestamp = ist->dts_buffer[0];
2446 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2447 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2448 ist->nb_dts_buffer--;
2451 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2452 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2454 if (ts != AV_NOPTS_VALUE)
2455 ist->next_pts = ist->pts = ts;
2459 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2460 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2461 ist->st->index, av_ts2str(decoded_frame->pts),
2462 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2463 best_effort_timestamp,
2464 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2465 decoded_frame->key_frame, decoded_frame->pict_type,
2466 ist->st->time_base.num, ist->st->time_base.den);
/* Prefer the container-level aspect ratio over the codec-level one. */
2469 if (ist->st->sample_aspect_ratio.num)
2470 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2472 err = send_frame_to_filters(ist, decoded_frame);
2475 av_frame_unref(ist->filter_frame);
2476 av_frame_unref(decoded_frame);
2477 return err < 0 ? err : ret;
/* Decode one subtitle packet and dispatch the result: render it for
 * sub2video filtering, queue it until the filtergraph is configured, or
 * encode it directly to every matching subtitle output stream.
 * NOTE(review): some original lines (braces, early returns) are not
 * visible in this extract; only comments were added. */
2480 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2482 AVSubtitle subtitle;
2484 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2485 &subtitle, got_output, pkt);
2487 check_decode_result(NULL, got_output, ret);
/* On error or no output: flush sub2video state and bail out. */
2489 if (ret < 0 || !*got_output) {
2491 sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle so it stops displaying
 * when the current one starts, then swap current/previous state so the
 * (possibly shortened) previous subtitle is the one emitted now. */
2495 if (ist->fix_sub_duration) {
2497 if (ist->prev_sub.got_output) {
2498 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2499 1000, AV_TIME_BASE);
2500 if (end < ist->prev_sub.subtitle.end_display_time) {
2501 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2502 "Subtitle duration reduced from %d to %d%s\n",
2503 ist->prev_sub.subtitle.end_display_time, end,
2504 end <= 0 ? ", dropping it" : "");
2505 ist->prev_sub.subtitle.end_display_time = end;
2508 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2509 FFSWAP(int, ret, ist->prev_sub.ret);
2510 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video path: render immediately when the video frame exists,
 * otherwise buffer the subtitle in a FIFO (doubled on demand) until the
 * filtergraph is ready to consume it. */
2518 if (ist->sub2video.frame) {
2519 sub2video_update(ist, &subtitle);
2520 } else if (ist->nb_filters) {
2521 if (!ist->sub2video.sub_queue)
2522 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2523 if (!ist->sub2video.sub_queue)
2525 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2526 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2530 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2534 if (!subtitle.num_rects)
2537 ist->frames_decoded++;
/* Encode the subtitle on every eligible subtitle output stream. */
2539 for (i = 0; i < nb_output_streams; i++) {
2540 OutputStream *ost = output_streams[i];
2542 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2543 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2546 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2551 avsubtitle_free(&subtitle);
/* Signal EOF to every filtergraph input fed by this input stream.
 * NOTE(review): the error check after ifilter_send_eof() and the final
 * return are not visible in this extract — verify against the full file. */
2555 static int send_filter_eof(InputStream *ist)
2558 for (i = 0; i < ist->nb_filters; i++) {
2559 ret = ifilter_send_eof(ist->filters[i]);
2566 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: maintains the stream's dts/pts clocks,
 * runs the type-specific decoder when decoding is needed (looping to
 * drain at EOF), sends filter EOFs after flushing, and otherwise
 * advances timestamps and forwards the packet for stream copy.
 * Returns 0 once EOF has been fully processed, non-zero otherwise.
 * NOTE(review): many original lines (braces, declarations of ret,
 * got_output, repeating, duration, avpkt setup) are not visible in this
 * extract; only comments were added. */
2567 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2571 int eof_reached = 0;
/* First packet: seed the stream clock; start slightly in the past to
 * account for decoder delay when B-frames are present. */
2574 if (!ist->saw_first_ts) {
2575 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2577 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2578 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2579 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2581 ist->saw_first_ts = 1;
2584 if (ist->next_dts == AV_NOPTS_VALUE)
2585 ist->next_dts = ist->dts;
2586 if (ist->next_pts == AV_NOPTS_VALUE)
2587 ist->next_pts = ist->pts;
2591 av_init_packet(&avpkt);
/* Sync the stream clock to the packet's dts (AV_TIME_BASE units). */
2598 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2599 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2600 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2601 ist->next_pts = ist->pts = ist->dts;
2604 // while we have more to decode or while the decoder did output something on EOF
2605 while (ist->decoding_needed) {
2609 ist->pts = ist->next_pts;
2610 ist->dts = ist->next_dts;
/* The packet is only submitted on the first loop iteration; subsequent
 * iterations ('repeating') just pull more frames out of the decoder. */
2612 switch (ist->dec_ctx->codec_type) {
2613 case AVMEDIA_TYPE_AUDIO:
2614 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2616 case AVMEDIA_TYPE_VIDEO:
2617 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
/* Estimate the frame duration: packet duration if available, else
 * derive it from the codec frame rate and repeat_pict. */
2618 if (!repeating || !pkt || got_output) {
2619 if (pkt && pkt->duration) {
2620 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2621 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2622 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2623 duration = ((int64_t)AV_TIME_BASE *
2624 ist->dec_ctx->framerate.den * ticks) /
2625 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2628 if(ist->dts != AV_NOPTS_VALUE && duration) {
2629 ist->next_dts += duration;
2631 ist->next_dts = AV_NOPTS_VALUE;
2635 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2637 case AVMEDIA_TYPE_SUBTITLE:
2640 ret = transcode_subtitles(ist, &avpkt, &got_output);
2641 if (!pkt && ret >= 0)
2648 if (ret == AVERROR_EOF) {
2654 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2655 ist->file_index, ist->st->index, av_err2str(ret));
2662 ist->got_output = 1;
2667 // During draining, we might get multiple output frames in this loop.
2668 // ffmpeg.c does not drain the filter chain on configuration changes,
2669 // which means if we send multiple frames at once to the filters, and
2670 // one of those frames changes configuration, the buffered frames will
2671 // be lost. This can upset certain FATE tests.
2672 // Decode only 1 frame per call on EOF to appease these FATE tests.
2673 // The ideal solution would be to rewrite decoding to use the new
2674 // decoding API in a better way.
2681 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2682 /* except when looping we need to flush but not to send an EOF */
2683 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2684 int ret = send_filter_eof(ist);
2686 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2691 /* handle stream copy */
/* Stream copy: no decoder, so advance the clock analytically from the
 * codec parameters before forwarding the packet. */
2692 if (!ist->decoding_needed) {
2693 ist->dts = ist->next_dts;
2694 switch (ist->dec_ctx->codec_type) {
2695 case AVMEDIA_TYPE_AUDIO:
2696 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2697 ist->dec_ctx->sample_rate;
2699 case AVMEDIA_TYPE_VIDEO:
2700 if (ist->framerate.num) {
2701 // TODO: Remove work-around for c99-to-c89 issue 7
2702 AVRational time_base_q = AV_TIME_BASE_Q;
2703 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2704 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
/* NOTE(review): 'pkt' is dereferenced here without a NULL check, while
 * the loop at line 2718 below guards with 'pkt &&' — confirm this path
 * cannot be reached with pkt == NULL (EOF flush of a copied stream). */
2705 } else if (pkt->duration) {
2706 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2707 } else if(ist->dec_ctx->framerate.num != 0) {
2708 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2709 ist->next_dts += ((int64_t)AV_TIME_BASE *
2710 ist->dec_ctx->framerate.den * ticks) /
2711 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2715 ist->pts = ist->dts;
2716 ist->next_pts = ist->next_dts;
/* Forward the packet to every output stream doing stream copy. */
2718 for (i = 0; pkt && i < nb_output_streams; i++) {
2719 OutputStream *ost = output_streams[i];
2721 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2724 do_streamcopy(ist, ost, pkt);
2727 return !eof_reached;
/* Generate an SDP description covering all RTP output files and either
 * print it to stdout or write it to the user-supplied sdp_filename.
 * Waits (returns early) until every output file has written its header.
 * NOTE(review): some lines (early returns, frees, braces) are not
 * visible in this extract; only comments were added. */
2730 static void print_sdp(void)
2735 AVIOContext *sdp_pb;
2736 AVFormatContext **avc;
/* All muxers must be initialized before an SDP can be produced. */
2738 for (i = 0; i < nb_output_files; i++) {
2739 if (!output_files[i]->header_written)
2743 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP outputs; j counts how many were found. */
2746 for (i = 0, j = 0; i < nb_output_files; i++) {
2747 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2748 avc[j] = output_files[i]->ctx;
2756 av_sdp_create(avc, j, sdp, sizeof(sdp));
2758 if (!sdp_filename) {
2759 printf("SDP:\n%s\n", sdp);
2762 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2763 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2765 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2766 avio_closep(&sdp_pb);
2767 av_freep(&sdp_filename);
/* Scan the (NULL-name-terminated) hwaccels table for the entry whose
 * pixel format matches 'pix_fmt'.
 * NOTE(review): the "not found" return after the loop is not visible in
 * this extract. */
2775 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2778 for (i = 0; hwaccels[i].name; i++)
2779 if (hwaccels[i].pix_fmt == pix_fmt)
2780 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's candidate
 * pixel-format list and pick the first acceptable one, initializing the
 * matching hwaccel when a hardware format is selected.  Records the
 * chosen hwaccel id/pixel format on the InputStream.
 * NOTE(review): the loop's fall-through (software format selection) and
 * some braces are not visible in this extract; only comments were added. */
2784 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2786 InputStream *ist = s->opaque;
2787 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2790 for (p = pix_fmts; *p != -1; p++) {
2791 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2792 const HWAccel *hwaccel;
/* Software formats are handled outside this hwaccel-selection block. */
2794 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Skip hwaccels that conflict with an already-active one or with the
 * user's explicit -hwaccel choice. */
2797 hwaccel = get_hwaccel(*p);
2799 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2800 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2803 ret = hwaccel->init(s);
/* Init failure is fatal only when this hwaccel was explicitly requested. */
2805 if (ist->hwaccel_id == hwaccel->id) {
2806 av_log(NULL, AV_LOG_FATAL,
2807 "%s hwaccel requested for input stream #%d:%d, "
2808 "but cannot be initialized.\n", hwaccel->name,
2809 ist->file_index, ist->st->index);
2810 return AV_PIX_FMT_NONE;
2815 if (ist->hw_frames_ctx) {
2816 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2817 if (!s->hw_frames_ctx)
2818 return AV_PIX_FMT_NONE;
2821 ist->active_hwaccel_id = hwaccel->id;
2822 ist->hwaccel_pix_fmt = *p;
2829 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2831 InputStream *ist = s->opaque;
2833 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2834 return ist->hwaccel_get_buffer(s, frame, flags);
2836 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for input stream 'ist_index' (when decoding is
 * needed): installs the get_format/get_buffer2 callbacks, applies
 * stream-specific decoder options, and opens the codec.  On failure a
 * human-readable message is written into 'error' and a negative AVERROR
 * is returned.  Always resets next_pts/next_dts at the end.
 * NOTE(review): some lines (braces, a codec NULL check, returns) are
 * not visible in this extract; only comments were added. */
2839 static int init_input_stream(int ist_index, char *error, int error_len)
2842 InputStream *ist = input_streams[ist_index];
2844 if (ist->decoding_needed) {
2845 AVCodec *codec = ist->dec;
2847 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2848 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2849 return AVERROR(EINVAL);
/* Hook ffmpeg.c's callbacks into the decoder context; opaque carries
 * the InputStream back into get_format()/get_buffer(). */
2852 ist->dec_ctx->opaque = ist;
2853 ist->dec_ctx->get_format = get_format;
2854 ist->dec_ctx->get_buffer2 = get_buffer;
2855 ist->dec_ctx->thread_safe_callbacks = 1;
2857 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2858 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2859 (ist->decoding_needed & DECODING_FOR_OST)) {
2860 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2861 if (ist->decoding_needed & DECODING_FOR_FILTER)
2862 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2865 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2867 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2868 * audio, and video decoders such as cuvid or mediacodec */
2869 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic thread-count selection unless the user set one. */
2871 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2872 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2873 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2874 if (ret == AVERROR_EXPERIMENTAL)
2875 abort_codec_experimental(codec, 0);
2877 snprintf(error, error_len,
2878 "Error while opening decoder for input stream "
2880 ist->file_index, ist->st->index, av_err2str(ret));
/* Any options the decoder did not consume are reported as errors. */
2883 assert_avoptions(ist->decoder_opts);
2886 ist->next_pts = AV_NOPTS_VALUE;
2887 ist->next_dts = AV_NOPTS_VALUE;
/* Return the InputStream directly feeding this output stream (valid
 * source_index), used by stream copy and encoder setup.
 * NOTE(review): the return value for ost->source_index < 0 (filter-fed
 * outputs) is not visible in this extract. */
2892 static InputStream *get_input_stream(OutputStream *ost)
2894 if (ost->source_index >= 0)
2895 return input_streams[ost->source_index];
/* qsort() comparator ordering int64_t values ascending.  Computes the
 * sign via two comparisons rather than subtraction, so it cannot
 * overflow (equivalent to FFDIFFSIGN). */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2904 /* open the muxer when all the streams are initialized */
/* If every stream of output file 'of' is initialized, write the muxer
 * header, optionally print the SDP, and flush packets that were queued
 * in per-stream muxing FIFOs while waiting for initialization.
 * NOTE(review): some lines (early return when a stream is uninitialized,
 * braces, the final return) are not visible in this extract. */
2905 static int check_init_output_file(OutputFile *of, int file_index)
2909 for (i = 0; i < of->ctx->nb_streams; i++) {
2910 OutputStream *ost = output_streams[of->ost_index + i];
2911 if (!ost->initialized)
2915 of->ctx->interrupt_callback = int_cb;
2917 ret = avformat_write_header(of->ctx, &of->opts);
2919 av_log(NULL, AV_LOG_ERROR,
2920 "Could not write header for output file #%d "
2921 "(incorrect codec parameters ?): %s\n",
2922 file_index, av_err2str(ret));
2925 //assert_avoptions(of->opts);
2926 of->header_written = 1;
2928 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2930 if (sdp_filename || want_sdp)
2933 /* flush the muxing queues */
2934 for (i = 0; i < of->ctx->nb_streams; i++) {
2935 OutputStream *ost = output_streams[of->ost_index + i];
2937 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2938 if (!av_fifo_size(ost->muxing_queue))
2939 ost->mux_timebase = ost->st->time_base;
/* Drain packets buffered before the muxer header was written. */
2941 while (av_fifo_size(ost->muxing_queue)) {
2943 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2944 write_packet(of, &pkt, ost);
/* Initialize the chain of bitstream filters attached to an output
 * stream: propagate codec parameters and time base through the chain,
 * init each filter, then copy the final parameters/time base back onto
 * the output stream.  Returns 0 or a negative AVERROR.
 * NOTE(review): some lines (braces, intermediate error returns) are not
 * visible in this extract; only comments were added. */
2951 static int init_output_bsfs(OutputStream *ost)
2956 if (!ost->nb_bitstream_filters)
/* Each filter's input is the previous filter's output (or the stream's
 * own parameters/time base for the first one). */
2959 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2960 ctx = ost->bsf_ctx[i];
2962 ret = avcodec_parameters_copy(ctx->par_in,
2963 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2967 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2969 ret = av_bsf_init(ctx);
2971 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2972 ost->bsf_ctx[i]->filter->name);
/* The last filter's output defines what the muxer will see. */
2977 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2978 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2982 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream in stream-copy mode: clone codec parameters
 * from the source input stream, resolve the codec tag for the target
 * container, carry over timing, disposition, side data and per-type
 * tweaks (audio block_align, video aspect ratio / frame rates).
 * Returns 0 or a negative AVERROR.
 * NOTE(review): some lines (braces, error returns) are not visible in
 * this extract; only comments were added. */
2987 static int init_output_stream_streamcopy(OutputStream *ost)
2989 OutputFile *of = output_files[ost->file_index];
2990 InputStream *ist = get_input_stream(ost);
2991 AVCodecParameters *par_dst = ost->st->codecpar;
2992 AVCodecParameters *par_src = ost->ref_par;
2995 uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy requires a direct input stream and no filtergraph. */
2997 av_assert0(ist && !ost->filter);
2999 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3001 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3003 av_log(NULL, AV_LOG_FATAL,
3004 "Error setting up codec context options.\n");
3007 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec tag only if the target container accepts it
 * (or has no tag table / no alternative tag for this codec id). */
3010 unsigned int codec_tag_tmp;
3011 if (!of->ctx->oformat->codec_tag ||
3012 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3013 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3014 codec_tag = par_src->codec_tag;
3017 ret = avcodec_parameters_copy(par_dst, par_src);
3021 par_dst->codec_tag = codec_tag;
3023 if (!ost->frame_rate.num)
3024 ost->frame_rate = ist->framerate;
3025 ost->st->avg_frame_rate = ost->frame_rate;
3027 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3031 // copy timebase while removing common factors
3032 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3033 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3035 // copy estimated duration as a hint to the muxer
3036 if (ost->st->duration <= 0 && ist->st->duration > 0)
3037 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3040 ost->st->disposition = ist->st->disposition;
/* Deep-copy stream side data, skipping the display matrix when the
 * user overrode rotation on the command line. */
3042 if (ist->st->nb_side_data) {
3043 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
3044 sizeof(*ist->st->side_data));
3045 if (!ost->st->side_data)
3046 return AVERROR(ENOMEM);
3048 ost->st->nb_side_data = 0;
3049 for (i = 0; i < ist->st->nb_side_data; i++) {
3050 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3051 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
3053 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
3056 sd_dst->data = av_malloc(sd_src->size);
3058 return AVERROR(ENOMEM);
3059 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3060 sd_dst->size = sd_src->size;
3061 sd_dst->type = sd_src->type;
3062 ost->st->nb_side_data++;
3066 ost->parser = av_parser_init(par_dst->codec_id);
3067 ost->parser_avctx = avcodec_alloc_context3(NULL);
3068 if (!ost->parser_avctx)
3069 return AVERROR(ENOMEM);
3071 switch (par_dst->codec_type) {
3072 case AVMEDIA_TYPE_AUDIO:
3073 if (audio_volume != 256) {
3074 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Certain MP3/AC3 block_align values are bogus for copied streams. */
3077 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3078 par_dst->block_align= 0;
3079 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3080 par_dst->block_align= 0;
3082 case AVMEDIA_TYPE_VIDEO:
3083 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3085 av_mul_q(ost->frame_aspect_ratio,
3086 (AVRational){ par_dst->height, par_dst->width });
3087 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3088 "with stream copy may produce invalid files\n");
3090 else if (ist->st->sample_aspect_ratio.num)
3091 sar = ist->st->sample_aspect_ratio;
3093 sar = par_src->sample_aspect_ratio;
3094 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3095 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3096 ost->st->r_frame_rate = ist->st->r_frame_rate;
3100 ost->mux_timebase = ist->st->time_base;
/* Write an "encoder" metadata tag on the output stream identifying the
 * libavcodec build and encoder name, unless the user already supplied
 * one.  Under -bitexact (format or codec level) only "Lavc <name>" is
 * written so output stays reproducible.
 * NOTE(review): some lines (braces, early returns) are not visible in
 * this extract; only comments were added. */
3105 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3107 AVDictionaryEntry *e;
3109 uint8_t *encoder_string;
3110 int encoder_string_len;
3111 int format_flags = 0;
3112 int codec_flags = 0;
/* User-provided tag wins; don't overwrite it. */
3114 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate the user's fflags/flags option strings to detect bitexact. */
3117 e = av_dict_get(of->opts, "fflags", NULL, 0);
3119 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3122 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3124 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3126 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3129 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3132 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3133 encoder_string = av_mallocz(encoder_string_len);
3134 if (!encoder_string)
3137 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3138 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3140 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3141 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Dictionary takes ownership of encoder_string (DONT_STRDUP_VAL). */
3142 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3143 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames specification 'kf' (a comma-separated
 * list of times, where an entry may be "chapters[+offset]" to expand to
 * all chapter start times) into a sorted array of keyframe pts in the
 * encoder time base, stored on ost->forced_kf_pts/forced_kf_count.
 * NOTE(review): some lines (comma counting, braces, exits) are not
 * visible in this extract; only comments were added. */
3146 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3147 AVCodecContext *avctx)
3150 int n = 1, i, size, index = 0;
/* First pass over the spec counts entries to size the pts array. */
3153 for (p = kf; *p; p++)
3157 pts = av_malloc_array(size, sizeof(*pts));
3159 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3164 for (i = 0; i < n; i++) {
3165 char *next = strchr(p, ',');
/* "chapters[+offset]": grow the array and append one pts per chapter. */
3170 if (!memcmp(p, "chapters", 8)) {
3172 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3175 if (avf->nb_chapters > INT_MAX - size ||
3176 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3178 av_log(NULL, AV_LOG_FATAL,
3179 "Could not allocate forced key frames array.\n");
3182 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3183 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3185 for (j = 0; j < avf->nb_chapters; j++) {
3186 AVChapter *c = avf->chapters[j];
3187 av_assert1(index < size);
3188 pts[index++] = av_rescale_q(c->start, c->time_base,
3189 avctx->time_base) + t;
/* Plain entry: a single timestamp, rescaled to the encoder time base. */
3194 t = parse_time_or_die("force_key_frames", p, 1);
3195 av_assert1(index < size);
3196 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Sort so do_video_out() can consume the list monotonically. */
3203 av_assert0(index == size);
3204 qsort(pts, size, sizeof(*pts), compare_int64);
3205 ost->forced_kf_count = size;
3206 ost->forced_kf_pts = pts;
/* Configure the encoder context of an output stream from its source
 * (input stream and/or filtergraph sink): disposition, frame rate
 * selection, and per-media-type parameters (sample/pixel format,
 * dimensions, time base, forced keyframes).  Returns 0 or a negative
 * AVERROR.
 * NOTE(review): some lines (braces, intermediate returns) are not
 * visible in this extract; only comments were added. */
3209 static int init_output_stream_encode(OutputStream *ost)
3211 InputStream *ist = get_input_stream(ost);
3212 AVCodecContext *enc_ctx = ost->enc_ctx;
3213 AVCodecContext *dec_ctx = NULL;
3214 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3217 set_encoder_id(output_files[ost->file_index], ost);
3220 ost->st->disposition = ist->st->disposition;
3222 dec_ctx = ist->dec_ctx;
3224 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the file, mark the
 * audio/video stream as the default one. */
3226 for (j = 0; j < oc->nb_streams; j++) {
3227 AVStream *st = oc->streams[j];
3228 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3231 if (j == oc->nb_streams)
3232 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3233 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3234 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate resolution order: -r, filter sink, input -r, input
 * r_frame_rate, and finally a 25 fps fallback with a warning. */
3237 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3238 if (!ost->frame_rate.num)
3239 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3240 if (ist && !ost->frame_rate.num)
3241 ost->frame_rate = ist->framerate;
3242 if (ist && !ost->frame_rate.num)
3243 ost->frame_rate = ist->st->r_frame_rate;
3244 if (ist && !ost->frame_rate.num) {
3245 ost->frame_rate = (AVRational){25, 1};
3246 av_log(NULL, AV_LOG_WARNING,
3248 "about the input framerate is available. Falling "
3249 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3250 "if you want a different framerate.\n",
3251 ost->file_index, ost->index);
3253 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest rate the encoder supports (unless -force_fps). */
3254 if (ost->enc->supported_framerates && !ost->force_fps) {
3255 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3256 ost->frame_rate = ost->enc->supported_framerates[idx];
3258 // reduce frame rate for mpeg4 to be within the spec limits
3259 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3260 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3261 ost->frame_rate.num, ost->frame_rate.den, 65535);
3265 switch (enc_ctx->codec_type) {
3266 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the filtergraph sink. */
3267 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3269 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3270 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3271 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3272 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3273 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3274 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3276 case AVMEDIA_TYPE_VIDEO:
3277 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3278 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3279 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3280 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3281 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3282 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3283 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale forced keyframe times into the final encoder time base. */
3285 for (j = 0; j < ost->forced_kf_count; j++)
3286 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3288 enc_ctx->time_base);
3290 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3291 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3292 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3293 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3294 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3295 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
/* Warn about non-yuv420p defaults that break older players. */
3296 if (!strncmp(ost->enc->name, "libx264", 7) &&
3297 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3298 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3299 av_log(NULL, AV_LOG_WARNING,
3300 "No pixel format specified, %s for H.264 encoding chosen.\n"
3301 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3302 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3303 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3304 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3305 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3306 av_log(NULL, AV_LOG_WARNING,
3307 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3308 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3309 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3310 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3312 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3313 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3315 ost->st->avg_frame_rate = ost->frame_rate;
/* Any geometry/format change invalidates the inherited raw bit depth. */
3318 enc_ctx->width != dec_ctx->width ||
3319 enc_ctx->height != dec_ctx->height ||
3320 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3321 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* Forced keyframes: either an expression ("expr:..."), the special
 * "source" keyword (handled elsewhere), or a static time list. */
3324 if (ost->forced_keyframes) {
3325 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3326 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3327 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3329 av_log(NULL, AV_LOG_ERROR,
3330 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3333 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3334 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3335 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3336 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3338 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3339 // parse it only for static kf timings
3340 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3341 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3345 case AVMEDIA_TYPE_SUBTITLE:
3346 enc_ctx->time_base = AV_TIME_BASE_Q;
3347 if (!enc_ctx->width) {
3348 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3349 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3352 case AVMEDIA_TYPE_DATA:
3359 ost->mux_timebase = enc_ctx->time_base;
3364 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3368 if (ost->encoding_needed) {
3369 AVCodec *codec = ost->enc;
3370 AVCodecContext *dec = NULL;
3373 ret = init_output_stream_encode(ost);
3377 if ((ist = get_input_stream(ost)))
3379 if (dec && dec->subtitle_header) {
3380 /* ASS code assumes this buffer is null terminated so add extra byte. */
3381 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3382 if (!ost->enc_ctx->subtitle_header)
3383 return AVERROR(ENOMEM);
3384 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3385 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3387 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3388 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3389 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3391 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3392 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3393 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3395 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3396 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3397 if (!ost->enc_ctx->hw_frames_ctx)
3398 return AVERROR(ENOMEM);
3401 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3402 if (ret == AVERROR_EXPERIMENTAL)
3403 abort_codec_experimental(codec, 1);
3404 snprintf(error, error_len,
3405 "Error while opening encoder for output stream #%d:%d - "
3406 "maybe incorrect parameters such as bit_rate, rate, width or height",
3407 ost->file_index, ost->index);
3410 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3411 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3412 av_buffersink_set_frame_size(ost->filter->filter,
3413 ost->enc_ctx->frame_size);
3414 assert_avoptions(ost->encoder_opts);
3415 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3416 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3417 " It takes bits/s as argument, not kbits/s\n");
3419 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3421 av_log(NULL, AV_LOG_FATAL,
3422 "Error initializing the output stream codec context.\n");
3426 * FIXME: ost->st->codec should't be needed here anymore.
3428 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3432 if (ost->enc_ctx->nb_coded_side_data) {
3435 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3436 sizeof(*ost->st->side_data));
3437 if (!ost->st->side_data)
3438 return AVERROR(ENOMEM);
3440 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3441 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3442 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3444 sd_dst->data = av_malloc(sd_src->size);
3446 return AVERROR(ENOMEM);
3447 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3448 sd_dst->size = sd_src->size;
3449 sd_dst->type = sd_src->type;
3450 ost->st->nb_side_data++;
3454 // copy timebase while removing common factors
3455 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3456 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3458 // copy estimated duration as a hint to the muxer
3459 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3460 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3462 ost->st->codec->codec= ost->enc_ctx->codec;
3463 } else if (ost->stream_copy) {
3464 ret = init_output_stream_streamcopy(ost);
3469 * FIXME: will the codec context used by the parser during streamcopy
3470 * This should go away with the new parser API.
3472 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3477 // parse user provided disposition, and update stream values
3478 if (ost->disposition) {
3479 static const AVOption opts[] = {
3480 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3481 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3482 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3483 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3484 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3485 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3486 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3487 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3488 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3489 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3490 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3491 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3492 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3493 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3496 static const AVClass class = {
3498 .item_name = av_default_item_name,
3500 .version = LIBAVUTIL_VERSION_INT,
3502 const AVClass *pclass = &class;
3504 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3509 /* initialize bitstream filters for the output stream
3510 * needs to be done here, because the codec id for streamcopy is not
3511 * known until now */
3512 ret = init_output_bsfs(ost);
3516 ost->initialized = 1;
3518 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (once per stream index) about an input stream that appeared after the
 * initial probe. Logs the media type, file:stream pair, byte position and DTS,
 * then raises nb_streams_warn so the same stream is not reported again.
 * NOTE(review): this listing is sampled — the early-return body for
 * already-warned streams (between lines 3530 and 3532) is elided. */
3525 static void report_new_stream(int input_index, AVPacket *pkt)
3527 InputFile *file = input_files[input_index];
3528 AVStream *st = file->ctx->streams[pkt->stream_index];
3530 if (pkt->stream_index < file->nb_streams_warn)
3532 av_log(file->ctx, AV_LOG_WARNING,
3533 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3534 av_get_media_type_string(st->codecpar->codec_type),
3535 input_index, pkt->stream_index,
3536 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the highest stream index we have warned about (exclusive). */
3537 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup performed before entering the main transcode loop:
 *  1. bind each complex-filtergraph output to a source input stream (for
 *     single-input graphs) so downstream code has a source_index;
 *  2. initialize framerate-emulation start times;
 *  3. init all input streams, closing output encoder contexts on failure;
 *  4. open encoders for output streams NOT fed by a filtergraph (filtered
 *     streams are initialized lazily, once a first frame is available);
 *  5. discard input programs none of whose streams are used;
 *  6. write headers for outputs that allow zero streams (AVFMT_NOSTREAMS);
 *  7. dump the human-readable stream mapping to the log.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): the listing is sampled — 'continue'/'break' bodies, braces
 * and some error-path lines are elided (original line numbers jump). */
3540 static int transcode_init(void)
3542 int ret = 0, i, j, k;
3543 AVFormatContext *oc;
3546 char error[1024] = {0};
/* Bind complex filtergraph outputs to an input stream: pick the input
 * stream feeding the graph (only when the graph has exactly one input). */
3548 for (i = 0; i < nb_filtergraphs; i++) {
3549 FilterGraph *fg = filtergraphs[i];
3550 for (j = 0; j < fg->nb_outputs; j++) {
3551 OutputFilter *ofilter = fg->outputs[j];
3552 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3554 if (fg->nb_inputs != 1)
/* Scan backwards so k ends at the matching input stream's index. */
3556 for (k = nb_input_streams-1; k >= 0 ; k--)
3557 if (fg->inputs[0]->ist == input_streams[k])
3559 ofilter->ost->source_index = k;
3563 /* init framerate emulation */
3564 for (i = 0; i < nb_input_files; i++) {
3565 InputFile *ifile = input_files[i];
3566 if (ifile->rate_emu)
3567 for (j = 0; j < ifile->nb_streams; j++)
/* Record the wall-clock start so -re can pace packet delivery. */
3568 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3571 /* init input streams */
3572 for (i = 0; i < nb_input_streams; i++)
3573 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoder contexts before bailing out. */
3574 for (i = 0; i < nb_output_streams; i++) {
3575 ost = output_streams[i];
3576 avcodec_close(ost->enc_ctx);
3581 /* open each encoder */
3582 for (i = 0; i < nb_output_streams; i++) {
3583 // skip streams fed from filtergraphs until we have a frame for them
3584 if (output_streams[i]->filter)
3587 ret = init_output_stream(output_streams[i], error, sizeof(error));
3592 /* discard unused programs */
3593 for (i = 0; i < nb_input_files; i++) {
3594 InputFile *ifile = input_files[i];
3595 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3596 AVProgram *p = ifile->ctx->programs[j];
3597 int discard = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is used. */
3599 for (k = 0; k < p->nb_stream_indexes; k++)
3600 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3601 discard = AVDISCARD_DEFAULT;
3604 p->discard = discard;
3608 /* write headers for files with no streams */
3609 for (i = 0; i < nb_output_files; i++) {
3610 oc = output_files[i]->ctx;
3611 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3612 ret = check_init_output_file(output_files[i], i);
3619 /* dump the stream mapping */
3620 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3621 for (i = 0; i < nb_input_streams; i++) {
3622 ist = input_streams[i];
3624 for (j = 0; j < ist->nb_filters; j++) {
/* Complex graphs are reported as "input -> filter name". */
3625 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3626 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3627 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3628 ist->filters[j]->name);
3629 if (nb_filtergraphs > 1)
3630 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3631 av_log(NULL, AV_LOG_INFO, "\n");
3636 for (i = 0; i < nb_output_streams; i++) {
3637 ost = output_streams[i];
3639 if (ost->attachment_filename) {
3640 /* an attached file */
3641 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3642 ost->attachment_filename, ost->file_index, ost->index);
3646 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3647 /* output from a complex graph */
3648 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3649 if (nb_filtergraphs > 1)
3650 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3652 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3653 ost->index, ost->enc ? ost->enc->name : "?");
/* Plain (non-complex-graph) mapping: "in#f:s -> out#f:s". */
3657 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3658 input_streams[ost->source_index]->file_index,
3659 input_streams[ost->source_index]->st->index,
3662 if (ost->sync_ist != input_streams[ost->source_index])
3663 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3664 ost->sync_ist->file_index,
3665 ost->sync_ist->st->index);
3666 if (ost->stream_copy)
3667 av_log(NULL, AV_LOG_INFO, " (copy)");
3669 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3670 const AVCodec *out_codec = ost->enc;
3671 const char *decoder_name = "?";
3672 const char *in_codec_name = "?";
3673 const char *encoder_name = "?";
3674 const char *out_codec_name = "?";
3675 const AVCodecDescriptor *desc;
3678 decoder_name = in_codec->name;
3679 desc = avcodec_descriptor_get(in_codec->id);
3681 in_codec_name = desc->name;
/* When the decoder name equals the codec name, label it "native". */
3682 if (!strcmp(decoder_name, in_codec_name))
3683 decoder_name = "native";
3687 encoder_name = out_codec->name;
3688 desc = avcodec_descriptor_get(out_codec->id);
3690 out_codec_name = desc->name;
3691 if (!strcmp(encoder_name, out_codec_name))
3692 encoder_name = "native";
3695 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3696 in_codec_name, decoder_name,
3697 out_codec_name, encoder_name);
3699 av_log(NULL, AV_LOG_INFO, "\n");
3703 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3707 transcode_init_done = 1;
3712 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3713 static int need_output(void)
3717 for (i = 0; i < nb_output_streams; i++) {
3718 OutputStream *ost = output_streams[i];
3719 OutputFile *of = output_files[ost->file_index];
3720 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are finished or whose file hit -fs (limit_filesize). */
3722 if (ost->finished ||
3723 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Once a stream reaches its -frames limit, close every stream of that
 * output file (visible lines close all of of->ctx's streams). */
3725 if (ost->frame_number >= ost->max_frames) {
3727 for (j = 0; j < of->ctx->nb_streams; j++)
3728 close_output_stream(output_streams[of->ost_index + j]);
3739 * Select the output stream to process.
3741 * @return selected output stream, or NULL if none available
3743 static OutputStream *choose_output(void)
3746 int64_t opts_min = INT64_MAX;
3747 OutputStream *ost_min = NULL;
3749 for (i = 0; i < nb_output_streams; i++) {
3750 OutputStream *ost = output_streams[i];
/* Streams with no DTS yet compare as INT64_MIN, i.e. highest priority. */
3751 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3752 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3754 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3755 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Uninitialized streams whose inputs are not yet done are skipped
 * (elided 'continue' body between lines 3757 and 3760). */
3757 if (!ost->initialized && !ost->inputs_done)
/* Track the unfinished stream with the smallest rescaled DTS; a stream
 * currently marked unavailable yields NULL so the caller retries input. */
3760 if (!ost->finished && opts < opts_min) {
3762 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios, used
 * while reading interactive commands. Silently does nothing if stdin is
 * not a tty (tcgetattr fails). */
3768 static void set_tty_echo(int on)
3772 if (tcgetattr(0, &tty) == 0) {
3773 if (on) tty.c_lflag |= ECHO;
3774 else tty.c_lflag &= ~ECHO;
3775 tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms, not in daemon mode) and act on
 * interactive keys: q/quit, +/- verbosity, 's' QP histogram, 'c'/'C' send
 * or queue a filter command, 'd'/'D' debug modes, '?'/'h' help.
 * Returns AVERROR_EXIT to request termination, 0 otherwise.
 * NOTE(review): sampled listing — the read_key() call, several 'q'/'h'
 * branches and loop braces are elided (original line numbers jump). */
3780 static int check_keyboard_interaction(int64_t cur_time)
3783 static int64_t last_time;
3784 if (received_nb_signals)
3785 return AVERROR_EXIT;
3786 /* read_key() returns 0 on EOF */
3787 if(cur_time - last_time >= 100000 && !run_as_daemon){
3789 last_time = cur_time;
3793 return AVERROR_EXIT;
/* Verbosity and statistics toggles. */
3794 if (key == '+') av_log_set_level(av_log_get_level()+10);
3795 if (key == '-') av_log_set_level(av_log_get_level()-10);
3796 if (key == 's') qp_hist ^= 1;
3799 do_hex_dump = do_pkt_dump = 0;
3800 } else if(do_pkt_dump){
3804 av_log_set_level(AV_LOG_DEBUG);
/* 'c': send a command to the first matching filter; 'C': queue on all. */
3806 if (key == 'c' || key == 'C'){
3807 char buf[4096], target[64], command[256], arg[256] = {0};
3810 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3813 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3818 fprintf(stderr, "\n");
/* Expect at least 3 fields: target, time, command (arg optional). */
3820 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3821 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3822 target, time, command, arg);
3823 for (i = 0; i < nb_filtergraphs; i++) {
3824 FilterGraph *fg = filtergraphs[i];
3827 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3828 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3829 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3830 } else if (key == 'c') {
3831 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3832 ret = AVERROR_PATCHWELCOME;
3834 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3836 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3841 av_log(NULL, AV_LOG_ERROR,
3842 "Parse error, at least 3 arguments were expected, "
3843 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle hard-coded debug flags; 'D': prompt for a numeric value. */
3846 if (key == 'd' || key == 'D'){
3849 debug = input_streams[0]->st->codec->debug<<1;
3850 if(!debug) debug = 1;
3851 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3858 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3863 fprintf(stderr, "\n");
3864 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3865 fprintf(stderr,"error parsing debug value\n");
/* Propagate the debug value to every decoder and encoder context. */
3867 for(i=0;i<nb_input_streams;i++) {
3868 input_streams[i]->st->codec->debug = debug;
3870 for(i=0;i<nb_output_streams;i++) {
3871 OutputStream *ost = output_streams[i];
3872 ost->enc_ctx->debug = debug;
3874 if(debug) av_log_set_level(AV_LOG_DEBUG);
3875 fprintf(stderr,"debug=%d\n", debug);
3878 fprintf(stderr, "key function\n"
3879 "? show this help\n"
3880 "+ increase verbosity\n"
3881 "- decrease verbosity\n"
3882 "c Send command to first matching filter supporting it\n"
3883 "C Send/Queue command to all matching filters\n"
3884 "D cycle through available debug modes\n"
3885 "h dump packets/hex press to cycle through the 3 states\n"
3887 "s Show QP histogram\n"
/* Per-input-file demuxer thread (arg is an InputFile*): reads packets with
 * av_read_frame() and forwards them to the main thread through the file's
 * thread message queue. In non-blocking mode, a full queue triggers one
 * warning suggesting a larger -thread_queue_size, then a blocking retry.
 * On read error/EOF the error is propagated to the receiving side. */
3894 static void *input_thread(void *arg)
3897 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3902 ret = av_read_frame(f->ctx, &pkt);
3904 if (ret == AVERROR(EAGAIN)) {
/* Terminal read error: tell the receiver and exit the loop. */
3909 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3912 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3913 if (flags && ret == AVERROR(EAGAIN)) {
/* Queue full in non-blocking mode: retry blocking (flags cleared in an
 * elided line per the jump from 3913 to 3915) and warn once. */
3915 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3916 av_log(f->ctx, AV_LOG_WARNING,
3917 "Thread message queue blocking; consider raising the "
3918 "thread_queue_size option (current value: %d)\n",
3919 f->thread_queue_size);
3922 if (ret != AVERROR_EOF)
3923 av_log(f->ctx, AV_LOG_ERROR,
3924 "Unable to send packet to main thread: %s\n",
/* Sending failed: the packet is ours to free before propagating ret. */
3926 av_packet_unref(&pkt);
3927 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Tear down all demuxer threads: signal EOF to the sending side, drain and
 * unref any queued packets, join each thread, then free its queue. */
3935 static void free_input_threads(void)
3939 for (i = 0; i < nb_input_files; i++) {
3940 InputFile *f = input_files[i];
/* Skip files that never started a thread (no queue allocated). */
3943 if (!f || !f->in_thread_queue)
3945 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3946 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3947 av_packet_unref(&pkt);
3949 pthread_join(f->thread, NULL);
3951 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file; a no-op with a single input
 * (the main thread reads directly in that case). Inputs that are
 * non-seekable, except the "lavfi" pseudo-demuxer, are marked non-blocking.
 * Returns 0 on success or a negative AVERROR. */
3955 static int init_input_threads(void)
3959 if (nb_input_files == 1)
3962 for (i = 0; i < nb_input_files; i++) {
3963 InputFile *f = input_files[i];
3965 if (f->ctx->pb ? !f->ctx->pb->seekable :
3966 strcmp(f->ctx->iformat->name, "lavfi"))
3967 f->non_blocking = 1;
3968 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3969 f->thread_queue_size, sizeof(AVPacket));
3973 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3974 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3975 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno-style code, hence AVERROR(ret). */
3976 return AVERROR(ret);
/* Receive one packet from the file's demuxer-thread queue; non-blocking
 * when the file was flagged as such (the condition line is elided). */
3982 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3984 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3986 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file. With -re (rate emulation) a
 * stream whose DTS is ahead of elapsed wall-clock time yields EAGAIN to
 * pace reading. With multiple input files the packet comes from the
 * demuxer thread queue, otherwise straight from av_read_frame(). */
3990 static int get_input_packet(InputFile *f, AVPacket *pkt)
3994 for (i = 0; i < f->nb_streams; i++) {
3995 InputStream *ist = input_streams[f->ist_index + i];
/* ist->dts is compared in microseconds (AV_TIME_BASE) against elapsed
 * real time since the stream's recorded start. The enclosing rate_emu
 * check is elided in this listing. */
3996 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3997 int64_t now = av_gettime_relative() - ist->start;
3999 return AVERROR(EAGAIN);
4004 if (nb_input_files > 1)
4005 return get_input_packet_mt(f, pkt);
4007 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. a previous step ended in EAGAIN somewhere). */
4010 static int got_eagain(void)
4013 for (i = 0; i < nb_output_streams; i++)
4014 if (output_streams[i]->unavailable)
/* Clear all EAGAIN markers: per-input-file 'eagain' and per-output-stream
 * 'unavailable', so the next loop iteration retries everything. */
4019 static void reset_eagain(void)
4022 for (i = 0; i < nb_input_files; i++)
4023 input_files[i]->eagain = 0;
4024 for (i = 0; i < nb_output_streams; i++)
4025 output_streams[i]->unavailable = 0;
4028 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4029 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4030 AVRational time_base)
/* If *duration is unset, adopt tmp and its time base outright
 * (the assignment lines are elided in this listing). */
4036 return tmp_time_base;
/* Compare across time bases; keep whichever duration is larger. */
4039 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4042 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to start_time, flush
 * every active decoder, then estimate the file's total duration (including
 * the last frame) so subsequent loops can offset timestamps. Audio streams
 * take precedence for the last-frame length since video frame duration is
 * not defined exactly. Decrements ifile->loop when it is positive.
 * NOTE(review): sampled listing — braces, the has_audio assignment and some
 * control lines are elided (original line numbers jump). */
4048 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4051 AVCodecContext *avctx;
4052 int i, ret, has_audio = 0;
4053 int64_t duration = 0;
4055 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush decoders and detect whether usable audio exists. */
4059 for (i = 0; i < ifile->nb_streams; i++) {
4060 ist = input_streams[ifile->ist_index + i];
4061 avctx = ist->dec_ctx;
4064 if (ist->decoding_needed) {
4065 process_input_packet(ist, NULL, 1);
4066 avcodec_flush_buffers(avctx);
4069 /* duration is the length of the last frame in a stream
4070 * when audio stream is present we don't care about
4071 * last video frame length because it's not defined exactly */
4072 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's total duration estimate. */
4076 for (i = 0; i < ifile->nb_streams; i++) {
4077 ist = input_streams[ifile->ist_index + i];
4078 avctx = ist->dec_ctx;
4081 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4082 AVRational sample_rate = {1, avctx->sample_rate};
4084 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video/other: last-frame length from -r, avg_frame_rate, or 1 tick. */
4088 if (ist->framerate.num) {
4089 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4090 } else if (ist->st->avg_frame_rate.num) {
4091 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4092 } else duration = 1;
4094 if (!ifile->duration)
4095 ifile->time_base = ist->st->time_base;
4096 /* the total duration of the stream, max_pts - min_pts is
4097 * the duration of the stream without the last frame */
4098 duration += ist->max_pts - ist->min_pts;
4099 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4103 if (ifile->loop > 0)
/* Read and process one packet from input file 'file_index':
 * demux, handle EOF/looping, correct start-time and timestamp wrap,
 * attach stream-global side data to the first packet, apply -itsoffset /
 * -itsscale, detect and fix timestamp discontinuities, add the loop-offset
 * duration, then hand the packet to process_input_packet().
 * NOTE(review): sampled listing — braces, several if-conditions and debug
 * guards are elided (original line numbers jump). */
4111 * - 0 -- one packet was read and processed
4112 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4113 * this function should be called again
4114 * - AVERROR_EOF -- this function should not be called again
4116 static int process_input(int file_index)
4118 InputFile *ifile = input_files[file_index];
4119 AVFormatContext *is;
4127 ret = get_input_packet(ifile, &pkt);
4129 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on error/EOF, seek back to start and read again. */
4133 if (ret < 0 && ifile->loop) {
4134 if ((ret = seek_to_start(ifile, is)) < 0)
4136 ret = get_input_packet(ifile, &pkt);
4137 if (ret == AVERROR(EAGAIN)) {
4143 if (ret != AVERROR_EOF) {
4144 print_error(is->filename, ret);
/* True EOF: flush decoders and finish non-lavfi outputs of this file. */
4149 for (i = 0; i < ifile->nb_streams; i++) {
4150 ist = input_streams[ifile->ist_index + i];
4151 if (ist->decoding_needed) {
4152 ret = process_input_packet(ist, NULL, 0);
4157 /* mark all outputs that don't go through lavfi as finished */
4158 for (j = 0; j < nb_output_streams; j++) {
4159 OutputStream *ost = output_streams[j];
4161 if (ost->source_index == ifile->ist_index + i &&
4162 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4163 finish_output_stream(ost);
/* EOF is reported to the caller as EAGAIN so other inputs keep going. */
4167 ifile->eof_reached = 1;
4168 return AVERROR(EAGAIN);
4174 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4175 is->streams[pkt.stream_index]);
4177 /* the following test is needed in case new streams appear
4178 dynamically in stream : we ignore them */
4179 if (pkt.stream_index >= ifile->nb_streams) {
4180 report_new_stream(file_index, &pkt);
4181 goto discard_packet;
4184 ist = input_streams[ifile->ist_index + pkt.stream_index];
4186 ist->data_size += pkt.size;
/* Discarded streams drop the packet here (condition line elided). */
4190 goto discard_packet;
4192 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4193 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4198 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4199 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4200 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4201 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4202 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4203 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4204 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4205 av_ts2str(input_files[ist->file_index]->ts_offset),
4206 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp-wrap correction for streams with < 64 pts_wrap_bits. */
4209 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4210 int64_t stime, stime2;
4211 // Correcting starttime based on the enabled streams
4212 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4213 // so we instead do it here as part of discontinuity handling
4214 if ( ist->next_dts == AV_NOPTS_VALUE
4215 && ifile->ts_offset == -is->start_time
4216 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4217 int64_t new_start_time = INT64_MAX;
4218 for (i=0; i<is->nb_streams; i++) {
4219 AVStream *st = is->streams[i];
4220 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4222 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4224 if (new_start_time > is->start_time) {
4225 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4226 ifile->ts_offset = -new_start_time;
4230 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4231 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4232 ist->wrap_correction_done = 1;
/* Timestamps beyond half the wrap range are unwrapped; correction stays
 * pending (flag reset) until both pts and dts are inside the range. */
4234 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4235 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4236 ist->wrap_correction_done = 0;
4238 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4239 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4240 ist->wrap_correction_done = 0;
4244 /* add the stream-global side data to the first packet */
4245 if (ist->nb_packets == 1) {
4246 if (ist->st->nb_side_data)
4247 av_packet_split_side_data(&pkt);
4248 for (i = 0; i < ist->st->nb_side_data; i++) {
4249 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't duplicate side data the packet already carries, and skip the
 * display matrix when autorotation is enabled. */
4252 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4254 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4257 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4261 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the accumulated ts_offset, then the user's -itsscale factor. */
4265 if (pkt.dts != AV_NOPTS_VALUE)
4266 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4267 if (pkt.pts != AV_NOPTS_VALUE)
4268 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4270 if (pkt.pts != AV_NOPTS_VALUE)
4271 pkt.pts *= ist->ts_scale;
4272 if (pkt.dts != AV_NOPTS_VALUE)
4273 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity: first packet of this stream compared to the
 * last timestamp seen anywhere in the file (TS_DISCONT formats only). */
4275 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4276 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4277 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4278 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4279 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4280 int64_t delta = pkt_dts - ifile->last_ts;
4281 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4282 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4283 ifile->ts_offset -= delta;
4284 av_log(NULL, AV_LOG_DEBUG,
4285 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4286 delta, ifile->ts_offset);
4287 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4288 if (pkt.pts != AV_NOPTS_VALUE)
4289 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and
 * track min/max pts for the next loop's duration estimate. */
4293 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4294 if (pkt.pts != AV_NOPTS_VALUE) {
4295 pkt.pts += duration;
4296 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4297 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4300 if (pkt.dts != AV_NOPTS_VALUE)
4301 pkt.dts += duration;
/* Intra-stream discontinuity: packet DTS vs. the stream's expected
 * next_dts; either shift the offset (TS_DISCONT) or drop bad stamps. */
4303 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4304 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4305 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4306 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4308 int64_t delta = pkt_dts - ist->next_dts;
4309 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4310 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4311 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4312 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4313 ifile->ts_offset -= delta;
4314 av_log(NULL, AV_LOG_DEBUG,
4315 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4316 delta, ifile->ts_offset);
4317 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4318 if (pkt.pts != AV_NOPTS_VALUE)
4319 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4322 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4323 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4324 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4325 pkt.dts = AV_NOPTS_VALUE;
4327 if (pkt.pts != AV_NOPTS_VALUE){
4328 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4329 delta = pkt_pts - ist->next_dts;
4330 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4331 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4332 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4333 pkt.pts = AV_NOPTS_VALUE;
4339 if (pkt.dts != AV_NOPTS_VALUE)
4340 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4343 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4344 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4345 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4346 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4347 av_ts2str(input_files[ist->file_index]->ts_offset),
4348 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4351 sub2video_heartbeat(ist, pkt.pts);
4353 process_input_packet(ist, &pkt, 0);
4356 av_packet_unref(&pkt);
4362 * Perform a step of transcoding for the specified filter graph.
4364 * @param[in] graph filter graph to consider
4365 * @param[out] best_ist input stream where a frame would allow to continue
4366 * @return 0 for success, <0 for error
4368 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4371 int nb_requests, nb_requests_max = 0;
4372 InputFilter *ifilter;
/* Ask the graph for output; success means frames are ready to reap. */
4376 ret = avfilter_graph_request_oldest(graph->graph);
4378 return reap_filters(0);
4380 if (ret == AVERROR_EOF) {
4381 ret = reap_filters(1);
4382 for (i = 0; i < graph->nb_outputs; i++)
4383 close_output_stream(graph->outputs[i]->ost);
4386 if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the input whose buffersrc failed the most requests as the
 * best stream to feed next (skipping stalled or finished input files). */
4389 for (i = 0; i < graph->nb_inputs; i++) {
4390 ifilter = graph->inputs[i];
4392 if (input_files[ist->file_index]->eagain ||
4393 input_files[ist->file_index]->eof_reached)
4395 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4396 if (nb_requests > nb_requests_max) {
4397 nb_requests_max = nb_requests;
/* No feedable input found: mark every output of this graph unavailable. */
4403 for (i = 0; i < graph->nb_outputs; i++)
4404 graph->outputs[i]->ost->unavailable = 1;
4410 * Run a single step of transcoding.
4412 * @return 0 for success, <0 for error
4414 static int transcode_step(void)
4417 InputStream *ist = NULL;
4420 ost = choose_output();
/* No stream selected: either retry after EAGAIN (elided) or finish. */
4427 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Lazily configure the filtergraph once all its inputs know their format. */
4431 if (ost->filter && !ost->filter->graph->graph) {
4432 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4433 ret = configure_filtergraph(ost->filter->graph);
4435 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4441 if (ost->filter && ost->filter->graph->graph) {
4442 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4446 } else if (ost->filter) {
/* Graph not configured yet: look for an input that can still produce. */
4448 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4449 InputFilter *ifilter = ost->filter->graph->inputs[i];
4450 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4456 ost->inputs_done = 1;
4460 av_assert0(ost->source_index >= 0);
4461 ist = input_streams[ost->source_index];
4464 ret = process_input(ist->file_index);
4465 if (ret == AVERROR(EAGAIN)) {
4466 if (input_files[ist->file_index]->eagain)
4467 ost->unavailable = 1;
/* EOF from process_input is not an error at this level. */
4472 return ret == AVERROR_EOF ? 0 : ret;
4474 return reap_filters(0);
4478 * The following code is the main loop of the file converter
4480 static int transcode(void)
4483 AVFormatContext *os;
4486 int64_t timer_start;
4487 int64_t total_packets_written = 0;
4489 ret = transcode_init();
4493 if (stdin_interaction) {
4494 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4497 timer_start = av_gettime_relative();
4500 if ((ret = init_input_threads()) < 0)
/* Main loop: runs until SIGTERM, 'q', no more needed output, or error. */
4504 while (!received_sigterm) {
4505 int64_t cur_time= av_gettime_relative();
4507 /* if 'q' pressed, exits */
4508 if (stdin_interaction)
4509 if (check_keyboard_interaction(cur_time) < 0)
4512 /* check if there's any stream where output is still needed */
4513 if (!need_output()) {
4514 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4518 ret = transcode_step();
4519 if (ret < 0 && ret != AVERROR_EOF) {
4521 av_strerror(ret, errbuf, sizeof(errbuf));
4523 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4527 /* dump report by using the output first video and audio streams */
4528 print_report(0, timer_start, cur_time);
4531 free_input_threads();
4534 /* at the end of stream, we must flush the decoder buffers */
4535 for (i = 0; i < nb_input_streams; i++) {
4536 ist = input_streams[i];
4537 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4538 process_input_packet(ist, NULL, 0);
4545 /* write the trailer if needed and close file */
4546 for (i = 0; i < nb_output_files; i++) {
4547 os = output_files[i]->ctx;
4548 if (!output_files[i]->header_written) {
4549 av_log(NULL, AV_LOG_ERROR,
4550 "Nothing was written into output file %d (%s), because "
4551 "at least one of its streams received no packets.\n",
4555 if ((ret = av_write_trailer(os)) < 0) {
4556 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4562 /* dump report by using the first video and audio streams */
4563 print_report(1, timer_start, av_gettime_relative());
4565 /* close each encoder */
4566 for (i = 0; i < nb_output_streams; i++) {
4567 ost = output_streams[i];
4568 if (ost->encoding_needed) {
4569 av_freep(&ost->enc_ctx->stats_in);
4571 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever written. */
4574 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4575 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4579 /* close each decoder */
4580 for (i = 0; i < nb_input_streams; i++) {
4581 ist = input_streams[i];
4582 if (ist->decoding_needed) {
4583 avcodec_close(ist->dec_ctx);
4584 if (ist->hwaccel_uninit)
4585 ist->hwaccel_uninit(ist->dec_ctx);
4589 av_buffer_unref(&hw_device_ctx);
/* Error-path cleanup (fail label lines are elided in this listing). */
4596 free_input_threads();
4599 if (output_streams) {
4600 for (i = 0; i < nb_output_streams; i++) {
4601 ost = output_streams[i];
4604 if (fclose(ost->logfile))
4605 av_log(NULL, AV_LOG_ERROR,
4606 "Error closing logfile, loss of information possible: %s\n",
4607 av_err2str(AVERROR(errno)));
4608 ost->logfile = NULL;
4610 av_freep(&ost->forced_kf_pts);
4611 av_freep(&ost->apad);
4612 av_freep(&ost->disposition);
4613 av_dict_free(&ost->encoder_opts);
4614 av_dict_free(&ost->sws_dict);
4615 av_dict_free(&ost->swr_opts);
4616 av_dict_free(&ost->resample_opts);
/* Return consumed user CPU time in microseconds: getrusage() on POSIX,
 * GetProcessTimes() on Windows (100ns units divided by 10), falling back
 * to wall-clock av_gettime_relative() elsewhere. */
4624 static int64_t getutime(void)
4627 struct rusage rusage;
4629 getrusage(RUSAGE_SELF, &rusage);
4630 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4631 #elif HAVE_GETPROCESSTIMES
4633 FILETIME c, e, k, u;
4634 proc = GetCurrentProcess();
4635 GetProcessTimes(proc, &c, &e, &k, &u);
4636 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4638 return av_gettime_relative();
/* Return peak memory usage in bytes: ru_maxrss (KiB, hence *1024) on POSIX,
 * PeakPagefileUsage via GetProcessMemoryInfo() on Windows; the fallback
 * branch for other platforms is elided in this listing. */
4642 static int64_t getmaxrss(void)
4644 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4645 struct rusage rusage;
4646 getrusage(RUSAGE_SELF, &rusage);
4647 return (int64_t)rusage.ru_maxrss * 1024;
4648 #elif HAVE_GETPROCESSMEMORYINFO
4650 PROCESS_MEMORY_COUNTERS memcounters;
4651 proc = GetCurrentProcess();
4652 memcounters.cb = sizeof(memcounters);
4653 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4654 return memcounters.PeakPagefileUsage;
/* No-op log callback installed in '-d' (daemon) mode to suppress all
 * libav* logging; body (empty) is elided in this listing. */
4660 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4664 int main(int argc, char **argv)
4671 register_exit(ffmpeg_cleanup);
4673 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4675 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4676 parse_loglevel(argc, argv, options);
4678 if(argc>1 && !strcmp(argv[1], "-d")){
4680 av_log_set_callback(log_callback_null);
4685 avcodec_register_all();
4687 avdevice_register_all();
4689 avfilter_register_all();
4691 avformat_network_init();
4693 show_banner(argc, argv, options);
4695 /* parse options and open all input/output files */
4696 ret = ffmpeg_parse_options(argc, argv);
4700 if (nb_output_files <= 0 && nb_input_files == 0) {
4702 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4706 /* file converter / grab */
4707 if (nb_output_files <= 0) {
4708 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4712 // if (nb_input_files == 0) {
4713 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4717 for (i = 0; i < nb_output_files; i++) {
4718 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4722 current_time = ti = getutime();
4723 if (transcode() < 0)
4725 ti = getutime() - ti;
4727 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4729 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4730 decode_error_stat[0], decode_error_stat[1]);
4731 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4734 exit_program(received_nb_signals ? 255 : main_return_code);
4735 return main_return_code;