2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banner/version output. */
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats / -vstats_file). */
115 static FILE *vstats_file;
/* Names of the constants usable in -force_key_frames expressions. */
117 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Frame duplication/drop bookkeeping for video sync handling;
   dup_warning is the threshold at which a "too many dups" warning fires. */
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
/* NOTE(review): presumably gates SDP generation for RTP outputs — confirm. */
137 static int want_sdp = 1;
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, lazily allocated in do_subtitle_out(). */
142 static uint8_t *subtitle_out;
/* Global tables of all input/output files and streams (shared with the
   other ffmpeg_*.c translation units). */
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank RGB32 canvas in ist->sub2video.frame, sized from the
   decoder when known, otherwise from the preconfigured sub2video size.
   Returns 0 on success; av_frame_get_buffer() failure propagates a
   negative AVERROR through the (elided) return path. */
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
178 av_frame_unref(frame);
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte buffer alignment; clearing data[0] clears the whole image
   because RGB32 is a packed, single-plane format. */
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle (PAL8 pixels in r->data[0], 32-bit
   palette in r->data[1]) onto the RGB32 canvas dst of size w x h.
   Non-bitmap rectangles and rectangles outside the canvas are warned
   about and skipped (the elided lines after each warning return early). */
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left corner: 4 bytes per RGB32 pixel. */
206 dst += r->y * dst_linesize + r->x * 4;
208 pal = (uint32_t *)r->data[1];
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
/* Palette lookup: each source byte indexes a 32-bit palette entry. */
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
/* Push the current sub2video canvas to every filtergraph input fed by this
   stream, stamping it with pts and remembering that pts in last_pts. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
224 av_assert1(frame->data[0]);
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
/* KEEP_REF: the frame is reused by later heartbeats; PUSH: request that
   the graph processes it immediately. */
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
/* Render the subtitle onto a fresh blank canvas and push the result into
   the filtergraphs. A NULL sub draws an empty canvas (used by heartbeat
   and flush). start/end_display_time are millisecond offsets relative to
   sub->pts; both are rescaled into the input stream time base, and
   end_pts records when the rendered canvas expires. */
232 void sub2video_update(InputStream *ist, AVSubtitle *sub)
234 AVFrame *frame = ist->sub2video.frame;
238 int64_t pts, end_pts;
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 num_rects = sub->num_rects;
/* NULL subtitle: reuse the previous end pts and draw zero rectangles. */
249 pts = ist->sub2video.end_pts;
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
263 ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams "alive": whenever a frame is read from a file,
   refresh/re-send the subtitle canvas for every sub2video stream of that
   same file so filters waiting on a subtitle input do not stall. */
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
268 InputFile *infile = input_files[ist->file_index];
272 /* When a frame is read from a file, examine all sub2video streams in
273 the same file and send the sub2video frame again. Otherwise, decoded
274 video frames could be accumulating in the filter graph while a filter
275 (possibly overlay) is desperately waiting for a subtitle frame. */
276 for (i = 0; i < infile->nb_streams; i++) {
277 InputStream *ist2 = input_streams[infile->ist_index + i];
278 if (!ist2->sub2video.frame)
280 /* subtitles seem to be usually muxed ahead of other streams;
281 if not, subtracting a larger time here is necessary */
282 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283 /* do not send the heartbeat frame if the subtitle is already ahead */
284 if (pts2 <= ist2->sub2video.last_pts)
/* Canvas expired or never drawn: repaint an empty one before pushing. */
286 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287 sub2video_update(ist2, NULL);
288 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
/* Only pushed when some buffersrc reported failed requests (elided check). */
291 sub2video_push_ref(ist2, pts2);
/* End-of-stream: send one final clearing update if the canvas is still
   live, then push NULL frames to signal EOF to every buffer source. */
295 static void sub2video_flush(InputStream *ist)
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
305 /* end of sub2video hack */
/* Async-signal-safe terminal teardown: restore the saved tty attributes
   and emit a zero-length QUIET log to flush logger state. Must remain
   callable from a signal handler. */
307 static void term_exit_sigsafe(void)
311 tcsetattr (0, TCSANOW, &oldtty);
317 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Shutdown state shared between signal handlers and the main thread.
   NOTE(review): the 'volatile int' flags are written from handlers and
   read from the main loop — adequate as simple flags, but not a general
   synchronization mechanism; only transcode_init_done is a real atomic. */
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
323 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
/* Signal handler: record which signal arrived and count arrivals; after
   more than three signals, give up on graceful shutdown and hard-exit
   (elided). Only async-signal-safe calls (write) are used here. */
328 sigterm_handler(int sig)
330 received_sigterm = sig;
331 received_nb_signals++;
333 if(received_nb_signals > 3) {
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
341 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: funnel console events into the same
   sigterm_handler path used for POSIX signals. */
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
344 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
349 case CTRL_BREAK_EVENT:
350 sigterm_handler(SIGINT);
353 case CTRL_CLOSE_EVENT:
354 case CTRL_LOGOFF_EVENT:
355 case CTRL_SHUTDOWN_EVENT:
356 sigterm_handler(SIGTERM);
357 /* Basically, with these 3 events, when we return from this method the
358 process is hard terminated, so stall as long as we need to
359 to try and let the main thread(s) clean up and gracefully terminate
360 (we have at most 5 seconds, but should be done far before that). */
/* Spin until the main thread flags itself as exited (elided sleep inside). */
361 while (!ffmpeg_exited) {
367 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* (Interior of term_init — its definition line is elided from this view.)
   Puts the controlling terminal into a raw-ish mode so single keypresses
   can be read without Enter, then installs the shutdown signal handlers. */
376 if (!run_as_daemon && stdin_interaction) {
378 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control; keep output post-processing;
   turn off canonical mode and echo; clear size/parity bits. */
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
408 static int read_key(void)
/* POSIX path: select() on stdin with an (elided) zero timeout. */
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode() fails on pipes — detects redirected stdin on Windows. */
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
/* Interrupt callback handed to libavformat via int_cb: abort blocking I/O
   once "enough" signals arrived. transcode_init_done is 0 during init and
   1 afterwards, so two signals are needed during init, one later. */
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > atomic_load(&transcode_init_done);
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down all global state at exit: filtergraphs (draining queued
   frames/subtitles), output files/streams, input threads/files/streams,
   the vstats file, and the network layer. 'ret' is the exit status being
   propagated; here it only affects the final log message. */
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* Free every filtergraph, including frames still queued on its inputs and
   any undelivered sub2video subtitles. */
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
479 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
481 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482 sizeof(frame), NULL);
483 av_frame_free(&frame);
485 av_fifo_freep(&fg->inputs[j]->frame_queue);
486 if (fg->inputs[j]->ist->sub2video.sub_queue) {
487 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
489 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
490 &sub, sizeof(sub), NULL);
491 avsubtitle_free(&sub);
493 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
495 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
496 av_freep(&fg->inputs[j]->name);
497 av_freep(&fg->inputs[j]);
499 av_freep(&fg->inputs);
500 for (j = 0; j < fg->nb_outputs; j++) {
501 av_freep(&fg->outputs[j]->name);
502 av_freep(&fg->outputs[j]->formats);
503 av_freep(&fg->outputs[j]->channel_layouts);
504 av_freep(&fg->outputs[j]->sample_rates);
505 av_freep(&fg->outputs[j]);
507 av_freep(&fg->outputs);
508 av_freep(&fg->graph_desc);
510 av_freep(&filtergraphs[i]);
512 av_freep(&filtergraphs);
514 av_freep(&subtitle_out);
/* Close and free output files; only close the AVIO handle for formats
   that actually own a file (no AVFMT_NOFILE). */
517 for (i = 0; i < nb_output_files; i++) {
518 OutputFile *of = output_files[i];
523 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
525 avformat_free_context(s);
526 av_dict_free(&of->opts);
528 av_freep(&output_files[i]);
/* Free per-output-stream resources: bitstream filters, cached frames,
   codec contexts, and any packets still sitting in the muxing queue. */
530 for (i = 0; i < nb_output_streams; i++) {
531 OutputStream *ost = output_streams[i];
536 for (j = 0; j < ost->nb_bitstream_filters; j++)
537 av_bsf_free(&ost->bsf_ctx[j]);
538 av_freep(&ost->bsf_ctx);
540 av_frame_free(&ost->filtered_frame);
541 av_frame_free(&ost->last_frame);
542 av_dict_free(&ost->encoder_opts);
544 av_parser_close(ost->parser);
545 avcodec_free_context(&ost->parser_avctx);
547 av_freep(&ost->forced_keyframes);
548 av_expr_free(ost->forced_keyframes_pexpr);
549 av_freep(&ost->avfilter);
550 av_freep(&ost->logfile_prefix);
552 av_freep(&ost->audio_channels_map);
553 ost->audio_channels_mapped = 0;
555 av_dict_free(&ost->sws_dict);
557 avcodec_free_context(&ost->enc_ctx);
558 avcodec_parameters_free(&ost->ref_par);
560 if (ost->muxing_queue) {
561 while (av_fifo_size(ost->muxing_queue)) {
563 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564 av_packet_unref(&pkt);
566 av_fifo_freep(&ost->muxing_queue);
569 av_freep(&output_streams[i]);
572 free_input_threads();
574 for (i = 0; i < nb_input_files; i++) {
575 avformat_close_input(&input_files[i]->ctx);
576 av_freep(&input_files[i]);
578 for (i = 0; i < nb_input_streams; i++) {
579 InputStream *ist = input_streams[i];
581 av_frame_free(&ist->decoded_frame);
582 av_frame_free(&ist->filter_frame);
583 av_dict_free(&ist->decoder_opts);
584 avsubtitle_free(&ist->prev_sub.subtitle);
585 av_frame_free(&ist->sub2video.frame);
586 av_freep(&ist->filters);
587 av_freep(&ist->hwaccel_device);
588 av_freep(&ist->dts_buffer);
590 avcodec_free_context(&ist->dec_ctx);
592 av_freep(&input_streams[i]);
/* fclose() flushes buffered stats — a failure here means data loss. */
596 if (fclose(vstats_file))
597 av_log(NULL, AV_LOG_ERROR,
598 "Error closing vstats file, loss of information possible: %s\n",
599 av_err2str(AVERROR(errno)));
601 av_freep(&vstats_filename);
603 av_freep(&input_streams);
604 av_freep(&input_files);
605 av_freep(&output_streams);
606 av_freep(&output_files);
610 avformat_network_deinit();
612 if (received_sigterm) {
613 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614 (int) received_sigterm);
615 } else if (ret && atomic_load(&transcode_init_done)) {
616 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every option whose key also appears in b (exact,
   case-sensitive match). Used to drop already-consumed options so
   assert_avoptions() only flags genuinely unknown ones. */
622 void remove_avoptions(AVDictionary **a, AVDictionary *b)
624 AVDictionaryEntry *t = NULL;
626 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
627 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed; the elided
   lines after the log terminate the program. */
631 void assert_avoptions(AVDictionary *m)
633 AVDictionaryEntry *t;
634 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
640 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the CPU time elapsed since the previous call
   under the printf-style label fmt; a NULL fmt only resets the timer. */
645 static void update_benchmark(const char *fmt, ...)
647 if (do_benchmark_all) {
648 int64_t t = getutime();
654 vsnprintf(buf, sizeof(buf), fmt, va);
656 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: 'this_stream' flags are OR-ed into
   ost itself, 'others' into all remaining streams (used after fatal
   muxing errors to stop the whole pipeline). */
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
665 for (i = 0; i < nb_output_streams; i++) {
666 OutputStream *ost2 = output_streams[i];
667 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer for stream ost. Before the muxer header is
   written, packets are buffered in ost->muxing_queue instead. Applies
   -vsync/-async timestamp dropping, rescales into the stream time base,
   fixes non-monotonic DTS, and consumes (unrefs) pkt in all paths.
   'unqueue' is nonzero when re-emitting a previously queued packet, so it
   is not counted against -frames a second time. */
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
673 AVFormatContext *s = of->ctx;
674 AVStream *st = ost->st;
678 * Audio encoders may split the packets -- #frames in != #packets out.
679 * But there is no reordering, so we can limit the number of output packets
680 * by simply dropping them here.
681 * Counting encoded video frames needs to be done separately because of
682 * reordering, see do_video_out().
683 * Do not count the packet when unqueued because it has been counted when queued.
685 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
686 if (ost->frame_number >= ost->max_frames) {
687 av_packet_unref(pkt);
693 if (!of->header_written) {
694 AVPacket tmp_pkt = {0};
695 /* the muxer is not initialized yet, buffer the packet */
/* Grow the queue geometrically, but never past max_muxing_queue_size. */
696 if (!av_fifo_space(ost->muxing_queue)) {
697 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
698 ost->max_muxing_queue_size);
699 if (new_size <= av_fifo_size(ost->muxing_queue)) {
700 av_log(NULL, AV_LOG_ERROR,
701 "Too many packets buffered for output stream %d:%d.\n",
702 ost->file_index, ost->st->index);
705 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
709 ret = av_packet_ref(&tmp_pkt, pkt);
712 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
713 av_packet_unref(pkt);
/* VSYNC_DROP / negative -async: strip timestamps entirely. */
717 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
718 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
719 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
/* Harvest encoder quality/PSNR side data for the stats display. */
721 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
723 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
725 ost->quality = sd ? AV_RL32(sd) : -1;
726 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
728 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
730 ost->error[i] = AV_RL64(sd + 8 + 8*i);
735 if (ost->frame_rate.num && ost->is_cfr) {
736 if (pkt->duration > 0)
737 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
738 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
743 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
745 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
746 if (pkt->dts != AV_NOPTS_VALUE &&
747 pkt->pts != AV_NOPTS_VALUE &&
748 pkt->dts > pkt->pts) {
749 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
751 ost->file_index, ost->st->index);
/* Replace dts by the median of {pts, dts, last_mux_dts + 1}:
   sum minus min minus max leaves the middle value. */
753 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
754 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
755 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless the muxer allows equal) increasing DTS. */
757 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
758 pkt->dts != AV_NOPTS_VALUE &&
759 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
760 ost->last_mux_dts != AV_NOPTS_VALUE) {
761 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
762 if (pkt->dts < max) {
763 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
764 av_log(s, loglevel, "Non-monotonous DTS in output stream "
765 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
766 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
768 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
771 av_log(s, loglevel, "changing to %"PRId64". This may result "
772 "in incorrect timestamps in the output file.\n",
774 if (pkt->pts >= pkt->dts)
775 pkt->pts = FFMAX(pkt->pts, max);
780 ost->last_mux_dts = pkt->dts;
782 ost->data_size += pkt->size;
783 ost->packets_written++;
785 pkt->stream_index = ost->index;
788 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
789 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
790 av_get_media_type_string(ost->enc_ctx->codec_type),
791 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
792 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
/* A write failure is fatal for the whole file: flag every stream done. */
797 ret = av_interleaved_write_frame(s, pkt);
799 print_error("av_interleaved_write_frame()", ret);
800 main_return_code = 1;
801 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
803 av_packet_unref(pkt);
/* Mark the encoder side of this stream finished; when the stream bounds
   the file's duration (-shortest, elided condition), clamp the file's
   recording_time to this stream's end timestamp. */
806 static void close_output_stream(OutputStream *ost)
808 OutputFile *of = output_files[ost->file_index];
810 ost->finished |= ENCODER_FINISHED;
812 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
813 of->recording_time = FFMIN(of->recording_time, end);
818 * Send a single packet to the output, applying any bitstream filters
819 * associated with the output stream. This may result in any number
820 * of packets actually being written, depending on what bitstream
821 * filters are applied. The supplied packet is consumed and will be
822 * blank (as if newly-allocated) when this function returns.
824 * If eof is set, instead indicate EOF to all bitstream filters and
825 * therefore flush any delayed packets to the output. A blank packet
826 * must be supplied in this case.
828 static void output_packet(OutputFile *of, AVPacket *pkt,
829 OutputStream *ost, int eof)
833 /* apply the output bitstream filters, if any */
834 if (ost->nb_bitstream_filters) {
/* Feed the head of the bsf chain, then drain packets down the chain
   (idx tracks the position of the filter currently being drained). */
837 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
844 /* get a packet from the previous filter up the chain */
845 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
846 if (ret == AVERROR(EAGAIN)) {
850 } else if (ret == AVERROR_EOF) {
855 /* send it to the next filter down the chain or to the muxer */
856 if (idx < ost->nb_bitstream_filters) {
857 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
865 write_packet(of, pkt, ost, 0);
/* No bitstream filters configured: pass straight to the muxer. */
868 write_packet(of, pkt, ost, 0);
871 if (ret < 0 && ret != AVERROR_EOF) {
872 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
873 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether this stream may still accept frames under -t: once
   sync_opts reaches the file's recording_time the stream is closed and
   (elided) 0 is returned, otherwise (elided) 1. */
879 static int check_recording_time(OutputStream *ost)
881 OutputFile *of = output_files[ost->file_index];
883 if (of->recording_time != INT64_MAX &&
884 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
885 AV_TIME_BASE_Q) >= 0) {
886 close_output_stream(ost);
/* Encode one audio frame and pass every resulting packet to output_packet().
   Keeps ost->sync_opts tracking the next expected audio pts (in samples)
   and updates the encoded-frame/sample counters used by the status line. */
892 static void do_audio_out(OutputFile *of, OutputStream *ost,
895 AVCodecContext *enc = ost->enc_ctx;
899 av_init_packet(&pkt);
903 if (!check_recording_time(ost))
/* Without valid pts (or with negative -async), stamp frames with our
   running sample counter so output stays contiguous. */
906 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
907 frame->pts = ost->sync_opts;
908 ost->sync_opts = frame->pts + frame->nb_samples;
909 ost->samples_encoded += frame->nb_samples;
910 ost->frames_encoded++;
912 av_assert0(pkt.size || !pkt.data);
913 update_benchmark(NULL);
915 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
916 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
917 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
918 enc->time_base.num, enc->time_base.den);
/* send/receive encoder loop: one frame in, zero or more packets out. */
921 ret = avcodec_send_frame(enc, frame);
926 ret = avcodec_receive_packet(enc, &pkt);
927 if (ret == AVERROR(EAGAIN))
932 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
934 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
937 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
938 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
939 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
940 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
943 output_packet(of, &pkt, ost, 0);
948 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one subtitle and mux the result. DVB subtitles are encoded twice
   (one packet draws, a later one clears), hence the nb-iteration loop.
   Uses the file-global subtitle_out scratch buffer (lazily allocated). */
952 static void do_subtitle_out(OutputFile *of,
956 int subtitle_out_max_size = 1024 * 1024;
957 int subtitle_out_size, nb, i;
962 if (sub->pts == AV_NOPTS_VALUE) {
963 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
972 subtitle_out = av_malloc(subtitle_out_max_size);
974 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
979 /* Note: DVB subtitle need one packet to draw them and one other
980 packet to clear them */
981 /* XXX: signal it in the codec context ? */
982 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
987 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
989 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
990 pts -= output_files[ost->file_index]->start_time;
991 for (i = 0; i < nb; i++) {
/* The encoder may mutate num_rects (e.g. the clearing DVB pass);
   restore it after each encode so the next pass sees the original. */
992 unsigned save_num_rects = sub->num_rects;
994 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
995 if (!check_recording_time(ost))
999 // start_display_time is required to be 0
1000 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1001 sub->end_display_time -= sub->start_display_time;
1002 sub->start_display_time = 0;
1006 ost->frames_encoded++;
1008 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1009 subtitle_out_max_size, sub);
1011 sub->num_rects = save_num_rects;
1012 if (subtitle_out_size < 0) {
1013 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1017 av_init_packet(&pkt);
1018 pkt.data = subtitle_out;
1019 pkt.size = subtitle_out_size;
1020 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1021 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1022 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1023 /* XXX: the pts correction is handled here. Maybe handling
1024 it in the codec would be better */
/* First pass (draw) keyed to start_display_time, second (clear) to
   end_display_time. */
1026 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1028 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1031 output_packet(of, &pkt, ost, 0);
/* Encode one video frame (or none at EOF) with full -vsync handling:
   decides how many frames to emit (dup) or skip (drop) so the output
   matches the selected sync mode, applies forced keyframes, feeds the
   encoder, and muxes every produced packet. Keeps ost->last_frame for
   duplication and ost->sync_opts as the next output pts in enc time base. */
1035 static void do_video_out(OutputFile *of,
1037 AVFrame *next_picture,
1040 int ret, format_video_sync;
1042 AVCodecContext *enc = ost->enc_ctx;
1043 AVCodecParameters *mux_par = ost->st->codecpar;
1044 AVRational frame_rate;
1045 int nb_frames, nb0_frames, i;
1046 double delta, delta0;
1047 double duration = 0;
1049 InputStream *ist = NULL;
1050 AVFilterContext *filter = ost->filter->filter;
1052 if (ost->source_index >= 0)
1053 ist = input_streams[ost->source_index];
/* Estimate the duration of one output frame, in encoder time-base units,
   from the filtergraph frame rate / forced rate / input pkt_duration. */
1055 frame_rate = av_buffersink_get_frame_rate(filter);
1056 if (frame_rate.num > 0 && frame_rate.den > 0)
1057 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1059 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1060 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1062 if (!ost->filters_script &&
1066 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1067 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* EOF flush: predict how many trailing duplicates to emit from the
   median of the last three dup counts. */
1070 if (!next_picture) {
1072 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1073 ost->last_nb0_frames[1],
1074 ost->last_nb0_frames[2]);
1076 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1077 delta = delta0 + duration;
1079 /* by default, we output a single frame */
1080 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO into a concrete sync mode based on the muxer. */
1083 format_video_sync = video_sync_method;
1084 if (format_video_sync == VSYNC_AUTO) {
1085 if(!strcmp(of->ctx->oformat->name, "avi")) {
1086 format_video_sync = VSYNC_VFR;
1088 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1090 && format_video_sync == VSYNC_CFR
1091 && input_files[ist->file_index]->ctx->nb_streams == 1
1092 && input_files[ist->file_index]->input_ts_offset == 0) {
1093 format_video_sync = VSYNC_VSCFR;
1095 if (format_video_sync == VSYNC_CFR && copy_ts) {
1096 format_video_sync = VSYNC_VSCFR;
1099 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1103 format_video_sync != VSYNC_PASSTHROUGH &&
1104 format_video_sync != VSYNC_DROP) {
1105 if (delta0 < -0.6) {
1106 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1108 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1109 sync_ipts = ost->sync_opts;
/* Per-mode dup/drop decision (case labels partly elided). */
1114 switch (format_video_sync) {
1116 if (ost->frame_number == 0 && delta0 >= 0.5) {
1117 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1120 ost->sync_opts = lrint(sync_ipts);
1123 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1124 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1126 } else if (delta < -1.1)
1128 else if (delta > 1.1) {
1129 nb_frames = lrintf(delta);
1131 nb0_frames = lrintf(delta0 - 0.6);
1137 else if (delta > 0.6)
1138 ost->sync_opts = lrint(sync_ipts);
1141 case VSYNC_PASSTHROUGH:
1142 ost->sync_opts = lrint(sync_ipts);
1149 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1150 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the dup history window and record this decision. */
1152 memmove(ost->last_nb0_frames + 1,
1153 ost->last_nb0_frames,
1154 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1155 ost->last_nb0_frames[0] = nb0_frames;
1157 if (nb0_frames == 0 && ost->last_dropped) {
1159 av_log(NULL, AV_LOG_VERBOSE,
1160 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1161 ost->frame_number, ost->st->index, ost->last_frame->pts);
1163 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1164 if (nb_frames > dts_error_threshold * 30) {
1165 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1169 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1170 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1171 if (nb_frames_dup > dup_warning) {
1172 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1176 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1178 /* duplicates frame if needed */
1179 for (i = 0; i < nb_frames; i++) {
1180 AVFrame *in_picture;
1181 av_init_packet(&pkt);
/* The first nb0_frames iterations re-encode the previous frame. */
1185 if (i < nb0_frames && ost->last_frame) {
1186 in_picture = ost->last_frame;
1188 in_picture = next_picture;
1193 in_picture->pts = ost->sync_opts;
1196 if (!check_recording_time(ost))
1198 if (ost->frame_number >= ost->max_frames)
1203 int forced_keyframe = 0;
/* Propagate field order to the muxer for interlaced content. */
1206 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1207 ost->top_field_first >= 0)
1208 in_picture->top_field_first = !!ost->top_field_first;
1210 if (in_picture->interlaced_frame) {
1211 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1212 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1214 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1216 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1218 in_picture->quality = enc->global_quality;
1219 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, -force_key_frames expression,
   or "source" (mirror input keyframes). */
1221 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1222 in_picture->pts * av_q2d(enc->time_base) : NAN;
1223 if (ost->forced_kf_index < ost->forced_kf_count &&
1224 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1225 ost->forced_kf_index++;
1226 forced_keyframe = 1;
1227 } else if (ost->forced_keyframes_pexpr) {
1229 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1230 res = av_expr_eval(ost->forced_keyframes_pexpr,
1231 ost->forced_keyframes_expr_const_values, NULL);
1232 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1233 ost->forced_keyframes_expr_const_values[FKF_N],
1234 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1235 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1236 ost->forced_keyframes_expr_const_values[FKF_T],
1237 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1240 forced_keyframe = 1;
1241 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1242 ost->forced_keyframes_expr_const_values[FKF_N];
1243 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1244 ost->forced_keyframes_expr_const_values[FKF_T];
1245 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1248 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1249 } else if ( ost->forced_keyframes
1250 && !strncmp(ost->forced_keyframes, "source", 6)
1251 && in_picture->key_frame==1) {
1252 forced_keyframe = 1;
1255 if (forced_keyframe) {
1256 in_picture->pict_type = AV_PICTURE_TYPE_I;
1257 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1260 update_benchmark(NULL);
1262 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1263 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1264 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1265 enc->time_base.num, enc->time_base.den);
1268 ost->frames_encoded++;
/* send/receive encoder loop: one frame in, zero or more packets out. */
1270 ret = avcodec_send_frame(enc, in_picture);
1275 ret = avcodec_receive_packet(enc, &pkt);
1276 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1277 if (ret == AVERROR(EAGAIN))
1283 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1284 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1285 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1286 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1289 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1290 pkt.pts = ost->sync_opts;
1292 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1295 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1296 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1297 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1298 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1301 frame_size = pkt.size;
1302 output_packet(of, &pkt, ost, 0);
1304 /* if two pass, output log */
1305 if (ost->logfile && enc->stats_out) {
1306 fprintf(ost->logfile, "%s", enc->stats_out);
1312 * For video, number of frames in == number of packets out.
1313 * But there may be reordering, so we can't throw away frames on encoder
1314 * flush, we need to limit them here, before they go into encoder.
1316 ost->frame_number++;
1318 if (vstats_filename && frame_size)
1319 do_video_stats(ost, frame_size);
/* Cache the source frame so the next call can duplicate it if needed. */
1322 if (!ost->last_frame)
1323 ost->last_frame = av_frame_alloc();
1324 av_frame_unref(ost->last_frame);
1325 if (next_picture && ost->last_frame)
1326 av_frame_ref(ost->last_frame, next_picture);
1328 av_frame_free(&ost->last_frame);
1332 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1336 static double psnr(double d)
1338 return -10.0 * log10(d);
/* Append one line of per-frame statistics for a video output stream to
 * the global vstats file (lazily opened from vstats_filename).
 * NOTE(review): this listing is missing lines — e.g. the fopen() error
 * check and the "enc = ost->enc_ctx" assignment — comments below cover
 * only what is visible here. */
1341 static void do_video_stats(OutputStream *ost, int frame_size)
1343 AVCodecContext *enc;
1345 double ti1, bitrate, avg_bitrate;
1347 /* this is executed just the first time do_video_stats is called */
1349 vstats_file = fopen(vstats_filename, "w");
1357 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
/* number of frames already written to the output stream */
1358 frame_number = ost->st->nb_frames;
/* vstats v1: legacy single-output format; v2 adds file/stream indices */
1359 if (vstats_version <= 1) {
1360 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1361 ost->quality / (float)FF_QP2LAMBDA);
1363 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1364 ost->quality / (float)FF_QP2LAMBDA);
/* per-frame PSNR from the encoder-reported squared error; the
 * 255*255 scale assumes 8-bit samples — TODO confirm for >8-bit */
1367 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1368 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1370 fprintf(vstats_file,"f_size= %6d ", frame_size);
1371 /* compute pts value */
/* end-of-stream pts converted to seconds */
1372 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate of this frame (kbit/s) vs. running average */
1376 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1377 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1378 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1379 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1380 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1384 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/* Mark an output stream as completely finished on both the encoder and
 * muxer side; the visible loop then marks every other stream of the
 * same output file finished as well.
 * NOTE(review): this listing has gaps — in upstream ffmpeg the loop
 * over the file's other streams is guarded by of->shortest; confirm
 * against the full source. */
1386 static void finish_output_stream(OutputStream *ost)
1388 OutputFile *of = output_files[ost->file_index];
1391 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1394 for (i = 0; i < of->ctx->nb_streams; i++)
1395 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1400 * Get and encode new output from any of the filtergraphs, without causing
1403 * @return 0 for success, <0 for severe errors
/* Drain every configured output-stream buffersink and hand the frames
 * to do_video_out()/do_audio_out().  With flush set, an EOF from a
 * video sink triggers a final NULL-frame call to flush the encoder.
 * NOTE(review): several lines (continue/break statements, loop bodies)
 * are missing from this listing. */
1405 static int reap_filters(int flush)
1407 AVFrame *filtered_frame = NULL;
1410 /* Reap all buffers present in the buffer sinks */
1411 for (i = 0; i < nb_output_streams; i++) {
1412 OutputStream *ost = output_streams[i];
1413 OutputFile *of = output_files[ost->file_index];
1414 AVFilterContext *filter;
1415 AVCodecContext *enc = ost->enc_ctx;
/* skip streams whose filtergraph is not configured yet */
1418 if (!ost->filter || !ost->filter->graph->graph)
1420 filter = ost->filter->filter;
/* lazily initialize the output stream (encoder open, etc.) the first
 * time its filtergraph produces output */
1422 if (!ost->initialized) {
1423 char error[1024] = "";
1424 ret = init_output_stream(ost, error, sizeof(error));
1426 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1427 ost->file_index, ost->index, error);
/* reusable per-stream frame to receive buffersink output */
1432 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1433 return AVERROR(ENOMEM);
1435 filtered_frame = ost->filtered_frame;
1438 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: NO_REQUEST means only return already-buffered
 * frames, do not run the graph to produce more */
1439 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1440 AV_BUFFERSINK_FLAG_NO_REQUEST);
1442 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1443 av_log(NULL, AV_LOG_WARNING,
1444 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1445 } else if (flush && ret == AVERROR_EOF) {
/* final flush: NULL frame tells do_video_out to drain the encoder */
1446 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1447 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1451 if (ost->finished) {
1452 av_frame_unref(filtered_frame);
1455 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1456 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1457 AVRational filter_tb = av_buffersink_get_time_base(filter);
1458 AVRational tb = enc->time_base;
/* widen the timebase denominator by up to 16 bits so float_pts keeps
 * sub-tick precision after the integer rescale below */
1459 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1461 tb.den <<= extra_bits;
1463 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1464 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1465 float_pts /= 1 << extra_bits;
1466 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1467 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* integer pts: filter timebase -> encoder timebase, minus start time */
1469 filtered_frame->pts =
1470 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1471 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1473 //if (ost->source_index >= 0)
1474 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1476 switch (av_buffersink_get_type(filter)) {
1477 case AVMEDIA_TYPE_VIDEO:
/* propagate the filter-produced SAR unless the user forced an aspect */
1478 if (!ost->frame_aspect_ratio.num)
1479 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1482 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1483 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1485 enc->time_base.num, enc->time_base.den);
1488 do_video_out(of, ost, filtered_frame, float_pts);
1490 case AVMEDIA_TYPE_AUDIO:
/* a fixed-parameter encoder cannot cope with mid-stream channel-count
 * changes coming out of the filtergraph */
1491 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1492 enc->channels != filtered_frame->channels) {
1493 av_log(NULL, AV_LOG_ERROR,
1494 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1497 do_audio_out(of, ost, filtered_frame);
1500 // TODO support subtitle filters
1504 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: total bytes written per media type,
 * muxing overhead percentage, then (at verbose level) per-stream
 * demux/decode and encode/mux statistics for every input and output
 * file, and a warning when nothing was encoded at all.
 * NOTE(review): some closing braces / else branches are missing from
 * this listing. */
1511 static void print_final_stats(int64_t total_size)
1513 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1514 uint64_t subtitle_size = 0;
1515 uint64_t data_size = 0;
/* -1 means "unknown"; printed as "unknown" below */
1516 float percent = -1.0;
/* accumulate written bytes per codec type across all output streams */
1520 for (i = 0; i < nb_output_streams; i++) {
1521 OutputStream *ost = output_streams[i];
1522 switch (ost->enc_ctx->codec_type) {
1523 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1524 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1525 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1526 default: other_size += ost->data_size; break;
1528 extra_size += ost->enc_ctx->extradata_size;
1529 data_size += ost->data_size;
/* pass-1-only streams write stats, not media, so they are special-cased
 * (the handling line itself is not visible in this listing) */
1530 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1531 != AV_CODEC_FLAG_PASS1)
/* container overhead = bytes written beyond the raw payload */
1535 if (data_size && total_size>0 && total_size >= data_size)
1536 percent = 100.0 * (total_size - data_size) / data_size;
1538 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1539 video_size / 1024.0,
1540 audio_size / 1024.0,
1541 subtitle_size / 1024.0,
1542 other_size / 1024.0,
1543 extra_size / 1024.0);
1545 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1547 av_log(NULL, AV_LOG_INFO, "unknown");
1548 av_log(NULL, AV_LOG_INFO, "\n");
1550 /* print verbose per-stream stats */
1551 for (i = 0; i < nb_input_files; i++) {
1552 InputFile *f = input_files[i];
1553 uint64_t total_packets = 0, total_size = 0;
1555 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1556 i, f->ctx->filename);
1558 for (j = 0; j < f->nb_streams; j++) {
1559 InputStream *ist = input_streams[f->ist_index + j];
1560 enum AVMediaType type = ist->dec_ctx->codec_type;
1562 total_size += ist->data_size;
1563 total_packets += ist->nb_packets;
1565 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1566 i, j, media_type_string(type));
1567 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1568 ist->nb_packets, ist->data_size);
1570 if (ist->decoding_needed) {
1571 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1572 ist->frames_decoded);
1573 if (type == AVMEDIA_TYPE_AUDIO)
1574 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1575 av_log(NULL, AV_LOG_VERBOSE, "; ");
1578 av_log(NULL, AV_LOG_VERBOSE, "\n");
1581 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1582 total_packets, total_size);
/* symmetric report for the output side */
1585 for (i = 0; i < nb_output_files; i++) {
1586 OutputFile *of = output_files[i];
1587 uint64_t total_packets = 0, total_size = 0;
1589 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1590 i, of->ctx->filename);
1592 for (j = 0; j < of->ctx->nb_streams; j++) {
1593 OutputStream *ost = output_streams[of->ost_index + j];
1594 enum AVMediaType type = ost->enc_ctx->codec_type;
1596 total_size += ost->data_size;
1597 total_packets += ost->packets_written;
1599 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1600 i, j, media_type_string(type));
1601 if (ost->encoding_needed) {
1602 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1603 ost->frames_encoded);
1604 if (type == AVMEDIA_TYPE_AUDIO)
1605 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1606 av_log(NULL, AV_LOG_VERBOSE, "; ");
1609 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1610 ost->packets_written, ost->data_size);
1612 av_log(NULL, AV_LOG_VERBOSE, "\n");
1615 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1616 total_packets, total_size);
/* nothing at all was produced — likely a user trimming mistake */
1618 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1619 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1621 av_log(NULL, AV_LOG_WARNING, "\n");
1623 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic one-line status report ("frame= ... fps= ... time=
 * ... bitrate= ...") to stderr/log and, when -progress is active, a
 * machine-readable key=value block to progress_avio.  Throttled to one
 * update per 500ms unless this is the final report, in which case the
 * final stats are printed too.
 * NOTE(review): several lines (buf declaration, returns, else branches,
 * hours/mins computation) are missing from this listing. */
1628 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1631 AVBPrint buf_script;
1633 AVFormatContext *oc;
1635 AVCodecContext *enc;
1636 int frame_number, vid, i;
1639 int64_t pts = INT64_MIN + 1;
/* static state: persists across calls to implement rate limiting */
1640 static int64_t last_time = -1;
1641 static int qp_histogram[52];
1642 int hours, mins, secs, us;
/* nothing to do if neither console stats nor -progress are wanted */
1646 if (!print_stats && !is_last_report && !progress_avio)
1649 if (!is_last_report) {
1650 if (last_time == -1) {
1651 last_time = cur_time;
/* rate-limit intermediate reports to one per 500ms of wall clock */
1654 if ((cur_time - last_time) < 500000)
1656 last_time = cur_time;
/* elapsed wall-clock time in seconds */
1659 t = (cur_time-timer_start) / 1000000.0;
1662 oc = output_files[0]->ctx;
1664 total_size = avio_size(oc->pb);
1665 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1666 total_size = avio_tell(oc->pb);
1670 av_bprint_init(&buf_script, 0, 1);
1671 for (i = 0; i < nb_output_streams; i++) {
1673 ost = output_streams[i];
1675 if (!ost->stream_copy)
1676 q = ost->quality / (float) FF_QP2LAMBDA;
/* only the quantizer is reported for video streams after the first */
1678 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1679 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1680 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1681 ost->file_index, ost->index, q);
/* first video stream: frame count, fps and quantizer */
1683 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1686 frame_number = ost->frame_number;
1687 fps = t > 1 ? frame_number / t : 0;
1688 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1689 frame_number, fps < 9.95, fps, q);
1690 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1691 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1692 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1693 ost->file_index, ost->index, q);
1695 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram (-qphist): bucket the quantizer, print log2 counts */
1699 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1701 for (j = 0; j < 32; j++)
1702 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
/* per-plane PSNR (Y/U/V) when the encoder was asked to track error */
1705 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1707 double error, error_sum = 0;
1708 double scale, scale_sum = 0;
1710 char type[3] = { 'Y','U','V' };
1711 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1712 for (j = 0; j < 3; j++) {
/* last report uses the accumulated error over all frames */
1713 if (is_last_report) {
1714 error = enc->error[j];
1715 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1717 error = ost->error[j];
1718 scale = enc->width * enc->height * 255.0 * 255.0;
1724 p = psnr(error / scale);
1725 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1726 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1727 ost->file_index, ost->index, type[j] | 32, p);
1729 p = psnr(error_sum / scale_sum);
1730 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1731 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1732 ost->file_index, ost->index, p);
1736 /* compute min output value */
/* overall progress pts = max end-pts over all output streams */
1737 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1738 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1739 ost->st->time_base, AV_TIME_BASE_Q));
1741 nb_frames_drop += ost->last_dropped;
/* split pts (microseconds) into h:m:s.us for display */
1744 secs = FFABS(pts) / AV_TIME_BASE;
1745 us = FFABS(pts) % AV_TIME_BASE;
1751 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
/* speed = output seconds produced per wall-clock second */
1752 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1754 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1756 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1757 "size=%8.0fkB time=", total_size / 1024.0);
1759 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1760 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1761 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1762 (100 * us) / AV_TIME_BASE);
1765 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1766 av_bprintf(&buf_script, "bitrate=N/A\n");
1768 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1769 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1772 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1773 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1774 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1775 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1776 hours, mins, secs, us);
1778 if (nb_frames_dup || nb_frames_drop)
1779 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1780 nb_frames_dup, nb_frames_drop);
1781 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1782 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1785 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1786 av_bprintf(&buf_script, "speed=N/A\n");
1788 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1789 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one console line; '\n' finalizes */
1792 if (print_stats || is_last_report) {
1793 const char end = is_last_report ? '\n' : '\r';
1794 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1795 fprintf(stderr, "%s %c", buf, end);
1797 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
/* -progress output: key=value block terminated by a progress= line */
1802 if (progress_avio) {
1803 av_bprintf(&buf_script, "progress=%s\n",
1804 is_last_report ? "end" : "continue");
1805 avio_write(progress_avio, buf_script.str,
1806 FFMIN(buf_script.len, buf_script.size - 1));
1807 avio_flush(progress_avio);
1808 av_bprint_finalize(&buf_script, NULL);
1809 if (is_last_report) {
1810 if ((ret = avio_closep(&progress_avio)) < 0)
1811 av_log(NULL, AV_LOG_ERROR,
1812 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1817 print_final_stats(total_size);
/* Drain every video/audio encoder at end of input: send a NULL frame to
 * enter draining mode and receive packets until AVERROR_EOF, muxing each
 * one.  Streams whose filtergraph never produced data get their filter
 * inputs seeded with fake formats from libavformat so the encoder can
 * still be initialized (and the stream finished) cleanly.
 * NOTE(review): continue/exit statements, pkt declaration and some
 * braces are missing from this listing. */
1820 static void flush_encoders(void)
1824 for (i = 0; i < nb_output_streams; i++) {
1825 OutputStream *ost = output_streams[i];
1826 AVCodecContext *enc = ost->enc_ctx;
1827 OutputFile *of = output_files[ost->file_index];
1829 if (!ost->encoding_needed)
1832 // Try to enable encoding with no input frames.
1833 // Maybe we should just let encoding fail instead.
1834 if (!ost->initialized) {
1835 FilterGraph *fg = ost->filter->graph;
1836 char error[1024] = "";
1838 av_log(NULL, AV_LOG_WARNING,
1839 "Finishing stream %d:%d without any data written to it.\n",
1840 ost->file_index, ost->st->index);
1842 if (ost->filter && !fg->graph) {
1844 for (x = 0; x < fg->nb_inputs; x++) {
1845 InputFilter *ifilter = fg->inputs[x];
1846 if (ifilter->format < 0) {
1847 AVCodecParameters *par = ifilter->ist->st->codecpar;
1848 // We never got any input. Set a fake format, which will
1849 // come from libavformat.
1850 ifilter->format = par->format;
1851 ifilter->sample_rate = par->sample_rate;
1852 ifilter->channels = par->channels;
1853 ifilter->channel_layout = par->channel_layout;
1854 ifilter->width = par->width;
1855 ifilter->height = par->height;
1856 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1860 if (!ifilter_has_all_input_formats(fg))
1863 ret = configure_filtergraph(fg);
1865 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1869 finish_output_stream(ost);
1872 ret = init_output_stream(ost, error, sizeof(error));
1874 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1875 ost->file_index, ost->index, error);
/* raw/PCM-style audio encoders (frame_size <= 1) have nothing buffered */
1880 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1883 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* "audio"/"video" label for benchmark and error messages */
1887 const char *desc = NULL;
1891 switch (enc->codec_type) {
1892 case AVMEDIA_TYPE_AUDIO:
1895 case AVMEDIA_TYPE_VIDEO:
1902 av_init_packet(&pkt);
1906 update_benchmark(NULL);
/* EAGAIN from receive_packet means the encoder wants (NULL) input
 * before it will emit the next buffered packet */
1908 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1909 ret = avcodec_send_frame(enc, NULL);
1911 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1918 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1919 if (ret < 0 && ret != AVERROR_EOF) {
1920 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass encoding: append the encoder's pass-1 stats */
1925 if (ost->logfile && enc->stats_out) {
1926 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: send one final (eof-flagged) packet to flush the muxer */
1928 if (ret == AVERROR_EOF) {
1929 output_packet(of, &pkt, ost, 1);
1932 if (ost->finished & MUXER_FINISHED) {
1933 av_packet_unref(&pkt);
1936 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1937 pkt_size = pkt.size;
1938 output_packet(of, &pkt, ost, 0);
1939 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1940 do_video_stats(ost, pkt_size);
1947 * Check whether a packet from ist should be written into ost at this time
/* Visible checks: ost must actually be fed by this input stream
 * (source_index match) and the packet must not precede the output
 * file's requested start time.  The return statements fall on lines
 * missing from this listing (presumably return 0 on a failed check and
 * 1 otherwise — confirm against the full source). */
1949 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1951 OutputFile *of = output_files[ost->file_index];
/* global index of ist within the flat input_streams[] array */
1952 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1954 if (ost->source_index != ist_index)
1960 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding:
 * rescale timestamps to the muxer timebase, subtract the output start
 * time, enforce -t/-frames limits, and run the legacy parser-change
 * path for a few codecs.  pkt == NULL / EOF flushes the output
 * bitstream filters.
 * NOTE(review): several guard lines, returns and else branches are
 * missing from this listing. */
1966 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1968 OutputFile *of = output_files[ost->file_index];
1969 InputFile *f = input_files [ist->file_index];
1970 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
/* output start time expressed in the muxer timebase, subtracted from
 * every outgoing timestamp below */
1971 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1972 AVPacket opkt = { 0 };
1974 av_init_packet(&opkt);
1976 // EOF: flush output bitstream filters.
1978 output_packet(of, &opkt, ost, 1);
/* by default drop leading non-keyframes so the copy starts decodable */
1982 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1983 !ost->copy_initial_nonkeyframes)
/* drop packets that precede the effective start point unless
 * -copypriorss asked to keep them */
1986 if (!ost->frame_number && !ost->copy_prior_start) {
1987 int64_t comp_start = start_time;
1988 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1989 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1990 if (pkt->pts == AV_NOPTS_VALUE ?
1991 ist->pts < comp_start :
1992 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop this stream once the output file's -t duration is reached */
1996 if (of->recording_time != INT64_MAX &&
1997 ist->pts >= of->recording_time + start_time) {
1998 close_output_stream(ost);
/* same for a per-input-file recording limit */
2002 if (f->recording_time != INT64_MAX) {
2003 start_time = f->ctx->start_time;
2004 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2005 start_time += f->start_time;
2006 if (ist->pts >= f->recording_time + start_time) {
2007 close_output_stream(ost);
2012 /* force the input stream PTS */
2013 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2016 if (pkt->pts != AV_NOPTS_VALUE)
2017 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2019 opkt.pts = AV_NOPTS_VALUE;
/* no input dts: fall back to the demuxer-derived ist->dts (AV_TIME_BASE_Q) */
2021 if (pkt->dts == AV_NOPTS_VALUE)
2022 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2024 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2025 opkt.dts -= ost_tb_start_time;
/* audio: smooth dts in sample units to avoid rounding drift across
 * coarse container timebases */
2027 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2028 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2030 duration = ist->dec_ctx->frame_size;
2031 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2032 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2033 ost->mux_timebase) - ost_tb_start_time;
2036 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2038 opkt.flags = pkt->flags;
2039 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
/* legacy parser-based bitstream conversion for codecs not listed here */
2040 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2041 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2042 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2043 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2045 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2046 &opkt.data, &opkt.size,
2047 pkt->data, pkt->size,
2048 pkt->flags & AV_PKT_FLAG_KEY);
2050 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap the parser-allocated buffer so the packet owns its data */
2055 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2060 opkt.data = pkt->data;
2061 opkt.size = pkt->size;
2063 av_copy_packet_side_data(&opkt, pkt);
2065 output_packet(of, &opkt, ost, 0);
/* If the decoder did not report a channel layout, pick the default
 * layout for its channel count (only up to ist->guess_layout_max
 * channels) and log the guess so the user can see it.
 * NOTE(review): the return statements are on lines missing from this
 * listing (presumably 1 on success / nothing-to-do, 0 when no layout
 * could be guessed — confirm against the full source). */
2068 int guess_input_channel_layout(InputStream *ist)
2070 AVCodecContext *dec = ist->dec_ctx;
2072 if (!dec->channel_layout) {
2073 char layout_name[256];
2075 if (dec->channels > ist->guess_layout_max)
2077 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2078 if (!dec->channel_layout)
2080 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2081 dec->channels, dec->channel_layout);
2082 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2083 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Record decode outcome statistics and enforce -xerror behaviour:
 * decode_error_stat[0] counts successful decodes, [1] counts failures.
 * With exit_on_error set, a decode failure or a corrupt decoded frame
 * is fatal (the exit_program() calls fall on lines missing from this
 * listing). */
2088 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2090 if (*got_output || ret<0)
2091 decode_error_stat[ret<0] ++;
2093 if (ret < 0 && exit_on_error)
2096 if (exit_on_error && *got_output && ist) {
2097 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2098 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2104 // Filters can be configured only if the formats of all inputs are known.
/* Returns whether every audio/video input of the filtergraph has a
 * known format (inputs of other types are ignored).  The return
 * statements themselves fall on lines missing from this listing
 * (0 when any format is unknown, 1 otherwise — confirm). */
2105 static int ifilter_has_all_input_formats(FilterGraph *fg)
2108 for (i = 0; i < fg->nb_inputs; i++) {
2109 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2110 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Feed one decoded frame into a filtergraph input.  Detects input
 * parameter changes (format, size, sample rate/channels/layout,
 * hw frames context); while the graph is not fully configured the
 * frame is queued in ifilter->frame_queue, otherwise the graph is
 * (re)configured and the frame pushed into the buffersrc.
 * NOTE(review): some guard lines, returns and closing braces are
 * missing from this listing. */
2116 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2118 FilterGraph *fg = ifilter->graph;
2119 int need_reinit, ret, i;
2121 /* determine if the parameters for this input changed */
2122 need_reinit = ifilter->format != frame->format;
/* a change of (or in) the hw frames context also forces a reinit */
2123 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2124 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2127 switch (ifilter->ist->st->codecpar->codec_type) {
2128 case AVMEDIA_TYPE_AUDIO:
2129 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2130 ifilter->channels != frame->channels ||
2131 ifilter->channel_layout != frame->channel_layout;
2133 case AVMEDIA_TYPE_VIDEO:
2134 need_reinit |= ifilter->width != frame->width ||
2135 ifilter->height != frame->height;
/* remember this frame's parameters as the filter input's parameters */
2140 ret = ifilter_parameters_from_frame(ifilter, frame);
2145 /* (re)init the graph if possible, otherwise buffer the frame and return */
2146 if (need_reinit || !fg->graph) {
2147 for (i = 0; i < fg->nb_inputs; i++) {
/* graph not configurable yet: clone the frame onto this input's FIFO
 * (growing the FIFO when full) and come back later */
2148 if (!ifilter_has_all_input_formats(fg)) {
2149 AVFrame *tmp = av_frame_clone(frame);
2151 return AVERROR(ENOMEM);
2152 av_frame_unref(frame);
2154 if (!av_fifo_space(ifilter->frame_queue)) {
2155 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2157 av_frame_free(&tmp);
2161 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the old graph's pending output before reconfiguring it */
2166 ret = reap_filters(1);
2167 if (ret < 0 && ret != AVERROR_EOF) {
2169 av_strerror(ret, errbuf, sizeof(errbuf));
2171 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2175 ret = configure_filtergraph(fg);
2177 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* normal path: hand the frame to the buffersrc and push it through */
2182 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2184 if (ret != AVERROR_EOF)
2185 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/* Signal end-of-stream on a filtergraph input.  If the graph is
 * configured, close the buffersrc at the given pts; otherwise, once
 * every input of the never-configured graph has hit EOF, mark all of
 * its output streams finished so the transcode can terminate.
 * NOTE(review): the eof-flag assignment and some returns fall on lines
 * missing from this listing. */
2192 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2198 if (ifilter->filter) {
2199 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2203 // the filtergraph was never configured
2204 FilterGraph *fg = ifilter->graph;
/* i == fg->nb_inputs after the loop iff no input is still live */
2205 for (i = 0; i < fg->nb_inputs; i++)
2206 if (!fg->inputs[i]->eof)
2208 if (i == fg->nb_inputs) {
2209 // All the input streams have finished without the filtergraph
2210 // ever being configured.
2211 // Mark the output streams as finished.
2212 for (j = 0; j < fg->nb_outputs; j++)
2213 finish_output_stream(fg->outputs[j]->ost);
2220 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2221 // There is the following difference: if you got a frame, you must call
2222 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2223 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/* Thin wrapper mapping the send/receive decode API onto the old
 * got_frame-style calling convention used by the callers in this file.
 * NOTE(review): the *got_frame assignments and returns fall on lines
 * missing from this listing. */
2224 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2231 ret = avcodec_send_packet(avctx, pkt);
2232 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2233 // decoded frames with avcodec_receive_frame() until done.
2234 if (ret < 0 && ret != AVERROR_EOF)
2238 ret = avcodec_receive_frame(avctx, frame);
2239 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Push a decoded frame into every filtergraph input fed by this input
 * stream.  All but the last filter receive a reference copy (via
 * ist->filter_frame) so each graph can consume the data independently;
 * the last filter takes the decoded frame itself.  AVERROR_EOF from a
 * filter is deliberately ignored. */
2247 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2252 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2253 for (i = 0; i < ist->nb_filters; i++) {
2254 if (i < ist->nb_filters - 1) {
2255 f = ist->filter_frame;
2256 ret = av_frame_ref(f, decoded_frame);
2261 ret = ifilter_send_frame(ist->filters[i], f);
2262 if (ret == AVERROR_EOF)
2263 ret = 0; /* ignore */
2265 av_log(NULL, AV_LOG_ERROR,
2266 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet: run the decoder, validate the sample rate,
 * update stream counters and predicted next pts/dts, choose a timebase
 * for the decoded frame's pts and rescale it to sample units, then feed
 * the frame into the filtergraphs.
 * NOTE(review): error-return lines, the "goto fail"-style exits and
 * some else branches are missing from this listing. */
2273 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2276 AVFrame *decoded_frame;
2277 AVCodecContext *avctx = ist->dec_ctx;
2279 AVRational decoded_frame_tb;
/* lazily allocate the per-stream reusable frames */
2281 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2282 return AVERROR(ENOMEM);
2283 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2284 return AVERROR(ENOMEM);
2285 decoded_frame = ist->decoded_frame;
2287 update_benchmark(NULL);
2288 ret = decode(avctx, decoded_frame, got_output, pkt);
2289 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* a decoder reporting success with an invalid sample rate is an error */
2293 if (ret >= 0 && avctx->sample_rate <= 0) {
2294 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2295 ret = AVERROR_INVALIDDATA;
2298 if (ret != AVERROR_EOF)
2299 check_decode_result(ist, got_output, ret);
2301 if (!*got_output || ret < 0)
2304 ist->samples_decoded += decoded_frame->nb_samples;
2305 ist->frames_decoded++;
2308 /* increment next_dts to use for the case where the input stream does not
2309 have timestamps or there are multiple frames in the packet */
2310 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2312 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* pick the timebase the frame's pts is expressed in: frame pts ->
 * stream tb; else packet pts -> stream tb; else fall back to ist->dts
 * in AV_TIME_BASE_Q */
2316 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2317 decoded_frame_tb = ist->st->time_base;
2318 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2319 decoded_frame->pts = pkt->pts;
2320 decoded_frame_tb = ist->st->time_base;
2322 decoded_frame->pts = ist->dts;
2323 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale pts into sample units, smoothing rounding drift */
2325 if (decoded_frame->pts != AV_NOPTS_VALUE)
2326 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2327 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2328 (AVRational){1, avctx->sample_rate});
2329 ist->nb_samples = decoded_frame->nb_samples;
2330 err = send_frame_to_filters(ist, decoded_frame);
2332 av_frame_unref(ist->filter_frame);
2333 av_frame_unref(decoded_frame);
2334 return err < 0 ? err : ret;
/* Decode one video packet: buffer dts values so a best-effort timestamp
 * can be recovered at EOF, run the decoder, patch up video_delay for
 * H.264 streams without a proper parser, retrieve hwaccel frames back
 * to system memory, derive the frame pts from the decoder's best-effort
 * timestamp, then feed the frame into the filtergraphs.
 * NOTE(review): avpkt setup, error returns and some else branches are
 * missing from this listing. */
2337 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2340 AVFrame *decoded_frame;
2341 int i, ret = 0, err = 0;
2342 int64_t best_effort_timestamp;
2343 int64_t dts = AV_NOPTS_VALUE;
2346 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2347 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2349 if (!eof && pkt && pkt->size == 0)
2352 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2353 return AVERROR(ENOMEM);
2354 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2355 return AVERROR(ENOMEM);
2356 decoded_frame = ist->decoded_frame;
2357 if (ist->dts != AV_NOPTS_VALUE)
2358 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2361 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2364 // The old code used to set dts on the drain packet, which does not work
2365 // with the new API anymore.
/* remember each packet's dts so EOF frames without a best-effort
 * timestamp can fall back to the oldest buffered dts */
2367 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2369 return AVERROR(ENOMEM);
2370 ist->dts_buffer = new;
2371 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2374 update_benchmark(NULL);
2375 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2376 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2380 // The following line may be required in some cases where there is no parser
2381 // or the parser does not has_b_frames correctly
2382 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2383 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2384 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2386 av_log(ist->dec_ctx, AV_LOG_WARNING,
2387 "video_delay is larger in decoder than demuxer %d > %d.\n"
2388 "If you want to help, upload a sample "
2389 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2390 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2391 ist->dec_ctx->has_b_frames,
2392 ist->st->codecpar->video_delay);
2395 if (ret != AVERROR_EOF)
2396 check_decode_result(ist, got_output, ret);
/* sanity check: decoded frame geometry should match the context */
2398 if (*got_output && ret >= 0) {
2399 if (ist->dec_ctx->width != decoded_frame->width ||
2400 ist->dec_ctx->height != decoded_frame->height ||
2401 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2402 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2403 decoded_frame->width,
2404 decoded_frame->height,
2405 decoded_frame->format,
2406 ist->dec_ctx->width,
2407 ist->dec_ctx->height,
2408 ist->dec_ctx->pix_fmt);
2412 if (!*got_output || ret < 0)
/* honor a user-forced field order (-top) */
2415 if(ist->top_field_first>=0)
2416 decoded_frame->top_field_first = ist->top_field_first;
2418 ist->frames_decoded++;
/* download hwaccel output frames into system memory for filtering */
2420 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2421 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2425 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2427 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2428 *duration_pts = decoded_frame->pkt_duration;
/* constant frame rate forced with -r: synthesize sequential pts */
2430 if (ist->framerate.num)
2431 best_effort_timestamp = ist->cfr_next_pts++;
/* at EOF, frames drained from the decoder have no timestamp; pop the
 * oldest buffered packet dts instead */
2433 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2434 best_effort_timestamp = ist->dts_buffer[0];
2436 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2437 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2438 ist->nb_dts_buffer--;
2441 if(best_effort_timestamp != AV_NOPTS_VALUE) {
/* note: also assigns best_effort_timestamp to decoded_frame->pts */
2442 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2444 if (ts != AV_NOPTS_VALUE)
2445 ist->next_pts = ist->pts = ts;
2449 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2450 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2451 ist->st->index, av_ts2str(decoded_frame->pts),
2452 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2453 best_effort_timestamp,
2454 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2455 decoded_frame->key_frame, decoded_frame->pict_type,
2456 ist->st->time_base.num, ist->st->time_base.den);
/* the container-level SAR overrides whatever the decoder reported */
2459 if (ist->st->sample_aspect_ratio.num)
2460 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2462 err = send_frame_to_filters(ist, decoded_frame);
2465 av_frame_unref(ist->filter_frame);
2466 av_frame_unref(decoded_frame);
2467 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet and dispatch the result: optionally shorten the
 * previous subtitle's display time (-fix_sub_duration), feed the sub2video
 * path used for filtering, and encode the subtitle on every eligible output.
 * NOTE(review): this excerpt is missing intermediate lines (braces, error
 * returns); comments describe only what the visible code shows.
 */
2470 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2473 AVSubtitle subtitle;
2475 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2476 &subtitle, got_output, pkt);
/* Apply the global decode-error policy (may abort on -xerror). */
2478 check_decode_result(NULL, got_output, ret);
2480 if (ret < 0 || !*got_output) {
/* No output produced: flush any pending sub2video state. */
2483 sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle so it ends when the current
 * one starts; the gap is converted to milliseconds via av_rescale(). */
2487 if (ist->fix_sub_duration) {
2489 if (ist->prev_sub.got_output) {
2490 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2491 1000, AV_TIME_BASE);
2492 if (end < ist->prev_sub.subtitle.end_display_time) {
2493 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2494 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2495 ist->prev_sub.subtitle.end_display_time, end,
2496 end <= 0 ? ", dropping it" : "");
2497 ist->prev_sub.subtitle.end_display_time = end;
/* Delay output by one subtitle: emit the stored previous subtitle now and
 * remember the current one for the next call. */
2500 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2501 FFSWAP(int, ret, ist->prev_sub.ret);
2502 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video configured: render immediately; otherwise, if filters exist,
 * queue the subtitle (by value) until the filter graph is ready. */
2510 if (ist->sub2video.frame) {
2511 sub2video_update(ist, &subtitle);
2512 } else if (ist->nb_filters) {
2513 if (!ist->sub2video.sub_queue)
2514 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2515 if (!ist->sub2video.sub_queue)
/* Double the FIFO when full before writing. */
2517 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2518 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2522 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
/* Subtitles with no rectangles are not sent to encoders. */
2526 if (!subtitle.num_rects)
2529 ist->frames_decoded++;
/* Encode once per output stream that accepts this input and encodes subtitles. */
2531 for (i = 0; i < nb_output_streams; i++) {
2532 OutputStream *ost = output_streams[i];
2534 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2535 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2538 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
/* The decoded AVSubtitle owns allocated rects; always free it. */
2543 avsubtitle_free(&subtitle);
/*
 * Mark every filtergraph input fed by this stream as finished (EOF),
 * passing the stream's last pts converted from AV_TIME_BASE_Q back to the
 * stream time base (with saturating rounding for NOPTS/min/max values).
 */
2547 static int send_filter_eof(InputStream *ist)
2550 /* TODO keep pts also in stream time base to avoid converting back */
2551 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2552 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2554 for (i = 0; i < ist->nb_filters; i++) {
2555 ret = ifilter_send_eof(ist->filters[i], pts);
/*
 * Central per-packet driver for one input stream: maintains the stream's
 * dts/pts bookkeeping, runs the decode loop when the stream is decoded
 * (audio/video/subtitle), signals filter EOF after draining, and performs
 * timestamp advancement plus do_streamcopy() for streams that are copied.
 * Returns !eof_reached (non-zero while more input is expected).
 * NOTE(review): excerpt is missing intermediate lines; comments describe
 * only the visible code.
 */
2562 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2563 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2567 int eof_reached = 0;
/* First packet for this stream: seed ist->dts. For streams with a known
 * average frame rate this starts negative by the decoder's frame delay
 * (has_b_frames) so output timestamps can begin at zero. */
2570 if (!ist->saw_first_ts) {
2571 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2573 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2574 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2575 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2577 ist->saw_first_ts = 1;
2580 if (ist->next_dts == AV_NOPTS_VALUE)
2581 ist->next_dts = ist->dts;
2582 if (ist->next_pts == AV_NOPTS_VALUE)
2583 ist->next_pts = ist->pts;
2587 av_init_packet(&avpkt);
/* A packet dts resynchronizes the stream clock (rescaled to AV_TIME_BASE_Q);
 * for decoded video the pts is instead derived from decoder output. */
2594 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2595 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2596 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2597 ist->next_pts = ist->pts = ist->dts;
2600 // while we have more to decode or while the decoder did output something on EOF
2601 while (ist->decoding_needed) {
2602 int64_t duration_dts = 0;
2603 int64_t duration_pts = 0;
2605 int decode_failed = 0;
2607 ist->pts = ist->next_pts;
2608 ist->dts = ist->next_dts;
/* Dispatch to the per-media-type decode helper; the packet is only sent on
 * the first loop iteration ("repeating" passes NULL afterwards). */
2610 switch (ist->dec_ctx->codec_type) {
2611 case AVMEDIA_TYPE_AUDIO:
2612 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2615 case AVMEDIA_TYPE_VIDEO:
2616 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
/* Estimate the frame duration: prefer the packet duration, else derive it
 * from the decoder frame rate and repeat_pict/ticks_per_frame. */
2618 if (!repeating || !pkt || got_output) {
2619 if (pkt && pkt->duration) {
2620 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2621 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2622 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2623 duration_dts = ((int64_t)AV_TIME_BASE *
2624 ist->dec_ctx->framerate.den * ticks) /
2625 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2628 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2629 ist->next_dts += duration_dts;
2631 ist->next_dts = AV_NOPTS_VALUE;
/* Advance next_pts by the decoded frame's duration when known, falling back
 * to the dts-based estimate. */
2635 if (duration_pts > 0) {
2636 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2638 ist->next_pts += duration_dts;
2642 case AVMEDIA_TYPE_SUBTITLE:
2645 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2646 if (!pkt && ret >= 0)
2653 if (ret == AVERROR_EOF) {
/* Distinguish decoder failures from post-decode processing failures;
 * the latter is always fatal, the former only with -xerror. */
2659 if (decode_failed) {
2660 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2661 ist->file_index, ist->st->index, av_err2str(ret));
2663 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2664 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2666 if (!decode_failed || exit_on_error)
2672 ist->got_output = 1;
2677 // During draining, we might get multiple output frames in this loop.
2678 // ffmpeg.c does not drain the filter chain on configuration changes,
2679 // which means if we send multiple frames at once to the filters, and
2680 // one of those frames changes configuration, the buffered frames will
2681 // be lost. This can upset certain FATE tests.
2682 // Decode only 1 frame per call on EOF to appease these FATE tests.
2683 // The ideal solution would be to rewrite decoding to use the new
2684 // decoding API in a better way.
2691 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2692 /* except when looping we need to flush but not to send an EOF */
2693 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2694 int ret = send_filter_eof(ist);
2696 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2701 /* handle stream copy */
2702 if (!ist->decoding_needed && pkt) {
2703 ist->dts = ist->next_dts;
/* Advance the synthetic clock by one frame/packet worth of time, per media
 * type, since no decoder output is available in copy mode. */
2704 switch (ist->dec_ctx->codec_type) {
2705 case AVMEDIA_TYPE_AUDIO:
2706 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2707 ist->dec_ctx->sample_rate;
2709 case AVMEDIA_TYPE_VIDEO:
2710 if (ist->framerate.num) {
2711 // TODO: Remove work-around for c99-to-c89 issue 7
2712 AVRational time_base_q = AV_TIME_BASE_Q;
2713 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2714 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2715 } else if (pkt->duration) {
2716 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2717 } else if(ist->dec_ctx->framerate.num != 0) {
2718 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2719 ist->next_dts += ((int64_t)AV_TIME_BASE *
2720 ist->dec_ctx->framerate.den * ticks) /
2721 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2725 ist->pts = ist->dts;
2726 ist->next_pts = ist->next_dts;
/* Copy the packet to every output stream that stream-copies this input. */
2728 for (i = 0; i < nb_output_streams; i++) {
2729 OutputStream *ost = output_streams[i];
2731 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2734 do_streamcopy(ist, ost, pkt);
2737 return !eof_reached;
/*
 * Build an SDP description covering every "rtp" output muxer context and
 * write it either to stdout or to the file given by -sdp_file.
 * Bails out (visible check) while any output file header is not yet written.
 */
2740 static void print_sdp(void)
2745 AVIOContext *sdp_pb;
2746 AVFormatContext **avc;
/* All muxers must be initialized before an SDP can be generated. */
2748 for (i = 0; i < nb_output_files; i++) {
2749 if (!output_files[i]->header_written)
2753 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP outputs; j counts how many were found. */
2756 for (i = 0, j = 0; i < nb_output_files; i++) {
2757 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2758 avc[j] = output_files[i]->ctx;
2766 av_sdp_create(avc, j, sdp, sizeof(sdp));
2768 if (!sdp_filename) {
2769 printf("SDP:\n%s\n", sdp);
2772 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2773 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2775 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2776 avio_closep(&sdp_pb);
2777 av_freep(&sdp_filename);
/*
 * Linear lookup in the global hwaccels[] table (NULL-name terminated) for
 * the entry whose pixel format matches; visible code returns the match.
 */
2785 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2788 for (i = 0; hwaccels[i].name; i++)
2789 if (hwaccels[i].pix_fmt == pix_fmt)
2790 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats, try to initialize a matching hardware acceleration for hwaccel
 * formats, and record which hwaccel/pix_fmt was activated on the
 * InputStream (stored in s->opaque).
 * NOTE(review): excerpt is missing intermediate lines (e.g. the software
 * fallback path); comments cover only the visible code.
 */
2794 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2796 InputStream *ist = s->opaque;
2797 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2800 for (p = pix_fmts; *p != -1; p++) {
2801 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2802 const HWAccel *hwaccel;
/* Non-hwaccel formats are handled elsewhere (lines not in view). */
2804 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2807 hwaccel = get_hwaccel(*p);
/* Skip hwaccels that conflict with an already-active or user-requested one. */
2809 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2810 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2813 ret = hwaccel->init(s);
/* Init failed: fatal only if this exact hwaccel was explicitly requested. */
2815 if (ist->hwaccel_id == hwaccel->id) {
2816 av_log(NULL, AV_LOG_FATAL,
2817 "%s hwaccel requested for input stream #%d:%d, "
2818 "but cannot be initialized.\n", hwaccel->name,
2819 ist->file_index, ist->st->index);
2820 return AV_PIX_FMT_NONE;
/* Propagate the input stream's hw frames context to the decoder. */
2825 if (ist->hw_frames_ctx) {
2826 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2827 if (!s->hw_frames_ctx)
2828 return AV_PIX_FMT_NONE;
2831 ist->active_hwaccel_id = hwaccel->id;
2832 ist->hwaccel_pix_fmt = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate to the active hwaccel's
 * buffer allocator when the frame uses the hwaccel pixel format, otherwise
 * fall back to the default libavcodec allocator.
 */
2839 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2841 InputStream *ist = s->opaque;
2843 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2844 return ist->hwaccel_get_buffer(s, frame, flags);
2846 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Prepare one input stream for use: when decoding is needed, install the
 * get_format/get_buffer2 callbacks, set decoder options, configure the
 * hardware device, and open the decoder. On failure a human-readable
 * message is written into `error` and a negative AVERROR is returned.
 */
2849 static int init_input_stream(int ist_index, char *error, int error_len)
2852 InputStream *ist = input_streams[ist_index];
2854 if (ist->decoding_needed) {
2855 AVCodec *codec = ist->dec;
/* Visible branch: no decoder found for this codec id. */
2857 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2858 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2859 return AVERROR(EINVAL);
/* Hook up the hwaccel-aware callbacks; opaque carries the InputStream. */
2862 ist->dec_ctx->opaque = ist;
2863 ist->dec_ctx->get_format = get_format;
2864 ist->dec_ctx->get_buffer2 = get_buffer;
2865 ist->dec_ctx->thread_safe_callbacks = 1;
2867 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles used for direct output: compute end display times (EDT);
 * mixing that with filtering is only partially supported. */
2868 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2869 (ist->decoding_needed & DECODING_FOR_OST)) {
2870 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2871 if (ist->decoding_needed & DECODING_FOR_FILTER)
2872 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2875 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2877 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2878 * audio, and video decoders such as cuvid or mediacodec */
2879 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
2881 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2882 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2883 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2884 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2885 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2887 ret = hw_device_setup_for_decode(ist);
2889 snprintf(error, error_len, "Device setup failed for "
2890 "decoder on input stream #%d:%d : %s",
2891 ist->file_index, ist->st->index, av_err2str(ret));
2895 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2896 if (ret == AVERROR_EXPERIMENTAL)
2897 abort_codec_experimental(codec, 0);
2899 snprintf(error, error_len,
2900 "Error while opening decoder for input stream "
2902 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left unconsumed are reported as errors. */
2905 assert_avoptions(ist->decoder_opts);
2908 ist->next_pts = AV_NOPTS_VALUE;
2909 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream feeding this output stream via direct mapping
 * (source_index >= 0); the no-source case is handled on lines not in view.
 */
2914 static InputStream *get_input_stream(OutputStream *ost)
2916 if (ost->source_index >= 0)
2917 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values; FFDIFFSIGN yields -1/0/1 and
 * avoids the overflow a plain subtraction would risk.
 */
2921 static int compare_int64(const void *a, const void *b)
2923 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/*
 * Once every stream of the output file is initialized, write the muxer
 * header, optionally print the SDP, and flush packets that were queued in
 * per-stream muxing queues while the muxer was not yet ready.
 */
2926 /* open the muxer when all the streams are initialized */
2927 static int check_init_output_file(OutputFile *of, int file_index)
/* Bail out (lines not in view) while any stream is still uninitialized. */
2931 for (i = 0; i < of->ctx->nb_streams; i++) {
2932 OutputStream *ost = output_streams[of->ost_index + i];
2933 if (!ost->initialized)
2937 of->ctx->interrupt_callback = int_cb;
2939 ret = avformat_write_header(of->ctx, &of->opts);
2941 av_log(NULL, AV_LOG_ERROR,
2942 "Could not write header for output file #%d "
2943 "(incorrect codec parameters ?): %s\n",
2944 file_index, av_err2str(ret));
2947 //assert_avoptions(of->opts);
2948 of->header_written = 1;
2950 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2952 if (sdp_filename || want_sdp)
2955 /* flush the muxing queues */
2956 for (i = 0; i < of->ctx->nb_streams; i++) {
2957 OutputStream *ost = output_streams[of->ost_index + i];
2959 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2960 if (!av_fifo_size(ost->muxing_queue))
2961 ost->mux_timebase = ost->st->time_base;
/* Drain every queued packet straight into the now-open muxer. */
2963 while (av_fifo_size(ost->muxing_queue)) {
2965 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2966 write_packet(of, &pkt, ost, 1);
/*
 * Initialize the output stream's bitstream-filter chain: chain codec
 * parameters and time bases from one filter to the next, init each filter,
 * then copy the final filter's output parameters/time base to the stream.
 */
2973 static int init_output_bsfs(OutputStream *ost)
2978 if (!ost->nb_bitstream_filters)
2981 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2982 ctx = ost->bsf_ctx[i];
/* First filter takes the stream's codecpar; later ones take the previous
 * filter's output parameters. */
2984 ret = avcodec_parameters_copy(ctx->par_in,
2985 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2989 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2991 ret = av_bsf_init(ctx);
2993 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2994 ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines what the muxer will see. */
2999 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3000 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3004 ost->st->time_base = ctx->time_base_out;
/*
 * Initialize an output stream in stream-copy mode: copy codec parameters
 * from the input stream (with user overrides applied via encoder_opts),
 * pick a compatible codec_tag for the output container, transfer timing
 * info, duration, disposition and side data, and apply per-codec-type
 * fixups (block_align resets, aspect-ratio/frame-rate propagation).
 * NOTE(review): excerpt is missing intermediate lines; comments cover
 * only the visible code.
 */
3009 static int init_output_stream_streamcopy(OutputStream *ost)
3011 OutputFile *of = output_files[ost->file_index];
3012 InputStream *ist = get_input_stream(ost);
3013 AVCodecParameters *par_dst = ost->st->codecpar;
3014 AVCodecParameters *par_src = ost->ref_par;
3017 uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy requires a mapped input and no filtergraph. */
3019 av_assert0(ist && !ost->filter);
/* Round-trip input codecpar through enc_ctx so user options can override
 * individual fields, then snapshot the result into par_src. */
3021 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3023 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3025 av_log(NULL, AV_LOG_FATAL,
3026 "Error setting up codec context options.\n");
3029 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec_tag only when the output container has no tag
 * table, the tag maps back to the same codec id, or no tag is defined
 * for this codec in the output format. */
3032 unsigned int codec_tag_tmp;
3033 if (!of->ctx->oformat->codec_tag ||
3034 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3035 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3036 codec_tag = par_src->codec_tag;
3039 ret = avcodec_parameters_copy(par_dst, par_src);
3043 par_dst->codec_tag = codec_tag;
3045 if (!ost->frame_rate.num)
3046 ost->frame_rate = ist->framerate;
3047 ost->st->avg_frame_rate = ost->frame_rate;
3049 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3053 // copy timebase while removing common factors
3054 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3055 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3057 // copy estimated duration as a hint to the muxer
3058 if (ost->st->duration <= 0 && ist->st->duration > 0)
3059 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3062 ost->st->disposition = ist->st->disposition;
/* Duplicate all stream-level side data onto the output stream. */
3064 if (ist->st->nb_side_data) {
3065 for (i = 0; i < ist->st->nb_side_data; i++) {
3066 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3069 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3071 return AVERROR(ENOMEM);
3072 memcpy(dst_data, sd_src->data, sd_src->size);
/* A user rotation override is expressed as a 3x3 display matrix. */
3076 if (ost->rotate_overridden) {
3077 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3078 sizeof(int32_t) * 9);
3080 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3083 ost->parser = av_parser_init(par_dst->codec_id);
3084 ost->parser_avctx = avcodec_alloc_context3(NULL);
3085 if (!ost->parser_avctx)
3086 return AVERROR(ENOMEM);
3088 switch (par_dst->codec_type) {
3089 case AVMEDIA_TYPE_AUDIO:
3090 if (audio_volume != 256) {
3091 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Reset bogus/unreliable block_align values for MP3 and AC-3. */
3094 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3095 par_dst->block_align= 0;
3096 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3097 par_dst->block_align= 0;
3099 case AVMEDIA_TYPE_VIDEO:
/* Sample aspect ratio: -aspect override > input stream SAR > codec SAR. */
3100 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3102 av_mul_q(ost->frame_aspect_ratio,
3103 (AVRational){ par_dst->height, par_dst->width });
3104 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3105 "with stream copy may produce invalid files\n");
3107 else if (ist->st->sample_aspect_ratio.num)
3108 sar = ist->st->sample_aspect_ratio;
3110 sar = par_src->sample_aspect_ratio;
3111 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3112 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3113 ost->st->r_frame_rate = ist->st->r_frame_rate;
3117 ost->mux_timebase = ist->st->time_base;
/*
 * Write an "encoder" metadata tag on the output stream identifying the
 * encoder in use; includes the libavcodec version string unless bitexact
 * mode is requested (via format fflags or codec flags). An existing
 * user-provided "encoder" tag is left untouched.
 */
3122 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3124 AVDictionaryEntry *e;
3126 uint8_t *encoder_string;
3127 int encoder_string_len;
3128 int format_flags = 0;
3129 int codec_flags = ost->enc_ctx->flags;
3131 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate the -fflags/-flags option strings to detect bitexact mode. */
3134 e = av_dict_get(of->opts, "fflags", NULL, 0);
3136 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3139 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3141 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3143 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3146 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2: separating space and terminating NUL. */
3149 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3150 encoder_string = av_mallocz(encoder_string_len);
3151 if (!encoder_string)
/* Bitexact output must not embed the version-dependent ident string. */
3154 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3155 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3157 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3158 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Dictionary takes ownership of encoder_string (DONT_STRDUP_VAL). */
3159 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3160 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse the -force_key_frames specification `kf` (comma-separated times,
 * with "chapters[+offset]" expanding to all chapter start times) into a
 * sorted array of timestamps in the encoder time base, stored on the
 * OutputStream (forced_kf_pts / forced_kf_count).
 */
3163 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3164 AVCodecContext *avctx)
3167 int n = 1, i, size, index = 0;
/* First pass (visible loop): count entries by scanning the spec string. */
3170 for (p = kf; *p; p++)
3174 pts = av_malloc_array(size, sizeof(*pts));
3176 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3181 for (i = 0; i < n; i++) {
3182 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at every chapter start. */
3187 if (!memcmp(p, "chapters", 8)) {
3189 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by (nb_chapters - 1) extra slots, guarding overflow. */
3192 if (avf->nb_chapters > INT_MAX - size ||
3193 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3195 av_log(NULL, AV_LOG_FATAL,
3196 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters", applied to every chapter time. */
3199 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3200 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3202 for (j = 0; j < avf->nb_chapters; j++) {
3203 AVChapter *c = avf->chapters[j];
3204 av_assert1(index < size);
3205 pts[index++] = av_rescale_q(c->start, c->time_base,
3206 avctx->time_base) + t;
/* Plain timestamp entry: parse and rescale to the encoder time base. */
3211 t = parse_time_or_die("force_key_frames", p, 1);
3212 av_assert1(index < size);
3213 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the timestamps sorted so the encoder loop can scan them in order. */
3220 av_assert0(index == size);
3221 qsort(pts, size, sizeof(*pts), compare_int64);
3222 ost->forced_kf_count = size;
3223 ost->forced_kf_pts = pts;
/*
 * Choose the encoder time base: an explicit -enc_time_base value wins;
 * a negative value means "copy the input stream's time base" (warning if
 * no input stream is available); otherwise use the supplied default.
 */
3226 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3228 InputStream *ist = get_input_stream(ost);
3229 AVCodecContext *enc_ctx = ost->enc_ctx;
3230 AVFormatContext *oc;
3232 if (ost->enc_timebase.num > 0) {
3233 enc_ctx->time_base = ost->enc_timebase;
/* Negative num = sentinel for "take it from the input stream". */
3237 if (ost->enc_timebase.num < 0) {
3239 enc_ctx->time_base = ist->st->time_base;
3243 oc = output_files[ost->file_index]->ctx;
3244 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3247 enc_ctx->time_base = default_time_base;
/*
 * Configure the encoder context for an output stream that will be encoded:
 * derive frame rate, sample/pixel format, dimensions, channel layout and
 * time base from the buffersink feeding the encoder, propagate disposition
 * and chroma location from the input, and parse forced-keyframe settings.
 * NOTE(review): excerpt is missing intermediate lines; comments cover only
 * the visible code.
 */
3250 static int init_output_stream_encode(OutputStream *ost)
3252 InputStream *ist = get_input_stream(ost);
3253 AVCodecContext *enc_ctx = ost->enc_ctx;
3254 AVCodecContext *dec_ctx = NULL;
3255 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3258 set_encoder_id(output_files[ost->file_index], ost);
3260 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3261 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3262 // which have to be filtered out to prevent leaking them to output files.
3263 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3266 ost->st->disposition = ist->st->disposition;
3268 dec_ctx = ist->dec_ctx;
3270 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only audio/video stream of its type in the file, mark it
 * as the default stream. */
3272 for (j = 0; j < oc->nb_streams; j++) {
3273 AVStream *st = oc->streams[j];
3274 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3277 if (j == oc->nb_streams)
3278 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3279 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3280 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate fallback chain: -r option > buffersink > input -r >
 * input r_frame_rate > hardcoded 25 fps. */
3283 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3284 if (!ost->frame_rate.num)
3285 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3286 if (ist && !ost->frame_rate.num)
3287 ost->frame_rate = ist->framerate;
3288 if (ist && !ost->frame_rate.num)
3289 ost->frame_rate = ist->st->r_frame_rate;
3290 if (ist && !ost->frame_rate.num) {
3291 ost->frame_rate = (AVRational){25, 1};
3292 av_log(NULL, AV_LOG_WARNING,
3294 "about the input framerate is available. Falling "
3295 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3296 "if you want a different framerate.\n",
3297 ost->file_index, ost->index);
3299 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the encoder's supported frame rates unless -force_fps. */
3300 if (ost->enc->supported_framerates && !ost->force_fps) {
3301 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3302 ost->frame_rate = ost->enc->supported_framerates[idx];
3304 // reduce frame rate for mpeg4 to be within the spec limits
3305 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3306 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3307 ost->frame_rate.num, ost->frame_rate.den, 65535);
3311 switch (enc_ctx->codec_type) {
3312 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the buffersink at the end of the filtergraph. */
3313 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3315 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3316 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3317 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3318 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3319 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3321 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3324 case AVMEDIA_TYPE_VIDEO:
/* Default video time base is the inverse of the frame rate. */
3325 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3327 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3328 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3329 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3330 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3331 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3332 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Convert forced-keyframe timestamps into the final encoder time base. */
3334 for (j = 0; j < ost->forced_kf_count; j++)
3335 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3337 enc_ctx->time_base);
3339 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3340 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3341 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3342 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3343 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3344 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3346 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3348 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3349 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3351 enc_ctx->framerate = ost->frame_rate;
3353 ost->st->avg_frame_rate = ost->frame_rate;
/* When geometry/format changed relative to the decoder, the decoder's
 * bits_per_raw_sample no longer applies; use the user-specified value. */
3356 enc_ctx->width != dec_ctx->width ||
3357 enc_ctx->height != dec_ctx->height ||
3358 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3359 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" parses an expression; "source" keeps source
 * keyframes; anything else is a static timestamp list. */
3362 if (ost->forced_keyframes) {
3363 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3364 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3365 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3367 av_log(NULL, AV_LOG_ERROR,
3368 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3371 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3372 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3373 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3374 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3376 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3377 // parse it only for static kf timings
3378 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3379 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3383 case AVMEDIA_TYPE_SUBTITLE:
3384 enc_ctx->time_base = AV_TIME_BASE_Q;
/* Subtitle encoders inherit the input video canvas size when unset. */
3385 if (!enc_ctx->width) {
3386 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3387 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3390 case AVMEDIA_TYPE_DATA:
3397 ost->mux_timebase = enc_ctx->time_base;
3402 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3406 if (ost->encoding_needed) {
3407 AVCodec *codec = ost->enc;
3408 AVCodecContext *dec = NULL;
3411 ret = init_output_stream_encode(ost);
3415 if ((ist = get_input_stream(ost)))
3417 if (dec && dec->subtitle_header) {
3418 /* ASS code assumes this buffer is null terminated so add extra byte. */
3419 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3420 if (!ost->enc_ctx->subtitle_header)
3421 return AVERROR(ENOMEM);
3422 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3423 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3425 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3426 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3427 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3429 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3430 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3431 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3433 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3434 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3435 av_buffersink_get_format(ost->filter->filter)) {
3436 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3437 if (!ost->enc_ctx->hw_frames_ctx)
3438 return AVERROR(ENOMEM);
3440 ret = hw_device_setup_for_encode(ost);
3442 snprintf(error, error_len, "Device setup failed for "
3443 "encoder on output stream #%d:%d : %s",
3444 ost->file_index, ost->index, av_err2str(ret));
3449 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3450 if (ret == AVERROR_EXPERIMENTAL)
3451 abort_codec_experimental(codec, 1);
3452 snprintf(error, error_len,
3453 "Error while opening encoder for output stream #%d:%d - "
3454 "maybe incorrect parameters such as bit_rate, rate, width or height",
3455 ost->file_index, ost->index);
3458 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3459 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3460 av_buffersink_set_frame_size(ost->filter->filter,
3461 ost->enc_ctx->frame_size);
3462 assert_avoptions(ost->encoder_opts);
3463 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3464 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3465 " It takes bits/s as argument, not kbits/s\n");
3467 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3469 av_log(NULL, AV_LOG_FATAL,
3470 "Error initializing the output stream codec context.\n");
3474 * FIXME: ost->st->codec should't be needed here anymore.
3476 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3480 if (ost->enc_ctx->nb_coded_side_data) {
3483 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3484 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3487 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3489 return AVERROR(ENOMEM);
3490 memcpy(dst_data, sd_src->data, sd_src->size);
3495 * Add global input side data. For now this is naive, and copies it
3496 * from the input stream's global side data. All side data should
3497 * really be funneled over AVFrame and libavfilter, then added back to
3498 * packet side data, and then potentially using the first packet for
3503 for (i = 0; i < ist->st->nb_side_data; i++) {
3504 AVPacketSideData *sd = &ist->st->side_data[i];
3505 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3507 return AVERROR(ENOMEM);
3508 memcpy(dst, sd->data, sd->size);
3509 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3510 av_display_rotation_set((uint32_t *)dst, 0);
3514 // copy timebase while removing common factors
3515 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3516 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3518 // copy estimated duration as a hint to the muxer
3519 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3520 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3522 ost->st->codec->codec= ost->enc_ctx->codec;
3523 } else if (ost->stream_copy) {
3524 ret = init_output_stream_streamcopy(ost);
3529 * FIXME: will the codec context used by the parser during streamcopy
3530 * This should go away with the new parser API.
3532 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3537 // parse user provided disposition, and update stream values
3538 if (ost->disposition) {
3539 static const AVOption opts[] = {
3540 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3541 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3542 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3543 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3544 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3545 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3546 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3547 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3548 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3549 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3550 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3551 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3552 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3553 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3556 static const AVClass class = {
3558 .item_name = av_default_item_name,
3560 .version = LIBAVUTIL_VERSION_INT,
3562 const AVClass *pclass = &class;
3564 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3569 /* initialize bitstream filters for the output stream
3570 * needs to be done here, because the codec id for streamcopy is not
3571 * known until now */
3572 ret = init_output_bsfs(ost);
3576 ost->initialized = 1;
3578 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn, at most once per stream index, about a stream that appeared in the
 * input mid-demux (after option parsing); such streams are ignored.
 * NOTE(review): this listing is gapped — some original lines are elided. */
3585 static void report_new_stream(int input_index, AVPacket *pkt)
3587     InputFile *file = input_files[input_index];
3588     AVStream *st = file->ctx->streams[pkt->stream_index];
     /* indices below nb_streams_warn have already been reported (elided
      * early return follows in the full source) */
3590     if (pkt->stream_index < file->nb_streams_warn)
3592     av_log(file->ctx, AV_LOG_WARNING,
3593            "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3594            av_get_media_type_string(st->codecpar->codec_type),
3595            input_index, pkt->stream_index,
3596            pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
     /* remember the highest index seen so each new stream warns only once */
3597     file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the transcode loop: bind complex-filtergraph outputs
 * to source streams, start framerate emulation clocks, open decoders and
 * (non-filter-fed) encoders, discard unused programs, write stream-less file
 * headers, and dump the stream mapping to the log.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): listing is gapped — error paths and some braces are elided. */
3600 static int transcode_init(void)
3602     int ret = 0, i, j, k;
3603     AVFormatContext *oc;
3606     char error[1024] = {0};
     /* for outputs fed by a single-input complex graph, record the input
      * stream index so the mapping dump below can show a source */
3608     for (i = 0; i < nb_filtergraphs; i++) {
3609         FilterGraph *fg = filtergraphs[i];
3610         for (j = 0; j < fg->nb_outputs; j++) {
3611             OutputFilter *ofilter = fg->outputs[j];
3612             if (!ofilter->ost || ofilter->ost->source_index >= 0)
3614             if (fg->nb_inputs != 1)
3616             for (k = nb_input_streams-1; k >= 0 ; k--)
3617                 if (fg->inputs[0]->ist == input_streams[k])
3619             ofilter->ost->source_index = k;
3623     /* init framerate emulation */
3624     for (i = 0; i < nb_input_files; i++) {
3625         InputFile *ifile = input_files[i];
3626         if (ifile->rate_emu)
3627             for (j = 0; j < ifile->nb_streams; j++)
3628                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3631     /* init input streams */
3632     for (i = 0; i < nb_input_streams; i++)
3633         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
         /* on failure, close any encoder contexts opened so far */
3634             for (i = 0; i < nb_output_streams; i++) {
3635                 ost = output_streams[i];
3636                 avcodec_close(ost->enc_ctx);
3641     /* open each encoder */
3642     for (i = 0; i < nb_output_streams; i++) {
3643         // skip streams fed from filtergraphs until we have a frame for them
3644         if (output_streams[i]->filter)
3647         ret = init_output_stream(output_streams[i], error, sizeof(error));
3652     /* discard unused programs */
3653     for (i = 0; i < nb_input_files; i++) {
3654         InputFile *ifile = input_files[i];
3655         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3656             AVProgram *p = ifile->ctx->programs[j];
3657             int discard  = AVDISCARD_ALL;
             /* keep the program if any of its streams is actually used */
3659             for (k = 0; k < p->nb_stream_indexes; k++)
3660                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3661                     discard = AVDISCARD_DEFAULT;
3664             p->discard = discard;
3668     /* write headers for files with no streams */
3669     for (i = 0; i < nb_output_files; i++) {
3670         oc = output_files[i]->ctx;
3671         if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3672             ret = check_init_output_file(output_files[i], i);
3679     /* dump the stream mapping */
3680     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3681     for (i = 0; i < nb_input_streams; i++) {
3682         ist = input_streams[i];
3684         for (j = 0; j < ist->nb_filters; j++) {
3685             if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3686                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3687                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3688                        ist->filters[j]->name);
3689                 if (nb_filtergraphs > 1)
3690                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3691                 av_log(NULL, AV_LOG_INFO, "\n");
3696     for (i = 0; i < nb_output_streams; i++) {
3697         ost = output_streams[i];
3699         if (ost->attachment_filename) {
3700             /* an attached file */
3701             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3702                    ost->attachment_filename, ost->file_index, ost->index);
3706         if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3707             /* output from a complex graph */
3708             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3709             if (nb_filtergraphs > 1)
3710                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3712             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3713                    ost->index, ost->enc ? ost->enc->name : "?");
3717         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3718                input_streams[ost->source_index]->file_index,
3719                input_streams[ost->source_index]->st->index,
3722         if (ost->sync_ist != input_streams[ost->source_index])
3723             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3724                    ost->sync_ist->file_index,
3725                    ost->sync_ist->st->index);
3726         if (ost->stream_copy)
3727             av_log(NULL, AV_LOG_INFO, " (copy)");
3729             const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3730             const AVCodec *out_codec   = ost->enc;
3731             const char *decoder_name   = "?";
3732             const char *in_codec_name  = "?";
3733             const char *encoder_name   = "?";
3734             const char *out_codec_name = "?";
3735             const AVCodecDescriptor *desc;
             /* show "native" when the (de|en)coder name matches the codec
              * descriptor name, i.e. no external implementation was picked */
3738                 decoder_name  = in_codec->name;
3739                 desc = avcodec_descriptor_get(in_codec->id);
3741                     in_codec_name = desc->name;
3742                 if (!strcmp(decoder_name, in_codec_name))
3743                     decoder_name = "native";
3747                 encoder_name   = out_codec->name;
3748                 desc = avcodec_descriptor_get(out_codec->id);
3750                     out_codec_name = desc->name;
3751                 if (!strcmp(encoder_name, out_codec_name))
3752                     encoder_name = "native";
3755             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3756                    in_codec_name, decoder_name,
3757                    out_codec_name, encoder_name);
3759         av_log(NULL, AV_LOG_INFO, "\n");
3763         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
     /* signal other threads (e.g. signal handlers) that init has completed */
3767     atomic_store(&transcode_init_done, 1);
3772 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* Return 1 if there remain streams where more output is wanted, 0 otherwise.
 * A stream is "done" when it is finished, its file hit -fs (limit_filesize),
 * or it reached -frames; hitting max_frames also closes all sibling streams
 * of the same output file.
 * NOTE(review): listing is gapped — continue/return lines are elided. */
3773 static int need_output(void)
3777     for (i = 0; i < nb_output_streams; i++) {
3778         OutputStream *ost    = output_streams[i];
3779         OutputFile *of       = output_files[ost->file_index];
3780         AVFormatContext *os  = output_files[ost->file_index]->ctx;
3782         if (ost->finished ||
3783             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3785         if (ost->frame_number >= ost->max_frames) {
3787             for (j = 0; j < of->ctx->nb_streams; j++)
3788                 close_output_stream(output_streams[of->ost_index + j]);
3799  * Select the output stream to process.
3801  * @return  selected output stream, or NULL if none available
3803 static OutputStream *choose_output(void)
3806     int64_t opts_min = INT64_MAX;
3807     OutputStream *ost_min = NULL;
     /* pick the non-finished stream with the smallest muxed DTS so outputs
      * advance roughly in lockstep; unknown DTS sorts first (INT64_MIN) */
3809     for (i = 0; i < nb_output_streams; i++) {
3810         OutputStream *ost = output_streams[i];
3811         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3812                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3814         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3815             av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
     /* not-yet-initialized streams with pending input still count as work */
3817         if (!ost->initialized && !ost->inputs_done)
3820         if (!ost->finished && opts < opts_min) {
         /* a temporarily unavailable stream yields NULL rather than another */
3822             ost_min  = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin; used around
 * interactive command entry. No-op if stdin is not a tty (tcgetattr fails).
 * NOTE(review): the termios declaration and #if guard are elided in this
 * gapped listing. */
3828 static void set_tty_echo(int on)
3832     if (tcgetattr(0, &tty) == 0) {
3833         if (on) tty.c_lflag |= ECHO;
3834         else    tty.c_lflag &= ~ECHO;
3835         tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms) and act on single-key commands:
 * verbosity (+/-), QP histogram (s), filter commands (c/C), debug flags (d/D),
 * help (?). Returns AVERROR_EXIT to request shutdown, 0 otherwise.
 * NOTE(review): listing is gapped — the 'q' handling and several braces are
 * elided between the visible lines. */
3840 static int check_keyboard_interaction(int64_t cur_time)
3843     static int64_t last_time;
3844     if (received_nb_signals)
3845         return AVERROR_EXIT;
3846     /* read_key() returns 0 on EOF */
3847     if(cur_time - last_time >= 100000 && !run_as_daemon){
3849         last_time = cur_time;
3853         return AVERROR_EXIT;
3854     if (key == '+') av_log_set_level(av_log_get_level()+10);
3855     if (key == '-') av_log_set_level(av_log_get_level()-10);
3856     if (key == 's') qp_hist     ^= 1;
     /* 'h' (elided) cycles: no dump -> packet dump -> packet+hex dump */
3859             do_hex_dump = do_pkt_dump = 0;
3860         } else if(do_pkt_dump){
3864         av_log_set_level(AV_LOG_DEBUG);
3866     if (key == 'c' || key == 'C'){
3867         char buf[4096], target[64], command[256], arg[256] = {0};
3870         fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
         /* read a line from the raw tty, echo enabled via set_tty_echo */
3873         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3878         fprintf(stderr, "\n");
3880             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3881             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3882                    target, time, command, arg);
3883             for (i = 0; i < nb_filtergraphs; i++) {
3884                 FilterGraph *fg = filtergraphs[i];
                 /* 'c' sends immediately to the first matching filter,
                  * 'C' queues for all matching filters at the given time */
3887                         ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3888                                                           key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3889                         fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3890                     } else if (key == 'c') {
3891                         fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3892                         ret = AVERROR_PATCHWELCOME;
3894                         ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3896                             fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3901             av_log(NULL, AV_LOG_ERROR,
3902                    "Parse error, at least 3 arguments were expected, "
3903                    "only %d given in string '%s'\n", n, buf);
3906     if (key == 'd' || key == 'D'){
         /* 'D' cycles through debug modes by shifting the current mask */
3909             debug = input_streams[0]->st->codec->debug<<1;
3910             if(!debug) debug = 1;
3911             while(debug & (FF_DEBUG_DCT_COEFF
3913                                              |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3915                     )) //unsupported, would just crash
3922             while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3927             fprintf(stderr, "\n");
3928             if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3929                 fprintf(stderr,"error parsing debug value\n");
         /* propagate the new debug mask to every decoder and encoder */
3931         for(i=0;i<nb_input_streams;i++) {
3932             input_streams[i]->st->codec->debug = debug;
3934         for(i=0;i<nb_output_streams;i++) {
3935             OutputStream *ost = output_streams[i];
3936             ost->enc_ctx->debug = debug;
3938         if(debug) av_log_set_level(AV_LOG_DEBUG);
3939         fprintf(stderr,"debug=%d\n", debug);
3942         fprintf(stderr, "key    function\n"
3943                         "?      show this help\n"
3944                         "+      increase verbosity\n"
3945                         "-      decrease verbosity\n"
3946                         "c      Send command to first matching filter supporting it\n"
3947                         "C      Send/Queue command to all matching filters\n"
3948                         "D      cycle through available debug modes\n"
3949                         "h      dump packets/hex press to cycle through the 3 states\n"
3951                         "s      Show QP histogram\n"
/* Per-input-file demux thread (multi-input mode): reads packets with
 * av_read_frame() and forwards them to the main thread via a message queue.
 * On read error/EOF the error is stored on the queue's receive side.
 * NOTE(review): listing is gapped — loop braces and EAGAIN sleep are elided. */
3958 static void *input_thread(void *arg)
     /* non-blocking sends for seekable/non-lavfi inputs, see init below */
3961     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3966         ret = av_read_frame(f->ctx, &pkt);
3968         if (ret == AVERROR(EAGAIN)) {
3973             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3976         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3977         if (flags && ret == AVERROR(EAGAIN)) {
         /* queue full: retry blocking and warn the user to raise the size */
3979             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3980             av_log(f->ctx, AV_LOG_WARNING,
3981                    "Thread message queue blocking; consider raising the "
3982                    "thread_queue_size option (current value: %d)\n",
3983                    f->thread_queue_size);
3986             if (ret != AVERROR_EOF)
3987                 av_log(f->ctx, AV_LOG_ERROR,
3988                        "Unable to send packet to main thread: %s\n",
3990             av_packet_unref(&pkt);
3991             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all input demux threads: signal EOF on the send side, drain
 * and unref any queued packets, join the thread, then free the queue. */
3999 static void free_input_threads(void)
4003     for (i = 0; i < nb_input_files; i++) {
4004         InputFile *f = input_files[i];
         /* skip files that never got a thread (single input, or init failed) */
4007         if (!f || !f->in_thread_queue)
4009         av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4010         while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4011             av_packet_unref(&pkt);
4013         pthread_join(f->thread, NULL);
4015         av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demux thread + message queue per input file; only used when
 * there is more than one input (a single input is read inline).
 * Returns 0 on success or a negative AVERROR. */
4019 static int init_input_threads(void)
     /* single input: no threads needed, packets are read directly */
4023     if (nb_input_files == 1)
4026     for (i = 0; i < nb_input_files; i++) {
4027         InputFile *f = input_files[i];
         /* non-seekable (live) inputs must not block the main loop; lavfi is
          * exempt because it produces data on demand */
4029         if (f->ctx->pb ? !f->ctx->pb->seekable :
4030             strcmp(f->ctx->iformat->name, "lavfi"))
4031             f->non_blocking = 1;
4032         ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4033                                             f->thread_queue_size, sizeof(AVPacket));
4037         if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4038             av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4039             av_thread_message_queue_free(&f->in_thread_queue);
4040             return AVERROR(ret);
/* Receive one packet from an input file's demux-thread queue; non-blocking
 * when the file is flagged non_blocking (the condition line is elided in
 * this gapped listing). */
4046 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4048     return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4050                                         AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file. Under -re (rate emulation, guard
 * elided) returns EAGAIN while the stream's DTS is ahead of wall clock.
 * Multi-input goes through the thread queue, single input reads directly. */
4054 static int get_input_packet(InputFile *f, AVPacket *pkt)
4058         for (i = 0; i < f->nb_streams; i++) {
4059             InputStream *ist = input_streams[f->ist_index + i];
             /* compare stream time (us) against elapsed wall-clock time */
4060             int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4061             int64_t now = av_gettime_relative() - ist->start;
4063                 return AVERROR(EAGAIN);
4068     if (nb_input_files > 1)
4069         return get_input_packet_mt(f, pkt);
4071     return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input returned EAGAIN); return lines are elided here. */
4074 static int got_eagain(void)
4077     for (i = 0; i < nb_output_streams; i++)
4078         if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-stream unavailable flags before
 * retrying the transcode loop. */
4083 static void reset_eagain(void)
4086     for (i = 0; i < nb_input_files; i++)
4087         input_files[i]->eagain = 0;
4088     for (i = 0; i < nb_output_streams; i++)
4089         output_streams[i]->unavailable = 0;
4092 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Uses av_compare_ts so the comparison is exact across differing time bases;
 * the *duration store and early-exit lines are elided in this listing. */
4093 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4094                                AVRational time_base)
4100         return tmp_time_base;
4103     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4106         return tmp_time_base;
/* Implements -stream_loop: seek the input back to its start, flush decoders,
 * and compute the file's total duration (used to offset timestamps of the
 * next iteration). Audio streams take precedence for the last-frame length
 * since a video frame's duration is not exactly defined.
 * NOTE(review): listing is gapped — error checks and loop-count decrement
 * are elided. */
4112 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4115     AVCodecContext *avctx;
4116     int i, ret, has_audio = 0;
4117     int64_t duration = 0; /* in ist->st->time_base */
4119     ret = av_seek_frame(is, -1, is->start_time, 0);
     /* flush all active decoders so no stale frames leak into the next loop */
4123     for (i = 0; i < ifile->nb_streams; i++) {
4124         ist   = input_streams[ifile->ist_index + i];
4125         avctx = ist->dec_ctx;
4128         if (ist->decoding_needed) {
4129             process_input_packet(ist, NULL, 1);
4130             avcodec_flush_buffers(avctx);
4133     /* duration is the length of the last frame in a stream
4134      * when audio stream is present we don't care about
4135      * last video frame length because it's not defined exactly */
4136         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4140     for (i = 0; i < ifile->nb_streams; i++) {
4141         ist   = input_streams[ifile->ist_index + i];
4142         avctx = ist->dec_ctx;
4145             if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4146                 AVRational sample_rate = {1, avctx->sample_rate};
             /* convert decoded sample count of the last frame to stream tb */
4148                 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4152             if (ist->framerate.num) {
4153                 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4154             } else if(ist->st->avg_frame_rate.num) {
4155                 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4156             } else duration = 1;
4158         if (!ifile->duration)
4159             ifile->time_base = ist->st->time_base;
4160         /* the total duration of the stream, max_pts - min_pts is
4161          * the duration of the stream without the last frame */
4162         duration += ist->max_pts - ist->min_pts;
4163         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
     /* finite loop count: decrement (elided) until it reaches zero */
4167     if (ifile->loop > 0)
/*
 * Read and process one packet from the given input file.
 * Return codes (from the original header comment):
 */
4175  * - 0 -- one packet was read and processed
4176  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4177  *   this function should be called again
4178  * - AVERROR_EOF -- this function should not be called again
/* NOTE(review): listing is gapped — declarations, braces, and several
 * conditions are elided between visible lines; statement order here is
 * load-bearing (offsets must apply before scaling/discontinuity checks). */
4180 static int process_input(int file_index)
4182     InputFile *ifile = input_files[file_index];
4183     AVFormatContext *is;
4191     ret = get_input_packet(ifile, &pkt);
4193     if (ret == AVERROR(EAGAIN)) {
     /* on EOF/error with -stream_loop pending, rewind and read again */
4197     if (ret < 0 && ifile->loop) {
4198         if ((ret = seek_to_start(ifile, is)) < 0)
4200             ret = get_input_packet(ifile, &pkt);
4201         if (ret == AVERROR(EAGAIN)) {
4207         if (ret != AVERROR_EOF) {
4208             print_error(is->filename, ret);
     /* true EOF: flush every decoder of this file once */
4213         for (i = 0; i < ifile->nb_streams; i++) {
4214             ist = input_streams[ifile->ist_index + i];
4215             if (ist->decoding_needed) {
4216                 ret = process_input_packet(ist, NULL, 0);
4221             /* mark all outputs that don't go through lavfi as finished */
4222             for (j = 0; j < nb_output_streams; j++) {
4223                 OutputStream *ost = output_streams[j];
4225                 if (ost->source_index == ifile->ist_index + i &&
4226                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4227                     finish_output_stream(ost);
     /* report EAGAIN so the caller re-selects; eof_reached prevents rereads */
4231         ifile->eof_reached = 1;
4232         return AVERROR(EAGAIN);
4238         av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4239                          is->streams[pkt.stream_index]);
4241     /* the following test is needed in case new streams appear
4242        dynamically in stream : we ignore them */
4243     if (pkt.stream_index >= ifile->nb_streams) {
4244         report_new_stream(file_index, &pkt);
4245         goto discard_packet;
4248     ist = input_streams[ifile->ist_index + pkt.stream_index];
4250     ist->data_size += pkt.size;
4254         goto discard_packet;
4256     if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4257         av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4262         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4263                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4264                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4265                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4266                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4267                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4268                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4269                av_ts2str(input_files[ist->file_index]->ts_offset),
4270                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
     /* wrap correction: undo a pts_wrap_bits overflow near the stream start */
4273     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits<64){
4274         int64_t stime, stime2;
4275         // Correcting starttime based on the enabled streams
4276         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4277         //       so we instead do it here as part of discontinuity handling
4278         if (   ist->next_dts == AV_NOPTS_VALUE
4279             && ifile->ts_offset == -is->start_time
4280             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4281             int64_t new_start_time = INT64_MAX;
4282             for (i=0; i<is->nb_streams; i++) {
4283                 AVStream *st = is->streams[i];
4284                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4286                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4288             if (new_start_time > is->start_time) {
4289                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4290                 ifile->ts_offset = -new_start_time;
4294         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4295         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4296         ist->wrap_correction_done = 1;
4298         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4299             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4300             ist->wrap_correction_done = 0;
4302         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4303             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4304             ist->wrap_correction_done = 0;
4308     /* add the stream-global side data to the first packet */
4309     if (ist->nb_packets == 1) {
4310         for (i = 0; i < ist->st->nb_side_data; i++) {
4311             AVPacketSideData *src_sd = &ist->st->side_data[i];
         /* display matrix is handled via autorotate, not copied here */
4314             if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
         /* do not overwrite side data the packet already carries */
4317             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4320             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4324             memcpy(dst_data, src_sd->data, src_sd->size);
     /* apply the per-file timestamp offset (in stream time base) */
4328     if (pkt.dts != AV_NOPTS_VALUE)
4329         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4330     if (pkt.pts != AV_NOPTS_VALUE)
4331         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
     /* -itsscale: scale timestamps after offsetting */
4333     if (pkt.pts != AV_NOPTS_VALUE)
4334         pkt.pts *= ist->ts_scale;
4335     if (pkt.dts != AV_NOPTS_VALUE)
4336         pkt.dts *= ist->ts_scale;
4338     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
     /* inter-file discontinuity: first DTS of this file vs. last global ts */
4339     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4340          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4341         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4342         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4343         int64_t delta   = pkt_dts - ifile->last_ts;
4344         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4345             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
4346             ifile->ts_offset -= delta;
4347             av_log(NULL, AV_LOG_DEBUG,
4348                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4349                    delta, ifile->ts_offset);
4350             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4351             if (pkt.pts != AV_NOPTS_VALUE)
4352                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
     /* -stream_loop: shift timestamps by the accumulated file duration */
4356     duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4357     if (pkt.pts != AV_NOPTS_VALUE) {
4358         pkt.pts += duration;
4359         ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4360         ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4363     if (pkt.dts != AV_NOPTS_VALUE)
4364         pkt.dts += duration;
4366     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
     /* intra-stream discontinuity: DTS vs. the predicted next_dts */
4367     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4368          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4369         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4371         int64_t delta   = pkt_dts - ist->next_dts;
4372         if (is->iformat->flags & AVFMT_TS_DISCONT) {
4373             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4374                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
4375                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4376                 ifile->ts_offset -= delta;
4377                 av_log(NULL, AV_LOG_DEBUG,
4378                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4379                        delta, ifile->ts_offset);
4380                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4381                 if (pkt.pts != AV_NOPTS_VALUE)
4382                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
             /* non-DISCONT formats: drop wildly invalid timestamps instead */
4385             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4386                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4387                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4388                 pkt.dts = AV_NOPTS_VALUE;
4390             if (pkt.pts != AV_NOPTS_VALUE){
4391                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4392                 delta   = pkt_pts - ist->next_dts;
4393                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4394                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4395                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4396                     pkt.pts = AV_NOPTS_VALUE;
4402     if (pkt.dts != AV_NOPTS_VALUE)
4403         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4406         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4407                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4408                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4409                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4410                av_ts2str(input_files[ist->file_index]->ts_offset),
4411                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4414     sub2video_heartbeat(ist, pkt.pts);
4416     process_input_packet(ist, &pkt, 0);
4419     av_packet_unref(&pkt);
4425  * Perform a step of transcoding for the specified filter graph.
4427  * @param[in]  graph     filter graph to consider
4428  * @param[out] best_ist  input stream where a frame would allow to continue
4429  * @return  0 for success, <0 for error
/* NOTE(review): listing is gapped — *best_ist assignments are elided. */
4431 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4434     int nb_requests, nb_requests_max = 0;
4435     InputFilter *ifilter;
     /* ask the graph to produce output; 0 means frames are ready to reap */
4439     ret = avfilter_graph_request_oldest(graph->graph);
4441         return reap_filters(0);
4443     if (ret == AVERROR_EOF) {
4444         ret = reap_filters(1);
4445         for (i = 0; i < graph->nb_outputs; i++)
4446             close_output_stream(graph->outputs[i]->ost);
4449     if (ret != AVERROR(EAGAIN))
     /* EAGAIN: pick the input whose buffersrc starved the most requests */
4452     for (i = 0; i < graph->nb_inputs; i++) {
4453         ifilter = graph->inputs[i];
         /* inputs that are stalled or exhausted cannot help */
4455         if (input_files[ist->file_index]->eagain ||
4456             input_files[ist->file_index]->eof_reached)
4458         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4459         if (nb_requests > nb_requests_max) {
4460             nb_requests_max = nb_requests;
     /* no feedable input found: mark the graph's outputs unavailable */
4466         for (i = 0; i < graph->nb_outputs; i++)
4467             graph->outputs[i]->ost->unavailable = 1;
4473  * Run a single step of transcoding.
4475  * @return  0 for success, <0 for error
/* Chooses an output stream, (re)configures/initializes its filter graph and
 * encoder as needed, then feeds one packet from the selected input and reaps
 * filtered frames. NOTE(review): listing is gapped. */
4477 static int transcode_step(void)
4480     InputStream  *ist = NULL;
4483     ost = choose_output();
4490         av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
     /* graph exists but is not yet configured: configure once all inputs
      * have negotiated their formats */
4494     if (ost->filter && !ost->filter->graph->graph) {
4495         if (ifilter_has_all_input_formats(ost->filter->graph)) {
4496             ret = configure_filtergraph(ost->filter->graph);
4498                 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4504     if (ost->filter && ost->filter->graph->graph) {
4505         if (!ost->initialized) {
4506             char error[1024] = {0};
4507             ret = init_output_stream(ost, error, sizeof(error));
4509                 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4510                        ost->file_index, ost->index, error);
4514         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4518     } else if (ost->filter) {
         /* unconfigured graph: find an input that still owes us a frame */
4520         for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4521             InputFilter *ifilter = ost->filter->graph->inputs[i];
4522             if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
         /* all graph inputs are done; nothing more will arrive */
4528             ost->inputs_done = 1;
4532         av_assert0(ost->source_index >= 0);
4533         ist = input_streams[ost->source_index];
4536     ret = process_input(ist->file_index);
4537     if (ret == AVERROR(EAGAIN)) {
4538         if (input_files[ist->file_index]->eagain)
4539             ost->unavailable = 1;
     /* EOF from process_input is not an error at this level */
4544         return ret == AVERROR_EOF ? 0 : ret;
4546     return reap_filters(0);
4550  * The following code is the main loop of the file converter
/* Drives the whole conversion: init, loop of transcode_step() until no output
 * is wanted or a signal arrives, flush decoders, write trailers, close codecs
 * and free per-stream resources. Returns 0 or a negative AVERROR.
 * NOTE(review): listing is gapped — labels (fail:), some braces, and several
 * cleanup lines are elided. */
4552 static int transcode(void)
4555     AVFormatContext *os;
4558     int64_t timer_start;
4559     int64_t total_packets_written = 0;
4561     ret = transcode_init();
4565     if (stdin_interaction) {
4566         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4569     timer_start = av_gettime_relative();
4572     if ((ret = init_input_threads()) < 0)
4576     while (!received_sigterm) {
4577         int64_t cur_time= av_gettime_relative();
4579         /* if 'q' pressed, exits */
4580         if (stdin_interaction)
4581             if (check_keyboard_interaction(cur_time) < 0)
4584         /* check if there's any stream where output is still needed */
4585         if (!need_output()) {
4586             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4590         ret = transcode_step();
4591         if (ret < 0 && ret != AVERROR_EOF) {
4593             av_strerror(ret, errbuf, sizeof(errbuf));
4595             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4599         /* dump report by using the output first video and audio streams */
4600         print_report(0, timer_start, cur_time);
4603     free_input_threads();
4606     /* at the end of stream, we must flush the decoder buffers */
4607     for (i = 0; i < nb_input_streams; i++) {
4608         ist = input_streams[i];
4609         if (!input_files[ist->file_index]->eof_reached) {
4610             process_input_packet(ist, NULL, 0);
4617     /* write the trailer if needed and close file */
4618     for (i = 0; i < nb_output_files; i++) {
4619         os = output_files[i]->ctx;
4620         if (!output_files[i]->header_written) {
4621             av_log(NULL, AV_LOG_ERROR,
4622                    "Nothing was written into output file %d (%s), because "
4623                    "at least one of its streams received no packets.\n",
4627         if ((ret = av_write_trailer(os)) < 0) {
4628             av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4634     /* dump report by using the first video and audio streams */
4635     print_report(1, timer_start, av_gettime_relative());
4637     /* close each encoder */
4638     for (i = 0; i < nb_output_streams; i++) {
4639         ost = output_streams[i];
4640         if (ost->encoding_needed) {
4641             av_freep(&ost->enc_ctx->stats_in);
4643         total_packets_written += ost->packets_written;
     /* -abort_on empty_output: fail hard if nothing was ever muxed */
4646     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4647         av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4651     /* close each decoder */
4652     for (i = 0; i < nb_input_streams; i++) {
4653         ist = input_streams[i];
4654         if (ist->decoding_needed) {
4655             avcodec_close(ist->dec_ctx);
4656             if (ist->hwaccel_uninit)
4657                 ist->hwaccel_uninit(ist->dec_ctx);
4661     av_buffer_unref(&hw_device_ctx);
4662     hw_device_free_all();
     /* shared cleanup path (label elided): also runs on error */
4669     free_input_threads();
4672     if (output_streams) {
4673         for (i = 0; i < nb_output_streams; i++) {
4674             ost = output_streams[i];
             /* pass-log file from 2-pass encoding, if any */
4677                 if (fclose(ost->logfile))
4678                     av_log(NULL, AV_LOG_ERROR,
4679                            "Error closing logfile, loss of information possible: %s\n",
4680                            av_err2str(AVERROR(errno)));
4681                 ost->logfile = NULL;
4683             av_freep(&ost->forced_kf_pts);
4684             av_freep(&ost->apad);
4685             av_freep(&ost->disposition);
4686             av_dict_free(&ost->encoder_opts);
4687             av_dict_free(&ost->sws_dict);
4688             av_dict_free(&ost->swr_opts);
4689             av_dict_free(&ost->resample_opts);
/* Return the process's consumed user CPU time in microseconds, using the
 * best available platform API (getrusage, GetProcessTimes, or wall clock
 * as a last resort). Preprocessor guards are partly elided in this listing. */
4697 static int64_t getutime(void)
4700     struct rusage rusage;
4702     getrusage(RUSAGE_SELF, &rusage);
4703     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4704 #elif HAVE_GETPROCESSTIMES
4706     FILETIME c, e, k, u;
4707     proc = GetCurrentProcess();
4708     GetProcessTimes(proc, &c, &e, &k, &u);
     /* FILETIME is in 100ns units; /10 converts to microseconds */
4709     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4711     return av_gettime_relative();
/* Return the peak memory usage of the process in bytes, via getrusage
 * (ru_maxrss is in KiB, hence *1024) or GetProcessMemoryInfo on Windows;
 * the fallback branch is elided in this gapped listing. */
4715 static int64_t getmaxrss(void)
4717 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4718     struct rusage rusage;
4719     getrusage(RUSAGE_SELF, &rusage);
4720     return (int64_t)rusage.ru_maxrss * 1024;
4721 #elif HAVE_GETPROCESSMEMORYINFO
4723     PROCESS_MEMORY_COUNTERS memcounters;
4724     proc = GetCurrentProcess();
4725     memcounters.cb = sizeof(memcounters);
4726     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4727     return memcounters.PeakPagefileUsage;
/* No-op log callback installed in daemon mode ('-d') to silence av_log. */
4733 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register cleanup, parse options, register all
 * components, run transcode(), print benchmarks, and exit via exit_program().
 * NOTE(review): listing is gapped — variable declarations and several
 * braces/exit calls are elided. */
4737 int main(int argc, char **argv)
4744     register_exit(ffmpeg_cleanup);
4746     setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4748     av_log_set_flags(AV_LOG_SKIP_REPEATED);
     /* loglevel must be parsed before any logging happens */
4749     parse_loglevel(argc, argv, options);
4751     if(argc>1 && !strcmp(argv[1], "-d")){
4753         av_log_set_callback(log_callback_null);
     /* register all codecs/formats/devices/filters before option parsing */
4758     avcodec_register_all();
4760     avdevice_register_all();
4762     avfilter_register_all();
4764     avformat_network_init();
4766     show_banner(argc, argv, options);
4768     /* parse options and open all input/output files */
4769     ret = ffmpeg_parse_options(argc, argv);
4773     if (nb_output_files <= 0 && nb_input_files == 0) {
4775         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4779     /* file converter / grab */
4780     if (nb_output_files <= 0) {
4781         av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4785 //     if (nb_input_files == 0) {
4786 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
     /* only outputs other than rtp count toward the benchmark decision
      * (elided conditional follows in the full source) */
4790     for (i = 0; i < nb_output_files; i++) {
4791         if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4795     current_time = ti = getutime();
4796     if (transcode() < 0)
4798     ti = getutime() - ti;
4800         av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4802     av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4803            decode_error_stat[0], decode_error_stat[1]);
     /* -max_error_rate: fail if the decode error ratio exceeds the limit */
4804     if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4807     exit_program(received_nb_signals ? 255 : main_return_code);
4808     return main_return_code;